source: mainline/uspace/drv/bus/usb/xhci/trb_ring.c @ b2dca8de

/*
 * Copyright (c) 2018 Ondrej Hlavaty, Petr Manek, Jaroslav Jindrak, Jan Hrach
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 */

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <libarch/barrier.h>
#include <usb/debug.h>
#include "hw_struct/trb.h"
#include "trb_ring.h"

/* The segment footer at the end of each page: the segment list link and
 * the segment's own physical address. */
#define SEGMENT_FOOTER_SIZE (sizeof(link_t) + sizeof(uintptr_t))

#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_FOOTER_SIZE) / sizeof(xhci_trb_t))
/* The last TRB of every segment is reserved for the Link TRB. */
#define SEGMENT_TRB_USEFUL_COUNT (SEGMENT_TRB_COUNT - 1)

/**
 * A structure representing a segment of a TRB ring.
 */
struct trb_segment {
    xhci_trb_t trb_storage[SEGMENT_TRB_COUNT];

    link_t segments_link;
    uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));

static_assert(sizeof(trb_segment_t) == PAGE_SIZE);

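/*
 * Illustrative arithmetic only (actual values are platform-dependent):
 * on a 64-bit machine with 4 KiB pages, the footer takes
 * sizeof(link_t) + sizeof(uintptr_t) = 16 + 8 = 24 bytes, so one segment
 * stores (4096 - 24) / 16 = 254 TRBs, 253 of them useful. The aligned
 * attribute pads the structure back to exactly PAGE_SIZE, which the
 * static_assert above verifies.
 */
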
/**
 * Get the first TRB of a segment.
 */
static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
    return segment->trb_storage;
}

/**
 * Get the one-past-end TRB of a segment.
 */
static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
    return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

/**
 * Get the first segment of a list of segments.
 */
static inline trb_segment_t *get_first_segment(list_t *segments)
{
    return list_get_instance(list_first(segments), trb_segment_t, segments_link);
}

/**
 * Allocate and initialize a new segment.
 *
 * TODO: When the HC supports 64-bit addressing, there's no need to restrict
 * to DMAMEM_4GiB.
 */
static errno_t trb_segment_alloc(trb_segment_t **segment)
{
    *segment = AS_AREA_ANY;
    uintptr_t phys;

    const int err = dmamem_map_anonymous(PAGE_SIZE,
        DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0,
        &phys, (void **) segment);
    if (err)
        return err;

    memset(*segment, 0, PAGE_SIZE);
    (*segment)->phys = phys;
    usb_log_debug("Allocated new ring segment.");
    return EOK;
}

static void trb_segment_free(trb_segment_t *segment)
{
    dmamem_unmap_anonymous(segment);
}

/**
 * Initialize the ring with enough segments to fit the requested number of
 * TRBs.
 *
 * @param[in] initial_size Number of free TRB slots the ring shall provide;
 *                         0 selects a reasonable default (one page-sized
 *                         segment).
 */
errno_t xhci_trb_ring_init(xhci_trb_ring_t *ring, size_t initial_size)
{
    errno_t err;
    if (initial_size == 0)
        initial_size = SEGMENT_TRB_USEFUL_COUNT;

    list_initialize(&ring->segments);
    size_t segment_count = (initial_size + SEGMENT_TRB_USEFUL_COUNT - 1)
        / SEGMENT_TRB_USEFUL_COUNT;

    for (size_t i = 0; i < segment_count; ++i) {
        struct trb_segment *segment;
        if ((err = trb_segment_alloc(&segment)) != EOK)
            return err;

        list_append(&segment->segments_link, &ring->segments);
        ring->segment_count = i + 1;
    }

    /* Close the ring with a Link TRB carrying the Toggle Cycle flag. */
    trb_segment_t * const segment = get_first_segment(&ring->segments);
    xhci_trb_t *last = segment_end(segment) - 1;
    xhci_trb_link_fill(last, segment->phys);
    TRB_LINK_SET_TC(*last, true);

    ring->enqueue_segment = segment;
    ring->enqueue_trb = segment_begin(segment);
    ring->dequeue = segment->phys;
    ring->pcs = 1;

    fibril_mutex_initialize(&ring->guard);

    return EOK;
}

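/*
 * A minimal usage sketch (illustrative only; in this driver the ring is
 * embedded in the HC/endpoint structures, and the physical address of the
 * first TRB, ring.dequeue, is what gets programmed into the hardware):
 *
 *    xhci_trb_ring_t ring;
 *    errno_t err = xhci_trb_ring_init(&ring, 0); // default: one segment
 *    if (err != EOK)
 *        return err;
 *    // ... hand ring.dequeue over to the HC, enqueue TRBs ...
 *    xhci_trb_ring_fini(&ring);
 */
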
/**
 * Free all segments inside the ring.
 */
void xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
    assert(ring);

    list_foreach_safe(ring->segments, cur, next) {
        trb_segment_t *segment =
            list_get_instance(cur, trb_segment_t, segments_link);
        trb_segment_free(segment);
    }
}

/**
 * When the enqueue pointer targets a Link TRB, resolve it.
 *
 * Relies on segments being in the segment list in linked order.
 *
 * According to section 4.9.2.2, figure 16, Link TRBs cannot be chained, so
 * this does not need to be called in a loop, nor does it need an inner loop.
 */
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
    link_t *next_segment =
        list_next(&ring->enqueue_segment->segments_link, &ring->segments);
    if (!next_segment)
        next_segment = list_first(&ring->segments);
    assert(next_segment);

    ring->enqueue_segment =
        list_get_instance(next_segment, trb_segment_t, segments_link);
    ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

/**
 * Get the physical address of the enqueue pointer.
 */
static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
    size_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
    return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Decide whether the TRB will trigger an interrupt after being processed.
 *
 * Command TRBs always do; transfer TRBs only when their IOC flag is set.
 */
static bool trb_generates_interrupt(xhci_trb_t *trb)
{
    return TRB_TYPE(*trb) >= XHCI_TRB_TYPE_ENABLE_SLOT_CMD
        || TRB_IOC(*trb);
}

/**
 * Enqueue a TD composed of TRBs.
 *
 * This copies the specified number of TRBs chained together into the ring.
 * The cycle flag in the TRBs may be changed.
 *
 * The copied TRBs must be contiguous in memory, and must not contain Link
 * TRBs.
 *
 * We cannot avoid the copying, because the TRBs in the ring must be updated
 * atomically.
 *
 * @param first_trb the first TRB
 * @param trbs number of TRBs to enqueue
 * @param phys returns the physical address of the TRB that will trigger the
 *             completion interrupt (at most one such TRB is allowed)
 * @return EOK on success,
 *         EAGAIN when the ring is too full to fit all TRBs (temporary),
 *         ELIMIT when the TD does not fit in the ring at all.
 */
errno_t xhci_trb_ring_enqueue_multiple(xhci_trb_ring_t *ring, xhci_trb_t *first_trb,
    size_t trbs, uintptr_t *phys)
{
    errno_t err;
    assert(trbs > 0);

    if (trbs > xhci_trb_ring_size(ring))
        return ELIMIT;

    fibril_mutex_lock(&ring->guard);

    xhci_trb_t * const saved_enqueue_trb = ring->enqueue_trb;
    trb_segment_t * const saved_enqueue_segment = ring->enqueue_segment;
    if (phys)
        *phys = (uintptr_t)NULL;

    /*
     * First, do a dry run: advance the enqueue pointer to see whether the
     * ring would be full at any point during the transaction.
     */
    xhci_trb_t *trb = first_trb;
    for (size_t i = 0; i < trbs; ++i, ++trb) {
        if (phys && trb_generates_interrupt(trb)) {
            if (*phys) {
                err = ENOTSUP;
                goto err;
            }
            *phys = trb_ring_enqueue_phys(ring);
        }

        ring->enqueue_trb++;

        if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
            trb_ring_resolve_link(ring);

        if (trb_ring_enqueue_phys(ring) == ring->dequeue) {
            err = EAGAIN;
            goto err;
        }
    }

    ring->enqueue_segment = saved_enqueue_segment;
    ring->enqueue_trb = saved_enqueue_trb;

    /*
     * Now, copy the TRBs without further checking.
     */
    trb = first_trb;
    for (size_t i = 0; i < trbs; ++i, ++trb) {
        TRB_SET_CYCLE(*trb, ring->pcs);
        xhci_trb_copy_to_pio(ring->enqueue_trb, trb);

        usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
        ring->enqueue_trb++;

        if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
            TRB_SET_CYCLE(*ring->enqueue_trb, ring->pcs);

            if (TRB_LINK_TC(*ring->enqueue_trb)) {
                ring->pcs = !ring->pcs;
                usb_log_debug("TRB ring(%p): PCS toggled", ring);
            }

            trb_ring_resolve_link(ring);
        }
    }

    fibril_mutex_unlock(&ring->guard);
    return EOK;

err:
    ring->enqueue_segment = saved_enqueue_segment;
    ring->enqueue_trb = saved_enqueue_trb;
    fibril_mutex_unlock(&ring->guard);
    return err;
}

/**
 * Enqueue a TD composed of a single TRB. See xhci_trb_ring_enqueue_multiple().
 */
errno_t xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td, uintptr_t *phys)
{
    return xhci_trb_ring_enqueue_multiple(ring, td, 1, phys);
}

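/*
 * Enqueue sketch (illustrative; the TRB field macros come from
 * hw_struct/trb.h, and the exact setter names used here are assumptions):
 *
 *    xhci_trb_t trb;
 *    memset(&trb, 0, sizeof(trb));
 *    TRB_SET_TYPE(trb, XHCI_TRB_TYPE_NO_OP_CMD);
 *    uintptr_t phys;
 *    errno_t err = xhci_trb_ring_enqueue(&ring, &trb, &phys);
 *    if (err == EAGAIN) {
 *        // ring full; wait for the HC to consume TRBs, then retry
 *    }
 *    // on EOK, phys identifies this TRB in its completion event
 */
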
/**
 * Update the ring's dequeue state to match its enqueue state, and optionally
 * return the address to be reported to the HC.
 */
void xhci_trb_ring_reset_dequeue_state(xhci_trb_ring_t *ring, uintptr_t *addr)
{
    assert(ring);

    ring->dequeue = trb_ring_enqueue_phys(ring);

    if (addr)
        /* The cycle state (DCS) is encoded in bit 0 of the pointer. */
        *addr = ring->dequeue | ring->pcs;
}

/**
 * Get the useful capacity of the ring, in TRBs.
 */
size_t xhci_trb_ring_size(xhci_trb_ring_t *ring)
{
    return ring->segment_count * SEGMENT_TRB_USEFUL_COUNT;
}

/**
 * Initialize an event ring.
 *
 * @param[in] initial_size Number of TRB slots the ring shall provide;
 *                         0 selects a reasonable default (one page-sized
 *                         segment).
 */
errno_t xhci_event_ring_init(xhci_event_ring_t *ring, size_t initial_size)
{
    errno_t err;
    if (initial_size == 0)
        initial_size = SEGMENT_TRB_COUNT;

    list_initialize(&ring->segments);

    size_t segment_count = (initial_size + SEGMENT_TRB_COUNT - 1) / SEGMENT_TRB_COUNT;
    size_t erst_size = segment_count * sizeof(xhci_erst_entry_t);

    if (dma_buffer_alloc(&ring->erst, erst_size)) {
        xhci_event_ring_fini(ring);
        return ENOMEM;
    }

    xhci_erst_entry_t *erst = ring->erst.virt;
    memset(erst, 0, erst_size);

    for (size_t i = 0; i < segment_count; i++) {
        trb_segment_t *segment;
        if ((err = trb_segment_alloc(&segment)) != EOK) {
            xhci_event_ring_fini(ring);
            return err;
        }

        list_append(&segment->segments_link, &ring->segments);
        ring->segment_count = i + 1;
        xhci_fill_erst_entry(&erst[i], segment->phys, SEGMENT_TRB_COUNT);
    }

    fibril_mutex_initialize(&ring->guard);

    usb_log_debug("Initialized event ring.");
    return EOK;
}

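/*
 * The ERST built here is what the hardware consumes; the interrupter setup
 * code elsewhere in the driver hands it over, conceptually:
 *
 *    ERSTSZ  <- ring->segment_count          (number of ERST entries)
 *    ERSTBA  <- physical address of ring->erst
 *    ERDP    <- ring->dequeue_ptr            (initial dequeue position)
 *
 * (Register names per the xHCI specification; the actual register accessors
 * are outside the scope of this file.)
 */
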
void xhci_event_ring_reset(xhci_event_ring_t *ring)
{
    list_foreach(ring->segments, segments_link, trb_segment_t, segment)
        memset(segment->trb_storage, 0, sizeof(segment->trb_storage));

    trb_segment_t * const segment = get_first_segment(&ring->segments);
    ring->dequeue_segment = segment;
    ring->dequeue_trb = segment_begin(segment);
    ring->dequeue_ptr = segment->phys;
    ring->ccs = 1;
}

void xhci_event_ring_fini(xhci_event_ring_t *ring)
{
    list_foreach_safe(ring->segments, cur, next) {
        trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
        trb_segment_free(segment);
    }

    dma_buffer_free(&ring->erst);
}

/**
 * Get the physical address of the dequeue pointer.
 */
static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
    uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
    return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Fill the event with the next valid event from the ring.
 *
 * @param event pointer to event to be overwritten
 * @return EOK on success,
 *         ENOENT when the ring is empty
 */
errno_t xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
    fibril_mutex_lock(&ring->guard);

    /*
     * The ERDP reported to the HC is half a phase behind the one we need
     * to maintain, so we keep it separately.
     */
    ring->dequeue_ptr = event_ring_dequeue_phys(ring);

    if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) {
        fibril_mutex_unlock(&ring->guard);
        return ENOENT; /* The ring is empty. */
    }

    /* Do not reorder the Cycle bit reading with the memcpy. */
    read_barrier();

    memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

    ring->dequeue_trb++;
    const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

    /* Wrapping around segment boundary */
    if (index >= SEGMENT_TRB_COUNT) {
        link_t *next_segment =
            list_next(&ring->dequeue_segment->segments_link, &ring->segments);

        /* Wrapping around table boundary */
        if (!next_segment) {
            next_segment = list_first(&ring->segments);
            ring->ccs = !ring->ccs;
        }

        ring->dequeue_segment =
            list_get_instance(next_segment, trb_segment_t, segments_link);
        ring->dequeue_trb = segment_begin(ring->dequeue_segment);
    }

    fibril_mutex_unlock(&ring->guard);
    return EOK;
}

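/*
 * A typical consumer drains the ring until it is empty, e.g. from the
 * interrupt handling fibril (sketch; handle_event is hypothetical):
 *
 *    xhci_trb_t trb;
 *    while (xhci_event_ring_dequeue(&ring, &trb) == EOK)
 *        handle_event(&trb);
 *    // afterwards, report ring.dequeue_ptr back to the HC via ERDP
 */
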
void xhci_sw_ring_init(xhci_sw_ring_t *ring, size_t size)
{
    ring->begin = calloc(size, sizeof(xhci_trb_t));
    ring->end = ring->begin + size;

    fibril_mutex_initialize(&ring->guard);
    fibril_condvar_initialize(&ring->enqueued_cv);
    fibril_condvar_initialize(&ring->dequeued_cv);

    xhci_sw_ring_restart(ring);
}

errno_t xhci_sw_ring_enqueue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
    assert(ring);
    assert(trb);

    fibril_mutex_lock(&ring->guard);
    /* A set Cycle bit marks the slot as full; wait until it is consumed. */
    while (ring->running && TRB_CYCLE(*ring->enqueue))
        fibril_condvar_wait(&ring->dequeued_cv, &ring->guard);

    *ring->enqueue = *trb;
    TRB_SET_CYCLE(*ring->enqueue, 1);
    if (++ring->enqueue == ring->end)
        ring->enqueue = ring->begin;
    fibril_condvar_signal(&ring->enqueued_cv);
    fibril_mutex_unlock(&ring->guard);

    return ring->running ? EOK : EINTR;
}

errno_t xhci_sw_ring_dequeue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
    assert(ring);
    assert(trb);

    fibril_mutex_lock(&ring->guard);
    while (ring->running && !TRB_CYCLE(*ring->dequeue))
        fibril_condvar_wait(&ring->enqueued_cv, &ring->guard);

    *trb = *ring->dequeue;
    TRB_SET_CYCLE(*ring->dequeue, 0);
    if (++ring->dequeue == ring->end)
        ring->dequeue = ring->begin;
    fibril_condvar_signal(&ring->dequeued_cv);
    fibril_mutex_unlock(&ring->guard);

    return ring->running ? EOK : EINTR;
}

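/*
 * The SW ring is a blocking bounded queue between fibrils; the Cycle bit of
 * each slot doubles as a "slot occupied" flag. Sketch of both sides
 * (process_trb is hypothetical):
 *
 *    // producer, e.g. the event loop:
 *    if (xhci_sw_ring_enqueue(&sw_ring, &trb) == EINTR)
 *        return; // ring was stopped while we waited
 *
 *    // consumer fibril:
 *    xhci_trb_t trb;
 *    while (xhci_sw_ring_dequeue(&sw_ring, &trb) == EOK)
 *        process_trb(&trb);
 */
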
void xhci_sw_ring_stop(xhci_sw_ring_t *ring)
{
    ring->running = false;
    fibril_condvar_broadcast(&ring->enqueued_cv);
    fibril_condvar_broadcast(&ring->dequeued_cv);
}

void xhci_sw_ring_restart(xhci_sw_ring_t *ring)
{
    ring->enqueue = ring->dequeue = ring->begin;
    memset(ring->begin, 0, sizeof(xhci_trb_t) * (ring->end - ring->begin));
    ring->running = true;
}

void xhci_sw_ring_fini(xhci_sw_ring_t *ring)
{
    free(ring->begin);
}

/**
 * @}
 */