Ticket #856: trb_ring.c

File trb_ring.c, 13.6 KB (added by Colin Parker, 15 months ago)

uspace/drv/bus/usb/trb_ring.c showing timing modifications to trigger the issue

/*
 * Copyright (c) 2018 Ondrej Hlavaty, Petr Manek, Jaroslav Jindrak, Jan Hrach
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <assert.h>
#include <ddi.h>
#include <as.h>
#include <align.h>
#include <barrier.h>
#include <usb/debug.h>
#include "hw_struct/trb.h"
#include "trb_ring.h"

/**
 * A structure representing a segment of a TRB ring.
 */

#define SEGMENT_FOOTER_SIZE (sizeof(link_t) + sizeof(uintptr_t))

#define SEGMENT_TRB_COUNT ((PAGE_SIZE - SEGMENT_FOOTER_SIZE) / sizeof(xhci_trb_t))
#define SEGMENT_TRB_USEFUL_COUNT (SEGMENT_TRB_COUNT - 1)
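
/*
 * One slot per segment is reserved for the Link TRB that closes the ring
 * (see xhci_trb_ring_init), hence SEGMENT_TRB_USEFUL_COUNT. The event ring
 * uses no Link TRBs (it is described by the ERST instead), so it may use the
 * full SEGMENT_TRB_COUNT.
 */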

struct trb_segment {
	xhci_trb_t trb_storage[SEGMENT_TRB_COUNT];

	link_t segments_link;
	uintptr_t phys;
} __attribute__((aligned(PAGE_SIZE)));

static_assert(sizeof(trb_segment_t) == PAGE_SIZE, "");

/**
 * Get the first TRB of a segment.
 */
static inline xhci_trb_t *segment_begin(trb_segment_t *segment)
{
	return segment->trb_storage;
}

/**
 * Get the one-past-end TRB of a segment.
 */
static inline xhci_trb_t *segment_end(trb_segment_t *segment)
{
	return segment_begin(segment) + SEGMENT_TRB_COUNT;
}

/**
 * Return the first segment of a list of segments.
 */
static inline trb_segment_t *get_first_segment(list_t *segments)
{
	return list_get_instance(list_first(segments), trb_segment_t, segments_link);
}

/**
 * Allocate and initialize a new segment.
 *
 * TODO: When the HC supports 64-bit addressing, there's no need to restrict
 * to DMAMEM_4GiB.
 */
static errno_t trb_segment_alloc(trb_segment_t **segment)
{
	*segment = AS_AREA_ANY;
	uintptr_t phys;

	const int err = dmamem_map_anonymous(PAGE_SIZE,
	    DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0,
	    &phys, (void **) segment);
	if (err)
		return err;

	memset(*segment, 0, PAGE_SIZE);
	(*segment)->phys = phys;
	usb_log_debug("Allocated new ring segment.");
	return EOK;
}

static void trb_segment_free(trb_segment_t *segment)
{
	dmamem_unmap_anonymous(segment);
}

/**
 * Initialize the ring with enough segments to hold the requested number of
 * TRBs (a single segment by default).
 *
 * @param[in] initial_size The number of free slots on the ring; 0 leaves the
 *                         choice to a reasonable default (one page-sized
 *                         segment).
 */
errno_t xhci_trb_ring_init(xhci_trb_ring_t *ring, size_t initial_size)
{
	errno_t err;
	if (initial_size == 0)
		initial_size = SEGMENT_TRB_USEFUL_COUNT;

	list_initialize(&ring->segments);
	size_t segment_count = (initial_size + SEGMENT_TRB_USEFUL_COUNT - 1) /
	    SEGMENT_TRB_USEFUL_COUNT;

	for (size_t i = 0; i < segment_count; ++i) {
		struct trb_segment *segment;
		if ((err = trb_segment_alloc(&segment)) != EOK)
			return err;

		list_append(&segment->segments_link, &ring->segments);
		ring->segment_count = i + 1;
	}

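	/*
	 * Terminate the first segment with a Link TRB pointing back to its
	 * start. The Toggle Cycle bit makes the HC flip its cycle state each
	 * time it wraps around the ring.
	 */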
	trb_segment_t *const segment = get_first_segment(&ring->segments);
	xhci_trb_t *last = segment_end(segment) - 1;
	xhci_trb_link_fill(last, segment->phys);
	TRB_LINK_SET_TC(*last, true);

	ring->enqueue_segment = segment;
	ring->enqueue_trb = segment_begin(segment);
	ring->dequeue = segment->phys;
	ring->pcs = 1;

	fibril_mutex_initialize(&ring->guard);

	return EOK;
}

/**
 * Free all segments inside the ring.
 */
void xhci_trb_ring_fini(xhci_trb_ring_t *ring)
{
	assert(ring);

	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment =
		    list_get_instance(cur, trb_segment_t, segments_link);
		trb_segment_free(segment);
	}
}

/**
 * When the enqueue pointer targets a Link TRB, resolve it.
 *
 * Relies on segments being stored in the segment list in linked order.
 *
 * According to section 4.9.2.2, figure 16, Link TRBs cannot be chained, so
 * this function neither needs to be called in a loop, nor does it contain
 * one.
 */
static void trb_ring_resolve_link(xhci_trb_ring_t *ring)
{
	link_t *next_segment =
	    list_next(&ring->enqueue_segment->segments_link, &ring->segments);
	if (!next_segment)
		next_segment = list_first(&ring->segments);
	assert(next_segment);

	ring->enqueue_segment =
	    list_get_instance(next_segment, trb_segment_t, segments_link);
	ring->enqueue_trb = segment_begin(ring->enqueue_segment);
}

/**
 * Get the physical address of the enqueue pointer.
 */
static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring)
{
	size_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment);
	return ring->enqueue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Decides whether the TRB will trigger an interrupt after being processed.
 */
static bool trb_generates_interrupt(xhci_trb_t *trb)
{
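	/*
	 * All command TRBs (types from Enable Slot Command up) trigger an
	 * interrupt on completion; other TRBs only do so when their IOC flag
	 * is set.
	 */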
	return TRB_TYPE(*trb) >= XHCI_TRB_TYPE_ENABLE_SLOT_CMD ||
	    TRB_IOC(*trb);
}

/**
 * Enqueue a TD composed of TRBs.
 *
 * This copies the specified number of TRBs, chained together, into the ring.
 * The cycle flag in the TRBs may be changed.
 *
 * The copied TRBs must be contiguous in memory, and must not contain Link
 * TRBs.
 *
 * We cannot avoid the copying, because the TRBs in the ring have to be
 * updated atomically.
 *
 * @param first_trb the first TRB
 * @param trbs number of TRBs to enqueue
 * @param phys returns the physical address of the interrupt-generating TRB
 *             (typically the last TRB of the TD; at most one TRB of the TD
 *             may generate an interrupt)
 * @return EOK on success,
 *         EAGAIN when the ring is too full to fit all TRBs (temporary),
 *         ELIMIT when the TD can never fit on the ring,
 *         ENOTSUP when more than one TRB of the TD would generate an
 *         interrupt.
 */
errno_t xhci_trb_ring_enqueue_multiple(xhci_trb_ring_t *ring, xhci_trb_t *first_trb,
    size_t trbs, uintptr_t *phys)
{
	errno_t err;
	assert(trbs > 0);

	if (trbs > xhci_trb_ring_size(ring))
		return ELIMIT;

	fibril_mutex_lock(&ring->guard);

	xhci_trb_t *const saved_enqueue_trb = ring->enqueue_trb;
	trb_segment_t *const saved_enqueue_segment = ring->enqueue_segment;
	if (phys)
		*phys = (uintptr_t)NULL;

	/*
	 * First, do a dry run: advance the enqueue pointer to see if the ring
	 * would be full at any point during the transaction.
	 */
	xhci_trb_t *trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		if (phys && trb_generates_interrupt(trb)) {
			if (*phys) {
				err = ENOTSUP;
				goto err;
			}
			*phys = trb_ring_enqueue_phys(ring);
		}

		ring->enqueue_trb++;

		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK)
			trb_ring_resolve_link(ring);

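		/*
		 * The ring is full if advancing the enqueue pointer would
		 * make it catch up with the dequeue pointer.
		 */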
		if (trb_ring_enqueue_phys(ring) == ring->dequeue) {
			err = EAGAIN;
			goto err;
		}
	}

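	/* The dry run went through; rewind the enqueue pointer for the copy. */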
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;

	/*
	 * Now, copy the TRBs without further checking.
	 */
	trb = first_trb;
	for (size_t i = 0; i < trbs; ++i, ++trb) {
		TRB_SET_CYCLE(*trb, ring->pcs);
		xhci_trb_copy_to_pio(ring->enqueue_trb, trb);

		usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb);
		ring->enqueue_trb++;

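		/*
		 * On reaching the Link TRB, hand it over to the HC by giving
		 * it our cycle bit; if its Toggle Cycle flag is set, flip the
		 * producer cycle state before wrapping to the next segment.
		 */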
		if (TRB_TYPE(*ring->enqueue_trb) == XHCI_TRB_TYPE_LINK) {
			TRB_SET_CYCLE(*ring->enqueue_trb, ring->pcs);

			if (TRB_LINK_TC(*ring->enqueue_trb)) {
				ring->pcs = !ring->pcs;
				usb_log_debug("TRB ring(%p): PCS toggled", ring);
			}

			trb_ring_resolve_link(ring);
		}
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;

err:
	ring->enqueue_segment = saved_enqueue_segment;
	ring->enqueue_trb = saved_enqueue_trb;
	fibril_mutex_unlock(&ring->guard);
	return err;
}

/**
 * Enqueue a TD composed of a single TRB. See xhci_trb_ring_enqueue_multiple().
 */
errno_t xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td, uintptr_t *phys)
{
	return xhci_trb_ring_enqueue_multiple(ring, td, 1, phys);
}

void xhci_trb_ring_reset_dequeue_state(xhci_trb_ring_t *ring, uintptr_t *addr)
{
	assert(ring);

	ring->dequeue = trb_ring_enqueue_phys(ring);

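	/*
	 * Bit 0 of the address handed to the caller carries the Dequeue Cycle
	 * State, matching the format expected by the Set TR Dequeue Pointer
	 * command.
	 */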
	if (addr)
		*addr = ring->dequeue | ring->pcs;
}

size_t xhci_trb_ring_size(xhci_trb_ring_t *ring)
{
	return ring->segment_count * SEGMENT_TRB_USEFUL_COUNT;
}

/**
 * Initialize an event ring.
 *
 * @param[in] initial_size The number of free slots on the ring; 0 leaves the
 *                         choice to a reasonable default (one page-sized
 *                         segment).
 */
errno_t xhci_event_ring_init(xhci_event_ring_t *ring, size_t initial_size)
{
	errno_t err;
	if (initial_size == 0)
		initial_size = SEGMENT_TRB_COUNT;

	list_initialize(&ring->segments);

	size_t segment_count = (initial_size + SEGMENT_TRB_COUNT - 1) / SEGMENT_TRB_COUNT;
	size_t erst_size = segment_count * sizeof(xhci_erst_entry_t);

	if (dma_buffer_alloc(&ring->erst, erst_size)) {
		xhci_event_ring_fini(ring);
		return ENOMEM;
	}

	xhci_erst_entry_t *erst = ring->erst.virt;
	memset(erst, 0, erst_size);

	for (size_t i = 0; i < segment_count; i++) {
		trb_segment_t *segment;
		if ((err = trb_segment_alloc(&segment)) != EOK) {
			xhci_event_ring_fini(ring);
			return err;
		}

		list_append(&segment->segments_link, &ring->segments);
		ring->segment_count = i + 1;
		xhci_fill_erst_entry(&erst[i], segment->phys, SEGMENT_TRB_COUNT);
	}

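	/*
	 * Timing modification for this ticket: an artificial delay inserted
	 * here to make the issue reproducible (see the attachment
	 * description above).
	 */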
	fibril_usleep(10000);

	fibril_mutex_initialize(&ring->guard);

	usb_log_debug("Initialized event ring.");
	return EOK;
}

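/**
 * Reset the event ring to its initial state. Zeroing the TRBs clears their
 * cycle bits, so with CCS = 1 the whole ring reads as empty again.
 */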
void xhci_event_ring_reset(xhci_event_ring_t *ring)
{
	list_foreach(ring->segments, segments_link, trb_segment_t, segment)
		memset(segment->trb_storage, 0, sizeof(segment->trb_storage));

	trb_segment_t *const segment = get_first_segment(&ring->segments);
	ring->dequeue_segment = segment;
	ring->dequeue_trb = segment_begin(segment);
	ring->dequeue_ptr = segment->phys;
	ring->ccs = 1;
}

void xhci_event_ring_fini(xhci_event_ring_t *ring)
{
	list_foreach_safe(ring->segments, cur, next) {
		trb_segment_t *segment = list_get_instance(cur, trb_segment_t, segments_link);
		trb_segment_free(segment);
	}

	dma_buffer_free(&ring->erst);
}

/**
 * Get the physical address of the dequeue pointer.
 */
static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring)
{
	uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment);
	return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t);
}

/**
 * Fill the event with the next valid event from the ring.
 *
 * @param event pointer to the event to be overwritten
 * @return EOK on success,
 *         ENOENT when the ring is empty
 */
errno_t xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event)
{
	fibril_mutex_lock(&ring->guard);

	/*
	 * The ERDP reported to the HC lags half a phase behind the dequeue
	 * pointer we need to maintain, hence we keep track of it separately.
	 */
	ring->dequeue_ptr = event_ring_dequeue_phys(ring);

	if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) {
		fibril_mutex_unlock(&ring->guard);
		return ENOENT; /* The ring is empty. */
	}

	/* Do not reorder the Cycle bit reading with memcpy */
	read_barrier();

	memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t));

	ring->dequeue_trb++;
	const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment);

	/* Wrapping around segment boundary */
	if (index >= SEGMENT_TRB_COUNT) {
		link_t *next_segment =
		    list_next(&ring->dequeue_segment->segments_link, &ring->segments);

		/* Wrapping around table boundary */
		if (!next_segment) {
			next_segment = list_first(&ring->segments);
			ring->ccs = !ring->ccs;
		}

		ring->dequeue_segment =
		    list_get_instance(next_segment, trb_segment_t, segments_link);
		ring->dequeue_trb = segment_begin(ring->dequeue_segment);
	}

	fibril_mutex_unlock(&ring->guard);
	return EOK;
}

void xhci_sw_ring_init(xhci_sw_ring_t *ring, size_t size)
{
	ring->begin = calloc(size, sizeof(xhci_trb_t));
	ring->end = ring->begin + size;

	fibril_mutex_initialize(&ring->guard);
	fibril_condvar_initialize(&ring->enqueued_cv);
	fibril_condvar_initialize(&ring->dequeued_cv);

	xhci_sw_ring_restart(ring);
}

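/*
 * The software ring reuses the TRB Cycle bit as an occupancy flag: enqueue
 * waits for a slot with the bit clear and sets it, dequeue waits for a slot
 * with the bit set and clears it.
 */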
errno_t xhci_sw_ring_enqueue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
	assert(ring);
	assert(trb);

	fibril_mutex_lock(&ring->guard);
	while (ring->running && TRB_CYCLE(*ring->enqueue))
		fibril_condvar_wait(&ring->dequeued_cv, &ring->guard);

	*ring->enqueue = *trb;
	TRB_SET_CYCLE(*ring->enqueue, 1);
	if (++ring->enqueue == ring->end)
		ring->enqueue = ring->begin;
	fibril_condvar_signal(&ring->enqueued_cv);
	fibril_mutex_unlock(&ring->guard);

	return ring->running ? EOK : EINTR;
}

errno_t xhci_sw_ring_dequeue(xhci_sw_ring_t *ring, xhci_trb_t *trb)
{
	assert(ring);
	assert(trb);

	fibril_mutex_lock(&ring->guard);
	while (ring->running && !TRB_CYCLE(*ring->dequeue))
		fibril_condvar_wait(&ring->enqueued_cv, &ring->guard);

	*trb = *ring->dequeue;
	TRB_SET_CYCLE(*ring->dequeue, 0);
	if (++ring->dequeue == ring->end)
		ring->dequeue = ring->begin;
	fibril_condvar_signal(&ring->dequeued_cv);
	fibril_mutex_unlock(&ring->guard);

	return ring->running ? EOK : EINTR;
}

void xhci_sw_ring_stop(xhci_sw_ring_t *ring)
{
	ring->running = false;
	fibril_condvar_broadcast(&ring->enqueued_cv);
	fibril_condvar_broadcast(&ring->dequeued_cv);
}

void xhci_sw_ring_restart(xhci_sw_ring_t *ring)
{
	ring->enqueue = ring->dequeue = ring->begin;
	memset(ring->begin, 0, sizeof(xhci_trb_t) * (ring->end - ring->begin));
	ring->running = true;
}

void xhci_sw_ring_fini(xhci_sw_ring_t *ring)
{
	free(ring->begin);
}

/**
 * @}
 */