/*
 * Copyright (c) 2018 Ondrej Hlavaty, Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief Isochronous transfer handling for the host controller.
 */

#include <str_error.h>
#include <macros.h>

#include "endpoint.h"
#include "hw_struct/trb.h"
#include "hw_struct/regs.h"
#include "trb_ring.h"
#include "hc.h"
#include "bus.h"

#include "isoch.h"

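/*
 * Overview (summarizing the code below): every isochronous endpoint owns a
 * small ring of buffers. A buffer cycles through the states
 * ISOCH_EMPTY -> ISOCH_FILLED -> ISOCH_FED -> ISOCH_COMPLETE and back to
 * ISOCH_EMPTY. OUT transfers fill buffers that are then fed to the xHC; IN
 * transfers feed empty buffers and withdraw completed ones. The feeding
 * timer defers TRBs that would be scheduled too soon, and the reset timer
 * recovers the data flow when the hardware stops delivering events.
 */
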
void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_initialize(&isoch->guard);
	fibril_condvar_initialize(&isoch->avail);

	const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;

	/*
	 * We shall cover at least twice the IST period, otherwise we will get
	 * an over/underrun every time.
	 */
	isoch->buffer_count = (2 * hc->ist) / ep->interval;

	/* 2 buffers are the very minimum. */
	isoch->buffer_count = max(2, isoch->buffer_count);

	usb_log_debug("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
}

static void isoch_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		isoch->transfers[i].state = ISOCH_EMPTY;
	}

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch->last_mf = -1U;
	usb_log_info("[isoch] Endpoint " XHCI_EP_FMT ": Data flow reset.",
	    XHCI_EP_ARGS(*ep));
}

static void isoch_reset_no_timer(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));
	/*
	 * As a timer cannot be cleared from within its own handler, clearing
	 * the reset timer must stay out of the common reset path.
	 */
	fibril_timer_clear_locked(isoch->reset_timer);
	isoch_reset(ep);
}

static void isoch_reset_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_reset(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/*
 * Fast transfers could trigger the reset timer before the data is processed,
 * leading to a false reset.
 */
#define RESET_TIMER_DELAY 100000
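/*
 * The delay below is the time after which all currently fed buffers must
 * have been consumed (buffer_count ESIT periods of interval * 125 us each),
 * plus the 100 ms margin above. E.g., 8 buffers with an interval of
 * 8 uframes add 8 ms to the margin.
 */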
static void timer_schedule_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	const suseconds_t delay = isoch->buffer_count * ep->interval * 125
	    + RESET_TIMER_DELAY;

	fibril_timer_clear_locked(isoch->reset_timer);
	fibril_timer_set_locked(isoch->reset_timer, delay,
	    isoch_reset_timer, ep);
}

void isoch_fini(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (isoch->feeding_timer) {
		fibril_timer_clear(isoch->feeding_timer);
		fibril_timer_destroy(isoch->feeding_timer);
	}
	if (isoch->reset_timer) {
		fibril_timer_clear(isoch->reset_timer);
		fibril_timer_destroy(isoch->reset_timer);
	}

	if (isoch->transfers) {
		for (size_t i = 0; i < isoch->buffer_count; ++i)
			dma_buffer_free(&isoch->transfers[i].data);
		free(isoch->transfers);
	}
}

/**
 * Allocate isochronous buffers. Create the feeding and reset timers.
 */
errno_t isoch_alloc_transfers(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	isoch->feeding_timer = fibril_timer_create(&isoch->guard);
	isoch->reset_timer = fibril_timer_create(&isoch->guard);
	if (!isoch->feeding_timer || !isoch->reset_timer)
		goto err;

	isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
	if (!isoch->transfers)
		goto err;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
		if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
			goto err;
		}
	}

	fibril_mutex_lock(&isoch->guard);
	isoch_reset_no_timer(ep);
	fibril_mutex_unlock(&isoch->guard);

	return EOK;
err:
	isoch_fini(ep);
	return ENOMEM;
}

static errno_t schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	trb.parameter = host2xhci(64, dma_buffer_phys_base(&it->data));
	TRB_CTRL_SET_XFER_LEN(trb, it->size);
	TRB_CTRL_SET_TD_SIZE(trb, 0);
	TRB_CTRL_SET_IOC(trb, 1);
	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);

	/* See xHCI spec 4.14.1 and 4.11.2.3 for how to calculate these fields. */
	size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
	size_t tbc = tdpc / ep->max_burst;
	if (tdpc % ep->max_burst == 0)
		--tbc;
	size_t bsp = tdpc % ep->max_burst;
	size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
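	/*
	 * Worked example: with max_burst = 3 and a 5120 B transfer, tdpc = 5
	 * packets, which makes two bursts: tbc = 1, bsp = 2, and tlbpc = 1
	 * (the last burst carries two packets).
	 */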

	TRB_ISOCH_SET_TBC(trb, tbc);
	TRB_ISOCH_SET_TLBPC(trb, tlbpc);
	TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);

	return xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
}

/** The number of bits in which the hardware stores MFINDEX */
#define EPOCH_BITS 14
/** The delay in usec after which an epoch wrap is assumed */
#define EPOCH_DELAY 500000
/** MFINDEX values below this many microframes are checked for a missed wrap */
#define EPOCH_LOW_MFINDEX (8 * 100)

static inline uint64_t get_system_time(void)
{
	struct timeval tv;
	getuptime(&tv);
	return ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
}

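/*
 * Note on the arithmetic below: a 14-bit MFINDEX wraps every
 * 2^14 * 125 us = 2.048 s. The HC keeps wrap_count and wrap_time updated
 * from the wrap events, and the epoch is the number of wraps observed, so
 * (epoch << EPOCH_BITS) + reg_mfindex extends the register to 64 bits.
 */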
static inline uint64_t get_current_microframe(const xhci_hc_t *hc)
{
	const uint32_t reg_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
	/*
	 * If the mfindex is low and too much time has passed since the last
	 * mfindex wrap, we have already entered the new epoch (and have not
	 * received the wrap event yet).
	 */
	uint64_t epoch = hc->wrap_count;
	if (reg_mfindex < EPOCH_LOW_MFINDEX
	    && get_system_time() - hc->wrap_time > EPOCH_DELAY) {
		++epoch;
	}
	return (epoch << EPOCH_BITS) + reg_mfindex;
}

static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_isoch_t * const isoch = ep->isoch;
	if (isoch->last_mf == -1U) {
		const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
		const xhci_hc_t *hc = bus->hc;

		/*
		 * Delay the first frame by some time to fill the buffer, but
		 * at most by 10 milliseconds.
		 */
		const uint64_t delay = min(isoch->buffer_count * ep->interval, 10 * 8);
		it->mfindex = get_current_microframe(hc) + 1 + delay + hc->ist;

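		/*
		 * The bit-mask rounding below assumes ep->interval is a power
		 * of two (xHCI expresses isochronous intervals as 2^n
		 * microframes): adding interval - 1 and masking rounds
		 * mfindex up to the next ESIT boundary.
		 */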
		// Align to ESIT start boundary
		it->mfindex += ep->interval - 1;
		it->mfindex &= ~(ep->interval - 1);
	} else {
		it->mfindex = isoch->last_mf + ep->interval;
	}
}

/** 895 ms in uframes */
#define END_FRAME_DELAY (895000 / 125)

typedef enum {
	WINDOW_TOO_SOON,
	WINDOW_INSIDE,
	WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
	window_position_t position;
	uint64_t offset;
} window_decision_t;

/**
 * Decide on the position of mfindex relative to the window specified by
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and, in case the mfindex lies outside the window, also the
 * number of uframes it is off by.
 */
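/*
 * Example: with current_mf = 10000 and ist = 8, mfindex values in
 * [10009, 17160] fall inside the window; END_FRAME_DELAY is 7160 uframes
 * (895 ms).
 */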
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc,
    uint64_t mfindex)
{
	const uint64_t current_mf = get_current_microframe(hc);
	const uint64_t start = current_mf + hc->ist + 1;
	const uint64_t end = current_mf + END_FRAME_DELAY;

	if (mfindex < start) {
		res->position = WINDOW_TOO_LATE;
		res->offset = start - mfindex;
	} else if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else {
		res->position = WINDOW_TOO_SOON;
		res->offset = mfindex - end;
	}
}

static void isoch_feed_out_timer(void *);
static void isoch_feed_in_timer(void *);

/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to xHCI spec 4.11.2.5, we cannot just push all the TRBs we have.
 * We must push them neither too late nor too soon.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];

		assert(it->state == ISOCH_FILLED);

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug("[isoch] delaying feeding buffer %zu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_out_timer, ep);
			goto out;
		}

		case WINDOW_INSIDE:
			usb_log_debug("[isoch] feeding buffer %zu at 0x%llx",
			    it - isoch->transfers, it->mfindex);
			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;

		case WINDOW_TOO_LATE:
			/*
			 * Missed the opportunity to schedule. Just mark this
			 * transfer as skipped.
			 */
			usb_log_debug("[isoch] missed feeding buffer %zu at 0x%llx by "
			    "%llu uframes", it - isoch->transfers, it->mfindex, wd.offset);
			it->state = ISOCH_COMPLETE;
			it->error = EOK;
			it->size = 0;

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;
		}
	}

out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/*
		 * The ring may be dead. If no event arrives within the delay,
		 * reset the endpoint.
		 */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_out_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_out(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to xHCI spec 4.11.2.5, we cannot just push all the TRBs we have.
 * We must push them neither too late nor too soon.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];

		/* IN buffers are "filled" with free space */
		if (it->state == ISOCH_EMPTY) {
			it->size = ep->base.max_transfer_size;
			it->state = ISOCH_FILLED;
			calc_next_mfindex(ep, it);
		}

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Not allowed to feed yet. Defer to later. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug("[isoch] delaying feeding buffer %zu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_in_timer, ep);
			goto out;
		}

		case WINDOW_TOO_LATE:
			usb_log_debug("[isoch] missed feeding buffer %zu at 0x%llx by "
			    "%llu uframes", it - isoch->transfers, it->mfindex, wd.offset);
			/* Missed the opportunity to schedule. Schedule ASAP. */
			it->mfindex += wd.offset;
			// Align to ESIT start boundary
			it->mfindex += ep->interval - 1;
			it->mfindex &= ~(ep->interval - 1);

			/* fallthrough */
		case WINDOW_INSIDE:
			isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
			isoch->last_mf = it->mfindex;

			usb_log_debug("[isoch] feeding buffer %zu at 0x%llx",
			    it - isoch->transfers, it->mfindex);

			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}
			break;
		}
	}

out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/*
		 * The ring may be dead. If no event arrives within the delay,
		 * reset the endpoint.
		 */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_in_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_in(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * First, withdraw all results (at least one) left by previous transfers to
 * make room in the ring, stopping on the first error.
 *
 * Once at least one buffer is free, fill it with data, then try to feed it
 * to the xHC.
 */
errno_t isoch_schedule_out(xhci_transfer_t *transfer)
{
	errno_t err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* This shall have been checked by the endpoint already */
	assert(transfer->batch.size <= ep->base.max_transfer_size);

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transferred_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		transfer->batch.transferred_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule the next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mf = it->mfindex;
	usb_log_debug("[isoch] buffer %zu will be on schedule at 0x%llx",
	    it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.size;
	memcpy(it->data.virt, transfer->batch.dma_buffer.virt, it->size);
	it->state = ISOCH_FILLED;

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	usb_transfer_batch_finish(&transfer->batch);
	return err;
}

/**
 * IN is in fact easier than OUT. Our responsibility is just to feed all
 * empty buffers, and fetch one filled buffer from the ring.
 */
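/*
 * Note (rationale inferred from the check below): IN buffers are fed to the
 * xHC before the size of the caller's request is known, so every buffer is
 * prepared with max_transfer_size bytes of space. A smaller caller buffer
 * could not hold a completed transfer, hence the ELIMIT.
 */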
errno_t isoch_schedule_in(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (transfer->batch.size < ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an undersized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

	/* Wait for at least one transfer to complete. */
	while (it->state != ISOCH_COMPLETE) {
		/* First, make sure we will have something to read. */
		fibril_timer_clear_locked(isoch->feeding_timer);
		isoch_feed_in(ep);

		usb_log_debug("[isoch] waiting for buffer %zu to be completed",
		    it - isoch->transfers);
		fibril_condvar_wait(&isoch->avail, &isoch->guard);

		/* The dequeue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->dequeue];
	}

	isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

	/* Withdraw results from the previous transfer. */
	transfer->batch.error = it->error;
	if (!it->error) {
		memcpy(transfer->batch.dma_buffer.virt, it->data.virt, it->size);
		transfer->batch.transferred_size = it->size;
	}

	/* Prepare the empty buffer */
	it->state = ISOCH_EMPTY;

	fibril_mutex_unlock(&isoch->guard);
	usb_transfer_batch_finish(&transfer->batch);

	return EOK;
}

void isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep,
    xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&isoch->guard);

	errno_t err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/*
		 * For OUT, there was nothing to process.
		 * For IN, the buffer has overfilled.
		 * In either case, reset the ring.
		 */
		usb_log_warning("Ring over/underrun.");
		isoch_reset_no_timer(ep);
		goto out;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		err = EIO;
		break;
	}

	/*
	 * The order in which events are delivered is not necessarily the one
	 * we would expect. It is safer to walk the list of our transfers and
	 * check which one the event belongs to.
	 * To minimize the number of transfers checked, we start at the dequeue
	 * pointer and exit the loop as soon as the transfer is found.
	 */
	bool found_mine = false;
	for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
		/* Wrap around to 0; avoids a modulo on every iteration */
		if (di == isoch->buffer_count) {
			di = 0;
		}

		xhci_isoch_transfer_t * const it = &isoch->transfers[di];

		if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
			usb_log_debug("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occurred for an unknown transfer.");
	}

	/*
	 * It may happen that the driver has already stopped reading (writing)
	 * and our buffers are all filled (empty). As QEMU (and possibly other
	 * HCs) does not send the RING_UNDERRUN (OVERRUN) event, we set a timer
	 * to reset the data flow after the buffers should have been consumed.
	 * If there is no issue, the timer gets restarted often enough that it
	 * never fires.
	 */
	timer_schedule_reset(ep);

out:
	fibril_condvar_broadcast(&isoch->avail);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * @}
 */