source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ 4ed803f1

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 4ed803f1 was 4ed803f1, checked in by Salmelu <salmelu@…>, 8 years ago

xhci: Rewritten isoch event handling

The loop that searches for a finished xHCI transfer now takes only one traversal in the average case.
Added reset timer which resets the endpoint after there is no action for a given period.

  • Property mode set to 100644
File size: 19.2 KB
Line 
1/*
2 * Copyright (c) 2017 HelUSB3 team
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller endpoint management.
34 */
35
36#include <str_error.h>
37#include <macros.h>
38
39#include "endpoint.h"
40#include "hw_struct/trb.h"
41#include "hw_struct/regs.h"
42#include "trb_ring.h"
43#include "hc.h"
44#include "bus.h"
45
46#include "isoch.h"
47
48void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
49{
50 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
51 xhci_isoch_t * const isoch = ep->isoch;
52
53 fibril_mutex_initialize(&isoch->guard);
54 fibril_condvar_initialize(&isoch->avail);
55
56 const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;
57
58 /*
59 * We shall cover at least twice the IST period, otherwise we will get
60 * an over/underrun every time.
61 */
62 isoch->buffer_count = (2 * hc->ist) / ep->interval;
63
64 /* 2 buffers are the very minimum. */
65 isoch->buffer_count = max(2, isoch->buffer_count);
66
67 usb_log_error("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
68}
69
70static void isoch_reset(xhci_endpoint_t *ep)
71{
72 xhci_isoch_t * const isoch = ep->isoch;
73 assert(fibril_mutex_is_locked(&isoch->guard));
74
75 isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;
76
77 for (size_t i = 0; i < isoch->buffer_count; ++i) {
78 isoch->transfers[i].state = ISOCH_EMPTY;
79 }
80
81 fibril_timer_clear_locked(isoch->feeding_timer);
82 isoch->last_mfindex = -1U;
83 usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
84}
85
/**
 * Reset the endpoint's data flow and also cancel a pending reset timer.
 * Used from code paths NOT running inside the reset timer's own handler.
 * Caller must hold isoch->guard.
 */
static void isoch_reset_no_timer(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));
	/*
	 * As we cannot clear timer when we are triggered by it,
	 * we have to avoid doing it in common method.
	 */
	fibril_timer_clear_locked(isoch->reset_timer);
	isoch_reset(ep);
}
97
98static void isoch_reset_timer(void *ep) {
99 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
100 fibril_mutex_lock(&isoch->guard);
101 isoch_reset(ep);
102 fibril_mutex_unlock(&isoch->guard);
103}
104
105/*
106 * Fast transfers could trigger the reset timer before the data is processed,
107 * leading into false reset.
108 */
109#define RESET_TIMER_DELAY 100000
110static void timer_schedule_reset(xhci_endpoint_t *ep) {
111 xhci_isoch_t * const isoch = ep->isoch;
112 const suseconds_t delay = isoch->buffer_count * ep->interval * 125 + RESET_TIMER_DELAY;
113
114 fibril_timer_clear_locked(isoch->reset_timer);
115 fibril_timer_set_locked(isoch->reset_timer, delay,
116 isoch_reset_timer, ep);
117}
118
119void isoch_fini(xhci_endpoint_t *ep)
120{
121 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
122 xhci_isoch_t * const isoch = ep->isoch;
123
124 if (isoch->feeding_timer) {
125 fibril_timer_clear(isoch->feeding_timer);
126 fibril_timer_destroy(isoch->feeding_timer);
127 fibril_timer_clear(isoch->reset_timer);
128 fibril_timer_destroy(isoch->reset_timer);
129 }
130
131 if (isoch->transfers) {
132 for (size_t i = 0; i < isoch->buffer_count; ++i)
133 dma_buffer_free(&isoch->transfers[i].data);
134 free(isoch->transfers);
135 }
136}
137
138/**
139 * Allocate isochronous buffers. Create the feeding timer.
140 */
141int isoch_alloc_transfers(xhci_endpoint_t *ep) {
142 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
143 xhci_isoch_t * const isoch = ep->isoch;
144
145 isoch->feeding_timer = fibril_timer_create(&isoch->guard);
146 isoch->reset_timer = fibril_timer_create(&isoch->guard);
147 if (!isoch->feeding_timer)
148 return ENOMEM;
149
150 isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
151 if(!isoch->transfers)
152 goto err;
153
154 for (size_t i = 0; i < isoch->buffer_count; ++i) {
155 xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
156 if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
157 goto err;
158 }
159 }
160
161 fibril_mutex_lock(&isoch->guard);
162 isoch_reset_no_timer(ep);
163 fibril_mutex_unlock(&isoch->guard);
164
165 return EOK;
166err:
167 isoch_fini(ep);
168 return ENOMEM;
169}
170
171static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
172{
173 xhci_trb_t trb;
174 xhci_trb_clean(&trb);
175
176 trb.parameter = it->data.phys;
177 TRB_CTRL_SET_XFER_LEN(trb, it->size);
178 TRB_CTRL_SET_TD_SIZE(trb, 0);
179 TRB_CTRL_SET_IOC(trb, 1);
180 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);
181
182 // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
183 size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
184 size_t tbc = tdpc / ep->max_burst;
185 if (!tdpc % ep->max_burst) --tbc;
186 size_t bsp = tdpc % ep->max_burst;
187 size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
188
189 TRB_ISOCH_SET_TBC(trb, tbc);
190 TRB_ISOCH_SET_TLBPC(trb, tlbpc);
191 TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);
192
193 const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
194 return err;
195}
196
/**
 * Compute the microframe index (mfindex) at which the given transfer shall
 * appear on the bus.
 *
 * For the very first transfer after a reset (last_mfindex == -1U) the slot
 * is derived from the live MFINDEX register, leaving room for one whole
 * round of buffers plus the controller's Isochronous Scheduling Threshold.
 * Subsequent transfers follow the previous one by one endpoint interval.
 */
static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_isoch_t * const isoch = ep->isoch;
	if (isoch->last_mfindex == -1U) {
		const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
		const xhci_hc_t *hc = bus->hc;

		/* Choose some number, give us a little time to prepare the
		 * buffers */
		it->mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1
		    + isoch->buffer_count * ep->interval
		    + hc->ist;

		// Align to ESIT start boundary
		/* NOTE(review): the round-up below assumes ep->interval is a
		 * power of two — TODO confirm (xHCI encodes intervals as 2^n). */
		it->mfindex += ep->interval - 1;
		it->mfindex &= ~(ep->interval - 1);
	} else {
		/* Steady state: one interval after the previous transfer,
		 * wrapped around the mfindex clock. */
		it->mfindex = (isoch->last_mfindex + ep->interval) % XHCI_MFINDEX_MAX;
	}
}
217
/** 895 ms in uframes (895000 us / 125 us per uframe).
 * (Comment previously claimed 825 ms, which did not match the value.) */
#define END_FRAME_DELAY (895000 / 125)

/* Position of a requested mfindex relative to the schedulable window. */
typedef enum {
	WINDOW_TOO_SOON,
	WINDOW_INSIDE,
	WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
	window_position_t position;
	/* Distance in uframes from the window edge; meaningful only when
	 * position != WINDOW_INSIDE. */
	uint32_t offset;
} window_decision_t;

/**
 * Decide on the position of mfindex relatively to the window specified by
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case of the mfindex being outside, also the number of
 * uframes it's off.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint32_t mfindex)
{
	uint32_t current_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1;

	/*
	 * In your mind, rotate the clock so the window is at its beginning.
	 * The length of the window is always the same, and by rotating the
	 * mfindex too, we can decide by the value of it easily.
	 */
	mfindex = (mfindex - current_mfindex - hc->ist + XHCI_MFINDEX_MAX) % XHCI_MFINDEX_MAX;
	const uint32_t end = END_FRAME_DELAY - hc->ist;
	const uint32_t threshold = (XHCI_MFINDEX_MAX + end) / 2;

	if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else if (mfindex > threshold) {
		/* Closer to "just missed" than to "far in the future". */
		res->position = WINDOW_TOO_LATE;
		res->offset = XHCI_MFINDEX_MAX - mfindex;
	} else {
		res->position = WINDOW_TOO_SOON;
		res->offset = mfindex - end;
	}
	/*
	 * TODO: The "size" of the clock is too low. We have to scale it a bit
	 * to ensure correct scheduling of transfers, that are
	 * buffer_count * interval away from now.
	 * Maximum interval is 8 seconds, which means we need a size of
	 * 16 seconds. The size of MFINDEX is 2 seconds only.
	 *
	 * A plan is to create a thin abstraction at HC, which would return
	 * a time from 32-bit clock, having its high bits updated by the
	 * MFINDEX Wrap Event, and low bits from the MFINDEX register. Using
	 * this 32-bit clock, one can plan 6 days ahead.
	 */
}
273
274static void isoch_feed_out_timer(void *);
275static void isoch_feed_in_timer(void *);
276
/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 *
 * Caller must hold isoch->guard. Rings the doorbell and arms the reset
 * watchdog only if at least one TRB was actually fed to the hardware.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	/* Walk every FILLED buffer between the HW enqueue and SW enqueue. */
	while (isoch->hw_enqueue != isoch->enqueue) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];

		assert(it->state == ISOCH_FILLED);

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Retry later; buffers are ordered by mfindex, so the
			 * rest of the queue would be too soon as well. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_out_timer, ep);
			goto out;
		}

		case WINDOW_INSIDE:
			usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
			    it - isoch->transfers, it->mfindex);
			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				/* Failed enqueue completes immediately with error. */
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;

		case WINDOW_TOO_LATE:
			/* Missed the opportunity to schedule. Just mark this transfer as skipped. */
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			it->state = ISOCH_COMPLETE;
			it->error = EOK;
			it->size = 0;

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;
		}
	}

out:
	if (fed) {
		const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
		const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
		hc_ring_doorbell(hc, slot_id, target);
		/* The ring may be dead. If no event happens until the delay, reset the endpoint. */
		timer_schedule_reset(ep);
	}

}
350
351static void isoch_feed_out_timer(void *ep)
352{
353 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
354 fibril_mutex_lock(&isoch->guard);
355 isoch_feed_out(ep);
356 fibril_mutex_unlock(&isoch->guard);
357}
358
/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 *
 * Caller must hold isoch->guard. Rings the doorbell and arms the reset
 * watchdog only if at least one TRB was actually fed to the hardware.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	/* Process buffers that are EMPTY or FILLED (states <= ISOCH_FILLED),
	 * stopping at the first FED/COMPLETE one. */
	while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];

		/* IN buffers are "filled" with free space */
		if (it->state == ISOCH_EMPTY) {
			it->size = ep->base.max_transfer_size;
			it->state = ISOCH_FILLED;
			calc_next_mfindex(ep, it);
		}

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Not allowed to feed yet. Defer to later. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_in_timer, ep);
			goto out;
		}

		case WINDOW_TOO_LATE:
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			/* Missed the opportunity to schedule. Schedule ASAP. */
			it->mfindex += wd.offset;
			// Align to ESIT start boundary
			/* NOTE(review): assumes ep->interval is a power of
			 * two — TODO confirm (xHCI encodes intervals as 2^n). */
			it->mfindex += ep->interval - 1;
			it->mfindex &= ~(ep->interval - 1);

			/* fallthrough */
		case WINDOW_INSIDE:
			isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
			/* Remember where the next transfer shall follow. */
			isoch->last_mfindex = it->mfindex;

			usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
			    it - isoch->transfers, it->mfindex);

			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				/* Failed enqueue completes immediately with error. */
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}
			break;
		}
	}
out:

	if (fed) {
		const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
		const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
		hc_ring_doorbell(hc, slot_id, target);
		/* The ring may be dead. If no event happens until the delay, reset the endpoint. */
		timer_schedule_reset(ep);
	}
}
438
439static void isoch_feed_in_timer(void *ep)
440{
441 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
442 fibril_mutex_lock(&isoch->guard);
443 isoch_feed_in(ep);
444 fibril_mutex_unlock(&isoch->guard);
445}
446
/**
 * First, withdraw all (at least one) results left by previous transfers to
 * make room in the ring. Stop on first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to feed
 * it to the xHC.
 *
 * Blocks (on isoch->avail) until a buffer is free to copy the outgoing data
 * into. The batch is finished before returning; its transfered_size and
 * error reflect previously completed transfers, not this one.
 */
int isoch_schedule_out(xhci_transfer_t *transfer)
{
	/* NOTE(review): err is never reassigned below; this function always
	 * returns EOK (errors are reported through the batch). */
	int err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* The preallocated DMA buffers cannot hold more. */
	if (transfer->batch.buffer_size > ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an oversized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transfered_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		transfer->batch.transfered_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mfindex = it->mfindex;
	usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%x", it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.buffer_size;
	memcpy(it->data.virt, transfer->batch.buffer, it->size);
	it->state = ISOCH_FILLED;

	/* Try to push the newly filled buffer to the HW right away. */
	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	usb_transfer_batch_finish(&transfer->batch);
	return err;
}
516
/**
 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
 * buffers, and fetch one filled buffer from the ring.
 *
 * Blocks (on isoch->avail) until at least one buffer completes; its data is
 * copied into the batch, which is finished before returning.
 */
int isoch_schedule_in(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* The HW may deliver up to max_transfer_size; the caller's buffer
	 * must be able to hold it. */
	if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an undersized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

	/* Wait for at least one transfer to complete. */
	while (it->state != ISOCH_COMPLETE) {
		/* First, make sure we will have something to read. */
		fibril_timer_clear_locked(isoch->feeding_timer);
		isoch_feed_in(ep);

		usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
		fibril_condvar_wait(&isoch->avail, &isoch->guard);

		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->dequeue];
	}

	isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfer. */
	/* NOTE(review): when it->error is set, neither transfered_size nor
	 * batch.error is updated here — the error seems to go unreported to
	 * the caller; TODO confirm intended behavior. */
	if (!it->error) {
		memcpy(transfer->batch.buffer, it->data.virt, it->size);
		transfer->batch.transfered_size = it->size;
		transfer->batch.error = it->error;
	}

	/* Prepare the empty buffer */
	it->state = ISOCH_EMPTY;

	fibril_mutex_unlock(&isoch->guard);
	usb_transfer_batch_finish(&transfer->batch);

	return EOK;
}
566
/**
 * Handle a transfer event on an isochronous endpoint: match it to the FED
 * transfer it belongs to, record size/error, and wake up waiting fibrils.
 *
 * Ring over/underrun events reset the endpoint's data flow instead.
 *
 * @return EOK always (even for unmatched events, which are only logged).
 */
int isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&ep->isoch->guard);

	int err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/* For OUT, there was nothing to process */
		/* For IN, the buffer has overfilled, we empty the buffers and readd TRBs */
		usb_log_warning("Ring over/underrun.");
		isoch_reset_no_timer(ep);
		fibril_condvar_broadcast(&ep->isoch->avail);
		fibril_mutex_unlock(&ep->isoch->guard);
		return EOK;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successfull: %u", completion_code);
		err = EIO;
		break;
	}

	/*
	 * The order of delivering events is not necessarily the one we would
	 * expect. It is safer to walk the list of our transfers and check
	 * which one it is.
	 * To minimize the amount of transfers checked, we start at dequeue pointer
	 * and exit the loop as soon as the transfer is found.
	 */
	bool found_mine = false;
	for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
		/* Wrap it back to 0, don't use modulo every loop traversal */
		if (di == isoch->buffer_count) {
			di = 0;
		}

		xhci_isoch_transfer_t * const it = &isoch->transfers[di];

		/* Match by the physical address of the TRB that raised the event. */
		if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
			usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
			/* The event carries the residue; subtract it to get
			 * the actually transferred length. */
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occured for unknown transfer.");
	}

	/*
	 * It may happen that the driver already stopped reading (writing),
	 * and our buffers are filled (empty). As QEMU (and possibly others)
	 * does not send RING_UNDERRUN (OVERRUN) event, we set a timer to
	 * reset it after the buffers should have been consumed. If there
	 * is no issue, the timer will get restarted often enough.
	 */
	timer_schedule_reset(ep);

	fibril_condvar_broadcast(&ep->isoch->avail);
	fibril_mutex_unlock(&ep->isoch->guard);
	return EOK;
}
640
641/**
642 * @}
643 */
Note: See TracBrowser for help on using the repository browser.