source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ 0f79283b

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 0f79283b was 1d218bf, checked in by Petr Manek <petr.manek@…>, 8 years ago

xhci: decrease message log level

  • Property mode set to 100644
File size: 19.2 KB
Line 
1/*
2 * Copyright (c) 2017 HelUSB3 team
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
/** @file
 * @brief Isochronous transfer scheduling and buffer management.
 */
35
36#include <str_error.h>
37#include <macros.h>
38
39#include "endpoint.h"
40#include "hw_struct/trb.h"
41#include "hw_struct/regs.h"
42#include "trb_ring.h"
43#include "hc.h"
44#include "bus.h"
45
46#include "isoch.h"
47
48void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
49{
50 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
51 xhci_isoch_t * const isoch = ep->isoch;
52
53 fibril_mutex_initialize(&isoch->guard);
54 fibril_condvar_initialize(&isoch->avail);
55
56 const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;
57
58 /*
59 * We shall cover at least twice the IST period, otherwise we will get
60 * an over/underrun every time.
61 */
62 isoch->buffer_count = (2 * hc->ist) / ep->interval;
63
64 /* 2 buffers are the very minimum. */
65 isoch->buffer_count = max(2, isoch->buffer_count);
66
67 usb_log_debug2("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
68}
69
70static void isoch_reset(xhci_endpoint_t *ep)
71{
72 xhci_isoch_t * const isoch = ep->isoch;
73 assert(fibril_mutex_is_locked(&isoch->guard));
74
75 isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;
76
77 for (size_t i = 0; i < isoch->buffer_count; ++i) {
78 isoch->transfers[i].state = ISOCH_EMPTY;
79 }
80
81 fibril_timer_clear_locked(isoch->feeding_timer);
82 isoch->last_mf = -1U;
83 usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
84}
85
86static void isoch_reset_no_timer(xhci_endpoint_t *ep)
87{
88 xhci_isoch_t * const isoch = ep->isoch;
89 assert(fibril_mutex_is_locked(&isoch->guard));
90 /*
91 * As we cannot clear timer when we are triggered by it,
92 * we have to avoid doing it in common method.
93 */
94 fibril_timer_clear_locked(isoch->reset_timer);
95 isoch_reset(ep);
96}
97
/** Reset-timer callback: reset the endpoint's data flow under the guard.
 *
 * @param ep The endpoint, passed as void * by the fibril timer API.
 *           NOTE(review): xhci_endpoint_get() is applied here while
 *           isoch_reset() receives the raw pointer — this is consistent only
 *           if the conversion is an identity cast (base as first member);
 *           confirm against endpoint.h.
 */
static void isoch_reset_timer(void *ep) {
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_reset(ep);
	fibril_mutex_unlock(&isoch->guard);
}
104
105/*
106 * Fast transfers could trigger the reset timer before the data is processed,
107 * leading into false reset.
108 */
109#define RESET_TIMER_DELAY 100000
110static void timer_schedule_reset(xhci_endpoint_t *ep) {
111 xhci_isoch_t * const isoch = ep->isoch;
112 const suseconds_t delay = isoch->buffer_count * ep->interval * 125 + RESET_TIMER_DELAY;
113
114 fibril_timer_clear_locked(isoch->reset_timer);
115 fibril_timer_set_locked(isoch->reset_timer, delay,
116 isoch_reset_timer, ep);
117}
118
119void isoch_fini(xhci_endpoint_t *ep)
120{
121 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
122 xhci_isoch_t * const isoch = ep->isoch;
123
124 if (isoch->feeding_timer) {
125 fibril_timer_clear(isoch->feeding_timer);
126 fibril_timer_destroy(isoch->feeding_timer);
127 fibril_timer_clear(isoch->reset_timer);
128 fibril_timer_destroy(isoch->reset_timer);
129 }
130
131 if (isoch->transfers) {
132 for (size_t i = 0; i < isoch->buffer_count; ++i)
133 dma_buffer_free(&isoch->transfers[i].data);
134 free(isoch->transfers);
135 }
136}
137
138/**
139 * Allocate isochronous buffers. Create the feeding timer.
140 */
141int isoch_alloc_transfers(xhci_endpoint_t *ep) {
142 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
143 xhci_isoch_t * const isoch = ep->isoch;
144
145 isoch->feeding_timer = fibril_timer_create(&isoch->guard);
146 isoch->reset_timer = fibril_timer_create(&isoch->guard);
147 if (!isoch->feeding_timer)
148 return ENOMEM;
149
150 isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
151 if(!isoch->transfers)
152 goto err;
153
154 for (size_t i = 0; i < isoch->buffer_count; ++i) {
155 xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
156 if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
157 goto err;
158 }
159 }
160
161 fibril_mutex_lock(&isoch->guard);
162 isoch_reset_no_timer(ep);
163 fibril_mutex_unlock(&isoch->guard);
164
165 return EOK;
166err:
167 isoch_fini(ep);
168 return ENOMEM;
169}
170
171static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
172{
173 xhci_trb_t trb;
174 xhci_trb_clean(&trb);
175
176 trb.parameter = it->data.phys;
177 TRB_CTRL_SET_XFER_LEN(trb, it->size);
178 TRB_CTRL_SET_TD_SIZE(trb, 0);
179 TRB_CTRL_SET_IOC(trb, 1);
180 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);
181
182 // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
183 size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
184 size_t tbc = tdpc / ep->max_burst;
185 if (!tdpc % ep->max_burst) --tbc;
186 size_t bsp = tdpc % ep->max_burst;
187 size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
188
189 TRB_ISOCH_SET_TBC(trb, tbc);
190 TRB_ISOCH_SET_TLBPC(trb, tlbpc);
191 TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);
192
193 const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
194 return err;
195}
196
197/** The number of bits the MFINDEX is stored in at HW */
198#define EPOCH_BITS 14
199/** The delay in usec for the epoch wrap */
200#define EPOCH_DELAY 500000
201/** The amount of microframes the epoch is checked for a delay */
202#define EPOCH_LOW_MFINDEX 8 * 100
203
204static inline uint64_t get_system_time()
205{
206 struct timeval tv;
207 getuptime(&tv);
208 return ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
209}
210
/** Compute the current microframe index, extended past the HW register width.
 *
 * The MFINDEX register is only EPOCH_BITS wide and wraps; hc->wrap_count and
 * hc->wrap_time (maintained elsewhere — presumably by the MFINDEX wrap event
 * handler; confirm in hc.c) extend it to a monotonically growing value.
 */
static inline uint64_t get_current_microframe(const xhci_hc_t *hc)
{
	const uint32_t reg_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
	/*
	 * If the mfindex is low and the time passed since last mfindex wrap
	 * is too high, we have entered the new epoch already (and haven't received event yet).
	 */
	uint64_t epoch = hc->wrap_count;
	if (reg_mfindex < EPOCH_LOW_MFINDEX && get_system_time() - hc->wrap_time > EPOCH_DELAY) {
		++epoch;
	}
	return (epoch << EPOCH_BITS) + reg_mfindex;
}
224
225static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
226{
227 xhci_isoch_t * const isoch = ep->isoch;
228 if (isoch->last_mf == -1U) {
229 const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
230 const xhci_hc_t *hc = bus->hc;
231
232 /* Delay the first frame by some time to fill the buffer, but at most 10 miliseconds. */
233 const uint64_t delay = min(isoch->buffer_count * ep->interval, 10 * 8);
234 it->mfindex = get_current_microframe(hc) + 1 + delay + hc->ist;
235
236 // Align to ESIT start boundary
237 it->mfindex += ep->interval - 1;
238 it->mfindex &= ~(ep->interval - 1);
239 } else {
240 it->mfindex = isoch->last_mf + ep->interval;
241 }
242}
243
244/** 825 ms in uframes */
245#define END_FRAME_DELAY (895000 / 125)
246
247typedef enum {
248 WINDOW_TOO_SOON,
249 WINDOW_INSIDE,
250 WINDOW_TOO_LATE,
251} window_position_t;
252
253typedef struct {
254 window_position_t position;
255 uint64_t offset;
256} window_decision_t;
257
258/**
259 * Decide on the position of mfindex relatively to the window specified by
260 * Start Frame ID and End Frame ID. The resulting structure contains the
261 * decision, and in case of the mfindex being outside, also the number of
262 * uframes it's off.
263 */
264static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint64_t mfindex)
265{
266 const uint64_t current_mf = get_current_microframe(hc);
267 const uint64_t start = current_mf + hc->ist + 1;
268 const uint64_t end = current_mf + END_FRAME_DELAY;
269
270 if (mfindex < start) {
271 res->position = WINDOW_TOO_LATE;
272 res->offset = start - mfindex;
273 } else if (mfindex <= end) {
274 res->position = WINDOW_INSIDE;
275 } else {
276 res->position = WINDOW_TOO_SOON;
277 res->offset = mfindex - end;
278 }
279}
280
281static void isoch_feed_out_timer(void *);
282static void isoch_feed_in_timer(void *);
283
284/**
285 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
286 * pushes their TRBs to the ring.
287 *
288 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
289 * it too late, but also not too soon.
290 */
291static void isoch_feed_out(xhci_endpoint_t *ep)
292{
293 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
294 xhci_isoch_t * const isoch = ep->isoch;
295 assert(fibril_mutex_is_locked(&isoch->guard));
296
297 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
298 xhci_hc_t *hc = bus->hc;
299
300 bool fed = false;
301
302 while (isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
303 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];
304
305 assert(it->state == ISOCH_FILLED);
306
307 window_decision_t wd;
308 window_decide(&wd, hc, it->mfindex);
309
310 switch (wd.position) {
311 case WINDOW_TOO_SOON: {
312 const suseconds_t delay = wd.offset * 125;
313 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
314 it - isoch->transfers, delay);
315 fibril_timer_set_locked(isoch->feeding_timer, delay,
316 isoch_feed_out_timer, ep);
317 goto out;
318 }
319
320 case WINDOW_INSIDE:
321 usb_log_debug2("[isoch] feeding buffer %lu at 0x%llx",
322 it - isoch->transfers, it->mfindex);
323 it->error = schedule_isochronous_trb(ep, it);
324 if (it->error) {
325 it->state = ISOCH_COMPLETE;
326 } else {
327 it->state = ISOCH_FED;
328 fed = true;
329 }
330
331 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
332 break;
333
334 case WINDOW_TOO_LATE:
335 /* Missed the opportunity to schedule. Just mark this transfer as skipped. */
336 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%llx by %llu uframes",
337 it - isoch->transfers, it->mfindex, wd.offset);
338 it->state = ISOCH_COMPLETE;
339 it->error = EOK;
340 it->size = 0;
341
342 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
343 break;
344 }
345 }
346
347out:
348 if (fed) {
349 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
350 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
351 hc_ring_doorbell(hc, slot_id, target);
352 /* The ring may be dead. If no event happens until the delay, reset the endpoint. */
353 timer_schedule_reset(ep);
354 }
355
356}
357
/** Feeding-timer callback for OUT endpoints: retry feeding under the guard.
 *
 * @param ep The endpoint, passed as void * by the fibril timer API.
 */
static void isoch_feed_out_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_out(ep);
	fibril_mutex_unlock(&isoch->guard);
}
365
366/**
367 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
368 * transfers and pushes their TRBs to the ring.
369 *
370 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
371 * it too late, but also not too soon.
372 */
373static void isoch_feed_in(xhci_endpoint_t *ep)
374{
375 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
376 xhci_isoch_t * const isoch = ep->isoch;
377 assert(fibril_mutex_is_locked(&isoch->guard));
378
379 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
380 xhci_hc_t *hc = bus->hc;
381
382 bool fed = false;
383
384 while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
385 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];
386
387 /* IN buffers are "filled" with free space */
388 if (it->state == ISOCH_EMPTY) {
389 it->size = ep->base.max_transfer_size;
390 it->state = ISOCH_FILLED;
391 calc_next_mfindex(ep, it);
392 }
393
394 window_decision_t wd;
395 window_decide(&wd, hc, it->mfindex);
396
397 switch (wd.position) {
398 case WINDOW_TOO_SOON: {
399 /* Not allowed to feed yet. Defer to later. */
400 const suseconds_t delay = wd.offset * 125;
401 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
402 it - isoch->transfers, delay);
403 fibril_timer_set_locked(isoch->feeding_timer, delay,
404 isoch_feed_in_timer, ep);
405 goto out;
406 }
407
408 case WINDOW_TOO_LATE:
409 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%llx by %llu uframes",
410 it - isoch->transfers, it->mfindex, wd.offset);
411 /* Missed the opportunity to schedule. Schedule ASAP. */
412 it->mfindex += wd.offset;
413 // Align to ESIT start boundary
414 it->mfindex += ep->interval - 1;
415 it->mfindex &= ~(ep->interval - 1);
416
417 /* fallthrough */
418 case WINDOW_INSIDE:
419 isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
420 isoch->last_mf = it->mfindex;
421
422 usb_log_debug2("[isoch] feeding buffer %lu at 0x%llx",
423 it - isoch->transfers, it->mfindex);
424
425 it->error = schedule_isochronous_trb(ep, it);
426 if (it->error) {
427 it->state = ISOCH_COMPLETE;
428 } else {
429 it->state = ISOCH_FED;
430 fed = true;
431 }
432 break;
433 }
434 }
435out:
436
437 if (fed) {
438 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
439 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
440 hc_ring_doorbell(hc, slot_id, target);
441 /* The ring may be dead. If no event happens until the delay, reset the endpoint. */
442 timer_schedule_reset(ep);
443 }
444}
445
/** Feeding-timer callback for IN endpoints: retry feeding under the guard.
 *
 * @param ep The endpoint, passed as void * by the fibril timer API.
 */
static void isoch_feed_in_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_in(ep);
	fibril_mutex_unlock(&isoch->guard);
}
453
454/**
455 * First, withdraw all (at least one) results left by previous transfers to
456 * make room in the ring. Stop on first error.
457 *
458 * When there is at least one buffer free, fill it with data. Then try to feed
459 * it to the xHC.
460 */
461int isoch_schedule_out(xhci_transfer_t *transfer)
462{
463 int err = EOK;
464
465 xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
466 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
467 xhci_isoch_t * const isoch = ep->isoch;
468
469 if (transfer->batch.buffer_size > ep->base.max_transfer_size) {
470 usb_log_error("Cannot schedule an oversized isochronous transfer.");
471 return ELIMIT;
472 }
473
474 fibril_mutex_lock(&isoch->guard);
475
476 /* Get the buffer to write to */
477 xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];
478
479 /* Wait for the buffer to be completed */
480 while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
481 fibril_condvar_wait(&isoch->avail, &isoch->guard);
482 /* The enqueue ptr may have changed while sleeping */
483 it = &isoch->transfers[isoch->enqueue];
484 }
485
486 isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
487
488 /* Withdraw results from previous transfers. */
489 transfer->batch.transfered_size = 0;
490 xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
491 while (res->state == ISOCH_COMPLETE) {
492 isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;
493
494 res->state = ISOCH_EMPTY;
495 transfer->batch.transfered_size += res->size;
496 transfer->batch.error = res->error;
497 if (res->error)
498 break; // Announce one error at a time
499
500 res = &isoch->transfers[isoch->dequeue];
501 }
502
503 assert(it->state == ISOCH_EMPTY);
504
505 /* Calculate when to schedule next transfer */
506 calc_next_mfindex(ep, it);
507 isoch->last_mf = it->mfindex;
508 usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%llx", it - isoch->transfers, it->mfindex);
509
510 /* Prepare the transfer. */
511 it->size = transfer->batch.buffer_size;
512 memcpy(it->data.virt, transfer->batch.buffer, it->size);
513 it->state = ISOCH_FILLED;
514
515 fibril_timer_clear_locked(isoch->feeding_timer);
516 isoch_feed_out(ep);
517
518 fibril_mutex_unlock(&isoch->guard);
519
520 usb_transfer_batch_finish(&transfer->batch);
521 return err;
522}
523
524/**
525 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
526 * buffers, and fetch one filled buffer from the ring.
527 */
528int isoch_schedule_in(xhci_transfer_t *transfer)
529{
530 xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
531 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
532 xhci_isoch_t * const isoch = ep->isoch;
533
534 if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
535 usb_log_error("Cannot schedule an undersized isochronous transfer.");
536 return ELIMIT;
537 }
538
539 fibril_mutex_lock(&isoch->guard);
540
541 xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];
542
543 /* Wait for at least one transfer to complete. */
544 while (it->state != ISOCH_COMPLETE) {
545 /* First, make sure we will have something to read. */
546 fibril_timer_clear_locked(isoch->feeding_timer);
547 isoch_feed_in(ep);
548
549 usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
550 fibril_condvar_wait(&isoch->avail, &isoch->guard);
551
552 /* The enqueue ptr may have changed while sleeping */
553 it = &isoch->transfers[isoch->dequeue];
554 }
555
556 isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;
557
558 /* Withdraw results from previous transfer. */
559 if (!it->error) {
560 memcpy(transfer->batch.buffer, it->data.virt, it->size);
561 transfer->batch.transfered_size = it->size;
562 transfer->batch.error = it->error;
563 }
564
565 /* Prepare the empty buffer */
566 it->state = ISOCH_EMPTY;
567
568 fibril_mutex_unlock(&isoch->guard);
569 usb_transfer_batch_finish(&transfer->batch);
570
571 return EOK;
572}
573
574void isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
575{
576 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
577 xhci_isoch_t * const isoch = ep->isoch;
578
579 fibril_mutex_lock(&ep->isoch->guard);
580
581 int err;
582 const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
583
584 switch (completion_code) {
585 case XHCI_TRBC_RING_OVERRUN:
586 case XHCI_TRBC_RING_UNDERRUN:
587 /* For OUT, there was nothing to process */
588 /* For IN, the buffer has overfilled, we empty the buffers and readd TRBs */
589 usb_log_warning("Ring over/underrun.");
590 isoch_reset_no_timer(ep);
591 fibril_condvar_broadcast(&ep->isoch->avail);
592 fibril_mutex_unlock(&ep->isoch->guard);
593 goto out;
594 case XHCI_TRBC_SHORT_PACKET:
595 case XHCI_TRBC_SUCCESS:
596 err = EOK;
597 break;
598 default:
599 usb_log_warning("Transfer not successfull: %u", completion_code);
600 err = EIO;
601 break;
602 }
603
604 /*
605 * The order of delivering events is not necessarily the one we would
606 * expect. It is safer to walk the list of our transfers and check
607 * which one it is.
608 * To minimize the amount of transfers checked, we start at dequeue pointer
609 * and exit the loop as soon as the transfer is found.
610 */
611 bool found_mine = false;
612 for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
613 /* Wrap it back to 0, don't use modulo every loop traversal */
614 if (di == isoch->buffer_count) {
615 di = 0;
616 }
617
618 xhci_isoch_transfer_t * const it = &isoch->transfers[di];
619
620 if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
621 usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
622 it->state = ISOCH_COMPLETE;
623 it->size -= TRB_TRANSFER_LENGTH(*trb);
624 it->error = err;
625 found_mine = true;
626 break;
627 }
628 }
629
630 if (!found_mine) {
631 usb_log_warning("[isoch] A transfer event occured for unknown transfer.");
632 }
633
634 /*
635 * It may happen that the driver already stopped reading (writing),
636 * and our buffers are filled (empty). As QEMU (and possibly others)
637 * does not send RING_UNDERRUN (OVERRUN) event, we set a timer to
638 * reset it after the buffers should have been consumed. If there
639 * is no issue, the timer will get restarted often enough.
640 */
641 timer_schedule_reset(ep);
642
643out:
644 fibril_condvar_broadcast(&ep->isoch->avail);
645 fibril_mutex_unlock(&ep->isoch->guard);
646}
647
648/**
649 * @}
650 */
Note: See TracBrowser for help on using the repository browser.