source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ 69a93d02

Last change on this file since 69a93d02 was 69a93d02, checked in by Salmelu <salmelu@…>, 8 years ago

xhci: Fixed isoch failing to start

1/*
2 * Copyright (c) 2017 HelUSB3 team
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Isochronous transfer scheduling for the xHCI host controller.
34 */
35
36#include <str_error.h>
37#include <macros.h>
38
39#include "endpoint.h"
40#include "hw_struct/trb.h"
41#include "hw_struct/regs.h"
42#include "trb_ring.h"
43#include "hc.h"
44#include "bus.h"
45
46#include "isoch.h"
47
48void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
49{
50 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
51 xhci_isoch_t * const isoch = ep->isoch;
52
53 fibril_mutex_initialize(&isoch->guard);
54 fibril_condvar_initialize(&isoch->avail);
55
56 const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;
57
58 /*
59 * We shall cover at least twice the IST period, otherwise we will get
60 * an over/underrun every time.
61 */
62 isoch->buffer_count = (2 * hc->ist) / ep->interval;
63
64 /* 2 buffers are the very minimum. */
65 isoch->buffer_count = max(2, isoch->buffer_count);
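	/*
	 * Worked example (numbers are illustrative only): with hc->ist == 8 and
	 * ep->interval == 4, this keeps (2 * 8) / 4 == 4 buffers in flight.
	 */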
66
67 usb_log_error("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
68}
69
70static void isoch_reset(xhci_endpoint_t *ep)
71{
72 xhci_isoch_t * const isoch = ep->isoch;
73 assert(fibril_mutex_is_locked(&isoch->guard));
74
75 isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;
76
77 for (size_t i = 0; i < isoch->buffer_count; ++i) {
78 isoch->transfers[i].state = ISOCH_EMPTY;
79 }
80
81 fibril_timer_clear_locked(isoch->feeding_timer);
82 isoch->last_mfindex = -1U;
83 usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
84}
85
86static void isoch_reset_no_timer(xhci_endpoint_t *ep)
87{
88 xhci_isoch_t * const isoch = ep->isoch;
89 assert(fibril_mutex_is_locked(&isoch->guard));
90 /*
91 * As a timer cannot be cleared from within its own handler,
92 * clearing the reset timer is kept out of the common reset method.
93 */
94 fibril_timer_clear_locked(isoch->reset_timer);
95 isoch_reset(ep);
96}
97
98static void isoch_reset_timer(void *ep) {
99 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
100 fibril_mutex_lock(&isoch->guard);
101 isoch_reset(ep);
102 fibril_mutex_unlock(&isoch->guard);
103}
104
105/*
106 * Fast transfers could trigger the reset timer before the data is processed,
107 * leading to a spurious reset.
108 */
109#define RESET_TIMER_DELAY 100000	/* microseconds of extra slack (100 ms) */
110static void timer_schedule_reset(xhci_endpoint_t *ep) {
111 xhci_isoch_t * const isoch = ep->isoch;
112 const suseconds_t delay = isoch->buffer_count * ep->interval * 125 + RESET_TIMER_DELAY;
113
114 fibril_timer_clear_locked(isoch->reset_timer);
115 fibril_timer_set_locked(isoch->reset_timer, delay,
116 isoch_reset_timer, ep);
117}
118
119void isoch_fini(xhci_endpoint_t *ep)
120{
121 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
122 xhci_isoch_t * const isoch = ep->isoch;
123
124 if (isoch->feeding_timer) {
125 fibril_timer_clear(isoch->feeding_timer);
126 fibril_timer_destroy(isoch->feeding_timer);
127 fibril_timer_clear(isoch->reset_timer);
128 fibril_timer_destroy(isoch->reset_timer);
129 }
130
131 if (isoch->transfers) {
132 for (size_t i = 0; i < isoch->buffer_count; ++i)
133 dma_buffer_free(&isoch->transfers[i].data);
134 free(isoch->transfers);
135 }
136}
137
138/**
139 * Allocate isochronous buffers. Create the feeding and reset timers.
140 */
141int isoch_alloc_transfers(xhci_endpoint_t *ep) {
142 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
143 xhci_isoch_t * const isoch = ep->isoch;
144
145 isoch->feeding_timer = fibril_timer_create(&isoch->guard);
146 isoch->reset_timer = fibril_timer_create(&isoch->guard);
147 if (!isoch->feeding_timer || !isoch->reset_timer)
148 return ENOMEM;
149
150 isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
151 if (!isoch->transfers)
152 goto err;
153
154 for (size_t i = 0; i < isoch->buffer_count; ++i) {
155 xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
156 if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
157 goto err;
158 }
159 }
160
161 fibril_mutex_lock(&isoch->guard);
162 isoch_reset_no_timer(ep);
163 fibril_mutex_unlock(&isoch->guard);
164
165 return EOK;
166err:
167 isoch_fini(ep);
168 return ENOMEM;
169}
170
171static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
172{
173 xhci_trb_t trb;
174 xhci_trb_clean(&trb);
175
176 trb.parameter = it->data.phys;
177 TRB_CTRL_SET_XFER_LEN(trb, it->size);
178 TRB_CTRL_SET_TD_SIZE(trb, 0);
179 TRB_CTRL_SET_IOC(trb, 1);
180 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);
181
182 // see xHCI sections 4.14.1 and 4.11.2.3 for how to calculate these values
183 size_t tdpc = it->size ? (it->size + 1023) / 1024 : 1; /* a zero-length TD still carries one packet */
184 size_t tbc = tdpc / ep->max_burst;
185 if (!(tdpc % ep->max_burst)) --tbc;
186 size_t bsp = tdpc % ep->max_burst;
187 size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
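	/*
	 * Worked example (numbers are illustrative only): a 3000 B transfer
	 * split into 1024 B packets gives tdpc == 3; with max_burst == 2 that
	 * is two bursts (tbc == 1, zero-based) and a last burst of one packet
	 * (tlbpc == 0, zero-based).
	 */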
188
189 TRB_ISOCH_SET_TBC(trb, tbc);
190 TRB_ISOCH_SET_TLBPC(trb, tlbpc);
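	/* Frame ID is the 11-bit frame number (mfindex / 8) in which this TD is to be scheduled. */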
191 TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);
192
193 const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
194 return err;
195}
196
197static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
198{
199 xhci_isoch_t * const isoch = ep->isoch;
200 if (isoch->last_mfindex == -1U) {
201 const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
202 const xhci_hc_t *hc = bus->hc;
203
204 /* Pick a starting index far enough ahead to give us a little time
205 * to prepare the buffers. */
206 it->mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1
207 + isoch->buffer_count * ep->interval
208 + hc->ist;
209
210 // Align to the ESIT start boundary (interval is a power of two, so this rounds up)
211 it->mfindex += ep->interval - 1;
212 it->mfindex &= ~(ep->interval - 1);
213 } else {
214 it->mfindex = (isoch->last_mfindex + ep->interval) % XHCI_MFINDEX_MAX;
215 }
216}
217
218/** 895 ms in uframes; the far edge of the isoch scheduling window (see 4.11.2.5). */
219#define END_FRAME_DELAY (895000 / 125)
220
221typedef enum {
222 WINDOW_TOO_SOON,
223 WINDOW_INSIDE,
224 WINDOW_TOO_LATE,
225} window_position_t;
226
227typedef struct {
228 window_position_t position;
229 uint32_t offset;
230} window_decision_t;
231
232/**
233 * Decide on the position of mfindex relative to the window specified by
234 * Start Frame ID and End Frame ID. The resulting structure contains the
235 * decision and, if mfindex falls outside the window, also the number of
236 * uframes by which it is off.
237 */
238static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint32_t mfindex)
239{
240 uint32_t current_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1;
241
242 /*
243 * In your mind, rotate the clock so the window is at its beginning.
244 * The length of the window is always the same, and by rotating the
245 * mfindex too, we can decide by the value of it easily.
246 */
247 mfindex = (mfindex - current_mfindex - hc->ist + XHCI_MFINDEX_MAX) % XHCI_MFINDEX_MAX;
248 const uint32_t end = END_FRAME_DELAY - hc->ist;
249 const uint32_t threshold = (XHCI_MFINDEX_MAX + end) / 2;
250
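	/*
	 * Worked example (numbers are illustrative only, with
	 * XHCI_MFINDEX_MAX == 16384): for IST == 8 and current_mfindex == 1000,
	 * a TD planned for mfindex 1100 rotates to 92, which is below `end`,
	 * so it may be fed right away; one planned for 900 rotates to 16276,
	 * which is above `threshold`, so it is 16384 - 16276 == 108 uframes
	 * too late.
	 */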
251 if (mfindex <= end) {
252 res->position = WINDOW_INSIDE;
253 } else if (mfindex > threshold) {
254 res->position = WINDOW_TOO_LATE;
255 res->offset = XHCI_MFINDEX_MAX - mfindex;
256 } else {
257 res->position = WINDOW_TOO_SOON;
258 res->offset = mfindex - end;
259 }
260 /*
261 * TODO: The "size" of the clock is too low. We have to scale it a bit
262 * to ensure correct scheduling of transfers, that are
263 * buffer_count * interval away from now.
264 * Maximum interval is 8 seconds, which means we need a size of
265 * 16 seconds. The size of MFIINDEX is 2 seconds only.
266 *
267 * A plan is to create a thin abstraction at HC, which would return
268 * a time from 32-bit clock, having its high bits updated by the
269 * MFINDEX Wrap Event, and low bits from the MFINDEX register. Using
270 * this 32-bit clock, one can plan 6 days ahead.
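 *
 * A minimal sketch of that abstraction (the wrap counter is hypothetical,
 * not an existing field):
 *
 *     // bumped by the handler of the MFINDEX Wrap Event
 *     uint32_t hc_current_mfindex32(xhci_hc_t *hc)
 *     {
 *         return (hc->mfindex_wrap_count << 14)
 *             | XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
 *     }
 *
 * (Reading the wrap counter and the register would have to be made
 * race-free against the wrap event.)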
271 */
272}
273
274static void isoch_feed_out_timer(void *);
275static void isoch_feed_in_timer(void *);
276
277/**
278 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
279 * pushes their TRBs to the ring.
280 *
281 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
282 * it too late, but also not too soon.
283 */
284static void isoch_feed_out(xhci_endpoint_t *ep)
285{
286 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
287 xhci_isoch_t * const isoch = ep->isoch;
288 assert(fibril_mutex_is_locked(&isoch->guard));
289
290 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
291 xhci_hc_t *hc = bus->hc;
292
293 bool fed = false;
294
295 /*
296 * There might be a case where no transfer can be put on the ring immediately
297 * (for endpoints with interval >= 500ms). In that case, the transfer buffers could fill up
298 * and the first condition alone would not be enough to enter the loop.
299 */
300 while (isoch->hw_enqueue != isoch->enqueue || isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
301 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];
302
303 assert(it->state == ISOCH_FILLED);
304
305 window_decision_t wd;
306 window_decide(&wd, hc, it->mfindex);
307
308 switch (wd.position) {
309 case WINDOW_TOO_SOON: {
310 const suseconds_t delay = wd.offset * 125;
311 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
312 it - isoch->transfers, delay);
313 fibril_timer_set_locked(isoch->feeding_timer, delay,
314 isoch_feed_out_timer, ep);
315 goto out;
316 }
317
318 case WINDOW_INSIDE:
319 usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
320 it - isoch->transfers, it->mfindex);
321 it->error = schedule_isochronous_trb(ep, it);
322 if (it->error) {
323 it->state = ISOCH_COMPLETE;
324 } else {
325 it->state = ISOCH_FED;
326 fed = true;
327 }
328
329 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
330 break;
331
332 case WINDOW_TOO_LATE:
333 /* Missed the opportunity to schedule. Just mark this transfer as skipped. */
334 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
335 it - isoch->transfers, it->mfindex, wd.offset);
336 it->state = ISOCH_COMPLETE;
337 it->error = EOK;
338 it->size = 0;
339
340 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
341 break;
342 }
343 }
344
345out:
346 if (fed) {
347 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
348 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
349 hc_ring_doorbell(hc, slot_id, target);
350 /* The ring may be dead. If no event arrives before the delay expires, reset the endpoint. */
351 timer_schedule_reset(ep);
352 }
353
354}
355
356static void isoch_feed_out_timer(void *ep)
357{
358 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
359 fibril_mutex_lock(&isoch->guard);
360 isoch_feed_out(ep);
361 fibril_mutex_unlock(&isoch->guard);
362}
363
364/**
365 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
366 * transfers and pushes their TRBs to the ring.
367 *
368 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
369 * it too late, but also not too soon.
370 */
371static void isoch_feed_in(xhci_endpoint_t *ep)
372{
373 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
374 xhci_isoch_t * const isoch = ep->isoch;
375 assert(fibril_mutex_is_locked(&isoch->guard));
376
377 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
378 xhci_hc_t *hc = bus->hc;
379
380 bool fed = false;
381
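	/* A state of ISOCH_FILLED or lower (EMPTY or FILLED) means the buffer has not been handed to the xHC yet. */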
382 while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
383 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];
384
385 /* IN buffers are "filled" with free space */
386 if (it->state == ISOCH_EMPTY) {
387 it->size = ep->base.max_transfer_size;
388 it->state = ISOCH_FILLED;
389 calc_next_mfindex(ep, it);
390 }
391
392 window_decision_t wd;
393 window_decide(&wd, hc, it->mfindex);
394
395 switch (wd.position) {
396 case WINDOW_TOO_SOON: {
397 /* Not allowed to feed yet. Defer to later. */
398 const suseconds_t delay = wd.offset * 125;
399 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
400 it - isoch->transfers, delay);
401 fibril_timer_set_locked(isoch->feeding_timer, delay,
402 isoch_feed_in_timer, ep);
403 goto out;
404 }
405
406 case WINDOW_TOO_LATE:
407 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
408 it - isoch->transfers, it->mfindex, wd.offset);
409 /* Missed the opportunity to schedule. Schedule ASAP. */
410 it->mfindex += wd.offset;
411 // Align to ESIT start boundary
412 it->mfindex += ep->interval - 1;
413 it->mfindex &= ~(ep->interval - 1);
414
415 /* fallthrough */
416 case WINDOW_INSIDE:
417 isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
418 isoch->last_mfindex = it->mfindex;
419
420 usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
421 it - isoch->transfers, it->mfindex);
422
423 it->error = schedule_isochronous_trb(ep, it);
424 if (it->error) {
425 it->state = ISOCH_COMPLETE;
426 } else {
427 it->state = ISOCH_FED;
428 fed = true;
429 }
430 break;
431 }
432 }
433out:
434
435 if (fed) {
436 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
437 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
438 hc_ring_doorbell(hc, slot_id, target);
440 /* The ring may be dead. If no event arrives before the delay expires, reset the endpoint. */
440 timer_schedule_reset(ep);
441 }
442}
443
444static void isoch_feed_in_timer(void *ep)
445{
446 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
447 fibril_mutex_lock(&isoch->guard);
448 isoch_feed_in(ep);
449 fibril_mutex_unlock(&isoch->guard);
450}
451
452/**
453 * First, withdraw the results (at least one) left by previous transfers to
454 * make room in the ring. Stop on the first error.
455 *
456 * When there is at least one buffer free, fill it with data. Then try to feed
457 * it to the xHC.
458 */
459int isoch_schedule_out(xhci_transfer_t *transfer)
460{
461 int err = EOK;
462
463 xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
464 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
465 xhci_isoch_t * const isoch = ep->isoch;
466
467 if (transfer->batch.buffer_size > ep->base.max_transfer_size) {
468 usb_log_error("Cannot schedule an oversized isochronous transfer.");
469 return ELIMIT;
470 }
471
472 fibril_mutex_lock(&isoch->guard);
473
474 /* Get the buffer to write to */
475 xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];
476
477 /* Wait for the buffer to be completed */
478 while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
479 fibril_condvar_wait(&isoch->avail, &isoch->guard);
480 /* The enqueue ptr may have changed while sleeping */
481 it = &isoch->transfers[isoch->enqueue];
482 }
483
484 isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
485
486 /* Withdraw results from previous transfers. */
487 transfer->batch.transfered_size = 0;
488 xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
489 while (res->state == ISOCH_COMPLETE) {
490 isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;
491
492 res->state = ISOCH_EMPTY;
493 transfer->batch.transfered_size += res->size;
494 transfer->batch.error = res->error;
495 if (res->error)
496 break; // Announce one error at a time
497
498 res = &isoch->transfers[isoch->dequeue];
499 }
500
501 assert(it->state == ISOCH_EMPTY);
502
503 /* Calculate when to schedule next transfer */
504 calc_next_mfindex(ep, it);
505 isoch->last_mfindex = it->mfindex;
506 usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%x", it - isoch->transfers, it->mfindex);
507
508 /* Prepare the transfer. */
509 it->size = transfer->batch.buffer_size;
510 memcpy(it->data.virt, transfer->batch.buffer, it->size);
511 it->state = ISOCH_FILLED;
512
513 fibril_timer_clear_locked(isoch->feeding_timer);
514 isoch_feed_out(ep);
515
516 fibril_mutex_unlock(&isoch->guard);
517
518 usb_transfer_batch_finish(&transfer->batch);
519 return err;
520}
521
522/**
523 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
524 * buffers, and fetch one filled buffer from the ring.
525 */
526int isoch_schedule_in(xhci_transfer_t *transfer)
527{
528 xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
529 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
530 xhci_isoch_t * const isoch = ep->isoch;
531
532 if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
533 usb_log_error("Cannot schedule an undersized isochronous transfer.");
534 return ELIMIT;
535 }
536
537 fibril_mutex_lock(&isoch->guard);
538
539 xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];
540
541 /* Wait for at least one transfer to complete. */
542 while (it->state != ISOCH_COMPLETE) {
543 /* First, make sure we will have something to read. */
544 fibril_timer_clear_locked(isoch->feeding_timer);
545 isoch_feed_in(ep);
546
547 usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
548 fibril_condvar_wait(&isoch->avail, &isoch->guard);
549
550 /* The enqueue ptr may have changed while sleeping */
551 it = &isoch->transfers[isoch->dequeue];
552 }
553
554 isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;
555
556 /* Withdraw results from the previous transfer. */
557 transfer->batch.error = it->error;
558 if (!it->error) {
559 memcpy(transfer->batch.buffer, it->data.virt, it->size);
560 transfer->batch.transfered_size = it->size;
561 }
562
563 /* Prepare the empty buffer */
564 it->state = ISOCH_EMPTY;
565
566 fibril_mutex_unlock(&isoch->guard);
567 usb_transfer_batch_finish(&transfer->batch);
568
569 return EOK;
570}
571
572int isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
573{
574 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
575 xhci_isoch_t * const isoch = ep->isoch;
576
577 fibril_mutex_lock(&ep->isoch->guard);
578
579 int err;
580 const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
581
582 switch (completion_code) {
583 case XHCI_TRBC_RING_OVERRUN:
584 case XHCI_TRBC_RING_UNDERRUN:
585 /* For OUT (underrun), the xHC ran out of queued data to transmit. */
586 /* For IN (overrun), data arrived while no TRB was ready; we reset the buffers and re-add TRBs. */
587 usb_log_warning("Ring over/underrun.");
588 isoch_reset_no_timer(ep);
589 fibril_condvar_broadcast(&ep->isoch->avail);
590 fibril_mutex_unlock(&ep->isoch->guard);
591 return EOK;
592 case XHCI_TRBC_SHORT_PACKET:
593 case XHCI_TRBC_SUCCESS:
594 err = EOK;
595 break;
596 default:
597 usb_log_warning("Transfer not successfull: %u", completion_code);
598 err = EIO;
599 break;
600 }
601
602 /*
603 * The order of delivering events is not necessarily the one we would
604 * expect. It is safer to walk the list of our transfers and check
605 * which one it is.
606 * To minimize the number of transfers checked, we start at the dequeue pointer
607 * and exit the loop as soon as the transfer is found.
608 */
609 bool found_mine = false;
610 for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
611 /* Wrap it back to 0, don't use modulo every loop traversal */
612 if (di == isoch->buffer_count) {
613 di = 0;
614 }
615
616 xhci_isoch_transfer_t * const it = &isoch->transfers[di];
617
618 if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
619 usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
620 it->state = ISOCH_COMPLETE;
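			/* The event's transfer length holds the residue (bytes not transferred), hence the subtraction. */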
621 it->size -= TRB_TRANSFER_LENGTH(*trb);
622 it->error = err;
623 found_mine = true;
624 break;
625 }
626 }
627
628 if (!found_mine) {
629 usb_log_warning("[isoch] A transfer event occured for unknown transfer.");
630 }
631
632 /*
633 * It may happen that the driver already stopped reading (writing),
634 * and our buffers are filled (empty). As QEMU (and possibly others)
635 * does not send RING_UNDERRUN (OVERRUN) event, we set a timer to
636 * reset it after the buffers should have been consumed. If there
637 * is no issue, the timer will get restarted often enough.
638 */
639 timer_schedule_reset(ep);
640
641 fibril_condvar_broadcast(&ep->isoch->avail);
642 fibril_mutex_unlock(&ep->isoch->guard);
643 return EOK;
644}
645
646/**
647 * @}
648 */