source: mainline/uspace/drv/bus/usb/xhci/isoch.c@af60409

Last change on this file since af60409 was 338d54a7, checked in by Jiri Svoboda <jiri@…>, 8 years ago

Gratuitous nested block makes ccheck sad.

/*
 * Copyright (c) 2018 Ondrej Hlavaty, Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller isochronous transfer management.
 */

#include <str_error.h>
#include <macros.h>

#include "endpoint.h"
#include "hw_struct/trb.h"
#include "hw_struct/regs.h"
#include "trb_ring.h"
#include "hc.h"
#include "bus.h"

#include "isoch.h"
void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_initialize(&isoch->guard);
	fibril_condvar_initialize(&isoch->avail);

	const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;

	/*
	 * We shall cover at least twice the IST period, otherwise we will get
	 * an over/underrun every time.
	 */
	isoch->buffer_count = (2 * hc->ist) / ep->interval;

	/* 2 buffers are the very minimum. */
	isoch->buffer_count = max(2, isoch->buffer_count);

	usb_log_debug("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
}

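/*
 * Worked example (illustrative values, not taken from real hardware): with
 * an IST of 8 microframes and ep->interval = 2, the computation above yields
 * (2 * 8) / 2 = 8 buffers; with ep->interval = 32 it would yield 0, which
 * max() raises to the minimum of 2.
 */
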
static void isoch_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		isoch->transfers[i].state = ISOCH_EMPTY;
	}

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch->last_mf = -1U;
	usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.",
	    XHCI_EP_ARGS(*ep));
}

static void isoch_reset_no_timer(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));
	/*
	 * As we cannot clear the reset timer from within its own handler,
	 * clearing it has to stay out of the common reset path.
	 */
	fibril_timer_clear_locked(isoch->reset_timer);
	isoch_reset(ep);
}

static void isoch_reset_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_reset(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/*
 * Fast transfers could trigger the reset timer before the data is processed,
 * leading to a spurious reset.
 */
#define RESET_TIMER_DELAY 100000
static void timer_schedule_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	const suseconds_t delay = isoch->buffer_count * ep->interval * 125
	    + RESET_TIMER_DELAY;

	fibril_timer_clear_locked(isoch->reset_timer);
	fibril_timer_set_locked(isoch->reset_timer, delay,
	    isoch_reset_timer, ep);
}

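/*
 * Worked example (illustrative values): with 4 buffers and an interval of
 * 8 microframes, the buffers together cover 4 * 8 * 125 = 4000 us of
 * traffic, so the reset fires only after 104000 us without a transfer event.
 */
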
void isoch_fini(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (isoch->feeding_timer) {
		fibril_timer_clear(isoch->feeding_timer);
		fibril_timer_destroy(isoch->feeding_timer);
	}

	if (isoch->reset_timer) {
		fibril_timer_clear(isoch->reset_timer);
		fibril_timer_destroy(isoch->reset_timer);
	}

	if (isoch->transfers) {
		for (size_t i = 0; i < isoch->buffer_count; ++i)
			dma_buffer_free(&isoch->transfers[i].data);
		free(isoch->transfers);
	}
}

/**
 * Allocate isochronous buffers. Create the feeding and reset timers.
 */
errno_t isoch_alloc_transfers(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	isoch->feeding_timer = fibril_timer_create(&isoch->guard);
	isoch->reset_timer = fibril_timer_create(&isoch->guard);
	if (!isoch->feeding_timer || !isoch->reset_timer)
		goto err;

	isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
	if (!isoch->transfers)
		goto err;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
		if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
			goto err;
		}
	}

	fibril_mutex_lock(&isoch->guard);
	isoch_reset_no_timer(ep);
	fibril_mutex_unlock(&isoch->guard);

	return EOK;
err:
	isoch_fini(ep);
	return ENOMEM;
}

static errno_t schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	trb.parameter = host2xhci(64, dma_buffer_phys_base(&it->data));
	TRB_CTRL_SET_XFER_LEN(trb, it->size);
	TRB_CTRL_SET_TD_SIZE(trb, 0);
	TRB_CTRL_SET_IOC(trb, 1);
	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);

	// See xHCI spec 4.14.1 and 4.11.2.3 for how to calculate these
	size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
	size_t tbc = tdpc / ep->max_burst;
	if (!(tdpc % ep->max_burst))
		--tbc;
	size_t bsp = tdpc % ep->max_burst;
	size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
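	/*
	 * Worked example (illustrative values): it->size = 5120 and
	 * ep->max_burst = 3 give tdpc = 5 packets, tbc = 5 / 3 = 1 (no
	 * decrement, since 5 % 3 != 0), bsp = 2 and thus tlbpc = 1, i.e.
	 * two bursts total, with two packets in the last one.
	 */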

	TRB_ISOCH_SET_TBC(trb, tbc);
	TRB_ISOCH_SET_TLBPC(trb, tlbpc);
	TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);

	const errno_t err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
	return err;
}

/** The number of bits in which HW keeps the MFINDEX */
#define EPOCH_BITS 14
/** The minimum time in usec since the last wrap before we assume another one */
#define EPOCH_DELAY 500000
/** MFINDEX values under this limit are suspected of a missed wrap */
#define EPOCH_LOW_MFINDEX (8 * 100)

static inline uint64_t get_system_time(void)
{
	struct timeval tv;
	getuptime(&tv);
	return ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
}

static inline uint64_t get_current_microframe(const xhci_hc_t *hc)
{
	const uint32_t reg_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
	/*
	 * If the mfindex is low and too much time has passed since the last
	 * mfindex wrap, we have already entered the new epoch (and have not
	 * received the wrap event yet).
	 */
	uint64_t epoch = hc->wrap_count;
	if (reg_mfindex < EPOCH_LOW_MFINDEX
	    && get_system_time() - hc->wrap_time > EPOCH_DELAY) {
		++epoch;
	}
	return (epoch << EPOCH_BITS) + reg_mfindex;
}

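/*
 * Illustration (made-up values): MFINDEX is 14 bits wide, so it wraps every
 * 2^14 * 125 us ~ 2.048 s. With hc->wrap_count = 3 and reg_mfindex = 42, this
 * returns (3 << 14) + 42 = 49194. If reg_mfindex is still low but the last
 * wrap event was seen more than EPOCH_DELAY ago, epoch 4 is used instead, as
 * the wrap event is assumed to be still in flight.
 */
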
static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_isoch_t * const isoch = ep->isoch;
	if (isoch->last_mf == -1U) {
		const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
		const xhci_hc_t *hc = bus->hc;

		/*
		 * Delay the first frame by some time to fill the buffer, but
		 * at most 10 milliseconds.
		 */
		const uint64_t delay = min(isoch->buffer_count * ep->interval, 10 * 8);
		it->mfindex = get_current_microframe(hc) + 1 + delay + hc->ist;

		// Align to ESIT start boundary
		it->mfindex += ep->interval - 1;
		it->mfindex &= ~(ep->interval - 1);
	} else {
		it->mfindex = isoch->last_mf + ep->interval;
	}
}

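/*
 * The alignment above relies on ep->interval being a power of two, which
 * xHCI guarantees (intervals are expressed as 2^n microframes). For example
 * (made-up values), with interval = 8 a raw mfindex of 49201 aligns to
 * (49201 + 7) & ~7 = 49208, the start of the next ESIT.
 */
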
/** 895 ms in uframes (895000 us / 125 us per uframe = 7160) */
#define END_FRAME_DELAY (895000 / 125)

typedef enum {
	WINDOW_TOO_SOON,
	WINDOW_INSIDE,
	WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
	window_position_t position;
	uint64_t offset;
} window_decision_t;

/**
 * Decide on the position of mfindex relative to the window specified by the
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case the mfindex lies outside, also the number of uframes
 * it is off by.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc,
    uint64_t mfindex)
{
	const uint64_t current_mf = get_current_microframe(hc);
	const uint64_t start = current_mf + hc->ist + 1;
	const uint64_t end = current_mf + END_FRAME_DELAY;

	if (mfindex < start) {
		res->position = WINDOW_TOO_LATE;
		res->offset = start - mfindex;
	} else if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else {
		res->position = WINDOW_TOO_SOON;
		res->offset = mfindex - end;
	}
}

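/*
 * Illustration (made-up values): with current_mf = 1000 and hc->ist = 8, the
 * schedulable window is [1009, 8160]. An mfindex of 1005 is WINDOW_TOO_LATE
 * by 4 uframes, 2000 is WINDOW_INSIDE, and 9000 is WINDOW_TOO_SOON by 840
 * uframes.
 */
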
static void isoch_feed_out_timer(void *);
static void isoch_feed_in_timer(void *);

/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to xHCI spec 4.11.2.5, we can't just push all the TRBs we have.
 * We must not do it too late, but also not too soon.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];
		suseconds_t delay;

		assert(it->state == ISOCH_FILLED);

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON:
			delay = wd.offset * 125;
			usb_log_debug("[isoch] delaying feeding buffer %zu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_out_timer, ep);
			goto out;

		case WINDOW_INSIDE:
			usb_log_debug("[isoch] feeding buffer %zu at 0x%llx",
			    it - isoch->transfers, it->mfindex);
			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;

		case WINDOW_TOO_LATE:
			/*
			 * Missed the opportunity to schedule. Just mark this
			 * transfer as skipped.
			 */
			usb_log_debug("[isoch] missed feeding buffer %zu at 0x%llx by "
			    "%llu uframes", it - isoch->transfers, it->mfindex, wd.offset);
			it->state = ISOCH_COMPLETE;
			it->error = EOK;
			it->size = 0;

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;
		}
	}

out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/*
		 * The ring may be dead. If no event arrives within the delay,
		 * reset the endpoint.
		 */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_out_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_out(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to xHCI spec 4.11.2.5, we can't just push all the TRBs we have.
 * We must not do it too late, but also not too soon.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];
		suseconds_t delay;

		/* IN buffers are "filled" with free space */
		if (it->state == ISOCH_EMPTY) {
			it->size = ep->base.max_transfer_size;
			it->state = ISOCH_FILLED;
			calc_next_mfindex(ep, it);
		}

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON:
			/* Not allowed to feed yet. Defer to later. */
			delay = wd.offset * 125;
			usb_log_debug("[isoch] delaying feeding buffer %zu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_in_timer, ep);
			goto out;
		case WINDOW_TOO_LATE:
			usb_log_debug("[isoch] missed feeding buffer %zu at 0x%llx by "
			    "%llu uframes", it - isoch->transfers, it->mfindex, wd.offset);
			/* Missed the opportunity to schedule. Schedule ASAP. */
			it->mfindex += wd.offset;
			// Align to ESIT start boundary
			it->mfindex += ep->interval - 1;
			it->mfindex &= ~(ep->interval - 1);

			/* fallthrough */
		case WINDOW_INSIDE:
			isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
			isoch->last_mf = it->mfindex;

			usb_log_debug("[isoch] feeding buffer %zu at 0x%llx",
			    it - isoch->transfers, it->mfindex);

			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}
			break;
		}
	}

out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/*
		 * The ring may be dead. If no event arrives within the delay,
		 * reset the endpoint.
		 */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_in_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_in(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * First, withdraw all (at least one) results left by the previous transfers
 * to make room in the ring. Stop on the first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to feed
 * it to the xHC.
 */
errno_t isoch_schedule_out(xhci_transfer_t *transfer)
{
	errno_t err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* This shall have been checked by the endpoint already */
	assert(transfer->batch.size <= ep->base.max_transfer_size);

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transferred_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		transfer->batch.transferred_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule the next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mf = it->mfindex;
	usb_log_debug("[isoch] buffer %zu will be on schedule at 0x%llx",
	    it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.size;
	memcpy(it->data.virt, transfer->batch.dma_buffer.virt, it->size);
	it->state = ISOCH_FILLED;

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	usb_transfer_batch_finish(&transfer->batch);
	return err;
}

/**
 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
 * buffers, and fetch one filled buffer from the ring.
 */
errno_t isoch_schedule_in(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (transfer->batch.size < ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an undersized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

	/* Wait for at least one transfer to complete. */
	while (it->state != ISOCH_COMPLETE) {
		/* First, make sure we will have something to read. */
		fibril_timer_clear_locked(isoch->feeding_timer);
		isoch_feed_in(ep);

		usb_log_debug("[isoch] waiting for buffer %zu to be completed",
		    it - isoch->transfers);
		fibril_condvar_wait(&isoch->avail, &isoch->guard);

		/* The dequeue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->dequeue];
	}

	isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

	/* Withdraw results from the previous transfer. */
	if (!it->error) {
		memcpy(transfer->batch.dma_buffer.virt, it->data.virt, it->size);
		transfer->batch.transferred_size = it->size;
		transfer->batch.error = it->error;
	}

	/* Prepare the empty buffer */
	it->state = ISOCH_EMPTY;

	fibril_mutex_unlock(&isoch->guard);
	usb_transfer_batch_finish(&transfer->batch);

	return EOK;
}

void isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep,
    xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&ep->isoch->guard);

	errno_t err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/*
		 * For OUT, there was nothing to process.
		 * For IN, the buffer has overfilled.
		 * In either case, reset the ring.
		 */
		usb_log_warning("Ring over/underrun.");
		isoch_reset_no_timer(ep);
		goto out;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		err = EIO;
		break;
	}

	/*
	 * The order in which events are delivered is not necessarily the one
	 * we would expect. It is safer to walk the list of our transfers and
	 * check which one it is.
	 * To minimize the number of transfers checked, we start at the dequeue
	 * pointer and exit the loop as soon as the transfer is found.
	 */
	bool found_mine = false;
	for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
		/* Wrap back to 0, without a modulo on every iteration */
		if (di == isoch->buffer_count) {
			di = 0;
		}

		xhci_isoch_transfer_t * const it = &isoch->transfers[di];

		if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
			usb_log_debug("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occurred for an unknown transfer.");
	}

	/*
	 * It may happen that the driver has already stopped reading (writing),
	 * and our buffers are filled (empty). As QEMU (and possibly others)
	 * does not send the RING_UNDERRUN (OVERRUN) event, we set a timer to
	 * reset it after the buffers should have been consumed. If there
	 * is no issue, the timer will get restarted often enough.
	 */
	timer_schedule_reset(ep);

out:
	fibril_condvar_broadcast(&ep->isoch->avail);
	fibril_mutex_unlock(&ep->isoch->guard);
}

/**
 * @}
 */