source: mainline/uspace/drv/bus/usb/xhci/isoch.c

/*
 * Copyright (c) 2018 Ondrej Hlavaty, Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief Isochronous transfer management for the xHCI host controller driver.
 */

#include <str_error.h>
#include <macros.h>

#include "endpoint.h"
#include "hw_struct/trb.h"
#include "hw_struct/regs.h"
#include "trb_ring.h"
#include "hc.h"
#include "bus.h"

#include "isoch.h"

void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

    fibril_mutex_initialize(&isoch->guard);
    fibril_condvar_initialize(&isoch->avail);

    const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;

    /*
     * We shall cover at least twice the IST period, otherwise we will get
     * an over/underrun every time.
     */
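    /*
     * Illustrative example: with an IST of 8 microframes and an endpoint
     * interval of 4 microframes, 2 * 8 / 4 = 4 buffers are kept in flight.
     */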
    isoch->buffer_count = (2 * hc->ist) / ep->interval;

    /* 2 buffers are the very minimum. */
    isoch->buffer_count = max(2, isoch->buffer_count);

    usb_log_debug("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
}

static void isoch_reset(xhci_endpoint_t *ep)
{
    xhci_isoch_t *const isoch = ep->isoch;
    assert(fibril_mutex_is_locked(&isoch->guard));

    isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;

    for (size_t i = 0; i < isoch->buffer_count; ++i) {
        isoch->transfers[i].state = ISOCH_EMPTY;
    }

    fibril_timer_clear_locked(isoch->feeding_timer);
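    /* -1U is a sentinel: no transfer has been scheduled yet (see calc_next_mfindex). */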
    isoch->last_mf = -1U;
    usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.",
        XHCI_EP_ARGS(*ep));
}

static void isoch_reset_no_timer(xhci_endpoint_t *ep)
{
    xhci_isoch_t *const isoch = ep->isoch;
    assert(fibril_mutex_is_locked(&isoch->guard));
    /*
     * As we cannot clear the timer while we are triggered by it,
     * we have to avoid doing it in the common method.
     */
    fibril_timer_clear_locked(isoch->reset_timer);
    isoch_reset(ep);
}

static void isoch_reset_timer(void *ep)
{
    xhci_isoch_t *const isoch = xhci_endpoint_get(ep)->isoch;
    fibril_mutex_lock(&isoch->guard);
    isoch_reset(ep);
    fibril_mutex_unlock(&isoch->guard);
}

/*
 * Fast transfers could trigger the reset timer before the data is
 * processed, leading to a false reset.
 */
#define RESET_TIMER_DELAY 100000
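/*
 * Illustrative example: with 4 buffers at an interval of 8 microframes, the
 * reset fires 4 * 8 * 125 + 100000 = 104000 us after the last feed, i.e.
 * only after all scheduled buffers could have been consumed by the HC.
 */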
static void timer_schedule_reset(xhci_endpoint_t *ep)
{
    xhci_isoch_t *const isoch = ep->isoch;
    const usec_t delay = isoch->buffer_count * ep->interval * 125 +
        RESET_TIMER_DELAY;

    fibril_timer_clear_locked(isoch->reset_timer);
    fibril_timer_set_locked(isoch->reset_timer, delay,
        isoch_reset_timer, ep);
}

void isoch_fini(xhci_endpoint_t *ep)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

    /* Destroy the timers separately: either one may have failed to allocate. */
    if (isoch->feeding_timer) {
        fibril_timer_clear(isoch->feeding_timer);
        fibril_timer_destroy(isoch->feeding_timer);
    }
    if (isoch->reset_timer) {
        fibril_timer_clear(isoch->reset_timer);
        fibril_timer_destroy(isoch->reset_timer);
    }

    if (isoch->transfers) {
        for (size_t i = 0; i < isoch->buffer_count; ++i)
            dma_buffer_free(&isoch->transfers[i].data);
        free(isoch->transfers);
    }
}

/**
 * Allocate isochronous buffers. Create the feeding and reset timers.
 */
errno_t isoch_alloc_transfers(xhci_endpoint_t *ep)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

    isoch->feeding_timer = fibril_timer_create(&isoch->guard);
    isoch->reset_timer = fibril_timer_create(&isoch->guard);
    if (!isoch->feeding_timer || !isoch->reset_timer)
        goto err;

    isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
    if (!isoch->transfers)
        goto err;

    for (size_t i = 0; i < isoch->buffer_count; ++i) {
        xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
        if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
            goto err;
        }
    }

    fibril_mutex_lock(&isoch->guard);
    isoch_reset_no_timer(ep);
    fibril_mutex_unlock(&isoch->guard);

    return EOK;
err:
    isoch_fini(ep);
    return ENOMEM;
}

static errno_t schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
    xhci_trb_t trb;
    xhci_trb_clean(&trb);

    trb.parameter = host2xhci(64, dma_buffer_phys_base(&it->data));
    TRB_CTRL_SET_XFER_LEN(trb, it->size);
    TRB_CTRL_SET_TD_SIZE(trb, 0);
    TRB_CTRL_SET_IOC(trb, 1);
    TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);

    // See xHCI spec 4.14.1 and 4.11.2.3 for how to calculate these.
    size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
    size_t tbc = tdpc / ep->max_burst;
    if (!(tdpc % ep->max_burst))
        --tbc;
    size_t bsp = tdpc % ep->max_burst;
    size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
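    /*
     * Worked example (illustrative numbers): a 4096 B transfer with
     * max_burst = 3 packs into tdpc = 4 packets, sent as two bursts of
     * 3 + 1 packets. Hence tbc = 4 / 3 = 1 (bursts minus one; no decrement,
     * since 4 % 3 != 0), bsp = 1 and tlbpc = 0 (one packet in the last
     * burst, minus one).
     */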

    TRB_ISOCH_SET_TBC(trb, tbc);
    TRB_ISOCH_SET_TLBPC(trb, tlbpc);
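    /*
     * The Frame ID field is 11 bits wide: dividing the microframe index by 8
     * gives the frame number, taken modulo 2048 to fit the field.
     */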
    TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);

    const errno_t err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
    return err;
}

/** The number of bits in which the HW stores MFINDEX */
#define EPOCH_BITS 14
/** The delay in usec after which an epoch wrap is assumed */
#define EPOCH_DELAY 500000
/** Below this many microframes, a missed epoch wrap is checked for */
#define EPOCH_LOW_MFINDEX (8 * 100)
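/*
 * The MFINDEX register is a 14-bit counter of 125 us microframes, so it wraps
 * roughly every 2.048 seconds. The driver extends it to 64 bits by counting
 * the wraps (hc->wrap_count); the check below catches the case when the
 * register has already wrapped but the wrap event has not arrived yet.
 */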

static inline uint64_t get_system_time(void)
{
    struct timespec ts;
    getuptime(&ts);
    return SEC2USEC(ts.tv_sec) + NSEC2USEC(ts.tv_nsec);
}

static inline uint64_t get_current_microframe(const xhci_hc_t *hc)
{
    const uint32_t reg_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
    /*
     * If the mfindex is low and the time passed since the last mfindex wrap
     * is too long, we have entered a new epoch already (and haven't received
     * the wrap event yet).
     */
    uint64_t epoch = hc->wrap_count;
    if (reg_mfindex < EPOCH_LOW_MFINDEX &&
        get_system_time() - hc->wrap_time > EPOCH_DELAY) {
        ++epoch;
    }
    return (epoch << EPOCH_BITS) + reg_mfindex;
}

static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
    xhci_isoch_t *const isoch = ep->isoch;
    if (isoch->last_mf == -1U) {
        const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
        const xhci_hc_t *hc = bus->hc;

        /*
         * Delay the first frame by some time to fill the buffers, but at
         * most by 10 milliseconds (80 microframes).
         */
        const uint64_t delay = min(isoch->buffer_count * ep->interval, 10 * 8);
        it->mfindex = get_current_microframe(hc) + 1 + delay + hc->ist;

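        /*
         * The round-up below relies on ep->interval being a power of two,
         * which xHCI guarantees (the endpoint interval is encoded as a power
         * of two of microframes): adding interval - 1 and masking off the
         * low bits rounds mfindex up to the next multiple of the interval.
         */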
        // Align to ESIT start boundary
        it->mfindex += ep->interval - 1;
        it->mfindex &= ~(ep->interval - 1);
    } else {
        it->mfindex = isoch->last_mf + ep->interval;
    }
}

/** 895 ms in uframes */
#define END_FRAME_DELAY (895000 / 125)

typedef enum {
    WINDOW_TOO_SOON,
    WINDOW_INSIDE,
    WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
    window_position_t position;
    uint64_t offset;
} window_decision_t;

/**
 * Decide on the position of mfindex relative to the window given by the
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case the mfindex lies outside, also the number of
 * uframes it is off by.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc,
    uint64_t mfindex)
{
    const uint64_t current_mf = get_current_microframe(hc);
    const uint64_t start = current_mf + hc->ist + 1;
    const uint64_t end = current_mf + END_FRAME_DELAY;
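    /*
     * Illustrative example: with an IST of 8 microframes, the window spans
     * from current_mf + 9 up to current_mf + 7160, i.e. up to 895 ms ahead.
     */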

    if (mfindex < start) {
        res->position = WINDOW_TOO_LATE;
        res->offset = start - mfindex;
    } else if (mfindex <= end) {
        res->position = WINDOW_INSIDE;
    } else {
        res->position = WINDOW_TOO_SOON;
        res->offset = mfindex - end;
    }
}

static void isoch_feed_out_timer(void *);
static void isoch_feed_in_timer(void *);

/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all the TRBs we have. We must
 * not do it too late, but also not too soon.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;
    assert(fibril_mutex_is_locked(&isoch->guard));

    xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
    xhci_hc_t *hc = bus->hc;

    bool fed = false;

    while (isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
        xhci_isoch_transfer_t *const it = &isoch->transfers[isoch->hw_enqueue];
        usec_t delay;

        assert(it->state == ISOCH_FILLED);

        window_decision_t wd;
        window_decide(&wd, hc, it->mfindex);

        switch (wd.position) {
        case WINDOW_TOO_SOON:
            delay = wd.offset * 125;
            usb_log_debug("[isoch] delaying feeding buffer %zu for %lldus",
                it - isoch->transfers, delay);
            fibril_timer_set_locked(isoch->feeding_timer, delay,
                isoch_feed_out_timer, ep);
            goto out;

        case WINDOW_INSIDE:
            usb_log_debug("[isoch] feeding buffer %zu at 0x%" PRIx64,
                it - isoch->transfers, it->mfindex);
            it->error = schedule_isochronous_trb(ep, it);
            if (it->error) {
                it->state = ISOCH_COMPLETE;
            } else {
                it->state = ISOCH_FED;
                fed = true;
            }

            isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
            break;

        case WINDOW_TOO_LATE:
            /*
             * Missed the opportunity to schedule. Just mark this transfer
             * as skipped.
             */
            usb_log_debug("[isoch] missed feeding buffer %zu at 0x%" PRIx64 " by "
                "%" PRIu64 " uframes", it - isoch->transfers, it->mfindex, wd.offset);
            it->state = ISOCH_COMPLETE;
            it->error = EOK;
            it->size = 0;

            isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
            break;
        }
    }

out:
    if (fed) {
        hc_ring_ep_doorbell(ep, 0);
        /*
         * The ring may be dead. If no event arrives within the delay,
         * reset the endpoint.
         */
        timer_schedule_reset(ep);
    }
}

static void isoch_feed_out_timer(void *ep)
{
    xhci_isoch_t *const isoch = xhci_endpoint_get(ep)->isoch;
    fibril_mutex_lock(&isoch->guard);
    isoch_feed_out(ep);
    fibril_mutex_unlock(&isoch->guard);
}

/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all the TRBs we have. We must
 * not do it too late, but also not too soon.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;
    assert(fibril_mutex_is_locked(&isoch->guard));

    xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
    xhci_hc_t *hc = bus->hc;

    bool fed = false;

    while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
        xhci_isoch_transfer_t *const it = &isoch->transfers[isoch->enqueue];
        usec_t delay;

        /* IN buffers are "filled" with free space */
        if (it->state == ISOCH_EMPTY) {
            it->size = ep->base.max_transfer_size;
            it->state = ISOCH_FILLED;
            calc_next_mfindex(ep, it);
        }

        window_decision_t wd;
        window_decide(&wd, hc, it->mfindex);

        switch (wd.position) {
        case WINDOW_TOO_SOON:
            /* Not allowed to feed yet. Defer to later. */
            delay = wd.offset * 125;
            usb_log_debug("[isoch] delaying feeding buffer %zu for %lldus",
                it - isoch->transfers, delay);
            fibril_timer_set_locked(isoch->feeding_timer, delay,
                isoch_feed_in_timer, ep);
            goto out;
        case WINDOW_TOO_LATE:
            usb_log_debug("[isoch] missed feeding buffer %zu at 0x%" PRIx64 " by "
                "%" PRIu64 " uframes", it - isoch->transfers, it->mfindex, wd.offset);
            /* Missed the opportunity to schedule. Schedule ASAP. */
            it->mfindex += wd.offset;
            // Align to ESIT start boundary
            it->mfindex += ep->interval - 1;
            it->mfindex &= ~(ep->interval - 1);

            /* fallthrough */
        case WINDOW_INSIDE:
            isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
            isoch->last_mf = it->mfindex;

            usb_log_debug("[isoch] feeding buffer %zu at 0x%" PRIx64,
                it - isoch->transfers, it->mfindex);

            it->error = schedule_isochronous_trb(ep, it);
            if (it->error) {
                it->state = ISOCH_COMPLETE;
            } else {
                it->state = ISOCH_FED;
                fed = true;
            }
            break;
        }
    }
out:

    if (fed) {
        hc_ring_ep_doorbell(ep, 0);
        /*
         * The ring may be dead. If no event arrives within the delay,
         * reset the endpoint.
         */
        timer_schedule_reset(ep);
    }
}

static void isoch_feed_in_timer(void *ep)
{
    xhci_isoch_t *const isoch = xhci_endpoint_get(ep)->isoch;
    fibril_mutex_lock(&isoch->guard);
    isoch_feed_in(ep);
    fibril_mutex_unlock(&isoch->guard);
}

/**
 * First, withdraw all (at least one) results left over by previous transfers
 * to make room in the ring. Stop on the first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to
 * feed it to the xHC.
 */
errno_t isoch_schedule_out(xhci_transfer_t *transfer)
{
    errno_t err = EOK;

    xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

    /* This shall have been checked by the endpoint already. */
    assert(transfer->batch.size <= ep->base.max_transfer_size);

    fibril_mutex_lock(&isoch->guard);

    /* Get the buffer to write to */
    xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

    /* Wait for the buffer to be completed */
    while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
        fibril_condvar_wait(&isoch->avail, &isoch->guard);
        /* The enqueue ptr may have changed while sleeping */
        it = &isoch->transfers[isoch->enqueue];
    }

    isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

    /* Withdraw results from previous transfers. */
    transfer->batch.transferred_size = 0;
    xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
    while (res->state == ISOCH_COMPLETE) {
        isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

        res->state = ISOCH_EMPTY;
        transfer->batch.transferred_size += res->size;
        transfer->batch.error = res->error;
        if (res->error)
            break; // Announce one error at a time

        res = &isoch->transfers[isoch->dequeue];
    }

    assert(it->state == ISOCH_EMPTY);

    /* Calculate when to schedule the next transfer */
    calc_next_mfindex(ep, it);
    isoch->last_mf = it->mfindex;
    usb_log_debug("[isoch] buffer %zu will be on schedule at 0x%" PRIx64,
        it - isoch->transfers, it->mfindex);

    /* Prepare the transfer. */
    it->size = transfer->batch.size;
    memcpy(it->data.virt, transfer->batch.dma_buffer.virt, it->size);
    it->state = ISOCH_FILLED;

    fibril_timer_clear_locked(isoch->feeding_timer);
    isoch_feed_out(ep);

    fibril_mutex_unlock(&isoch->guard);

    usb_transfer_batch_finish(&transfer->batch);
    return err;
}

/**
 * IN is in fact easier than OUT. Our only responsibility is to feed all the
 * empty buffers, and fetch one filled buffer from the ring.
 */
errno_t isoch_schedule_in(xhci_transfer_t *transfer)
{
    xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

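    /*
     * IN buffers are fed to the HC with room for max_transfer_size bytes
     * each; accepting a smaller request could overflow the caller's buffer
     * when a completed buffer is copied out below.
     */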
    if (transfer->batch.size < ep->base.max_transfer_size) {
        usb_log_error("Cannot schedule an undersized isochronous transfer.");
        return ELIMIT;
    }

    fibril_mutex_lock(&isoch->guard);

    xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

    /* Wait for at least one transfer to complete. */
    while (it->state != ISOCH_COMPLETE) {
        /* First, make sure we will have something to read. */
        fibril_timer_clear_locked(isoch->feeding_timer);
        isoch_feed_in(ep);

        usb_log_debug("[isoch] waiting for buffer %zu to be completed",
            it - isoch->transfers);
        fibril_condvar_wait(&isoch->avail, &isoch->guard);

        /* The dequeue ptr may have changed while sleeping */
        it = &isoch->transfers[isoch->dequeue];
    }

    isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

    /* Withdraw results from the previous transfer. */
    transfer->batch.error = it->error;
    if (!it->error) {
        memcpy(transfer->batch.dma_buffer.virt, it->data.virt, it->size);
        transfer->batch.transferred_size = it->size;
    }

    /* Prepare the empty buffer */
    it->state = ISOCH_EMPTY;

    fibril_mutex_unlock(&isoch->guard);
    usb_transfer_batch_finish(&transfer->batch);

    return EOK;
}

void isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep,
    xhci_trb_t *trb)
{
    assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
    xhci_isoch_t *const isoch = ep->isoch;

    fibril_mutex_lock(&ep->isoch->guard);

    errno_t err;
    const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

    switch (completion_code) {
    case XHCI_TRBC_RING_OVERRUN:
    case XHCI_TRBC_RING_UNDERRUN:
        /*
         * For OUT, there was nothing to process.
         * For IN, the buffer has overfilled.
         * In either case, reset the ring. The broadcast and unlock are
         * handled at the out label.
         */
        usb_log_warning("Ring over/underrun.");
        isoch_reset_no_timer(ep);
        goto out;
    case XHCI_TRBC_SHORT_PACKET:
    case XHCI_TRBC_SUCCESS:
        err = EOK;
        break;
    default:
        usb_log_warning("Transfer not successful: %u", completion_code);
        err = EIO;
        break;
    }

    /*
     * The order in which events are delivered is not necessarily the one we
     * would expect. It is safer to walk the list of our transfers and check
     * which one it is.
     * To minimize the number of transfers checked, we start at the dequeue
     * pointer and exit the loop as soon as the transfer is found.
     */
    bool found_mine = false;
    for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
        /* Wrap it back to 0, don't use modulo on every loop traversal */
        if (di == isoch->buffer_count) {
            di = 0;
        }

        xhci_isoch_transfer_t *const it = &isoch->transfers[di];

        if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
            usb_log_debug("[isoch] buffer %zu completed", it - isoch->transfers);
            it->state = ISOCH_COMPLETE;
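            /*
             * The transfer length field of the event holds the residue,
             * i.e. the number of bytes NOT transferred, so subtracting it
             * yields the size actually transferred.
             */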
            it->size -= TRB_TRANSFER_LENGTH(*trb);
            it->error = err;
            found_mine = true;
            break;
        }
    }

    if (!found_mine) {
        usb_log_warning("[isoch] A transfer event occurred for an unknown transfer.");
    }

    /*
     * It may happen that the driver has already stopped reading (writing),
     * and our buffers are filled (empty). As QEMU (and possibly others)
     * does not send the RING_UNDERRUN (OVERRUN) event, we set a timer to
     * reset the endpoint after the buffers should have been consumed. If
     * there is no issue, the timer will get restarted often enough.
     */
    timer_schedule_reset(ep);

out:
    fibril_condvar_broadcast(&ep->isoch->avail);
    fibril_mutex_unlock(&ep->isoch->guard);
}

/**
 * @}
 */