source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ 58f4c0f

Last change on this file since 58f4c0f was 51c1d500, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: move HC semantics from endpoint/device to hc module

  • Property mode set to 100644
File size: 18.9 KB
/*
 * Copyright (c) 2017 HelUSB3 team
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief Isochronous transfer handling.
 */

#include <str_error.h>
#include <macros.h>

#include "endpoint.h"
#include "hw_struct/trb.h"
#include "hw_struct/regs.h"
#include "trb_ring.h"
#include "hc.h"
#include "bus.h"

#include "isoch.h"

void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_initialize(&isoch->guard);
	fibril_condvar_initialize(&isoch->avail);

	const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;

	/*
	 * We shall cover at least twice the IST period, otherwise we will get
	 * an over/underrun every time.
	 */
	isoch->buffer_count = (2 * hc->ist) / ep->interval;

	/* 2 buffers are the very minimum. */
	isoch->buffer_count = max(2, isoch->buffer_count);
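	/*
	 * Worked example (illustrative numbers only): with IST = 8
	 * microframes and an endpoint interval of 4 microframes,
	 * buffer_count = (2 * 8) / 4 = 4; with interval = 16, the
	 * division yields 1 and the max() above bumps it to the
	 * minimum of 2.
	 */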

	usb_log_debug2("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
}

static void isoch_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		isoch->transfers[i].state = ISOCH_EMPTY;
	}

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch->last_mf = -1U;
	usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
}

static void isoch_reset_no_timer(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));
	/*
	 * As we cannot clear the timer when we are triggered by it,
	 * we have to avoid doing so in the common method.
	 */
	fibril_timer_clear_locked(isoch->reset_timer);
	isoch_reset(ep);
}

static void isoch_reset_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_reset(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/*
 * Fast transfers could trigger the reset timer before the data is processed,
 * leading to a false reset.
 */
#define RESET_TIMER_DELAY 100000
static void timer_schedule_reset(xhci_endpoint_t *ep)
{
	xhci_isoch_t * const isoch = ep->isoch;
	const suseconds_t delay = isoch->buffer_count * ep->interval * 125 + RESET_TIMER_DELAY;
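	/*
	 * The delay above covers all buffered data: e.g. (illustrative)
	 * 4 buffers at an interval of 8 microframes hold 4 * 8 * 125 =
	 * 4000 us of data, so the reset fires only after roughly 104 ms
	 * of silence (4000 us + RESET_TIMER_DELAY).
	 */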

	fibril_timer_clear_locked(isoch->reset_timer);
	fibril_timer_set_locked(isoch->reset_timer, delay,
	    isoch_reset_timer, ep);
}

void isoch_fini(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (isoch->feeding_timer) {
		fibril_timer_clear(isoch->feeding_timer);
		fibril_timer_destroy(isoch->feeding_timer);
		fibril_timer_clear(isoch->reset_timer);
		fibril_timer_destroy(isoch->reset_timer);
	}

	if (isoch->transfers) {
		for (size_t i = 0; i < isoch->buffer_count; ++i)
			dma_buffer_free(&isoch->transfers[i].data);
		free(isoch->transfers);
	}
}

/**
 * Allocate isochronous buffers. Create the feeding and reset timers.
 */
int isoch_alloc_transfers(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	isoch->feeding_timer = fibril_timer_create(&isoch->guard);
	isoch->reset_timer = fibril_timer_create(&isoch->guard);
	if (!isoch->feeding_timer)
		return ENOMEM;

	isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
	if (!isoch->transfers)
		goto err;

	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
		if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
			goto err;
		}
	}

	fibril_mutex_lock(&isoch->guard);
	isoch_reset_no_timer(ep);
	fibril_mutex_unlock(&isoch->guard);

	return EOK;
err:
	isoch_fini(ep);
	return ENOMEM;
}

static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	trb.parameter = it->data.phys;
	TRB_CTRL_SET_XFER_LEN(trb, it->size);
	TRB_CTRL_SET_TD_SIZE(trb, 0);
	TRB_CTRL_SET_IOC(trb, 1);
	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);

	// See xHCI spec 4.14.1 and 4.11.2.3 for the explanation how to calculate these.
	size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
	size_t tbc = tdpc / ep->max_burst;
	if (!(tdpc % ep->max_burst))
		--tbc;
	size_t bsp = tdpc % ep->max_burst;
	size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
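	/*
	 * Worked example of the arithmetic above (illustrative numbers):
	 * ep->max_burst = 3 and it->size = 5000 give tdpc = 5 packets,
	 * tbc = 1 (two bursts), bsp = 2 and tlbpc = 1 (the last burst
	 * carries two packets). The Frame ID set below is the 11-bit
	 * frame number, i.e. the microframe index divided by 8, modulo 2048.
	 */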

	TRB_ISOCH_SET_TBC(trb, tbc);
	TRB_ISOCH_SET_TLBPC(trb, tlbpc);
	TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);

	const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
	return err;
}

/** The number of bits in which the HW stores MFINDEX */
#define EPOCH_BITS 14
/** Minimum time (in usec) since the last recorded wrap before we assume a missed wrap event */
#define EPOCH_DELAY 500000
/** MFINDEX values (in microframes) below this threshold are checked for a missed wrap */
#define EPOCH_LOW_MFINDEX (8 * 100)
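
/*
 * Note: with 14 bits, MFINDEX wraps every 2^14 * 125 us, i.e. roughly every
 * 2 seconds. The heuristic in get_current_microframe() below assumes the
 * wrap event may arrive late: a low MFINDEX combined with a stale wrap_time
 * means we are already running in the next epoch.
 */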

static inline uint64_t get_system_time(void)
{
	struct timeval tv;
	getuptime(&tv);
	return ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
}

static inline uint64_t get_current_microframe(const xhci_hc_t *hc)
{
	const uint32_t reg_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX);
	/*
	 * If the mfindex is low and the time passed since the last mfindex
	 * wrap is too long, we have entered the new epoch already (and have
	 * not received the wrap event yet).
	 */
	uint64_t epoch = hc->wrap_count;
	if (reg_mfindex < EPOCH_LOW_MFINDEX && get_system_time() - hc->wrap_time > EPOCH_DELAY) {
		++epoch;
	}
	return (epoch << EPOCH_BITS) + reg_mfindex;
}

static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_isoch_t * const isoch = ep->isoch;
	if (isoch->last_mf == -1U) {
		const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
		const xhci_hc_t *hc = bus->hc;

		/* Delay the first frame by some time to fill the buffer, but at most 10 milliseconds. */
		const uint64_t delay = min(isoch->buffer_count * ep->interval, 10 * 8);
		it->mfindex = get_current_microframe(hc) + 1 + delay + hc->ist;

		// Align to ESIT start boundary
		it->mfindex += ep->interval - 1;
		it->mfindex &= ~(ep->interval - 1);
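		/*
		 * Example (illustrative): interval = 8, mfindex = 1205:
		 * 1205 + 7 = 1212 and 1212 & ~7 = 1208, the next multiple
		 * of 8 at or above 1205. This assumes ep->interval is a
		 * power of two, as the xHCI interval encoding implies.
		 */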
	} else {
		it->mfindex = isoch->last_mf + ep->interval;
	}
}

/** 895 ms in uframes */
#define END_FRAME_DELAY (895000 / 125)

typedef enum {
	WINDOW_TOO_SOON,
	WINDOW_INSIDE,
	WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
	window_position_t position;
	uint64_t offset;
} window_decision_t;

/**
 * Decide on the position of mfindex relative to the window specified by
 * the Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case the mfindex lies outside the window, also the number
 * of uframes it is off by.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint64_t mfindex)
{
	const uint64_t current_mf = get_current_microframe(hc);
	const uint64_t start = current_mf + hc->ist + 1;
	const uint64_t end = current_mf + END_FRAME_DELAY;
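	/*
	 * Example (illustrative): with current_mf = 10000 and IST = 8, the
	 * window is <10009, 17160>. A TRB aimed at mfindex 9000 is too late
	 * by 1009 uframes, one aimed at 20000 is too soon by 2840.
	 */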

	if (mfindex < start) {
		res->position = WINDOW_TOO_LATE;
		res->offset = start - mfindex;
	} else if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else {
		res->position = WINDOW_TOO_SOON;
		res->offset = mfindex - end;
	}
}

static void isoch_feed_out_timer(void *);
static void isoch_feed_in_timer(void *);

/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->hw_enqueue].state == ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];

		assert(it->state == ISOCH_FILLED);

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_out_timer, ep);
			goto out;
		}

		case WINDOW_INSIDE:
			usb_log_debug2("[isoch] feeding buffer %lu at 0x%llx",
			    it - isoch->transfers, it->mfindex);
			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;

		case WINDOW_TOO_LATE:
			/* Missed the opportunity to schedule. Just mark this transfer as skipped. */
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%llx by %llu uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			it->state = ISOCH_COMPLETE;
			it->error = EOK;
			it->size = 0;

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;
		}
	}

out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/* The ring may be dead. If no event arrives before the delay expires, reset the endpoint. */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_out_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_out(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];

		/* IN buffers are "filled" with free space */
		if (it->state == ISOCH_EMPTY) {
			it->size = ep->base.max_transfer_size;
			it->state = ISOCH_FILLED;
			calc_next_mfindex(ep, it);
		}

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Not allowed to feed yet. Defer to later. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_in_timer, ep);
			goto out;
		}

		case WINDOW_TOO_LATE:
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%llx by %llu uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			/* Missed the opportunity to schedule. Schedule ASAP. */
			it->mfindex += wd.offset;
			// Align to ESIT start boundary
			it->mfindex += ep->interval - 1;
			it->mfindex &= ~(ep->interval - 1);

			/* fallthrough */
		case WINDOW_INSIDE:
			isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
			isoch->last_mf = it->mfindex;

			usb_log_debug2("[isoch] feeding buffer %lu at 0x%llx",
			    it - isoch->transfers, it->mfindex);

			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}
			break;
		}
	}
out:
	if (fed) {
		hc_ring_ep_doorbell(ep, 0);
		/* The ring may be dead. If no event arrives before the delay expires, reset the endpoint. */
		timer_schedule_reset(ep);
	}
}

static void isoch_feed_in_timer(void *ep)
{
	xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
	fibril_mutex_lock(&isoch->guard);
	isoch_feed_in(ep);
	fibril_mutex_unlock(&isoch->guard);
}

/**
 * First, withdraw all results (at least one) left by previous transfers to
 * make room in the ring. Stop on the first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to feed
 * it to the xHC.
 */
int isoch_schedule_out(xhci_transfer_t *transfer)
{
	int err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (transfer->batch.buffer_size > ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an oversized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transfered_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		transfer->batch.transfered_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule the next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mf = it->mfindex;
	usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%llx", it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.buffer_size;
	memcpy(it->data.virt, transfer->batch.buffer, it->size);
	it->state = ISOCH_FILLED;

	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	usb_transfer_batch_finish(&transfer->batch);
	return err;
}

/**
 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
 * buffers, and fetch one filled buffer from the ring.
 */
int isoch_schedule_in(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an undersized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

	/* Wait for at least one transfer to complete. */
	while (it->state != ISOCH_COMPLETE) {
		/* First, make sure we will have something to read. */
		fibril_timer_clear_locked(isoch->feeding_timer);
		isoch_feed_in(ep);

		usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
		fibril_condvar_wait(&isoch->avail, &isoch->guard);

		/* The dequeue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->dequeue];
	}

	isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

	/* Withdraw results from the previous transfer. */
	if (!it->error) {
		memcpy(transfer->batch.buffer, it->data.virt, it->size);
		transfer->batch.transfered_size = it->size;
		transfer->batch.error = it->error;
	}

	/* Prepare the empty buffer */
	it->state = ISOCH_EMPTY;

	fibril_mutex_unlock(&isoch->guard);
	usb_transfer_batch_finish(&transfer->batch);

	return EOK;
}

void isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&ep->isoch->guard);

	int err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/* For OUT, there was nothing to process. */
		/* For IN, the buffer has overfilled; we empty the buffers and re-add TRBs. */
		usb_log_warning("Ring over/underrun.");
		isoch_reset_no_timer(ep);
		/* The broadcast and unlock are done at the common exit below. */
		goto out;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		err = EIO;
		break;
	}

	/*
	 * The order of delivering events is not necessarily the one we would
	 * expect. It is safer to walk the list of our transfers and check
	 * which one it is.
	 * To minimize the number of transfers checked, we start at the dequeue
	 * pointer and exit the loop as soon as the transfer is found.
	 */
	bool found_mine = false;
	for (size_t i = 0, di = isoch->dequeue; i < isoch->buffer_count; ++i, ++di) {
		/* Wrap it back to 0, don't use modulo on every loop traversal */
		if (di == isoch->buffer_count) {
			di = 0;
		}

		xhci_isoch_transfer_t * const it = &isoch->transfers[di];

		if (it->state == ISOCH_FED && it->interrupt_trb_phys == trb->parameter) {
			usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
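			/*
			 * The transfer length in the event TRB is the residue
			 * (bytes not transferred), so subtracting it from the
			 * expected size yields the actually transferred size.
			 */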
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occurred for an unknown transfer.");
	}

	/*
	 * It may happen that the driver already stopped reading (writing),
	 * and our buffers are filled (empty). As QEMU (and possibly others)
	 * does not send the RING_UNDERRUN (OVERRUN) event, we set a timer to
	 * reset the endpoint after the buffers should have been consumed. If
	 * there is no issue, the timer will get restarted often enough.
	 */
	timer_schedule_reset(ep);

out:
	fibril_condvar_broadcast(&ep->isoch->avail);
	fibril_mutex_unlock(&ep->isoch->guard);
}

/**
 * @}
 */