source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ f92f6b1

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since f92f6b1 was f92f6b1, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci isoch: made buffer count variable

  • Property mode set to 100644
File size: 17.9 KB
Line 
1/*
2 * Copyright (c) 2017 HelUSB3 team
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller endpoint management.
34 */
35
36#include <str_error.h>
37#include <macros.h>
38
39#include "endpoint.h"
40#include "hw_struct/trb.h"
41#include "hw_struct/regs.h"
42#include "trb_ring.h"
43#include "hc.h"
44#include "bus.h"
45
46#include "isoch.h"
47
48void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
49{
50 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
51 xhci_isoch_t * const isoch = ep->isoch;
52
53 fibril_mutex_initialize(&isoch->guard);
54 fibril_condvar_initialize(&isoch->avail);
55
56 isoch->max_size = desc->companion.bytes_per_interval
57 ? desc->companion.bytes_per_interval
58 : ep->base.max_transfer_size;
59
60 const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;
61
62 /*
63 * We shall cover at least twice the IST period, otherwise we will get
64 * an over/underrun every time.
65 */
66 isoch->buffer_count = (2 * hc->ist) / ep->interval;
67
68 /* 2 buffers are the very minimum. */
69 isoch->buffer_count = max(2, isoch->buffer_count);
70
71 usb_log_error("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
72}
73
74static void isoch_reset(xhci_endpoint_t *ep)
75{
76 xhci_isoch_t * const isoch = ep->isoch;
77 assert(fibril_mutex_is_locked(&isoch->guard));
78
79 isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;
80
81 for (size_t i = 0; i < isoch->buffer_count; ++i) {
82 isoch->transfers[i].state = ISOCH_EMPTY;
83 }
84
85 fibril_timer_clear_locked(isoch->feeding_timer);
86 isoch->last_mfindex = -1U;
87 usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
88}
89
90void isoch_fini(xhci_endpoint_t *ep)
91{
92 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
93 xhci_isoch_t * const isoch = ep->isoch;
94
95 if (isoch->feeding_timer) {
96 fibril_timer_clear(isoch->feeding_timer);
97 fibril_timer_destroy(isoch->feeding_timer);
98 }
99
100 if (isoch->transfers) {
101 for (size_t i = 0; i < isoch->buffer_count; ++i)
102 dma_buffer_free(&isoch->transfers[i].data);
103 free(isoch->transfers);
104 }
105}
106
107/**
108 * Allocate isochronous buffers. Create the feeding timer.
109 */
110int isoch_alloc_transfers(xhci_endpoint_t *ep) {
111 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
112 xhci_isoch_t * const isoch = ep->isoch;
113
114 isoch->feeding_timer = fibril_timer_create(&isoch->guard);
115 if (!isoch->feeding_timer)
116 return ENOMEM;
117
118 isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
119 if(!isoch->transfers)
120 goto err;
121
122 for (size_t i = 0; i < isoch->buffer_count; ++i) {
123 xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
124 if (dma_buffer_alloc(&transfer->data, isoch->max_size)) {
125 goto err;
126 }
127 }
128
129 fibril_mutex_lock(&isoch->guard);
130 isoch_reset(ep);
131 fibril_mutex_unlock(&isoch->guard);
132
133 return EOK;
134err:
135 isoch_fini(ep);
136 return ENOMEM;
137}
138
139static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
140{
141 xhci_trb_t trb;
142 xhci_trb_clean(&trb);
143
144 trb.parameter = it->data.phys;
145 TRB_CTRL_SET_XFER_LEN(trb, it->size);
146 TRB_CTRL_SET_TD_SIZE(trb, 0);
147 TRB_CTRL_SET_IOC(trb, 1);
148 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);
149
150 // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
151 size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
152 size_t tbc = tdpc / ep->max_burst;
153 if (!tdpc % ep->max_burst) --tbc;
154 size_t bsp = tdpc % ep->max_burst;
155 size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
156
157 TRB_ISOCH_SET_TBC(trb, tbc);
158 TRB_ISOCH_SET_TLBPC(trb, tlbpc);
159 TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);
160
161 const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
162 return err;
163}
164
165static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
166{
167 xhci_isoch_t * const isoch = ep->isoch;
168 if (isoch->last_mfindex == -1U) {
169 const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
170 const xhci_hc_t *hc = bus->hc;
171
172 /* Choose some number, give us a little time to prepare the
173 * buffers */
174 it->mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1
175 + isoch->buffer_count * ep->interval
176 + hc->ist;
177
178 // Align to ESIT start boundary
179 it->mfindex += ep->interval - 1;
180 it->mfindex &= ~(ep->interval - 1);
181 } else {
182 it->mfindex = (isoch->last_mfindex + ep->interval) % XHCI_MFINDEX_MAX;
183 }
184}
185
/** 895 ms in uframes (895000 us / 125 us per uframe) */
#define END_FRAME_DELAY (895000 / 125)

/** Position of a scheduled mfindex relative to the allowed feeding window. */
typedef enum {
	WINDOW_TOO_SOON,	/**< Must wait before feeding the TRB. */
	WINDOW_INSIDE,		/**< May be fed to the HW right now. */
	WINDOW_TOO_LATE,	/**< The opportunity has already passed. */
} window_position_t;

/** Result of window_decide: position and, when outside, distance in uframes. */
typedef struct {
	window_position_t position;
	uint32_t offset;	/* Valid only for TOO_SOON / TOO_LATE. */
} window_decision_t;
199
/**
 * Decide on the position of mfindex relatively to the window specified by
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case of the mfindex being outside, also the number of
 * uframes it's off.
 *
 * @param res     Filled with the decision (and offset when outside).
 * @param hc      Host controller to read the current MFINDEX from.
 * @param mfindex Microframe index the transfer is scheduled for.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint32_t mfindex)
{
	/* +1: presumably the current uframe can no longer be targeted. */
	uint32_t current_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1;

	/*
	 * In your mind, rotate the clock so the window is at its beginning.
	 * The length of the window is always the same, and by rotating the
	 * mfindex too, we can decide by the value of it easily.
	 */
	mfindex = (mfindex - current_mfindex - hc->ist + XHCI_MFINDEX_MAX) % XHCI_MFINDEX_MAX;
	const uint32_t end = END_FRAME_DELAY - hc->ist;
	/* Values past this midpoint are treated as "already missed" rather
	 * than "far in the future". */
	const uint32_t threshold = (XHCI_MFINDEX_MAX + end) / 2;

	if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else if (mfindex > threshold) {
		res->position = WINDOW_TOO_LATE;
		/* Uframes by which the deadline was missed. */
		res->offset = XHCI_MFINDEX_MAX - mfindex;
	} else {
		res->position = WINDOW_TOO_SOON;
		/* Uframes remaining until the window opens. */
		res->offset = mfindex - end;
	}
	/*
	 * TODO: The "size" of the clock is too low. We have to scale it a bit
	 * to ensure correct scheduling of transfers, that are
	 * buffer_count * interval away from now.
	 * Maximum interval is 8 seconds, which means we need a size of
	 * 16 seconds. The size of MFIINDEX is 2 seconds only.
	 *
	 * A plan is to create a thin abstraction at HC, which would return
	 * a time from 32-bit clock, having its high bits updated by the
	 * MFINDEX Wrap Event, and low bits from the MFINDEX register. Using
	 * this 32-bit clock, one can plan 6 days ahead.
	 */
}
241
242static void isoch_feed_out_timer(void *);
243static void isoch_feed_in_timer(void *);
244
245/**
246 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
247 * pushes their TRBs to the ring.
248 *
249 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
250 * it too late, but also not too soon.
251 */
252static void isoch_feed_out(xhci_endpoint_t *ep)
253{
254 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
255 xhci_isoch_t * const isoch = ep->isoch;
256 assert(fibril_mutex_is_locked(&isoch->guard));
257
258 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
259 xhci_hc_t *hc = bus->hc;
260
261 bool fed = false;
262
263 while (isoch->hw_enqueue != isoch->enqueue) {
264 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];
265
266 assert(it->state == ISOCH_FILLED);
267
268 window_decision_t wd;
269 window_decide(&wd, hc, it->mfindex);
270
271 switch (wd.position) {
272 case WINDOW_TOO_SOON: {
273 const suseconds_t delay = wd.offset * 125;
274 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
275 it - isoch->transfers, delay);
276 fibril_timer_set_locked(isoch->feeding_timer, delay,
277 isoch_feed_out_timer, ep);
278 break;
279 }
280
281 case WINDOW_INSIDE:
282 usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
283 it - isoch->transfers, it->mfindex);
284 it->error = schedule_isochronous_trb(ep, it);
285 if (it->error) {
286 it->state = ISOCH_COMPLETE;
287 } else {
288 it->state = ISOCH_FED;
289 fed = true;
290 }
291
292 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
293 break;
294
295 case WINDOW_TOO_LATE:
296 /* Missed the opportunity to schedule. Just mark this transfer as skipped. */
297 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
298 it - isoch->transfers, it->mfindex, wd.offset);
299 it->state = ISOCH_COMPLETE;
300 it->error = EOK;
301 it->size = 0;
302
303 isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
304 break;
305 }
306 }
307
308 if (fed) {
309 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
310 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
311 hc_ring_doorbell(hc, slot_id, target);
312 }
313
314}
315
316static void isoch_feed_out_timer(void *ep)
317{
318 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
319 fibril_mutex_lock(&isoch->guard);
320 isoch_feed_out(ep);
321 fibril_mutex_unlock(&isoch->guard);
322}
323
324/**
325 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
326 * transfers and pushes their TRBs to the ring.
327 *
328 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
329 * it too late, but also not too soon.
330 */
331static void isoch_feed_in(xhci_endpoint_t *ep)
332{
333 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
334 xhci_isoch_t * const isoch = ep->isoch;
335 assert(fibril_mutex_is_locked(&isoch->guard));
336
337 xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
338 xhci_hc_t *hc = bus->hc;
339
340 bool fed = false;
341
342 while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
343 xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];
344
345 /* IN buffers are "filled" with free space */
346 if (it->state == ISOCH_EMPTY) {
347 it->size = isoch->max_size;
348 it->state = ISOCH_FILLED;
349 calc_next_mfindex(ep, it);
350 }
351
352 window_decision_t wd;
353 window_decide(&wd, hc, it->mfindex);
354
355 switch (wd.position) {
356 case WINDOW_TOO_SOON: {
357 /* Not allowed to feed yet. Defer to later. */
358 const suseconds_t delay = wd.offset * 125;
359 usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
360 it - isoch->transfers, delay);
361 fibril_timer_set_locked(isoch->feeding_timer, delay,
362 isoch_feed_in_timer, ep);
363 break;
364 }
365
366 case WINDOW_TOO_LATE:
367 usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
368 it - isoch->transfers, it->mfindex, wd.offset);
369 /* Missed the opportunity to schedule. Schedule ASAP. */
370 it->mfindex += wd.offset;
371 // Align to ESIT start boundary
372 it->mfindex += ep->interval - 1;
373 it->mfindex &= ~(ep->interval - 1);
374
375 /* fallthrough */
376 case WINDOW_INSIDE:
377 isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
378 isoch->last_mfindex = it->mfindex;
379
380 usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
381 it - isoch->transfers, it->mfindex);
382
383 it->error = schedule_isochronous_trb(ep, it);
384 if (it->error) {
385 it->state = ISOCH_COMPLETE;
386 } else {
387 it->state = ISOCH_FED;
388 fed = true;
389 }
390 break;
391 }
392 }
393
394 if (fed) {
395 const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
396 const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
397 hc_ring_doorbell(hc, slot_id, target);
398 }
399}
400
401static void isoch_feed_in_timer(void *ep)
402{
403 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
404 fibril_mutex_lock(&isoch->guard);
405 isoch_feed_in(ep);
406 fibril_mutex_unlock(&isoch->guard);
407}
408
/**
 * First, withdraw all (at least one) results left by previous transfers to
 * make room in the ring. Stop on first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to feed
 * it to the xHC.
 *
 * @param transfer OUT transfer batch to copy into an isoch buffer.
 * @return EOK, or ELIMIT when the batch exceeds the endpoint's max size.
 */
int isoch_schedule_out(xhci_transfer_t *transfer)
{
	int err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* Buffers were sized by isoch->max_size; anything larger cannot fit. */
	if (transfer->batch.buffer_size > isoch->max_size) {
		usb_log_error("Cannot schedule an oversized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transfered_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		/* Accumulate sizes of all completed buffers into this batch. */
		transfer->batch.transfered_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	/* The wait loop above guarantees the slot is free by now. */
	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mfindex = it->mfindex;
	usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%x", it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.buffer_size;
	memcpy(it->data.virt, transfer->batch.buffer, it->size);
	it->state = ISOCH_FILLED;

	/* Cancel any pending deferred feeding and feed synchronously now. */
	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	/* The batch is finished as soon as its data is buffered, not when
	 * the HW actually sends it; errors surface on a later batch. */
	usb_transfer_batch_finish(&transfer->batch);
	return err;
}
478
/**
 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
 * buffers, and fetch one filled buffer from the ring.
 *
 * @param transfer IN transfer batch to fill with received data.
 * @return EOK, or ELIMIT when the batch buffer is smaller than max_size.
 */
int isoch_schedule_in(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	/* The HW may deliver up to max_size bytes; a smaller buffer could
	 * overflow on the memcpy below. */
	if (transfer->batch.buffer_size < isoch->max_size) {
		usb_log_error("Cannot schedule an undersized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];

	/* Wait for at least one transfer to complete. */
	while (it->state != ISOCH_COMPLETE) {
		/* First, make sure we will have something to read. */
		fibril_timer_clear_locked(isoch->feeding_timer);
		isoch_feed_in(ep);

		usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
		fibril_condvar_wait(&isoch->avail, &isoch->guard);

		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->dequeue];
	}

	isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfer. */
	/* NOTE(review): on it->error the batch's error/transfered_size are
	 * left untouched — verify callers pre-initialize the batch. */
	if (!it->error) {
		memcpy(transfer->batch.buffer, it->data.virt, it->size);
		transfer->batch.transfered_size = it->size;
		transfer->batch.error = it->error;
	}

	/* Prepare the empty buffer */
	it->state = ISOCH_EMPTY;

	fibril_mutex_unlock(&isoch->guard);
	usb_transfer_batch_finish(&transfer->batch);

	return EOK;
}
528
/**
 * Handle a transfer event (or ring over/underrun) on an isochronous endpoint:
 * match it against one of our fed transfers, mark it complete and wake up
 * fibrils waiting for a buffer.
 *
 * @param hc  Host controller that raised the event.
 * @param ep  Isochronous endpoint the event belongs to.
 * @param trb Event TRB received from the event ring.
 * @return EOK (the event is always consumed).
 */
int isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&ep->isoch->guard);

	int err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/* For OUT, there was nothing to process */
		/* For IN, the buffer has overfilled, we empty the buffers and readd TRBs */
		usb_log_warning("Ring over/underrun.");
		isoch_reset(ep);
		fibril_condvar_broadcast(&ep->isoch->avail);
		fibril_mutex_unlock(&ep->isoch->guard);
		return EOK;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successfull: %u", completion_code);
		err = EIO;
		break;
	}

	bool found_mine = false;
	bool found_incomplete = false;

	/*
	 * The order of delivering events is not necessarily the one we would
	 * expect. It is safer to walk the list of our transfers (there are
	 * isoch->buffer_count of them) and check which one it is.
	 */
	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[i];

		switch (it->state) {
		case ISOCH_FILLED:
			/* Not yet handed to HW, so still pending. */
			found_incomplete = true;
			break;

		case ISOCH_FED:
			/* Match by the physical address of the fed TRB. */
			if (it->interrupt_trb_phys != trb->parameter) {
				found_incomplete = true;
				break;
			}

			usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
			/* The event carries the residue; subtract it to get
			 * the actually transferred size. */
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		default:
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occured for unknown transfer.");
	}

	/*
	 * It may happen that the driver already stopped reading (writing),
	 * and our buffers are filled (empty). As QEMU (and possibly others)
	 * does not send RING_UNDERRUN (OVERRUN) event, detect it here.
	 */
	if (!found_incomplete) {
		usb_log_warning("[isoch] Endpoint" XHCI_EP_FMT ": Detected "
		    "isochronous ring %s.", XHCI_EP_ARGS(*ep),
		    (ep->base.direction == USB_DIRECTION_IN) ? "underrun" : "overrun");
		isoch_reset(ep);
	}

	/* Wake whoever waits for a free (OUT) or completed (IN) buffer. */
	fibril_condvar_broadcast(&ep->isoch->avail);
	fibril_mutex_unlock(&ep->isoch->guard);
	return EOK;
}
612
613/**
614 * @}
615 */
Note: See TracBrowser for help on using the repository browser.