source: mainline/uspace/drv/bus/usb/xhci/isoch.c@ 398a94c

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 398a94c was 398a94c, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci isoch: bug fixing

  • Property mode set to 100644
File size: 17.9 KB
Line 
1/*
2 * Copyright (c) 2017 HelUSB3 team
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Isochronous transfer scheduling and buffer management.
34 */
35
36#include <str_error.h>
37#include <macros.h>
38
39#include "endpoint.h"
40#include "hw_struct/trb.h"
41#include "hw_struct/regs.h"
42#include "trb_ring.h"
43#include "hc.h"
44#include "bus.h"
45
46#include "isoch.h"
47
48void isoch_init(xhci_endpoint_t *ep, const usb_endpoint_descriptors_t *desc)
49{
50 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
51 xhci_isoch_t * const isoch = ep->isoch;
52
53 fibril_mutex_initialize(&isoch->guard);
54 fibril_condvar_initialize(&isoch->avail);
55
56 const xhci_hc_t *hc = bus_to_xhci_bus(ep->base.device->bus)->hc;
57
58 /*
59 * We shall cover at least twice the IST period, otherwise we will get
60 * an over/underrun every time.
61 */
62 isoch->buffer_count = (2 * hc->ist) / ep->interval;
63
64 /* 2 buffers are the very minimum. */
65 isoch->buffer_count = max(2, isoch->buffer_count);
66
67 usb_log_error("[isoch] isoch setup with %zu buffers", isoch->buffer_count);
68}
69
70static void isoch_reset(xhci_endpoint_t *ep)
71{
72 xhci_isoch_t * const isoch = ep->isoch;
73 assert(fibril_mutex_is_locked(&isoch->guard));
74
75 isoch->dequeue = isoch->enqueue = isoch->hw_enqueue = 0;
76
77 for (size_t i = 0; i < isoch->buffer_count; ++i) {
78 isoch->transfers[i].state = ISOCH_EMPTY;
79 }
80
81 fibril_timer_clear_locked(isoch->feeding_timer);
82 isoch->last_mfindex = -1U;
83 usb_log_info("[isoch] Endpoint" XHCI_EP_FMT ": Data flow reset.", XHCI_EP_ARGS(*ep));
84}
85
86void isoch_fini(xhci_endpoint_t *ep)
87{
88 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
89 xhci_isoch_t * const isoch = ep->isoch;
90
91 if (isoch->feeding_timer) {
92 fibril_timer_clear(isoch->feeding_timer);
93 fibril_timer_destroy(isoch->feeding_timer);
94 }
95
96 if (isoch->transfers) {
97 for (size_t i = 0; i < isoch->buffer_count; ++i)
98 dma_buffer_free(&isoch->transfers[i].data);
99 free(isoch->transfers);
100 }
101}
102
103/**
104 * Allocate isochronous buffers. Create the feeding timer.
105 */
106int isoch_alloc_transfers(xhci_endpoint_t *ep) {
107 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
108 xhci_isoch_t * const isoch = ep->isoch;
109
110 isoch->feeding_timer = fibril_timer_create(&isoch->guard);
111 if (!isoch->feeding_timer)
112 return ENOMEM;
113
114 isoch->transfers = calloc(isoch->buffer_count, sizeof(xhci_isoch_transfer_t));
115 if(!isoch->transfers)
116 goto err;
117
118 for (size_t i = 0; i < isoch->buffer_count; ++i) {
119 xhci_isoch_transfer_t *transfer = &isoch->transfers[i];
120 if (dma_buffer_alloc(&transfer->data, ep->base.max_transfer_size)) {
121 goto err;
122 }
123 }
124
125 fibril_mutex_lock(&isoch->guard);
126 isoch_reset(ep);
127 fibril_mutex_unlock(&isoch->guard);
128
129 return EOK;
130err:
131 isoch_fini(ep);
132 return ENOMEM;
133}
134
135static int schedule_isochronous_trb(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
136{
137 xhci_trb_t trb;
138 xhci_trb_clean(&trb);
139
140 trb.parameter = it->data.phys;
141 TRB_CTRL_SET_XFER_LEN(trb, it->size);
142 TRB_CTRL_SET_TD_SIZE(trb, 0);
143 TRB_CTRL_SET_IOC(trb, 1);
144 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_ISOCH);
145
146 // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
147 size_t tdpc = it->size / 1024 + ((it->size % 1024) ? 1 : 0);
148 size_t tbc = tdpc / ep->max_burst;
149 if (!tdpc % ep->max_burst) --tbc;
150 size_t bsp = tdpc % ep->max_burst;
151 size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;
152
153 TRB_ISOCH_SET_TBC(trb, tbc);
154 TRB_ISOCH_SET_TLBPC(trb, tlbpc);
155 TRB_ISOCH_SET_FRAMEID(trb, (it->mfindex / 8) % 2048);
156
157 const int err = xhci_trb_ring_enqueue(&ep->ring, &trb, &it->interrupt_trb_phys);
158 return err;
159}
160
/**
 * Compute the microframe index (MFINDEX) at which the given transfer shall
 * be scheduled.
 *
 * For the first transfer after a reset (last_mfindex == -1U), pick a point
 * in the near future: current MFINDEX, plus the time needed to prepare all
 * buffers (buffer_count * interval), plus the IST, rounded up to an ESIT
 * boundary. Subsequent transfers follow the previous one by exactly one
 * interval, modulo the MFINDEX wrap.
 *
 * NOTE(review): the round-up via add/mask assumes ep->interval is a power
 * of two -- confirm the interval is always stored that way.
 */
static inline void calc_next_mfindex(xhci_endpoint_t *ep, xhci_isoch_transfer_t *it)
{
	xhci_isoch_t * const isoch = ep->isoch;
	if (isoch->last_mfindex == -1U) {
		const xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
		const xhci_hc_t *hc = bus->hc;

		/* Choose some number, give us a little time to prepare the
		 * buffers */
		it->mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1
			+ isoch->buffer_count * ep->interval
			+ hc->ist;

		// Align to ESIT start boundary
		it->mfindex += ep->interval - 1;
		it->mfindex &= ~(ep->interval - 1);
	} else {
		it->mfindex = (isoch->last_mfindex + ep->interval) % XHCI_MFINDEX_MAX;
	}
}
181
/** 895 ms in uframes (895000 us / 125 us per uframe = 7160). */
#define END_FRAME_DELAY (895000 / 125)

/** Position of an mfindex relative to the schedulable window. */
typedef enum {
	WINDOW_TOO_SOON,
	WINDOW_INSIDE,
	WINDOW_TOO_LATE,
} window_position_t;

typedef struct {
	window_position_t position;
	/* Distance in uframes from the window; meaningful only when outside. */
	uint32_t offset;
} window_decision_t;
195
/**
 * Decide on the position of mfindex relatively to the window specified by
 * Start Frame ID and End Frame ID. The resulting structure contains the
 * decision, and in case of the mfindex being outside, also the number of
 * uframes it's off.
 *
 * @param res     Where to store the decision and offset.
 * @param hc      Host controller (supplies current MFINDEX and IST).
 * @param mfindex Planned schedule time of a transfer, in uframes.
 */
static inline void window_decide(window_decision_t *res, xhci_hc_t *hc, uint32_t mfindex)
{
	uint32_t current_mfindex = XHCI_REG_RD(hc->rt_regs, XHCI_RT_MFINDEX) + 1;

	/*
	 * In your mind, rotate the clock so the window is at its beginning.
	 * The length of the window is always the same, and by rotating the
	 * mfindex too, we can decide by the value of it easily.
	 */
	mfindex = (mfindex - current_mfindex - hc->ist + XHCI_MFINDEX_MAX) % XHCI_MFINDEX_MAX;
	const uint32_t end = END_FRAME_DELAY - hc->ist;
	const uint32_t threshold = (XHCI_MFINDEX_MAX + end) / 2;

	if (mfindex <= end) {
		res->position = WINDOW_INSIDE;
	} else if (mfindex > threshold) {
		/* Past the halfway point: closer to the past than the future. */
		res->position = WINDOW_TOO_LATE;
		res->offset = XHCI_MFINDEX_MAX - mfindex;
	} else {
		res->position = WINDOW_TOO_SOON;
		res->offset = mfindex - end;
	}
	/*
	 * TODO: The "size" of the clock is too low. We have to scale it a bit
	 * to ensure correct scheduling of transfers, that are
	 * buffer_count * interval away from now.
	 * Maximum interval is 8 seconds, which means we need a size of
	 * 16 seconds. The size of MFIINDEX is 2 seconds only.
	 *
	 * A plan is to create a thin abstraction at HC, which would return
	 * a time from 32-bit clock, having its high bits updated by the
	 * MFINDEX Wrap Event, and low bits from the MFINDEX register. Using
	 * this 32-bit clock, one can plan 6 days ahead.
	 */
}
237
238static void isoch_feed_out_timer(void *);
239static void isoch_feed_in_timer(void *);
240
/**
 * Schedule TRBs with filled buffers to HW. Takes filled isoch transfers and
 * pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 *
 * Caller must hold isoch->guard. May re-arm the feeding timer to retry
 * once a too-soon buffer becomes schedulable.
 */
static void isoch_feed_out(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	/* Walk the filled buffers between the HW enqueue and SW enqueue ptrs. */
	while (isoch->hw_enqueue != isoch->enqueue) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->hw_enqueue];

		assert(it->state == ISOCH_FILLED);

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Not allowed to feed yet; retry from the timer callback. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_out_timer, ep);
			goto out;
		}

		case WINDOW_INSIDE:
			usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
			    it - isoch->transfers, it->mfindex);
			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				/* Enqueue failed; report the error at completion time. */
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;

		case WINDOW_TOO_LATE:
			/* Missed the opportunity to schedule. Just mark this transfer as skipped. */
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			it->state = ISOCH_COMPLETE;
			it->error = EOK;
			it->size = 0;

			isoch->hw_enqueue = (isoch->hw_enqueue + 1) % isoch->buffer_count;
			break;
		}
	}
out:

	/* Notify the xHC only if at least one TRB actually reached the ring. */
	if (fed) {
		const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
		const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
		hc_ring_doorbell(hc, slot_id, target);
	}

}
312
313static void isoch_feed_out_timer(void *ep)
314{
315 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
316 fibril_mutex_lock(&isoch->guard);
317 isoch_feed_out(ep);
318 fibril_mutex_unlock(&isoch->guard);
319}
320
/**
 * Schedule TRBs with empty, withdrawn buffers to HW. Takes empty isoch
 * transfers and pushes their TRBs to the ring.
 *
 * According to 4.11.2.5, we can't just push all TRBs we have. We must not do
 * it too late, but also not too soon.
 *
 * Caller must hold isoch->guard. May re-arm the feeding timer to retry
 * once a too-soon buffer becomes schedulable.
 */
static void isoch_feed_in(xhci_endpoint_t *ep)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;
	assert(fibril_mutex_is_locked(&isoch->guard));

	xhci_bus_t *bus = bus_to_xhci_bus(ep->base.device->bus);
	xhci_hc_t *hc = bus->hc;

	bool fed = false;

	/* Feed every buffer that is not yet handed over to hardware. */
	while (isoch->transfers[isoch->enqueue].state <= ISOCH_FILLED) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[isoch->enqueue];

		/* IN buffers are "filled" with free space */
		if (it->state == ISOCH_EMPTY) {
			it->size = ep->base.max_transfer_size;
			it->state = ISOCH_FILLED;
			calc_next_mfindex(ep, it);
		}

		window_decision_t wd;
		window_decide(&wd, hc, it->mfindex);

		switch (wd.position) {
		case WINDOW_TOO_SOON: {
			/* Not allowed to feed yet. Defer to later. */
			const suseconds_t delay = wd.offset * 125;
			usb_log_debug2("[isoch] delaying feeding buffer %lu for %ldus",
			    it - isoch->transfers, delay);
			fibril_timer_set_locked(isoch->feeding_timer, delay,
			    isoch_feed_in_timer, ep);
			goto out;
		}

		case WINDOW_TOO_LATE:
			usb_log_debug2("[isoch] missed feeding buffer %lu at 0x%x by %u uframes",
			    it - isoch->transfers, it->mfindex, wd.offset);
			/* Missed the opportunity to schedule. Schedule ASAP. */
			it->mfindex += wd.offset;
			// Align to ESIT start boundary
			it->mfindex += ep->interval - 1;
			it->mfindex &= ~(ep->interval - 1);

			/* fallthrough */
		case WINDOW_INSIDE:
			isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;
			isoch->last_mfindex = it->mfindex;

			usb_log_debug2("[isoch] feeding buffer %lu at 0x%x",
			    it - isoch->transfers, it->mfindex);

			it->error = schedule_isochronous_trb(ep, it);
			if (it->error) {
				/* Enqueue failed; surface the error at completion time. */
				it->state = ISOCH_COMPLETE;
			} else {
				it->state = ISOCH_FED;
				fed = true;
			}
			break;
		}
	}
out:

	/* Notify the xHC only if at least one TRB actually reached the ring. */
	if (fed) {
		const uint8_t slot_id = xhci_device_get(ep->base.device)->slot_id;
		const uint8_t target = xhci_endpoint_index(ep) + 1; /* EP Doorbells start at 1 */
		hc_ring_doorbell(hc, slot_id, target);
	}
}
398
399static void isoch_feed_in_timer(void *ep)
400{
401 xhci_isoch_t * const isoch = xhci_endpoint_get(ep)->isoch;
402 fibril_mutex_lock(&isoch->guard);
403 isoch_feed_in(ep);
404 fibril_mutex_unlock(&isoch->guard);
405}
406
/**
 * First, withdraw all (at least one) results left by previous transfers to
 * make room in the ring. Stop on first error.
 *
 * When there is at least one buffer free, fill it with data. Then try to feed
 * it to the xHC.
 *
 * @param transfer OUT transfer batch to schedule.
 * @return ELIMIT when the payload exceeds the buffer size, EOK otherwise.
 *         Errors of previously completed transfers are reported through
 *         transfer->batch.error.
 */
int isoch_schedule_out(xhci_transfer_t *transfer)
{
	int err = EOK;

	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	if (transfer->batch.buffer_size > ep->base.max_transfer_size) {
		usb_log_error("Cannot schedule an oversized isochronous transfer.");
		return ELIMIT;
	}

	fibril_mutex_lock(&isoch->guard);

	/* Get the buffer to write to */
	xhci_isoch_transfer_t *it = &isoch->transfers[isoch->enqueue];

	/* Wait for the buffer to be completed */
	while (it->state == ISOCH_FED || it->state == ISOCH_FILLED) {
		fibril_condvar_wait(&isoch->avail, &isoch->guard);
		/* The enqueue ptr may have changed while sleeping */
		it = &isoch->transfers[isoch->enqueue];
	}

	isoch->enqueue = (isoch->enqueue + 1) % isoch->buffer_count;

	/* Withdraw results from previous transfers. */
	transfer->batch.transfered_size = 0;
	xhci_isoch_transfer_t *res = &isoch->transfers[isoch->dequeue];
	while (res->state == ISOCH_COMPLETE) {
		isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;

		res->state = ISOCH_EMPTY;
		transfer->batch.transfered_size += res->size;
		transfer->batch.error = res->error;
		if (res->error)
			break; // Announce one error at a time

		res = &isoch->transfers[isoch->dequeue];
	}

	/* The wait loop above guarantees this slot is free by now. */
	assert(it->state == ISOCH_EMPTY);

	/* Calculate when to schedule next transfer */
	calc_next_mfindex(ep, it);
	isoch->last_mfindex = it->mfindex;
	usb_log_debug2("[isoch] buffer %zu will be on schedule at 0x%x", it - isoch->transfers, it->mfindex);

	/* Prepare the transfer. */
	it->size = transfer->batch.buffer_size;
	memcpy(it->data.virt, transfer->batch.buffer, it->size);
	it->state = ISOCH_FILLED;

	/* Try to push the freshly filled buffer to hardware right away. */
	fibril_timer_clear_locked(isoch->feeding_timer);
	isoch_feed_out(ep);

	fibril_mutex_unlock(&isoch->guard);

	usb_transfer_batch_finish(&transfer->batch);
	return err;
}
476
477/**
478 * IN is in fact easier than OUT. Our responsibility is just to feed all empty
479 * buffers, and fetch one filled buffer from the ring.
480 */
481int isoch_schedule_in(xhci_transfer_t *transfer)
482{
483 xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
484 assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
485 xhci_isoch_t * const isoch = ep->isoch;
486
487 if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
488 usb_log_error("Cannot schedule an undersized isochronous transfer.");
489 return ELIMIT;
490 }
491
492 fibril_mutex_lock(&isoch->guard);
493
494 xhci_isoch_transfer_t *it = &isoch->transfers[isoch->dequeue];
495
496 /* Wait for at least one transfer to complete. */
497 while (it->state != ISOCH_COMPLETE) {
498 /* First, make sure we will have something to read. */
499 fibril_timer_clear_locked(isoch->feeding_timer);
500 isoch_feed_in(ep);
501
502 usb_log_debug2("[isoch] waiting for buffer %zu to be completed", it - isoch->transfers);
503 fibril_condvar_wait(&isoch->avail, &isoch->guard);
504
505 /* The enqueue ptr may have changed while sleeping */
506 it = &isoch->transfers[isoch->dequeue];
507 }
508
509 isoch->dequeue = (isoch->dequeue + 1) % isoch->buffer_count;
510
511 /* Withdraw results from previous transfer. */
512 if (!it->error) {
513 memcpy(transfer->batch.buffer, it->data.virt, it->size);
514 transfer->batch.transfered_size = it->size;
515 transfer->batch.error = it->error;
516 }
517
518 /* Prepare the empty buffer */
519 it->state = ISOCH_EMPTY;
520
521 fibril_mutex_unlock(&isoch->guard);
522 usb_transfer_batch_finish(&transfer->batch);
523
524 return EOK;
525}
526
/**
 * Handle a Transfer Event (or ring over/underrun event) targeting an
 * isochronous endpoint. Matches the event against our in-flight buffers
 * by the TRB physical address, marks the matching buffer complete, and
 * wakes fibrils waiting in isoch_schedule_in/out.
 *
 * @param hc  Host controller that delivered the event.
 * @param ep  Target endpoint (must be isochronous).
 * @param trb The event TRB.
 * @return EOK always; individual transfer errors are stored per-buffer.
 */
int isoch_handle_transfer_event(xhci_hc_t *hc, xhci_endpoint_t *ep, xhci_trb_t *trb)
{
	assert(ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS);
	xhci_isoch_t * const isoch = ep->isoch;

	fibril_mutex_lock(&ep->isoch->guard);

	int err;
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);

	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/* For OUT, there was nothing to process */
		/* For IN, the buffer has overfilled, we empty the buffers and readd TRBs */
		usb_log_warning("Ring over/underrun.");
		isoch_reset(ep);
		fibril_condvar_broadcast(&ep->isoch->avail);
		fibril_mutex_unlock(&ep->isoch->guard);
		return EOK;
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		err = EOK;
		break;
	default:
		usb_log_warning("Transfer not successfull: %u", completion_code);
		err = EIO;
		break;
	}

	bool found_mine = false;
	bool found_incomplete = false;

	/*
	 * The order of delivering events is not necessarily the one we would
	 * expect. It is safer to walk the list of our transfers and check
	 * which one it is.
	 */
	for (size_t i = 0; i < isoch->buffer_count; ++i) {
		xhci_isoch_transfer_t * const it = &isoch->transfers[i];

		switch (it->state) {
		case ISOCH_FILLED:
			found_incomplete = true;
			break;

		case ISOCH_FED:
			/* Match by the physical address of the enqueued TRB. */
			if (it->interrupt_trb_phys != trb->parameter) {
				found_incomplete = true;
				break;
			}

			usb_log_debug2("[isoch] buffer %zu completed", it - isoch->transfers);
			it->state = ISOCH_COMPLETE;
			/* The event carries the residue; subtract to get bytes done. */
			it->size -= TRB_TRANSFER_LENGTH(*trb);
			it->error = err;
			found_mine = true;
			break;
		default:
			break;
		}
	}

	if (!found_mine) {
		usb_log_warning("[isoch] A transfer event occured for unknown transfer.");
	}

	/*
	 * It may happen that the driver already stopped reading (writing),
	 * and our buffers are filled (empty). As QEMU (and possibly others)
	 * does not send RING_UNDERRUN (OVERRUN) event, detect it here.
	 */
	if (!found_incomplete) {
		usb_log_warning("[isoch] Endpoint" XHCI_EP_FMT ": Detected "
		    "isochronous ring %s.", XHCI_EP_ARGS(*ep),
		    (ep->base.direction == USB_DIRECTION_IN) ? "underrun" : "overrun");
		isoch_reset(ep);
	}

	/* Wake up any fibril blocked in isoch_schedule_in/out. */
	fibril_condvar_broadcast(&ep->isoch->avail);
	fibril_mutex_unlock(&ep->isoch->guard);
	return EOK;
}
610
611/**
612 * @}
613 */
Note: See TracBrowser for help on using the repository browser.