source: mainline/uspace/drv/bus/usb/xhci/transfers.c@ 10cd715

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 10cd715 was 6455d39, checked in by Salmelu <salmelu@…>, 8 years ago

Added isochronous locks, over/underrun detection

  • Property mode set to 100644
File size: 17.8 KB
Line 
1/*
2 * Copyright (c) 2017 Michal Staruch
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller transfer ring management
34 */
35
36#include <usb/debug.h>
37#include <usb/request.h>
38#include "endpoint.h"
39#include "hc.h"
40#include "hw_struct/trb.h"
41#include "transfers.h"
42#include "trb_ring.h"
43
44typedef enum {
45 STAGE_OUT,
46 STAGE_IN,
47} stage_dir_flag_t;
48
49#define REQUEST_TYPE_DTD (0x80)
50#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)
51
52
53/** Get direction flag of data stage.
54 * See Table 7 of xHCI specification.
55 */
56static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
57 uint8_t bmRequestType, uint16_t wLength)
58{
59 /* See Table 7 of xHCI specification */
60 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
61 ? STAGE_OUT
62 : STAGE_IN;
63}
64
65typedef enum {
66 DATA_STAGE_NO = 0,
67 DATA_STAGE_OUT = 2,
68 DATA_STAGE_IN = 3,
69} data_stage_type_t;
70
71/** Get transfer type flag.
72 * See Table 8 of xHCI specification.
73 */
74static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
75 bmRequestType, uint16_t wLength)
76{
77 if (wLength == 0)
78 return DATA_STAGE_NO;
79
80 /* See Table 7 of xHCI specification */
81 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
82 ? DATA_STAGE_IN
83 : DATA_STAGE_NO;
84}
85
86static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
87{
88 usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);
89
90 return request_type == USB_REQUEST_TYPE_STANDARD &&
91 (setup->request == USB_DEVREQ_SET_CONFIGURATION
92 || setup->request == USB_DEVREQ_SET_INTERFACE);
93}
94
95/**
96 * There can currently be only one active transfer, because
97 * usb_transfer_batch_init locks the endpoint by endpoint_use.
98 * Therefore, we store the only active transfer per endpoint there.
99 */
100xhci_transfer_t* xhci_transfer_create(endpoint_t* ep)
101{
102 xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
103 if (!transfer)
104 return NULL;
105
106 usb_transfer_batch_init(&transfer->batch, ep);
107 return transfer;
108}
109
110void xhci_transfer_destroy(xhci_transfer_t* transfer)
111{
112 assert(transfer);
113
114 dma_buffer_free(&transfer->hc_buffer);
115 free(transfer);
116}
117
118static xhci_trb_ring_t *get_ring(xhci_hc_t *hc, xhci_transfer_t *transfer)
119{
120 return &xhci_endpoint_get(transfer->batch.ep)->ring;
121}
122
/** Schedule a control transfer.
 *
 * Builds a TD of up to three TRBs — Setup stage, optional Data stage, and
 * Status stage — and enqueues them on the endpoint's transfer ring in one
 * step. If the setup packet changes the device configuration or interface,
 * a Configure Endpoint command is issued first.
 *
 * @param hc HC scheduling the transfer.
 * @param transfer Transfer whose batch carries the setup packet and buffer.
 * @return Error code.
 */
static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	usb_transfer_batch_t *batch = &transfer->batch;
	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

	usb_device_request_setup_packet_t* setup = &batch->setup.packet;

	/* At most 3 TRBs are needed: Setup, Data (optional), Status. */
	xhci_trb_t trbs[3];
	int trbs_used = 0;

	/* Setup stage */
	xhci_trb_t *trb_setup = trbs + trbs_used++;
	xhci_trb_clean(trb_setup);

	TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
	TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
	TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
	TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
	TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);

	/* Size of the setup packet is always 8 */
	TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

	/* Immediate data: the setup packet lives in the TRB itself. */
	TRB_CTRL_SET_IDT(*trb_setup, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
	TRB_CTRL_SET_TRT(*trb_setup, get_transfer_type(trb_setup, setup->request_type, setup->length));

	/* Data stage (only when the request carries data) */
	xhci_trb_t *trb_data = NULL;
	if (setup->length > 0) {
		trb_data = trbs + trbs_used++;
		xhci_trb_clean(trb_data);

		/* Physical address of the DMA bounce buffer. */
		trb_data->parameter = host2xhci(64, transfer->hc_buffer.phys);

		// data size (sent for OUT, or buffer size)
		TRB_CTRL_SET_XFER_LEN(*trb_data, batch->buffer_size);
		// FIXME: TD size 4.11.2.4
		TRB_CTRL_SET_TD_SIZE(*trb_data, 1);

		TRB_CTRL_SET_TRB_TYPE(*trb_data, XHCI_TRB_TYPE_DATA_STAGE);

		/* Direction follows the request's device-to-host bit. */
		int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
					? STAGE_IN : STAGE_OUT;
		TRB_CTRL_SET_DIR(*trb_data, stage_dir);
	}

	/* Status stage */
	xhci_trb_t *trb_status = trbs + trbs_used++;
	xhci_trb_clean(trb_status);

	// FIXME: Evaluate next TRB? 4.12.3
	// TRB_CTRL_SET_ENT(*trb_status, 1);

	/* Interrupt on completion of the whole TD. */
	TRB_CTRL_SET_IOC(*trb_status, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
	TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup, setup->request_type, setup->length));

	// Issue a Configure Endpoint command, if needed.
	if (configure_endpoint_needed(setup)) {
		const int err = hc_configure_device(hc, xhci_ep_to_dev(xhci_ep)->slot_id);
		if (err)
			return err;
	}

	return xhci_trb_ring_enqueue_multiple(ring, trbs, trbs_used, &transfer->interrupt_trb_phys);
}
192
193static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
194{
195 xhci_trb_t trb;
196 xhci_trb_clean(&trb);
197 trb.parameter = host2xhci(64, transfer->hc_buffer.phys);
198
199 // data size (sent for OUT, or buffer size)
200 TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
201 // FIXME: TD size 4.11.2.4
202 TRB_CTRL_SET_TD_SIZE(trb, 1);
203
204 // we want an interrupt after this td is done
205 TRB_CTRL_SET_IOC(trb, 1);
206
207 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);
208
209 xhci_trb_ring_t* ring = get_ring(hc, transfer);
210
211 return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
212}
213
214static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
215{
216 xhci_trb_t trb;
217 xhci_trb_clean(&trb);
218 trb.parameter = host2xhci(64, transfer->hc_buffer.phys);
219
220 // data size (sent for OUT, or buffer size)
221 TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
222 // FIXME: TD size 4.11.2.4
223 TRB_CTRL_SET_TD_SIZE(trb, 1);
224
225 // we want an interrupt after this td is done
226 TRB_CTRL_SET_IOC(trb, 1);
227
228 TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);
229
230 xhci_trb_ring_t* ring = get_ring(hc, transfer);
231
232 return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
233}
234
/** Fetch the next isochronous buffer slot for scheduling, advancing the
 * enqueue index.
 *
 * Returns NULL when enqueue has caught up with dequeue, i.e. no slot is
 * currently available — the caller then waits on ep->isoch_avail.
 *
 * NOTE(review): the modulo on isoch_enqueue in the comparison looks
 * redundant, since the increment below already keeps it in range —
 * confirm against the endpoint initialization code.
 *
 * Must be called with ep->isoch_guard held (all callers in this file do).
 */
static xhci_isoch_transfer_t* isoch_transfer_get_enqueue(xhci_endpoint_t *ep) {
	if ((ep->isoch_enqueue % XHCI_ISOCH_BUFFER_COUNT) == ep->isoch_dequeue) {
		/* None ready */
		return NULL;
	}
	xhci_isoch_transfer_t *isoch_transfer = ep->isoch_transfers[ep->isoch_enqueue];
	ep->isoch_enqueue = (ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT;
	return isoch_transfer;
}
244
245static xhci_isoch_transfer_t* isoch_transfer_get_dequeue(xhci_endpoint_t *ep) {
246 xhci_isoch_transfer_t *isoch_transfer = ep->isoch_transfers[ep->isoch_dequeue];
247 ep->isoch_dequeue = (ep->isoch_dequeue + 1) % XHCI_ISOCH_BUFFER_COUNT;
248 return isoch_transfer;
249}
250
251static int schedule_isochronous_trb(xhci_trb_ring_t *ring, xhci_endpoint_t *ep, xhci_trb_t *trb,
252 const size_t len, uintptr_t *trb_phys)
253{
254 TRB_CTRL_SET_XFER_LEN(*trb, len);
255 // FIXME: TD size 4.11.2.4 (there is no next TRB, so 0?)
256 TRB_CTRL_SET_TD_SIZE(*trb, 0);
257 TRB_CTRL_SET_IOC(*trb, 1);
258 TRB_CTRL_SET_TRB_TYPE(*trb, XHCI_TRB_TYPE_ISOCH);
259
260 // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
261 size_t tdpc = len / 1024 + ((len % 1024) ? 1 : 0);
262 size_t tbc = tdpc / (ep->max_burst + 1);
263 if(!tdpc % (ep->max_burst + 1)) --tbc;
264 size_t bsp = tdpc % (ep->max_burst + 1);
265 size_t tlbpc = (bsp ? bsp - 1 : ep->max_burst);
266
267 TRB_CTRL_SET_TBC(*trb, tbc);
268 TRB_CTRL_SET_TLBPC(*trb, tlbpc);
269
270 // FIXME: do we want this? 6.4.1.3, p 366 (also possibly frame id?)
271 TRB_CTRL_SET_SIA(*trb, 1);
272
273 return xhci_trb_ring_enqueue(ring, trb, trb_phys);
274}
275
/** Schedule an isochronous OUT transfer.
 *
 * Blocks (on ep->isoch_avail) until a buffer slot is free, copies the
 * caller's data into it, enqueues an Isoch TRB, and — once enough buffers
 * have been primed — rings the endpoint doorbell to start streaming.
 * The batch is finished immediately; isochronous errors are not reported
 * back to the caller.
 *
 * @param hc HC scheduling the transfer.
 * @param transfer Transfer with the data to send.
 * @param xhci_ep Isochronous endpoint (guards the buffer ring).
 * @param xhci_dev Device owning the endpoint (for the doorbell).
 * @return Error code.
 */
static int schedule_isochronous_out(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
	xhci_device_t *xhci_dev)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	/* Wait for a free buffer slot; the condvar is signalled by the
	 * transfer event handler when a slot is recycled. */
	fibril_mutex_lock(&xhci_ep->isoch_guard);
	xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	while (!isoch_transfer) {
		fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
		isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	}

	/* Stage the outgoing data into the slot's DMA buffer. */
	isoch_transfer->size = transfer->batch.buffer_size;
	if (isoch_transfer->size > 0) {
		memcpy(isoch_transfer->data.virt, transfer->batch.buffer, isoch_transfer->size);
	}

	trb.parameter = isoch_transfer->data.phys;

	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
		&isoch_transfer->interrupt_trb_phys);
	if (err) {
		fibril_mutex_unlock(&xhci_ep->isoch_guard);
		return err;
	}

	/* If not yet started, start the isochronous endpoint transfers - after buffer count - 1 writes */
	/* The -2 is there because of the enqueue != dequeue check. The buffer must have at least 2 transfers. */
	if (xhci_ep->isoch_enqueue == XHCI_ISOCH_BUFFER_COUNT - 2 && !xhci_ep->isoch_started) {
		const uint8_t slot_id = xhci_dev->slot_id;
		const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
		err = hc_ring_doorbell(hc, slot_id, target);
		xhci_ep->isoch_started = true;
	}
	fibril_mutex_unlock(&xhci_ep->isoch_guard);
	if (err) {
		return err;
	}

	/* Isochronous transfers don't handle errors, they skip them all. */
	transfer->batch.error = EOK;
	transfer->batch.transfered_size = transfer->batch.buffer_size;
	usb_transfer_batch_finish(&transfer->batch);
	return EOK;
}
323
324static int schedule_isochronous_in(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
325 xhci_device_t *xhci_dev)
326{
327 fibril_mutex_lock(&xhci_ep->isoch_guard);
328 /* If not yet started, start the isochronous endpoint transfers - before first read */
329 if (!xhci_ep->isoch_started) {
330 const uint8_t slot_id = xhci_dev->slot_id;
331 const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
332 int err = hc_ring_doorbell(hc, slot_id, target);
333 if (err) {
334 fibril_mutex_unlock(&xhci_ep->isoch_guard);
335 return err;
336 }
337 xhci_ep->isoch_started = true;
338 }
339
340 xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
341 while(!isoch_transfer) {
342 fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
343 isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
344 }
345
346 isoch_transfer->size = transfer->batch.buffer_size;
347 if (transfer->batch.buffer_size <= isoch_transfer->size) {
348 if (transfer->batch.buffer_size > 0) {
349 memcpy(transfer->batch.buffer, isoch_transfer->data.virt, transfer->batch.buffer_size);
350 }
351 if (transfer->batch.buffer_size < isoch_transfer->size) {
352 // FIXME: somehow notify that buffer was too small, probably batch error code
353 }
354 transfer->batch.transfered_size = transfer->batch.buffer_size;
355 }
356 else {
357 memcpy(transfer->batch.buffer, isoch_transfer->data.virt, isoch_transfer->size);
358 transfer->batch.transfered_size = isoch_transfer->size;
359 }
360
361 // Clear and requeue the transfer with new TRB
362 xhci_trb_t trb;
363 xhci_trb_clean(&trb);
364
365 trb.parameter = isoch_transfer->data.phys;
366 isoch_transfer->size = xhci_ep->isoch_max_size;
367
368 xhci_trb_ring_t *ring = get_ring(hc, transfer);
369 int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
370 &isoch_transfer->interrupt_trb_phys);
371 fibril_mutex_unlock(&xhci_ep->isoch_guard);
372
373 if (err) {
374 return err;
375 }
376
377 /* Isochronous transfers don't handle errors, they skip them all. */
378 transfer->batch.error = EOK;
379 usb_transfer_batch_finish(&transfer->batch);
380 return EOK;
381}
382
383static int schedule_isochronous(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
384 xhci_device_t *xhci_dev)
385{
386 if (transfer->batch.buffer_size > xhci_ep->isoch_max_size) {
387 usb_log_error("Cannot schedule an oversized isochronous transfer.");
388 return EINVAL;
389 }
390
391 if (xhci_ep->base.direction == USB_DIRECTION_OUT) {
392 return schedule_isochronous_out(hc, transfer, xhci_ep, xhci_dev);
393 }
394 else {
395 return schedule_isochronous_in(hc, transfer, xhci_ep, xhci_dev);
396 }
397}
398
/** Process a transfer event on an isochronous endpoint.
 *
 * Updates the received size of the completed buffer slot, advances the
 * dequeue index, and wakes one fibril waiting for a free slot. Ring
 * over/underrun stops the endpoint (the xHC has already unscheduled it).
 *
 * @param hc HC that generated the event (unused here).
 * @param trb The Transfer Event TRB from the event ring.
 * @param ep Endpoint the event belongs to.
 * @return Error code: EIO on failed completion, ENOENT when the event
 *         does not match the expected buffer, EOK otherwise.
 */
static int handle_isochronous_transfer_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_endpoint_t *ep) {
	fibril_mutex_lock(&ep->isoch_guard);

	int err = EOK;

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
		case XHCI_TRBC_RING_OVERRUN:
		case XHCI_TRBC_RING_UNDERRUN:
			// TODO: abort the phone; rings are unscheduled by xHC by now
			ep->isoch_started = false;
			err = EIO;
			break;
		case XHCI_TRBC_SHORT_PACKET:
			usb_log_debug("Short transfer.");
			/* fallthrough */
		case XHCI_TRBC_SUCCESS:
			break;
		default:
			usb_log_warning("Transfer not successfull: %u", completion_code);
			err = EIO;
	}

	/* The event must correspond to the oldest in-flight buffer. */
	xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_dequeue(ep);
	if (isoch_transfer->interrupt_trb_phys != trb->parameter) {
		usb_log_error("Non-matching trb to isochronous transfer, skipping.");
		// FIXME: what to do? probably just kill the whole endpoint
		err = ENOENT;
	}

	if (ep->base.direction == USB_DIRECTION_IN) {
		/* Subtract the residue to get the number of bytes received;
		 * receiving less than requested is fine for isochronous. */
		isoch_transfer->size -= TRB_TRANSFER_LENGTH(*trb);
	}

	/* Wake a fibril waiting for a slot in schedule_isochronous_{in,out}. */
	fibril_condvar_signal(&ep->isoch_avail);
	fibril_mutex_unlock(&ep->isoch_guard);
	return err;
}
438
/** Handle a Transfer Event TRB from the primary event ring.
 *
 * Resolves the slot and endpoint from the event, updates the ring dequeue
 * pointer, and either delegates to the isochronous handler or finishes
 * the endpoint's active batch (copying received data for IN transfers).
 *
 * @param hc HC that generated the event.
 * @param trb The Transfer Event TRB.
 * @return Error code; ENOENT when no matching slot/endpoint/batch exists.
 */
int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
{
	/* Physical address of the TRB that generated this event. */
	uintptr_t addr = trb->parameter;
	const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
	const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

	xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
	if (!dev) {
		usb_log_error("Transfer event on disabled slot %u", slot_id);
		return ENOENT;
	}

	/* DCI encodes both endpoint number and direction; halving yields
	 * the endpoint number. */
	const usb_endpoint_t ep_num = ep_dci / 2;
	xhci_endpoint_t *ep = xhci_device_get_endpoint(dev, ep_num);
	if (!ep) {
		usb_log_error("Transfer event on dropped endpoint %u of device "
		    XHCI_DEV_FMT, ep_num, XHCI_DEV_ARGS(*dev));
		return ENOENT;
	}

	/* FIXME: This is racy. Do we care? */
	ep->ring.dequeue = addr;

	if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return handle_isochronous_transfer_event(hc, trb, ep);
	}

	fibril_mutex_lock(&ep->base.guard);
	usb_transfer_batch_t *batch = ep->base.active_batch;
	if (!batch) {
		fibril_mutex_unlock(&ep->base.guard);
		return ENOENT;
	}

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
		case XHCI_TRBC_SHORT_PACKET:
			usb_log_debug("Short transfer.");
			/* fallthrough */
		case XHCI_TRBC_SUCCESS:
			batch->error = EOK;
			/* The event carries the residue, not the count. */
			batch->transfered_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
			break;

		default:
			usb_log_warning("Transfer not successfull: %u", completion_code);
			batch->error = EIO;
	}

	usb_transfer_batch_reset_toggle(batch);
	endpoint_deactivate_locked(&ep->base);
	fibril_mutex_unlock(&ep->base.guard);

	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

	/* For IN transfers, copy the received data out of the DMA bounce
	 * buffer into the caller's buffer. */
	if (batch->dir == USB_DIRECTION_IN) {
		assert(batch->buffer);
		assert(batch->transfered_size <= batch->buffer_size);
		memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transfered_size);
	}

	usb_transfer_batch_finish(batch);
	return EOK;
}
503
/** Scheduling function signature, one implementation per transfer type. */
typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

/* Dispatch table indexed by usb_transfer_type_t. Isochronous transfers
 * are dispatched separately in xhci_transfer_schedule (they need the
 * endpoint and device as well), hence the NULL entry. */
static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};
512
513int xhci_transfer_schedule(xhci_hc_t *hc, usb_transfer_batch_t *batch)
514{
515 assert(hc);
516 endpoint_t *ep = batch->ep;
517
518 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
519 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
520 xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);
521
522 /* Offline devices don't schedule transfers other than on EP0. */
523 if (!xhci_dev->online && ep->endpoint > 0) {
524 return EAGAIN;
525 }
526
527 // FIXME: find a better way to check if the ring is not initialized
528 if (!xhci_ep->ring.segment_count) {
529 usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
530 XHCI_EP_ARGS(*xhci_ep));
531 return EINVAL;
532 }
533
534 // Isochronous transfer needs to be handled differently
535 if(batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
536 return schedule_isochronous(hc, transfer, xhci_ep, xhci_dev);
537 }
538
539 const usb_transfer_type_t type = batch->ep->transfer_type;
540 assert(type >= 0 && type < ARRAY_SIZE(transfer_handlers));
541 assert(transfer_handlers[type]);
542
543 if (batch->buffer_size > 0) {
544 if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
545 return ENOMEM;
546 }
547
548 if (batch->dir != USB_DIRECTION_IN) {
549 // Sending stuff from host to device, we need to copy the actual data.
550 memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
551 }
552
553 fibril_mutex_lock(&ep->guard);
554 endpoint_activate_locked(ep, batch);
555 const int err = transfer_handlers[batch->ep->transfer_type](hc, transfer);
556
557 if (err) {
558 endpoint_deactivate_locked(ep);
559 fibril_mutex_unlock(&ep->guard);
560 return err;
561 }
562
563 /* After the critical section, the transfer can already be finished or aborted. */
564 transfer = NULL; batch = NULL;
565 fibril_mutex_unlock(&ep->guard);
566
567 const uint8_t slot_id = xhci_dev->slot_id;
568 const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
569 return hc_ring_doorbell(hc, slot_id, target);
570}
Note: See TracBrowser for help on using the repository browser.