source: mainline/uspace/drv/bus/usb/xhci/transfers.c@ 1d758fc

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 1d758fc was 1d758fc, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

usb: rethinking DMA buffers

  • Property mode set to 100644
File size: 15.6 KB
Line 
1/*
2 * Copyright (c) 2017 Michal Staruch
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller transfer ring management
34 */
35
36#include <usb/debug.h>
37#include <usb/request.h>
38#include "endpoint.h"
39#include "hc.h"
40#include "hw_struct/trb.h"
41#include "streams.h"
42#include "transfers.h"
43#include "trb_ring.h"
44
/* Direction flag values for Data/Status Stage TRBs (xHCI Tables 6 and 7). */
typedef enum {
	STAGE_OUT,	/* host-to-device */
	STAGE_IN,	/* device-to-host */
} stage_dir_flag_t;

/* Bit 7 of bmRequestType is set for device-to-host (IN) requests. */
#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)
52
53
/** Get direction flag of the status stage.
 *
 * See Table 7 of xHCI specification: the status stage runs in the
 * opposite direction to the data stage. Only a device-to-host request
 * that actually carries data (wLength > 0) has an IN data stage and
 * therefore an OUT status stage; all other requests get an IN status.
 *
 * @param trb           Unused; kept for symmetry with get_transfer_type().
 * @param bmRequestType Setup packet bmRequestType field.
 * @param wLength       Setup packet wLength field.
 */
static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
	uint8_t bmRequestType, uint16_t wLength)
{
	/* See Table 7 of xHCI specification */
	return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
		? STAGE_OUT
		: STAGE_IN;
}
65
/* TRT (Transfer Type) field values for the Setup Stage TRB (xHCI Table 8). */
typedef enum {
	DATA_STAGE_NO = 0,	/* no data stage follows the setup stage */
	DATA_STAGE_OUT = 2,	/* OUT data stage */
	DATA_STAGE_IN = 3,	/* IN data stage */
} data_stage_type_t;
71
72/** Get transfer type flag.
73 * See Table 8 of xHCI specification.
74 */
75static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
76 bmRequestType, uint16_t wLength)
77{
78 if (wLength == 0)
79 return DATA_STAGE_NO;
80
81 /* See Table 7 of xHCI specification */
82 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
83 ? DATA_STAGE_IN
84 : DATA_STAGE_NO;
85}
86
87static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
88{
89 usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);
90
91 return request_type == USB_REQUEST_TYPE_STANDARD &&
92 (setup->request == USB_DEVREQ_SET_CONFIGURATION
93 || setup->request == USB_DEVREQ_SET_INTERFACE);
94}
95
96/**
97 * Create a xHCI-specific transfer batch.
98 *
99 * Bus callback.
100 */
101usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep)
102{
103 xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
104 if (!transfer)
105 return NULL;
106
107 usb_transfer_batch_init(&transfer->batch, ep);
108 return &transfer->batch;
109}
110
111/**
112 * Destroy a xHCI transfer.
113 */
114void xhci_transfer_destroy(usb_transfer_batch_t* batch)
115{
116 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
117 free(transfer);
118}
119
120static xhci_trb_ring_t *get_ring(xhci_transfer_t *transfer)
121{
122 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
123 return xhci_endpoint_get_ring(xhci_ep, transfer->batch.target.stream);
124}
125
126static int calculate_trb_count(xhci_transfer_t *transfer)
127{
128 const size_t size = transfer->batch.size;
129 return (size + PAGE_SIZE - 1 )/ PAGE_SIZE;
130}
131
132static void trb_set_buffer(xhci_transfer_t *transfer, xhci_trb_t *trb,
133 size_t i, size_t total, size_t *remaining)
134{
135 const uintptr_t ptr = dma_buffer_phys(&transfer->batch.dma_buffer,
136 transfer->batch.dma_buffer.virt + i * PAGE_SIZE);
137
138 trb->parameter = host2xhci(64, ptr);
139 TRB_CTRL_SET_TD_SIZE(*trb, max(31, total - i - 1));
140 if (*remaining > PAGE_SIZE) {
141 TRB_CTRL_SET_XFER_LEN(*trb, PAGE_SIZE);
142 *remaining -= PAGE_SIZE;
143 }
144 else {
145 TRB_CTRL_SET_XFER_LEN(*trb, *remaining);
146 *remaining = 0;
147 }
148}
149
150static errno_t schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
151{
152 usb_transfer_batch_t *batch = &transfer->batch;
153 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
154
155 usb_device_request_setup_packet_t* setup = &batch->setup.packet;
156
157 size_t buffer_count = 0;
158 if (setup->length > 0) {
159 buffer_count = calculate_trb_count(transfer);
160 }
161
162 xhci_trb_t trbs[buffer_count + 2];
163
164 xhci_trb_t *trb_setup = trbs;
165 xhci_trb_clean(trb_setup);
166
167 TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
168 TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
169 TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
170 TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
171 TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);
172
173 /* Size of the setup packet is always 8 */
174 TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);
175
176 /* Immediate data */
177 TRB_CTRL_SET_IDT(*trb_setup, 1);
178 TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
179 TRB_CTRL_SET_TRT(*trb_setup,
180 get_transfer_type(trb_setup, setup->request_type, setup->length));
181
182 /* Data stage */
183 if (setup->length > 0) {
184 int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
185 ? STAGE_IN : STAGE_OUT;
186 size_t remaining = transfer->batch.size;
187
188 for (size_t i = 0; i < buffer_count; ++i) {
189 xhci_trb_clean(&trbs[i + 1]);
190 trb_set_buffer(transfer, &trbs[i + 1], i, buffer_count, &remaining);
191
192 TRB_CTRL_SET_DIR(trbs[i + 1], stage_dir);
193 TRB_CTRL_SET_TRB_TYPE(trbs[i + 1], XHCI_TRB_TYPE_DATA_STAGE);
194
195 if (i == buffer_count - 1) break;
196
197 /* Set the chain bit as this is not the last TRB */
198 TRB_CTRL_SET_CHAIN(trbs[i], 1);
199 }
200 }
201
202 /* Status stage */
203 xhci_trb_t *trb_status = trbs + buffer_count + 1;
204 xhci_trb_clean(trb_status);
205
206 TRB_CTRL_SET_IOC(*trb_status, 1);
207 TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
208 TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup,
209 setup->request_type, setup->length));
210
211 // Issue a Configure Endpoint command, if needed.
212 if (configure_endpoint_needed(setup)) {
213 const errno_t err = hc_configure_device(xhci_ep_to_dev(xhci_ep));
214 if (err)
215 return err;
216 }
217
218 return xhci_trb_ring_enqueue_multiple(get_ring(transfer), trbs,
219 buffer_count + 2, &transfer->interrupt_trb_phys);
220}
221
/** Schedule a bulk transfer.
 *
 * Two cases: on a plain endpoint the TD is a chain of Normal TRBs with IOC
 * set on the last one. On a stream-enabled endpoint, every Normal TRB is
 * chained into a trailing Event Data TRB whose parameter carries the
 * transfer pointer, so the completion event can be matched back to this
 * transfer (see the TRB_EVENT_DATA branch of xhci_handle_transfer_event).
 */
static errno_t schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
	/* The stream-enabled endpoints need to chain ED trb */
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	if (!ep->primary_stream_data_size) {
		const size_t buffer_count = calculate_trb_count(transfer);
		xhci_trb_t trbs[buffer_count];
		size_t remaining = transfer->batch.size;

		for (size_t i = 0; i < buffer_count; ++i) {
			xhci_trb_clean(&trbs[i]);
			trb_set_buffer(transfer, &trbs[i], i, buffer_count, &remaining);
			TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);

			if (i == buffer_count - 1) break;

			/* Set the chain bit as this is not the last TRB */
			TRB_CTRL_SET_CHAIN(trbs[i], 1);
		}
		/* Set the interrupt bit for last TRB */
		TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);

		xhci_trb_ring_t* ring = get_ring(transfer);
		return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
		    &transfer->interrupt_trb_phys);
	}
	else {
		/* Stream rings may not exist for a given stream id. */
		xhci_trb_ring_t* ring = get_ring(transfer);
		if (!ring) {
			return EINVAL;
		}

		/* One extra slot for the trailing Event Data TRB. */
		const size_t buffer_count = calculate_trb_count(transfer);
		xhci_trb_t trbs[buffer_count + 1];
		size_t remaining = transfer->batch.size;

		for (size_t i = 0; i < buffer_count; ++i) {
			xhci_trb_clean(&trbs[i]);
			trb_set_buffer(transfer, &trbs[i], i, buffer_count + 1, &remaining);
			TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);
			/* Every data TRB chains on, up to the Event Data TRB. */
			TRB_CTRL_SET_CHAIN(trbs[i], 1);
		}
		/* Evaluate Next TRB: process the Event Data TRB right away. */
		TRB_CTRL_SET_ENT(trbs[buffer_count - 1], 1);

		/*
		 * Event Data TRB: its parameter (our transfer pointer) is
		 * echoed back verbatim in the resulting Transfer Event.
		 */
		xhci_trb_clean(&trbs[buffer_count]);
		trbs[buffer_count].parameter = host2xhci(64, (uintptr_t) transfer);
		TRB_CTRL_SET_TRB_TYPE(trbs[buffer_count], XHCI_TRB_TYPE_EVENT_DATA);
		TRB_CTRL_SET_IOC(trbs[buffer_count], 1);

		return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count + 1,
		    &transfer->interrupt_trb_phys);
	}
}
275
276static errno_t schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
277{
278 const size_t buffer_count = calculate_trb_count(transfer);
279 xhci_trb_t trbs[buffer_count];
280 size_t remaining = transfer->batch.size;
281
282 for (size_t i = 0; i < buffer_count; ++i) {
283 xhci_trb_clean(&trbs[i]);
284 trb_set_buffer(transfer, &trbs[i], i, buffer_count, &remaining);
285 TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);
286
287 if (i == buffer_count - 1) break;
288
289 /* Set the chain bit as this is not the last TRB */
290 TRB_CTRL_SET_CHAIN(trbs[i], 1);
291 }
292 /* Set the interrupt bit for last TRB */
293 TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);
294
295 xhci_trb_ring_t* ring = get_ring(transfer);
296 return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
297 &transfer->interrupt_trb_phys);
298}
299
300static int schedule_isochronous(xhci_transfer_t* transfer)
301{
302 endpoint_t *ep = transfer->batch.ep;
303
304 return ep->direction == USB_DIRECTION_OUT
305 ? isoch_schedule_out(transfer)
306 : isoch_schedule_in(transfer);
307}
308
309errno_t xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
310{
311 uintptr_t addr = trb->parameter;
312 const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
313 const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);
314
315 xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
316 if (!dev) {
317 usb_log_error("Transfer event on disabled slot %u", slot_id);
318 return ENOENT;
319 }
320
321 const usb_endpoint_t ep_num = ep_dci / 2;
322 const usb_endpoint_t dir = ep_dci % 2 ? USB_DIRECTION_IN : USB_DIRECTION_OUT;
323 /* Creating temporary reference */
324 endpoint_t *ep_base = bus_find_endpoint(&dev->base, ep_num, dir);
325 if (!ep_base) {
326 usb_log_error("Transfer event on dropped endpoint %u %s of device "
327 XHCI_DEV_FMT, ep_num, usb_str_direction(dir), XHCI_DEV_ARGS(*dev));
328 return ENOENT;
329 }
330 xhci_endpoint_t *ep = xhci_endpoint_get(ep_base);
331
332 usb_transfer_batch_t *batch;
333 xhci_transfer_t *transfer;
334
335 if (TRB_EVENT_DATA(*trb)) {
336 /* We schedule those only when streams are involved */
337 assert(ep->primary_stream_ctx_array != NULL);
338
339 /* We are received transfer pointer instead - work with that */
340 transfer = (xhci_transfer_t *) addr;
341 xhci_trb_ring_update_dequeue(get_ring(transfer),
342 transfer->interrupt_trb_phys);
343 batch = &transfer->batch;
344 }
345 else {
346 xhci_trb_ring_update_dequeue(&ep->ring, addr);
347
348 if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
349 isoch_handle_transfer_event(hc, ep, trb);
350 /* Dropping temporary reference */
351 endpoint_del_ref(&ep->base);
352 return EOK;
353 }
354
355 fibril_mutex_lock(&ep->guard);
356 batch = ep->base.active_batch;
357 endpoint_deactivate_locked(&ep->base);
358 fibril_mutex_unlock(&ep->guard);
359
360 if (!batch) {
361 /* Dropping temporary reference */
362 endpoint_del_ref(&ep->base);
363 return ENOENT;
364 }
365
366 transfer = xhci_transfer_from_batch(batch);
367 }
368
369 const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
370 switch (completion_code) {
371 case XHCI_TRBC_SHORT_PACKET:
372 case XHCI_TRBC_SUCCESS:
373 batch->error = EOK;
374 batch->transferred_size = batch->size - TRB_TRANSFER_LENGTH(*trb);
375 break;
376
377 case XHCI_TRBC_DATA_BUFFER_ERROR:
378 usb_log_warning("Transfer ended with data buffer error.");
379 batch->error = EAGAIN;
380 batch->transferred_size = 0;
381 break;
382
383 case XHCI_TRBC_BABBLE_DETECTED_ERROR:
384 usb_log_warning("Babble detected during the transfer.");
385 batch->error = EAGAIN;
386 batch->transferred_size = 0;
387 break;
388
389 case XHCI_TRBC_USB_TRANSACTION_ERROR:
390 usb_log_warning("USB Transaction error.");
391 batch->error = EAGAIN;
392 batch->transferred_size = 0;
393 break;
394
395 case XHCI_TRBC_TRB_ERROR:
396 usb_log_error("Invalid transfer parameters.");
397 batch->error = EINVAL;
398 batch->transferred_size = 0;
399 break;
400
401 case XHCI_TRBC_STALL_ERROR:
402 usb_log_warning("Stall condition detected.");
403 batch->error = ESTALL;
404 batch->transferred_size = 0;
405 break;
406
407 case XHCI_TRBC_SPLIT_TRANSACTION_ERROR:
408 usb_log_error("Split transcation error detected.");
409 batch->error = EAGAIN;
410 batch->transferred_size = 0;
411 break;
412
413 default:
414 usb_log_warning("Transfer not successfull: %u", completion_code);
415 batch->error = EIO;
416 }
417
418 assert(batch->transferred_size <= batch->size);
419
420 usb_transfer_batch_finish(batch);
421 /* Dropping temporary reference */
422 endpoint_del_ref(&ep->base);
423 return EOK;
424}
425
/* Scheduler callback: builds and enqueues the TRBs for one transfer. */
typedef errno_t (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

/*
 * Dispatch table indexed by usb_transfer_type_t. Isochronous transfers are
 * routed separately in xhci_transfer_schedule(), hence the NULL entry.
 */
static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};
434
/**
 * Schedule a batch for xHC.
 *
 * Bus callback. Validates the target, handles the special cases
 * (isochronous transfers, ClearFeature(ENDPOINT_HALT)), then activates the
 * endpoint, lets the per-type handler enqueue the TRBs and rings the
 * doorbell — all under the endpoint guard.
 */
errno_t xhci_transfer_schedule(usb_transfer_batch_t *batch)
{
	endpoint_t *ep = batch->ep;

	xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep));
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
	xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

	if (!batch->target.address) {
		usb_log_error("Attempted to schedule transfer to address 0.");
		return EINVAL;
	}

	// FIXME: find a better way to check if the ring is not initialized
	if (!xhci_ep->ring.segment_count) {
		usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
		    XHCI_EP_ARGS(*xhci_ep));
		return EINVAL;
	}

	// Isochronous transfer needs to be handled differently
	if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return schedule_isochronous(transfer);
	}

	const usb_transfer_type_t type = batch->ep->transfer_type;
	assert(transfer_handlers[type]);

	/*
	 * If this is a ClearFeature(ENDPOINT_HALT) request, we have to issue
	 * the Reset Endpoint command.
	 */
	if (batch->ep->transfer_type == USB_TRANSFER_CONTROL
	    && batch->dir == USB_DIRECTION_OUT) {
		const usb_device_request_setup_packet_t *request = &batch->setup.packet;
		/*
		 * NOTE(review): this compares the whole bmRequestType byte to
		 * the recipient constant rather than masking the recipient
		 * bits; it matches the 0x02 ClearFeature-to-endpoint request,
		 * but confirm that is the only intended match.
		 */
		if (request->request == USB_DEVREQ_CLEAR_FEATURE
		    && request->request_type == USB_REQUEST_RECIPIENT_ENDPOINT
		    && request->value == USB_FEATURE_ENDPOINT_HALT) {
			/* wIndex: endpoint number in bits 3:0, direction in bit 7. */
			const uint16_t index = uint16_usb2host(request->index);
			const usb_endpoint_t ep_num = index & 0xf;
			const usb_direction_t dir = (index >> 7)
			    ? USB_DIRECTION_IN
			    : USB_DIRECTION_OUT;
			endpoint_t *halted_ep = bus_find_endpoint(&xhci_dev->base, ep_num, dir);
			if (halted_ep) {
				/*
				 * TODO: Find out how to come up with stream_id. It might be
				 * possible that we have to clear all of them.
				 */
				const errno_t err = xhci_endpoint_clear_halt(xhci_endpoint_get(halted_ep), 0);
				endpoint_del_ref(halted_ep);
				if (err) {
					/*
					 * The endpoint halt condition failed to be cleared in HC.
					 * As it does not make sense to send the reset to the device
					 * itself, return as unschedulable answer.
					 *
					 * Furthermore, if this is a request to clear EP 0 stall, it
					 * would be gone forever, as the endpoint is halted.
					 */
					return err;
				}
			} else {
				usb_log_warning("Device(%u): Resetting unregistered endpoint"
				    " %u %s.", xhci_dev->base.address, ep_num,
				    usb_str_direction(dir));
			}
		}
	}


	errno_t err;
	fibril_mutex_lock(&xhci_ep->guard);

	/* Mark the endpoint busy; fails if another batch is active. */
	if ((err = endpoint_activate_locked(ep, batch))) {
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	/* Enqueue the TRBs; roll back the activation on failure. */
	if ((err = transfer_handlers[batch->ep->transfer_type](hc, transfer))) {
		endpoint_deactivate_locked(ep);
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	/* Kick the xHC to start processing the newly enqueued TD. */
	hc_ring_ep_doorbell(xhci_ep, batch->target.stream);
	fibril_mutex_unlock(&xhci_ep->guard);
	return EOK;
}
Note: See TracBrowser for help on using the repository browser.