source: mainline/uspace/drv/bus/usb/xhci/transfers.c@ 961a5ee

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 961a5ee was 3038d51, checked in by Salmelu <salmelu@…>, 8 years ago

xhci: Multi TRB TD for control and stream bulk

  • Property mode set to 100644
File size: 16.1 KB
Line 
1/*
2 * Copyright (c) 2017 Michal Staruch
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller transfer ring management
34 */
35
36#include <usb/debug.h>
37#include <usb/request.h>
38#include "endpoint.h"
39#include "hc.h"
40#include "hw_struct/trb.h"
41#include "streams.h"
42#include "transfers.h"
43#include "trb_ring.h"
44
/** Direction flag values for the DIR bit of Data/Status Stage TRBs. */
typedef enum {
	STAGE_OUT,
	STAGE_IN,
} stage_dir_flag_t;

/* Bit 7 of bmRequestType: set means a device-to-host (IN) request. */
#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)
52
53
54/** Get direction flag of data stage.
55 * See Table 7 of xHCI specification.
56 */
57static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
58 uint8_t bmRequestType, uint16_t wLength)
59{
60 /* See Table 7 of xHCI specification */
61 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
62 ? STAGE_OUT
63 : STAGE_IN;
64}
65
/** Transfer Type (TRT) field values of a Setup Stage TRB.
 * See Table 8 of the xHCI specification; value 1 is reserved.
 */
typedef enum {
	DATA_STAGE_NO = 0,
	DATA_STAGE_OUT = 2,
	DATA_STAGE_IN = 3,
} data_stage_type_t;
71
72/** Get transfer type flag.
73 * See Table 8 of xHCI specification.
74 */
75static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
76 bmRequestType, uint16_t wLength)
77{
78 if (wLength == 0)
79 return DATA_STAGE_NO;
80
81 /* See Table 7 of xHCI specification */
82 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
83 ? DATA_STAGE_IN
84 : DATA_STAGE_NO;
85}
86
87static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
88{
89 usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);
90
91 return request_type == USB_REQUEST_TYPE_STANDARD &&
92 (setup->request == USB_DEVREQ_SET_CONFIGURATION
93 || setup->request == USB_DEVREQ_SET_INTERFACE);
94}
95
96/**
97 * Create a xHCI-specific transfer batch.
98 *
99 * Bus callback.
100 */
101usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep)
102{
103 xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
104 if (!transfer)
105 return NULL;
106
107 usb_transfer_batch_init(&transfer->batch, ep);
108 return &transfer->batch;
109}
110
111/**
112 * Destroy a xHCI transfer.
113 */
114void xhci_transfer_destroy(usb_transfer_batch_t* batch)
115{
116 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
117
118 dma_buffer_free(&transfer->hc_buffer);
119 free(transfer);
120}
121
122static xhci_trb_ring_t *get_ring(xhci_transfer_t *transfer)
123{
124 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
125 return xhci_endpoint_get_ring(xhci_ep, transfer->batch.target.stream);
126}
127
128static int calculate_trb_count(xhci_transfer_t *transfer)
129{
130 const size_t size = transfer->batch.buffer_size;
131 return (size + PAGE_SIZE - 1 )/ PAGE_SIZE;
132}
133
134static void trb_set_buffer(xhci_transfer_t *transfer, xhci_trb_t *trb,
135 size_t i, size_t total, size_t *remaining)
136{
137 const uintptr_t ptr = dma_buffer_phys(&transfer->hc_buffer,
138 transfer->hc_buffer.virt + i * PAGE_SIZE);
139
140 trb->parameter = host2xhci(64, ptr);
141 TRB_CTRL_SET_TD_SIZE(*trb, max(31, total - i - 1));
142 if (*remaining > PAGE_SIZE) {
143 TRB_CTRL_SET_XFER_LEN(*trb, PAGE_SIZE);
144 *remaining -= PAGE_SIZE;
145 }
146 else {
147 TRB_CTRL_SET_XFER_LEN(*trb, *remaining);
148 *remaining = 0;
149 }
150}
151
152static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
153{
154 usb_transfer_batch_t *batch = &transfer->batch;
155 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
156
157 usb_device_request_setup_packet_t* setup = &batch->setup.packet;
158
159 size_t buffer_count = 0;
160 if (setup->length > 0) {
161 buffer_count = calculate_trb_count(transfer);
162 }
163
164 xhci_trb_t trbs[buffer_count + 2];
165
166 xhci_trb_t *trb_setup = trbs;
167 xhci_trb_clean(trb_setup);
168
169 TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
170 TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
171 TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
172 TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
173 TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);
174
175 /* Size of the setup packet is always 8 */
176 TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);
177
178 /* Immediate data */
179 TRB_CTRL_SET_IDT(*trb_setup, 1);
180 TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
181 TRB_CTRL_SET_TRT(*trb_setup,
182 get_transfer_type(trb_setup, setup->request_type, setup->length));
183
184 /* Data stage */
185 if (setup->length > 0) {
186 int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
187 ? STAGE_IN : STAGE_OUT;
188 size_t remaining = transfer->batch.buffer_size;
189
190 for (size_t i = 0; i < buffer_count; ++i) {
191 xhci_trb_clean(&trbs[i + 1]);
192 trb_set_buffer(transfer, &trbs[i + 1], i, buffer_count, &remaining);
193
194 TRB_CTRL_SET_DIR(trbs[i + 1], stage_dir);
195 TRB_CTRL_SET_TRB_TYPE(trbs[i + 1], XHCI_TRB_TYPE_DATA_STAGE);
196
197 if (i == buffer_count - 1) break;
198
199 /* Set the chain bit as this is not the last TRB */
200 TRB_CTRL_SET_CHAIN(trbs[i], 1);
201 }
202 }
203
204 /* Status stage */
205 xhci_trb_t *trb_status = trbs + buffer_count + 1;
206 xhci_trb_clean(trb_status);
207
208 TRB_CTRL_SET_IOC(*trb_status, 1);
209 TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
210 TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup,
211 setup->request_type, setup->length));
212
213 // Issue a Configure Endpoint command, if needed.
214 if (configure_endpoint_needed(setup)) {
215 const int err = hc_configure_device(xhci_ep_to_dev(xhci_ep));
216 if (err)
217 return err;
218 }
219
220 return xhci_trb_ring_enqueue_multiple(get_ring(transfer), trbs,
221 buffer_count + 2, &transfer->interrupt_trb_phys);
222}
223
/** Build and enqueue a bulk transfer TD.
 *
 * Without streams the TD is a plain chain of Normal TRBs with IOC set on
 * the last one. With streams enabled, the TD is terminated by an extra
 * Event Data TRB carrying the transfer pointer, so the completion event
 * can be matched back to this transfer.
 *
 * NOTE(review): buffer_size == 0 yields buffer_count == 0, giving a
 * zero-length VLA and an out-of-bounds trbs[buffer_count - 1] access —
 * verify that callers never schedule empty bulk TDs.
 */
static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
	/* The stream-enabled endpoints need to chain ED trb */
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	if (!ep->primary_stream_data_size) {
		const size_t buffer_count = calculate_trb_count(transfer);
		xhci_trb_t trbs[buffer_count];
		size_t remaining = transfer->batch.buffer_size;

		for (size_t i = 0; i < buffer_count; ++i) {
			xhci_trb_clean(&trbs[i]);
			trb_set_buffer(transfer, &trbs[i], i, buffer_count, &remaining);
			TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);

			if (i == buffer_count - 1) break;

			/* Set the chain bit as this is not the last TRB */
			TRB_CTRL_SET_CHAIN(trbs[i], 1);
		}
		/* Set the interrupt bit for last TRB */
		TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);

		xhci_trb_ring_t* ring = get_ring(transfer);
		return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
			&transfer->interrupt_trb_phys);
	}
	else {
		/* Stream rings may not exist for an invalid stream id. */
		xhci_trb_ring_t* ring = get_ring(transfer);
		if (!ring) {
			return EINVAL;
		}

		const size_t buffer_count = calculate_trb_count(transfer);
		/* One extra slot for the trailing Event Data TRB. */
		xhci_trb_t trbs[buffer_count + 1];
		size_t remaining = transfer->batch.buffer_size;

		for (size_t i = 0; i < buffer_count; ++i) {
			xhci_trb_clean(&trbs[i]);
			/* total is buffer_count + 1: the Event Data TRB is part of the TD */
			trb_set_buffer(transfer, &trbs[i], i, buffer_count + 1, &remaining);
			TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);
			/* Every TRB chains to the next; the last chains to the ED TRB. */
			TRB_CTRL_SET_CHAIN(trbs[i], 1);
		}
		/* Evaluate Next TRB: process the Event Data TRB immediately. */
		TRB_CTRL_SET_ENT(trbs[buffer_count - 1], 1);

		/* Event Data TRB; its parameter identifies this transfer in the event. */
		xhci_trb_clean(&trbs[buffer_count]);
		trbs[buffer_count].parameter = host2xhci(64, (uintptr_t) transfer);
		TRB_CTRL_SET_TRB_TYPE(trbs[buffer_count], XHCI_TRB_TYPE_EVENT_DATA);
		TRB_CTRL_SET_IOC(trbs[buffer_count], 1);

		return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count + 1,
			&transfer->interrupt_trb_phys);
	}
}
277
278static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
279{
280 const size_t buffer_count = calculate_trb_count(transfer);
281 xhci_trb_t trbs[buffer_count];
282 size_t remaining = transfer->batch.buffer_size;
283
284 for (size_t i = 0; i < buffer_count; ++i) {
285 xhci_trb_clean(&trbs[i]);
286 trb_set_buffer(transfer, &trbs[i], i, buffer_count, &remaining);
287 TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);
288
289 if (i == buffer_count - 1) break;
290
291 /* Set the chain bit as this is not the last TRB */
292 TRB_CTRL_SET_CHAIN(trbs[i], 1);
293 }
294 /* Set the interrupt bit for last TRB */
295 TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);
296
297 xhci_trb_ring_t* ring = get_ring(transfer);
298 return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
299 &transfer->interrupt_trb_phys);
300}
301
302static int schedule_isochronous(xhci_transfer_t* transfer)
303{
304 endpoint_t *ep = transfer->batch.ep;
305
306 return ep->direction == USB_DIRECTION_OUT
307 ? isoch_schedule_out(transfer)
308 : isoch_schedule_in(transfer);
309}
310
/** Process a Transfer Event TRB delivered by the xHC.
 *
 * Resolves the device slot and endpoint the event refers to, matches it
 * to the batch in flight, translates the completion code to an errno,
 * copies received IN data back into the caller's buffer and finishes
 * the batch.
 *
 * @param hc  Host controller that produced the event.
 * @param trb The Transfer Event TRB.
 * @return EOK on success; ENOENT when the slot, endpoint or batch is
 *         no longer available.
 */
int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
{
	uintptr_t addr = trb->parameter;
	const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
	const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

	xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
	if (!dev) {
		usb_log_error("Transfer event on disabled slot %u", slot_id);
		return ENOENT;
	}

	/* Decode endpoint number and direction from the Device Context Index. */
	const usb_endpoint_t ep_num = ep_dci / 2;
	const usb_endpoint_t dir = ep_dci % 2 ? USB_DIRECTION_IN : USB_DIRECTION_OUT;
	/* Creating temporary reference */
	endpoint_t *ep_base = bus_find_endpoint(&dev->base, ep_num, dir);
	if (!ep_base) {
		usb_log_error("Transfer event on dropped endpoint %u %s of device "
		    XHCI_DEV_FMT, ep_num, usb_str_direction(dir), XHCI_DEV_ARGS(*dev));
		return ENOENT;
	}
	xhci_endpoint_t *ep = xhci_endpoint_get(ep_base);

	usb_transfer_batch_t *batch;
	xhci_transfer_t *transfer;

	if (TRB_EVENT_DATA(*trb)) {
		/* We schedule those only when streams are involved */
		assert(ep->primary_stream_ctx_array != NULL);

		/* We are received transfer pointer instead - work with that */
		transfer = (xhci_transfer_t *) addr;
		xhci_trb_ring_update_dequeue(get_ring(transfer),
		    transfer->interrupt_trb_phys);
		batch = &transfer->batch;
	}
	else {
		/* Event points directly at the TRB; advance the ring past it. */
		xhci_trb_ring_update_dequeue(&ep->ring, addr);

		/* Isochronous endpoints keep their own bookkeeping. */
		if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
			isoch_handle_transfer_event(hc, ep, trb);
			/* Dropping temporary reference */
			endpoint_del_ref(&ep->base);
			return EOK;
		}

		/* Detach the active batch under the endpoint guard. */
		fibril_mutex_lock(&ep->guard);
		batch = ep->base.active_batch;
		endpoint_deactivate_locked(&ep->base);
		fibril_mutex_unlock(&ep->guard);

		if (!batch) {
			/* Dropping temporary reference */
			endpoint_del_ref(&ep->base);
			return ENOENT;
		}

		transfer = xhci_transfer_from_batch(batch);
	}

	/* Translate the xHCI completion code into the batch's errno. */
	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		/* Event reports bytes NOT transferred; subtract from total. */
		batch->error = EOK;
		batch->transferred_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
		break;

	case XHCI_TRBC_DATA_BUFFER_ERROR:
		usb_log_warning("Transfer ended with data buffer error.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_BABBLE_DETECTED_ERROR:
		usb_log_warning("Babble detected during the transfer.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_USB_TRANSACTION_ERROR:
		usb_log_warning("USB Transaction error.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_TRB_ERROR:
		usb_log_error("Invalid transfer parameters.");
		batch->error = EINVAL;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_STALL_ERROR:
		usb_log_warning("Stall condition detected.");
		batch->error = ESTALL;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_SPLIT_TRANSACTION_ERROR:
		usb_log_error("Split transcation error detected.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	default:
		usb_log_warning("Transfer not successfull: %u", completion_code);
		batch->error = EIO;
	}

	/* IN data landed in the DMA bounce buffer; copy it to the caller. */
	if (batch->dir == USB_DIRECTION_IN) {
		assert(batch->buffer);
		assert(batch->transferred_size <= batch->buffer_size);
		memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transferred_size);
	}

	usb_transfer_batch_finish(batch);
	/* Dropping temporary reference */
	endpoint_del_ref(&ep->base);
	return EOK;
}
431
/** Per-type TD scheduler signature. */
typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

/* Dispatch table indexed by usb_transfer_type_t. Isochronous transfers
 * are routed separately in xhci_transfer_schedule, hence the NULL slot. */
static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};
440
/**
 * Schedule a batch for xHC.
 *
 * Bus callback.
 *
 * Validates the target, allocates a DMA bounce buffer, handles the
 * ClearFeature(ENDPOINT_HALT) special case by first resetting the halted
 * endpoint inside the HC, then dispatches to the per-type scheduler and
 * rings the endpoint doorbell.
 *
 * @return EOK on success, error code otherwise.
 */
int xhci_transfer_schedule(usb_transfer_batch_t *batch)
{
	endpoint_t *ep = batch->ep;

	xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep));
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
	xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

	if (!batch->target.address) {
		usb_log_error("Attempted to schedule transfer to address 0.");
		return EINVAL;
	}

	// FIXME: find a better way to check if the ring is not initialized
	if (!xhci_ep->ring.segment_count) {
		usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
		    XHCI_EP_ARGS(*xhci_ep));
		return EINVAL;
	}

	// Isochronous transfer needs to be handled differently
	if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return schedule_isochronous(transfer);
	}

	const usb_transfer_type_t type = batch->ep->transfer_type;
	assert(transfer_handlers[type]);

	/* The xHC needs DMA-able memory; bounce through hc_buffer. */
	if (batch->buffer_size > 0) {
		if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
			return ENOMEM;
	}

	/* NOTE(review): for an OUT batch with buffer_size == 0, hc_buffer
	 * was never allocated and memcpy gets a NULL destination (size 0) —
	 * confirm this is benign on all supported platforms. */
	if (batch->dir != USB_DIRECTION_IN) {
		// Sending stuff from host to device, we need to copy the actual data.
		memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
	}

	/*
	 * If this is a ClearFeature(ENDPOINT_HALT) request, we have to issue
	 * the Reset Endpoint command.
	 */
	if (batch->ep->transfer_type == USB_TRANSFER_CONTROL
	    && batch->dir == USB_DIRECTION_OUT) {
		const usb_device_request_setup_packet_t *request = &batch->setup.packet;
		if (request->request == USB_DEVREQ_CLEAR_FEATURE
		    && request->request_type == USB_REQUEST_RECIPIENT_ENDPOINT
		    && request->value == USB_FEATURE_ENDPOINT_HALT) {
			/* wIndex: bits 0-3 endpoint number, bit 7 direction. */
			const uint16_t index = uint16_usb2host(request->index);
			const usb_endpoint_t ep_num = index & 0xf;
			const usb_direction_t dir = (index >> 7)
			    ? USB_DIRECTION_IN
			    : USB_DIRECTION_OUT;
			endpoint_t *halted_ep = bus_find_endpoint(&xhci_dev->base, ep_num, dir);
			if (halted_ep) {
				/*
				 * TODO: Find out how to come up with stream_id. It might be
				 * possible that we have to clear all of them.
				 */
				const int err = xhci_endpoint_clear_halt(xhci_endpoint_get(halted_ep), 0);
				endpoint_del_ref(halted_ep);
				if (err) {
					/*
					 * The endpoint halt condition failed to be cleared in HC.
					 * As it does not make sense to send the reset to the device
					 * itself, return as unschedulable answer.
					 *
					 * Furthermore, if this is a request to clear EP 0 stall, it
					 * would be gone forever, as the endpoint is halted.
					 */
					return err;
				}
			} else {
				usb_log_warning("Device(%u): Resetting unregistered endpoint"
				    " %u %s.", xhci_dev->base.address, ep_num,
				    usb_str_direction(dir));
			}
		}
	}


	int err;
	fibril_mutex_lock(&xhci_ep->guard);

	/* Mark the batch active; fails if another batch is already in flight. */
	if ((err = endpoint_activate_locked(ep, batch))) {
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	/* Enqueue the TD; roll back activation on failure. */
	if ((err = transfer_handlers[batch->ep->transfer_type](hc, transfer))) {
		endpoint_deactivate_locked(ep);
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	/* Tell the xHC there is work on this endpoint/stream. */
	hc_ring_ep_doorbell(xhci_ep, batch->target.stream);
	fibril_mutex_unlock(&xhci_ep->guard);
	return EOK;
}
Note: See TracBrowser for help on using the repository browser.