source: mainline/uspace/drv/bus/usb/xhci/transfers.c@ a1ce9bd

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a1ce9bd was a1ce9bd, checked in by Salmelu <salmelu@…>, 8 years ago

xhci: Split buffer into TRBs by pages

Only done for interrupt and non-stream bulk for now

  • Property mode set to 100644
File size: 15.9 KB
Line 
1/*
2 * Copyright (c) 2017 Michal Staruch
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller transfer ring management
34 */
35
36#include <usb/debug.h>
37#include <usb/request.h>
38#include "endpoint.h"
39#include "hc.h"
40#include "hw_struct/trb.h"
41#include "streams.h"
42#include "transfers.h"
43#include "trb_ring.h"
44
/* Values for the DIR bit of Data Stage and Status Stage TRBs. */
typedef enum {
	STAGE_OUT,
	STAGE_IN,
} stage_dir_flag_t;

/* Bit 7 of bmRequestType: set means a device-to-host (IN) request. */
#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)
52
53
54/** Get direction flag of data stage.
55 * See Table 7 of xHCI specification.
56 */
57static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
58 uint8_t bmRequestType, uint16_t wLength)
59{
60 /* See Table 7 of xHCI specification */
61 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
62 ? STAGE_OUT
63 : STAGE_IN;
64}
65
/* Encodings of the TRT (Transfer Type) field of a Setup Stage TRB.
 * The numeric values are mandated by the xHCI specification
 * (1 is reserved, hence the gap). */
typedef enum {
	DATA_STAGE_NO = 0,
	DATA_STAGE_OUT = 2,
	DATA_STAGE_IN = 3,
} data_stage_type_t;
71
72/** Get transfer type flag.
73 * See Table 8 of xHCI specification.
74 */
75static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
76 bmRequestType, uint16_t wLength)
77{
78 if (wLength == 0)
79 return DATA_STAGE_NO;
80
81 /* See Table 7 of xHCI specification */
82 return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
83 ? DATA_STAGE_IN
84 : DATA_STAGE_NO;
85}
86
87static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
88{
89 usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);
90
91 return request_type == USB_REQUEST_TYPE_STANDARD &&
92 (setup->request == USB_DEVREQ_SET_CONFIGURATION
93 || setup->request == USB_DEVREQ_SET_INTERFACE);
94}
95
96/**
97 * Create a xHCI-specific transfer batch.
98 *
99 * Bus callback.
100 */
101usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep)
102{
103 xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
104 if (!transfer)
105 return NULL;
106
107 usb_transfer_batch_init(&transfer->batch, ep);
108 return &transfer->batch;
109}
110
111/**
112 * Destroy a xHCI transfer.
113 */
114void xhci_transfer_destroy(usb_transfer_batch_t* batch)
115{
116 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
117
118 dma_buffer_free(&transfer->hc_buffer);
119 free(transfer);
120}
121
122static xhci_trb_ring_t *get_ring(xhci_transfer_t *transfer)
123{
124 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
125 return xhci_endpoint_get_ring(xhci_ep, transfer->batch.target.stream);
126}
127
128static int calculate_trb_count(xhci_transfer_t *transfer)
129{
130 const size_t size = transfer->batch.buffer_size;
131 return (size + PAGE_SIZE - 1 )/ PAGE_SIZE;
132}
133
134static void trb_set_buffer(xhci_transfer_t *transfer, xhci_trb_t *trb,
135 size_t i, size_t total)
136{
137 const uintptr_t ptr = dma_buffer_phys(&transfer->hc_buffer,
138 transfer->hc_buffer.virt + i * PAGE_SIZE);
139
140 trb->parameter = host2xhci(64, ptr);
141 TRB_CTRL_SET_TD_SIZE(*trb, max(31, total - i - 1));
142 if (i < total - 1) {
143 TRB_CTRL_SET_XFER_LEN(*trb, PAGE_SIZE);
144 }
145 else {
146 const size_t size = ((transfer->batch.buffer_size - 1) % PAGE_SIZE) + 1;
147 TRB_CTRL_SET_XFER_LEN(*trb, size);
148 }
149}
150
/** Enqueue the TRBs of a control transfer: Setup, optional Data, Status.
 *
 * @param hc Host controller (unused here; kept for the transfer_handler
 *           signature).
 * @param transfer Transfer to schedule; its hc_buffer (if any) holds the
 *                 data stage payload.
 * @return EOK on success, error code from the ring enqueue or from
 *         hc_configure_device otherwise.
 */
static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	usb_transfer_batch_t *batch = &transfer->batch;
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

	usb_device_request_setup_packet_t* setup = &batch->setup.packet;

	/* At most three TRBs: Setup + Data (optional) + Status. */
	xhci_trb_t trbs[3];
	int trbs_used = 0;

	xhci_trb_t *trb_setup = trbs + trbs_used++;
	xhci_trb_clean(trb_setup);

	TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
	TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
	TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
	TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
	TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);

	/* Size of the setup packet is always 8 */
	TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

	/* IDT: the 8 setup bytes live in the TRB itself, not in memory. */
	TRB_CTRL_SET_IDT(*trb_setup, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
	TRB_CTRL_SET_TRT(*trb_setup,
		get_transfer_type(trb_setup, setup->request_type, setup->length));

	/* Data stage */
	xhci_trb_t *trb_data = NULL;
	if (setup->length > 0) {
		trb_data = trbs + trbs_used++;
		xhci_trb_clean(trb_data);

		trb_data->parameter = host2xhci(64, transfer->hc_buffer.phys);

		// data size (sent for OUT, or buffer size)
		TRB_CTRL_SET_XFER_LEN(*trb_data, batch->buffer_size);
		// FIXME: TD size 4.11.2.4
		TRB_CTRL_SET_TD_SIZE(*trb_data, 1);

		TRB_CTRL_SET_TRB_TYPE(*trb_data, XHCI_TRB_TYPE_DATA_STAGE);

		/* Data stage direction follows bit 7 of bmRequestType. */
		int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
			? STAGE_IN : STAGE_OUT;
		TRB_CTRL_SET_DIR(*trb_data, stage_dir);
	}

	/* Status stage — the only TRB that raises a completion interrupt. */
	xhci_trb_t *trb_status = trbs + trbs_used++;
	xhci_trb_clean(trb_status);

	TRB_CTRL_SET_IOC(*trb_status, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
	TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup,
		setup->request_type, setup->length));

	// Issue a Configure Endpoint command, if needed.
	if (configure_endpoint_needed(setup)) {
		const int err = hc_configure_device(xhci_ep_to_dev(xhci_ep));
		if (err)
			return err;
	}

	return xhci_trb_ring_enqueue_multiple(get_ring(transfer), trbs,
		trbs_used, &transfer->interrupt_trb_phys);
}
219
/** Enqueue the TRBs of a bulk transfer.
 *
 * Non-stream endpoints get one chained Normal TRB per page of the buffer.
 * Stream endpoints get a single Normal TRB followed by an Event Data TRB
 * carrying the transfer pointer, so the completion event can be matched
 * back to this transfer.
 *
 * @param hc Host controller (unused here; kept for the transfer_handler
 *           signature).
 * @return EOK on success, error code otherwise.
 */
static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
	/* The stream-enabled endpoints need to chain ED trb */
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	if (!ep->primary_stream_data_size) {
		/* NOTE(review): a zero-sized batch makes buffer_count 0, so the
		 * VLA below is zero-sized (UB) and trbs[buffer_count - 1]
		 * underflows — confirm zero-length bulk is rejected upstream. */
		const size_t buffer_count = calculate_trb_count(transfer);
		xhci_trb_ring_t* ring = get_ring(transfer);
		xhci_trb_t trbs[buffer_count];

		for (size_t i = 0; i < buffer_count; ++i) {
			xhci_trb_clean(&trbs[i]);
			trb_set_buffer(transfer, &trbs[i], i, buffer_count);
			TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);

			if (i == buffer_count - 1) break;

			/* Set the chain bit as this is not the last TRB */
			TRB_CTRL_SET_CHAIN(trbs[i], 1);
		}
		/* Set the interrupt bit for last TRB */
		TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);
		return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
			&transfer->interrupt_trb_phys);
	}
	else {
		xhci_trb_t trb;
		xhci_trb_clean(&trb);
		trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

		// data size (sent for OUT, or buffer size)
		TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
		/* NOTE(review): TD size hard-coded to 2 — presumably counting
		 * this TRB plus the Event Data TRB; verify against 4.11.2.4. */
		TRB_CTRL_SET_TD_SIZE(trb, 2);
		TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);
		/* Chain into the following Event Data TRB; ENT forces an
		 * evaluation of the next TRB in the same service opportunity. */
		TRB_CTRL_SET_CHAIN(trb, 1);
		TRB_CTRL_SET_ENT(trb, 1);

		xhci_trb_ring_t* ring = get_ring(transfer);
		if (!ring) {
			return EINVAL;
		}

		int err = xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);

		if (err) {
			return err;
		}

		/* Event Data TRB: its parameter (the transfer pointer) is
		 * echoed back in the Transfer Event, see the EVENT_DATA branch
		 * of xhci_handle_transfer_event. */
		xhci_trb_clean(&trb);
		trb.parameter = host2xhci(64, (uintptr_t) transfer);
		TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_EVENT_DATA);
		TRB_CTRL_SET_IOC(trb, 1);

		return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
	}
}
275
276static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
277{
278 const size_t buffer_count = calculate_trb_count(transfer);
279 xhci_trb_ring_t* ring = get_ring(transfer);
280 xhci_trb_t trbs[buffer_count];
281
282 for (size_t i = 0; i < buffer_count; ++i) {
283 xhci_trb_clean(&trbs[i]);
284 trb_set_buffer(transfer, &trbs[i], i, buffer_count);
285 TRB_CTRL_SET_TRB_TYPE(trbs[i], XHCI_TRB_TYPE_NORMAL);
286
287 if (i == buffer_count - 1) break;
288
289 /* Set the chain bit as this is not the last TRB */
290 TRB_CTRL_SET_CHAIN(trbs[i], 1);
291 }
292 /* Set the interrupt bit for last TRB */
293 TRB_CTRL_SET_IOC(trbs[buffer_count - 1], 1);
294 return xhci_trb_ring_enqueue_multiple(ring, &trbs[0], buffer_count,
295 &transfer->interrupt_trb_phys);
296}
297
298static int schedule_isochronous(xhci_transfer_t* transfer)
299{
300 endpoint_t *ep = transfer->batch.ep;
301
302 return ep->direction == USB_DIRECTION_OUT
303 ? isoch_schedule_out(transfer)
304 : isoch_schedule_in(transfer);
305}
306
307int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
308{
309 uintptr_t addr = trb->parameter;
310 const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
311 const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);
312
313 xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
314 if (!dev) {
315 usb_log_error("Transfer event on disabled slot %u", slot_id);
316 return ENOENT;
317 }
318
319 const usb_endpoint_t ep_num = ep_dci / 2;
320 const usb_endpoint_t dir = ep_dci % 2 ? USB_DIRECTION_IN : USB_DIRECTION_OUT;
321 /* Creating temporary reference */
322 endpoint_t *ep_base = bus_find_endpoint(&dev->base, ep_num, dir);
323 if (!ep_base) {
324 usb_log_error("Transfer event on dropped endpoint %u %s of device "
325 XHCI_DEV_FMT, ep_num, usb_str_direction(dir), XHCI_DEV_ARGS(*dev));
326 return ENOENT;
327 }
328 xhci_endpoint_t *ep = xhci_endpoint_get(ep_base);
329
330 usb_transfer_batch_t *batch;
331 xhci_transfer_t *transfer;
332
333 if (TRB_EVENT_DATA(*trb)) {
334 /* We schedule those only when streams are involved */
335 assert(ep->primary_stream_ctx_array != NULL);
336
337 /* We are received transfer pointer instead - work with that */
338 transfer = (xhci_transfer_t *) addr;
339 xhci_trb_ring_update_dequeue(get_ring(transfer),
340 transfer->interrupt_trb_phys);
341 batch = &transfer->batch;
342 }
343 else {
344 xhci_trb_ring_update_dequeue(&ep->ring, addr);
345
346 if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
347 isoch_handle_transfer_event(hc, ep, trb);
348 /* Dropping temporary reference */
349 endpoint_del_ref(&ep->base);
350 return EOK;
351 }
352
353 fibril_mutex_lock(&ep->guard);
354 batch = ep->base.active_batch;
355 endpoint_deactivate_locked(&ep->base);
356 fibril_mutex_unlock(&ep->guard);
357
358 if (!batch) {
359 /* Dropping temporary reference */
360 endpoint_del_ref(&ep->base);
361 return ENOENT;
362 }
363
364 transfer = xhci_transfer_from_batch(batch);
365 }
366
367 const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
368 switch (completion_code) {
369 case XHCI_TRBC_SHORT_PACKET:
370 case XHCI_TRBC_SUCCESS:
371 batch->error = EOK;
372 batch->transferred_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
373 break;
374
375 case XHCI_TRBC_DATA_BUFFER_ERROR:
376 usb_log_warning("Transfer ended with data buffer error.");
377 batch->error = EAGAIN;
378 batch->transferred_size = 0;
379 break;
380
381 case XHCI_TRBC_BABBLE_DETECTED_ERROR:
382 usb_log_warning("Babble detected during the transfer.");
383 batch->error = EAGAIN;
384 batch->transferred_size = 0;
385 break;
386
387 case XHCI_TRBC_USB_TRANSACTION_ERROR:
388 usb_log_warning("USB Transaction error.");
389 batch->error = EAGAIN;
390 batch->transferred_size = 0;
391 break;
392
393 case XHCI_TRBC_TRB_ERROR:
394 usb_log_error("Invalid transfer parameters.");
395 batch->error = EINVAL;
396 batch->transferred_size = 0;
397 break;
398
399 case XHCI_TRBC_STALL_ERROR:
400 usb_log_warning("Stall condition detected.");
401 batch->error = ESTALL;
402 batch->transferred_size = 0;
403 break;
404
405 case XHCI_TRBC_SPLIT_TRANSACTION_ERROR:
406 usb_log_error("Split transcation error detected.");
407 batch->error = EAGAIN;
408 batch->transferred_size = 0;
409 break;
410
411 default:
412 usb_log_warning("Transfer not successfull: %u", completion_code);
413 batch->error = EIO;
414 }
415
416 if (batch->dir == USB_DIRECTION_IN) {
417 assert(batch->buffer);
418 assert(batch->transferred_size <= batch->buffer_size);
419 memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transferred_size);
420 }
421
422 usb_transfer_batch_finish(batch);
423 /* Dropping temporary reference */
424 endpoint_del_ref(&ep->base);
425 return EOK;
426}
427
/* Scheduler callback: enqueues the TRBs of one transfer onto its ring. */
typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

/* Dispatch table indexed by usb_transfer_type_t. Isochronous is NULL
 * because it is dispatched separately in xhci_transfer_schedule. */
static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};
436
437/**
438 * Schedule a batch for xHC.
439 *
440 * Bus callback.
441 */
442int xhci_transfer_schedule(usb_transfer_batch_t *batch)
443{
444 endpoint_t *ep = batch->ep;
445
446 xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep));
447 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
448 xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
449 xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);
450
451 if (!batch->target.address) {
452 usb_log_error("Attempted to schedule transfer to address 0.");
453 return EINVAL;
454 }
455
456 // FIXME: find a better way to check if the ring is not initialized
457 if (!xhci_ep->ring.segment_count) {
458 usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
459 XHCI_EP_ARGS(*xhci_ep));
460 return EINVAL;
461 }
462
463 // Isochronous transfer needs to be handled differently
464 if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
465 return schedule_isochronous(transfer);
466 }
467
468 const usb_transfer_type_t type = batch->ep->transfer_type;
469 assert(transfer_handlers[type]);
470
471 if (batch->buffer_size > 0) {
472 if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
473 return ENOMEM;
474 }
475
476 if (batch->dir != USB_DIRECTION_IN) {
477 // Sending stuff from host to device, we need to copy the actual data.
478 memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
479 }
480
481 /*
482 * If this is a ClearFeature(ENDPOINT_HALT) request, we have to issue
483 * the Reset Endpoint command.
484 */
485 if (batch->ep->transfer_type == USB_TRANSFER_CONTROL
486 && batch->dir == USB_DIRECTION_OUT) {
487 const usb_device_request_setup_packet_t *request = &batch->setup.packet;
488 if (request->request == USB_DEVREQ_CLEAR_FEATURE
489 && request->request_type == USB_REQUEST_RECIPIENT_ENDPOINT
490 && request->value == USB_FEATURE_ENDPOINT_HALT) {
491 const uint16_t index = uint16_usb2host(request->index);
492 const usb_endpoint_t ep_num = index & 0xf;
493 const usb_direction_t dir = (index >> 7)
494 ? USB_DIRECTION_IN
495 : USB_DIRECTION_OUT;
496 endpoint_t *halted_ep = bus_find_endpoint(&xhci_dev->base, ep_num, dir);
497 if (halted_ep) {
498 /*
499 * TODO: Find out how to come up with stream_id. It might be
500 * possible that we have to clear all of them.
501 */
502 const int err = xhci_endpoint_clear_halt(xhci_endpoint_get(halted_ep), 0);
503 endpoint_del_ref(halted_ep);
504 if (err) {
505 /*
506 * The endpoint halt condition failed to be cleared in HC.
507 * As it does not make sense to send the reset to the device
508 * itself, return as unschedulable answer.
509 *
510 * Furthermore, if this is a request to clear EP 0 stall, it
511 * would be gone forever, as the endpoint is halted.
512 */
513 return err;
514 }
515 } else {
516 usb_log_warning("Device(%u): Resetting unregistered endpoint"
517 " %u %s.", xhci_dev->base.address, ep_num,
518 usb_str_direction(dir));
519 }
520 }
521 }
522
523
524 int err;
525 fibril_mutex_lock(&xhci_ep->guard);
526
527 if ((err = endpoint_activate_locked(ep, batch))) {
528 fibril_mutex_unlock(&xhci_ep->guard);
529 return err;
530 }
531
532 if ((err = transfer_handlers[batch->ep->transfer_type](hc, transfer))) {
533 endpoint_deactivate_locked(ep);
534 fibril_mutex_unlock(&xhci_ep->guard);
535 return err;
536 }
537
538 hc_ring_ep_doorbell(xhci_ep, batch->target.stream);
539 fibril_mutex_unlock(&xhci_ep->guard);
540 return EOK;
541}
Note: See TracBrowser for help on using the repository browser.