source: mainline/uspace/drv/bus/usb/xhci/transfers.c @ f92f6b1

Last change on this file since f92f6b1 was 708d8fcd, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: rewritten isochronous transfers

There was a fundamental problem with relying on the hardware to send
RING_OVERRUN/UNDERRUN events, which QEMU (and possibly other
implementations) do not send. As a result, the driver could not tell
whether the transfer was still on schedule and had to ring the doorbell
every time. That is not feasible, because the transfer could then run
more often than it should. Furthermore, it ignored the fact that
isochronous TRBs must be scheduled not only not too late, but also not
too soon (see section 4.11.2.5 of the xHCI spec).

Now, handing TRBs over to the hardware is called feeding, and it can be
delayed by setting a timer. Ring overruns/underruns are also detected at
the end of handling an event.
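
The feeding mechanism itself is not visible in this file (it lives in the
isochronous code); the following is only a rough sketch of the idea under
stated assumptions: the helpers isoch_window_open() and isoch_feed_now() are
hypothetical, and the fibril timer API from <fibril_synch.h> is assumed to
behave as it does elsewhere in HelenOS.

#include <fibril_synch.h>

typedef struct {
	fibril_timer_t *feed_timer;	/* postpones feeding when TRBs would be too early */
} isoch_state_t;

/* Timer callback: the scheduling window has opened, so feed the ring now. */
static void feed_timer_cb(void *arg)
{
	isoch_feed_now((isoch_state_t *) arg);
}

static void isoch_try_feed(isoch_state_t *isoch)
{
	suseconds_t delay;

	if (isoch_window_open(isoch, &delay)) {
		/* Neither too late nor too soon (xHCI 4.11.2.5): feed right away. */
		isoch_feed_now(isoch);
	} else {
		/* Too soon: arm a timer instead of ringing the doorbell repeatedly. */
		fibril_timer_set(isoch->feed_timer, delay, feed_timer_cb, isoch);
	}
}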

/*
 * Copyright (c) 2017 Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller transfer ring management
 */

#include <usb/debug.h>
#include <usb/request.h>
#include "endpoint.h"
#include "hc.h"
#include "hw_struct/trb.h"
#include "transfers.h"
#include "trb_ring.h"

typedef enum {
	STAGE_OUT,
	STAGE_IN,
} stage_dir_flag_t;

#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)


/** Get direction flag of the status stage.
 * See Table 7 of the xHCI specification.
 */
static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
    uint8_t bmRequestType, uint16_t wLength)
{
	/*
	 * See Table 7 of the xHCI specification: the status stage runs in the
	 * direction opposite to the data stage (IN data -> OUT status).
	 */
	return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
	    ? STAGE_OUT
	    : STAGE_IN;
}

typedef enum {
	DATA_STAGE_NO = 0,
	DATA_STAGE_OUT = 2,
	DATA_STAGE_IN = 3,
} data_stage_type_t;

/** Get transfer type flag.
 * See Table 8 of xHCI specification.
 */
static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
    bmRequestType, uint16_t wLength)
{
	if (wLength == 0)
		return DATA_STAGE_NO;

	/*
	 * See Table 8 of the xHCI specification: with a nonzero wLength there
	 * is always a data stage, either IN or OUT.
	 */
	return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
	    ? DATA_STAGE_IN
	    : DATA_STAGE_OUT;
}

static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
{
	usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);

	return request_type == USB_REQUEST_TYPE_STANDARD &&
	    (setup->request == USB_DEVREQ_SET_CONFIGURATION
	    || setup->request == USB_DEVREQ_SET_INTERFACE);
}

/**
 * Create an xHCI-specific transfer batch.
 *
 * Bus callback.
 */
usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep)
{
	xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
	if (!transfer)
		return NULL;

	usb_transfer_batch_init(&transfer->batch, ep);
	return &transfer->batch;
}

/**
 * Destroy an xHCI transfer.
 */
void xhci_transfer_destroy(usb_transfer_batch_t* batch)
{
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

	dma_buffer_free(&transfer->hc_buffer);
	free(transfer);
}

static xhci_trb_ring_t *get_ring(xhci_hc_t *hc, xhci_transfer_t *transfer)
{
	return &xhci_endpoint_get(transfer->batch.ep)->ring;
}

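/*
 * A control transfer is built as a single TD consisting of a Setup Stage TRB,
 * an optional Data Stage TRB and a Status Stage TRB (xHCI specification,
 * section 4.11.2.2).
 */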
static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	usb_transfer_batch_t *batch = &transfer->batch;
	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

	usb_device_request_setup_packet_t* setup = &batch->setup.packet;

	xhci_trb_t trbs[3];
	int trbs_used = 0;

	xhci_trb_t *trb_setup = trbs + trbs_used++;
	xhci_trb_clean(trb_setup);

	TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
	TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
	TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
	TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
	TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);

	/* Size of the setup packet is always 8 */
	TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

	/* Immediate data */
	TRB_CTRL_SET_IDT(*trb_setup, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
	TRB_CTRL_SET_TRT(*trb_setup, get_transfer_type(trb_setup, setup->request_type, setup->length));

	/* Data stage */
	xhci_trb_t *trb_data = NULL;
	if (setup->length > 0) {
		trb_data = trbs + trbs_used++;
		xhci_trb_clean(trb_data);

		trb_data->parameter = host2xhci(64, transfer->hc_buffer.phys);

		// data size (sent for OUT, or buffer size)
		TRB_CTRL_SET_XFER_LEN(*trb_data, batch->buffer_size);
		// FIXME: TD size 4.11.2.4
		TRB_CTRL_SET_TD_SIZE(*trb_data, 1);

		// Mark this TRB as the Data Stage TRB of the TD.
		TRB_CTRL_SET_TRB_TYPE(*trb_data, XHCI_TRB_TYPE_DATA_STAGE);

		int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
		    ? STAGE_IN : STAGE_OUT;
		TRB_CTRL_SET_DIR(*trb_data, stage_dir);
	}

	/* Status stage */
	xhci_trb_t *trb_status = trbs + trbs_used++;
	xhci_trb_clean(trb_status);

	// FIXME: Evaluate next TRB? 4.12.3
	// TRB_CTRL_SET_ENT(*trb_status, 1);

	TRB_CTRL_SET_IOC(*trb_status, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
	TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup, setup->request_type, setup->length));

	// Issue a Configure Endpoint command, if needed.
	if (configure_endpoint_needed(setup)) {
		const int err = hc_configure_device(hc, xhci_ep_to_dev(xhci_ep)->slot_id);
		if (err)
			return err;
	}

	return xhci_trb_ring_enqueue_multiple(ring, trbs, trbs_used, &transfer->interrupt_trb_phys);
}

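/*
 * Bulk and interrupt transfers are each submitted as a single Normal TRB
 * (a one-TRB TD) pointing at the batch's DMA buffer.
 */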
static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);
	trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

	// data size (sent for OUT, or buffer size)
	TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
	// FIXME: TD size 4.11.2.4
	TRB_CTRL_SET_TD_SIZE(trb, 1);

	// we want an interrupt after this td is done
	TRB_CTRL_SET_IOC(trb, 1);

	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

	xhci_trb_ring_t* ring = get_ring(hc, transfer);

	return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);
	trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

	// data size (sent for OUT, or buffer size)
	TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
	// FIXME: TD size 4.11.2.4
	TRB_CTRL_SET_TD_SIZE(trb, 1);

	// we want an interrupt after this td is done
	TRB_CTRL_SET_IOC(trb, 1);

	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

	xhci_trb_ring_t* ring = get_ring(hc, transfer);

	return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

static int schedule_isochronous(xhci_transfer_t* transfer)
{
	endpoint_t *ep = transfer->batch.ep;

	return ep->direction == USB_DIRECTION_OUT
	    ? isoch_schedule_out(transfer)
	    : isoch_schedule_in(transfer);
}

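/**
 * Handle a Transfer Event TRB: look up the device and endpoint from the
 * slot ID and DCI, update the ring's dequeue pointer, hand isochronous
 * events over to the isochronous machinery, and otherwise complete the
 * endpoint's active batch.
 */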
int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
{
	uintptr_t addr = trb->parameter;
	const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
	const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

	xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
	if (!dev) {
		usb_log_error("Transfer event on disabled slot %u", slot_id);
		return ENOENT;
	}

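	/*
	 * The DCI encodes both endpoint number and direction (DCI = 2 * ep_num
	 * + direction for non-control endpoints, DCI = 1 for the default control
	 * endpoint), so integer division by two yields the endpoint number.
	 */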
	const usb_endpoint_t ep_num = ep_dci / 2;
	xhci_endpoint_t *ep = xhci_device_get_endpoint(dev, ep_num);
	if (!ep) {
		usb_log_error("Transfer event on dropped endpoint %u of device "
		    XHCI_DEV_FMT, ep_num, XHCI_DEV_ARGS(*dev));
		return ENOENT;
	}
	// No need to add reference for endpoint, it is held by the transfer batch.

	/* FIXME: This is racy. Do we care? */
	ep->ring.dequeue = addr;

	if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return isoch_handle_transfer_event(hc, ep, trb);
	}

	fibril_mutex_lock(&ep->base.guard);
	usb_transfer_batch_t *batch = ep->base.active_batch;
	if (!batch) {
		fibril_mutex_unlock(&ep->base.guard);
		return ENOENT;
	}

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
	case XHCI_TRBC_SHORT_PACKET:
		usb_log_debug("Short transfer.");
		/* fallthrough */
	case XHCI_TRBC_SUCCESS:
		batch->error = EOK;
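		/*
		 * The transfer length field of a Transfer Event holds the residue,
		 * i.e. the number of bytes that were not transferred.
		 */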
		batch->transfered_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
		break;

	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		batch->error = EIO;
	}

	endpoint_deactivate_locked(&ep->base);
	fibril_mutex_unlock(&ep->base.guard);

	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

	if (batch->dir == USB_DIRECTION_IN) {
		assert(batch->buffer);
		assert(batch->transfered_size <= batch->buffer_size);
		memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transfered_size);
	}

	usb_transfer_batch_finish(batch);
	return EOK;
}

typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

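/*
 * Isochronous transfers take a dedicated path (schedule_isochronous above),
 * hence the NULL entry in this table.
 */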
static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};

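/**
 * Schedule a transfer batch on its endpoint: allocate a DMA-capable buffer,
 * copy outgoing data into it, enqueue the TRBs on the endpoint's transfer
 * ring and finally ring the doorbell.
 */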
int xhci_transfer_schedule(xhci_hc_t *hc, usb_transfer_batch_t *batch)
{
	assert(hc);
	endpoint_t *ep = batch->ep;

	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
	xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

	// FIXME: find a better way to check if the ring is not initialized
	if (!xhci_ep->ring.segment_count) {
		usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
		    XHCI_EP_ARGS(*xhci_ep));
		return EINVAL;
	}

	// Isochronous transfers need to be handled differently
	if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return schedule_isochronous(transfer);
	}

	const usb_transfer_type_t type = batch->ep->transfer_type;
	assert(type >= 0 && type < ARRAY_SIZE(transfer_handlers));
	assert(transfer_handlers[type]);

	if (batch->buffer_size > 0) {
		if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
			return ENOMEM;
	}

	if (batch->dir != USB_DIRECTION_IN) {
		// Sending stuff from host to device, we need to copy the actual data.
		memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
	}

	fibril_mutex_lock(&ep->guard);
	endpoint_activate_locked(ep, batch);
	const int err = transfer_handlers[batch->ep->transfer_type](hc, transfer);

	if (err) {
		endpoint_deactivate_locked(ep);
		fibril_mutex_unlock(&ep->guard);
		return err;
	}

	/* After the critical section, the transfer can already be finished or aborted. */
	transfer = NULL; batch = NULL;
	fibril_mutex_unlock(&ep->guard);

	const uint8_t slot_id = xhci_dev->slot_id;
	const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
	hc_ring_doorbell(hc, slot_id, target);
	return EOK;
}