source: mainline/uspace/drv/bus/usb/xhci/transfers.c

Last change on this file was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

  • Property mode set to 100644
File size: 14.8 KB
/*
 * Copyright (c) 2018 Michal Staruch, Ondrej Hlavaty, Petr Manek, Jan Hrach
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller transfer ring management
 */

#include <usb/debug.h>
#include <usb/request.h>
#include "endpoint.h"
#include "hc.h"
#include "hw_struct/trb.h"
#include "streams.h"
#include "transfers.h"
#include "trb_ring.h"

typedef enum {
	STAGE_OUT,
	STAGE_IN,
} stage_dir_flag_t;

/** Get direction flag of the status stage.
 * See Table 7 of xHCI specification.
 */
static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t *trb,
    uint8_t bmRequestType, uint16_t wLength)
{
	/* See Table 7 of xHCI specification */
	return SETUP_REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0) ?
	    STAGE_OUT :
	    STAGE_IN;
}

typedef enum {
	DATA_STAGE_NO = 0,
	DATA_STAGE_OUT = 2,
	DATA_STAGE_IN = 3,
} data_stage_type_t;

/** Get transfer type flag.
 * See Table 8 of xHCI specification.
 */
static inline data_stage_type_t get_transfer_type(xhci_trb_t *trb, uint8_t
    bmRequestType, uint16_t wLength)
{
	if (wLength == 0)
		return DATA_STAGE_NO;

	/* See Table 7 of xHCI specification */
	return SETUP_REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) ?
	    DATA_STAGE_IN :
	    DATA_STAGE_OUT;
}

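/*
 * SET_CONFIGURATION and SET_INTERFACE change which endpoints are in use,
 * so schedule_control() below lets the HC update the device context with
 * a Configure Endpoint command before putting such a request on the ring.
 */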
static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
{
	usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);

	return request_type == USB_REQUEST_TYPE_STANDARD &&
	    (setup->request == USB_DEVREQ_SET_CONFIGURATION ||
	    setup->request == USB_DEVREQ_SET_INTERFACE);
}

/**
 * Create an xHCI-specific transfer batch.
 *
 * Bus callback.
 */
usb_transfer_batch_t *xhci_transfer_create(endpoint_t *ep)
{
	xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
	if (!transfer)
		return NULL;

	usb_transfer_batch_init(&transfer->batch, ep);
	return &transfer->batch;
}

/**
 * Destroy an xHCI transfer.
 */
void xhci_transfer_destroy(usb_transfer_batch_t *batch)
{
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	free(transfer);
}

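/*
 * Pick the TRB ring on which a batch is to be scheduled. An endpoint with
 * streams enabled keeps a separate ring per stream ID; other endpoints use
 * their single default transfer ring.
 */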
static xhci_trb_ring_t *get_ring(xhci_transfer_t *transfer)
{
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);
	return xhci_endpoint_get_ring(xhci_ep, transfer->batch.target.stream);
}

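/*
 * A single TRB can describe at most 64 KiB of data and must respect the
 * chunk boundaries of the buffer's DMA policy, so larger batches are split
 * into a chain of Normal TRBs by the splitter below.
 */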
#define MAX_CHUNK_SIZE (1 << 16)

typedef struct {
	/* Input parameters */
	dma_buffer_t buf;
	size_t chunk_size, packet_count, mps, max_trb_count;

	/* Changing at runtime */
	size_t transferred, remaining;
	void *pos;
} trb_splitter_t;

static void trb_splitter_init(trb_splitter_t *ts, xhci_transfer_t *transfer)
{
	ts->buf = transfer->batch.dma_buffer;

	const size_t chunk_mask = dma_policy_chunk_mask(ts->buf.policy);
	ts->chunk_size = (chunk_mask > MAX_CHUNK_SIZE + 1) ?
	    MAX_CHUNK_SIZE : (chunk_mask + 1);

	ts->remaining = transfer->batch.size;
	ts->max_trb_count = (ts->remaining + ts->chunk_size - 1) / ts->chunk_size + 1;
	ts->mps = transfer->batch.ep->max_packet_size;
	ts->packet_count = (ts->remaining + ts->mps - 1) / ts->mps;

	ts->transferred = 0;
	ts->pos = ts->buf.virt + transfer->batch.offset;
}

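/*
 * Fill the next Normal TRB with at most one chunk of the buffer. The TD Size
 * field tells the xHC how many packets remain in the TD after this TRB
 * (capped at 31, the field being 5 bits wide), and the Chain bit joins all
 * TRBs of the same TD together.
 */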
static void trb_split_next(xhci_trb_t *trb, trb_splitter_t *ts)
{
	xhci_trb_clean(trb);

	size_t size = min(ts->remaining, ts->chunk_size);

	/* First TRB might be misaligned */
	if (ts->transferred == 0) {
		const size_t offset = (ts->pos - ts->buf.virt) % ts->chunk_size;
		size = min(size, ts->chunk_size - offset);
	}

	ts->transferred += size;
	ts->remaining -= size;

	const size_t tx_packets = (ts->transferred + ts->mps - 1) / ts->mps;
	const unsigned td_size = min(31, ts->packet_count - tx_packets);

	/* Last TRB must have TD Size = 0 */
	assert(ts->remaining > 0 || td_size == 0);

	uintptr_t phys = dma_buffer_phys(&ts->buf, ts->pos);

	trb->parameter = host2xhci(64, phys);
	TRB_CTRL_SET_TD_SIZE(*trb, td_size);
	TRB_CTRL_SET_XFER_LEN(*trb, size);
	TRB_CTRL_SET_TRB_TYPE(*trb, XHCI_TRB_TYPE_NORMAL);

	if (ts->remaining)
		TRB_CTRL_SET_CHAIN(*trb, 1);

	ts->pos += size;
}

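/*
 * A control transfer is placed on the ring in up to three stages: a Setup
 * Stage TRB carrying the 8-byte setup packet as immediate data, an optional
 * data stage (a Data Stage TRB followed by chained Normal TRBs), and a
 * Status Stage TRB whose direction runs opposite to the data stage (IN when
 * there is no data stage).
 */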
static errno_t schedule_control(xhci_hc_t *hc, xhci_transfer_t *transfer)
{
	usb_transfer_batch_t *batch = &transfer->batch;
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

	usb_device_request_setup_packet_t *setup = &batch->setup.packet;

	trb_splitter_t splitter;
	trb_splitter_init(&splitter, transfer);

	xhci_trb_t trbs[splitter.max_trb_count + 2];
	size_t trbs_used = 0;

	xhci_trb_t *trb_setup = &trbs[trbs_used++];
	xhci_trb_clean(trb_setup);

	trb_setup->parameter = batch->setup.packed;

	/* Size of the setup packet is always 8 */
	TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

	/* Immediate data */
	TRB_CTRL_SET_IDT(*trb_setup, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
	TRB_CTRL_SET_TRT(*trb_setup,
	    get_transfer_type(trb_setup, setup->request_type, setup->length));

	stage_dir_flag_t stage_dir = (transfer->batch.dir == USB_DIRECTION_IN) ?
	    STAGE_IN : STAGE_OUT;

	/* Data stage - first TRB is special */
	if (splitter.remaining > 0) {
		xhci_trb_t *trb = &trbs[trbs_used++];
		trb_split_next(trb, &splitter);
		TRB_CTRL_SET_TRB_TYPE(*trb, XHCI_TRB_TYPE_DATA_STAGE);
		TRB_CTRL_SET_DIR(*trb, stage_dir);
	}
	while (splitter.remaining > 0)
		trb_split_next(&trbs[trbs_used++], &splitter);

	/* Status stage */
	xhci_trb_t *trb_status = &trbs[trbs_used++];
	xhci_trb_clean(trb_status);

	TRB_CTRL_SET_IOC(*trb_status, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
	TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup,
	    setup->request_type, setup->length));

	// Issue a Configure Endpoint command, if needed.
	if (configure_endpoint_needed(setup)) {
		const errno_t err = hc_configure_device(xhci_ep_to_dev(xhci_ep));
		if (err)
			return err;
	}

	return xhci_trb_ring_enqueue_multiple(get_ring(transfer), trbs,
	    trbs_used, &transfer->interrupt_trb_phys);
}

static errno_t schedule_bulk_intr(xhci_hc_t *hc, xhci_transfer_t *transfer)
{
	xhci_trb_ring_t *const ring = get_ring(transfer);
	if (!ring)
		return EINVAL;

	/* The stream-enabled endpoints need to chain ED trb */
	xhci_endpoint_t *ep = xhci_endpoint_get(transfer->batch.ep);
	const bool use_streams = !!ep->primary_stream_data_size;

	trb_splitter_t splitter;
	trb_splitter_init(&splitter, transfer);

	const size_t trb_count = splitter.max_trb_count + use_streams;
	xhci_trb_t trbs[trb_count];
	size_t trbs_used = 0;

	while (splitter.remaining > 0)
		trb_split_next(&trbs[trbs_used++], &splitter);

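	/*
	 * Completion is signalled differently with and without streams: a plain
	 * endpoint just raises an interrupt on its last TRB, while a stream ring
	 * chains an Event Data TRB carrying the transfer pointer, which the xHC
	 * copies into the Transfer Event so the handler can locate the batch.
	 */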
	if (!use_streams) {
		/* Set the interrupt bit for last TRB */
		TRB_CTRL_SET_IOC(trbs[trbs_used - 1], 1);
	} else {
		/* Chain the last TRB with the Event Data TRB */
		TRB_CTRL_SET_CHAIN(trbs[trbs_used - 1], 1);
		TRB_CTRL_SET_ENT(trbs[trbs_used - 1], 1);

		xhci_trb_t *ed = &trbs[trbs_used++];
		xhci_trb_clean(ed);
		ed->parameter = host2xhci(64, (uintptr_t) transfer);
		TRB_CTRL_SET_TRB_TYPE(*ed, XHCI_TRB_TYPE_EVENT_DATA);
		TRB_CTRL_SET_IOC(*ed, 1);
	}

	return xhci_trb_ring_enqueue_multiple(ring, trbs, trbs_used,
	    &transfer->interrupt_trb_phys);
}

static int schedule_isochronous(xhci_transfer_t *transfer)
{
	endpoint_t *ep = transfer->batch.ep;

	return ep->direction == USB_DIRECTION_OUT ?
	    isoch_schedule_out(transfer) :
	    isoch_schedule_in(transfer);
}

errno_t xhci_handle_transfer_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
	uintptr_t addr = trb->parameter;
	const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
	const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

	xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
	if (!dev) {
		usb_log_error("Transfer event on disabled slot %u", slot_id);
		return ENOENT;
	}

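	/*
	 * The Endpoint ID reported in the event is the Device Context Index:
	 * twice the endpoint number, plus one for IN (and control) endpoints.
	 */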
	const usb_endpoint_t ep_num = ep_dci / 2;
	const usb_direction_t dir = ep_dci % 2 ? USB_DIRECTION_IN : USB_DIRECTION_OUT;
	/* Creating temporary reference */
	endpoint_t *ep_base = bus_find_endpoint(&dev->base, ep_num, dir);
	if (!ep_base) {
		usb_log_error("Transfer event on dropped endpoint %u %s of device "
		    XHCI_DEV_FMT, ep_num, usb_str_direction(dir), XHCI_DEV_ARGS(*dev));
		return ENOENT;
	}
	xhci_endpoint_t *ep = xhci_endpoint_get(ep_base);

	usb_transfer_batch_t *batch;
	xhci_transfer_t *transfer;

	if (TRB_EVENT_DATA(*trb)) {
		/* We schedule those only when streams are involved */
		assert(ep->primary_stream_ctx_array != NULL);

		/* We received a pointer to the transfer instead - work with that */
		transfer = (xhci_transfer_t *) addr;
		xhci_trb_ring_update_dequeue(get_ring(transfer),
		    transfer->interrupt_trb_phys);
		batch = &transfer->batch;
	} else {
		xhci_trb_ring_update_dequeue(&ep->ring, addr);

		if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
			isoch_handle_transfer_event(hc, ep, trb);
			/* Dropping temporary reference */
			endpoint_del_ref(&ep->base);
			return EOK;
		}

		fibril_mutex_lock(&ep->guard);
		batch = ep->base.active_batch;
		endpoint_deactivate_locked(&ep->base);
		fibril_mutex_unlock(&ep->guard);

		if (!batch) {
			/* Dropping temporary reference */
			endpoint_del_ref(&ep->base);
			return ENOENT;
		}

		transfer = xhci_transfer_from_batch(batch);
	}

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
	case XHCI_TRBC_SHORT_PACKET:
	case XHCI_TRBC_SUCCESS:
		batch->error = EOK;
		batch->transferred_size = batch->size - TRB_TRANSFER_LENGTH(*trb);
		break;

	case XHCI_TRBC_DATA_BUFFER_ERROR:
		usb_log_warning("Transfer ended with data buffer error.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_BABBLE_DETECTED_ERROR:
		usb_log_warning("Babble detected during the transfer.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_USB_TRANSACTION_ERROR:
		usb_log_warning("USB Transaction error.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_TRB_ERROR:
		usb_log_error("Invalid transfer parameters.");
		batch->error = EINVAL;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_STALL_ERROR:
		usb_log_warning("Stall condition detected.");
		batch->error = ESTALL;
		batch->transferred_size = 0;
		break;

	case XHCI_TRBC_SPLIT_TRANSACTION_ERROR:
		usb_log_error("Split transaction error detected.");
		batch->error = EAGAIN;
		batch->transferred_size = 0;
		break;

	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		batch->error = EIO;
	}

	assert(batch->transferred_size <= batch->size);

	usb_transfer_batch_finish(batch);
	/* Dropping temporary reference */
	endpoint_del_ref(&ep->base);
	return EOK;
}

typedef errno_t (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
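	/* Isochronous transfers are dispatched separately in xhci_transfer_schedule() */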
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk_intr,
	[USB_TRANSFER_INTERRUPT] = schedule_bulk_intr,
};

/**
 * Schedule a batch for xHC.
 *
 * Bus callback.
 */
errno_t xhci_transfer_schedule(usb_transfer_batch_t *batch)
{
	endpoint_t *ep = batch->ep;

	xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep));
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
	xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

	if (!batch->target.address) {
		usb_log_error("Attempted to schedule transfer to address 0.");
		return EINVAL;
	}

	// FIXME: find a better way to check if the ring is not initialized
	if (!xhci_ep->ring.segment_count) {
		usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
		    XHCI_EP_ARGS(*xhci_ep));
		return EINVAL;
	}

	// Isochronous transfer needs to be handled differently
	if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return schedule_isochronous(transfer);
	}

	const usb_transfer_type_t type = batch->ep->transfer_type;
	assert(transfer_handlers[type]);

	/*
	 * If this is a ClearFeature(ENDPOINT_HALT) request, we have to issue
	 * the Reset Endpoint command.
	 */
	if (batch->ep->transfer_type == USB_TRANSFER_CONTROL &&
	    batch->dir == USB_DIRECTION_OUT) {
		const usb_device_request_setup_packet_t *request = &batch->setup.packet;
		if (request->request == USB_DEVREQ_CLEAR_FEATURE &&
		    request->request_type == USB_REQUEST_RECIPIENT_ENDPOINT &&
		    request->value == USB_FEATURE_ENDPOINT_HALT) {
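			/*
			 * For an endpoint recipient, wIndex encodes the target:
			 * bits 3:0 hold the endpoint number and bit 7 the
			 * direction (1 = IN), per the USB specification.
			 */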
			const uint16_t index = uint16_usb2host(request->index);
			const usb_endpoint_t ep_num = index & 0xf;
			const usb_direction_t dir = (index >> 7) ?
			    USB_DIRECTION_IN :
			    USB_DIRECTION_OUT;
			endpoint_t *halted_ep = bus_find_endpoint(&xhci_dev->base, ep_num, dir);
			if (halted_ep) {
				/*
				 * TODO: Find out how to come up with stream_id. It might be
				 * possible that we have to clear all of them.
				 */
				const errno_t err = xhci_endpoint_clear_halt(xhci_endpoint_get(halted_ep), 0);
				endpoint_del_ref(halted_ep);
				if (err) {
					/*
					 * The endpoint halt condition failed to be cleared
					 * in the HC. As it does not make sense to send the
					 * reset to the device itself, report the transfer
					 * as unschedulable.
					 *
					 * Furthermore, if this is a request to clear an
					 * EP 0 stall, it would be gone forever, as the
					 * endpoint is halted.
					 */
					return err;
				}
			} else {
				usb_log_warning("Device(%u): Resetting unregistered endpoint"
				    " %u %s.", xhci_dev->base.address, ep_num,
				    usb_str_direction(dir));
			}
		}
	}

	errno_t err;
	fibril_mutex_lock(&xhci_ep->guard);

	if ((err = endpoint_activate_locked(ep, batch))) {
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	if ((err = transfer_handlers[batch->ep->transfer_type](hc, transfer))) {
		endpoint_deactivate_locked(ep);
		fibril_mutex_unlock(&xhci_ep->guard);
		return err;
	}

	hc_ring_ep_doorbell(xhci_ep, batch->target.stream);
	fibril_mutex_unlock(&xhci_ep->guard);
	return EOK;
}