/*
 * Copyright (c) 2017 Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller transfer ring management
 */

#include <usb/debug.h>
#include <usb/request.h>
#include "endpoint.h"
#include "hc.h"
#include "hw_struct/trb.h"
#include "transfers.h"
#include "trb_ring.h"

typedef enum {
	STAGE_OUT,
	STAGE_IN,
} stage_dir_flag_t;

#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)

/** Get direction flag of the status stage.
 * See Table 7 of xHCI specification.
 */
static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
	uint8_t bmRequestType, uint16_t wLength)
{
	/* See Table 7 of xHCI specification */
	return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
		? STAGE_OUT
		: STAGE_IN;
}

typedef enum {
	DATA_STAGE_NO = 0,
	DATA_STAGE_OUT = 2,
	DATA_STAGE_IN = 3,
} data_stage_type_t;

/** Get transfer type flag.
 * See Table 8 of xHCI specification.
 */
static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb, uint8_t
	bmRequestType, uint16_t wLength)
{
	if (wLength == 0)
		return DATA_STAGE_NO;

	/* See Table 8 of xHCI specification */
	return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
		? DATA_STAGE_IN
		: DATA_STAGE_OUT;
}

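/** Check whether the control request calls for a Configure Endpoint command,
 * i.e. whether it is a standard SET_CONFIGURATION or SET_INTERFACE request.
 */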
static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
{
	usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);

	return request_type == USB_REQUEST_TYPE_STANDARD &&
		(setup->request == USB_DEVREQ_SET_CONFIGURATION
		|| setup->request == USB_DEVREQ_SET_INTERFACE);
}

/**
 * Create an xHCI-specific transfer batch.
 *
 * Bus callback.
 */
usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep)
{
	xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
	if (!transfer)
		return NULL;

	usb_transfer_batch_init(&transfer->batch, ep);
	return &transfer->batch;
}

/**
 * Destroy an xHCI transfer.
 */
void xhci_transfer_destroy(usb_transfer_batch_t* batch)
{
	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

	dma_buffer_free(&transfer->hc_buffer);
	free(transfer);
}

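/** Get the transfer ring of the endpoint the batch is scheduled on. */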
static xhci_trb_ring_t *get_ring(xhci_hc_t *hc, xhci_transfer_t *transfer)
{
	return &xhci_endpoint_get(transfer->batch.ep)->ring;
}

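/** Schedule a control transfer.
 *
 * Builds a Setup Stage TRB (with immediate data), an optional Data Stage TRB
 * and a Status Stage TRB, and enqueues them on the endpoint's transfer ring.
 */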
static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	usb_transfer_batch_t *batch = &transfer->batch;
	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

	usb_device_request_setup_packet_t* setup = &batch->setup.packet;

	xhci_trb_t trbs[3];
	int trbs_used = 0;

	xhci_trb_t *trb_setup = trbs + trbs_used++;
	xhci_trb_clean(trb_setup);

	TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
	TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
	TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
	TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
	TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);

	/* Size of the setup packet is always 8 */
	TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

	/* Immediate data */
	TRB_CTRL_SET_IDT(*trb_setup, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
	TRB_CTRL_SET_TRT(*trb_setup, get_transfer_type(trb_setup, setup->request_type, setup->length));

	/* Data stage */
	xhci_trb_t *trb_data = NULL;
	if (setup->length > 0) {
		trb_data = trbs + trbs_used++;
		xhci_trb_clean(trb_data);

		trb_data->parameter = host2xhci(64, transfer->hc_buffer.phys);

		// Data size (bytes to send for OUT, or the buffer size for IN)
		TRB_CTRL_SET_XFER_LEN(*trb_data, batch->buffer_size);
		// FIXME: TD size 4.11.2.4
		TRB_CTRL_SET_TD_SIZE(*trb_data, 1);

		// Some more fields here, no idea what they mean
		TRB_CTRL_SET_TRB_TYPE(*trb_data, XHCI_TRB_TYPE_DATA_STAGE);

		int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
			? STAGE_IN : STAGE_OUT;
		TRB_CTRL_SET_DIR(*trb_data, stage_dir);
	}

	/* Status stage */
	xhci_trb_t *trb_status = trbs + trbs_used++;
	xhci_trb_clean(trb_status);

	// FIXME: Evaluate next TRB? 4.12.3
	// TRB_CTRL_SET_ENT(*trb_status, 1);

	TRB_CTRL_SET_IOC(*trb_status, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
	TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup, setup->request_type, setup->length));

	// Issue a Configure Endpoint command, if needed.
	if (configure_endpoint_needed(setup)) {
		const int err = hc_configure_device(hc, xhci_ep_to_dev(xhci_ep)->slot_id);
		if (err)
			return err;
	}

	return xhci_trb_ring_enqueue_multiple(ring, trbs, trbs_used, &transfer->interrupt_trb_phys);
}

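/** Schedule a bulk transfer as a single Normal TRB pointing to the DMA buffer. */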
static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);
	trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

	// Data size (bytes to send for OUT, or the buffer size for IN)
	TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
	// FIXME: TD size 4.11.2.4
	TRB_CTRL_SET_TD_SIZE(trb, 1);

	// We want an interrupt after this TD is done
	TRB_CTRL_SET_IOC(trb, 1);

	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

	xhci_trb_ring_t* ring = get_ring(hc, transfer);

	return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

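/** Schedule an interrupt transfer as a single Normal TRB pointing to the DMA buffer. */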
static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);
	trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

	// Data size (bytes to send for OUT, or the buffer size for IN)
	TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
	// FIXME: TD size 4.11.2.4
	TRB_CTRL_SET_TD_SIZE(trb, 1);

	// We want an interrupt after this TD is done
	TRB_CTRL_SET_IOC(trb, 1);

	TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

	xhci_trb_ring_t* ring = get_ring(hc, transfer);

	return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

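/** Get the next slot of the endpoint's circular isochronous transfer buffer
 * and advance the enqueue index, or return NULL if the next slot would catch
 * up with the dequeue index.
 */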
static xhci_isoch_transfer_t *isoch_transfer_get_enqueue(xhci_endpoint_t *ep)
{
	if (((ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT) == ep->isoch_dequeue) {
		/* None ready */
		return NULL;
	}
	xhci_isoch_transfer_t *isoch_transfer = &ep->isoch_transfers[ep->isoch_enqueue];
	ep->isoch_enqueue = (ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT;
	return isoch_transfer;
}

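/** Take the slot of the endpoint's circular isochronous transfer buffer at
 * the dequeue index and advance the index.
 */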
static xhci_isoch_transfer_t *isoch_transfer_get_dequeue(xhci_endpoint_t *ep)
{
	xhci_isoch_transfer_t *isoch_transfer = &ep->isoch_transfers[ep->isoch_dequeue];
	ep->isoch_dequeue = (ep->isoch_dequeue + 1) % XHCI_ISOCH_BUFFER_COUNT;
	return isoch_transfer;
}

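/** Fill the isochronous-specific fields of a TRB and enqueue it on the ring.
 *
 * TDPC, TBC and TLBPC are derived as described in 4.14.1 and 4.11.2.3,
 * assuming the 1024-byte maximum packet size hardcoded below. For example,
 * len = 3000 and max_burst = 2 yield tdpc = 3, tbc = 1 and tlbpc = 0.
 */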
static int schedule_isochronous_trb(xhci_trb_ring_t *ring, xhci_endpoint_t *ep, xhci_trb_t *trb,
	const size_t len, uintptr_t *trb_phys)
{
	TRB_CTRL_SET_XFER_LEN(*trb, len);
	// FIXME: TD size 4.11.2.4 (there is no next TRB, so 0?)
	TRB_CTRL_SET_TD_SIZE(*trb, 0);
	TRB_CTRL_SET_IOC(*trb, 1);
	TRB_CTRL_SET_TRB_TYPE(*trb, XHCI_TRB_TYPE_ISOCH);

	// See 4.14.1 and 4.11.2.3 for an explanation of how to calculate these
	size_t tdpc = len / 1024 + ((len % 1024) ? 1 : 0);
	size_t tbc = tdpc / ep->max_burst;
	if (!(tdpc % ep->max_burst))
		--tbc;
	size_t bsp = tdpc % ep->max_burst;
	size_t tlbpc = (bsp ? bsp : ep->max_burst) - 1;

	TRB_CTRL_SET_TBC(*trb, tbc);
	TRB_CTRL_SET_TLBPC(*trb, tlbpc);

	// FIXME: do we want this? 6.4.1.3, p 366 (also possibly frame id?)
	TRB_CTRL_SET_SIA(*trb, 1);

	return xhci_trb_ring_enqueue(ring, trb, trb_phys);
}

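/** Schedule an isochronous OUT transfer.
 *
 * Copies the batch data into a free slot of the endpoint's isochronous
 * buffer (waiting for one if necessary), enqueues an Isoch TRB for it, and
 * rings the doorbell once enough slots have been prefilled to start the
 * endpoint. The batch is finished immediately.
 */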
static int schedule_isochronous_out(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
	xhci_device_t *xhci_dev)
{
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	fibril_mutex_lock(&xhci_ep->isoch_guard);
	xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	while (!isoch_transfer) {
		fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
		isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	}

	isoch_transfer->size = transfer->batch.buffer_size;
	if (isoch_transfer->size > 0) {
		memcpy(isoch_transfer->data.virt, transfer->batch.buffer, isoch_transfer->size);
	}

	trb.parameter = isoch_transfer->data.phys;

	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
		&isoch_transfer->interrupt_trb_phys);
	if (err) {
		fibril_mutex_unlock(&xhci_ep->isoch_guard);
		return err;
	}

	/* If not yet started, start the isochronous endpoint transfers - after (buffer count - 1) writes. */
	/* The -1 is there because of the enqueue != dequeue check. The buffer must hold at least 2 transfers. */
	if (((xhci_ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT) == xhci_ep->isoch_dequeue && !xhci_ep->isoch_started) {
		const uint8_t slot_id = xhci_dev->slot_id;
		const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
		err = hc_ring_doorbell(hc, slot_id, target);
		xhci_ep->isoch_started = true;
	}
	fibril_mutex_unlock(&xhci_ep->isoch_guard);
	if (err) {
		return err;
	}

	/* Isochronous transfers don't handle errors, they skip them all. */
	transfer->batch.error = EOK;
	transfer->batch.transfered_size = transfer->batch.buffer_size;
	usb_transfer_batch_finish(&transfer->batch);
	return EOK;
}

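/** Prefill the transfer ring with one Isoch TRB per free slot of the
 * endpoint's isochronous buffer.
 */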
static int schedule_isochronous_in_trbs(xhci_endpoint_t *xhci_ep, xhci_trb_ring_t *ring)
{
	xhci_trb_t trb;
	xhci_isoch_transfer_t *isoch_transfer;
	while ((isoch_transfer = isoch_transfer_get_enqueue(xhci_ep)) != NULL) {
		xhci_trb_clean(&trb);
		trb.parameter = isoch_transfer->data.phys;
		isoch_transfer->size = xhci_ep->isoch_max_size;

		int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
			&isoch_transfer->interrupt_trb_phys);
		if (err)
			return err;
	}
	return EOK;
}

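/** Schedule an isochronous IN transfer.
 *
 * On the first use, prefills the transfer ring and rings the doorbell to
 * start the endpoint. Then waits for received data, copies it into the batch
 * buffer and requeues the slot with a fresh Isoch TRB.
 */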
static int schedule_isochronous_in(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
	xhci_device_t *xhci_dev)
{
	fibril_mutex_lock(&xhci_ep->isoch_guard);
	/* If not yet started, start the isochronous endpoint transfers - before the first read. */
	if (!xhci_ep->isoch_started) {
		xhci_trb_ring_t *ring = get_ring(hc, transfer);
		/* Fill the TRB ring. */
		int err = schedule_isochronous_in_trbs(xhci_ep, ring);
		if (err) {
			fibril_mutex_unlock(&xhci_ep->isoch_guard);
			return err;
		}
		/* Ring the doorbell to start it. */
		const uint8_t slot_id = xhci_dev->slot_id;
		const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
		err = hc_ring_doorbell(hc, slot_id, target);
		if (err) {
			fibril_mutex_unlock(&xhci_ep->isoch_guard);
			return err;
		}
		xhci_ep->isoch_started = true;
	}

	xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	while (!isoch_transfer) {
		fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
		isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
	}

	/* isoch_transfer->size now holds the number of bytes actually received. */
	if (transfer->batch.buffer_size <= isoch_transfer->size) {
		if (transfer->batch.buffer_size > 0) {
			memcpy(transfer->batch.buffer, isoch_transfer->data.virt, transfer->batch.buffer_size);
		}
		if (transfer->batch.buffer_size < isoch_transfer->size) {
			// FIXME: somehow notify that the buffer was too small, probably via the batch error code
		}
		transfer->batch.transfered_size = transfer->batch.buffer_size;
	} else {
		memcpy(transfer->batch.buffer, isoch_transfer->data.virt, isoch_transfer->size);
		transfer->batch.transfered_size = isoch_transfer->size;
	}

	// Clear and requeue the transfer with a new TRB
	xhci_trb_t trb;
	xhci_trb_clean(&trb);

	trb.parameter = isoch_transfer->data.phys;
	isoch_transfer->size = xhci_ep->isoch_max_size;

	xhci_trb_ring_t *ring = get_ring(hc, transfer);
	int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
		&isoch_transfer->interrupt_trb_phys);
	fibril_mutex_unlock(&xhci_ep->isoch_guard);

	if (err) {
		return err;
	}

	/* Isochronous transfers don't handle errors, they skip them all. */
	transfer->batch.error = EOK;
	usb_transfer_batch_finish(&transfer->batch);
	return EOK;
}

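/** Schedule an isochronous transfer, dispatching to the IN or OUT variant.
 *
 * Transfers larger than the preallocated isochronous buffers are rejected.
 */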
static int schedule_isochronous(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
	xhci_device_t *xhci_dev)
{
	if (transfer->batch.buffer_size > xhci_ep->isoch_max_size) {
		usb_log_error("Cannot schedule an oversized isochronous transfer.");
		return EINVAL;
	}

	if (xhci_ep->base.direction == USB_DIRECTION_OUT) {
		return schedule_isochronous_out(hc, transfer, xhci_ep, xhci_dev);
	} else {
		return schedule_isochronous_in(hc, transfer, xhci_ep, xhci_dev);
	}
}

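/** Handle a Transfer Event generated for an isochronous endpoint.
 *
 * Updates the matching slot of the endpoint's isochronous buffer and wakes up
 * any fibril waiting for it.
 */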
static int handle_isochronous_transfer_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_endpoint_t *ep)
{
	fibril_mutex_lock(&ep->isoch_guard);

	int err = EOK;

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
	case XHCI_TRBC_RING_OVERRUN:
	case XHCI_TRBC_RING_UNDERRUN:
		/* Rings are unscheduled by the xHC now */
		ep->isoch_started = false;
		/* For OUT, there was nothing to process */
		/* For IN, the buffer has overfilled; we empty the buffers and re-add TRBs */
		ep->isoch_enqueue = ep->isoch_dequeue = 0;
		err = EIO;
		break;
	case XHCI_TRBC_SHORT_PACKET:
		usb_log_debug("Short transfer.");
		/* fallthrough */
	case XHCI_TRBC_SUCCESS:
		break;
	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		err = EIO;
	}

	xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_dequeue(ep);
	if (isoch_transfer->interrupt_trb_phys != trb->parameter) {
		usb_log_error("Non-matching TRB for the isochronous transfer, skipping.");
		// FIXME: what to do? probably just kill the whole endpoint
		err = ENOENT;
	}

	if (ep->base.direction == USB_DIRECTION_IN) {
		// We may have received less data than requested; subtract the residual length
		isoch_transfer->size -= TRB_TRANSFER_LENGTH(*trb);
	}

	fibril_condvar_signal(&ep->isoch_avail);
	fibril_mutex_unlock(&ep->isoch_guard);
	return err;
}

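/** Handle a Transfer Event TRB: look up the slot, endpoint and active batch
 * it belongs to, record the completion result and finish the batch.
 */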
int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
{
	uintptr_t addr = trb->parameter;
	const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
	const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

	xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
	if (!dev) {
		usb_log_error("Transfer event on disabled slot %u", slot_id);
		return ENOENT;
	}

	const usb_endpoint_t ep_num = ep_dci / 2;
	xhci_endpoint_t *ep = xhci_device_get_endpoint(dev, ep_num);
	if (!ep) {
		usb_log_error("Transfer event on dropped endpoint %u of device "
			XHCI_DEV_FMT, ep_num, XHCI_DEV_ARGS(*dev));
		return ENOENT;
	}

	/* FIXME: This is racy. Do we care? */
	ep->ring.dequeue = addr;

	if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return handle_isochronous_transfer_event(hc, trb, ep);
	}

	fibril_mutex_lock(&ep->base.guard);
	usb_transfer_batch_t *batch = ep->base.active_batch;
	if (!batch) {
		fibril_mutex_unlock(&ep->base.guard);
		return ENOENT;
	}

	const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
	switch (completion_code) {
	case XHCI_TRBC_SHORT_PACKET:
		usb_log_debug("Short transfer.");
		/* fallthrough */
	case XHCI_TRBC_SUCCESS:
		batch->error = EOK;
		batch->transfered_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
		break;

	default:
		usb_log_warning("Transfer not successful: %u", completion_code);
		batch->error = EIO;
	}

	endpoint_deactivate_locked(&ep->base);
	fibril_mutex_unlock(&ep->base.guard);

	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

	if (batch->dir == USB_DIRECTION_IN) {
		assert(batch->buffer);
		assert(batch->transfered_size <= batch->buffer_size);
		memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transfered_size);
	}

	usb_transfer_batch_finish(batch);
	return EOK;
}

typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

static const transfer_handler transfer_handlers[] = {
	[USB_TRANSFER_CONTROL] = schedule_control,
	[USB_TRANSFER_ISOCHRONOUS] = NULL,
	[USB_TRANSFER_BULK] = schedule_bulk,
	[USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};

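/** Schedule a transfer batch.
 *
 * Allocates a DMA buffer for the data, enqueues the TRBs on the endpoint's
 * transfer ring and rings the endpoint doorbell.
 */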
int xhci_transfer_schedule(xhci_hc_t *hc, usb_transfer_batch_t *batch)
{
	assert(hc);
	endpoint_t *ep = batch->ep;

	xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
	xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
	xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

	// FIXME: find a better way to check if the ring is not initialized
	if (!xhci_ep->ring.segment_count) {
		usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
			XHCI_EP_ARGS(*xhci_ep));
		return EINVAL;
	}

	// Isochronous transfers need to be handled differently
	if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
		return schedule_isochronous(hc, transfer, xhci_ep, xhci_dev);
	}

	const usb_transfer_type_t type = batch->ep->transfer_type;
	assert(type >= 0 && type < ARRAY_SIZE(transfer_handlers));
	assert(transfer_handlers[type]);

	if (batch->buffer_size > 0) {
		if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
			return ENOMEM;
	}

	if (batch->dir != USB_DIRECTION_IN) {
		// Sending data from the host to the device; we need to copy the actual data.
		memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
	}

	fibril_mutex_lock(&ep->guard);
	endpoint_activate_locked(ep, batch);
	const int err = transfer_handlers[batch->ep->transfer_type](hc, transfer);

	if (err) {
		endpoint_deactivate_locked(ep);
		fibril_mutex_unlock(&ep->guard);
		return err;
	}

	/* After the critical section, the transfer can already be finished or aborted. */
	transfer = NULL;
	batch = NULL;
	fibril_mutex_unlock(&ep->guard);

	const uint8_t slot_id = xhci_dev->slot_id;
	const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
	return hc_ring_doorbell(hc, slot_id, target);
}