/*
 * Copyright (c) 2017 Michal Staruch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller transfer ring management
 */

#include <usb/debug.h>
#include <usb/request.h>
#include "endpoint.h"
#include "hc.h"
#include "hw_struct/trb.h"
#include "transfers.h"
#include "trb_ring.h"

typedef enum {
    STAGE_OUT,
    STAGE_IN,
} stage_dir_flag_t;

#define REQUEST_TYPE_DTD (0x80)
#define REQUEST_TYPE_IS_DEVICE_TO_HOST(rq) ((rq) & REQUEST_TYPE_DTD)

/** Get direction flag of the status stage.
 * See Table 7 of xHCI specification.
 */
static inline stage_dir_flag_t get_status_direction_flag(xhci_trb_t* trb,
    uint8_t bmRequestType, uint16_t wLength)
{
    /* See Table 7 of xHCI specification */
    return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType) && (wLength > 0)
        ? STAGE_OUT
        : STAGE_IN;
}

typedef enum {
    DATA_STAGE_NO = 0,
    DATA_STAGE_OUT = 2,
    DATA_STAGE_IN = 3,
} data_stage_type_t;

/** Get transfer type flag.
 * See Table 8 of xHCI specification.
 */
static inline data_stage_type_t get_transfer_type(xhci_trb_t* trb,
    uint8_t bmRequestType, uint16_t wLength)
{
    if (wLength == 0)
        return DATA_STAGE_NO;

    /* See Table 8 of xHCI specification */
    return REQUEST_TYPE_IS_DEVICE_TO_HOST(bmRequestType)
        ? DATA_STAGE_IN
        : DATA_STAGE_OUT;
}

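/** Check whether the request is a standard SET_CONFIGURATION or SET_INTERFACE,
 * which requires issuing a Configure Endpoint command.
 */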
static inline bool configure_endpoint_needed(usb_device_request_setup_packet_t *setup)
{
    usb_request_type_t request_type = SETUP_REQUEST_TYPE_GET_TYPE(setup->request_type);

    return request_type == USB_REQUEST_TYPE_STANDARD &&
        (setup->request == USB_DEVREQ_SET_CONFIGURATION
        || setup->request == USB_DEVREQ_SET_INTERFACE);
}

/**
 * There can currently be only one active transfer, because
 * usb_transfer_batch_init locks the endpoint by endpoint_use.
 * Therefore, we store the only active transfer per endpoint there.
 */
xhci_transfer_t* xhci_transfer_create(endpoint_t* ep)
{
    xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t));
    if (!transfer)
        return NULL;

    usb_transfer_batch_init(&transfer->batch, ep);
    return transfer;
}

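/** Destroy the transfer and free its DMA bounce buffer, if any. */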
void xhci_transfer_destroy(xhci_transfer_t* transfer)
{
    assert(transfer);

    dma_buffer_free(&transfer->hc_buffer);
    free(transfer);
}

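/** Get the transfer ring of the endpoint the transfer belongs to. */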
static xhci_trb_ring_t *get_ring(xhci_hc_t *hc, xhci_transfer_t *transfer)
{
    return &xhci_endpoint_get(transfer->batch.ep)->ring;
}

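/** Build the Setup, optional Data and Status stage TRBs of a control transfer
 * and enqueue them on the endpoint's transfer ring. For requests that change
 * the device configuration or interface, a Configure Endpoint command is
 * issued beforehand.
 */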
static int schedule_control(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
    usb_transfer_batch_t *batch = &transfer->batch;
    xhci_trb_ring_t *ring = get_ring(hc, transfer);
    xhci_endpoint_t *xhci_ep = xhci_endpoint_get(transfer->batch.ep);

    usb_device_request_setup_packet_t* setup = &batch->setup.packet;

    xhci_trb_t trbs[3];
    int trbs_used = 0;

    xhci_trb_t *trb_setup = trbs + trbs_used++;
    xhci_trb_clean(trb_setup);

    TRB_CTRL_SET_SETUP_WVALUE(*trb_setup, setup->value);
    TRB_CTRL_SET_SETUP_WLENGTH(*trb_setup, setup->length);
    TRB_CTRL_SET_SETUP_WINDEX(*trb_setup, setup->index);
    TRB_CTRL_SET_SETUP_BREQ(*trb_setup, setup->request);
    TRB_CTRL_SET_SETUP_BMREQTYPE(*trb_setup, setup->request_type);

    /* Size of the setup packet is always 8 */
    TRB_CTRL_SET_XFER_LEN(*trb_setup, 8);

    /* Immediate data */
    TRB_CTRL_SET_IDT(*trb_setup, 1);
    TRB_CTRL_SET_TRB_TYPE(*trb_setup, XHCI_TRB_TYPE_SETUP_STAGE);
    TRB_CTRL_SET_TRT(*trb_setup, get_transfer_type(trb_setup, setup->request_type, setup->length));

    /* Data stage */
    xhci_trb_t *trb_data = NULL;
    if (setup->length > 0) {
        trb_data = trbs + trbs_used++;
        xhci_trb_clean(trb_data);

        trb_data->parameter = host2xhci(64, transfer->hc_buffer.phys);

        // data size (sent for OUT, or buffer size)
        TRB_CTRL_SET_XFER_LEN(*trb_data, batch->buffer_size);
        // FIXME: TD size 4.11.2.4
        TRB_CTRL_SET_TD_SIZE(*trb_data, 1);

        // Some more fields here, no idea what they mean
        TRB_CTRL_SET_TRB_TYPE(*trb_data, XHCI_TRB_TYPE_DATA_STAGE);

        int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
            ? STAGE_IN : STAGE_OUT;
        TRB_CTRL_SET_DIR(*trb_data, stage_dir);
    }

    /* Status stage */
    xhci_trb_t *trb_status = trbs + trbs_used++;
    xhci_trb_clean(trb_status);

    // FIXME: Evaluate next TRB? 4.12.3
    // TRB_CTRL_SET_ENT(*trb_status, 1);

    TRB_CTRL_SET_IOC(*trb_status, 1);
    TRB_CTRL_SET_TRB_TYPE(*trb_status, XHCI_TRB_TYPE_STATUS_STAGE);
    TRB_CTRL_SET_DIR(*trb_status, get_status_direction_flag(trb_setup, setup->request_type, setup->length));

    // Issue a Configure Endpoint command, if needed.
    if (configure_endpoint_needed(setup)) {
        const int err = hc_configure_device(hc, xhci_ep_to_dev(xhci_ep)->slot_id);
        if (err)
            return err;
    }

    return xhci_trb_ring_enqueue_multiple(ring, trbs, trbs_used, &transfer->interrupt_trb_phys);
}

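/** Enqueue a single Normal TRB covering the whole bulk transfer buffer. */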
static int schedule_bulk(xhci_hc_t* hc, xhci_transfer_t *transfer)
{
    xhci_trb_t trb;
    xhci_trb_clean(&trb);
    trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

    // data size (sent for OUT, or buffer size)
    TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
    // FIXME: TD size 4.11.2.4
    TRB_CTRL_SET_TD_SIZE(trb, 1);

    // we want an interrupt after this td is done
    TRB_CTRL_SET_IOC(trb, 1);

    TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

    xhci_trb_ring_t* ring = get_ring(hc, transfer);

    return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

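/** Enqueue a single Normal TRB covering the whole interrupt transfer buffer. */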
static int schedule_interrupt(xhci_hc_t* hc, xhci_transfer_t* transfer)
{
    xhci_trb_t trb;
    xhci_trb_clean(&trb);
    trb.parameter = host2xhci(64, transfer->hc_buffer.phys);

    // data size (sent for OUT, or buffer size)
    TRB_CTRL_SET_XFER_LEN(trb, transfer->batch.buffer_size);
    // FIXME: TD size 4.11.2.4
    TRB_CTRL_SET_TD_SIZE(trb, 1);

    // we want an interrupt after this td is done
    TRB_CTRL_SET_IOC(trb, 1);

    TRB_CTRL_SET_TRB_TYPE(trb, XHCI_TRB_TYPE_NORMAL);

    xhci_trb_ring_t* ring = get_ring(hc, transfer);

    return xhci_trb_ring_enqueue(ring, &trb, &transfer->interrupt_trb_phys);
}

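/** Get the next slot of the endpoint's circular isochronous buffer for
 * enqueueing, or NULL if all slots are currently in use.
 */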
static xhci_isoch_transfer_t* isoch_transfer_get_enqueue(xhci_endpoint_t *ep) {
    if (((ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT) == ep->isoch_dequeue) {
        /* None ready */
        return NULL;
    }
    xhci_isoch_transfer_t *isoch_transfer = &ep->isoch_transfers[ep->isoch_enqueue];
    ep->isoch_enqueue = (ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT;
    return isoch_transfer;
}

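/** Take the next slot of the endpoint's circular isochronous buffer for dequeueing. */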
static xhci_isoch_transfer_t* isoch_transfer_get_dequeue(xhci_endpoint_t *ep) {
    xhci_isoch_transfer_t *isoch_transfer = &ep->isoch_transfers[ep->isoch_dequeue];
    ep->isoch_dequeue = (ep->isoch_dequeue + 1) % XHCI_ISOCH_BUFFER_COUNT;
    return isoch_transfer;
}

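/** Set up the transfer length, burst (TBC/TLBPC) and type fields of an Isoch
 * TRB and enqueue it on the given transfer ring.
 */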
static int schedule_isochronous_trb(xhci_trb_ring_t *ring, xhci_endpoint_t *ep, xhci_trb_t *trb,
    const size_t len, uintptr_t *trb_phys)
{
    TRB_CTRL_SET_XFER_LEN(*trb, len);
    // FIXME: TD size 4.11.2.4 (there is no next TRB, so 0?)
    TRB_CTRL_SET_TD_SIZE(*trb, 0);
    TRB_CTRL_SET_IOC(*trb, 1);
    TRB_CTRL_SET_TRB_TYPE(*trb, XHCI_TRB_TYPE_ISOCH);

    // see 4.14.1 and 4.11.2.3 for the explanation, how to calculate those
    size_t tdpc = len / 1024 + ((len % 1024) ? 1 : 0);
    size_t tbc = tdpc / (ep->max_burst + 1);
    if (!(tdpc % (ep->max_burst + 1))) --tbc;
    size_t bsp = tdpc % (ep->max_burst + 1);
    size_t tlbpc = (bsp ? bsp - 1 : ep->max_burst);

    TRB_CTRL_SET_TBC(*trb, tbc);
    TRB_CTRL_SET_TLBPC(*trb, tlbpc);

    // FIXME: do we want this? 6.4.1.3, p 366 (also possibly frame id?)
    TRB_CTRL_SET_SIA(*trb, 1);

    return xhci_trb_ring_enqueue(ring, trb, trb_phys);
}

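/** Copy the batch data into a free isochronous buffer (waiting until one is
 * available), enqueue an Isoch TRB for it and, once the buffers are prefilled,
 * ring the doorbell to start the endpoint.
 */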
static int schedule_isochronous_out(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
    xhci_device_t *xhci_dev)
{
    xhci_trb_t trb;
    xhci_trb_clean(&trb);

    fibril_mutex_lock(&xhci_ep->isoch_guard);
    xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
    while (!isoch_transfer) {
        fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
        isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
    }

    isoch_transfer->size = transfer->batch.buffer_size;
    if (isoch_transfer->size > 0) {
        memcpy(isoch_transfer->data.virt, transfer->batch.buffer, isoch_transfer->size);
    }

    trb.parameter = isoch_transfer->data.phys;

    xhci_trb_ring_t *ring = get_ring(hc, transfer);
    int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
        &isoch_transfer->interrupt_trb_phys);
    if (err) {
        fibril_mutex_unlock(&xhci_ep->isoch_guard);
        return err;
    }

    /* If not yet started, start the isochronous endpoint transfers - after buffer count - 1 writes */
    /* The -1 is there because of the enqueue != dequeue check. The buffer must have at least 2 transfers. */
    if (((xhci_ep->isoch_enqueue + 1) % XHCI_ISOCH_BUFFER_COUNT) == xhci_ep->isoch_dequeue && !xhci_ep->isoch_started) {
        const uint8_t slot_id = xhci_dev->slot_id;
        const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
        err = hc_ring_doorbell(hc, slot_id, target);
        xhci_ep->isoch_started = true;
    }
    fibril_mutex_unlock(&xhci_ep->isoch_guard);
    if (err) {
        return err;
    }

    /* Isochronous transfers don't handle errors, they skip them all. */
    transfer->batch.error = EOK;
    transfer->batch.transfered_size = transfer->batch.buffer_size;
    usb_transfer_batch_finish(&transfer->batch);
    return EOK;
}

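/** Prefill the transfer ring with Isoch TRBs, one for every free isochronous buffer. */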
static int schedule_isochronous_in_trbs(xhci_endpoint_t *xhci_ep, xhci_trb_ring_t *ring) {
    xhci_trb_t trb;
    xhci_isoch_transfer_t *isoch_transfer;
    while ((isoch_transfer = isoch_transfer_get_enqueue(xhci_ep)) != NULL) {
        xhci_trb_clean(&trb);
        trb.parameter = isoch_transfer->data.phys;
        isoch_transfer->size = xhci_ep->isoch_max_size;

        int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
            &isoch_transfer->interrupt_trb_phys);
        if (err)
            return err;
    }
    return EOK;
}

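/** On the first read, fill the ring with Isoch TRBs and start the endpoint.
 * Then wait for a buffer returned by the xHC, copy its data into the batch
 * buffer and requeue the buffer with a fresh Isoch TRB.
 */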
static int schedule_isochronous_in(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
    xhci_device_t *xhci_dev)
{
    fibril_mutex_lock(&xhci_ep->isoch_guard);
    /* If not yet started, start the isochronous endpoint transfers - before first read */
    if (!xhci_ep->isoch_started) {
        xhci_trb_ring_t *ring = get_ring(hc, transfer);
        /* Fill the TRB ring. */
        int err = schedule_isochronous_in_trbs(xhci_ep, ring);
        if (err) {
            fibril_mutex_unlock(&xhci_ep->isoch_guard);
            return err;
        }
        /* Ring the doorbell to start it. */
        const uint8_t slot_id = xhci_dev->slot_id;
        const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
        err = hc_ring_doorbell(hc, slot_id, target);
        if (err) {
            fibril_mutex_unlock(&xhci_ep->isoch_guard);
            return err;
        }
        xhci_ep->isoch_started = true;
    }

    xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
    while (!isoch_transfer) {
        fibril_condvar_wait(&xhci_ep->isoch_avail, &xhci_ep->isoch_guard);
        isoch_transfer = isoch_transfer_get_enqueue(xhci_ep);
    }

    /* isoch_transfer->size holds the length actually received (set by the event handler). */
    if (transfer->batch.buffer_size <= isoch_transfer->size) {
        if (transfer->batch.buffer_size > 0) {
            memcpy(transfer->batch.buffer, isoch_transfer->data.virt, transfer->batch.buffer_size);
        }
        if (transfer->batch.buffer_size < isoch_transfer->size) {
            // FIXME: somehow notify that buffer was too small, probably batch error code
        }
        transfer->batch.transfered_size = transfer->batch.buffer_size;
    }
    else {
        memcpy(transfer->batch.buffer, isoch_transfer->data.virt, isoch_transfer->size);
        transfer->batch.transfered_size = isoch_transfer->size;
    }

    // Clear and requeue the transfer with new TRB
    xhci_trb_t trb;
    xhci_trb_clean(&trb);

    trb.parameter = isoch_transfer->data.phys;
    isoch_transfer->size = xhci_ep->isoch_max_size;

    xhci_trb_ring_t *ring = get_ring(hc, transfer);
    int err = schedule_isochronous_trb(ring, xhci_ep, &trb, isoch_transfer->size,
        &isoch_transfer->interrupt_trb_phys);
    fibril_mutex_unlock(&xhci_ep->isoch_guard);

    if (err) {
        return err;
    }

    /* Isochronous transfers don't handle errors, they skip them all. */
    transfer->batch.error = EOK;
    usb_transfer_batch_finish(&transfer->batch);
    return EOK;
}

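/** Dispatch an isochronous transfer to the IN or OUT path after checking
 * that it fits into the preallocated isochronous buffers.
 */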
static int schedule_isochronous(xhci_hc_t* hc, xhci_transfer_t* transfer, xhci_endpoint_t *xhci_ep,
    xhci_device_t *xhci_dev)
{
    if (transfer->batch.buffer_size > xhci_ep->isoch_max_size) {
        usb_log_error("Cannot schedule an oversized isochronous transfer.");
        return EINVAL;
    }

    if (xhci_ep->base.direction == USB_DIRECTION_OUT) {
        return schedule_isochronous_out(hc, transfer, xhci_ep, xhci_dev);
    }
    else {
        return schedule_isochronous_in(hc, transfer, xhci_ep, xhci_dev);
    }
}

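/** Process a Transfer Event for an isochronous endpoint: update the buffer
 * bookkeeping and wake up the fibril waiting for a free or completed buffer.
 */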
static int handle_isochronous_transfer_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_endpoint_t *ep) {
    fibril_mutex_lock(&ep->isoch_guard);

    int err = EOK;

    const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
    switch (completion_code) {
    case XHCI_TRBC_RING_OVERRUN:
    case XHCI_TRBC_RING_UNDERRUN:
        /* Rings are unscheduled by xHC now */
        ep->isoch_started = false;
        /* For OUT, there was nothing to process */
        /* For IN, the buffer has overflowed; empty the buffers and re-add the TRBs */
        ep->isoch_enqueue = ep->isoch_dequeue = 0;
        err = EIO;
        break;
    case XHCI_TRBC_SHORT_PACKET:
        usb_log_debug("Short transfer.");
        /* fallthrough */
    case XHCI_TRBC_SUCCESS:
        break;
    default:
        usb_log_warning("Transfer not successful: %u", completion_code);
        err = EIO;
    }

    xhci_isoch_transfer_t *isoch_transfer = isoch_transfer_get_dequeue(ep);
    if (isoch_transfer->interrupt_trb_phys != trb->parameter) {
        usb_log_error("TRB does not match the expected isochronous transfer, skipping.");
        // FIXME: what to do? probably just kill the whole endpoint
        err = ENOENT;
    }

    if (ep->base.direction == USB_DIRECTION_IN) {
        // We may have received less data, that's fine
        isoch_transfer->size -= TRB_TRANSFER_LENGTH(*trb);
    }

    fibril_condvar_signal(&ep->isoch_avail);
    fibril_mutex_unlock(&ep->isoch_guard);
    return err;
}

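/** Handle a Transfer Event TRB: look up the slot and endpoint it belongs to,
 * update the active batch according to the completion code and finish it.
 */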
int xhci_handle_transfer_event(xhci_hc_t* hc, xhci_trb_t* trb)
{
    uintptr_t addr = trb->parameter;
    const unsigned slot_id = XHCI_DWORD_EXTRACT(trb->control, 31, 24);
    const unsigned ep_dci = XHCI_DWORD_EXTRACT(trb->control, 20, 16);

    xhci_device_t *dev = hc->bus.devices_by_slot[slot_id];
    if (!dev) {
        usb_log_error("Transfer event on disabled slot %u", slot_id);
        return ENOENT;
    }

    const usb_endpoint_t ep_num = ep_dci / 2;
    xhci_endpoint_t *ep = xhci_device_get_endpoint(dev, ep_num);
    if (!ep) {
        usb_log_error("Transfer event on dropped endpoint %u of device "
            XHCI_DEV_FMT, ep_num, XHCI_DEV_ARGS(*dev));
        return ENOENT;
    }

    /* FIXME: This is racy. Do we care? */
    ep->ring.dequeue = addr;

    if (ep->base.transfer_type == USB_TRANSFER_ISOCHRONOUS) {
        return handle_isochronous_transfer_event(hc, trb, ep);
    }

    fibril_mutex_lock(&ep->base.guard);
    usb_transfer_batch_t *batch = ep->base.active_batch;
    if (!batch) {
        fibril_mutex_unlock(&ep->base.guard);
        return ENOENT;
    }

    const xhci_trb_completion_code_t completion_code = TRB_COMPLETION_CODE(*trb);
    switch (completion_code) {
    case XHCI_TRBC_SHORT_PACKET:
        usb_log_debug("Short transfer.");
        /* fallthrough */
    case XHCI_TRBC_SUCCESS:
        batch->error = EOK;
        batch->transfered_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
        break;

    default:
        usb_log_warning("Transfer not successful: %u", completion_code);
        batch->error = EIO;
    }

    usb_transfer_batch_reset_toggle(batch);
    endpoint_deactivate_locked(&ep->base);
    fibril_mutex_unlock(&ep->base.guard);

    xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);

    if (batch->dir == USB_DIRECTION_IN) {
        assert(batch->buffer);
        assert(batch->transfered_size <= batch->buffer_size);
        memcpy(batch->buffer, transfer->hc_buffer.virt, batch->transfered_size);
    }

    usb_transfer_batch_finish(batch);
    return EOK;
}

typedef int (*transfer_handler)(xhci_hc_t *, xhci_transfer_t *);

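/** Transfer schedulers indexed by USB transfer type; isochronous transfers
 * are dispatched separately in xhci_transfer_schedule.
 */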
static const transfer_handler transfer_handlers[] = {
    [USB_TRANSFER_CONTROL] = schedule_control,
    [USB_TRANSFER_ISOCHRONOUS] = NULL,
    [USB_TRANSFER_BULK] = schedule_bulk,
    [USB_TRANSFER_INTERRUPT] = schedule_interrupt,
};

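/** Schedule a batch: allocate a bounce buffer, build and enqueue the TRBs for
 * the given transfer type and ring the endpoint doorbell. Isochronous
 * transfers are handed over to schedule_isochronous instead.
 */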
int xhci_transfer_schedule(xhci_hc_t *hc, usb_transfer_batch_t *batch)
{
    assert(hc);
    endpoint_t *ep = batch->ep;

    xhci_transfer_t *transfer = xhci_transfer_from_batch(batch);
    xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
    xhci_device_t *xhci_dev = xhci_ep_to_dev(xhci_ep);

    // FIXME: find a better way to check if the ring is not initialized
    if (!xhci_ep->ring.segment_count) {
        usb_log_error("Ring not initialized for endpoint " XHCI_EP_FMT,
            XHCI_EP_ARGS(*xhci_ep));
        return EINVAL;
    }

    // Isochronous transfer needs to be handled differently
    if (batch->ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) {
        return schedule_isochronous(hc, transfer, xhci_ep, xhci_dev);
    }

    const usb_transfer_type_t type = batch->ep->transfer_type;
    assert(type >= 0 && type < ARRAY_SIZE(transfer_handlers));
    assert(transfer_handlers[type]);

    if (batch->buffer_size > 0) {
        if (dma_buffer_alloc(&transfer->hc_buffer, batch->buffer_size))
            return ENOMEM;
    }

    if (batch->dir != USB_DIRECTION_IN) {
        // Sending stuff from host to device, we need to copy the actual data.
        memcpy(transfer->hc_buffer.virt, batch->buffer, batch->buffer_size);
    }

    fibril_mutex_lock(&ep->guard);
    endpoint_activate_locked(ep, batch);
    const int err = transfer_handlers[batch->ep->transfer_type](hc, transfer);

    if (err) {
        endpoint_deactivate_locked(ep);
        fibril_mutex_unlock(&ep->guard);
        return err;
    }

    /* After the critical section, the transfer can already be finished or aborted. */
    transfer = NULL; batch = NULL;
    fibril_mutex_unlock(&ep->guard);

    const uint8_t slot_id = xhci_dev->slot_id;
    const uint8_t target = xhci_endpoint_index(xhci_ep) + 1; /* EP Doorbells start at 1 */
    return hc_ring_doorbell(hc, slot_id, target);
}
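
/**
 * @}
 */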