source: mainline/uspace/drv/bus/usb/xhci/hc.c@ f08da1c

Last change on this file since f08da1c was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix vertical spacing with new Ccheck revision.

  • Property mode set to 100644
File size: 30.9 KB
RevLine 
[5cbccd4]1/*
[e0a5d4c]2 * Copyright (c) 2018 Ondrej Hlavaty, Petr Manek, Jaroslav Jindrak, Jan Hrach, Michal Staruch
[5cbccd4]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
[cb89430]37#include <str_error.h>
[5cbccd4]38#include <usb/debug.h>
[5fd9c30]39#include <usb/host/endpoint.h>
[5cbccd4]40#include "debug.h"
41#include "hc.h"
[7bd99bf]42#include "rh.h"
[cb89430]43#include "hw_struct/trb.h"
[0206d35]44#include "hw_struct/context.h"
45#include "endpoint.h"
[e9e24f2]46#include "transfers.h"
47#include "trb_ring.h"
[5cbccd4]48
/**
 * Default USB Speed ID mapping: Table 157
 */
/* Convert a PSI exponent/mantissa pair into bits per second. */
#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
/* Build a symmetric (rx == tx) default port-speed table entry. */
#define PORT_SPEED(usb, mjr, psie, psim) { \
	.name = "USB ", \
	.major = mjr, \
	.minor = 0, \
	.usb_speed = USB_SPEED_##usb, \
	.rx_bps = PSI_TO_BPS(psie, psim), \
	.tx_bps = PSI_TO_BPS(psie, psim) \
}
/*
 * Default speeds per Protocol Speed ID Value (PSIV), as implied by
 * Table 157 of the xHCI specification.
 */
static const xhci_port_speed_t default_psiv_to_port_speed [] = {
	[1] = PORT_SPEED(FULL, 2, 2, 12),
	[2] = PORT_SPEED(LOW, 2, 1, 1500),
	[3] = PORT_SPEED(HIGH, 2, 2, 480),
	[4] = PORT_SPEED(SUPER, 3, 3, 5),
};

/* Reverse mapping: usb_speed_t back to the default PSIV above. */
static const unsigned usb_speed_to_psiv [] = {
	[USB_SPEED_FULL] = 1,
	[USB_SPEED_LOW] = 2,
	[USB_SPEED_HIGH] = 3,
	[USB_SPEED_SUPER] = 4,
};
[91ca111]75
76/**
77 * Walk the list of extended capabilities.
[eb928c4]78 *
79 * The most interesting thing hidden in extended capabilities is the mapping of
80 * ports to protocol versions and speeds.
[91ca111]81 */
[45457265]82static errno_t hc_parse_ec(xhci_hc_t *hc)
[91ca111]83{
[816335c]84 unsigned psic, major, minor;
85 xhci_sp_name_t name;
86
[f668d60]87 xhci_port_speed_t *speeds = hc->speeds;
[91ca111]88
[8ebe212]89 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
[91ca111]90 xhci_dump_extcap(ec);
91 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
92 case XHCI_EC_USB_LEGACY:
93 assert(hc->legsup == NULL);
94 hc->legsup = (xhci_legsup_t *) ec;
95 break;
96 case XHCI_EC_SUPPORTED_PROTOCOL:
97 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
98 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
[816335c]99 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
100 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
101
102 if (name.packed != xhci_name_usb.packed) {
103 /**
104 * The detection of such protocol would work,
105 * but the rest of the implementation is made
106 * for the USB protocol only.
107 */
108 usb_log_error("Unknown protocol %.4s.", name.str);
109 return ENOTSUP;
110 }
[91ca111]111
[a9fcd73]112 unsigned offset = XHCI_REG_RD(ec, XHCI_EC_SP_CP_OFF);
113 unsigned count = XHCI_REG_RD(ec, XHCI_EC_SP_CP_COUNT);
114 xhci_rh_set_ports_protocol(&hc->rh, offset, count, major);
115
[91ca111]116 // "Implied" speed
117 if (psic == 0) {
[816335c]118 assert(minor == 0);
[370a1c8]119
[91ca111]120 if (major == 2) {
[a75f9cbc]121 speeds[1] = default_psiv_to_port_speed[1];
122 speeds[2] = default_psiv_to_port_speed[2];
123 speeds[3] = default_psiv_to_port_speed[3];
[91ca111]124 } else if (major == 3) {
[a75f9cbc]125 speeds[4] = default_psiv_to_port_speed[4];
[91ca111]126 } else {
127 return EINVAL;
128 }
129
[defaab2]130 usb_log_debug("Implied speed of USB %u.0 set up.", major);
[91ca111]131 } else {
132 for (unsigned i = 0; i < psic; i++) {
133 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
134 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
135 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
136 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
137 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
[a75f9cbc]138 uint64_t bps = PSI_TO_BPS(psie, psim);
139
140 /*
[8033f89]141 * Speed is not implied, but using one of default PSIV. This
142 * is not clearly stated in xHCI spec. There is a clear
143 * intention to allow xHCI to specify its own speed
144 * parameters, but throughout the document, they used fixed
145 * values for e.g. High-speed (3), without stating the
146 * controller shall have implied default speeds - and for
147 * instance Intel controllers do not. So let's check if the
148 * values match and if so, accept the implied USB speed too.
[a75f9cbc]149 *
150 * The main reason we need this is the usb_speed to have
151 * mapping also for devices connected to hubs.
152 */
[3bacee1]153 if (psiv < ARRAY_SIZE(default_psiv_to_port_speed) &&
154 default_psiv_to_port_speed[psiv].major == major &&
155 default_psiv_to_port_speed[psiv].minor == minor &&
156 default_psiv_to_port_speed[psiv].rx_bps == bps &&
157 default_psiv_to_port_speed[psiv].tx_bps == bps) {
[a75f9cbc]158 speeds[psiv] = default_psiv_to_port_speed[psiv];
[8033f89]159 usb_log_debug("Assumed default %s speed of USB %u.",
[3bacee1]160 usb_str_speed(speeds[psiv].usb_speed), major);
[a75f9cbc]161 continue;
162 }
[91ca111]163
[a75f9cbc]164 // Custom speed
[816335c]165 speeds[psiv].major = major;
166 speeds[psiv].minor = minor;
167 str_ncpy(speeds[psiv].name, 4, name.str, 4);
[f668d60]168 speeds[psiv].usb_speed = USB_SPEED_MAX;
[816335c]169
[91ca111]170 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
[816335c]171 speeds[psiv].rx_bps = bps;
[91ca111]172 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
[816335c]173 speeds[psiv].tx_bps = bps;
[8033f89]174 usb_log_debug("Speed %u set up for bps %" PRIu64
[3bacee1]175 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps,
176 speeds[psiv].tx_bps);
[91ca111]177 }
178 }
179 }
180 }
181 }
182 return EOK;
183}
184
/**
 * Initialize MMIO spaces of xHC.
 *
 * Maps the device's single MMIO range, locates the capability, operational,
 * runtime and doorbell register sets at the offsets advertised by the
 * capability registers, reads basic capabilities, and initializes the root
 * hub and extended capabilities.
 *
 * @return EOK, or an error from PIO mapping / EC parsing / RH init.
 */
errno_t hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	errno_t err;

	if (hw_res->mem_ranges.count != 1) {
		usb_log_error("Unexpected MMIO area, bailing out.");
		return EINVAL;
	}

	hc->mmio_range = hw_res->mem_ranges.ranges[0];

	usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
	    RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

	/* The range must at least cover the capability registers. */
	if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
		return EOVERFLOW;

	void *base;
	if ((err = pio_enable_range(&hc->mmio_range, &base)))
		return err;

	/* Register sets live at offsets given by the capability registers. */
	hc->reg_base = base;
	hc->cap_regs = (xhci_cap_regs_t *) base;
	hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
	hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
	hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

	/* A zero XECP offset means no extended capabilities at all. */
	uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
	if (xec_offset > 0)
		hc->xecp = (xhci_extcap_t *) (base + xec_offset);

	usb_log_debug("Initialized MMIO reg areas:");
	usb_log_debug("\tCapability regs: %p", hc->cap_regs);
	usb_log_debug("\tOperational regs: %p", hc->op_regs);
	usb_log_debug("\tRuntime regs: %p", hc->rt_regs);
	usb_log_debug("\tDoorbell array base: %p", hc->db_arry);

	xhci_dump_cap_regs(hc->cap_regs);

	hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
	hc->csz = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_CSZ);
	hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);

	/* Seed the MFINDEX wrap bookkeeping with the current uptime. */
	struct timespec ts;
	getuptime(&ts);
	hc->wrap_time = SEC2USEC(ts.tv_sec) + NSEC2USEC(ts.tv_nsec);
	hc->wrap_count = 0;

	/*
	 * NOTE(review): `>>` binds tighter than `&`, so `ist & 0x10 >> 1`
	 * parses as `ist & 0x8`. If `(ist & 0x10) >> 1` was intended, this is
	 * a precedence bug — verify against the xHCI IST field definition
	 * before changing, as the stored hc->ist value differs either way.
	 */
	unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
	hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);

	if ((err = xhci_rh_init(&hc->rh, hc)))
		goto err_pio;

	if ((err = hc_parse_ec(hc)))
		goto err_rh;

	return EOK;

err_rh:
	xhci_rh_fini(&hc->rh);
err_pio:
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	return err;
}
253
static int event_worker(void *arg);

/**
 * Initialize structures kept in allocated memory.
 *
 * Allocates the DCBAA, the event-worker fibril, the event ring, scratchpad
 * buffers, command infrastructure, bus bookkeeping and the software event
 * ring. On failure, everything allocated so far is rolled back in reverse
 * order through the goto chain below.
 *
 * Note: the `device` parameter is currently unused here.
 */
errno_t hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
	errno_t err = ENOMEM;

	/* One 64-bit DCBAA slot per device slot, plus entry 0 (scratchpad). */
	if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
		return ENOMEM;
	hc->dcbaa = hc->dcbaa_dma.virt;

	hc->event_worker = joinable_fibril_create(&event_worker, hc);
	if (!hc->event_worker)
		goto err_dcbaa;

	if ((err = xhci_event_ring_init(&hc->event_ring, 1)))
		goto err_worker;

	if ((err = xhci_scratchpad_alloc(hc)))
		goto err_event_ring;

	if ((err = xhci_init_commands(hc)))
		goto err_scratch;

	if ((err = xhci_bus_init(&hc->bus, hc)))
		goto err_cmd;

	xhci_sw_ring_init(&hc->sw_ring, PAGE_SIZE / sizeof(xhci_trb_t));

	return EOK;

err_cmd:
	xhci_fini_commands(hc);
err_scratch:
	xhci_scratchpad_free(hc);
err_event_ring:
	xhci_event_ring_fini(&hc->event_ring);
err_worker:
	joinable_fibril_destroy(hc->event_worker);
err_dcbaa:
	hc->dcbaa = NULL;
	dma_buffer_free(&hc->dcbaa_dma);
	return err;
}
300
/*
 * Top-half interrupt filter executed by the kernel.
 *
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 *     status = read(usbsts)
 *     assert status
 *     assert ip
 *     accept (passing status)
 * }
 * decline
 *
 * The NULL addresses and zero values below are placeholders patched in by
 * hc_irq_code_gen() once the MMIO layout is known.
 */
static const irq_cmd_t irq_commands[] = {
	{
		.cmd = CMD_PIO_READ_32,
		.dstarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_AND,
		.srcarg = 3,
		.dstarg = 4,
		.value = 0 /* host2xhci(32, 1) */
	},
	{
		.cmd = CMD_PREDICATE,
		.srcarg = 4,
		.value = 5
	},
	{
		.cmd = CMD_PIO_READ_32,
		.dstarg = 1,
		.addr = NULL /* usbsts */
	},
	{
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
	},
	{
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 2,
		.addr = NULL /* usbsts */
	},
	{
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};
357
[cb89430]358/**
359 * Generates code to accept interrupts. The xHCI is designed primarily for
360 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
361 * (except 0) are disabled.
362 */
[45457265]363errno_t hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res, int *irq)
[cb89430]364{
365 assert(code);
366 assert(hw_res);
367
[e4d7363]368 if (hw_res->irqs.count != 1) {
[cb89430]369 usb_log_info("Unexpected HW resources to enable interrupts.");
370 return EINVAL;
371 }
372
373 code->ranges = malloc(sizeof(irq_pio_range_t));
374 if (code->ranges == NULL)
375 return ENOMEM;
376
377 code->cmds = malloc(sizeof(irq_commands));
378 if (code->cmds == NULL) {
379 free(code->ranges);
380 return ENOMEM;
381 }
382
383 code->rangecount = 1;
384 code->ranges[0] = (irq_pio_range_t) {
[3bacee1]385 .base = RNGABS(hc->mmio_range),
386 .size = RNGSZ(hc->mmio_range),
[cb89430]387 };
388
389 code->cmdcount = ARRAY_SIZE(irq_commands);
390 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
391
[3bacee1]392 void *intr0_iman = RNGABSPTR(hc->mmio_range) +
393 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) +
394 offsetof(xhci_rt_regs_t, ir[0]);
395 void *usbsts = RNGABSPTR(hc->mmio_range) +
396 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) +
397 offsetof(xhci_op_regs_t, usbsts);
[8033f89]398
[cb89430]399 code->cmds[0].addr = intr0_iman;
400 code->cmds[1].value = host2xhci(32, 1);
[ab5a0830]401 code->cmds[3].addr = usbsts;
402 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
403 code->cmds[5].addr = usbsts;
404 code->cmds[6].addr = intr0_iman;
[cb89430]405
[eb862fd]406 *irq = hw_res->irqs.irqs[0];
407 return EOK;
[cb89430]408}
409
/**
 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
 *
 * Sets the OS-owned semaphore and polls until the BIOS releases its own,
 * up to XHCI_LEGSUP_BIOS_TIMEOUT_US.
 *
 * @return EOK when owned (or no legacy support capability exists),
 *         ETIMEOUT/EIO/ENOTSUP on handoff failure.
 */
errno_t hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
{
	/* No legacy support capability, the controller is solely for us */
	if (!hc->legsup)
		return EOK;

	/* Wait for Controller Not Ready to clear before touching LEGSUP. */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
		return ETIMEOUT;

	usb_log_debug("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
	XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
	for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
		usb_log_debug("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
		if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
			/* BIOS released the HC; confirm we still hold it. */
			return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
		}
		fibril_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
	}
	usb_log_error("BIOS did not release XHCI legacy hold!");

	return ENOTSUP;
}
437
/**
 * Ask the xHC to reset its state. Implements sequence
 * from xHCI spec: halt first, then reset, waiting for completion of each.
 *
 * @return EOK, or ETIMEOUT if any stage does not complete in time.
 */
static errno_t hc_reset(xhci_hc_t *hc)
{
	/* The controller must be ready before we may drive it. */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
		return ETIMEOUT;

	/* Stop the HC: set R/S to 0 */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/* Wait until the HC is halted - it shall take at most 16 ms */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
	    XHCI_REG_MASK(XHCI_OP_HCH)))
		return ETIMEOUT;

	/* Reset */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

	/* Wait until the reset is complete */
	if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
		return ETIMEOUT;

	return EOK;
}
463
464/**
465 * Initialize the HC: section 4.2
466 */
[45457265]467errno_t hc_start(xhci_hc_t *hc)
[cb89430]468{
[45457265]469 errno_t err;
[cb89430]470
471 if ((err = hc_reset(hc)))
472 return err;
473
[0e7380f]474 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
475 return ETIMEOUT;
[cb89430]476
[1d758fc]477 uintptr_t dcbaa_phys = dma_buffer_phys_base(&hc->dcbaa_dma);
478 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, dcbaa_phys);
[15f8079]479 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
[cb89430]480
[fb28cde]481 uintptr_t crcr;
482 xhci_trb_ring_reset_dequeue_state(&hc->cr.trb_ring, &crcr);
[77ded647]483 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR, crcr);
[cb89430]484
[665368c]485 XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);
486
[19f0048]487 xhci_event_ring_reset(&hc->event_ring);
488
[cb89430]489 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
490 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
[77ded647]491 XHCI_REG_WR(intr0, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
[1d758fc]492
493 const uintptr_t erstba_phys = dma_buffer_phys_base(&hc->event_ring.erst);
494 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, erstba_phys);
[cb89430]495
[eadaeae8]496 if (CAP_HANDLE_VALID(hc->base.irq_handle)) {
[cb89430]497 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
498 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
499 }
500
[503086d8]501 XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);
502
[19f0048]503 xhci_sw_ring_restart(&hc->sw_ring);
504 joinable_fibril_start(hc->event_worker);
505
506 xhci_start_command_ring(hc);
507
[cb89430]508 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
509
[19f0048]510 /* RH needs to access port states on startup */
511 xhci_rh_start(&hc->rh);
[dcf0597]512
[cb89430]513 return EOK;
514}
515
/**
 * Stop the controller and the software machinery driving it.
 *
 * Counterpart of hc_start(); leaves structures allocated so the HC can be
 * restarted by hc_reinitialize().
 */
static void hc_stop(xhci_hc_t *hc)
{
	/* Stop the HC in hardware. */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/*
	 * Wait until the HC is halted - it shall take at most 16 ms.
	 * Note that we ignore the return value here.
	 */
	xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
	    XHCI_REG_MASK(XHCI_OP_HCH));

	/* Make sure commands will not block other fibrils. */
	xhci_nuke_command_ring(hc);

	/* Stop the event worker fibril to restart it */
	xhci_sw_ring_stop(&hc->sw_ring);
	joinable_fibril_join(hc->event_worker);

	/*
	 * Then, disconnect all roothub devices, which shall trigger
	 * disconnection of everything
	 */
	xhci_rh_stop(&hc->rh);
}
541
/**
 * Recover from a Host Controller Error by stopping and restarting the HC.
 *
 * Note: the return value of hc_start() is ignored here; if the restart
 * fails, the controller stays down.
 */
static void hc_reinitialize(xhci_hc_t *hc)
{
	/* Stop everything. */
	hc_stop(hc);

	usb_log_info("HC stopped. Starting again...");

	/* The worker fibrils need to be started again */
	joinable_fibril_recreate(hc->event_worker);
	joinable_fibril_recreate(hc->rh.event_worker);

	/* Now, the HC shall be stopped and software shall be clean. */
	hc_start(hc);
}
556
557static bool hc_is_broken(xhci_hc_t *hc)
558{
559 const uint32_t usbcmd = XHCI_REG_RD_FIELD(&hc->op_regs->usbcmd, 32);
560 const uint32_t usbsts = XHCI_REG_RD_FIELD(&hc->op_regs->usbsts, 32);
561
[3bacee1]562 return !(usbcmd & XHCI_REG_MASK(XHCI_OP_RS)) ||
563 (usbsts & XHCI_REG_MASK(XHCI_OP_HCE)) ||
564 (usbsts & XHCI_REG_MASK(XHCI_OP_HSE));
[19f0048]565}
566
/**
 * Used only when polling. Shall supplement the irq_commands.
 *
 * Reads and acknowledges USBSTS and the interrupter's IP bit, mirroring
 * what the top-half irq_commands would have done.
 */
errno_t hc_status(bus_t *bus, uint32_t *status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
	if (ip) {
		*status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
		/* Acknowledge: both USBSTS bits and IP are write-1-to-clear. */
		XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
		XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

		/*
		 * interrupt handler expects status from irq_commands, which is
		 * in xhci order.
		 */
		*status = host2xhci(32, *status);
	}

	usb_log_debug("Polled status: %x", *status);
	return EOK;
}
589
/**
 * Record an MFINDEX Wrap event: remember when it happened (in usec of
 * uptime) and bump the wrap counter, so timestamps can be reconstructed.
 *
 * NOTE(review): the "%lld" specifiers assume tv_sec and the NSEC2USEC()
 * result are long long — verify against this platform's timespec types.
 */
static errno_t xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
	struct timespec ts;
	getuptime(&ts);
	usb_log_debug("Microframe index wrapped (@%lld.%lld, %" PRIu64 " total).",
	    ts.tv_sec, NSEC2USEC(ts.tv_nsec), hc->wrap_count);
	hc->wrap_time = SEC2USEC(ts.tv_sec) + NSEC2USEC(ts.tv_nsec);
	++hc->wrap_count;
	return EOK;
}
600
/* Signature shared by all event-TRB handlers. */
typedef errno_t (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/**
 * These events are handled by separate event handling fibril.
 */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};

/**
 * These events are handled directly in the interrupt handler, thus they must
 * not block waiting for another interrupt.
 */
static event_handler event_handlers_fast [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};
618
[45457265]619static errno_t hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb)
[2c0564c]620{
621 const unsigned type = TRB_TYPE(*trb);
622
623 if (type <= ARRAY_SIZE(event_handlers_fast) && event_handlers_fast[type])
624 return event_handlers_fast[type](hc, trb);
625
626 if (type <= ARRAY_SIZE(event_handlers) && event_handlers[type])
627 return xhci_sw_ring_enqueue(&hc->sw_ring, trb);
628
[047fbc8]629 if (type == XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT)
630 return xhci_sw_ring_enqueue(&hc->rh.event_ring, trb);
631
[2c0564c]632 return ENOTSUP;
633}
634
/**
 * Fibril body: drain the software event ring and run the deferred handlers.
 *
 * Exits when xhci_sw_ring_dequeue() returns EINTR (ring stopped by
 * hc_stop()). Only TRB types vetted by hc_handle_event() — i.e. those with
 * a non-NULL entry in event_handlers[] — are expected to arrive here.
 */
static int event_worker(void *arg)
{
	errno_t err;
	xhci_trb_t trb;
	xhci_hc_t *const hc = arg;
	assert(hc);

	while (xhci_sw_ring_dequeue(&hc->sw_ring, &trb) != EINTR) {
		const unsigned type = TRB_TYPE(trb);

		if ((err = event_handlers[type](hc, &trb)))
			usb_log_error("Failed to handle event: %s", str_error(err));
	}

	return 0;
}
651
/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events, that blocks on waiting for subsequent events,
 * we solve this problem by deferring some types of events to separate fibrils.
 *
 * Advances ERDP after each event, and finally writes ERDP with the EHB
 * (Event Handler Busy) bit to acknowledge the interrupt.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring,
    xhci_interrupter_regs_t *intr)
{
	errno_t err;

	xhci_trb_t trb;
	hc->event_handler = fibril_get_id();

	while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
		if ((err = hc_handle_event(hc, &trb)) != EOK) {
			usb_log_error("Failed to handle event in interrupt: %s", str_error(err));
		}

		XHCI_REG_WR(intr, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
	}

	hc->event_handler = 0;

	/* Clear the Event Handler Busy flag by writing ERDP with EHB set. */
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
	XHCI_REG_WR(intr, XHCI_INTR_ERDP, erdp);

	usb_log_debug2("Event ring run finished.");
}
682
/**
 * Handle an interrupt request from xHC. Resolve all situations that trigger an
 * interrupt separately.
 *
 * Note that all RW1C bits in USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * @param status contents of USBSTS register at the time of the interrupt.
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	/* status arrives in xhci byte order (from irq_commands / hc_status). */
	status = xhci2host(32, status);

	if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
		usb_log_error("Host system error occured. Aren't we supposed to be dead already?");
		return;
	}

	if (status & XHCI_REG_MASK(XHCI_OP_HCE)) {
		usb_log_error("Host controller error occured. Reinitializing...");
		hc_reinitialize(hc);
		return;
	}

	if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
		usb_log_debug2("Event interrupt, running the event ring.");
		hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
		status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
		usb_log_error("Save/Restore error occured. WTF, "
		    "S/R mechanism not implemented!");
		status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
	}

	/* According to Note on p. 302, we may safely ignore the PCD bit. */
	status &= ~XHCI_REG_MASK(XHCI_OP_PCD);

	if (status) {
		usb_log_error("Non-zero status after interrupt handling (%08x) "
		    " - missing something?", status);
	}
}
728
/**
 * Tear down all in-memory structures.
 *
 * Reverse of hc_init_memory()/hc_init_mmio(): stops the HC first, then
 * releases rings, scratchpad, DCBAA, commands, root hub and the PIO range.
 */
void hc_fini(xhci_hc_t *hc)
{
	hc_stop(hc);

	xhci_sw_ring_fini(&hc->sw_ring);
	joinable_fibril_destroy(hc->event_worker);
	xhci_bus_fini(&hc->bus);
	xhci_event_ring_fini(&hc->event_ring);
	xhci_scratchpad_free(hc);
	dma_buffer_free(&hc->dcbaa_dma);
	xhci_fini_commands(hc);
	xhci_rh_fini(&hc->rh);
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	usb_log_info("Finalized.");
}
747
/** Map a usb_speed_t to its default Protocol Speed ID Value (Table 157). */
unsigned hc_speed_to_psiv(usb_speed_t speed)
{
	assert(speed < ARRAY_SIZE(usb_speed_to_psiv));
	return usb_speed_to_psiv[speed];
}
753
/**
 * Ring a xHC Doorbell. Implements section 4.7.
 *
 * @param doorbell Doorbell index (0 = command ring, 1.. = device slots).
 * @param target DB Target field; only the low 8 bits are written.
 */
void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
{
	assert(hc);
	uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
	pio_write_32(&hc->db_arry[doorbell], v);
	usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
}
[5cbccd4]764
[51c1d500]765/**
766 * Return an index to device context.
767 */
768static uint8_t endpoint_dci(xhci_endpoint_t *ep)
769{
770 return (2 * ep->base.endpoint) +
[3bacee1]771 (ep->base.transfer_type == USB_TRANSFER_CONTROL ||
772 ep->base.direction == USB_DIRECTION_IN);
[51c1d500]773}
774
775void hc_ring_ep_doorbell(xhci_endpoint_t *ep, uint32_t stream_id)
776{
[3bacee1]777 xhci_device_t *const dev = xhci_ep_to_dev(ep);
778 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]779 const uint8_t dci = endpoint_dci(ep);
780 const uint32_t target = (stream_id << 16) | (dci & 0x1ff);
781 hc_ring_doorbell(hc, dev->slot_id, target);
782}
783
/**
 * Issue an Enable Slot command. Allocate memory for the slot and fill the
 * DCBAA with the newly created slot.
 *
 * On success, dev->slot_id is set and the DCBAA entry points at the
 * zeroed device context; on failure, the context buffer is freed again.
 */
errno_t hc_enable_slot(xhci_device_t *dev)
{
	errno_t err;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);

	/* Prepare memory for the context */
	if ((err = dma_buffer_alloc(&dev->dev_ctx, XHCI_DEVICE_CTX_SIZE(hc))))
		return err;
	memset(dev->dev_ctx.virt, 0, XHCI_DEVICE_CTX_SIZE(hc));

	/* Get the slot number */
	xhci_cmd_t cmd;
	xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

	err = xhci_cmd_sync(hc, &cmd);

	/* Link them together */
	if (err == EOK) {
		dev->slot_id = cmd.slot_id;
		hc->dcbaa[dev->slot_id] =
		    host2xhci(64, dma_buffer_phys_base(&dev->dev_ctx));
	}

	xhci_cmd_fini(&cmd);

	if (err)
		dma_buffer_free(&dev->dev_ctx);

	return err;
}
818
/**
 * Issue a Disable Slot command for a slot occupied by device.
 * Frees the device context.
 *
 * On success, the DCBAA entry is cleared and dev->slot_id reset to 0.
 */
errno_t hc_disable_slot(xhci_device_t *dev)
{
	errno_t err;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
	xhci_cmd_t cmd;

	xhci_cmd_init(&cmd, XHCI_CMD_DISABLE_SLOT);
	cmd.slot_id = dev->slot_id;
	err = xhci_cmd_sync(hc, &cmd);
	xhci_cmd_fini(&cmd);
	if (err != EOK)
		return err;

	/* Free the device context. */
	hc->dcbaa[dev->slot_id] = 0;
	dma_buffer_free(&dev->dev_ctx);

	/* Mark the slot as invalid. */
	dev->slot_id = 0;

	return EOK;
}
845
/**
 * Prepare an empty Endpoint Input Context inside a dma buffer.
 *
 * Allocates and zeroes the input context, sets the A0 (slot) add flag and
 * fills in the slot context. The caller owns @p dma_buf on success.
 */
static errno_t create_configure_ep_input_ctx(xhci_device_t *dev, dma_buffer_t *dma_buf)
{
	const xhci_hc_t *hc = bus_to_hc(dev->base.bus);
	const errno_t err = dma_buffer_alloc(dma_buf, XHCI_INPUT_CTX_SIZE(hc));
	if (err)
		return err;

	xhci_input_ctx_t *ictx = dma_buf->virt;
	memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));

	// Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
	XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 0);
	xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
	xhci_setup_slot_context(dev, slot_ctx);

	return EOK;
}
866
/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * @param dev Device to assing an address (unconfigured yet)
 *
 * On success, dev->base.address holds the address the HC assigned.
 */
errno_t hc_address_device(xhci_device_t *dev)
{
	errno_t err = ENOMEM;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
	xhci_endpoint_t *ep0 = xhci_endpoint_get(dev->base.endpoints[0]);

	/*
	 * Although we have the precise PSIV value on devices of tier 1,
	 * we have to rely on reverse mapping on others.
	 */
	if (!usb_speed_to_psiv[dev->base.speed]) {
		usb_log_error("Device reported an USB speed (%s) that cannot be mapped "
		    "to HC port speed.", usb_str_speed(dev->base.speed));
		return EINVAL;
	}

	/* Issue configure endpoint command (sec 4.3.5). */
	dma_buffer_t ictx_dma_buf;
	if ((err = create_configure_ep_input_ctx(dev, &ictx_dma_buf)))
		return err;
	xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

	/* Copy endpoint 0 context and set A1 flag. */
	XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 1);
	xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, 1);
	xhci_setup_endpoint_context(ep0, ep_ctx);

	/* Address device needs Ctx entries set to 1 only */
	xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
	XHCI_SLOT_CTX_ENTRIES_SET(*slot_ctx, 1);

	/* Issue Address Device command. */
	xhci_cmd_t cmd;
	xhci_cmd_init(&cmd, XHCI_CMD_ADDRESS_DEVICE);
	cmd.slot_id = dev->slot_id;
	cmd.input_ctx = ictx_dma_buf;
	err = xhci_cmd_sync(hc, &cmd);
	xhci_cmd_fini(&cmd);
	if (err != EOK)
		return err;

	/* Read back the address the HC assigned from the output context. */
	xhci_device_ctx_t *device_ctx = dev->dev_ctx.virt;
	dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(*XHCI_GET_SLOT_CTX(device_ctx, hc));
	usb_log_debug("Obtained USB address: %d.", dev->base.address);

	return EOK;
}
919
[eb928c4]920/**
921 * Issue a Configure Device command for a device in slot.
922 *
923 * @param slot_id Slot ID assigned to the device.
924 */
[45457265]925errno_t hc_configure_device(xhci_device_t *dev)
[b724494]926{
[3bacee1]927 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]928 xhci_cmd_t cmd;
[a4e7e6e1]929
[b724494]930 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]931 dma_buffer_t ictx_dma_buf;
[e2172284]932 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
933 if (err != EOK)
[c3d926f3]934 return err;
[b724494]935
[e2172284]936 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
937 cmd.slot_id = dev->slot_id;
938 cmd.input_ctx = ictx_dma_buf;
939 err = xhci_cmd_sync(hc, &cmd);
940 xhci_cmd_fini(&cmd);
941
942 return err;
[b724494]943}
944
[eb928c4]945/**
946 * Issue a Deconfigure Device command for a device in slot.
947 *
[a4e7e6e1]948 * @param dev The owner of the device
[eb928c4]949 */
[45457265]950errno_t hc_deconfigure_device(xhci_device_t *dev)
[b724494]951{
[3bacee1]952 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]953 xhci_cmd_t cmd;
954 errno_t err;
[a4e7e6e1]955
[19f0048]956 if (hc_is_broken(hc))
957 return EOK;
958
[b724494]959 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
[e2172284]960 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
961 cmd.slot_id = dev->slot_id;
962 cmd.deconfigure = true;
963
964 err = xhci_cmd_sync(hc, &cmd);
965 xhci_cmd_fini(&cmd);
966
967 return err;
[b724494]968}
969
[eb928c4]970/**
971 * Instruct xHC to add an endpoint with supplied endpoint context.
972 *
[a4e7e6e1]973 * @param dev The owner of the device
974 * @param ep_idx Endpoint DCI in question
[eb928c4]975 * @param ep_ctx Endpoint context of the endpoint
976 */
[45457265]977errno_t hc_add_endpoint(xhci_endpoint_t *ep)
[b724494]978{
[3bacee1]979 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]980 const unsigned dci = endpoint_dci(ep);
[e2172284]981 xhci_cmd_t cmd;
[51c1d500]982
[b724494]983 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]984 dma_buffer_t ictx_dma_buf;
[e2172284]985 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
986 if (err != EOK)
[c3d926f3]987 return err;
[b724494]988
[b80c1ab]989 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[001778c]990
[3bacee1]991 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]992 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[7ec7b7e]993
[51c1d500]994 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
995 xhci_setup_endpoint_context(ep, ep_ctx);
[7ec7b7e]996
[e2172284]997 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
998 cmd.slot_id = dev->slot_id;
999 cmd.input_ctx = ictx_dma_buf;
1000 err = xhci_cmd_sync(hc, &cmd);
1001 xhci_cmd_fini(&cmd);
1002
1003 return err;
[b724494]1004}
1005
[eb928c4]1006/**
1007 * Instruct xHC to drop an endpoint.
1008 *
[a4e7e6e1]1009 * @param dev The owner of the endpoint
1010 * @param ep_idx Endpoint DCI in question
[eb928c4]1011 */
[45457265]1012errno_t hc_drop_endpoint(xhci_endpoint_t *ep)
[b724494]1013{
[3bacee1]1014 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1015 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]1016 const unsigned dci = endpoint_dci(ep);
[e2172284]1017 xhci_cmd_t cmd;
[51c1d500]1018
[19f0048]1019 if (hc_is_broken(hc))
1020 return EOK;
1021
[b724494]1022 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]1023 dma_buffer_t ictx_dma_buf;
[e2172284]1024 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
1025 if (err != EOK)
[c3d926f3]1026 return err;
[b724494]1027
[b80c1ab]1028 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[51c1d500]1029 XHCI_INPUT_CTRL_CTX_DROP_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[b724494]1030
[e2172284]1031 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
1032 cmd.slot_id = dev->slot_id;
1033 cmd.input_ctx = ictx_dma_buf;
1034 err = xhci_cmd_sync(hc, &cmd);
1035 xhci_cmd_fini(&cmd);
1036
1037 return err;
[b724494]1038}
1039
[eb928c4]1040/**
1041 * Instruct xHC to update information about an endpoint, using supplied
1042 * endpoint context.
1043 *
[a4e7e6e1]1044 * @param dev The owner of the endpoint
1045 * @param ep_idx Endpoint DCI in question
[eb928c4]1046 * @param ep_ctx Endpoint context of the endpoint
1047 */
[45457265]1048errno_t hc_update_endpoint(xhci_endpoint_t *ep)
[306a36d]1049{
[3bacee1]1050 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1051 const unsigned dci = endpoint_dci(ep);
[e2172284]1052 xhci_cmd_t cmd;
[51c1d500]1053
[306a36d]1054 dma_buffer_t ictx_dma_buf;
[3bacee1]1055 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[7ec7b7e]1056
[e2172284]1057 errno_t err = dma_buffer_alloc(&ictx_dma_buf, XHCI_INPUT_CTX_SIZE(hc));
1058 if (err != EOK)
[306a36d]1059 return err;
1060
1061 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[7ec7b7e]1062 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
[306a36d]1063
[51c1d500]1064 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
1065 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
1066 xhci_setup_endpoint_context(ep, ep_ctx);
[306a36d]1067
[e2172284]1068 xhci_cmd_init(&cmd, XHCI_CMD_EVALUATE_CONTEXT);
1069 cmd.slot_id = dev->slot_id;
1070 cmd.input_ctx = ictx_dma_buf;
1071 err = xhci_cmd_sync(hc, &cmd);
1072 xhci_cmd_fini(&cmd);
1073
1074 return err;
[306a36d]1075}
1076
[30fc56f]1077/**
1078 * Instruct xHC to stop running a transfer ring on an endpoint.
1079 *
[a4e7e6e1]1080 * @param dev The owner of the endpoint
1081 * @param ep_idx Endpoint DCI in question
[30fc56f]1082 */
[45457265]1083errno_t hc_stop_endpoint(xhci_endpoint_t *ep)
[30fc56f]1084{
[3bacee1]1085 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1086 const unsigned dci = endpoint_dci(ep);
[3bacee1]1087 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]1088 xhci_cmd_t cmd;
1089 errno_t err;
[19f0048]1090
1091 if (hc_is_broken(hc))
1092 return EOK;
1093
[e2172284]1094 xhci_cmd_init(&cmd, XHCI_CMD_STOP_ENDPOINT);
1095 cmd.slot_id = dev->slot_id;
1096 cmd.endpoint_id = dci;
1097 err = xhci_cmd_sync(hc, &cmd);
1098 xhci_cmd_fini(&cmd);
1099
1100 return err;
[30fc56f]1101}
1102
[feabe163]1103/**
1104 * Instruct xHC to reset halted endpoint.
1105 *
[a4e7e6e1]1106 * @param dev The owner of the endpoint
1107 * @param ep_idx Endpoint DCI in question
[feabe163]1108 */
[45457265]1109errno_t hc_reset_endpoint(xhci_endpoint_t *ep)
[feabe163]1110{
[3bacee1]1111 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1112 const unsigned dci = endpoint_dci(ep);
[3bacee1]1113 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]1114 xhci_cmd_t cmd;
1115 errno_t err;
1116
1117 xhci_cmd_init(&cmd, XHCI_CMD_RESET_ENDPOINT);
1118 cmd.slot_id = dev->slot_id;
1119 cmd.endpoint_id = dci;
1120 err = xhci_cmd_sync(hc, &cmd);
1121 xhci_cmd_fini(&cmd);
1122
1123 return err;
[51c1d500]1124}
1125
1126/**
1127 * Reset a ring position in both software and hardware.
1128 *
1129 * @param dev The owner of the endpoint
1130 */
[45457265]1131errno_t hc_reset_ring(xhci_endpoint_t *ep, uint32_t stream_id)
[51c1d500]1132{
[3bacee1]1133 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1134 const unsigned dci = endpoint_dci(ep);
1135 uintptr_t addr;
[e2172284]1136 xhci_cmd_t cmd;
1137 errno_t err;
[51c1d500]1138
1139 xhci_trb_ring_t *ring = xhci_endpoint_get_ring(ep, stream_id);
1140 xhci_trb_ring_reset_dequeue_state(ring, &addr);
1141
[3bacee1]1142 xhci_hc_t *const hc = bus_to_hc(endpoint_get_bus(&ep->base));
[e2172284]1143
1144 xhci_cmd_init(&cmd, XHCI_CMD_SET_TR_DEQUEUE_POINTER);
1145 cmd.slot_id = dev->slot_id;
1146 cmd.endpoint_id = dci;
1147 cmd.stream_id = stream_id;
1148 cmd.dequeue_ptr = addr;
1149 err = xhci_cmd_sync(hc, &cmd);
1150 xhci_cmd_fini(&cmd);
1151
1152 return err;
[feabe163]1153}
1154
[5cbccd4]1155/**
1156 * @}
1157 */
Note: See TracBrowser for help on using the repository browser.