source: mainline/uspace/drv/bus/usb/xhci/hc.c

Last change on this file was 8300c72, checked in by Jiri Svoboda <jiri@…>, 4 months ago

Quiesce devices before proceeding with shutdown.

Only implemented for e1k, uhci and xhci.

  • Property mode set to 100644
File size: 31.1 KB
RevLine 
[5cbccd4]1/*
[8300c72]2 * Copyright (c) 2025 Jiri Svoboda
[e0a5d4c]3 * Copyright (c) 2018 Ondrej Hlavaty, Petr Manek, Jaroslav Jindrak, Jan Hrach, Michal Staruch
[5cbccd4]4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup drvusbxhci
31 * @{
32 */
33/** @file
34 * @brief The host controller data bookkeeping.
35 */
36
37#include <errno.h>
[cb89430]38#include <str_error.h>
[5cbccd4]39#include <usb/debug.h>
[5fd9c30]40#include <usb/host/endpoint.h>
[5cbccd4]41#include "debug.h"
42#include "hc.h"
[7bd99bf]43#include "rh.h"
[cb89430]44#include "hw_struct/trb.h"
[0206d35]45#include "hw_struct/context.h"
46#include "endpoint.h"
[e9e24f2]47#include "transfers.h"
48#include "trb_ring.h"
[5cbccd4]49
[91ca111]50/**
51 * Default USB Speed ID mapping: Table 157
52 */
53#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
[f668d60]54#define PORT_SPEED(usb, mjr, psie, psim) { \
[816335c]55 .name = "USB ", \
56 .major = mjr, \
57 .minor = 0, \
[f668d60]58 .usb_speed = USB_SPEED_##usb, \
[91ca111]59 .rx_bps = PSI_TO_BPS(psie, psim), \
60 .tx_bps = PSI_TO_BPS(psie, psim) \
61}
[a75f9cbc]62
63static const xhci_port_speed_t default_psiv_to_port_speed [] = {
64 [1] = PORT_SPEED(FULL, 2, 2, 12),
65 [2] = PORT_SPEED(LOW, 2, 1, 1500),
66 [3] = PORT_SPEED(HIGH, 2, 2, 480),
67 [4] = PORT_SPEED(SUPER, 3, 3, 5),
68};
69
70static const unsigned usb_speed_to_psiv [] = {
71 [USB_SPEED_FULL] = 1,
72 [USB_SPEED_LOW] = 2,
73 [USB_SPEED_HIGH] = 3,
74 [USB_SPEED_SUPER] = 4,
75};
[91ca111]76
/**
 * Walk the list of extended capabilities.
 *
 * The most interesting thing hidden in extended capabilities is the mapping of
 * ports to protocol versions and speeds (the Supported Protocol capability,
 * xHCI spec sec. 7.2). Also records the USB Legacy Support capability for the
 * later BIOS handoff in hc_claim().
 *
 * @param hc Host controller; hc->xecp and hc->rh must be valid.
 * @return EOK on success, ENOTSUP for a non-USB protocol capability,
 *         EINVAL for an implied protocol with an unknown major version.
 */
static errno_t hc_parse_ec(xhci_hc_t *hc)
{
	unsigned psic, major, minor;
	xhci_sp_name_t name;

	/* Table indexed by PSIV, filled in as capabilities are parsed. */
	xhci_port_speed_t *speeds = hc->speeds;

	for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
		xhci_dump_extcap(ec);
		switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
		case XHCI_EC_USB_LEGACY:
			/* Remembered for the BIOS/OS semaphore handoff. */
			assert(hc->legsup == NULL);
			hc->legsup = (xhci_legsup_t *) ec;
			break;
		case XHCI_EC_SUPPORTED_PROTOCOL:
			psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
			major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
			minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
			name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));

			if (name.packed != xhci_name_usb.packed) {
				/**
				 * The detection of such protocol would work,
				 * but the rest of the implementation is made
				 * for the USB protocol only.
				 */
				usb_log_error("Unknown protocol %.4s.", name.str);
				return ENOTSUP;
			}

			/* Assign this capability's port range to its protocol version. */
			unsigned offset = XHCI_REG_RD(ec, XHCI_EC_SP_CP_OFF);
			unsigned count = XHCI_REG_RD(ec, XHCI_EC_SP_CP_COUNT);
			xhci_rh_set_ports_protocol(&hc->rh, offset, count, major);

			// "Implied" speed
			if (psic == 0) {
				assert(minor == 0);

				if (major == 2) {
					speeds[1] = default_psiv_to_port_speed[1];
					speeds[2] = default_psiv_to_port_speed[2];
					speeds[3] = default_psiv_to_port_speed[3];
				} else if (major == 3) {
					speeds[4] = default_psiv_to_port_speed[4];
				} else {
					return EINVAL;
				}

				usb_log_debug("Implied speed of USB %u.0 set up.", major);
			} else {
				for (unsigned i = 0; i < psic; i++) {
					xhci_psi_t *psi = xhci_extcap_psi(ec, i);
					/*
					 * NOTE(review): `sim` is read from the PSIM field but
					 * compared below against XHCI_PSI_PLT_* constants —
					 * this looks like it should read the PLT (link type)
					 * field instead; verify against the register layout.
					 */
					unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
					unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
					unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
					unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
					uint64_t bps = PSI_TO_BPS(psie, psim);

					/*
					 * Speed is not implied, but using one of default PSIV. This
					 * is not clearly stated in xHCI spec. There is a clear
					 * intention to allow xHCI to specify its own speed
					 * parameters, but throughout the document, they used fixed
					 * values for e.g. High-speed (3), without stating the
					 * controller shall have implied default speeds - and for
					 * instance Intel controllers do not. So let's check if the
					 * values match and if so, accept the implied USB speed too.
					 *
					 * The main reason we need this is the usb_speed to have
					 * mapping also for devices connected to hubs.
					 */
					if (psiv < ARRAY_SIZE(default_psiv_to_port_speed) &&
					    default_psiv_to_port_speed[psiv].major == major &&
					    default_psiv_to_port_speed[psiv].minor == minor &&
					    default_psiv_to_port_speed[psiv].rx_bps == bps &&
					    default_psiv_to_port_speed[psiv].tx_bps == bps) {
						speeds[psiv] = default_psiv_to_port_speed[psiv];
						usb_log_debug("Assumed default %s speed of USB %u.",
						    usb_str_speed(speeds[psiv].usb_speed), major);
						continue;
					}

					// Custom speed
					speeds[psiv].major = major;
					speeds[psiv].minor = minor;
					str_ncpy(speeds[psiv].name, 4, name.str, 4);
					speeds[psiv].usb_speed = USB_SPEED_MAX;

					if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
						speeds[psiv].rx_bps = bps;
					if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
						speeds[psiv].tx_bps = bps;
						usb_log_debug("Speed %u set up for bps %" PRIu64
						    " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps,
						    speeds[psiv].tx_bps);
					}
				}
			}
		}
	}
	return EOK;
}
185
[eb928c4]186/**
187 * Initialize MMIO spaces of xHC.
188 */
[45457265]189errno_t hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
[e4d7363]190{
[45457265]191 errno_t err;
[e4d7363]192
193 if (hw_res->mem_ranges.count != 1) {
194 usb_log_error("Unexpected MMIO area, bailing out.");
195 return EINVAL;
196 }
197
198 hc->mmio_range = hw_res->mem_ranges.ranges[0];
199
[a1732929]200 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
[e4d7363]201 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
202
203 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
204 return EOVERFLOW;
205
206 void *base;
207 if ((err = pio_enable_range(&hc->mmio_range, &base)))
208 return err;
209
[20eaa82]210 hc->reg_base = base;
[e4d7363]211 hc->cap_regs = (xhci_cap_regs_t *) base;
212 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
213 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
214 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
215
[91ca111]216 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
217 if (xec_offset > 0)
218 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
219
[defaab2]220 usb_log_debug("Initialized MMIO reg areas:");
221 usb_log_debug("\tCapability regs: %p", hc->cap_regs);
222 usb_log_debug("\tOperational regs: %p", hc->op_regs);
223 usb_log_debug("\tRuntime regs: %p", hc->rt_regs);
224 usb_log_debug("\tDoorbell array base: %p", hc->db_arry);
[e4d7363]225
226 xhci_dump_cap_regs(hc->cap_regs);
227
228 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
[7ec7b7e]229 hc->csz = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_CSZ);
[e4d7363]230 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
[94e9c29]231
[bd41ac52]232 struct timespec ts;
233 getuptime(&ts);
234 hc->wrap_time = SEC2USEC(ts.tv_sec) + NSEC2USEC(ts.tv_nsec);
[665368c]235 hc->wrap_count = 0;
[94e9c29]236
[708d8fcd]237 unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
238 hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);
[e4d7363]239
[a9fcd73]240 if ((err = xhci_rh_init(&hc->rh, hc)))
241 goto err_pio;
242
243 if ((err = hc_parse_ec(hc)))
244 goto err_rh;
[91ca111]245
[e4d7363]246 return EOK;
[a9fcd73]247
248err_rh:
249 xhci_rh_fini(&hc->rh);
250err_pio:
251 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
252 return err;
[e4d7363]253}
254
[2c0564c]255static int event_worker(void *arg);
256
/**
 * Initialize structures kept in allocated memory.
 *
 * Allocates the DCBAA (one 64-bit entry per slot plus the scratchpad entry),
 * the event worker fibril, the event ring, scratchpad buffers, the command
 * subsystem, the bus structure and the software event ring. On any failure,
 * everything allocated so far is torn down in reverse order via the goto
 * cleanup chain below.
 *
 * @param hc Host controller with max_slots already read from capabilities.
 * @param device DDF device backing this controller.
 * @return EOK on success, ENOMEM or a propagated error code otherwise.
 */
errno_t hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
	errno_t err = ENOMEM;

	/* DCBAA entry 0 is reserved (scratchpad), hence 1 + max_slots. */
	if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
		return ENOMEM;
	hc->dcbaa = hc->dcbaa_dma.virt;

	hc->event_worker = joinable_fibril_create(&event_worker, hc);
	if (!hc->event_worker)
		goto err_dcbaa;

	if ((err = xhci_event_ring_init(&hc->event_ring, 1)))
		goto err_worker;

	if ((err = xhci_scratchpad_alloc(hc)))
		goto err_event_ring;

	if ((err = xhci_init_commands(hc)))
		goto err_scratch;

	if ((err = xhci_bus_init(&hc->bus, hc)))
		goto err_cmd;

	/* Software ring deferring event handling out of the interrupt path. */
	xhci_sw_ring_init(&hc->sw_ring, PAGE_SIZE / sizeof(xhci_trb_t));

	return EOK;

err_cmd:
	xhci_fini_commands(hc);
err_scratch:
	xhci_scratchpad_free(hc);
err_event_ring:
	xhci_event_ring_fini(&hc->event_ring);
err_worker:
	joinable_fibril_destroy(hc->event_worker);
err_dcbaa:
	hc->dcbaa = NULL;
	dma_buffer_free(&hc->dcbaa_dma);
	return err;
}
301
/*
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 * status = read(usbsts)
 * assert status
 * assert ip
 * accept (passing status)
 * }
 * decline
 */
/*
 * Template of kernel-executed interrupt commands. The NULL addresses and
 * zero values are placeholders patched with real MMIO addresses and masks
 * in hc_irq_code_gen() before the code is registered.
 */
static const irq_cmd_t irq_commands[] = {
	{
		/* arg3 := IMAN of interrupter 0. */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		/* arg4 := arg3 & IP bit — is the interrupt ours? */
		.cmd = CMD_AND,
		.srcarg = 3,
		.dstarg = 4,
		.value = 0 /* host2xhci(32, 1) */
	},
	{
		/* If IP is clear, skip to the CMD_DECLINE at index 8. */
		.cmd = CMD_PREDICATE,
		.srcarg = 4,
		.value = 5
	},
	{
		/* arg1 := USBSTS (passed to the top-half as `status`). */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 1,
		.addr = NULL /* usbsts */
	},
	{
		/* arg2 := only the RW1C bits we acknowledge. */
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
	},
	{
		/* Clear the acknowledged status bits. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 2,
		.addr = NULL /* usbsts */
	},
	{
		/* Write IMAN back, clearing IP (RW1C). */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};
358
[cb89430]359/**
360 * Generates code to accept interrupts. The xHCI is designed primarily for
361 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
362 * (except 0) are disabled.
363 */
[45457265]364errno_t hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res, int *irq)
[cb89430]365{
366 assert(code);
367 assert(hw_res);
368
[e4d7363]369 if (hw_res->irqs.count != 1) {
[cb89430]370 usb_log_info("Unexpected HW resources to enable interrupts.");
371 return EINVAL;
372 }
373
374 code->ranges = malloc(sizeof(irq_pio_range_t));
375 if (code->ranges == NULL)
376 return ENOMEM;
377
378 code->cmds = malloc(sizeof(irq_commands));
379 if (code->cmds == NULL) {
380 free(code->ranges);
381 return ENOMEM;
382 }
383
384 code->rangecount = 1;
385 code->ranges[0] = (irq_pio_range_t) {
[3bacee1]386 .base = RNGABS(hc->mmio_range),
387 .size = RNGSZ(hc->mmio_range),
[cb89430]388 };
389
390 code->cmdcount = ARRAY_SIZE(irq_commands);
391 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
392
[3bacee1]393 void *intr0_iman = RNGABSPTR(hc->mmio_range) +
394 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) +
395 offsetof(xhci_rt_regs_t, ir[0]);
396 void *usbsts = RNGABSPTR(hc->mmio_range) +
397 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) +
398 offsetof(xhci_op_regs_t, usbsts);
[8033f89]399
[cb89430]400 code->cmds[0].addr = intr0_iman;
401 code->cmds[1].value = host2xhci(32, 1);
[ab5a0830]402 code->cmds[3].addr = usbsts;
403 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
404 code->cmds[5].addr = usbsts;
405 code->cmds[6].addr = intr0_iman;
[cb89430]406
[eb862fd]407 *irq = hw_res->irqs.irqs[0];
408 return EOK;
[cb89430]409}
410
/**
 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
 *
 * Sets the OS-owned semaphore and polls until the BIOS releases its
 * semaphore, or the timeout expires.
 *
 * @param hc Host controller (hc->legsup set by hc_parse_ec, or NULL).
 * @param dev DDF device (unused here).
 * @return EOK when claimed (or no legacy capability exists), ETIMEOUT if the
 *         controller stayed not-ready, EIO on inconsistent semaphore state,
 *         ENOTSUP when the BIOS never releases the hold.
 */
errno_t hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
{
	/* No legacy support capability, the controller is solely for us */
	if (!hc->legsup)
		return EOK;

	/* Wait for the Controller Not Ready flag to clear first. */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
		return ETIMEOUT;

	usb_log_debug("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
	/* Request ownership, then poll for the BIOS to let go. */
	XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
	for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
		usb_log_debug("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
		if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
			/* BIOS released; sanity-check that our semaphore stuck. */
			return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
		}
		fibril_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
	}
	usb_log_error("BIOS did not release XHCI legacy hold!");

	return ENOTSUP;
}
438
/**
 * Ask the xHC to reset its state. Implements sequence
 * from xHCI spec: halt the controller first, then issue Host Controller
 * Reset and wait for it to self-clear.
 *
 * @param hc Host controller to reset.
 * @return EOK on success, ETIMEOUT if any wait expires.
 */
static errno_t hc_reset(xhci_hc_t *hc)
{
	/* The controller must be ready before we may touch it. */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
		return ETIMEOUT;

	/* Stop the HC: set R/S to 0 */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/* Wait until the HC is halted - it shall take at most 16 ms */
	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
	    XHCI_REG_MASK(XHCI_OP_HCH)))
		return ETIMEOUT;

	/* Reset */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

	/* Wait until the reset is complete */
	if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
		return ETIMEOUT;

	return EOK;
}
464
/**
 * Initialize the HC: section 4.2
 *
 * Resets the controller, programs the DCBAA, command ring and event ring
 * addresses, enables interrupts if an IRQ was attached, starts the worker
 * fibrils, and finally sets Run/Stop.
 *
 * @param hc Host controller with memory structures already initialized.
 * @return EOK on success, error code from hc_reset or ETIMEOUT otherwise.
 */
errno_t hc_start(xhci_hc_t *hc)
{
	errno_t err;

	if ((err = hc_reset(hc)))
		return err;

	if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
		return ETIMEOUT;

	/* Point the controller at the device context base address array. */
	uintptr_t dcbaa_phys = dma_buffer_phys_base(&hc->dcbaa_dma);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, dcbaa_phys);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

	/* Program the command ring pointer (with cycle state). */
	uintptr_t crcr;
	xhci_trb_ring_reset_dequeue_state(&hc->cr.trb_ring, &crcr);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR, crcr);

	/* Enable MFINDEX Wrap events for isochronous timekeeping. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);

	xhci_event_ring_reset(&hc->event_ring);

	/* Hook up interrupter 0 with the event ring. */
	xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);

	const uintptr_t erstba_phys = dma_buffer_phys_base(&hc->event_ring.erst);
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, erstba_phys);

	/* Enable interrupts only if we actually registered an IRQ handler. */
	if (cap_handle_valid(hc->base.irq_handle)) {
		XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
		XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
	}

	XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

	/* Software machinery must be running before the HC generates events. */
	xhci_sw_ring_restart(&hc->sw_ring);
	joinable_fibril_start(hc->event_worker);

	xhci_start_command_ring(hc);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

	/* RH needs to access port states on startup */
	xhci_rh_start(&hc->rh);

	return EOK;
}
516
/**
 * Stop the controller and wind down the software state: halt the hardware,
 * abort pending commands, stop the event worker fibril and disconnect all
 * root hub devices. Counterpart of hc_start().
 *
 * @param hc Host controller to stop.
 */
static void hc_stop(xhci_hc_t *hc)
{
	/* Stop the HC in hardware. */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/*
	 * Wait until the HC is halted - it shall take at most 16 ms.
	 * Note that we ignore the return value here.
	 */
	xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
	    XHCI_REG_MASK(XHCI_OP_HCH));

	/* Make sure commands will not block other fibrils. */
	xhci_nuke_command_ring(hc);

	/* Stop the event worker fibril to restart it */
	xhci_sw_ring_stop(&hc->sw_ring);
	joinable_fibril_join(hc->event_worker);

	/*
	 * Then, disconnect all roothub devices, which shall trigger
	 * disconnection of everything
	 */
	xhci_rh_stop(&hc->rh);
}
542
/**
 * Recover from a Host Controller Error: stop everything, recreate the worker
 * fibrils (they were joined by hc_stop) and start the controller anew.
 *
 * @param hc Host controller to restart.
 */
static void hc_reinitialize(xhci_hc_t *hc)
{
	/* Stop everything. */
	hc_stop(hc);

	usb_log_info("HC stopped. Starting again...");

	/* The worker fibrils need to be started again */
	joinable_fibril_recreate(hc->event_worker);
	joinable_fibril_recreate(hc->rh.event_worker);

	/* Now, the HC shall be stopped and software shall be clean. */
	hc_start(hc);
}
557
558static bool hc_is_broken(xhci_hc_t *hc)
559{
560 const uint32_t usbcmd = XHCI_REG_RD_FIELD(&hc->op_regs->usbcmd, 32);
561 const uint32_t usbsts = XHCI_REG_RD_FIELD(&hc->op_regs->usbsts, 32);
562
[3bacee1]563 return !(usbcmd & XHCI_REG_MASK(XHCI_OP_RS)) ||
564 (usbsts & XHCI_REG_MASK(XHCI_OP_HCE)) ||
565 (usbsts & XHCI_REG_MASK(XHCI_OP_HSE));
[19f0048]566}
567
[ab5a0830]568/**
569 * Used only when polling. Shall supplement the irq_commands.
570 */
[45457265]571errno_t hc_status(bus_t *bus, uint32_t *status)
[5cbccd4]572{
[32fb6bce]573 xhci_hc_t *hc = bus_to_hc(bus);
[ab5a0830]574 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
575 if (ip) {
576 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
577 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
578 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
579
[7c3fb9b]580 /*
581 * interrupt handler expects status from irq_commands, which is
582 * in xhci order.
583 */
[ab5a0830]584 *status = host2xhci(32, *status);
585 }
[62ba2cbe]586
[defaab2]587 usb_log_debug("Polled status: %x", *status);
[cb89430]588 return EOK;
589}
590
[45457265]591static errno_t xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
[665368c]592{
[bd41ac52]593 struct timespec ts;
594 getuptime(&ts);
595 usb_log_debug("Microframe index wrapped (@%lld.%lld, %" PRIu64 " total).",
596 ts.tv_sec, NSEC2USEC(ts.tv_nsec), hc->wrap_count);
597 hc->wrap_time = SEC2USEC(ts.tv_sec) + NSEC2USEC(ts.tv_nsec);
[665368c]598 ++hc->wrap_count;
599 return EOK;
600}
601
/* Signature shared by all TRB event handlers. */
typedef errno_t (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/**
 * These events are handled by separate event handling fibril.
 *
 * Sparse array indexed by TRB type; entries without a handler are NULL.
 */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};

/**
 * These events are handled directly in the interrupt handler, thus they must
 * not block waiting for another interrupt.
 *
 * Also a sparse array indexed by TRB type.
 */
static event_handler event_handlers_fast [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};
619
[45457265]620static errno_t hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb)
[2c0564c]621{
622 const unsigned type = TRB_TYPE(*trb);
623
624 if (type <= ARRAY_SIZE(event_handlers_fast) && event_handlers_fast[type])
625 return event_handlers_fast[type](hc, trb);
626
627 if (type <= ARRAY_SIZE(event_handlers) && event_handlers[type])
628 return xhci_sw_ring_enqueue(&hc->sw_ring, trb);
629
[047fbc8]630 if (type == XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT)
631 return xhci_sw_ring_enqueue(&hc->rh.event_ring, trb);
632
[2c0564c]633 return ENOTSUP;
634}
635
/**
 * Fibril body draining the software event ring. Runs until the ring is
 * stopped (dequeue returns EINTR, see hc_stop).
 *
 * @param arg The xhci_hc_t owning the ring.
 * @return Always 0.
 */
static int event_worker(void *arg)
{
	errno_t err;
	xhci_trb_t trb;
	xhci_hc_t *const hc = arg;
	assert(hc);

	while (xhci_sw_ring_dequeue(&hc->sw_ring, &trb) != EINTR) {
		const unsigned type = TRB_TYPE(trb);

		/*
		 * Only TRB types with a registered handler are enqueued to
		 * this ring (see hc_handle_event), so indexing is safe here.
		 */
		if ((err = event_handlers[type](hc, &trb)))
			usb_log_error("Failed to handle event: %s", str_error(err));
	}

	return 0;
}
652
/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events, that blocks on waiting for subsequent events,
 * we solve this problem by deferring some types of events to separate fibrils.
 *
 * @param hc Host controller owning the ring.
 * @param event_ring Hardware event ring to drain.
 * @param intr Interrupter whose ERDP register tracks our progress.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring,
    xhci_interrupter_regs_t *intr)
{
	errno_t err;

	xhci_trb_t trb;
	/* Remember which fibril is draining the ring (diagnostics/ownership). */
	hc->event_handler = fibril_get_id();

	while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
		if ((err = hc_handle_event(hc, &trb)) != EOK) {
			usb_log_error("Failed to handle event in interrupt: %s", str_error(err));
		}

		/* Advance ERDP after each event so the HC sees our progress. */
		XHCI_REG_WR(intr, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
	}

	hc->event_handler = 0;

	/* Final ERDP write sets the Event Handler Busy bit to clear it (RW1C). */
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
	XHCI_REG_WR(intr, XHCI_INTR_ERDP, erdp);

	usb_log_debug2("Event ring run finished.");
}
683
/**
 * Handle an interrupt request from xHC. Resolve all situations that trigger an
 * interrupt separately.
 *
 * Note that all RW1C bits in USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * @param bus The bus backed by this controller.
 * @param status contents of USBSTS register at the time of the interrupt,
 *               in xhci (little-endian) order as captured by irq_commands.
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	/* irq_commands captured the register raw; convert to host order. */
	status = xhci2host(32, status);

	if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
		/* HSEE is set in hc_start, so a host system error kills the HC. */
		usb_log_error("Host system error occured. Aren't we supposed to be dead already?");
		return;
	}

	if (status & XHCI_REG_MASK(XHCI_OP_HCE)) {
		usb_log_error("Host controller error occured. Reinitializing...");
		hc_reinitialize(hc);
		return;
	}

	if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
		usb_log_debug2("Event interrupt, running the event ring.");
		hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
		status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
		usb_log_error("Save/Restore error occured. WTF, "
		    "S/R mechanism not implemented!");
		status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
	}

	/* According to Note on p. 302, we may safely ignore the PCD bit. */
	status &= ~XHCI_REG_MASK(XHCI_OP_PCD);

	/* Anything left over is a status bit we do not know how to handle. */
	if (status) {
		usb_log_error("Non-zero status after interrupt handling (%08x) "
		    " - missing something?", status);
	}
}
729
/**
 * Tear down all in-memory structures.
 *
 * Stops the controller first, then releases resources in reverse order of
 * their initialization in hc_init_memory()/hc_init_mmio().
 *
 * @param hc Host controller to finalize.
 */
void hc_fini(xhci_hc_t *hc)
{
	hc_stop(hc);

	xhci_sw_ring_fini(&hc->sw_ring);
	joinable_fibril_destroy(hc->event_worker);
	xhci_bus_fini(&hc->bus);
	xhci_event_ring_fini(&hc->event_ring);
	xhci_scratchpad_free(hc);
	dma_buffer_free(&hc->dcbaa_dma);
	xhci_fini_commands(hc);
	xhci_rh_fini(&hc->rh);
	/* Unmap the MMIO range mapped in hc_init_mmio(). */
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	usb_log_info("Finalized.");
}
748
/**
 * Quiesce host controller.
 *
 * Halts the hardware and winds down software state (via hc_stop) so the
 * system can proceed with shutdown safely.
 *
 * @param hc Host controller to quiesce.
 * @return Always EOK.
 */
errno_t hc_quiesce(xhci_hc_t *hc)
{
	hc_stop(hc);
	usb_log_info("HC quiesced.");
	return EOK;
}
758
[51c1d500]759unsigned hc_speed_to_psiv(usb_speed_t speed)
760{
761 assert(speed < ARRAY_SIZE(usb_speed_to_psiv));
762 return usb_speed_to_psiv[speed];
763}
764
/**
 * Ring a xHC Doorbell. Implements section 4.7.
 *
 * @param hc Host controller owning the doorbell array.
 * @param doorbell Doorbell index (0 = command ring, 1.. = device slots).
 * @param target DB Target field: stream id in the upper bits, DCI low.
 */
void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
{
	assert(hc);
	/* Only the low 8 bits of target are the DB Target field proper. */
	uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
	pio_write_32(&hc->db_arry[doorbell], v);
	usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
}
[5cbccd4]775
[51c1d500]776/**
777 * Return an index to device context.
778 */
779static uint8_t endpoint_dci(xhci_endpoint_t *ep)
780{
781 return (2 * ep->base.endpoint) +
[3bacee1]782 (ep->base.transfer_type == USB_TRANSFER_CONTROL ||
783 ep->base.direction == USB_DIRECTION_IN);
[51c1d500]784}
785
786void hc_ring_ep_doorbell(xhci_endpoint_t *ep, uint32_t stream_id)
787{
[3bacee1]788 xhci_device_t *const dev = xhci_ep_to_dev(ep);
789 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]790 const uint8_t dci = endpoint_dci(ep);
791 const uint32_t target = (stream_id << 16) | (dci & 0x1ff);
792 hc_ring_doorbell(hc, dev->slot_id, target);
793}
794
/**
 * Issue an Enable Slot command. Allocate memory for the slot and fill the
 * DCBAA with the newly created slot.
 *
 * @param dev Device to receive a slot; dev->slot_id is set on success.
 * @return EOK on success; on failure the device context buffer is freed.
 */
errno_t hc_enable_slot(xhci_device_t *dev)
{
	errno_t err;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);

	/* Prepare memory for the context */
	if ((err = dma_buffer_alloc(&dev->dev_ctx, XHCI_DEVICE_CTX_SIZE(hc))))
		return err;
	memset(dev->dev_ctx.virt, 0, XHCI_DEVICE_CTX_SIZE(hc));

	/* Get the slot number */
	xhci_cmd_t cmd;
	xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

	err = xhci_cmd_sync(hc, &cmd);

	/* Link them together */
	if (err == EOK) {
		dev->slot_id = cmd.slot_id;
		/* DCBAA entries are 64-bit physical addresses in xhci order. */
		hc->dcbaa[dev->slot_id] =
		    host2xhci(64, dma_buffer_phys_base(&dev->dev_ctx));
	}

	xhci_cmd_fini(&cmd);

	/* On command failure, do not leak the context buffer. */
	if (err)
		dma_buffer_free(&dev->dev_ctx);

	return err;
}
829
/**
 * Issue a Disable Slot command for a slot occupied by device.
 * Frees the device context.
 *
 * @param dev Device whose slot is to be released; dev->slot_id is zeroed.
 * @return EOK on success, the command's error code otherwise (in which case
 *         the context is left intact).
 */
errno_t hc_disable_slot(xhci_device_t *dev)
{
	errno_t err;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
	xhci_cmd_t cmd;

	xhci_cmd_init(&cmd, XHCI_CMD_DISABLE_SLOT);
	cmd.slot_id = dev->slot_id;
	err = xhci_cmd_sync(hc, &cmd);
	xhci_cmd_fini(&cmd);
	if (err != EOK)
		return err;

	/* Free the device context. */
	hc->dcbaa[dev->slot_id] = 0;
	dma_buffer_free(&dev->dev_ctx);

	/* Mark the slot as invalid. */
	dev->slot_id = 0;

	return EOK;
}
856
[eb928c4]857/**
858 * Prepare an empty Endpoint Input Context inside a dma buffer.
859 */
[45457265]860static errno_t create_configure_ep_input_ctx(xhci_device_t *dev, dma_buffer_t *dma_buf)
[b724494]861{
[3bacee1]862 const xhci_hc_t *hc = bus_to_hc(dev->base.bus);
[45457265]863 const errno_t err = dma_buffer_alloc(dma_buf, XHCI_INPUT_CTX_SIZE(hc));
[b80c1ab]864 if (err)
865 return err;
[b724494]866
[b80c1ab]867 xhci_input_ctx_t *ictx = dma_buf->virt;
[7ec7b7e]868 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
[b724494]869
[e76c0ea]870 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
[7ec7b7e]871 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 0);
872 xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
873 xhci_setup_slot_context(dev, slot_ctx);
[001778c]874
[b724494]875 return EOK;
876}
877
/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * @param dev Device to assing an address (unconfigured yet)
 * @return EOK on success, EINVAL for unmappable speed, or a propagated error.
 */
errno_t hc_address_device(xhci_device_t *dev)
{
	errno_t err = ENOMEM;
	xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
	xhci_endpoint_t *ep0 = xhci_endpoint_get(dev->base.endpoints[0]);

	/*
	 * Although we have the precise PSIV value on devices of tier 1,
	 * we have to rely on reverse mapping on others.
	 */
	if (!usb_speed_to_psiv[dev->base.speed]) {
		usb_log_error("Device reported an USB speed (%s) that cannot be mapped "
		    "to HC port speed.", usb_str_speed(dev->base.speed));
		return EINVAL;
	}

	/* Issue configure endpoint command (sec 4.3.5).  */
	dma_buffer_t ictx_dma_buf;
	if ((err = create_configure_ep_input_ctx(dev, &ictx_dma_buf)))
		return err;
	xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

	/* Copy endpoint 0 context and set A1 flag. */
	XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 1);
	xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, 1);
	xhci_setup_endpoint_context(ep0, ep_ctx);

	/* Address device needs Ctx entries set to 1 only */
	xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
	XHCI_SLOT_CTX_ENTRIES_SET(*slot_ctx, 1);

	/* Issue Address Device command. */
	xhci_cmd_t cmd;
	xhci_cmd_init(&cmd, XHCI_CMD_ADDRESS_DEVICE);
	cmd.slot_id = dev->slot_id;
	cmd.input_ctx = ictx_dma_buf;
	err = xhci_cmd_sync(hc, &cmd);
	xhci_cmd_fini(&cmd);
	if (err != EOK)
		return err;

	/* The HC wrote the assigned address into the output slot context. */
	xhci_device_ctx_t *device_ctx = dev->dev_ctx.virt;
	dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(*XHCI_GET_SLOT_CTX(device_ctx, hc));
	usb_log_debug("Obtained USB address: %d.", dev->base.address);

	return EOK;
}
930
/**
 * Issue a Configure Endpoint command configuring a device in slot.
 *
 * @param dev The device to be configured.
 */
[45457265]936errno_t hc_configure_device(xhci_device_t *dev)
[b724494]937{
[3bacee1]938 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]939 xhci_cmd_t cmd;
[a4e7e6e1]940
[d1582b50]941 /* Issue configure endpoint command (sec 4.3.5).  */
[b80c1ab]942 dma_buffer_t ictx_dma_buf;
[e2172284]943 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
944 if (err != EOK)
[c3d926f3]945 return err;
[b724494]946
[e2172284]947 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
948 cmd.slot_id = dev->slot_id;
949 cmd.input_ctx = ictx_dma_buf;
950 err = xhci_cmd_sync(hc, &cmd);
951 xhci_cmd_fini(&cmd);
952
953 return err;
[b724494]954}
955
/**
 * Issue a Configure Endpoint command with the Deconfigure flag,
 * deconfiguring a device in slot.
 *
 * @param dev The device to be deconfigured.
 */
[45457265]961errno_t hc_deconfigure_device(xhci_device_t *dev)
[b724494]962{
[3bacee1]963 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]964 xhci_cmd_t cmd;
965 errno_t err;
[a4e7e6e1]966
[19f0048]967 if (hc_is_broken(hc))
968 return EOK;
969
[d1582b50]970 /* Issue configure endpoint command (sec 4.3.5) with the DC flag.  */
[e2172284]971 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
972 cmd.slot_id = dev->slot_id;
973 cmd.deconfigure = true;
974
975 err = xhci_cmd_sync(hc, &cmd);
976 xhci_cmd_fini(&cmd);
977
978 return err;
[b724494]979}
980
/**
 * Instruct xHC to add an endpoint.
 *
 * @param ep The endpoint to be added; its DCI and endpoint context
 *           are derived from it.
 */
[45457265]988errno_t hc_add_endpoint(xhci_endpoint_t *ep)
[b724494]989{
[3bacee1]990 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]991 const unsigned dci = endpoint_dci(ep);
[e2172284]992 xhci_cmd_t cmd;
[51c1d500]993
[d1582b50]994 /* Issue configure endpoint command (sec 4.3.5).  */
[b80c1ab]995 dma_buffer_t ictx_dma_buf;
[e2172284]996 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
997 if (err != EOK)
[c3d926f3]998 return err;
[b724494]999
[b80c1ab]1000 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[001778c]1001
[3bacee1]1002 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]1003 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[7ec7b7e]1004
[51c1d500]1005 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
1006 xhci_setup_endpoint_context(ep, ep_ctx);
[7ec7b7e]1007
[e2172284]1008 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
1009 cmd.slot_id = dev->slot_id;
1010 cmd.input_ctx = ictx_dma_buf;
1011 err = xhci_cmd_sync(hc, &cmd);
1012 xhci_cmd_fini(&cmd);
1013
1014 return err;
[b724494]1015}
1016
/**
 * Instruct xHC to drop an endpoint.
 *
 * @param ep The endpoint to be dropped; its DCI is derived from it.
 */
[45457265]1023errno_t hc_drop_endpoint(xhci_endpoint_t *ep)
[b724494]1024{
[3bacee1]1025 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1026 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[51c1d500]1027 const unsigned dci = endpoint_dci(ep);
[e2172284]1028 xhci_cmd_t cmd;
[51c1d500]1029
[19f0048]1030 if (hc_is_broken(hc))
1031 return EOK;
1032
[d1582b50]1033 /* Issue configure endpoint command (sec 4.3.5).  */
[b80c1ab]1034 dma_buffer_t ictx_dma_buf;
[e2172284]1035 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
1036 if (err != EOK)
[c3d926f3]1037 return err;
[b724494]1038
[b80c1ab]1039 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[51c1d500]1040 XHCI_INPUT_CTRL_CTX_DROP_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[b724494]1041
[e2172284]1042 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
1043 cmd.slot_id = dev->slot_id;
1044 cmd.input_ctx = ictx_dma_buf;
1045 err = xhci_cmd_sync(hc, &cmd);
1046 xhci_cmd_fini(&cmd);
1047
1048 return err;
[b724494]1049}
1050
/**
 * Instruct xHC to update information about an endpoint, using its
 * current endpoint context.
 *
 * @param ep The endpoint to be updated; its DCI and endpoint context
 *           are derived from it.
 */
[45457265]1059errno_t hc_update_endpoint(xhci_endpoint_t *ep)
[306a36d]1060{
[3bacee1]1061 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1062 const unsigned dci = endpoint_dci(ep);
[e2172284]1063 xhci_cmd_t cmd;
[51c1d500]1064
[306a36d]1065 dma_buffer_t ictx_dma_buf;
[3bacee1]1066 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[7ec7b7e]1067
[e2172284]1068 errno_t err = dma_buffer_alloc(&ictx_dma_buf, XHCI_INPUT_CTX_SIZE(hc));
1069 if (err != EOK)
[306a36d]1070 return err;
1071
1072 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[7ec7b7e]1073 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
[306a36d]1074
[51c1d500]1075 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
1076 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
1077 xhci_setup_endpoint_context(ep, ep_ctx);
[306a36d]1078
[e2172284]1079 xhci_cmd_init(&cmd, XHCI_CMD_EVALUATE_CONTEXT);
1080 cmd.slot_id = dev->slot_id;
1081 cmd.input_ctx = ictx_dma_buf;
1082 err = xhci_cmd_sync(hc, &cmd);
1083 xhci_cmd_fini(&cmd);
1084
1085 return err;
[306a36d]1086}
1087
/**
 * Instruct xHC to stop running a transfer ring on an endpoint.
 *
 * @param ep The endpoint to be stopped; its DCI is derived from it.
 */
[45457265]1094errno_t hc_stop_endpoint(xhci_endpoint_t *ep)
[30fc56f]1095{
[3bacee1]1096 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1097 const unsigned dci = endpoint_dci(ep);
[3bacee1]1098 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]1099 xhci_cmd_t cmd;
1100 errno_t err;
[19f0048]1101
1102 if (hc_is_broken(hc))
1103 return EOK;
1104
[e2172284]1105 xhci_cmd_init(&cmd, XHCI_CMD_STOP_ENDPOINT);
1106 cmd.slot_id = dev->slot_id;
1107 cmd.endpoint_id = dci;
1108 err = xhci_cmd_sync(hc, &cmd);
1109 xhci_cmd_fini(&cmd);
1110
1111 return err;
[30fc56f]1112}
1113
/**
 * Instruct xHC to reset a halted endpoint.
 *
 * @param ep The endpoint to be reset; its DCI is derived from it.
 */
[45457265]1120errno_t hc_reset_endpoint(xhci_endpoint_t *ep)
[feabe163]1121{
[3bacee1]1122 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1123 const unsigned dci = endpoint_dci(ep);
[3bacee1]1124 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
[e2172284]1125 xhci_cmd_t cmd;
1126 errno_t err;
1127
1128 xhci_cmd_init(&cmd, XHCI_CMD_RESET_ENDPOINT);
1129 cmd.slot_id = dev->slot_id;
1130 cmd.endpoint_id = dci;
1131 err = xhci_cmd_sync(hc, &cmd);
1132 xhci_cmd_fini(&cmd);
1133
1134 return err;
[51c1d500]1135}
1136
1137/**
1138 * Reset a ring position in both software and hardware.
1139 *
1140 * @param dev The owner of the endpoint
1141 */
[45457265]1142errno_t hc_reset_ring(xhci_endpoint_t *ep, uint32_t stream_id)
[51c1d500]1143{
[3bacee1]1144 xhci_device_t *const dev = xhci_ep_to_dev(ep);
[51c1d500]1145 const unsigned dci = endpoint_dci(ep);
1146 uintptr_t addr;
[e2172284]1147 xhci_cmd_t cmd;
1148 errno_t err;
[51c1d500]1149
1150 xhci_trb_ring_t *ring = xhci_endpoint_get_ring(ep, stream_id);
1151 xhci_trb_ring_reset_dequeue_state(ring, &addr);
1152
[3bacee1]1153 xhci_hc_t *const hc = bus_to_hc(endpoint_get_bus(&ep->base));
[e2172284]1154
1155 xhci_cmd_init(&cmd, XHCI_CMD_SET_TR_DEQUEUE_POINTER);
1156 cmd.slot_id = dev->slot_id;
1157 cmd.endpoint_id = dci;
1158 cmd.stream_id = stream_id;
1159 cmd.dequeue_ptr = addr;
1160 err = xhci_cmd_sync(hc, &cmd);
1161 xhci_cmd_fini(&cmd);
1162
1163 return err;
[feabe163]1164}
1165
[5cbccd4]1166/**
1167 * @}
1168 */
Note: See TracBrowser for help on using the repository browser.