source: mainline/uspace/drv/bus/usb/xhci/hc.c@ 2755a622

Last change on this file since 2755a622 was a1732929, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

usb: unified logging

Use logger instead of printf. Logger adds newlines automatically.

[5cbccd4]1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
[cb89430]37#include <str_error.h>
[5cbccd4]38#include <usb/debug.h>
[5fd9c30]39#include <usb/host/endpoint.h>
[5cbccd4]40#include "debug.h"
41#include "hc.h"
[7bd99bf]42#include "rh.h"
[cb89430]43#include "hw_struct/trb.h"
[0206d35]44#include "hw_struct/context.h"
45#include "endpoint.h"
[e9e24f2]46#include "transfers.h"
47#include "trb_ring.h"
[5cbccd4]48
[91ca111]49/**
 50 * Default USB Speed ID mapping: Table 157 of the xHCI specification.
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
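/*
 * Note: the spec's PSIE steps are decimal (bits/s, Kb/s, Mb/s, Gb/s); shifting
 * by 10 bits per step is a close binary approximation, e.g.
 * PSI_TO_BPS(2, 480) = 480 << 20, roughly 480 Mb/s (High-speed).
 */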
[f668d60]53#define PORT_SPEED(usb, mjr, psie, psim) { \
[816335c]54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
[f668d60]57 .usb_speed = USB_SPEED_##usb, \
[91ca111]58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
[a75f9cbc]61
62static const xhci_port_speed_t default_psiv_to_port_speed [] = {
63 [1] = PORT_SPEED(FULL, 2, 2, 12),
64 [2] = PORT_SPEED(LOW, 2, 1, 1500),
65 [3] = PORT_SPEED(HIGH, 2, 2, 480),
66 [4] = PORT_SPEED(SUPER, 3, 3, 5),
67};
68
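/* Reverse of the mapping above: used when only the usb_speed_t of a device is
 * known, e.g. for devices connected behind hubs. */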
69static const unsigned usb_speed_to_psiv [] = {
70 [USB_SPEED_FULL] = 1,
71 [USB_SPEED_LOW] = 2,
72 [USB_SPEED_HIGH] = 3,
73 [USB_SPEED_SUPER] = 4,
74};
[91ca111]75
76/**
77 * Walk the list of extended capabilities.
[eb928c4]78 *
79 * The most interesting thing hidden in extended capabilities is the mapping of
80 * ports to protocol versions and speeds.
[91ca111]81 */
82static int hc_parse_ec(xhci_hc_t *hc)
83{
[816335c]84 unsigned psic, major, minor;
85 xhci_sp_name_t name;
86
[f668d60]87 xhci_port_speed_t *speeds = hc->speeds;
[91ca111]88
89 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
90 xhci_dump_extcap(ec);
91 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
92 case XHCI_EC_USB_LEGACY:
93 assert(hc->legsup == NULL);
94 hc->legsup = (xhci_legsup_t *) ec;
95 break;
96 case XHCI_EC_SUPPORTED_PROTOCOL:
97 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
98 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
[816335c]99 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
100 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
101
102 if (name.packed != xhci_name_usb.packed) {
 103 /**
 104 * Detecting such a protocol would work,
 105 * but the rest of the implementation is made
 106 * for the USB protocol only.
 107 */
108 usb_log_error("Unknown protocol %.4s.", name.str);
109 return ENOTSUP;
110 }
[91ca111]111
112 // "Implied" speed
113 if (psic == 0) {
[816335c]114 assert(minor == 0);
[370a1c8]115
[91ca111]116 if (major == 2) {
[a75f9cbc]117 speeds[1] = default_psiv_to_port_speed[1];
118 speeds[2] = default_psiv_to_port_speed[2];
119 speeds[3] = default_psiv_to_port_speed[3];
[91ca111]120 } else if (major == 3) {
[a75f9cbc]121 speeds[4] = default_psiv_to_port_speed[4];
[91ca111]122 } else {
123 return EINVAL;
124 }
125
[816335c]126 usb_log_debug2("Implied speed of USB %u.0 set up.", major);
[91ca111]127 } else {
128 for (unsigned i = 0; i < psic; i++) {
129 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
 130 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PLT);
131 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
132 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
133 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
[a75f9cbc]134 uint64_t bps = PSI_TO_BPS(psie, psim);
135
 136 /*
 137 * The speed is not implied, but it uses one of the default
 138 * PSIV values. The xHCI spec is not clear here: it clearly
 139 * intends to let an xHC define its own speed parameters, yet
 140 * throughout the document it uses fixed values, e.g. 3 for
 141 * High-speed, without stating that a controller shall also
 142 * provide the implied default speeds - and Intel controllers,
 143 * for instance, do not. So check whether the values match the
 144 * defaults and, if they do, accept the implied USB speed too.
 145 *
 146 * We mainly need this so that usb_speed is also mapped for
 147 * devices connected behind hubs.
 148 */
149 if (psiv < ARRAY_SIZE(default_psiv_to_port_speed)
150 && default_psiv_to_port_speed[psiv].major == major
151 && default_psiv_to_port_speed[psiv].minor == minor
152 && default_psiv_to_port_speed[psiv].rx_bps == bps
153 && default_psiv_to_port_speed[psiv].tx_bps == bps) {
154 speeds[psiv] = default_psiv_to_port_speed[psiv];
155 usb_log_debug2("Assumed default %s speed of USB %u.", usb_str_speed(speeds[psiv].usb_speed), major);
156 continue;
157 }
[91ca111]158
[a75f9cbc]159 // Custom speed
[816335c]160 speeds[psiv].major = major;
161 speeds[psiv].minor = minor;
162 str_ncpy(speeds[psiv].name, 4, name.str, 4);
[f668d60]163 speeds[psiv].usb_speed = USB_SPEED_MAX;
[816335c]164
[91ca111]165 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
[816335c]166 speeds[psiv].rx_bps = bps;
[91ca111]167 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
[816335c]168 speeds[psiv].tx_bps = bps;
169 usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
[91ca111]170 }
171 }
172 }
173 }
174 }
175 return EOK;
176}
177
[eb928c4]178/**
179 * Initialize MMIO spaces of xHC.
180 */
[e4d7363]181int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
182{
183 int err;
184
185 if (hw_res->mem_ranges.count != 1) {
186 usb_log_error("Unexpected MMIO area, bailing out.");
187 return EINVAL;
188 }
189
190 hc->mmio_range = hw_res->mem_ranges.ranges[0];
191
[a1732929]192 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
[e4d7363]193 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
194
195 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
196 return EOVERFLOW;
197
198 void *base;
199 if ((err = pio_enable_range(&hc->mmio_range, &base)))
200 return err;
201
[20eaa82]202 hc->reg_base = base;
[e4d7363]203 hc->cap_regs = (xhci_cap_regs_t *) base;
204 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
205 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
206 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
207
[91ca111]208 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
209 if (xec_offset > 0)
210 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
211
[e4d7363]212 usb_log_debug2("Initialized MMIO reg areas:");
213 usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
214 usb_log_debug2("\tOperational regs: %p", hc->op_regs);
215 usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
216 usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);
217
218 xhci_dump_cap_regs(hc->cap_regs);
219
220 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
221 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
[94e9c29]222
 223 struct timeval tv;
 224 getuptime(&tv);
 225 hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
[665368c]226 hc->wrap_count = 0;
[94e9c29]227
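	/*
	 * HCSPARAMS2.IST: bits [2:0] hold the threshold value, bit [3] selects
	 * whether it is expressed in frames (1) or microframes (0); keep it in
	 * microframes.
	 */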
[708d8fcd]228 unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
 229 hc->ist = (ist & 0x8) ? ((ist & 0x7) << 3) : (ist & 0x7);
[e4d7363]230
[91ca111]231 if ((err = hc_parse_ec(hc))) {
[20eaa82]232 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
[91ca111]233 return err;
234 }
235
[e4d7363]236 return EOK;
237}
238
[eb928c4]239/**
240 * Initialize structures kept in allocated memory.
241 */
[0f6b50f]242int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
[e4d7363]243{
244 int err;
245
[b80c1ab]246 if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
[e4d7363]247 return ENOMEM;
[b80c1ab]248 hc->dcbaa = hc->dcbaa_dma.virt;
[e4d7363]249
[9b2f69e]250 if ((err = xhci_event_ring_init(&hc->event_ring)))
[889146e]251 goto err_dcbaa;
[e4d7363]252
[b19131c5]253 if ((err = xhci_scratchpad_alloc(hc)))
[5a9ae994]254 goto err_event_ring;
[e4d7363]255
[aee352c]256 if ((err = xhci_init_commands(hc)))
[ee28ae66]257 goto err_scratch;
[aee352c]258
[2b61945]259 if ((err = xhci_bus_init(&hc->bus, hc)))
[6832245]260 goto err_cmd;
[e6b9182]261
[63431db2]262 if ((err = xhci_rh_init(&hc->rh, hc)))
[6832245]263 goto err_bus;
[e6b9182]264
[e4d7363]265 return EOK;
266
[6832245]267err_bus:
268 xhci_bus_fini(&hc->bus);
[ee28ae66]269err_cmd:
[d271f78]270 xhci_fini_commands(hc);
[ee28ae66]271err_scratch:
272 xhci_scratchpad_free(hc);
[5a9ae994]273err_event_ring:
[e4d7363]274 xhci_event_ring_fini(&hc->event_ring);
275err_dcbaa:
[b80c1ab]276 hc->dcbaa = NULL;
277 dma_buffer_free(&hc->dcbaa_dma);
[e4d7363]278 return err;
279}
280
[ab5a0830]281/*
282 * Pseudocode:
283 * ip = read(intr[0].iman)
284 * if (ip) {
285 * status = read(usbsts)
286 * assert status
287 * assert ip
288 * accept (passing status)
289 * }
290 * decline
291 */
292static const irq_cmd_t irq_commands[] = {
293 {
294 .cmd = CMD_PIO_READ_32,
295 .dstarg = 3,
296 .addr = NULL /* intr[0].iman */
297 },
298 {
299 .cmd = CMD_AND,
300 .srcarg = 3,
301 .dstarg = 4,
302 .value = 0 /* host2xhci(32, 1) */
303 },
304 {
305 .cmd = CMD_PREDICATE,
306 .srcarg = 4,
307 .value = 5
308 },
309 {
310 .cmd = CMD_PIO_READ_32,
311 .dstarg = 1,
312 .addr = NULL /* usbsts */
313 },
314 {
315 .cmd = CMD_AND,
316 .srcarg = 1,
317 .dstarg = 2,
318 .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
319 },
320 {
321 .cmd = CMD_PIO_WRITE_A_32,
322 .srcarg = 2,
323 .addr = NULL /* usbsts */
324 },
325 {
326 .cmd = CMD_PIO_WRITE_A_32,
[efe9463]327 .srcarg = 3,
[ab5a0830]328 .addr = NULL /* intr[0].iman */
329 },
330 {
331 .cmd = CMD_ACCEPT
332 },
333 {
334 .cmd = CMD_DECLINE
335 }
336};
337
[e4d7363]338
[cb89430]339/**
340 * Generates code to accept interrupts. The xHCI is designed primarily for
341 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
342 * (except 0) are disabled.
343 */
[e4d7363]344int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
[cb89430]345{
346 assert(code);
347 assert(hw_res);
348
[e4d7363]349 if (hw_res->irqs.count != 1) {
[cb89430]350 usb_log_info("Unexpected HW resources to enable interrupts.");
351 return EINVAL;
352 }
353
354 code->ranges = malloc(sizeof(irq_pio_range_t));
355 if (code->ranges == NULL)
356 return ENOMEM;
357
358 code->cmds = malloc(sizeof(irq_commands));
359 if (code->cmds == NULL) {
360 free(code->ranges);
361 return ENOMEM;
362 }
363
364 code->rangecount = 1;
365 code->ranges[0] = (irq_pio_range_t) {
[91ca111]366 .base = RNGABS(hc->mmio_range),
367 .size = RNGSZ(hc->mmio_range),
[cb89430]368 };
369
370 code->cmdcount = ARRAY_SIZE(irq_commands);
371 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
372
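	/* Fill in the register addresses and mask values that the static
	 * irq_commands template leaves blank; they depend on the location of
	 * the MMIO range. */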
[91ca111]373 void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
[ab5a0830]374 void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
[cb89430]375 code->cmds[0].addr = intr0_iman;
376 code->cmds[1].value = host2xhci(32, 1);
[ab5a0830]377 code->cmds[3].addr = usbsts;
378 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
379 code->cmds[5].addr = usbsts;
380 code->cmds[6].addr = intr0_iman;
[cb89430]381
382 return hw_res->irqs.irqs[0];
383}
384
[eb928c4]385/**
386 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
387 */
[e4d7363]388int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
[cb89430]389{
[91ca111]390 /* No legacy support capability, the controller is solely for us */
391 if (!hc->legsup)
392 return EOK;
393
[0e7380f]394 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
395 return ETIMEOUT;
396
[e6b0dba]397 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
[0e7380f]398 XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
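	/* Wait up to XHCI_LEGSUP_BIOS_TIMEOUT_US for the BIOS to release its
	 * semaphore in response. */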
[4d28d86]399 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
[e6b0dba]400 usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
401 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
402 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
403 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
[0e7380f]404 return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
[e6b0dba]405 }
[c9d905f]406 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
[e6b0dba]407 }
[a1732929]408 usb_log_error("BIOS did not release XHCI legacy hold!");
[e6b0dba]409
[91ca111]410 return ENOTSUP;
[cb89430]411}
412
[eb928c4]413/**
[665368c]414 * Ask the xHC to reset its state. Implements the reset steps of the initialization sequence (xHCI section 4.2).
[eb928c4]415 */
[cb89430]416static int hc_reset(xhci_hc_t *hc)
417{
[0e7380f]418 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
419 return ETIMEOUT;
420
[cb89430]421 /* Stop the HC: set R/S to 0 */
422 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);
423
[0e7380f]424 /* Wait until the HC is halted - it shall take at most 16 ms */
425 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH), XHCI_REG_MASK(XHCI_OP_HCH)))
426 return ETIMEOUT;
[cb89430]427
428 /* Reset */
429 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);
430
431 /* Wait until the reset is complete */
[0e7380f]432 if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
433 return ETIMEOUT;
[cb89430]434
435 return EOK;
436}
437
438/**
439 * Initialize the HC: section 4.2
440 */
[e4d7363]441int hc_start(xhci_hc_t *hc, bool irq)
[cb89430]442{
443 int err;
444
445 if ((err = hc_reset(hc)))
446 return err;
447
[0e7380f]448 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
449 return ETIMEOUT;
[cb89430]450
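	/* Program the Device Context Base Address Array pointer and the number
	 * of device slots enabled. */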
[b80c1ab]451 uint64_t dcbaaptr = hc->dcbaa_dma.phys;
[cb89430]452 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
453 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
[15f8079]454 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
[cb89430]455
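	/* Set up the Command Ring Control Register with the command ring's
	 * dequeue pointer and cycle state. */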
[4abb134]456 uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
457 if (hc->cr.trb_ring.pcs)
458 crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
459 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
460 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));
[cb89430]461
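	/* Enable Wrap Events so we are notified whenever MFINDEX wraps. */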
[665368c]462 XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);
463
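	/* Configure interrupter 0: event ring segment table size, dequeue
	 * pointer and segment table base address. */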
[cb89430]464 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
465 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
[b80c1ab]466 uint64_t erdp = hc->event_ring.dequeue_ptr;
[12fba858]467 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
468 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[b80c1ab]469 uint64_t erstptr = hc->event_ring.erst.phys;
[cb89430]470 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
471 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));
472
[665368c]473
[cb89430]474 if (irq) {
475 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
476 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
477 }
478
[503086d8]479 XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);
480
[cb89430]481 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
482
[dcf0597]483 /* The reset changed the status of all ports, and a SW-originated change
 484 * does not trigger an interrupt.
 485 */
[fb154e13]486 for (uint8_t port = 1; port <= hc->rh.max_ports; ++port)
487 xhci_rh_handle_port_change(&hc->rh, port);
[dcf0597]488
[cb89430]489 return EOK;
490}
491
[ab5a0830]492/**
 493 * Used only when polling. Performs in software what the irq_commands do in interrupt mode.
494 */
[32fb6bce]495int hc_status(bus_t *bus, uint32_t *status)
[5cbccd4]496{
[32fb6bce]497 xhci_hc_t *hc = bus_to_hc(bus);
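	/* Mirror the irq_commands: check interrupter 0's IP flag, then read and
	 * acknowledge USBSTS. */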
[ab5a0830]498 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
499 if (ip) {
500 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
501 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
502 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
503
 504 /* The interrupt handler expects the status as produced by irq_commands,
 505 * i.e. in xHCI byte order. */
506 *status = host2xhci(32, *status);
507 }
[62ba2cbe]508
[598733c9]509 usb_log_debug2("Polled status: %x", *status);
[cb89430]510 return EOK;
511}
512
[665368c]513static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
514{
[94e9c29]515 struct timeval tv;
516 getuptime(&tv);
[598733c9]517 usb_log_debug2("Microframe index wrapped (@%lu.%li, %"PRIu64" total).", tv.tv_sec, tv.tv_usec, hc->wrap_count);
[94e9c29]518 hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
[665368c]519 ++hc->wrap_count;
520 return EOK;
521}
522
[fb154e13]523static int handle_port_status_change_event(xhci_hc_t *hc, xhci_trb_t *trb)
524{
525 uint8_t port_id = XHCI_QWORD_EXTRACT(trb->parameter, 31, 24);
526 usb_log_debug("Port status change event detected for port %u.", port_id);
527 xhci_rh_handle_port_change(&hc->rh, port_id);
528 return EOK;
529}
530
[472235a]531typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);
532
533static event_handler event_handlers [] = {
534 [XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
[fb154e13]535 [XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &handle_port_status_change_event,
[e9e24f2]536 [XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
[665368c]537 [XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
[472235a]538};
539
540static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
[7ee5408]541{
[472235a]542 unsigned type = TRB_TYPE(*trb);
543 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
544 return ENOTSUP;
545
546 return event_handlers[type](hc, trb);
[7ee5408]547}
548
[eb928c4]549/**
550 * Dequeue from event ring and handle dequeued events.
551 *
 552 * Some events block while waiting for subsequent events. To cope with that,
 553 * the event TRBs are first copied out of the event ring, EHB is asserted,
 554 * and only then are the events handled.
 555 *
 556 * Whenever event handling blocks, it switches fibrils, and an incoming
 557 * IPC notification spawns a new event-handling fibril for us.
558 */
[cb89430]559static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
[62ba2cbe]560{
[cb89430]561 int err;
[472235a]562
[f3baab1]563 xhci_trb_t trb;
564 hc->event_handler = fibril_get_id();
[e50bdd92]565
[f3baab1]566 while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
567 if ((err = hc_handle_event(hc, &trb, intr)) != EOK) {
568 usb_log_error("Failed to handle event: %s", str_error(err));
[adb4e683]569 }
[f543804]570
571 uint64_t erdp = hc->event_ring.dequeue_ptr;
572 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
573 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[cb89430]574 }
575
[f3baab1]576 hc->event_handler = 0;
577
[adb4e683]578 /* Update the ERDP to make room in the ring. */
[12fba858]579 uint64_t erdp = hc->event_ring.dequeue_ptr;
[f543804]580 erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
[12fba858]581 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
582 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[adb4e683]583
[472235a]584 usb_log_debug2("Event ring run finished.");
[cb89430]585}
586
[eb928c4]587/**
588 * Handle an interrupt request from xHC. Resolve all situations that trigger an
589 * interrupt separately.
590 *
591 * Note that all RW1C bits in USBSTS register are cleared at the time of
592 * handling the interrupt in irq_code. This method is the top-half.
593 *
594 * @param status contents of USBSTS register at the time of the interrupt.
595 */
[32fb6bce]596void hc_interrupt(bus_t *bus, uint32_t status)
[cb89430]597{
[32fb6bce]598 xhci_hc_t *hc = bus_to_hc(bus);
[ab5a0830]599 status = xhci2host(32, status);
[aee352c]600
[cb89430]601 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
602 usb_log_error("Host controller error occured. Bad things gonna happen...");
[ab5a0830]603 status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
[cb89430]604 }
605
606 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
[472235a]607 usb_log_debug2("Event interrupt, running the event ring.");
[ab5a0830]608 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
609 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
[cb89430]610 }
[275f529]611
[cb89430]612 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
613 usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
[ab5a0830]614 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
615 }
616
[fb154e13]617 /* According to Note on p. 302, we may safely ignore the PCD bit. */
618 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
619
[ab5a0830]620 if (status) {
621 usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
[cb89430]622 }
623}
624
[eb928c4]625/**
626 * Tear down all in-memory structures.
627 */
[e4d7363]628void hc_fini(xhci_hc_t *hc)
[cb89430]629{
[e6b9182]630 xhci_bus_fini(&hc->bus);
[cb89430]631 xhci_event_ring_fini(&hc->event_ring);
[b60944b]632 xhci_scratchpad_free(hc);
633 dma_buffer_free(&hc->dcbaa_dma);
[c46c356]634 xhci_fini_commands(hc);
[d32d51d]635 xhci_rh_fini(&hc->rh);
[20eaa82]636 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
[837581fd]637 usb_log_info("Finalized.");
[62ba2cbe]638}
639
[eb928c4]640/**
641 * Ring a xHC Doorbell. Implements section 4.7.
642 */
[708d8fcd]643void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
[a0be5d0]644{
645 assert(hc);
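	/* Doorbell 0 belongs to the command ring (target 0); device doorbells
	 * take the target endpoint's Doorbell Context Index. */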
646 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
647 pio_write_32(&hc->db_arry[doorbell], v);
[2896ff6]648 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
[a0be5d0]649}
[5cbccd4]650
[eb928c4]651/**
652 * Issue an Enable Slot command, returning the obtained Slot ID.
653 *
654 * @param slot_id Pointer where to store the obtained Slot ID.
655 */
[8ea7459]656int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
657{
658 assert(hc);
659
660 int err;
661 xhci_cmd_t cmd;
[c3d926f3]662 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
[8ea7459]663
[c3d926f3]664 if ((err = xhci_cmd_sync(hc, &cmd))) {
665 goto end;
666 }
[8ea7459]667
[c3d926f3]668 if (slot_id) {
[8ea7459]669 *slot_id = cmd.slot_id;
[c3d926f3]670 }
[8ea7459]671
[c3d926f3]672end:
[8ea7459]673 xhci_cmd_fini(&cmd);
[c3d926f3]674 return err;
[8ea7459]675}
676
[eb928c4]677/**
678 * Issue a Disable Slot command for a slot occupied by device.
679 *
[665368c]680 * Frees the device context and marks the slot as invalid.
[eb928c4]681 */
[9620a54]682int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
[f270ecb]683{
[9620a54]684 int err;
[f270ecb]685 assert(hc);
[9620a54]686
687 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
688 return err;
689 }
690
691 /* Free the device context. */
692 hc->dcbaa[dev->slot_id] = 0;
[b80c1ab]693 dma_buffer_free(&dev->dev_ctx);
[9620a54]694
695 /* Mark the slot as invalid. */
696 dev->slot_id = 0;
697
698 return EOK;
[f270ecb]699}
700
[eb928c4]701/**
702 * Prepare an empty Endpoint Input Context inside a dma buffer.
703 */
[e76c0ea]704static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
[b724494]705{
[b80c1ab]706 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
707 if (err)
708 return err;
[b724494]709
[b80c1ab]710 xhci_input_ctx_t *ictx = dma_buf->virt;
[b724494]711 memset(ictx, 0, sizeof(xhci_input_ctx_t));
712
[e76c0ea]713 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
[b724494]714 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
715
[001778c]716 // As we always allocate space for whole input context, we can set this to maximum
717 XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 31);
718
[b724494]719 return EOK;
720}
721
[eb928c4]722/**
723 * Initialize a device, assigning it an address. Implements section 4.3.4.
724 *
 725 * @param dev Device to assign an address to (not yet configured)
726 * @param ep0 EP0 of device TODO remove, can be fetched from dev
727 */
[0206d35]728int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
[b724494]729{
[0206d35]730 int err = ENOMEM;
731
[2cf28b9]732 /* Although we know the precise PSIV value for devices on tier 1 (attached
 733 * directly to the root hub), for the others we have to rely on the reverse mapping. */
[a75f9cbc]734 if (!usb_speed_to_psiv[dev->base.speed]) {
735 usb_log_error("Device reported an USB speed (%s) that cannot be mapped to HC port speed.", usb_str_speed(dev->base.speed));
[2cf28b9]736 return EINVAL;
737 }
738
[0206d35]739 /* Setup and register device context */
[b80c1ab]740 if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
[0206d35]741 goto err;
[b80c1ab]742 memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));
[0206d35]743
[b80c1ab]744 hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
[b724494]745
746 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]747 dma_buffer_t ictx_dma_buf;
[e76c0ea]748 if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
[0206d35]749 goto err_dev_ctx;
[b724494]750 }
[b80c1ab]751 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[b724494]752
753 /* Initialize slot_ctx according to section 4.3.3 point 3. */
[2cf28b9]754 XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
[b724494]755 XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
[2cf28b9]756 XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
[a75f9cbc]757 XHCI_SLOT_SPEED_SET(ictx->slot_ctx, usb_speed_to_psiv[dev->base.speed]);
[2cf28b9]758
[8a98e4a]759 /* Setup Transaction Translation. TODO: Test this with HS hub. */
760 if (dev->base.tt.dev != NULL) {
761 xhci_device_t *hub = xhci_device_get(dev->base.tt.dev);
762 XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, hub->slot_id);
763 XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, dev->base.tt.port);
764 XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0); // MTT not supported yet
765 }
[b724494]766
767 /* Copy endpoint 0 context and set A1 flag. */
768 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
[0206d35]769 xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);
770
[c3d926f3]771 /* Issue Address Device command. */
[b80c1ab]772 if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
[c3d926f3]773 goto err_dev_ctx;
774 }
[b724494]775
[b80c1ab]776 xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
777 dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
[a1732929]778 usb_log_debug2("Obtained USB address: %d.", dev->base.address);
[0206d35]779
[b724494]780 return EOK;
781
[0206d35]782err_dev_ctx:
783 hc->dcbaa[dev->slot_id] = 0;
[b80c1ab]784 dma_buffer_free(&dev->dev_ctx);
[b724494]785err:
786 return err;
787}
788
[eb928c4]789/**
790 * Issue a Configure Device command for a device in slot.
791 *
792 * @param slot_id Slot ID assigned to the device.
793 */
[b724494]794int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
795{
796 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]797 dma_buffer_t ictx_dma_buf;
[e76c0ea]798 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
[928afc8d]799 if (err)
[c3d926f3]800 return err;
[b724494]801
[928afc8d]802 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
[b724494]803
[b80c1ab]804 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
[b724494]805}
806
[eb928c4]807/**
808 * Issue a Deconfigure Device command for a device in slot.
809 *
810 * @param slot_id Slot ID assigned to the device.
811 */
[b724494]812int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
813{
814 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
[928afc8d]815 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
[b724494]816}
817
[eb928c4]818/**
819 * Instruct xHC to add an endpoint with supplied endpoint context.
820 *
821 * @param slot_id Slot ID assigned to the device.
822 * @param ep_idx Endpoint index (number + direction) in question
823 * @param ep_ctx Endpoint context of the endpoint
824 */
[b724494]825int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
826{
827 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]828 dma_buffer_t ictx_dma_buf;
[e76c0ea]829 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
[928afc8d]830 if (err)
[c3d926f3]831 return err;
[b724494]832
[b80c1ab]833 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[b724494]834 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
835 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
[001778c]836
[b724494]837 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
838
[b80c1ab]839 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
[b724494]840}
841
[eb928c4]842/**
843 * Instruct xHC to drop an endpoint.
844 *
845 * @param slot_id Slot ID assigned to the device.
846 * @param ep_idx Endpoint index (number + direction) in question
847 */
[b724494]848int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
849{
850 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]851 dma_buffer_t ictx_dma_buf;
[e76c0ea]852 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
[928afc8d]853 if (err)
[c3d926f3]854 return err;
[b724494]855
[b80c1ab]856 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[b724494]857 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
858 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
859
[b80c1ab]860 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
[b724494]861}
862
[eb928c4]863/**
864 * Instruct xHC to update information about an endpoint, using supplied
865 * endpoint context.
866 *
867 * @param slot_id Slot ID assigned to the device.
868 * @param ep_idx Endpoint index (number + direction) in question
869 * @param ep_ctx Endpoint context of the endpoint
870 */
[306a36d]871int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
872{
873 dma_buffer_t ictx_dma_buf;
874 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
875 if (err)
876 return err;
877
878 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
879 memset(ictx, 0, sizeof(xhci_input_ctx_t));
880
881 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
882 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
883
884 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
885}
886
[30fc56f]887/**
888 * Instruct xHC to stop running a transfer ring on an endpoint.
889 *
890 * @param slot_id Slot ID assigned to the device.
891 * @param ep_idx Endpoint index (number + direction) in question
892 */
893int hc_stop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
894{
895
896 return xhci_cmd_sync_inline(hc, STOP_ENDPOINT, .slot_id = slot_id, .endpoint_id = ep_idx);
897}
898
[5cbccd4]899/**
900 * @}
901 */