source: mainline/uspace/drv/bus/usb/xhci/hc.c@ 36fb6d7

Last change on this file since 36fb6d7 was 598733c9, checked in by Jenda <jenda.jzqk73@…>, 8 years ago

fix broken debug messages && make it compile

/*
 * Copyright (c) 2017 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller data bookkeeping.
 */

#include <errno.h>
#include <str_error.h>
#include <usb/debug.h>
#include <usb/host/endpoint.h>
#include "debug.h"
#include "hc.h"
#include "rh.h"
#include "hw_struct/trb.h"
#include "hw_struct/context.h"
#include "endpoint.h"
#include "transfers.h"
#include "trb_ring.h"

/**
 * Default USB Speed ID mapping: Table 157 of the xHCI specification.
 */
#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
#define PORT_SPEED(usb, mjr, psie, psim) { \
    .name = "USB ", \
    .major = mjr, \
    .minor = 0, \
    .usb_speed = USB_SPEED_##usb, \
    .rx_bps = PSI_TO_BPS(psie, psim), \
    .tx_bps = PSI_TO_BPS(psie, psim) \
}

static const xhci_port_speed_t default_psiv_to_port_speed [] = {
    [1] = PORT_SPEED(FULL, 2, 2, 12),
    [2] = PORT_SPEED(LOW, 2, 1, 1500),
    [3] = PORT_SPEED(HIGH, 2, 2, 480),
    [4] = PORT_SPEED(SUPER, 3, 3, 5),
};
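
/*
 * A worked example of the macros above (illustration only): the High-speed
 * entry PORT_SPEED(HIGH, 2, 2, 480) expands to rx_bps = tx_bps =
 * 480 << (10 * 2) = 503316480, and the SuperSpeed entry to 5 << 30 =
 * 5368709120. PSI_TO_BPS therefore scales the mantissa by powers of 1024,
 * a close approximation of the nominal 480 Mbps and 5 Gbps rates.
 */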

static const unsigned usb_speed_to_psiv [] = {
    [USB_SPEED_FULL] = 1,
    [USB_SPEED_LOW] = 2,
    [USB_SPEED_HIGH] = 3,
    [USB_SPEED_SUPER] = 4,
};

/**
 * Walk the list of extended capabilities.
 *
 * The most interesting thing hidden in extended capabilities is the mapping of
 * ports to protocol versions and speeds.
 */
static int hc_parse_ec(xhci_hc_t *hc)
{
    unsigned psic, major, minor;
    xhci_sp_name_t name;

    xhci_port_speed_t *speeds = hc->speeds;

    for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
        xhci_dump_extcap(ec);
        switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
        case XHCI_EC_USB_LEGACY:
            assert(hc->legsup == NULL);
            hc->legsup = (xhci_legsup_t *) ec;
            break;
        case XHCI_EC_SUPPORTED_PROTOCOL:
            psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
            major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
            minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
            name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));

            if (name.packed != xhci_name_usb.packed) {
                /**
                 * Detection of such a protocol would work, but
                 * the rest of the implementation is made for
                 * the USB protocol only.
                 */
                usb_log_error("Unknown protocol %.4s.", name.str);
                return ENOTSUP;
            }

            // "Implied" speed
            if (psic == 0) {
                assert(minor == 0);

                if (major == 2) {
                    speeds[1] = default_psiv_to_port_speed[1];
                    speeds[2] = default_psiv_to_port_speed[2];
                    speeds[3] = default_psiv_to_port_speed[3];
                } else if (major == 3) {
                    speeds[4] = default_psiv_to_port_speed[4];
                } else {
                    return EINVAL;
                }

                usb_log_debug2("Implied speed of USB %u.0 set up.", major);
            } else {
                for (unsigned i = 0; i < psic; i++) {
                    xhci_psi_t *psi = xhci_extcap_psi(ec, i);
                    unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
                    unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
                    unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
                    unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
                    uint64_t bps = PSI_TO_BPS(psie, psim);

                    /*
                     * The speed is not implied, but it uses one of the default
                     * PSIVs. This is not clearly stated in the xHCI spec. There
                     * is a clear intention to allow an xHC to specify its own
                     * speed parameters, but throughout the document fixed values
                     * are used for e.g. High-speed (3), without stating that the
                     * controller shall have implied default speeds - and, for
                     * instance, Intel controllers do not. So let's check whether
                     * the values match and, if so, accept the implied USB speed
                     * too.
                     *
                     * The main reason we need this is to have a usb_speed
                     * mapping also for devices connected to hubs.
                     */
                    if (psiv < ARRAY_SIZE(default_psiv_to_port_speed)
                        && default_psiv_to_port_speed[psiv].major == major
                        && default_psiv_to_port_speed[psiv].minor == minor
                        && default_psiv_to_port_speed[psiv].rx_bps == bps
                        && default_psiv_to_port_speed[psiv].tx_bps == bps) {
                        speeds[psiv] = default_psiv_to_port_speed[psiv];
                        usb_log_debug2("Assumed default %s speed of USB %u.", usb_str_speed(speeds[psiv].usb_speed), major);
                        continue;
                    }

                    // Custom speed
                    speeds[psiv].major = major;
                    speeds[psiv].minor = minor;
                    str_ncpy(speeds[psiv].name, 4, name.str, 4);
                    speeds[psiv].usb_speed = USB_SPEED_MAX;

                    if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
                        speeds[psiv].rx_bps = bps;
                    if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
                        speeds[psiv].tx_bps = bps;
                        usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
                    }
                }
            }
        }
    }
    return EOK;
}

/**
 * Initialize MMIO spaces of xHC.
 */
int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
    int err;

    if (hw_res->mem_ranges.count != 1) {
        usb_log_error("Unexpected MMIO area, bailing out.");
        return EINVAL;
    }

    hc->mmio_range = hw_res->mem_ranges.ranges[0];

    usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
        RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

    if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
        return EOVERFLOW;

    void *base;
    if ((err = pio_enable_range(&hc->mmio_range, &base)))
        return err;

    hc->reg_base = base;
    hc->cap_regs = (xhci_cap_regs_t *) base;
    hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
    hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
    hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

    uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
    if (xec_offset > 0)
        hc->xecp = (xhci_extcap_t *) (base + xec_offset);

    usb_log_debug2("Initialized MMIO reg areas:");
    usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
    usb_log_debug2("\tOperational regs: %p", hc->op_regs);
    usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
    usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);

    xhci_dump_cap_regs(hc->cap_regs);

    hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
    hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);

    struct timeval tv;
    getuptime(&tv);
    hc->wrap_time = tv.tv_sec * 1000000 + tv.tv_usec;
    hc->wrap_count = 0;

    unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
    hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);

    if ((err = hc_parse_ec(hc))) {
        pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
        return err;
    }

    return EOK;
}

/**
 * Initialize structures kept in allocated memory.
 */
int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
    int err;

    if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
        return ENOMEM;
    hc->dcbaa = hc->dcbaa_dma.virt;

    if ((err = xhci_event_ring_init(&hc->event_ring)))
        goto err_dcbaa;

    if ((err = xhci_scratchpad_alloc(hc)))
        goto err_event_ring;

    if ((err = xhci_init_commands(hc)))
        goto err_scratch;

    if ((err = xhci_bus_init(&hc->bus, hc)))
        goto err_cmd;

    if ((err = xhci_rh_init(&hc->rh, hc)))
        goto err_bus;

    return EOK;

err_bus:
    xhci_bus_fini(&hc->bus);
err_cmd:
    xhci_fini_commands(hc);
err_scratch:
    xhci_scratchpad_free(hc);
err_event_ring:
    xhci_event_ring_fini(&hc->event_ring);
err_dcbaa:
    hc->dcbaa = NULL;
    dma_buffer_free(&hc->dcbaa_dma);
    return err;
}

/*
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 *     status = read(usbsts)
 *     assert status
 *     assert ip
 *     accept (passing status)
 * }
 * decline
 */
static const irq_cmd_t irq_commands[] = {
    {
        .cmd = CMD_PIO_READ_32,
        .dstarg = 3,
        .addr = NULL /* intr[0].iman */
    },
    {
        .cmd = CMD_AND,
        .srcarg = 3,
        .dstarg = 4,
        .value = 0 /* host2xhci(32, 1) */
    },
    {
        .cmd = CMD_PREDICATE,
        .srcarg = 4,
        .value = 5
    },
    {
        .cmd = CMD_PIO_READ_32,
        .dstarg = 1,
        .addr = NULL /* usbsts */
    },
    {
        .cmd = CMD_AND,
        .srcarg = 1,
        .dstarg = 2,
        .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
    },
    {
        .cmd = CMD_PIO_WRITE_A_32,
        .srcarg = 2,
        .addr = NULL /* usbsts */
    },
    {
        .cmd = CMD_PIO_WRITE_A_32,
        .srcarg = 3,
        .addr = NULL /* intr[0].iman */
    },
    {
        .cmd = CMD_ACCEPT
    },
    {
        .cmd = CMD_DECLINE
    }
};
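
/*
 * The NULL .addr and zero .value fields above are placeholders: the command
 * sequence is copied into the generated irq_code_t by hc_irq_code_gen()
 * below, which then patches in the addresses of intr[0].iman and usbsts
 * within the controller's MMIO range and the byte-order-converted mask
 * values (cmds[0], [1], [3], [4], [5] and [6]).
 */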

/**
 * Generates code to accept interrupts. The xHCI is designed primarily for
 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
 * (except 0) are disabled.
 */
int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
    assert(code);
    assert(hw_res);

    if (hw_res->irqs.count != 1) {
        usb_log_info("Unexpected HW resources to enable interrupts.");
        return EINVAL;
    }

    code->ranges = malloc(sizeof(irq_pio_range_t));
    if (code->ranges == NULL)
        return ENOMEM;

    code->cmds = malloc(sizeof(irq_commands));
    if (code->cmds == NULL) {
        free(code->ranges);
        return ENOMEM;
    }

    code->rangecount = 1;
    code->ranges[0] = (irq_pio_range_t) {
        .base = RNGABS(hc->mmio_range),
        .size = RNGSZ(hc->mmio_range),
    };

    code->cmdcount = ARRAY_SIZE(irq_commands);
    memcpy(code->cmds, irq_commands, sizeof(irq_commands));

    void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
    void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
    code->cmds[0].addr = intr0_iman;
    code->cmds[1].value = host2xhci(32, 1);
    code->cmds[3].addr = usbsts;
    code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
    code->cmds[5].addr = usbsts;
    code->cmds[6].addr = intr0_iman;

    return hw_res->irqs.irqs[0];
}
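
/*
 * The caller is expected to hand the generated code and the returned IRQ
 * number over to the kernel when registering its interrupt handler (the
 * bottom half of which is hc_interrupt() below); the exact registration
 * call depends on the libdrv/DDF version and is not shown here.
 */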

/**
 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
 */
int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
{
    /* No legacy support capability, the controller is solely for us */
    if (!hc->legsup)
        return EOK;

    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
    XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
    for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
        usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
            XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
            XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
        if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
            return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
        }
        async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
    }
    usb_log_error("BIOS did not release XHCI legacy hold!\n");

    return ENOTSUP;
}

/**
 * Ask the xHC to reset its state.
 */
static int hc_reset(xhci_hc_t *hc)
{
    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    /* Stop the HC: set R/S to 0 */
    XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

    /* Wait until the HC is halted - it shall take at most 16 ms */
    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH), XHCI_REG_MASK(XHCI_OP_HCH)))
        return ETIMEOUT;

    /* Reset */
    XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

    /* Wait until the reset is complete */
    if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
        return ETIMEOUT;

    return EOK;
}

/**
 * Initialize the HC: section 4.2
 */
int hc_start(xhci_hc_t *hc, bool irq)
{
    int err;

    if ((err = hc_reset(hc)))
        return err;

    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    uint64_t dcbaaptr = hc->dcbaa_dma.phys;
    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

    uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
    if (hc->cr.trb_ring.pcs)
        crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));

    XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);

    xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
    uint64_t erdp = hc->event_ring.dequeue_ptr;
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
    uint64_t erstptr = hc->event_ring.erst.phys;
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));

    if (irq) {
        XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
        XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
    }

    XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

    XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

    /*
     * The reset changed the status of all ports, and a SW-originated
     * change does not cause an interrupt.
     */
    for (uint8_t port = 1; port <= hc->rh.max_ports; ++port)
        xhci_rh_handle_port_change(&hc->rh, port);

    return EOK;
}

/**
 * Used only when polling. Shall supplement the irq_commands.
 */
int hc_status(bus_t *bus, uint32_t *status)
{
    xhci_hc_t *hc = bus_to_hc(bus);
    int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
    if (ip) {
        *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
        XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
        XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

        /* interrupt handler expects status from irq_commands, which is
         * in xhci order. */
        *status = host2xhci(32, *status);
    }

    usb_log_debug2("Polled status: %x", *status);
    return EOK;
}

static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
    struct timeval tv;
    getuptime(&tv);
    usb_log_debug2("Microframe index wrapped (@%lu.%li, %"PRIu64" total).", tv.tv_sec, tv.tv_usec, hc->wrap_count);
    hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
    ++hc->wrap_count;
    return EOK;
}

static int handle_port_status_change_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
    uint8_t port_id = XHCI_QWORD_EXTRACT(trb->parameter, 31, 24);
    usb_log_debug("Port status change event detected for port %u.", port_id);
    xhci_rh_handle_port_change(&hc->rh, port_id);
    return EOK;
}

typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

static event_handler event_handlers [] = {
    [XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
    [XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &handle_port_status_change_event,
    [XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
    [XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};

static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
{
    unsigned type = TRB_TYPE(*trb);
    if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
        return ENOTSUP;

    return event_handlers[type](hc, trb);
}

/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events that block while waiting for subsequent events, we
 * solve this problem by first copying the event TRBs from the event ring,
 * then asserting EHB, and only then handling the events.
 *
 * Whenever the event handling blocks, it switches fibril, and an incoming
 * IPC notification will create a new event handling fibril for us.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
{
    int err;

    xhci_trb_t trb;
    hc->event_handler = fibril_get_id();

    while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
        if ((err = hc_handle_event(hc, &trb, intr)) != EOK) {
            usb_log_error("Failed to handle event: %s", str_error(err));
        }

        uint64_t erdp = hc->event_ring.dequeue_ptr;
        XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
        XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
    }

    hc->event_handler = 0;

    /* Update the ERDP to make room in the ring. */
    uint64_t erdp = hc->event_ring.dequeue_ptr;
    erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
    XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
    XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));

    usb_log_debug2("Event ring run finished.");
}

/**
 * Handle an interrupt request from xHC. Resolve all situations that trigger an
 * interrupt separately.
 *
 * Note that all RW1C bits in USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * @param status contents of USBSTS register at the time of the interrupt.
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
    xhci_hc_t *hc = bus_to_hc(bus);
    status = xhci2host(32, status);

    if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
        usb_log_error("Host controller error occurred. Bad things gonna happen...");
        status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
    }

    if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
        usb_log_debug2("Event interrupt, running the event ring.");
        hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
        status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
    }

    if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
        usb_log_error("Save/Restore error occurred. WTF, S/R mechanism not implemented!");
        status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
    }

    /* According to Note on p. 302, we may safely ignore the PCD bit. */
    status &= ~XHCI_REG_MASK(XHCI_OP_PCD);

    if (status) {
        usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
    }
}

/**
 * Tear down all in-memory structures.
 */
void hc_fini(xhci_hc_t *hc)
{
    xhci_bus_fini(&hc->bus);
    xhci_event_ring_fini(&hc->event_ring);
    xhci_scratchpad_free(hc);
    dma_buffer_free(&hc->dcbaa_dma);
    xhci_fini_commands(hc);
    xhci_rh_fini(&hc->rh);
    pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
    usb_log_info("Finalized.");
}

/**
 * Ring an xHC Doorbell. Implements section 4.7.
 */
void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
{
    assert(hc);
    uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
    pio_write_32(&hc->db_arry[doorbell], v);
    usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
}
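
/*
 * For reference, per the xHCI doorbell register layout: doorbell 0 with
 * target 0 rings the command ring, while doorbells 1..MaxSlots belong to
 * device slots and take the endpoint's DCI (1 for the default control
 * endpoint) as the target value.
 */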

/**
 * Issue an Enable Slot command, returning the obtained Slot ID.
 *
 * @param slot_id Pointer where to store the obtained Slot ID.
 */
int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
{
    assert(hc);

    int err;
    xhci_cmd_t cmd;
    xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

    if ((err = xhci_cmd_sync(hc, &cmd))) {
        goto end;
    }

    if (slot_id) {
        *slot_id = cmd.slot_id;
    }

end:
    xhci_cmd_fini(&cmd);
    return err;
}
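
/*
 * A typical enumeration sequence, as suggested by the xHCI model, would pair
 * the commands below: hc_enable_slot() obtains a slot, hc_address_device()
 * sets up the slot and default control endpoint contexts,
 * hc_configure_device() and hc_add_endpoint() follow once the descriptors
 * are known, and hc_disable_slot() releases everything again on removal.
 */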

/**
 * Issue a Disable Slot command for a slot occupied by a device.
 *
 * Frees the device context.
 */
int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
{
    int err;
    assert(hc);

    if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
        return err;
    }

    /* Free the device context. */
    hc->dcbaa[dev->slot_id] = 0;
    dma_buffer_free(&dev->dev_ctx);

    /* Mark the slot as invalid. */
    dev->slot_id = 0;

    return EOK;
}

/**
 * Prepare an empty Endpoint Input Context inside a dma buffer.
 */
static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
{
    const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
    if (err)
        return err;

    xhci_input_ctx_t *ictx = dma_buf->virt;
    memset(ictx, 0, sizeof(xhci_input_ctx_t));

    // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);

    return EOK;
}

/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * @param dev Device to assign an address to (not yet configured).
 * @param ep0 EP0 of the device. TODO: remove, can be fetched from dev.
 */
int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
{
    int err = ENOMEM;

    /* Although we have the precise PSIV value on devices of tier 1,
     * we have to rely on reverse mapping on others. */
    if (!usb_speed_to_psiv[dev->base.speed]) {
        usb_log_error("Device reported a USB speed (%s) that cannot be mapped to HC port speed.", usb_str_speed(dev->base.speed));
        return EINVAL;
    }

    /* Setup and register device context */
    if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
        goto err;
    memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));

    hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);

    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
        goto err_dev_ctx;
    }
    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

    /* Initialize slot_ctx according to section 4.3.3 point 3. */
    XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
    XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
    XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
    XHCI_SLOT_SPEED_SET(ictx->slot_ctx, usb_speed_to_psiv[dev->base.speed]);

    /* In a very specific case, we have to set also these. But before that,
     * we need to refactor how TT is handled in libusbhost. */
    XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
    XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
    XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);

    /* Copy endpoint 0 context and set A1 flag. */
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
    xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

    /* Issue Address Device command. */
    if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
        goto err_dev_ctx;
    }

    xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
    dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
    usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);

    return EOK;

err_dev_ctx:
    hc->dcbaa[dev->slot_id] = 0;
    dma_buffer_free(&dev->dev_ctx);
err:
    return err;
}

/**
 * Issue a Configure Device command for a device in slot.
 *
 * @param slot_id Slot ID assigned to the device.
 */
int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
{
    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
    if (err)
        return err;

    // TODO: Set slot context and other flags. (probably forgot a lot of 'em)

    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Issue a Deconfigure Device command for a device in slot.
 *
 * @param slot_id Slot ID assigned to the device.
 */
int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
{
    /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
}

/**
 * Instruct xHC to add an endpoint with supplied endpoint context.
 *
 * @param slot_id Slot ID assigned to the device.
 * @param ep_idx Endpoint index (number + direction) in question
 * @param ep_ctx Endpoint context of the endpoint
 */
int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
{
    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
    memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
    // TODO: Set slot context and other flags. (probably forgot a lot of 'em)

    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to drop an endpoint.
 *
 * @param slot_id Slot ID assigned to the device.
 * @param ep_idx Endpoint index (number + direction) in question
 */
int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
{
    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
    // TODO: Set slot context and other flags. (probably forgot a lot of 'em)

    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to update information about an endpoint, using supplied
 * endpoint context.
 *
 * @param slot_id Slot ID assigned to the device.
 * @param ep_idx Endpoint index (number + direction) in question
 * @param ep_ctx Endpoint context of the endpoint
 */
int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
{
    dma_buffer_t ictx_dma_buf;
    const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    memset(ictx, 0, sizeof(xhci_input_ctx_t));

    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
    memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));

    return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to stop running a transfer ring on an endpoint.
 *
 * @param slot_id Slot ID assigned to the device.
 * @param ep_idx Endpoint index (number + direction) in question
 */
int hc_stop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
{
    return xhci_cmd_sync_inline(hc, STOP_ENDPOINT, .slot_id = slot_id, .endpoint_id = ep_idx);
}

/**
 * @}
 */