source: mainline/uspace/drv/bus/usb/xhci/hc.c@ fb154e13

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since fb154e13 was fb154e13, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: revised roothub event handling

According to the xHCI specification, Port Status Change Event is
generated per port. Also, the PCD bit can be safely ignored. Added
mutual exclusion to roothub event handling to avoid duplicate device
adding.

  • Property mode set to 100644
File size: 25.4 KB
Line 
1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
/**
 * Default USB Speed ID mapping: Table 157
 */
/* Convert a Protocol Speed ID exponent/mantissa pair to bits per second.
 * Each step of the exponent multiplies the mantissa by 1024 (1 << 10). */
#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
/* Build one xhci_port_speed_t entry for a default (implied) speed ID.
 * The rx/tx bitrates are symmetric for all default speeds. */
#define PORT_SPEED(usb, mjr, psie, psim) { \
	.name = "USB ", \
	.major = mjr, \
	.minor = 0, \
	.usb_speed = USB_SPEED_##usb, \
	.rx_bps = PSI_TO_BPS(psie, psim), \
	.tx_bps = PSI_TO_BPS(psie, psim) \
}
61
/* Port speeds assumed when a Supported Protocol capability declares no
 * explicit PSI list. Indexed by the default PSIV values of xHCI Table 157. */
static const xhci_port_speed_t default_psiv_to_port_speed [] = {
	[1] = PORT_SPEED(FULL, 2, 2, 12),
	[2] = PORT_SPEED(LOW, 2, 1, 1500),
	[3] = PORT_SPEED(HIGH, 2, 2, 480),
	[4] = PORT_SPEED(SUPER, 3, 3, 5),
};
68
/* Reverse mapping from USB speed to the default PSIV, used in
 * hc_address_device for devices whose PSIV is not directly observable
 * (e.g. devices behind hubs). A zero entry means "no mapping". */
static const unsigned usb_speed_to_psiv [] = {
	[USB_SPEED_FULL] = 1,
	[USB_SPEED_LOW] = 2,
	[USB_SPEED_HIGH] = 3,
	[USB_SPEED_SUPER] = 4,
};
75
/**
 * Walk the list of extended capabilities.
 *
 * The most interesting thing hidden in extended capabilities is the mapping of
 * ports to protocol versions and speeds.
 *
 * Fills hc->speeds (indexed by PSIV) and remembers the legacy support
 * capability pointer (hc->legsup) for the later BIOS handoff.
 *
 * @param hc Host controller with MMIO mapped and hc->xecp set by hc_init_mmio.
 * @return EOK on success; ENOTSUP for a non-USB protocol capability;
 *         EINVAL for an implied-speed capability with unknown major revision.
 */
static int hc_parse_ec(xhci_hc_t *hc)
{
	unsigned psic, major, minor;
	xhci_sp_name_t name;

	xhci_port_speed_t *speeds = hc->speeds;

	for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
		xhci_dump_extcap(ec);
		switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
		case XHCI_EC_USB_LEGACY:
			/* Remember it for hc_claim; only one such capability
			 * is expected per controller. */
			assert(hc->legsup == NULL);
			hc->legsup = (xhci_legsup_t *) ec;
			break;
		case XHCI_EC_SUPPORTED_PROTOCOL:
			psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
			major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
			minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
			name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));

			if (name.packed != xhci_name_usb.packed) {
				/**
				 * The detection of such protocol would work,
				 * but the rest of the implementation is made
				 * for the USB protocol only.
				 */
				usb_log_error("Unknown protocol %.4s.", name.str);
				return ENOTSUP;
			}

			// "Implied" speed: PSIC == 0 means the default PSIV
			// assignment of Table 157 applies for this revision.
			if (psic == 0) {
				assert(minor == 0);

				if (major == 2) {
					speeds[1] = default_psiv_to_port_speed[1];
					speeds[2] = default_psiv_to_port_speed[2];
					speeds[3] = default_psiv_to_port_speed[3];
				} else if (major == 3) {
					speeds[4] = default_psiv_to_port_speed[4];
				} else {
					return EINVAL;
				}

				usb_log_debug2("Implied speed of USB %u.0 set up.", major);
			} else {
				for (unsigned i = 0; i < psic; i++) {
					xhci_psi_t *psi = xhci_extcap_psi(ec, i);
					/* NOTE(review): `sim` reads XHCI_PSI_PSIM
					 * but is compared below against the
					 * XHCI_PSI_PLT_* constants — it looks
					 * like it should read the PLT field
					 * instead. Confirm against the PSI
					 * register layout. */
					unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
					unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
					unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
					unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
					uint64_t bps = PSI_TO_BPS(psie, psim);

					/*
					 * Speed is not implied, but using one of default PSIV. This is
					 * not clearly stated in xHCI spec. There is a clear intention
					 * to allow xHCI to specify its own speed parameters, but
					 * throughout the document, they used fixed values for e.g.
					 * High-speed (3), without stating the controller shall have
					 * implied default speeds - and for instance Intel controllers
					 * do not. So let's check if the values match and if so, accept
					 * the implied USB speed too.
					 *
					 * The main reason we need this is the usb_speed to have
					 * mapping also for devices connected to hubs.
					 */
					if (psiv < ARRAY_SIZE(default_psiv_to_port_speed)
					    && default_psiv_to_port_speed[psiv].major == major
					    && default_psiv_to_port_speed[psiv].minor == minor
					    && default_psiv_to_port_speed[psiv].rx_bps == bps
					    && default_psiv_to_port_speed[psiv].tx_bps == bps) {
						speeds[psiv] = default_psiv_to_port_speed[psiv];
						usb_log_debug2("Assumed default %s speed of USB %u.", usb_str_speed(speeds[psiv].usb_speed), major);
						continue;
					}

					// Custom speed: no usb_speed mapping exists, mark as MAX.
					speeds[psiv].major = major;
					speeds[psiv].minor = minor;
					str_ncpy(speeds[psiv].name, 4, name.str, 4);
					speeds[psiv].usb_speed = USB_SPEED_MAX;

					if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
						speeds[psiv].rx_bps = bps;
					if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
						speeds[psiv].tx_bps = bps;
						usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
					}
				}
			}
		}
	}
	return EOK;
}
177
/**
 * Initialize MMIO spaces of xHC.
 *
 * Maps the single expected memory range, derives the capability,
 * operational, runtime and doorbell register areas from the capability
 * registers, and parses the extended capability list.
 *
 * @param hc Host controller structure to fill in.
 * @param hw_res Parsed hardware resources; exactly one memory range expected.
 * @return EOK on success, EINVAL/EOVERFLOW/propagated error otherwise.
 */
int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	int err;

	if (hw_res->mem_ranges.count != 1) {
		usb_log_error("Unexpected MMIO area, bailing out.");
		return EINVAL;
	}

	hc->mmio_range = hw_res->mem_ranges.ranges[0];

	usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
	    RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

	/* The range must at least hold the capability registers. */
	if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
		return EOVERFLOW;

	void *base;
	if ((err = pio_enable_range(&hc->mmio_range, &base)))
		return err;

	/* All other register areas are located by offsets read from the
	 * capability registers. */
	hc->reg_base = base;
	hc->cap_regs = (xhci_cap_regs_t *)  base;
	hc->op_regs  = (xhci_op_regs_t *)  (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
	hc->rt_regs  = (xhci_rt_regs_t *)  (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
	hc->db_arry  = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

	/* XECP is in dwords; zero means no extended capabilities. */
	uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
	if (xec_offset > 0)
		hc->xecp = (xhci_extcap_t *) (base + xec_offset);

	usb_log_debug2("Initialized MMIO reg areas:");
	usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
	usb_log_debug2("\tOperational regs: %p", hc->op_regs);
	usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
	usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);

	xhci_dump_cap_regs(hc->cap_regs);

	hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
	hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
	hc->wrap_count = 0;
	unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
	/* NOTE(review): `>>` binds tighter than `&`, so this evaluates as
	 * (ist & (0x10 >> 1)) == (ist & 0x8), testing bit 3 of IST, not
	 * bit 4. Confirm against the HCSPARAMS2 IST field definition
	 * whether the missing parentheses are intentional. */
	hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);

	if ((err = hc_parse_ec(hc))) {
		pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
		return err;
	}

	return EOK;
}
233
/**
 * Initialize structures kept in allocated memory.
 *
 * Allocates, in order: the DCBAA, the event ring, the scratchpad buffers,
 * the command infrastructure, the bus and the root hub. On any failure,
 * everything already allocated is torn down again via the goto chain.
 *
 * @param hc Host controller with MMIO initialized (hc->max_slots valid).
 * @param device DDF device. NOTE(review): currently unused in this body —
 *               confirm whether xhci_bus_init/xhci_rh_init were meant to
 *               receive it.
 * @return EOK on success, ENOMEM or a propagated error code otherwise.
 */
int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
	int err;

	/* One DCBAA entry per device slot, plus the reserved entry 0. */
	if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
		return ENOMEM;
	hc->dcbaa = hc->dcbaa_dma.virt;

	if ((err = xhci_event_ring_init(&hc->event_ring)))
		goto err_dcbaa;

	if ((err = xhci_scratchpad_alloc(hc)))
		goto err_event_ring;

	if ((err = xhci_init_commands(hc)))
		goto err_scratch;

	if ((err = xhci_bus_init(&hc->bus, hc)))
		goto err_cmd;

	if ((err = xhci_rh_init(&hc->rh, hc)))
		goto err_bus;

	return EOK;

	/* Roll back in reverse order of allocation. */
err_bus:
	xhci_bus_fini(&hc->bus);
err_cmd:
	xhci_fini_commands(hc);
err_scratch:
	xhci_scratchpad_free(hc);
err_event_ring:
	xhci_event_ring_fini(&hc->event_ring);
err_dcbaa:
	hc->dcbaa = NULL;
	dma_buffer_free(&hc->dcbaa_dma);
	return err;
}
275
/*
 * Bottom-half IRQ program executed by the kernel on each interrupt.
 *
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 *     status = read(usbsts)
 *     assert status
 *     assert ip
 *     accept (passing status)
 * }
 * decline
 *
 * The NULL addresses and zero values are patched in at runtime by
 * hc_irq_code_gen (the command indices there must match this array).
 */
static const irq_cmd_t irq_commands[] = {
	{
		/* arg3 = IMAN of interrupter 0 (holds the IP bit). */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		/* arg4 = arg3 & IP mask — nonzero iff this HC interrupted. */
		.cmd = CMD_AND,
		.srcarg = 3,
		.dstarg = 4,
		.value = 0 /* host2xhci(32, 1) */
	},
	{
		/* If IP is clear, skip the next 5 commands (fall to DECLINE). */
		.cmd = CMD_PREDICATE,
		.srcarg = 4,
		.value = 5
	},
	{
		/* arg1 = USBSTS, handed to the top-half via ACCEPT. */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 1,
		.addr = NULL /* usbsts */
	},
	{
		/* arg2 = the RW1C bits to acknowledge. */
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
	},
	{
		/* Write back USBSTS to clear the asserted RW1C bits. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 2,
		.addr = NULL /* usbsts */
	},
	{
		/* Write back IMAN (with IP set) to acknowledge the interrupter. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};
332
333
/**
 * Generates code to accept interrupts. The xHCI is designed primarily for
 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
 * (except 0) are disabled.
 *
 * Copies irq_commands and patches in the physical addresses of IMAN and
 * USBSTS plus the byte-order-corrected constants. The cmds[] indices used
 * below are tied to the layout of irq_commands.
 *
 * @param code IRQ code structure to fill in (ranges/cmds are allocated here;
 *             ownership passes to the caller).
 * @param hc Host controller with MMIO range and capability regs mapped.
 * @param hw_res Parsed hardware resources; exactly one IRQ expected.
 * @return The IRQ number on success, or a negative-path error code (EINVAL,
 *         ENOMEM). NOTE(review): error codes share the return channel with
 *         the IRQ number — confirm callers can distinguish them.
 */
int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	assert(code);
	assert(hw_res);

	if (hw_res->irqs.count != 1) {
		usb_log_info("Unexpected HW resources to enable interrupts.");
		return EINVAL;
	}

	code->ranges = malloc(sizeof(irq_pio_range_t));
	if (code->ranges == NULL)
		return ENOMEM;

	code->cmds = malloc(sizeof(irq_commands));
	if (code->cmds == NULL) {
		free(code->ranges);
		return ENOMEM;
	}

	/* The single PIO range covers the whole xHC MMIO area. */
	code->rangecount = 1;
	code->ranges[0] = (irq_pio_range_t) {
	    .base = RNGABS(hc->mmio_range),
	    .size = RNGSZ(hc->mmio_range),
	};

	code->cmdcount = ARRAY_SIZE(irq_commands);
	memcpy(code->cmds, irq_commands, sizeof(irq_commands));

	/* Physical addresses of interrupter 0 IMAN and USBSTS, computed from
	 * the same capability-register offsets used in hc_init_mmio. */
	void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
	void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
	code->cmds[0].addr = intr0_iman;
	code->cmds[1].value = host2xhci(32, 1);
	code->cmds[3].addr = usbsts;
	code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
	code->cmds[5].addr = usbsts;
	code->cmds[6].addr = intr0_iman;

	return hw_res->irqs.irqs[0];
}
379
/**
 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
 *
 * Sets the OS-owned semaphore and polls until the BIOS releases its
 * semaphore or the timeout elapses.
 *
 * @param hc Host controller (hc->legsup set by hc_parse_ec, may be NULL).
 * @param dev DDF device (unused in this body).
 * @return EOK when the controller is ours, ENOTSUP when BIOS kept the hold.
 */
int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
{
	/* No legacy support capability, the controller is solely for us */
	if (!hc->legsup)
		return EOK;

	/* TODO: Test this with USB3-aware BIOS */
	usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
	XHCI_REG_WR(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
	/* Poll once per delay period up to the BIOS timeout. */
	for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
		usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
		    XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
		if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
			assert(XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1);
			return EOK;
		}
		async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
	}
	usb_log_error("BIOS did not release XHCI legacy hold!\n");

	return ENOTSUP;
}
406
/**
 * Ask the xHC to reset its state. Implements sequence
 *
 * Halts the controller, then issues a host controller reset and waits for
 * it to complete.
 *
 * @param hc Host controller with operational registers mapped.
 * @return EOK (always; both waits are open-ended).
 */
static int hc_reset(xhci_hc_t *hc)
{
	/* Stop the HC: set R/S to 0 */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/* Wait 16 ms until the HC is halted */
	/* NOTE(review): hard assert after a fixed delay — a slow controller
	 * would abort the driver instead of being retried. */
	async_usleep(16000);
	assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH));

	/* Reset */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

	/* Wait until the reset is complete (no upper bound on this loop). */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST))
		async_usleep(1000);

	return EOK;
}
428
/**
 * Initialize the HC: section 4.2
 *
 * Resets the controller, programs the DCBAA, command ring and interrupter 0
 * event ring registers, optionally enables interrupts, and finally sets
 * Run/Stop. The write order follows the initialization sequence of the
 * xHCI specification and must not be rearranged.
 *
 * @param hc Host controller with MMIO and memory structures initialized.
 * @param irq Whether to enable interrupt generation (false = polling mode).
 * @return EOK on success, or the error of hc_reset.
 */
int hc_start(xhci_hc_t *hc, bool irq)
{
	int err;

	if ((err = hc_reset(hc)))
		return err;

	// FIXME: Waiting forever.
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR))
		async_usleep(1000);

	/* Device Context Base Address Array and enabled slot count. */
	uint64_t dcbaaptr = hc->dcbaa_dma.phys;
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

	/* Command ring pointer, with the producer cycle state bit. */
	uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
	if (hc->cr.trb_ring.pcs)
		crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));

	/* Enable MFINDEX Wrap events (consumed by the wrap counter). */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);

	/* Interrupter 0: event ring segment table, dequeue pointer, base. */
	xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
	uint64_t erstptr = hc->event_ring.erst.phys;
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));

	if (irq) {
		XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
		XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
	}

	/* Report host system errors via interrupt. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

	/* Go. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

	/* The reset changed status of all ports, and SW originated reason does
	 * not cause an interrupt.
	 */
	for (uint8_t port = 1; port <= hc->rh.max_ports; ++port)
		xhci_rh_handle_port_change(&hc->rh, port);

	return EOK;
}
483
/**
 * Used only when polling. Shall supplement the irq_commands.
 *
 * Mirrors the bottom-half IRQ program: reads IP, and when set, reads and
 * acknowledges USBSTS and the interrupter, returning the status in xhci
 * (device) byte order as the top-half expects.
 *
 * @param bus Generic bus handle (converted to xhci_hc_t).
 * @param status Out: USBSTS contents in xhci byte order when IP was set.
 *        NOTE(review): when IP is clear, *status is logged without being
 *        written here — presumably the caller pre-initializes it; confirm.
 * @return EOK always.
 */
int hc_status(bus_t *bus, uint32_t *status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
	if (ip) {
		*status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
		/* Acknowledge the RW1C status bits and the interrupter. */
		XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
		XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

		/* interrupt handler expects status from irq_commands, which is
		 * in xhci order. */
		*status = host2xhci(32, *status);
	}

	usb_log_debug2("HC(%p): Polled status: %x", hc, *status);
	return EOK;
}
504
505static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
506{
507 ++hc->wrap_count;
508 return EOK;
509}
510
511static int handle_port_status_change_event(xhci_hc_t *hc, xhci_trb_t *trb)
512{
513 uint8_t port_id = XHCI_QWORD_EXTRACT(trb->parameter, 31, 24);
514 usb_log_debug("Port status change event detected for port %u.", port_id);
515 xhci_rh_handle_port_change(&hc->rh, port_id);
516 return EOK;
517}
518
/** Signature of per-TRB-type event handlers. */
typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/* Dispatch table indexed by TRB type; types without an entry are rejected
 * with ENOTSUP by hc_handle_event. */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &handle_port_status_change_event,
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
	[XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};
527
528static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
529{
530 unsigned type = TRB_TYPE(*trb);
531 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
532 return ENOTSUP;
533
534 return event_handlers[type](hc, trb);
535}
536
/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events, that blocks on waiting for subsequent events,
 * we solve this problem by first copying the event TRBs from the event ring,
 * then asserting EHB and only after, handling the events.
 *
 * Whenever the event handling blocks, it switches fibril, and incoming
 * IPC notification will create new event handling fibril for us.
 *
 * @param hc Host controller.
 * @param event_ring Event ring to drain.
 * @param intr Interrupter registers whose ERDP is advanced as we go.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
{
	int err;

	xhci_trb_t trb;
	/* Remember which fibril is draining the ring (cleared below). */
	hc->event_handler = fibril_get_id();

	while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
		if ((err = hc_handle_event(hc, &trb, intr)) != EOK) {
			usb_log_error("Failed to handle event: %s", str_error(err));
		}

		/* Advance ERDP after each handled event so the controller
		 * sees forward progress. */
		uint64_t erdp = hc->event_ring.dequeue_ptr;
		XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
		XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
	}

	hc->event_handler = 0;

	/* Update the ERDP to make room in the ring. */
	/* The final write also clears the Event Handler Busy bit (RW1C). */
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
	XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
	XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));

	usb_log_debug2("Event ring run finished.");
}
574
/**
 * Handle an interrupt request from xHC. Resolve all situations that trigger an
 * interrupt separately.
 *
 * Note that all RW1C bits in USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * Each recognized condition clears its bit from the local copy; anything
 * left over at the end is logged as unhandled.
 *
 * @param bus Generic bus handle (converted to xhci_hc_t).
 * @param status contents of USBSTS register at the time of the interrupt,
 *               in xhci byte order (as produced by irq_commands/hc_status).
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	status = xhci2host(32, status);

	if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
		usb_log_error("Host controller error occured. Bad things gonna happen...");
		status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
		usb_log_debug2("Event interrupt, running the event ring.");
		hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
		status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
		usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
		status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
	}

	/* According to Note on p. 302, we may safely ignore the PCD bit. */
	status &= ~XHCI_REG_MASK(XHCI_OP_PCD);

	if (status) {
		usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
	}
}
612
/**
 * Tear down all in-memory structures.
 *
 * Counterpart of hc_init_memory + hc_init_mmio.
 * NOTE(review): the teardown is not the exact reverse of the init order in
 * hc_init_memory (e.g. commands are finalized after the DCBAA is freed) —
 * confirm no finalizer touches an already-freed structure.
 *
 * @param hc Host controller to finalize.
 */
void hc_fini(xhci_hc_t *hc)
{
	xhci_bus_fini(&hc->bus);
	xhci_event_ring_fini(&hc->event_ring);
	xhci_scratchpad_free(hc);
	dma_buffer_free(&hc->dcbaa_dma);
	xhci_fini_commands(hc);
	xhci_rh_fini(&hc->rh);
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	usb_log_info("HC(%p): Finalized.", hc);
}
627
628/**
629 * Ring a xHC Doorbell. Implements section 4.7.
630 */
631void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
632{
633 assert(hc);
634 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
635 pio_write_32(&hc->db_arry[doorbell], v);
636 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
637}
638
639/**
640 * Issue an Enable Slot command, returning the obtained Slot ID.
641 *
642 * @param slot_id Pointer where to store the obtained Slot ID.
643 */
644int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
645{
646 assert(hc);
647
648 int err;
649 xhci_cmd_t cmd;
650 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
651
652 if ((err = xhci_cmd_sync(hc, &cmd))) {
653 goto end;
654 }
655
656 if (slot_id) {
657 *slot_id = cmd.slot_id;
658 }
659
660end:
661 xhci_cmd_fini(&cmd);
662 return err;
663}
664
665/**
666 * Issue a Disable Slot command for a slot occupied by device.
667 *
668 * Frees the device context
669 */
670int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
671{
672 int err;
673 assert(hc);
674
675 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
676 return err;
677 }
678
679 /* Free the device context. */
680 hc->dcbaa[dev->slot_id] = 0;
681 dma_buffer_free(&dev->dev_ctx);
682
683 /* Mark the slot as invalid. */
684 dev->slot_id = 0;
685
686 return EOK;
687}
688
689/**
690 * Prepare an empty Endpoint Input Context inside a dma buffer.
691 */
692static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
693{
694 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
695 if (err)
696 return err;
697
698 xhci_input_ctx_t *ictx = dma_buf->virt;
699 memset(ictx, 0, sizeof(xhci_input_ctx_t));
700
701 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
702 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
703
704 return EOK;
705}
706
/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * Allocates and registers the output device context, builds an input
 * context describing the slot and EP0, and issues the Address Device
 * command. On failure, the device context is released again.
 *
 * @param hc Host controller.
 * @param dev Device to assing an address (unconfigured yet)
 * @param ep0 EP0 of device TODO remove, can be fetched from dev
 * @return EOK on success, EINVAL for an unmappable speed, ENOMEM or a
 *         propagated error otherwise.
 */
int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
{
	/* Preset for the first goto err below (plain alloc failure). */
	int err = ENOMEM;

	/* Although we have the precise PSIV value on devices of tier 1,
	 * we have to rely on reverse mapping on others. */
	if (!usb_speed_to_psiv[dev->base.speed]) {
		usb_log_error("Device reported an USB speed (%s) that cannot be mapped to HC port speed.", usb_str_speed(dev->base.speed));
		return EINVAL;
	}

	/* Setup and register device context */
	if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
		goto err;
	memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));

	hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);

	/* Issue configure endpoint command (sec 4.3.5). */
	dma_buffer_t ictx_dma_buf;
	if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
		goto err_dev_ctx;
	}
	xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

	/* Initialize slot_ctx according to section 4.3.3 point 3. */
	XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
	XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
	XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
	XHCI_SLOT_SPEED_SET(ictx->slot_ctx, usb_speed_to_psiv[dev->base.speed]);

	/* In a very specific case, we have to set also these. But before that,
	 * we need to refactor how TT is handled in libusbhost. */
	XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);

	/* Copy endpoint 0 context and set A1 flag. */
	XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
	xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

	/* Issue Address Device command. */
	if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
		goto err_dev_ctx;
	}

	/* The controller wrote the assigned address into the output context. */
	xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
	dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
	usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);

	return EOK;

err_dev_ctx:
	/* Unregister and free the output device context again. */
	hc->dcbaa[dev->slot_id] = 0;
	dma_buffer_free(&dev->dev_ctx);
err:
	return err;
}
771
772/**
773 * Issue a Configure Device command for a device in slot.
774 *
775 * @param slot_id Slot ID assigned to the device.
776 */
777int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
778{
779 /* Issue configure endpoint command (sec 4.3.5). */
780 dma_buffer_t ictx_dma_buf;
781 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
782 if (err)
783 return err;
784
785 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
786
787 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
788}
789
790/**
791 * Issue a Deconfigure Device command for a device in slot.
792 *
793 * @param slot_id Slot ID assigned to the device.
794 */
795int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
796{
797 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
798 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
799}
800
801/**
802 * Instruct xHC to add an endpoint with supplied endpoint context.
803 *
804 * @param slot_id Slot ID assigned to the device.
805 * @param ep_idx Endpoint index (number + direction) in question
806 * @param ep_ctx Endpoint context of the endpoint
807 */
808int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
809{
810 /* Issue configure endpoint command (sec 4.3.5). */
811 dma_buffer_t ictx_dma_buf;
812 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
813 if (err)
814 return err;
815
816 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
817 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
818 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
819 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
820
821 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
822}
823
824/**
825 * Instruct xHC to drop an endpoint.
826 *
827 * @param slot_id Slot ID assigned to the device.
828 * @param ep_idx Endpoint index (number + direction) in question
829 */
830int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
831{
832 /* Issue configure endpoint command (sec 4.3.5). */
833 dma_buffer_t ictx_dma_buf;
834 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
835 if (err)
836 return err;
837
838 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
839 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
840 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
841
842 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
843}
844
845/**
846 * Instruct xHC to update information about an endpoint, using supplied
847 * endpoint context.
848 *
849 * @param slot_id Slot ID assigned to the device.
850 * @param ep_idx Endpoint index (number + direction) in question
851 * @param ep_ctx Endpoint context of the endpoint
852 */
853int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
854{
855 dma_buffer_t ictx_dma_buf;
856 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
857 if (err)
858 return err;
859
860 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
861 memset(ictx, 0, sizeof(xhci_input_ctx_t));
862
863 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
864 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
865
866 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
867}
868
869/**
870 * Instruct xHC to stop running a transfer ring on an endpoint.
871 *
872 * @param slot_id Slot ID assigned to the device.
873 * @param ep_idx Endpoint index (number + direction) in question
874 */
875int hc_stop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
876{
877
878 return xhci_cmd_sync_inline(hc, STOP_ENDPOINT, .slot_id = slot_id, .endpoint_id = ep_idx);
879}
880
881/**
882 * @}
883 */
Note: See TracBrowser for help on using the repository browser.