source: mainline/uspace/drv/bus/usb/xhci/hc.c @ 69b2dfee

Last change on this file since 69b2dfee was 69b2dfee, checked in by Ondřej Hlavatý <aearsis@…>, 7 years ago:
xhci: satisfy picky Intel xHC
/*
 * Copyright (c) 2017 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief The host controller data bookkeeping.
 */

#include <errno.h>
#include <str_error.h>
#include <usb/debug.h>
#include <usb/host/endpoint.h>
#include "debug.h"
#include "hc.h"
#include "rh.h"
#include "hw_struct/trb.h"
#include "hw_struct/context.h"
#include "endpoint.h"
#include "transfers.h"
#include "trb_ring.h"

/**
 * Default USB Speed ID mapping: Table 157
 */
#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
#define PORT_SPEED(usb, mjr, psie, psim) { \
    .name = "USB ", \
    .major = mjr, \
    .minor = 0, \
    .usb_speed = USB_SPEED_##usb, \
    .rx_bps = PSI_TO_BPS(psie, psim), \
    .tx_bps = PSI_TO_BPS(psie, psim) \
}
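
/*
 * A worked example of the macro above: PSIE selects the unit (0 = b/s,
 * 1 = Kb/s, 2 = Mb/s, 3 = Gb/s) and PSIM is the mantissa, so the macro
 * approximates each exponent step with a binary shift by 10. The default
 * full-speed entry below therefore computes
 *
 *     PSI_TO_BPS(2, 12) == (uint64_t) 12 << 20 ~ 12 Mb/s
 *
 * and the SuperSpeed entry 5 << 30 ~ 5 Gb/s. The 1024-vs-1000 difference
 * is harmless as long as the values are only compared against numbers
 * produced by the same macro, which is what the default-PSIV check in
 * hc_parse_ec() relies on.
 */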

static const xhci_port_speed_t default_psiv_to_port_speed[] = {
    [1] = PORT_SPEED(FULL, 2, 2, 12),
    [2] = PORT_SPEED(LOW, 2, 1, 1500),
    [3] = PORT_SPEED(HIGH, 2, 2, 480),
    [4] = PORT_SPEED(SUPER, 3, 3, 5),
};

static const unsigned usb_speed_to_psiv[] = {
    [USB_SPEED_FULL] = 1,
    [USB_SPEED_LOW] = 2,
    [USB_SPEED_HIGH] = 3,
    [USB_SPEED_SUPER] = 4,
};

/**
 * Walk the list of extended capabilities.
 *
 * The most interesting thing hidden in extended capabilities is the mapping of
 * ports to protocol versions and speeds.
 */
static int hc_parse_ec(xhci_hc_t *hc)
{
    unsigned psic, major, minor;
    xhci_sp_name_t name;

    xhci_port_speed_t *speeds = hc->speeds;

    for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
        xhci_dump_extcap(ec);
        switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
        case XHCI_EC_USB_LEGACY:
            assert(hc->legsup == NULL);
            hc->legsup = (xhci_legsup_t *) ec;
            break;
        case XHCI_EC_SUPPORTED_PROTOCOL:
            psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
            major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
            minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
            name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));

            if (name.packed != xhci_name_usb.packed) {
                /*
                 * Detecting such a protocol would work, but the
                 * rest of the implementation is made for the USB
                 * protocol only.
                 */
                usb_log_error("Unknown protocol %.4s.", name.str);
                return ENOTSUP;
            }

            unsigned offset = XHCI_REG_RD(ec, XHCI_EC_SP_CP_OFF);
            unsigned count = XHCI_REG_RD(ec, XHCI_EC_SP_CP_COUNT);
            xhci_rh_set_ports_protocol(&hc->rh, offset, count, major);

            // "Implied" speed
            if (psic == 0) {
                assert(minor == 0);

                if (major == 2) {
                    speeds[1] = default_psiv_to_port_speed[1];
                    speeds[2] = default_psiv_to_port_speed[2];
                    speeds[3] = default_psiv_to_port_speed[3];
                } else if (major == 3) {
                    speeds[4] = default_psiv_to_port_speed[4];
                } else {
                    return EINVAL;
                }

                usb_log_debug2("Implied speed of USB %u.0 set up.", major);
            } else {
                for (unsigned i = 0; i < psic; i++) {
                    xhci_psi_t *psi = xhci_extcap_psi(ec, i);
                    unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PLT);
                    unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
                    unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
                    unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
                    uint64_t bps = PSI_TO_BPS(psie, psim);

                    /*
                     * The speed is not implied, but it uses one of the
                     * default PSIVs. This is not clearly stated in the
                     * xHCI spec. There is a clear intention to allow an
                     * xHC to specify its own speed parameters, but
                     * throughout the document fixed values are used for
                     * e.g. High-speed (3), without stating that the
                     * controller shall have implied default speeds - and
                     * for instance Intel controllers do not. So let's
                     * check whether the values match and if so, accept
                     * the implied USB speed too.
                     *
                     * The main reason we need this is to have a usb_speed
                     * mapping also for devices connected through hubs.
                     */
                    if (psiv < ARRAY_SIZE(default_psiv_to_port_speed)
                        && default_psiv_to_port_speed[psiv].major == major
                        && default_psiv_to_port_speed[psiv].minor == minor
                        && default_psiv_to_port_speed[psiv].rx_bps == bps
                        && default_psiv_to_port_speed[psiv].tx_bps == bps) {
                        speeds[psiv] = default_psiv_to_port_speed[psiv];
                        usb_log_debug2("Assumed default %s speed of USB %u.",
                            usb_str_speed(speeds[psiv].usb_speed), major);
                        continue;
                    }

                    // Custom speed
                    speeds[psiv].major = major;
                    speeds[psiv].minor = minor;
                    str_ncpy(speeds[psiv].name, 4, name.str, 4);
                    speeds[psiv].usb_speed = USB_SPEED_MAX;

                    if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
                        speeds[psiv].rx_bps = bps;
                    if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
                        speeds[psiv].tx_bps = bps;
                        usb_log_debug2("Speed %u set up for bps %" PRIu64
                            " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps,
                            speeds[psiv].tx_bps);
                    }
                }
            }
        }
    }
    return EOK;
}
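
/*
 * To illustrate what this walk typically finds (a common layout, not a
 * guarantee): one Supported Protocol capability for USB 2.0 covering e.g.
 * ports 1-8 and another for USB 3.0 covering ports 9-12, so a physical
 * connector may be exposed by the root hub as two ports, one per protocol.
 */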

/**
 * Initialize MMIO spaces of xHC.
 */
int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
    int err;

    if (hw_res->mem_ranges.count != 1) {
        usb_log_error("Unexpected MMIO area, bailing out.");
        return EINVAL;
    }

    hc->mmio_range = hw_res->mem_ranges.ranges[0];

    usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
        RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

    if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
        return EOVERFLOW;

    void *base;
    if ((err = pio_enable_range(&hc->mmio_range, &base)))
        return err;

    hc->reg_base = base;
    hc->cap_regs = (xhci_cap_regs_t *) base;
    hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
    hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
    hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

    uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
    if (xec_offset > 0)
        hc->xecp = (xhci_extcap_t *) (base + xec_offset);

    usb_log_debug2("Initialized MMIO reg areas:");
    usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
    usb_log_debug2("\tOperational regs: %p", hc->op_regs);
    usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
    usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);

    xhci_dump_cap_regs(hc->cap_regs);

    hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
    hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);

    struct timeval tv;
    getuptime(&tv);
    hc->wrap_time = tv.tv_sec * 1000000 + tv.tv_usec;
    hc->wrap_count = 0;

    /*
     * IST is a 4-bit field: bits [2:0] hold the value and bit [3] selects
     * whether it is expressed in frames (1) or microframes (0). Store it
     * uniformly in microframes (a frame is 8 microframes).
     */
    unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
    hc->ist = ((ist & 0x8) ? 8 : 1) * (ist & 0x7);

    if ((err = xhci_rh_init(&hc->rh, hc)))
        goto err_pio;

    if ((err = hc_parse_ec(hc)))
        goto err_rh;

    return EOK;

err_rh:
    xhci_rh_fini(&hc->rh);
err_pio:
    pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
    return err;
}

static int event_worker(void *arg);

/**
 * Initialize structures kept in allocated memory.
 */
int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
    int err;

    if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
        return ENOMEM;
    hc->dcbaa = hc->dcbaa_dma.virt;

    if ((err = xhci_event_ring_init(&hc->event_ring)))
        goto err_dcbaa;

    if ((err = xhci_scratchpad_alloc(hc)))
        goto err_event_ring;

    if ((err = xhci_init_commands(hc)))
        goto err_scratch;

    if ((err = xhci_bus_init(&hc->bus, hc)))
        goto err_cmd;

    fid_t fid = fibril_create(&event_worker, hc);
    if (!fid) {
        err = ENOMEM;
        goto err_bus;
    }

    // TODO: completion_reset
    hc->event_fibril_completion.active = true;
    fibril_mutex_initialize(&hc->event_fibril_completion.guard);
    fibril_condvar_initialize(&hc->event_fibril_completion.cv);

    xhci_sw_ring_init(&hc->sw_ring, PAGE_SIZE / sizeof(xhci_trb_t));

    fibril_add_ready(fid);

    return EOK;

err_bus:
    xhci_bus_fini(&hc->bus);
err_cmd:
    xhci_fini_commands(hc);
err_scratch:
    xhci_scratchpad_free(hc);
err_event_ring:
    xhci_event_ring_fini(&hc->event_ring);
err_dcbaa:
    hc->dcbaa = NULL;
    dma_buffer_free(&hc->dcbaa_dma);
    return err;
}

/*
 * Pseudocode:
 *     ip = read(intr[0].iman)
 *     if (ip) {
 *         status = read(usbsts)
 *         assert status
 *         assert ip
 *         accept (passing status)
 *     }
 *     decline
 */
static const irq_cmd_t irq_commands[] = {
    {
        .cmd = CMD_PIO_READ_32,
        .dstarg = 3,
        .addr = NULL /* intr[0].iman */
    },
    {
        .cmd = CMD_AND,
        .srcarg = 3,
        .dstarg = 4,
        .value = 0 /* host2xhci(32, 1) */
    },
    {
        .cmd = CMD_PREDICATE,
        .srcarg = 4,
        .value = 5
    },
    {
        .cmd = CMD_PIO_READ_32,
        .dstarg = 1,
        .addr = NULL /* usbsts */
    },
    {
        .cmd = CMD_AND,
        .srcarg = 1,
        .dstarg = 2,
        .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
    },
    {
        .cmd = CMD_PIO_WRITE_A_32,
        .srcarg = 2,
        .addr = NULL /* usbsts */
    },
    {
        .cmd = CMD_PIO_WRITE_A_32,
        .srcarg = 3,
        .addr = NULL /* intr[0].iman */
    },
    {
        .cmd = CMD_ACCEPT
    },
    {
        .cmd = CMD_DECLINE
    }
};
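
/*
 * The predicate's .value = 5 matches the five commands that follow it
 * before CMD_DECLINE (read usbsts, mask it, write it back, write iman
 * back, accept): when the masked IP bit in arg4 is zero, those five are
 * skipped and the interrupt is declined as not ours.
 */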

/**
 * Generates code to accept interrupts. The xHCI is designed primarily for
 * MSI/MSI-X, but we use the PCI Interrupt Pin. In this mode, all the
 * Interrupters (except 0) are disabled.
 */
int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
    assert(code);
    assert(hw_res);

    if (hw_res->irqs.count != 1) {
        usb_log_info("Unexpected HW resources to enable interrupts.");
        return EINVAL;
    }

    code->ranges = malloc(sizeof(irq_pio_range_t));
    if (code->ranges == NULL)
        return ENOMEM;

    code->cmds = malloc(sizeof(irq_commands));
    if (code->cmds == NULL) {
        free(code->ranges);
        return ENOMEM;
    }

    code->rangecount = 1;
    code->ranges[0] = (irq_pio_range_t) {
        .base = RNGABS(hc->mmio_range),
        .size = RNGSZ(hc->mmio_range),
    };

    code->cmdcount = ARRAY_SIZE(irq_commands);
    memcpy(code->cmds, irq_commands, sizeof(irq_commands));

    void *intr0_iman = RNGABSPTR(hc->mmio_range)
        + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF)
        + offsetof(xhci_rt_regs_t, ir[0]);
    void *usbsts = RNGABSPTR(hc->mmio_range)
        + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH)
        + offsetof(xhci_op_regs_t, usbsts);
    code->cmds[0].addr = intr0_iman;
    code->cmds[1].value = host2xhci(32, 1);
    code->cmds[3].addr = usbsts;
    code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
    code->cmds[5].addr = usbsts;
    code->cmds[6].addr = intr0_iman;

    return hw_res->irqs.irqs[0];
}

/**
 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
 */
int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
{
    /* No legacy support capability, the controller is solely for us */
    if (!hc->legsup)
        return EOK;

    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
    XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
    for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
        usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
            XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
            XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
        if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
            return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
        }
        async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
    }
    usb_log_error("BIOS did not release XHCI legacy hold!");

    return ENOTSUP;
}

/**
 * Ask the xHC to reset its state. Implements the halt-then-reset sequence
 * prescribed by section 4.2 of the xHCI spec.
 */
static int hc_reset(xhci_hc_t *hc)
{
    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    /* Stop the HC: set R/S to 0 */
    XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

    /* Wait until the HC is halted - it shall take at most 16 ms */
    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
        XHCI_REG_MASK(XHCI_OP_HCH)))
        return ETIMEOUT;

    /* Reset */
    XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

    /* Wait until the reset is complete */
    if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
        return ETIMEOUT;

    return EOK;
}

/**
 * Initialize the HC: section 4.2
 */
int hc_start(xhci_hc_t *hc, bool irq)
{
    int err;

    if ((err = hc_reset(hc)))
        return err;

    if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
        return ETIMEOUT;

    uint64_t dcbaaptr = hc->dcbaa_dma.phys;
    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

    uintptr_t crcr;
    xhci_trb_ring_reset_dequeue_state(&hc->cr.trb_ring, &crcr);
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));

    XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);

    xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
    uint64_t erdp = hc->event_ring.dequeue_ptr;
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
    uint64_t erstptr = hc->event_ring.erst.phys;
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));

    if (irq) {
        XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
        XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
    }

    XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

    XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

    xhci_rh_startup(&hc->rh);

    return EOK;
}

/**
 * Used only when polling. Shall supplement the irq_commands.
 */
int hc_status(bus_t *bus, uint32_t *status)
{
    xhci_hc_t *hc = bus_to_hc(bus);
    int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
    if (ip) {
        *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
        XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
        XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

        /*
         * The interrupt handler expects the status from irq_commands,
         * which is in xhci order.
         */
        *status = host2xhci(32, *status);
    }

    usb_log_debug2("Polled status: %x", *status);
    return EOK;
}

static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
    struct timeval tv;
    getuptime(&tv);
    usb_log_debug2("Microframe index wrapped (@%lu.%li, %" PRIu64 " total).",
        tv.tv_sec, tv.tv_usec, hc->wrap_count);
    hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
    ++hc->wrap_count;
    return EOK;
}
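
/*
 * A sketch of how the (wrap_time, wrap_count) bookkeeping above can be
 * consumed; hc_abs_microframe() is hypothetical and not part of this
 * driver. Since MFINDEX is only 14 bits wide, an absolute microframe
 * counter has to be reconstructed from the recorded wraps:
 *
 *     static uint64_t hc_abs_microframe(xhci_hc_t *hc, uint32_t mfindex)
 *     {
 *         return (hc->wrap_count << 14) | (mfindex & 0x3fff);
 *     }
 */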

static int handle_port_status_change_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
    uint8_t port_id = XHCI_QWORD_EXTRACT(trb->parameter, 31, 24);
    usb_log_debug("Port status change event detected for port %u.", port_id);
    xhci_rh_handle_port_change(&hc->rh, port_id);
    return EOK;
}

typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/**
 * These events are handled by the separate event handling fibril.
 */
static event_handler event_handlers[] = {
    [XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &handle_port_status_change_event,
    [XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};

/**
 * These events are handled directly in the interrupt handler, thus they must
 * not block waiting for another interrupt.
 */
static event_handler event_handlers_fast[] = {
    [XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
    [XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};

static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
    const unsigned type = TRB_TYPE(*trb);

    if (type < ARRAY_SIZE(event_handlers_fast) && event_handlers_fast[type])
        return event_handlers_fast[type](hc, trb);

    if (type < ARRAY_SIZE(event_handlers) && event_handlers[type])
        return xhci_sw_ring_enqueue(&hc->sw_ring, trb);

    return ENOTSUP;
}

static int event_worker(void *arg)
{
    int err;
    xhci_trb_t trb;
    xhci_hc_t * const hc = arg;
    assert(hc);

    while (xhci_sw_ring_dequeue(&hc->sw_ring, &trb) != EINTR) {
        const unsigned type = TRB_TYPE(trb);

        if ((err = event_handlers[type](hc, &trb)))
            usb_log_error("Failed to handle event: %s", str_error(err));
    }

    // TODO: completion_complete
    fibril_mutex_lock(&hc->event_fibril_completion.guard);
    hc->event_fibril_completion.active = false;
    fibril_condvar_broadcast(&hc->event_fibril_completion.cv);
    fibril_mutex_unlock(&hc->event_fibril_completion.guard);

    return EOK;
}

/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events that block waiting for subsequent events, we solve
 * this problem by first copying the event TRBs from the event ring, then
 * asserting EHB, and only then handling the events.
 *
 * Whenever the event handling blocks, it switches fibril, and an incoming
 * IPC notification will create a new event handling fibril for us.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring,
    xhci_interrupter_regs_t *intr)
{
    int err;

    xhci_trb_t trb;
    hc->event_handler = fibril_get_id();

    while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
        if ((err = hc_handle_event(hc, &trb)) != EOK) {
            usb_log_error("Failed to handle event in interrupt: %s",
                str_error(err));
        }

        uint64_t erdp = hc->event_ring.dequeue_ptr;
        XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
        XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
    }

    hc->event_handler = 0;

    /* Update the ERDP to make room in the ring. */
    uint64_t erdp = hc->event_ring.dequeue_ptr;
    erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
    XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
    XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));

    usb_log_debug2("Event ring run finished.");
}

/**
 * Handle an interrupt request from the xHC. Resolve all situations that
 * trigger an interrupt separately.
 *
 * Note that all RW1C bits in the USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * @param status contents of USBSTS register at the time of the interrupt.
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
    xhci_hc_t *hc = bus_to_hc(bus);
    status = xhci2host(32, status);

    if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
        usb_log_error("Host controller error occurred. Bad things gonna happen...");
        status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
    }

    if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
        usb_log_debug2("Event interrupt, running the event ring.");
        hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
        status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
    }

    if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
        usb_log_error("Save/Restore error occurred. WTF, S/R mechanism not implemented!");
        status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
    }

    /* According to Note on p. 302, we may safely ignore the PCD bit. */
    status &= ~XHCI_REG_MASK(XHCI_OP_PCD);

    if (status) {
        usb_log_error("Non-zero status after interrupt handling (%08x) "
            "- missing something?", status);
    }
}

/**
 * Tear down all in-memory structures.
 */
void hc_fini(xhci_hc_t *hc)
{
    xhci_sw_ring_stop(&hc->sw_ring);

    // TODO: completion_wait
    fibril_mutex_lock(&hc->event_fibril_completion.guard);
    while (hc->event_fibril_completion.active)
        fibril_condvar_wait(&hc->event_fibril_completion.cv,
            &hc->event_fibril_completion.guard);
    fibril_mutex_unlock(&hc->event_fibril_completion.guard);
    xhci_sw_ring_fini(&hc->sw_ring);

    xhci_bus_fini(&hc->bus);
    xhci_event_ring_fini(&hc->event_ring);
    xhci_scratchpad_free(hc);
    dma_buffer_free(&hc->dcbaa_dma);
    xhci_fini_commands(hc);
    xhci_rh_fini(&hc->rh);
    pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
    usb_log_info("Finalized.");
}

/**
 * Ring an xHC Doorbell. Implements section 4.7.
 */
void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
{
    assert(hc);
    uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
    pio_write_32(&hc->db_arry[doorbell], v);
    usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
}
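
/*
 * A usage note: per section 4.7, doorbell 0 belongs to the command ring and
 * is rung with target 0, while doorbell N (N being a Slot ID) is rung with
 * the target set to the endpoint DCI. A hypothetical caller kicking the
 * transfer ring of a device's EP0 (DCI 1) would therefore do:
 *
 *     hc_ring_doorbell(hc, dev->slot_id, 1);
 */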

/**
 * Issue an Enable Slot command. Allocate memory for the slot and fill the
 * DCBAA with the newly created slot.
 */
int hc_enable_slot(xhci_device_t *dev)
{
    int err;
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);

    /* Prepare memory for the context */
    if ((err = dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t))))
        return err;
    memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));

    /* Get the slot number */
    xhci_cmd_t cmd;
    xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

    err = xhci_cmd_sync(hc, &cmd);

    /* Link them together */
    if (err == EOK) {
        dev->slot_id = cmd.slot_id;
        hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
    }

    xhci_cmd_fini(&cmd);
    return err;
}

/**
 * Issue a Disable Slot command for a slot occupied by a device.
 * Frees the device context.
 */
int hc_disable_slot(xhci_device_t *dev)
{
    int err;
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);

    if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
        return err;
    }

    /* Free the device context. */
    hc->dcbaa[dev->slot_id] = 0;
    dma_buffer_free(&dev->dev_ctx);

    /* Mark the slot as invalid. */
    dev->slot_id = 0;

    return EOK;
}

/**
 * Fill a slot context that is part of an Input Context with appropriate
 * values.
 *
 * @param ctx Slot context, zeroed out.
 */
static void xhci_setup_slot_context(xhci_device_t *dev, xhci_slot_ctx_t *ctx)
{
    /* Initialize slot_ctx according to section 4.3.3 point 3. */
    XHCI_SLOT_ROOT_HUB_PORT_SET(*ctx, dev->rh_port);
    XHCI_SLOT_ROUTE_STRING_SET(*ctx, dev->route_str);
    XHCI_SLOT_SPEED_SET(*ctx, usb_speed_to_psiv[dev->base.speed]);

    /*
     * Note: This function is used even before this flag can be set, to
     * issue the address device command. It is OK, because these
     * flags are not required to be valid for that command.
     */
    if (dev->is_hub) {
        XHCI_SLOT_HUB_SET(*ctx, 1);
        XHCI_SLOT_NUM_PORTS_SET(*ctx, dev->num_ports);
        XHCI_SLOT_TT_THINK_TIME_SET(*ctx, dev->tt_think_time);
        XHCI_SLOT_MTT_SET(*ctx, 0); // MTT not supported yet
    }

    /* Setup Transaction Translation. TODO: Test this with a HS hub. */
    if (dev->base.tt.dev != NULL) {
        xhci_device_t *hub = xhci_device_get(dev->base.tt.dev);
        XHCI_SLOT_TT_HUB_SLOT_ID_SET(*ctx, hub->slot_id);
        XHCI_SLOT_TT_HUB_PORT_SET(*ctx, dev->base.tt.port);
    }

    // As we always allocate space for the whole input context, we can set
    // this to the maximum.
    XHCI_SLOT_CTX_ENTRIES_SET(*ctx, 31);
}
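
/*
 * An illustration (the values are made up): the Route String holds one
 * 4-bit downstream port number per tier. A device plugged into port 3 of a
 * hub that itself sits on root hub port 2 arrives here with
 * dev->rh_port == 2 and dev->route_str == 0x3; a device one tier deeper,
 * on that hub's port 5, would carry dev->route_str == 0x53.
 */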

/**
 * Prepare an empty Endpoint Input Context inside a dma buffer.
 */
static int create_configure_ep_input_ctx(xhci_device_t *dev, dma_buffer_t *dma_buf)
{
    const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
    if (err)
        return err;

    xhci_input_ctx_t *ictx = dma_buf->virt;
    memset(ictx, 0, sizeof(xhci_input_ctx_t));

    // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
    xhci_setup_slot_context(dev, &ictx->slot_ctx);

    return EOK;
}
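
/*
 * For orientation: Add/Drop flag bit i in the Input Control Context refers
 * to Device Context entry i, i.e. A0 is the Slot Context and A(n) is the
 * endpoint with DCI n. Since ictx->endpoint_ctx[0] corresponds to DCI 1
 * (EP0), the callers below set flag ep_idx + 1 when they fill
 * ictx->endpoint_ctx[ep_idx].
 */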

/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * @param dev Device to assign an address (not yet configured)
 * @param ep0 EP0 of the device. TODO: remove, can be fetched from dev
 */
int hc_address_device(xhci_device_t *dev, xhci_endpoint_t *ep0)
{
    int err = ENOMEM;
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);

    /*
     * Although we have the precise PSIV value on devices of tier 1,
     * we have to rely on reverse mapping on others.
     */
    if (!usb_speed_to_psiv[dev->base.speed]) {
        usb_log_error("Device reported a USB speed (%s) that cannot be mapped "
            "to HC port speed.", usb_str_speed(dev->base.speed));
        return EINVAL;
    }

    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    if ((err = create_configure_ep_input_ctx(dev, &ictx_dma_buf)))
        return err;
    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

    /* Copy endpoint 0 context and set A1 flag. */
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
    xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

    /* Address device needs Ctx entries set to 1 only */
    xhci_slot_ctx_t *slot_ctx = &ictx->slot_ctx;
    XHCI_SLOT_CTX_ENTRIES_SET(*slot_ctx, 1);

    /* Issue Address Device command. */
    if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE,
        .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf)))
        return err;

    xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
    dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
    usb_log_debug2("Obtained USB address: %d.", dev->base.address);

    return EOK;
}

/**
 * Issue a Configure Endpoint command for a device in slot.
 *
 * @param dev The device to configure.
 */
int hc_configure_device(xhci_device_t *dev)
{
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);

    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
    if (err)
        return err;

    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
        .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Issue a Configure Endpoint command with the Deconfigure flag for a device
 * in slot.
 *
 * @param dev The device to deconfigure.
 */
int hc_deconfigure_device(xhci_device_t *dev)
{
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);

    /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
        .slot_id = dev->slot_id, .deconfigure = true);
}

/**
 * Instruct xHC to add an endpoint with supplied endpoint context.
 *
 * @param dev The owner of the endpoint
 * @param ep_idx Endpoint index (DCI - 1) in question
 * @param ep_ctx Endpoint context of the endpoint
 */
int hc_add_endpoint(xhci_device_t *dev, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
{
    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
    memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));

    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
        .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to drop an endpoint.
 *
 * @param dev The owner of the endpoint
 * @param ep_idx Endpoint index (DCI - 1) in question
 */
int hc_drop_endpoint(xhci_device_t *dev, uint8_t ep_idx)
{
    /* Issue configure endpoint command (sec 4.3.5). */
    dma_buffer_t ictx_dma_buf;
    const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */

    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
    return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
        .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to update information about an endpoint, using supplied
 * endpoint context.
 *
 * @param dev The owner of the endpoint
 * @param ep_idx Endpoint index (DCI - 1) in question
 * @param ep_ctx Endpoint context of the endpoint
 */
int hc_update_endpoint(xhci_device_t *dev, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
{
    dma_buffer_t ictx_dma_buf;
    const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
    if (err)
        return err;

    xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
    memset(ictx, 0, sizeof(xhci_input_ctx_t));

    XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
    memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));

    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
    return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT,
        .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf);
}

/**
 * Instruct xHC to stop running a transfer ring on an endpoint.
 *
 * @param dev The owner of the endpoint
 * @param ep_idx Endpoint DCI in question
 */
int hc_stop_endpoint(xhci_device_t *dev, uint8_t ep_idx)
{
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
    return xhci_cmd_sync_inline(hc, STOP_ENDPOINT,
        .slot_id = dev->slot_id, .endpoint_id = ep_idx);
}

/**
 * Instruct xHC to reset a halted endpoint.
 *
 * @param dev The owner of the endpoint
 * @param ep_idx Endpoint DCI in question
 */
int hc_reset_endpoint(xhci_device_t *dev, uint8_t ep_idx)
{
    xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
    return xhci_cmd_sync_inline(hc, RESET_ENDPOINT,
        .slot_id = dev->slot_id, .endpoint_id = ep_idx);
}

/**
 * @}
 */