source: mainline/uspace/drv/bus/usb/xhci/hc.c@ b277bef

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b277bef was 8ebe212, checked in by Jiri Svoboda <jiri@…>, 7 years ago

ccheck-fix a few files with for loops.

  • Property mode set to 100644
File size: 30.9 KB
Line 
1/*
2 * Copyright (c) 2018 Ondrej Hlavaty, Petr Manek, Jaroslav Jindrak, Jan Hrach, Michal Staruch
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
49/**
50 * Default USB Speed ID mapping: Table 157
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
53#define PORT_SPEED(usb, mjr, psie, psim) { \
54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
57 .usb_speed = USB_SPEED_##usb, \
58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
61
62static const xhci_port_speed_t default_psiv_to_port_speed [] = {
63 [1] = PORT_SPEED(FULL, 2, 2, 12),
64 [2] = PORT_SPEED(LOW, 2, 1, 1500),
65 [3] = PORT_SPEED(HIGH, 2, 2, 480),
66 [4] = PORT_SPEED(SUPER, 3, 3, 5),
67};
68
69static const unsigned usb_speed_to_psiv [] = {
70 [USB_SPEED_FULL] = 1,
71 [USB_SPEED_LOW] = 2,
72 [USB_SPEED_HIGH] = 3,
73 [USB_SPEED_SUPER] = 4,
74};
75
76/**
77 * Walk the list of extended capabilities.
78 *
79 * The most interesting thing hidden in extended capabilities is the mapping of
80 * ports to protocol versions and speeds.
81 */
82static errno_t hc_parse_ec(xhci_hc_t *hc)
83{
84 unsigned psic, major, minor;
85 xhci_sp_name_t name;
86
87 xhci_port_speed_t *speeds = hc->speeds;
88
89 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
90 xhci_dump_extcap(ec);
91 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
92 case XHCI_EC_USB_LEGACY:
93 assert(hc->legsup == NULL);
94 hc->legsup = (xhci_legsup_t *) ec;
95 break;
96 case XHCI_EC_SUPPORTED_PROTOCOL:
97 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
98 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
99 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
100 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
101
102 if (name.packed != xhci_name_usb.packed) {
103 /**
104 * The detection of such protocol would work,
105 * but the rest of the implementation is made
106 * for the USB protocol only.
107 */
108 usb_log_error("Unknown protocol %.4s.", name.str);
109 return ENOTSUP;
110 }
111
112 unsigned offset = XHCI_REG_RD(ec, XHCI_EC_SP_CP_OFF);
113 unsigned count = XHCI_REG_RD(ec, XHCI_EC_SP_CP_COUNT);
114 xhci_rh_set_ports_protocol(&hc->rh, offset, count, major);
115
116 // "Implied" speed
117 if (psic == 0) {
118 assert(minor == 0);
119
120 if (major == 2) {
121 speeds[1] = default_psiv_to_port_speed[1];
122 speeds[2] = default_psiv_to_port_speed[2];
123 speeds[3] = default_psiv_to_port_speed[3];
124 } else if (major == 3) {
125 speeds[4] = default_psiv_to_port_speed[4];
126 } else {
127 return EINVAL;
128 }
129
130 usb_log_debug("Implied speed of USB %u.0 set up.", major);
131 } else {
132 for (unsigned i = 0; i < psic; i++) {
133 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
134 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
135 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
136 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
137 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
138 uint64_t bps = PSI_TO_BPS(psie, psim);
139
140 /*
141 * Speed is not implied, but using one of default PSIV. This
142 * is not clearly stated in xHCI spec. There is a clear
143 * intention to allow xHCI to specify its own speed
144 * parameters, but throughout the document, they used fixed
145 * values for e.g. High-speed (3), without stating the
146 * controller shall have implied default speeds - and for
147 * instance Intel controllers do not. So let's check if the
148 * values match and if so, accept the implied USB speed too.
149 *
150 * The main reason we need this is the usb_speed to have
151 * mapping also for devices connected to hubs.
152 */
153 if (psiv < ARRAY_SIZE(default_psiv_to_port_speed) &&
154 default_psiv_to_port_speed[psiv].major == major &&
155 default_psiv_to_port_speed[psiv].minor == minor &&
156 default_psiv_to_port_speed[psiv].rx_bps == bps &&
157 default_psiv_to_port_speed[psiv].tx_bps == bps) {
158 speeds[psiv] = default_psiv_to_port_speed[psiv];
159 usb_log_debug("Assumed default %s speed of USB %u.",
160 usb_str_speed(speeds[psiv].usb_speed), major);
161 continue;
162 }
163
164 // Custom speed
165 speeds[psiv].major = major;
166 speeds[psiv].minor = minor;
167 str_ncpy(speeds[psiv].name, 4, name.str, 4);
168 speeds[psiv].usb_speed = USB_SPEED_MAX;
169
170 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
171 speeds[psiv].rx_bps = bps;
172 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
173 speeds[psiv].tx_bps = bps;
174 usb_log_debug("Speed %u set up for bps %" PRIu64
175 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps,
176 speeds[psiv].tx_bps);
177 }
178 }
179 }
180 }
181 }
182 return EOK;
183}
184
185/**
186 * Initialize MMIO spaces of xHC.
187 */
188errno_t hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
189{
190 errno_t err;
191
192 if (hw_res->mem_ranges.count != 1) {
193 usb_log_error("Unexpected MMIO area, bailing out.");
194 return EINVAL;
195 }
196
197 hc->mmio_range = hw_res->mem_ranges.ranges[0];
198
199 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
200 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
201
202 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
203 return EOVERFLOW;
204
205 void *base;
206 if ((err = pio_enable_range(&hc->mmio_range, &base)))
207 return err;
208
209 hc->reg_base = base;
210 hc->cap_regs = (xhci_cap_regs_t *) base;
211 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
212 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
213 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
214
215 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
216 if (xec_offset > 0)
217 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
218
219 usb_log_debug("Initialized MMIO reg areas:");
220 usb_log_debug("\tCapability regs: %p", hc->cap_regs);
221 usb_log_debug("\tOperational regs: %p", hc->op_regs);
222 usb_log_debug("\tRuntime regs: %p", hc->rt_regs);
223 usb_log_debug("\tDoorbell array base: %p", hc->db_arry);
224
225 xhci_dump_cap_regs(hc->cap_regs);
226
227 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
228 hc->csz = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_CSZ);
229 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
230
231 struct timeval tv;
232 getuptime(&tv);
233 hc->wrap_time = tv.tv_sec * 1000000 + tv.tv_usec;
234 hc->wrap_count = 0;
235
236 unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
237 hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);
238
239 if ((err = xhci_rh_init(&hc->rh, hc)))
240 goto err_pio;
241
242 if ((err = hc_parse_ec(hc)))
243 goto err_rh;
244
245 return EOK;
246
247err_rh:
248 xhci_rh_fini(&hc->rh);
249err_pio:
250 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
251 return err;
252}
253
254static int event_worker(void *arg);
255
256/**
257 * Initialize structures kept in allocated memory.
258 */
259errno_t hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
260{
261 errno_t err = ENOMEM;
262
263 if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
264 return ENOMEM;
265 hc->dcbaa = hc->dcbaa_dma.virt;
266
267 hc->event_worker = joinable_fibril_create(&event_worker, hc);
268 if (!hc->event_worker)
269 goto err_dcbaa;
270
271 if ((err = xhci_event_ring_init(&hc->event_ring, 1)))
272 goto err_worker;
273
274 if ((err = xhci_scratchpad_alloc(hc)))
275 goto err_event_ring;
276
277 if ((err = xhci_init_commands(hc)))
278 goto err_scratch;
279
280 if ((err = xhci_bus_init(&hc->bus, hc)))
281 goto err_cmd;
282
283 xhci_sw_ring_init(&hc->sw_ring, PAGE_SIZE / sizeof(xhci_trb_t));
284
285 return EOK;
286
287err_cmd:
288 xhci_fini_commands(hc);
289err_scratch:
290 xhci_scratchpad_free(hc);
291err_event_ring:
292 xhci_event_ring_fini(&hc->event_ring);
293err_worker:
294 joinable_fibril_destroy(hc->event_worker);
295err_dcbaa:
296 hc->dcbaa = NULL;
297 dma_buffer_free(&hc->dcbaa_dma);
298 return err;
299}
300
301/*
302 * Pseudocode:
303 * ip = read(intr[0].iman)
304 * if (ip) {
305 * status = read(usbsts)
306 * assert status
307 * assert ip
308 * accept (passing status)
309 * }
310 * decline
311 */
312static const irq_cmd_t irq_commands[] = {
313 {
314 .cmd = CMD_PIO_READ_32,
315 .dstarg = 3,
316 .addr = NULL /* intr[0].iman */
317 },
318 {
319 .cmd = CMD_AND,
320 .srcarg = 3,
321 .dstarg = 4,
322 .value = 0 /* host2xhci(32, 1) */
323 },
324 {
325 .cmd = CMD_PREDICATE,
326 .srcarg = 4,
327 .value = 5
328 },
329 {
330 .cmd = CMD_PIO_READ_32,
331 .dstarg = 1,
332 .addr = NULL /* usbsts */
333 },
334 {
335 .cmd = CMD_AND,
336 .srcarg = 1,
337 .dstarg = 2,
338 .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
339 },
340 {
341 .cmd = CMD_PIO_WRITE_A_32,
342 .srcarg = 2,
343 .addr = NULL /* usbsts */
344 },
345 {
346 .cmd = CMD_PIO_WRITE_A_32,
347 .srcarg = 3,
348 .addr = NULL /* intr[0].iman */
349 },
350 {
351 .cmd = CMD_ACCEPT
352 },
353 {
354 .cmd = CMD_DECLINE
355 }
356};
357
358
359/**
360 * Generates code to accept interrupts. The xHCI is designed primarily for
361 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
362 * (except 0) are disabled.
363 */
364errno_t hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res, int *irq)
365{
366 assert(code);
367 assert(hw_res);
368
369 if (hw_res->irqs.count != 1) {
370 usb_log_info("Unexpected HW resources to enable interrupts.");
371 return EINVAL;
372 }
373
374 code->ranges = malloc(sizeof(irq_pio_range_t));
375 if (code->ranges == NULL)
376 return ENOMEM;
377
378 code->cmds = malloc(sizeof(irq_commands));
379 if (code->cmds == NULL) {
380 free(code->ranges);
381 return ENOMEM;
382 }
383
384 code->rangecount = 1;
385 code->ranges[0] = (irq_pio_range_t) {
386 .base = RNGABS(hc->mmio_range),
387 .size = RNGSZ(hc->mmio_range),
388 };
389
390 code->cmdcount = ARRAY_SIZE(irq_commands);
391 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
392
393 void *intr0_iman = RNGABSPTR(hc->mmio_range) +
394 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) +
395 offsetof(xhci_rt_regs_t, ir[0]);
396 void *usbsts = RNGABSPTR(hc->mmio_range) +
397 XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) +
398 offsetof(xhci_op_regs_t, usbsts);
399
400 code->cmds[0].addr = intr0_iman;
401 code->cmds[1].value = host2xhci(32, 1);
402 code->cmds[3].addr = usbsts;
403 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
404 code->cmds[5].addr = usbsts;
405 code->cmds[6].addr = intr0_iman;
406
407 *irq = hw_res->irqs.irqs[0];
408 return EOK;
409}
410
411/**
412 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
413 */
414errno_t hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
415{
416 /* No legacy support capability, the controller is solely for us */
417 if (!hc->legsup)
418 return EOK;
419
420 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
421 return ETIMEOUT;
422
423 usb_log_debug("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
424 XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
425 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
426 usb_log_debug("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
427 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
428 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
429 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
430 return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
431 }
432 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
433 }
434 usb_log_error("BIOS did not release XHCI legacy hold!");
435
436 return ENOTSUP;
437}
438
439/**
440 * Ask the xHC to reset its state. Implements sequence
441 */
442static errno_t hc_reset(xhci_hc_t *hc)
443{
444 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
445 return ETIMEOUT;
446
447 /* Stop the HC: set R/S to 0 */
448 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);
449
450 /* Wait until the HC is halted - it shall take at most 16 ms */
451 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
452 XHCI_REG_MASK(XHCI_OP_HCH)))
453 return ETIMEOUT;
454
455 /* Reset */
456 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);
457
458 /* Wait until the reset is complete */
459 if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
460 return ETIMEOUT;
461
462 return EOK;
463}
464
465/**
466 * Initialize the HC: section 4.2
467 */
468errno_t hc_start(xhci_hc_t *hc)
469{
470 errno_t err;
471
472 if ((err = hc_reset(hc)))
473 return err;
474
475 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
476 return ETIMEOUT;
477
478 uintptr_t dcbaa_phys = dma_buffer_phys_base(&hc->dcbaa_dma);
479 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, dcbaa_phys);
480 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
481
482 uintptr_t crcr;
483 xhci_trb_ring_reset_dequeue_state(&hc->cr.trb_ring, &crcr);
484 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR, crcr);
485
486 XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);
487
488 xhci_event_ring_reset(&hc->event_ring);
489
490 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
491 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
492 XHCI_REG_WR(intr0, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
493
494 const uintptr_t erstba_phys = dma_buffer_phys_base(&hc->event_ring.erst);
495 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, erstba_phys);
496
497 if (CAP_HANDLE_VALID(hc->base.irq_handle)) {
498 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
499 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
500 }
501
502 XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);
503
504 xhci_sw_ring_restart(&hc->sw_ring);
505 joinable_fibril_start(hc->event_worker);
506
507 xhci_start_command_ring(hc);
508
509 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
510
511 /* RH needs to access port states on startup */
512 xhci_rh_start(&hc->rh);
513
514 return EOK;
515}
516
/**
 * Stop the controller and all software machinery attached to it:
 * halt the HC, abort pending commands, stop the event worker and
 * disconnect root hub devices.
 */
static void hc_stop(xhci_hc_t *hc)
{
	/* Stop the HC in hardware. */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/*
	 * Wait until the HC is halted - it shall take at most 16 ms.
	 * Note that we ignore the return value here.
	 */
	xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
	    XHCI_REG_MASK(XHCI_OP_HCH));

	/* Make sure commands will not block other fibrils. */
	xhci_nuke_command_ring(hc);

	/* Stop the event worker fibril to restart it */
	xhci_sw_ring_stop(&hc->sw_ring);
	joinable_fibril_join(hc->event_worker);

	/* Then, disconnect all roothub devices, which shall trigger
	 * disconnection of everything */
	xhci_rh_stop(&hc->rh);
}
540
/**
 * Fully restart the controller after a fatal error (e.g. HCE):
 * stop everything, recreate the worker fibrils, and run the standard
 * start sequence again.
 */
static void hc_reinitialize(xhci_hc_t *hc)
{
	/* Stop everything. */
	hc_stop(hc);

	usb_log_info("HC stopped. Starting again...");

	/* The worker fibrils need to be started again */
	joinable_fibril_recreate(hc->event_worker);
	joinable_fibril_recreate(hc->rh.event_worker);

	/* Now, the HC shall be stopped and software shall be clean. */
	hc_start(hc);
}
555
556static bool hc_is_broken(xhci_hc_t *hc)
557{
558 const uint32_t usbcmd = XHCI_REG_RD_FIELD(&hc->op_regs->usbcmd, 32);
559 const uint32_t usbsts = XHCI_REG_RD_FIELD(&hc->op_regs->usbsts, 32);
560
561 return !(usbcmd & XHCI_REG_MASK(XHCI_OP_RS)) ||
562 (usbsts & XHCI_REG_MASK(XHCI_OP_HCE)) ||
563 (usbsts & XHCI_REG_MASK(XHCI_OP_HSE));
564}
565
566/**
567 * Used only when polling. Shall supplement the irq_commands.
568 */
569errno_t hc_status(bus_t *bus, uint32_t *status)
570{
571 xhci_hc_t *hc = bus_to_hc(bus);
572 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
573 if (ip) {
574 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
575 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
576 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
577
578 /* interrupt handler expects status from irq_commands, which is
579 * in xhci order. */
580 *status = host2xhci(32, *status);
581 }
582
583 usb_log_debug("Polled status: %x", *status);
584 return EOK;
585}
586
/**
 * Record an MFINDEX Wrap Event: remember the uptime at which the microframe
 * index wrapped and bump the wrap counter (used to reconstruct a monotonic
 * microframe clock).
 *
 * @param hc Host controller.
 * @param trb The wrap event TRB (payload unused).
 * @return EOK always.
 */
static errno_t xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
{
	struct timeval tv;
	getuptime(&tv);
	usb_log_debug("Microframe index wrapped (@%lu.%li, %" PRIu64 " total).",
	    tv.tv_sec, tv.tv_usec, hc->wrap_count);
	/* Widen before multiplying to avoid 32-bit overflow. */
	hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
	++hc->wrap_count;
	return EOK;
}
597
/* Signature of a TRB event handler. */
typedef errno_t (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/**
 * These events are handled by separate event handling fibril.
 */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};

/**
 * These events are handled directly in the interrupt handler, thus they must
 * not block waiting for another interrupt.
 */
static event_handler event_handlers_fast [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};
615
616static errno_t hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb)
617{
618 const unsigned type = TRB_TYPE(*trb);
619
620 if (type <= ARRAY_SIZE(event_handlers_fast) && event_handlers_fast[type])
621 return event_handlers_fast[type](hc, trb);
622
623 if (type <= ARRAY_SIZE(event_handlers) && event_handlers[type])
624 return xhci_sw_ring_enqueue(&hc->sw_ring, trb);
625
626 if (type == XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT)
627 return xhci_sw_ring_enqueue(&hc->rh.event_ring, trb);
628
629 return ENOTSUP;
630}
631
632static int event_worker(void *arg)
633{
634 errno_t err;
635 xhci_trb_t trb;
636 xhci_hc_t *const hc = arg;
637 assert(hc);
638
639 while (xhci_sw_ring_dequeue(&hc->sw_ring, &trb) != EINTR) {
640 const unsigned type = TRB_TYPE(trb);
641
642 if ((err = event_handlers[type](hc, &trb)))
643 usb_log_error("Failed to handle event: %s", str_error(err));
644 }
645
646 return 0;
647}
648
649/**
650 * Dequeue from event ring and handle dequeued events.
651 *
652 * As there can be events, that blocks on waiting for subsequent events,
653 * we solve this problem by deferring some types of events to separate fibrils.
654 */
655static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring,
656 xhci_interrupter_regs_t *intr)
657{
658 errno_t err;
659
660 xhci_trb_t trb;
661 hc->event_handler = fibril_get_id();
662
663 while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
664 if ((err = hc_handle_event(hc, &trb)) != EOK) {
665 usb_log_error("Failed to handle event in interrupt: %s", str_error(err));
666 }
667
668 XHCI_REG_WR(intr, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
669 }
670
671 hc->event_handler = 0;
672
673 uint64_t erdp = hc->event_ring.dequeue_ptr;
674 erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
675 XHCI_REG_WR(intr, XHCI_INTR_ERDP, erdp);
676
677 usb_log_debug2("Event ring run finished.");
678}
679
680/**
681 * Handle an interrupt request from xHC. Resolve all situations that trigger an
682 * interrupt separately.
683 *
684 * Note that all RW1C bits in USBSTS register are cleared at the time of
685 * handling the interrupt in irq_code. This method is the top-half.
686 *
687 * @param status contents of USBSTS register at the time of the interrupt.
688 */
689void hc_interrupt(bus_t *bus, uint32_t status)
690{
691 xhci_hc_t *hc = bus_to_hc(bus);
692 status = xhci2host(32, status);
693
694 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
695 usb_log_error("Host system error occured. Aren't we supposed to be dead already?");
696 return;
697 }
698
699 if (status & XHCI_REG_MASK(XHCI_OP_HCE)) {
700 usb_log_error("Host controller error occured. Reinitializing...");
701 hc_reinitialize(hc);
702 return;
703 }
704
705 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
706 usb_log_debug2("Event interrupt, running the event ring.");
707 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
708 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
709 }
710
711 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
712 usb_log_error("Save/Restore error occured. WTF, "
713 "S/R mechanism not implemented!");
714 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
715 }
716
717 /* According to Note on p. 302, we may safely ignore the PCD bit. */
718 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
719
720 if (status) {
721 usb_log_error("Non-zero status after interrupt handling (%08x) "
722 " - missing something?", status);
723 }
724}
725
726/**
727 * Tear down all in-memory structures.
728 */
729void hc_fini(xhci_hc_t *hc)
730{
731 hc_stop(hc);
732
733 xhci_sw_ring_fini(&hc->sw_ring);
734 joinable_fibril_destroy(hc->event_worker);
735 xhci_bus_fini(&hc->bus);
736 xhci_event_ring_fini(&hc->event_ring);
737 xhci_scratchpad_free(hc);
738 dma_buffer_free(&hc->dcbaa_dma);
739 xhci_fini_commands(hc);
740 xhci_rh_fini(&hc->rh);
741 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
742 usb_log_info("Finalized.");
743}
744
745unsigned hc_speed_to_psiv(usb_speed_t speed)
746{
747 assert(speed < ARRAY_SIZE(usb_speed_to_psiv));
748 return usb_speed_to_psiv[speed];
749}
750
751/**
752 * Ring a xHC Doorbell. Implements section 4.7.
753 */
754void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
755{
756 assert(hc);
757 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
758 pio_write_32(&hc->db_arry[doorbell], v);
759 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
760}
761
762/**
763 * Return an index to device context.
764 */
765static uint8_t endpoint_dci(xhci_endpoint_t *ep)
766{
767 return (2 * ep->base.endpoint) +
768 (ep->base.transfer_type == USB_TRANSFER_CONTROL ||
769 ep->base.direction == USB_DIRECTION_IN);
770}
771
772void hc_ring_ep_doorbell(xhci_endpoint_t *ep, uint32_t stream_id)
773{
774 xhci_device_t *const dev = xhci_ep_to_dev(ep);
775 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
776 const uint8_t dci = endpoint_dci(ep);
777 const uint32_t target = (stream_id << 16) | (dci & 0x1ff);
778 hc_ring_doorbell(hc, dev->slot_id, target);
779}
780
781/**
782 * Issue an Enable Slot command. Allocate memory for the slot and fill the
783 * DCBAA with the newly created slot.
784 */
785errno_t hc_enable_slot(xhci_device_t *dev)
786{
787 errno_t err;
788 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
789
790 /* Prepare memory for the context */
791 if ((err = dma_buffer_alloc(&dev->dev_ctx, XHCI_DEVICE_CTX_SIZE(hc))))
792 return err;
793 memset(dev->dev_ctx.virt, 0, XHCI_DEVICE_CTX_SIZE(hc));
794
795 /* Get the slot number */
796 xhci_cmd_t cmd;
797 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
798
799 err = xhci_cmd_sync(hc, &cmd);
800
801 /* Link them together */
802 if (err == EOK) {
803 dev->slot_id = cmd.slot_id;
804 hc->dcbaa[dev->slot_id] =
805 host2xhci(64, dma_buffer_phys_base(&dev->dev_ctx));
806 }
807
808 xhci_cmd_fini(&cmd);
809
810 if (err)
811 dma_buffer_free(&dev->dev_ctx);
812
813 return err;
814}
815
816/**
817 * Issue a Disable Slot command for a slot occupied by device.
818 * Frees the device context.
819 */
820errno_t hc_disable_slot(xhci_device_t *dev)
821{
822 errno_t err;
823 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
824 xhci_cmd_t cmd;
825
826 xhci_cmd_init(&cmd, XHCI_CMD_DISABLE_SLOT);
827 cmd.slot_id = dev->slot_id;
828 err = xhci_cmd_sync(hc, &cmd);
829 xhci_cmd_fini(&cmd);
830 if (err != EOK)
831 return err;
832
833 /* Free the device context. */
834 hc->dcbaa[dev->slot_id] = 0;
835 dma_buffer_free(&dev->dev_ctx);
836
837 /* Mark the slot as invalid. */
838 dev->slot_id = 0;
839
840 return EOK;
841}
842
843/**
844 * Prepare an empty Endpoint Input Context inside a dma buffer.
845 */
846static errno_t create_configure_ep_input_ctx(xhci_device_t *dev, dma_buffer_t *dma_buf)
847{
848 const xhci_hc_t *hc = bus_to_hc(dev->base.bus);
849 const errno_t err = dma_buffer_alloc(dma_buf, XHCI_INPUT_CTX_SIZE(hc));
850 if (err)
851 return err;
852
853 xhci_input_ctx_t *ictx = dma_buf->virt;
854 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
855
856 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
857 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 0);
858 xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
859 xhci_setup_slot_context(dev, slot_ctx);
860
861 return EOK;
862}
863
864/**
865 * Initialize a device, assigning it an address. Implements section 4.3.4.
866 *
867 * @param dev Device to assing an address (unconfigured yet)
868 */
869errno_t hc_address_device(xhci_device_t *dev)
870{
871 errno_t err = ENOMEM;
872 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
873 xhci_endpoint_t *ep0 = xhci_endpoint_get(dev->base.endpoints[0]);
874
875 /* Although we have the precise PSIV value on devices of tier 1,
876 * we have to rely on reverse mapping on others. */
877 if (!usb_speed_to_psiv[dev->base.speed]) {
878 usb_log_error("Device reported an USB speed (%s) that cannot be mapped "
879 "to HC port speed.", usb_str_speed(dev->base.speed));
880 return EINVAL;
881 }
882
883 /* Issue configure endpoint command (sec 4.3.5). */
884 dma_buffer_t ictx_dma_buf;
885 if ((err = create_configure_ep_input_ctx(dev, &ictx_dma_buf)))
886 return err;
887 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
888
889 /* Copy endpoint 0 context and set A1 flag. */
890 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 1);
891 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, 1);
892 xhci_setup_endpoint_context(ep0, ep_ctx);
893
894 /* Address device needs Ctx entries set to 1 only */
895 xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
896 XHCI_SLOT_CTX_ENTRIES_SET(*slot_ctx, 1);
897
898 /* Issue Address Device command. */
899 xhci_cmd_t cmd;
900 xhci_cmd_init(&cmd, XHCI_CMD_ADDRESS_DEVICE);
901 cmd.slot_id = dev->slot_id;
902 cmd.input_ctx = ictx_dma_buf;
903 err = xhci_cmd_sync(hc, &cmd);
904 xhci_cmd_fini(&cmd);
905 if (err != EOK)
906 return err;
907
908 xhci_device_ctx_t *device_ctx = dev->dev_ctx.virt;
909 dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(*XHCI_GET_SLOT_CTX(device_ctx, hc));
910 usb_log_debug("Obtained USB address: %d.", dev->base.address);
911
912 return EOK;
913}
914
915/**
916 * Issue a Configure Device command for a device in slot.
917 *
918 * @param slot_id Slot ID assigned to the device.
919 */
920errno_t hc_configure_device(xhci_device_t *dev)
921{
922 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
923 xhci_cmd_t cmd;
924
925 /* Issue configure endpoint command (sec 4.3.5). */
926 dma_buffer_t ictx_dma_buf;
927 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
928 if (err != EOK)
929 return err;
930
931 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
932 cmd.slot_id = dev->slot_id;
933 cmd.input_ctx = ictx_dma_buf;
934 err = xhci_cmd_sync(hc, &cmd);
935 xhci_cmd_fini(&cmd);
936
937 return err;
938}
939
940/**
941 * Issue a Deconfigure Device command for a device in slot.
942 *
943 * @param dev The owner of the device
944 */
945errno_t hc_deconfigure_device(xhci_device_t *dev)
946{
947 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
948 xhci_cmd_t cmd;
949 errno_t err;
950
951 if (hc_is_broken(hc))
952 return EOK;
953
954 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
955 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
956 cmd.slot_id = dev->slot_id;
957 cmd.deconfigure = true;
958
959 err = xhci_cmd_sync(hc, &cmd);
960 xhci_cmd_fini(&cmd);
961
962 return err;
963}
964
965/**
966 * Instruct xHC to add an endpoint with supplied endpoint context.
967 *
968 * @param dev The owner of the device
969 * @param ep_idx Endpoint DCI in question
970 * @param ep_ctx Endpoint context of the endpoint
971 */
972errno_t hc_add_endpoint(xhci_endpoint_t *ep)
973{
974 xhci_device_t *const dev = xhci_ep_to_dev(ep);
975 const unsigned dci = endpoint_dci(ep);
976 xhci_cmd_t cmd;
977
978 /* Issue configure endpoint command (sec 4.3.5). */
979 dma_buffer_t ictx_dma_buf;
980 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
981 if (err != EOK)
982 return err;
983
984 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
985
986 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
987 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
988
989 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
990 xhci_setup_endpoint_context(ep, ep_ctx);
991
992 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
993 cmd.slot_id = dev->slot_id;
994 cmd.input_ctx = ictx_dma_buf;
995 err = xhci_cmd_sync(hc, &cmd);
996 xhci_cmd_fini(&cmd);
997
998 return err;
999}
1000
1001/**
1002 * Instruct xHC to drop an endpoint.
1003 *
1004 * @param dev The owner of the endpoint
1005 * @param ep_idx Endpoint DCI in question
1006 */
1007errno_t hc_drop_endpoint(xhci_endpoint_t *ep)
1008{
1009 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1010 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
1011 const unsigned dci = endpoint_dci(ep);
1012 xhci_cmd_t cmd;
1013
1014 if (hc_is_broken(hc))
1015 return EOK;
1016
1017 /* Issue configure endpoint command (sec 4.3.5). */
1018 dma_buffer_t ictx_dma_buf;
1019 errno_t err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
1020 if (err != EOK)
1021 return err;
1022
1023 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
1024 XHCI_INPUT_CTRL_CTX_DROP_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
1025
1026 xhci_cmd_init(&cmd, XHCI_CMD_CONFIGURE_ENDPOINT);
1027 cmd.slot_id = dev->slot_id;
1028 cmd.input_ctx = ictx_dma_buf;
1029 err = xhci_cmd_sync(hc, &cmd);
1030 xhci_cmd_fini(&cmd);
1031
1032 return err;
1033}
1034
1035/**
1036 * Instruct xHC to update information about an endpoint, using supplied
1037 * endpoint context.
1038 *
1039 * @param dev The owner of the endpoint
1040 * @param ep_idx Endpoint DCI in question
1041 * @param ep_ctx Endpoint context of the endpoint
1042 */
1043errno_t hc_update_endpoint(xhci_endpoint_t *ep)
1044{
1045 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1046 const unsigned dci = endpoint_dci(ep);
1047 xhci_cmd_t cmd;
1048
1049 dma_buffer_t ictx_dma_buf;
1050 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
1051
1052 errno_t err = dma_buffer_alloc(&ictx_dma_buf, XHCI_INPUT_CTX_SIZE(hc));
1053 if (err != EOK)
1054 return err;
1055
1056 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
1057 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
1058
1059 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
1060 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
1061 xhci_setup_endpoint_context(ep, ep_ctx);
1062
1063 xhci_cmd_init(&cmd, XHCI_CMD_EVALUATE_CONTEXT);
1064 cmd.slot_id = dev->slot_id;
1065 cmd.input_ctx = ictx_dma_buf;
1066 err = xhci_cmd_sync(hc, &cmd);
1067 xhci_cmd_fini(&cmd);
1068
1069 return err;
1070}
1071
1072/**
1073 * Instruct xHC to stop running a transfer ring on an endpoint.
1074 *
1075 * @param dev The owner of the endpoint
1076 * @param ep_idx Endpoint DCI in question
1077 */
1078errno_t hc_stop_endpoint(xhci_endpoint_t *ep)
1079{
1080 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1081 const unsigned dci = endpoint_dci(ep);
1082 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
1083 xhci_cmd_t cmd;
1084 errno_t err;
1085
1086 if (hc_is_broken(hc))
1087 return EOK;
1088
1089 xhci_cmd_init(&cmd, XHCI_CMD_STOP_ENDPOINT);
1090 cmd.slot_id = dev->slot_id;
1091 cmd.endpoint_id = dci;
1092 err = xhci_cmd_sync(hc, &cmd);
1093 xhci_cmd_fini(&cmd);
1094
1095 return err;
1096}
1097
1098/**
1099 * Instruct xHC to reset halted endpoint.
1100 *
1101 * @param dev The owner of the endpoint
1102 * @param ep_idx Endpoint DCI in question
1103 */
1104errno_t hc_reset_endpoint(xhci_endpoint_t *ep)
1105{
1106 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1107 const unsigned dci = endpoint_dci(ep);
1108 xhci_hc_t *const hc = bus_to_hc(dev->base.bus);
1109 xhci_cmd_t cmd;
1110 errno_t err;
1111
1112 xhci_cmd_init(&cmd, XHCI_CMD_RESET_ENDPOINT);
1113 cmd.slot_id = dev->slot_id;
1114 cmd.endpoint_id = dci;
1115 err = xhci_cmd_sync(hc, &cmd);
1116 xhci_cmd_fini(&cmd);
1117
1118 return err;
1119}
1120
1121/**
1122 * Reset a ring position in both software and hardware.
1123 *
1124 * @param dev The owner of the endpoint
1125 */
1126errno_t hc_reset_ring(xhci_endpoint_t *ep, uint32_t stream_id)
1127{
1128 xhci_device_t *const dev = xhci_ep_to_dev(ep);
1129 const unsigned dci = endpoint_dci(ep);
1130 uintptr_t addr;
1131 xhci_cmd_t cmd;
1132 errno_t err;
1133
1134 xhci_trb_ring_t *ring = xhci_endpoint_get_ring(ep, stream_id);
1135 xhci_trb_ring_reset_dequeue_state(ring, &addr);
1136
1137 xhci_hc_t *const hc = bus_to_hc(endpoint_get_bus(&ep->base));
1138
1139 xhci_cmd_init(&cmd, XHCI_CMD_SET_TR_DEQUEUE_POINTER);
1140 cmd.slot_id = dev->slot_id;
1141 cmd.endpoint_id = dci;
1142 cmd.stream_id = stream_id;
1143 cmd.dequeue_ptr = addr;
1144 err = xhci_cmd_sync(hc, &cmd);
1145 xhci_cmd_fini(&cmd);
1146
1147 return err;
1148}
1149
1150/**
1151 * @}
1152 */
Note: See TracBrowser for help on using the repository browser.