source: mainline/uspace/drv/bus/usb/xhci/hc.c@ f3baab1

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since f3baab1 was f3baab1, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: do not rely on internal fibril quirks

Previously, we abused the fact that new fibrils are spawned for handling
notifications, so we could afford to block in the event handler. We were
told this is subject to change and that we should stop doing it.

This commit removes the abuse, but newly requires event handlers not to
block waiting for another event (e.g. commands do wait for events). To
quickly detect this situation, deadlock detection was added.

This commit breaks current functionality. Our current job is to identify
the processes that do block, and move each of them to a separate fibril
(or spawn a dedicated fibril for that process alone).

  • Property mode set to 100644
File size: 24.1 KB
Line 
1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
49/**
50 * Default USB Speed ID mapping: Table 157
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
53#define PORT_SPEED(usb, mjr, psie, psim) { \
54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
57 .usb_speed = USB_SPEED_##usb, \
58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
61static const xhci_port_speed_t ps_default_full = PORT_SPEED(FULL, 2, 2, 12);
62static const xhci_port_speed_t ps_default_low = PORT_SPEED(LOW, 2, 1, 1500);
63static const xhci_port_speed_t ps_default_high = PORT_SPEED(HIGH, 2, 2, 480);
64static const xhci_port_speed_t ps_default_super = PORT_SPEED(SUPER, 3, 3, 5);
65
66/**
67 * Walk the list of extended capabilities.
68 *
69 * The most interesting thing hidden in extended capabilities is the mapping of
70 * ports to protocol versions and speeds.
71 */
72static int hc_parse_ec(xhci_hc_t *hc)
73{
74 unsigned psic, major, minor;
75 xhci_sp_name_t name;
76
77 xhci_port_speed_t *speeds = hc->speeds;
78
79 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
80 xhci_dump_extcap(ec);
81 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
82 case XHCI_EC_USB_LEGACY:
83 assert(hc->legsup == NULL);
84 hc->legsup = (xhci_legsup_t *) ec;
85 break;
86 case XHCI_EC_SUPPORTED_PROTOCOL:
87 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
88 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
89 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
90 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
91
92 if (name.packed != xhci_name_usb.packed) {
93 /**
94 * The detection of such protocol would work,
95 * but the rest of the implementation is made
96 * for the USB protocol only.
97 */
98 usb_log_error("Unknown protocol %.4s.", name.str);
99 return ENOTSUP;
100 }
101
102 // "Implied" speed
103 if (psic == 0) {
104 assert(minor == 0);
105
106 if (major == 2) {
107 speeds[1] = ps_default_full;
108 speeds[2] = ps_default_low;
109 speeds[3] = ps_default_high;
110
111 hc->speed_to_psiv[USB_SPEED_FULL] = 1;
112 hc->speed_to_psiv[USB_SPEED_LOW] = 2;
113 hc->speed_to_psiv[USB_SPEED_HIGH] = 3;
114 } else if (major == 3) {
115 speeds[4] = ps_default_super;
116 hc->speed_to_psiv[USB_SPEED_SUPER] = 4;
117 } else {
118 return EINVAL;
119 }
120
121 usb_log_debug2("Implied speed of USB %u.0 set up.", major);
122 } else {
123 for (unsigned i = 0; i < psic; i++) {
124 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
125 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
126 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
127 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
128 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
129
130 speeds[psiv].major = major;
131 speeds[psiv].minor = minor;
132 str_ncpy(speeds[psiv].name, 4, name.str, 4);
133 speeds[psiv].usb_speed = USB_SPEED_MAX;
134
135 uint64_t bps = PSI_TO_BPS(psie, psim);
136
137 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
138 speeds[psiv].rx_bps = bps;
139 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
140 speeds[psiv].tx_bps = bps;
141 usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
142 }
143 }
144 }
145 }
146 }
147 return EOK;
148}
149
/**
 * Initialize MMIO spaces of xHC.
 *
 * Maps the single expected memory range and locates the capability,
 * operational, runtime and doorbell register areas inside it, then parses
 * the extended capability list. On any failure after mapping, the PIO
 * mapping is released again.
 */
int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	int err;

	/* The xHC is expected to expose exactly one MMIO area. */
	if (hw_res->mem_ranges.count != 1) {
		usb_log_error("Unexpected MMIO area, bailing out.");
		return EINVAL;
	}

	hc->mmio_range = hw_res->mem_ranges.ranges[0];

	usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
	    RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

	/* Capability regs must fit before we can read offsets of the rest. */
	if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
		return EOVERFLOW;

	void *base;
	if ((err = pio_enable_range(&hc->mmio_range, &base)))
		return err;

	/* The other register areas live at offsets advertised in cap regs. */
	hc->reg_base = base;
	hc->cap_regs = (xhci_cap_regs_t *) base;
	hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
	hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
	hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

	/* XECP is given in dwords; zero means no extended capabilities. */
	uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
	if (xec_offset > 0)
		hc->xecp = (xhci_extcap_t *) (base + xec_offset);

	usb_log_debug2("Initialized MMIO reg areas:");
	usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
	usb_log_debug2("\tOperational regs: %p", hc->op_regs);
	usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
	usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);

	xhci_dump_cap_regs(hc->cap_regs);

	hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
	hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
	hc->wrap_count = 0;
	unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
	/*
	 * NOTE(review): due to operator precedence, `ist & 0x10 >> 1` parses
	 * as `ist & (0x10 >> 1)` == `ist & 0x8`, i.e. it tests IST bit 3
	 * (the frames-vs-microframes indicator), not a shifted bit 4.
	 * Verify against xHCI HCSPARAMS2.IST whether this is the intent.
	 */
	hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);

	if ((err = hc_parse_ec(hc))) {
		/* Undo the mapping; nothing else was acquired here. */
		pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
		return err;
	}

	return EOK;
}
205
/**
 * Initialize structures kept in allocated memory.
 *
 * Acquires, in order: DCBAA DMA buffer, event ring, scratchpad buffers,
 * command infrastructure, bus and root hub. On failure, everything acquired
 * so far is released via the goto-cleanup chain below (reverse order).
 */
int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
	int err;

	/* DCBAA: one 64-bit entry per slot, plus entry 0 (scratchpad). */
	if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
		return ENOMEM;
	hc->dcbaa = hc->dcbaa_dma.virt;

	if ((err = xhci_event_ring_init(&hc->event_ring)))
		goto err_dcbaa;

	if ((err = xhci_scratchpad_alloc(hc)))
		goto err_event_ring;

	if ((err = xhci_init_commands(hc)))
		goto err_scratch;

	if ((err = xhci_bus_init(&hc->bus, hc)))
		goto err_cmd;

	if ((err = xhci_rh_init(&hc->rh, hc)))
		goto err_bus;

	return EOK;

	/* Cleanup chain: release in reverse order of acquisition. */
err_bus:
	xhci_bus_fini(&hc->bus);
err_cmd:
	xhci_fini_commands(hc);
err_scratch:
	xhci_scratchpad_free(hc);
err_event_ring:
	xhci_event_ring_fini(&hc->event_ring);
err_dcbaa:
	hc->dcbaa = NULL;
	dma_buffer_free(&hc->dcbaa_dma);
	return err;
}
247
/*
 * Interrupt bottom-half template, executed by the kernel on IRQ.
 *
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 *     status = read(usbsts)
 *     assert status
 *     assert ip
 *     accept (passing status)
 * }
 * decline
 *
 * The NULL addresses and zero values below are placeholders; they are
 * patched with real register addresses in hc_irq_code_gen().
 */
static const irq_cmd_t irq_commands[] = {
	{
		/* arg3 = interrupter 0 management register */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		/* arg4 = arg3 & IP bit — did interrupter 0 fire? */
		.cmd = CMD_AND,
		.srcarg = 3,
		.dstarg = 4,
		.value = 0 /* host2xhci(32, 1) */
	},
	{
		/* If IP not set, skip the next 5 commands (fall to decline). */
		.cmd = CMD_PREDICATE,
		.srcarg = 4,
		.value = 5
	},
	{
		/* arg1 = USBSTS snapshot, passed to the top-half. */
		.cmd = CMD_PIO_READ_32,
		.dstarg = 1,
		.addr = NULL /* usbsts */
	},
	{
		/* arg2 = status bits to acknowledge (RW1C). */
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
	},
	{
		/* Write-1-to-clear the handled status bits. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 2,
		.addr = NULL /* usbsts */
	},
	{
		/* Writing IMAN back clears the IP bit (RW1C). */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};
304
305
306/**
307 * Generates code to accept interrupts. The xHCI is designed primarily for
308 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
309 * (except 0) are disabled.
310 */
311int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
312{
313 assert(code);
314 assert(hw_res);
315
316 if (hw_res->irqs.count != 1) {
317 usb_log_info("Unexpected HW resources to enable interrupts.");
318 return EINVAL;
319 }
320
321 code->ranges = malloc(sizeof(irq_pio_range_t));
322 if (code->ranges == NULL)
323 return ENOMEM;
324
325 code->cmds = malloc(sizeof(irq_commands));
326 if (code->cmds == NULL) {
327 free(code->ranges);
328 return ENOMEM;
329 }
330
331 code->rangecount = 1;
332 code->ranges[0] = (irq_pio_range_t) {
333 .base = RNGABS(hc->mmio_range),
334 .size = RNGSZ(hc->mmio_range),
335 };
336
337 code->cmdcount = ARRAY_SIZE(irq_commands);
338 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
339
340 void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
341 void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
342 code->cmds[0].addr = intr0_iman;
343 code->cmds[1].value = host2xhci(32, 1);
344 code->cmds[3].addr = usbsts;
345 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
346 code->cmds[5].addr = usbsts;
347 code->cmds[6].addr = intr0_iman;
348
349 return hw_res->irqs.irqs[0];
350}
351
352/**
353 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
354 */
355int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
356{
357 /* No legacy support capability, the controller is solely for us */
358 if (!hc->legsup)
359 return EOK;
360
361 /* TODO: Test this with USB3-aware BIOS */
362 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
363 XHCI_REG_WR(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
364 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
365 usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
366 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
367 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
368 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
369 assert(XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1);
370 return EOK;
371 }
372 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
373 }
374 usb_log_error("BIOS did not release XHCI legacy hold!\n");
375
376 return ENOTSUP;
377}
378
/**
 * Ask the xHC to reset its state. Implements the controller-reset portion of
 * the initialization sequence (halt, then HCRST).
 */
static int hc_reset(xhci_hc_t *hc)
{
	/* Stop the HC: set R/S to 0 */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/* Wait 16 ms until the HC is halted */
	async_usleep(16000);
	assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH));

	/* Reset */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

	/*
	 * Wait until the reset is complete.
	 * NOTE(review): no timeout here — a hardware failure would hang this
	 * fibril forever. Consider bounding the wait.
	 */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST))
		async_usleep(1000);

	return EOK;
}
400
/**
 * Initialize the HC: section 4.2
 *
 * Resets the controller, programs DCBAA, command ring and interrupter 0,
 * then sets Run/Stop. The register writes follow the order prescribed by
 * the initialization sequence and must not be reordered.
 */
int hc_start(xhci_hc_t *hc, bool irq)
{
	int err;

	if ((err = hc_reset(hc)))
		return err;

	// FIXME: Waiting forever.
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR))
		async_usleep(1000);

	/* Tell the xHC where the device context base address array lives. */
	uint64_t dcbaaptr = hc->dcbaa_dma.phys;
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

	/* Program the Command Ring Control Register with the ring's dequeue
	 * pointer and the current producer cycle state. */
	uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
	if (hc->cr.trb_ring.pcs)
		crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));

	XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);

	/* Set up interrupter 0: event ring segment table, dequeue pointer. */
	xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
	uint64_t erstptr = hc->event_ring.erst.phys;
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));


	if (irq) {
		XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
		XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
	}

	/* Enable Host System Error reporting. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

	/* Go: set Run/Stop. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

	/* The reset changed status of all ports, and SW originated reason does
	 * not cause an interrupt.
	 */
	xhci_rh_handle_port_change(&hc->rh);

	return EOK;
}
454
/**
 * Used only when polling. Shall supplement the irq_commands.
 *
 * Mirrors what the irq_code bottom-half does: if interrupter 0 has IP set,
 * snapshot USBSTS, acknowledge its RW1C bits and clear IP.
 */
int hc_status(bus_t *bus, uint32_t *status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	/* hc->rt_regs->ir decays to &ir[0] — interrupter 0 only. */
	int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
	if (ip) {
		*status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
		/* Write-1-to-clear the handled status bits. */
		XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
		XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

		/* interrupt handler expects status from irq_commands, which is
		 * in xhci order. */
		*status = host2xhci(32, *status);
	}

	usb_log_debug2("HC(%p): Polled status: %x", hc, *status);
	return EOK;
}
475
476static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
477{
478 ++hc->wrap_count;
479 return EOK;
480}
481
/** Event handler signature: receives the HC and the dequeued event TRB. */
typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/* Dispatch table indexed by TRB type; unlisted types stay NULL and are
 * rejected with ENOTSUP in hc_handle_event(). */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &xhci_rh_handle_port_status_change_event,
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
	[XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
};
490
491static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
492{
493 unsigned type = TRB_TYPE(*trb);
494 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
495 return ENOTSUP;
496
497 return event_handlers[type](hc, trb);
498}
499
/**
 * Dequeue from event ring and handle dequeued events.
 *
 * As there can be events, that blocks on waiting for subsequent events,
 * we solve this problem by first copying the event TRBs from the event ring,
 * then asserting EHB and only after, handling the events.
 *
 * Whenever the event handling blocks, it switches fibril, and incoming
 * IPC notification will create new event handling fibril for us.
 *
 * NOTE(review): the code below handles each TRB inline as it is dequeued and
 * asserts EHB only at the end — it does not copy TRBs first as the paragraph
 * above describes. Confirm which behavior is intended.
 */
static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
{
	int err;

	xhci_trb_t trb;
	/* Record our fibril id, presumably for the deadlock detection
	 * mentioned in the commit message — TODO confirm against hc.h. */
	hc->event_handler = fibril_get_id();

	while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
		if ((err = hc_handle_event(hc, &trb, intr)) != EOK) {
			usb_log_error("Failed to handle event: %s", str_error(err));
		}

		/* Advance ERDP after each handled event. */
		uint64_t erdp = hc->event_ring.dequeue_ptr;
		XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
		XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
	}

	hc->event_handler = 0;

	/* Update the ERDP to make room in the ring. */
	usb_log_debug2("Copying from ring finished, updating ERDP.");
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
	XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
	XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));

	usb_log_debug2("Event ring run finished.");
}
538
539/**
540 * Handle an interrupt request from xHC. Resolve all situations that trigger an
541 * interrupt separately.
542 *
543 * Note that all RW1C bits in USBSTS register are cleared at the time of
544 * handling the interrupt in irq_code. This method is the top-half.
545 *
546 * @param status contents of USBSTS register at the time of the interrupt.
547 */
548void hc_interrupt(bus_t *bus, uint32_t status)
549{
550 xhci_hc_t *hc = bus_to_hc(bus);
551 status = xhci2host(32, status);
552
553 if (status & XHCI_REG_MASK(XHCI_OP_PCD)) {
554 usb_log_debug2("Root hub interrupt.");
555 xhci_rh_handle_port_change(&hc->rh);
556 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
557 }
558
559 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
560 usb_log_error("Host controller error occured. Bad things gonna happen...");
561 status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
562 }
563
564 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
565 usb_log_debug2("Event interrupt, running the event ring.");
566 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
567 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
568 }
569
570 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
571 usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
572 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
573 }
574
575 if (status) {
576 usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
577 }
578}
579
/**
 * Tear down all in-memory structures.
 *
 * NOTE(review): the order here is not the exact reverse of hc_init_memory()
 * (dcbaa is freed before commands, rh after commands). Verify there are no
 * cross-dependencies before reordering.
 */
void hc_fini(xhci_hc_t *hc)
{
	xhci_bus_fini(&hc->bus);
	xhci_event_ring_fini(&hc->event_ring);
	xhci_scratchpad_free(hc);
	dma_buffer_free(&hc->dcbaa_dma);
	xhci_fini_commands(hc);
	xhci_rh_fini(&hc->rh);
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	usb_log_info("HC(%p): Finalized.", hc);
}
594
595/**
596 * Ring a xHC Doorbell. Implements section 4.7.
597 */
598void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
599{
600 assert(hc);
601 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
602 pio_write_32(&hc->db_arry[doorbell], v);
603 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
604}
605
606/**
607 * Issue an Enable Slot command, returning the obtained Slot ID.
608 *
609 * @param slot_id Pointer where to store the obtained Slot ID.
610 */
611int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
612{
613 assert(hc);
614
615 int err;
616 xhci_cmd_t cmd;
617 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
618
619 if ((err = xhci_cmd_sync(hc, &cmd))) {
620 goto end;
621 }
622
623 if (slot_id) {
624 *slot_id = cmd.slot_id;
625 }
626
627end:
628 xhci_cmd_fini(&cmd);
629 return err;
630}
631
632/**
633 * Issue a Disable Slot command for a slot occupied by device.
634 *
635 * Frees the device context
636 */
637int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
638{
639 int err;
640 assert(hc);
641
642 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
643 return err;
644 }
645
646 /* Free the device context. */
647 hc->dcbaa[dev->slot_id] = 0;
648 dma_buffer_free(&dev->dev_ctx);
649
650 /* Mark the slot as invalid. */
651 dev->slot_id = 0;
652
653 return EOK;
654}
655
656/**
657 * Prepare an empty Endpoint Input Context inside a dma buffer.
658 */
659static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
660{
661 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
662 if (err)
663 return err;
664
665 xhci_input_ctx_t *ictx = dma_buf->virt;
666 memset(ictx, 0, sizeof(xhci_input_ctx_t));
667
668 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
669 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
670
671 return EOK;
672}
673
/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * @param dev Device to assing an address (unconfigured yet)
 * @param ep0 EP0 of device TODO remove, can be fetched from dev
 * @return EOK on success; EINVAL for unmappable speed; ENOMEM or the
 *         command's error code otherwise.
 */
int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
{
	/* Pre-set for the first (allocation) failure path below. */
	int err = ENOMEM;

	/* Although we have the precise PSIV value on devices of tier 1,
	 * we have to rely on reverse mapping on others. */
	if (!hc->speed_to_psiv[dev->base.speed]) {
		usb_log_error("Device reported an USB speed that cannot be mapped to HC port speed.");
		return EINVAL;
	}

	/* Setup and register device context */
	if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
		goto err;
	memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));

	/* Publish the device context in the DCBAA (xHC-endian physical addr). */
	hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);

	/* Issue configure endpoint command (sec 4.3.5). */
	dma_buffer_t ictx_dma_buf;
	if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
		goto err_dev_ctx;
	}
	/* NOTE(review): ictx_dma_buf is not freed here — presumably ownership
	 * passes to xhci_cmd_sync_inline; verify to rule out a leak. */
	xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

	/* Initialize slot_ctx according to section 4.3.3 point 3. */
	XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
	XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
	XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
	XHCI_SLOT_SPEED_SET(ictx->slot_ctx, hc->speed_to_psiv[dev->base.speed]);

	/* In a very specific case, we have to set also these. But before that,
	 * we need to refactor how TT is handled in libusbhost. */
	XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);

	/* Copy endpoint 0 context and set A1 flag. */
	XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
	xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

	/* Issue Address Device command. */
	if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
		goto err_dev_ctx;
	}

	/* The xHC wrote the assigned address into the output device context. */
	xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
	dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
	usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);

	return EOK;

err_dev_ctx:
	hc->dcbaa[dev->slot_id] = 0;
	dma_buffer_free(&dev->dev_ctx);
err:
	return err;
}
738
739/**
740 * Issue a Configure Device command for a device in slot.
741 *
742 * @param slot_id Slot ID assigned to the device.
743 */
744int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
745{
746 /* Issue configure endpoint command (sec 4.3.5). */
747 dma_buffer_t ictx_dma_buf;
748 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
749 if (err)
750 return err;
751
752 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
753
754 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
755}
756
757/**
758 * Issue a Deconfigure Device command for a device in slot.
759 *
760 * @param slot_id Slot ID assigned to the device.
761 */
762int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
763{
764 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
765 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
766}
767
768/**
769 * Instruct xHC to add an endpoint with supplied endpoint context.
770 *
771 * @param slot_id Slot ID assigned to the device.
772 * @param ep_idx Endpoint index (number + direction) in question
773 * @param ep_ctx Endpoint context of the endpoint
774 */
775int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
776{
777 /* Issue configure endpoint command (sec 4.3.5). */
778 dma_buffer_t ictx_dma_buf;
779 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
780 if (err)
781 return err;
782
783 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
784 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
785 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
786 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
787
788 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
789}
790
791/**
792 * Instruct xHC to drop an endpoint.
793 *
794 * @param slot_id Slot ID assigned to the device.
795 * @param ep_idx Endpoint index (number + direction) in question
796 */
797int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
798{
799 /* Issue configure endpoint command (sec 4.3.5). */
800 dma_buffer_t ictx_dma_buf;
801 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
802 if (err)
803 return err;
804
805 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
806 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
807 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
808
809 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
810}
811
812/**
813 * Instruct xHC to update information about an endpoint, using supplied
814 * endpoint context.
815 *
816 * @param slot_id Slot ID assigned to the device.
817 * @param ep_idx Endpoint index (number + direction) in question
818 * @param ep_ctx Endpoint context of the endpoint
819 */
820int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
821{
822 dma_buffer_t ictx_dma_buf;
823 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
824 if (err)
825 return err;
826
827 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
828 memset(ictx, 0, sizeof(xhci_input_ctx_t));
829
830 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
831 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
832
833 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
834}
835
836/**
837 * Instruct xHC to stop running a transfer ring on an endpoint.
838 *
839 * @param slot_id Slot ID assigned to the device.
840 * @param ep_idx Endpoint index (number + direction) in question
841 */
842int hc_stop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
843{
844
845 return xhci_cmd_sync_inline(hc, STOP_ENDPOINT, .slot_id = slot_id, .endpoint_id = ep_idx);
846}
847
848/**
849 * @}
850 */
Note: See TracBrowser for help on using the repository browser.