source: mainline/uspace/drv/bus/usb/xhci/hc.c@ 0eadfd1e

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since 0eadfd1e was eb928c4, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: documentation & cleanup

Also, a simple refactoring to remove functions that only wraps another
functions unused anywhere else.

  • Property mode set to 100644
File size: 24.3 KB
Line 
1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
49/**
50 * Default USB Speed ID mapping: Table 157
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
53#define PORT_SPEED(usb, mjr, psie, psim) { \
54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
57 .usb_speed = USB_SPEED_##usb, \
58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
61static const xhci_port_speed_t ps_default_full = PORT_SPEED(FULL, 2, 2, 12);
62static const xhci_port_speed_t ps_default_low = PORT_SPEED(LOW, 2, 1, 1500);
63static const xhci_port_speed_t ps_default_high = PORT_SPEED(HIGH, 2, 2, 480);
64static const xhci_port_speed_t ps_default_super = PORT_SPEED(SUPER, 3, 3, 5);
65
66/**
67 * Walk the list of extended capabilities.
68 *
69 * The most interesting thing hidden in extended capabilities is the mapping of
70 * ports to protocol versions and speeds.
71 */
72static int hc_parse_ec(xhci_hc_t *hc)
73{
74 unsigned psic, major, minor;
75 xhci_sp_name_t name;
76
77 xhci_port_speed_t *speeds = hc->speeds;
78
79 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
80 xhci_dump_extcap(ec);
81 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
82 case XHCI_EC_USB_LEGACY:
83 assert(hc->legsup == NULL);
84 hc->legsup = (xhci_legsup_t *) ec;
85 break;
86 case XHCI_EC_SUPPORTED_PROTOCOL:
87 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
88 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
89 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
90 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
91
92 if (name.packed != xhci_name_usb.packed) {
93 /**
94 * The detection of such protocol would work,
95 * but the rest of the implementation is made
96 * for the USB protocol only.
97 */
98 usb_log_error("Unknown protocol %.4s.", name.str);
99 return ENOTSUP;
100 }
101
102 // "Implied" speed
103 if (psic == 0) {
104 assert(minor == 0);
105
106 if (major == 2) {
107 speeds[1] = ps_default_full;
108 speeds[2] = ps_default_low;
109 speeds[3] = ps_default_high;
110
111 hc->speed_to_psiv[USB_SPEED_FULL] = 1;
112 hc->speed_to_psiv[USB_SPEED_LOW] = 2;
113 hc->speed_to_psiv[USB_SPEED_HIGH] = 3;
114 } else if (major == 3) {
115 speeds[4] = ps_default_super;
116 hc->speed_to_psiv[USB_SPEED_SUPER] = 4;
117 } else {
118 return EINVAL;
119 }
120
121 usb_log_debug2("Implied speed of USB %u.0 set up.", major);
122 } else {
123 for (unsigned i = 0; i < psic; i++) {
124 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
125 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
126 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
127 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
128 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
129
130 speeds[psiv].major = major;
131 speeds[psiv].minor = minor;
132 str_ncpy(speeds[psiv].name, 4, name.str, 4);
133 speeds[psiv].usb_speed = USB_SPEED_MAX;
134
135 uint64_t bps = PSI_TO_BPS(psie, psim);
136
137 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
138 speeds[psiv].rx_bps = bps;
139 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
140 speeds[psiv].tx_bps = bps;
141 usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
142 }
143 }
144 }
145 }
146 }
147 return EOK;
148}
149
150/**
151 * Initialize MMIO spaces of xHC.
152 */
153int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
154{
155 int err;
156
157 if (hw_res->mem_ranges.count != 1) {
158 usb_log_error("Unexpected MMIO area, bailing out.");
159 return EINVAL;
160 }
161
162 hc->mmio_range = hw_res->mem_ranges.ranges[0];
163
164 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
165 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
166
167 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
168 return EOVERFLOW;
169
170 void *base;
171 if ((err = pio_enable_range(&hc->mmio_range, &base)))
172 return err;
173
174 hc->reg_base = base;
175 hc->cap_regs = (xhci_cap_regs_t *) base;
176 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
177 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
178 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
179
180 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
181 if (xec_offset > 0)
182 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
183
184 usb_log_debug2("Initialized MMIO reg areas:");
185 usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
186 usb_log_debug2("\tOperational regs: %p", hc->op_regs);
187 usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
188 usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);
189
190 xhci_dump_cap_regs(hc->cap_regs);
191
192 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
193 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
194
195 if ((err = hc_parse_ec(hc))) {
196 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
197 return err;
198 }
199
200 return EOK;
201}
202
203/**
204 * Initialize structures kept in allocated memory.
205 */
206int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
207{
208 int err;
209
210 if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
211 return ENOMEM;
212 hc->dcbaa = hc->dcbaa_dma.virt;
213
214 if ((err = xhci_event_ring_init(&hc->event_ring)))
215 goto err_dcbaa;
216
217 if ((err = xhci_scratchpad_alloc(hc)))
218 goto err_event_ring;
219
220 if ((err = xhci_init_commands(hc)))
221 goto err_scratch;
222
223 if ((err = xhci_bus_init(&hc->bus, hc)))
224 goto err_cmd;
225
226 if ((err = xhci_rh_init(&hc->rh, hc)))
227 goto err_bus;
228
229 return EOK;
230
231err_bus:
232 xhci_bus_fini(&hc->bus);
233err_cmd:
234 xhci_fini_commands(hc);
235err_scratch:
236 xhci_scratchpad_free(hc);
237err_event_ring:
238 xhci_event_ring_fini(&hc->event_ring);
239err_dcbaa:
240 hc->dcbaa = NULL;
241 dma_buffer_free(&hc->dcbaa_dma);
242 return err;
243}
244
245/*
246 * Pseudocode:
247 * ip = read(intr[0].iman)
248 * if (ip) {
249 * status = read(usbsts)
250 * assert status
251 * assert ip
252 * accept (passing status)
253 * }
254 * decline
255 */
256static const irq_cmd_t irq_commands[] = {
257 {
258 .cmd = CMD_PIO_READ_32,
259 .dstarg = 3,
260 .addr = NULL /* intr[0].iman */
261 },
262 {
263 .cmd = CMD_AND,
264 .srcarg = 3,
265 .dstarg = 4,
266 .value = 0 /* host2xhci(32, 1) */
267 },
268 {
269 .cmd = CMD_PREDICATE,
270 .srcarg = 4,
271 .value = 5
272 },
273 {
274 .cmd = CMD_PIO_READ_32,
275 .dstarg = 1,
276 .addr = NULL /* usbsts */
277 },
278 {
279 .cmd = CMD_AND,
280 .srcarg = 1,
281 .dstarg = 2,
282 .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
283 },
284 {
285 .cmd = CMD_PIO_WRITE_A_32,
286 .srcarg = 2,
287 .addr = NULL /* usbsts */
288 },
289 {
290 .cmd = CMD_PIO_WRITE_A_32,
291 .srcarg = 3,
292 .addr = NULL /* intr[0].iman */
293 },
294 {
295 .cmd = CMD_ACCEPT
296 },
297 {
298 .cmd = CMD_DECLINE
299 }
300};
301
302
303/**
304 * Generates code to accept interrupts. The xHCI is designed primarily for
305 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
306 * (except 0) are disabled.
307 */
308int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
309{
310 assert(code);
311 assert(hw_res);
312
313 if (hw_res->irqs.count != 1) {
314 usb_log_info("Unexpected HW resources to enable interrupts.");
315 return EINVAL;
316 }
317
318 code->ranges = malloc(sizeof(irq_pio_range_t));
319 if (code->ranges == NULL)
320 return ENOMEM;
321
322 code->cmds = malloc(sizeof(irq_commands));
323 if (code->cmds == NULL) {
324 free(code->ranges);
325 return ENOMEM;
326 }
327
328 code->rangecount = 1;
329 code->ranges[0] = (irq_pio_range_t) {
330 .base = RNGABS(hc->mmio_range),
331 .size = RNGSZ(hc->mmio_range),
332 };
333
334 code->cmdcount = ARRAY_SIZE(irq_commands);
335 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
336
337 void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
338 void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
339 code->cmds[0].addr = intr0_iman;
340 code->cmds[1].value = host2xhci(32, 1);
341 code->cmds[3].addr = usbsts;
342 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
343 code->cmds[5].addr = usbsts;
344 code->cmds[6].addr = intr0_iman;
345
346 return hw_res->irqs.irqs[0];
347}
348
349/**
350 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
351 */
352int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
353{
354 /* No legacy support capability, the controller is solely for us */
355 if (!hc->legsup)
356 return EOK;
357
358 /* TODO: Test this with USB3-aware BIOS */
359 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
360 XHCI_REG_WR(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
361 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
362 usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
363 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
364 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
365 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
366 assert(XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1);
367 return EOK;
368 }
369 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
370 }
371 usb_log_error("BIOS did not release XHCI legacy hold!\n");
372
373 return ENOTSUP;
374}
375
376/**
377 * Ask the xHC to reset its state. Implements sequence
378 */
379static int hc_reset(xhci_hc_t *hc)
380{
381 /* Stop the HC: set R/S to 0 */
382 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);
383
384 /* Wait 16 ms until the HC is halted */
385 async_usleep(16000);
386 assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH));
387
388 /* Reset */
389 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);
390
391 /* Wait until the reset is complete */
392 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST))
393 async_usleep(1000);
394
395 return EOK;
396}
397
398/**
399 * Initialize the HC: section 4.2
400 */
401int hc_start(xhci_hc_t *hc, bool irq)
402{
403 int err;
404
405 if ((err = hc_reset(hc)))
406 return err;
407
408 // FIXME: Waiting forever.
409 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR))
410 async_usleep(1000);
411
412 uint64_t dcbaaptr = hc->dcbaa_dma.phys;
413 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
414 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
415 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
416
417 uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
418 if (hc->cr.trb_ring.pcs)
419 crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
420 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
421 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));
422
423 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
424 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
425 uint64_t erdp = hc->event_ring.dequeue_ptr;
426 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
427 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
428 uint64_t erstptr = hc->event_ring.erst.phys;
429 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
430 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));
431
432 if (irq) {
433 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
434 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
435 }
436
437 XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);
438
439 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
440
441 /* The reset changed status of all ports, and SW originated reason does
442 * not cause an interrupt.
443 */
444 xhci_rh_handle_port_change(&hc->rh);
445
446 return EOK;
447}
448
449/**
450 * Used only when polling. Shall supplement the irq_commands.
451 */
452int hc_status(bus_t *bus, uint32_t *status)
453{
454 xhci_hc_t *hc = bus_to_hc(bus);
455 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
456 if (ip) {
457 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
458 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
459 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
460
461 /* interrupt handler expects status from irq_commands, which is
462 * in xhci order. */
463 *status = host2xhci(32, *status);
464 }
465
466 usb_log_debug2("HC(%p): Polled status: %x", hc, *status);
467 return EOK;
468}
469
/** Signature of a handler for one event-ring TRB. */
typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/* Dispatch table indexed by TRB type; entries not listed are NULL and the
 * corresponding events are reported as unsupported by hc_handle_event(). */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &xhci_rh_handle_port_status_change_event,
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};
477
478static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
479{
480 unsigned type = TRB_TYPE(*trb);
481 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
482 return ENOTSUP;
483
484 return event_handlers[type](hc, trb);
485}
486
487/**
488 * Dequeue from event ring and handle dequeued events.
489 *
490 * As there can be events, that blocks on waiting for subsequent events,
491 * we solve this problem by first copying the event TRBs from the event ring,
492 * then asserting EHB and only after, handling the events.
493 *
494 * Whenever the event handling blocks, it switches fibril, and incoming
495 * IPC notification will create new event handling fibril for us.
496 */
497static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
498{
499 int err;
500 ssize_t size = 16;
501 xhci_trb_t *queue = malloc(sizeof(xhci_trb_t) * size);
502 if (!queue) {
503 usb_log_error("Not enough memory to run the event ring.");
504 return;
505 }
506
507 xhci_trb_t *head = queue;
508
509 while ((err = xhci_event_ring_dequeue(event_ring, head)) != ENOENT) {
510 if (err != EOK) {
511 usb_log_warning("Error while accessing event ring: %s", str_error(err));
512 break;
513 }
514
515 usb_log_debug2("Dequeued trb from event ring: %s", xhci_trb_str_type(TRB_TYPE(*head)));
516 head++;
517
518 /* Expand the array if needed. */
519 if (head - queue >= size) {
520 size *= 2;
521 xhci_trb_t *new_queue = realloc(queue, size);
522 if (new_queue == NULL)
523 break; /* Will process only those TRBs we have memory for. */
524
525 head = new_queue + (head - queue);
526 }
527
528 uint64_t erdp = hc->event_ring.dequeue_ptr;
529 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
530 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
531 }
532
533 /* Update the ERDP to make room in the ring. */
534 usb_log_debug2("Copying from ring finished, updating ERDP.");
535 uint64_t erdp = hc->event_ring.dequeue_ptr;
536 erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
537 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
538 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
539
540 /* Handle all of the collected events if possible. */
541 if (head == queue)
542 usb_log_warning("No events to be handled!");
543
544 for (xhci_trb_t *tail = queue; tail != head; tail++) {
545 if ((err = hc_handle_event(hc, tail, intr)) != EOK) {
546 usb_log_error("Failed to handle event: %s", str_error(err));
547 }
548 }
549
550 free(queue);
551 usb_log_debug2("Event ring run finished.");
552}
553
554/**
555 * Handle an interrupt request from xHC. Resolve all situations that trigger an
556 * interrupt separately.
557 *
558 * Note that all RW1C bits in USBSTS register are cleared at the time of
559 * handling the interrupt in irq_code. This method is the top-half.
560 *
561 * @param status contents of USBSTS register at the time of the interrupt.
562 */
563void hc_interrupt(bus_t *bus, uint32_t status)
564{
565 xhci_hc_t *hc = bus_to_hc(bus);
566 status = xhci2host(32, status);
567
568 if (status & XHCI_REG_MASK(XHCI_OP_PCD)) {
569 usb_log_debug2("Root hub interrupt.");
570 xhci_rh_handle_port_change(&hc->rh);
571 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
572 }
573
574 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
575 usb_log_error("Host controller error occured. Bad things gonna happen...");
576 status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
577 }
578
579 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
580 usb_log_debug2("Event interrupt, running the event ring.");
581 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
582 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
583 }
584
585 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
586 usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
587 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
588 }
589
590 if (status) {
591 usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
592 }
593}
594
595/**
596 * Tear down all in-memory structures.
597 */
598void hc_fini(xhci_hc_t *hc)
599{
600 xhci_bus_fini(&hc->bus);
601 xhci_event_ring_fini(&hc->event_ring);
602 xhci_scratchpad_free(hc);
603 dma_buffer_free(&hc->dcbaa_dma);
604 xhci_fini_commands(hc);
605 xhci_rh_fini(&hc->rh);
606 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
607 usb_log_info("HC(%p): Finalized.", hc);
608}
609
610/**
611 * Ring a xHC Doorbell. Implements section 4.7.
612 */
613int hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
614{
615 assert(hc);
616 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
617 pio_write_32(&hc->db_arry[doorbell], v);
618 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
619 return EOK;
620}
621
622/**
623 * Issue an Enable Slot command, returning the obtained Slot ID.
624 *
625 * @param slot_id Pointer where to store the obtained Slot ID.
626 */
627int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
628{
629 assert(hc);
630
631 int err;
632 xhci_cmd_t cmd;
633 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
634
635 if ((err = xhci_cmd_sync(hc, &cmd))) {
636 goto end;
637 }
638
639 if (slot_id) {
640 *slot_id = cmd.slot_id;
641 }
642
643end:
644 xhci_cmd_fini(&cmd);
645 return err;
646}
647
648/**
649 * Issue a Disable Slot command for a slot occupied by device.
650 *
651 * Frees the device context
652 */
653int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
654{
655 int err;
656 assert(hc);
657
658 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
659 return err;
660 }
661
662 /* Free the device context. */
663 hc->dcbaa[dev->slot_id] = 0;
664 dma_buffer_free(&dev->dev_ctx);
665
666 /* Mark the slot as invalid. */
667 dev->slot_id = 0;
668
669 return EOK;
670}
671
672/**
673 * Prepare an empty Endpoint Input Context inside a dma buffer.
674 */
675static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
676{
677 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
678 if (err)
679 return err;
680
681 xhci_input_ctx_t *ictx = dma_buf->virt;
682 memset(ictx, 0, sizeof(xhci_input_ctx_t));
683
684 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
685 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
686
687 return EOK;
688}
689
690/**
691 * Initialize a device, assigning it an address. Implements section 4.3.4.
692 *
693 * @param dev Device to assing an address (unconfigured yet)
694 * @param ep0 EP0 of device TODO remove, can be fetched from dev
695 */
696int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
697{
698 int err = ENOMEM;
699
700 /* Although we have the precise PSIV value on devices of tier 1,
701 * we have to rely on reverse mapping on others. */
702 if (!hc->speed_to_psiv[dev->base.speed]) {
703 usb_log_error("Device reported an USB speed that cannot be mapped to HC port speed.");
704 return EINVAL;
705 }
706
707 /* Setup and register device context */
708 if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
709 goto err;
710 memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));
711
712 hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
713
714 /* Issue configure endpoint command (sec 4.3.5). */
715 dma_buffer_t ictx_dma_buf;
716 if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
717 goto err_dev_ctx;
718 }
719 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
720
721 /* Initialize slot_ctx according to section 4.3.3 point 3. */
722 XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
723 XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
724 XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
725 XHCI_SLOT_SPEED_SET(ictx->slot_ctx, hc->speed_to_psiv[dev->base.speed]);
726
727 /* In a very specific case, we have to set also these. But before that,
728 * we need to refactor how TT is handled in libusbhost. */
729 XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
730 XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
731 XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);
732
733 /* Copy endpoint 0 context and set A1 flag. */
734 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
735 xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);
736
737 /* Issue Address Device command. */
738 if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
739 goto err_dev_ctx;
740 }
741
742 xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
743 dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
744 usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);
745
746 /* From now on, the device is officially online, yay! */
747 fibril_mutex_lock(&dev->base.guard);
748 dev->base.online = true;
749 fibril_mutex_unlock(&dev->base.guard);
750
751 return EOK;
752
753err_dev_ctx:
754 hc->dcbaa[dev->slot_id] = 0;
755 dma_buffer_free(&dev->dev_ctx);
756err:
757 return err;
758}
759
760/**
761 * Issue a Configure Device command for a device in slot.
762 *
763 * @param slot_id Slot ID assigned to the device.
764 */
765int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
766{
767 /* Issue configure endpoint command (sec 4.3.5). */
768 dma_buffer_t ictx_dma_buf;
769 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
770 if (err)
771 return err;
772
773 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
774
775 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
776}
777
778/**
779 * Issue a Deconfigure Device command for a device in slot.
780 *
781 * @param slot_id Slot ID assigned to the device.
782 */
783int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
784{
785 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
786 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
787}
788
789/**
790 * Instruct xHC to add an endpoint with supplied endpoint context.
791 *
792 * @param slot_id Slot ID assigned to the device.
793 * @param ep_idx Endpoint index (number + direction) in question
794 * @param ep_ctx Endpoint context of the endpoint
795 */
796int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
797{
798 /* Issue configure endpoint command (sec 4.3.5). */
799 dma_buffer_t ictx_dma_buf;
800 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
801 if (err)
802 return err;
803
804 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
805 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
806 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
807 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
808
809 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
810}
811
812/**
813 * Instruct xHC to drop an endpoint.
814 *
815 * @param slot_id Slot ID assigned to the device.
816 * @param ep_idx Endpoint index (number + direction) in question
817 */
818int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
819{
820 /* Issue configure endpoint command (sec 4.3.5). */
821 dma_buffer_t ictx_dma_buf;
822 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
823 if (err)
824 return err;
825
826 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
827 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
828 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
829
830 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
831}
832
833/**
834 * Instruct xHC to update information about an endpoint, using supplied
835 * endpoint context.
836 *
837 * @param slot_id Slot ID assigned to the device.
838 * @param ep_idx Endpoint index (number + direction) in question
839 * @param ep_ctx Endpoint context of the endpoint
840 */
841int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
842{
843 dma_buffer_t ictx_dma_buf;
844 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
845 if (err)
846 return err;
847
848 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
849 memset(ictx, 0, sizeof(xhci_input_ctx_t));
850
851 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
852 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
853
854 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
855}
856
857/**
858 * @}
859 */
Note: See TracBrowser for help on using the repository browser.