source: mainline/uspace/drv/bus/usb/xhci/hc.c@ 69a93d02

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 69a93d02 was 30fc56f, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: stop endpoint on unregister

  • Property mode set to 100644
File size: 24.6 KB
Line 
1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
/**
 * Default USB Speed ID mapping: Table 157
 */
/* Bits per second from a Protocol Speed ID: mantissa shifted left by
 * 10 bits per exponent step (i.e. a factor of 1024 per step).
 * NOTE(review): xHCI defines PSIE steps as decimal (K/M/G = x1000);
 * this uses binary 1024 steps — confirm whether the approximation is
 * intentional. */
#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
/* Build a symmetric-rate xhci_port_speed_t initializer for USB major
 * version `mjr` at the given PSIE/PSIM rate. */
#define PORT_SPEED(usb, mjr, psie, psim) { \
	.name = "USB ", \
	.major = mjr, \
	.minor = 0, \
	.usb_speed = USB_SPEED_##usb, \
	.rx_bps = PSI_TO_BPS(psie, psim), \
	.tx_bps = PSI_TO_BPS(psie, psim) \
}
/* Default speeds used when a Supported Protocol capability carries no
 * explicit PSI list (PSIC == 0): 12 Mb/s, 1.5 Mb/s, 480 Mb/s, 5 Gb/s. */
static const xhci_port_speed_t ps_default_full = PORT_SPEED(FULL, 2, 2, 12);
static const xhci_port_speed_t ps_default_low = PORT_SPEED(LOW, 2, 1, 1500);
static const xhci_port_speed_t ps_default_high = PORT_SPEED(HIGH, 2, 2, 480);
static const xhci_port_speed_t ps_default_super = PORT_SPEED(SUPER, 3, 3, 5);
65
66/**
67 * Walk the list of extended capabilities.
68 *
69 * The most interesting thing hidden in extended capabilities is the mapping of
70 * ports to protocol versions and speeds.
71 */
72static int hc_parse_ec(xhci_hc_t *hc)
73{
74 unsigned psic, major, minor;
75 xhci_sp_name_t name;
76
77 xhci_port_speed_t *speeds = hc->speeds;
78
79 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
80 xhci_dump_extcap(ec);
81 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
82 case XHCI_EC_USB_LEGACY:
83 assert(hc->legsup == NULL);
84 hc->legsup = (xhci_legsup_t *) ec;
85 break;
86 case XHCI_EC_SUPPORTED_PROTOCOL:
87 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
88 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
89 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
90 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
91
92 if (name.packed != xhci_name_usb.packed) {
93 /**
94 * The detection of such protocol would work,
95 * but the rest of the implementation is made
96 * for the USB protocol only.
97 */
98 usb_log_error("Unknown protocol %.4s.", name.str);
99 return ENOTSUP;
100 }
101
102 // "Implied" speed
103 if (psic == 0) {
104 assert(minor == 0);
105
106 if (major == 2) {
107 speeds[1] = ps_default_full;
108 speeds[2] = ps_default_low;
109 speeds[3] = ps_default_high;
110
111 hc->speed_to_psiv[USB_SPEED_FULL] = 1;
112 hc->speed_to_psiv[USB_SPEED_LOW] = 2;
113 hc->speed_to_psiv[USB_SPEED_HIGH] = 3;
114 } else if (major == 3) {
115 speeds[4] = ps_default_super;
116 hc->speed_to_psiv[USB_SPEED_SUPER] = 4;
117 } else {
118 return EINVAL;
119 }
120
121 usb_log_debug2("Implied speed of USB %u.0 set up.", major);
122 } else {
123 for (unsigned i = 0; i < psic; i++) {
124 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
125 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
126 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
127 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
128 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
129
130 speeds[psiv].major = major;
131 speeds[psiv].minor = minor;
132 str_ncpy(speeds[psiv].name, 4, name.str, 4);
133 speeds[psiv].usb_speed = USB_SPEED_MAX;
134
135 uint64_t bps = PSI_TO_BPS(psie, psim);
136
137 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
138 speeds[psiv].rx_bps = bps;
139 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
140 speeds[psiv].tx_bps = bps;
141 usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
142 }
143 }
144 }
145 }
146 }
147 return EOK;
148}
149
/**
 * Initialize MMIO spaces of xHC.
 *
 * Maps the single expected register area and derives pointers to the
 * capability, operational, runtime and doorbell register blocks from the
 * offsets stored in the capability registers. Also parses the extended
 * capability list. On parse failure, the PIO mapping is released again.
 */
int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	int err;

	/* The xHC is expected to expose exactly one MMIO register range. */
	if (hw_res->mem_ranges.count != 1) {
		usb_log_error("Unexpected MMIO area, bailing out.");
		return EINVAL;
	}

	hc->mmio_range = hw_res->mem_ranges.ranges[0];

	usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
	    RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);

	/* At minimum the capability registers must fit in the range. */
	if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
		return EOVERFLOW;

	void *base;
	if ((err = pio_enable_range(&hc->mmio_range, &base)))
		return err;

	/* Operational, runtime and doorbell blocks live at offsets given by
	 * the capability registers (CAPLENGTH, RTSOFF, DBOFF). */
	hc->reg_base = base;
	hc->cap_regs = (xhci_cap_regs_t *) base;
	hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
	hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
	hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));

	/* XECP is in 32-bit dword units; zero means no extended capabilities. */
	uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
	if (xec_offset > 0)
		hc->xecp = (xhci_extcap_t *) (base + xec_offset);

	usb_log_debug2("Initialized MMIO reg areas:");
	usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
	usb_log_debug2("\tOperational regs: %p", hc->op_regs);
	usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
	usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);

	xhci_dump_cap_regs(hc->cap_regs);

	hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
	hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
	unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
	/* NOTE(review): `ist & 0x10 >> 1` parses as `ist & (0x10 >> 1)`,
	 * i.e. `ist & 0x8` — it tests IST bit 3 (frames vs. microframes).
	 * The multiplication by `(ist & 0xf)` still includes that bit in the
	 * factor; confirm intended semantics against the xHCI spec. */
	hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);

	/* On failure, undo the PIO mapping established above. */
	if ((err = hc_parse_ec(hc))) {
		pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
		return err;
	}

	return EOK;
}
204
/**
 * Initialize structures kept in allocated memory.
 *
 * Allocates the DCBAA, event ring, scratchpad buffers, command
 * infrastructure, bus bookkeeping and root hub. On any failure, the
 * goto ladder rolls back everything already set up, in reverse order.
 */
int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
{
	int err;

	/* DCBAA: one 64-bit entry per device slot plus the reserved entry 0. */
	if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
		return ENOMEM;
	hc->dcbaa = hc->dcbaa_dma.virt;

	if ((err = xhci_event_ring_init(&hc->event_ring)))
		goto err_dcbaa;

	if ((err = xhci_scratchpad_alloc(hc)))
		goto err_event_ring;

	if ((err = xhci_init_commands(hc)))
		goto err_scratch;

	if ((err = xhci_bus_init(&hc->bus, hc)))
		goto err_cmd;

	if ((err = xhci_rh_init(&hc->rh, hc)))
		goto err_bus;

	return EOK;

	/* Cleanup: each label undoes the step acquired just before the jump. */
err_bus:
	xhci_bus_fini(&hc->bus);
err_cmd:
	xhci_fini_commands(hc);
err_scratch:
	xhci_scratchpad_free(hc);
err_event_ring:
	xhci_event_ring_fini(&hc->event_ring);
err_dcbaa:
	hc->dcbaa = NULL;
	dma_buffer_free(&hc->dcbaa_dma);
	return err;
}
246
/*
 * Pseudocode:
 * ip = read(intr[0].iman)
 * if (ip) {
 * status = read(usbsts)
 * assert status
 * assert ip
 * accept (passing status)
 * }
 * decline
 */
/* Template for the kernel IRQ bottom-half. The NULL `addr` and zero
 * `value` fields are placeholders patched in hc_irq_code_gen once the
 * actual register addresses are known. Argument registers: arg3 = IMAN,
 * arg4 = IMAN & IP, arg1 = USBSTS, arg2 = USBSTS & ack mask. */
static const irq_cmd_t irq_commands[] = {
	{
		.cmd = CMD_PIO_READ_32,
		.dstarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_AND,
		.srcarg = 3,
		.dstarg = 4,
		.value = 0 /* host2xhci(32, 1) */
	},
	{
		/* If IP is clear, fall through to CMD_DECLINE at the end. */
		.cmd = CMD_PREDICATE,
		.srcarg = 4,
		.value = 5
	},
	{
		.cmd = CMD_PIO_READ_32,
		.dstarg = 1,
		.addr = NULL /* usbsts */
	},
	{
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
	},
	{
		/* Write back the RW1C bits to acknowledge them. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 2,
		.addr = NULL /* usbsts */
	},
	{
		/* Clear the IP bit by writing IMAN back. */
		.cmd = CMD_PIO_WRITE_A_32,
		.srcarg = 3,
		.addr = NULL /* intr[0].iman */
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};
303
304
/**
 * Generates code to accept interrupts. The xHCI is designed primarily for
 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
 * (except 0) are disabled.
 *
 * Copies the irq_commands template and patches in the physical addresses
 * of interrupter 0's IMAN register and USBSTS.
 *
 * @return The IRQ number on success, a negative error code otherwise.
 *         Ownership of code->ranges and code->cmds passes to the caller.
 */
int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
{
	assert(code);
	assert(hw_res);

	if (hw_res->irqs.count != 1) {
		usb_log_info("Unexpected HW resources to enable interrupts.");
		return EINVAL;
	}

	code->ranges = malloc(sizeof(irq_pio_range_t));
	if (code->ranges == NULL)
		return ENOMEM;

	code->cmds = malloc(sizeof(irq_commands));
	if (code->cmds == NULL) {
		free(code->ranges);
		return ENOMEM;
	}

	/* The whole MMIO range is accessible from the IRQ code. */
	code->rangecount = 1;
	code->ranges[0] = (irq_pio_range_t) {
	    .base = RNGABS(hc->mmio_range),
	    .size = RNGSZ(hc->mmio_range),
	};

	code->cmdcount = ARRAY_SIZE(irq_commands);
	memcpy(code->cmds, irq_commands, sizeof(irq_commands));

	/* Physical addresses, derived the same way hc_init_mmio derives the
	 * virtual ones (CAPLENGTH / RTSOFF offsets from the range base). */
	void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
	void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
	/* Indices correspond to the placeholder slots in irq_commands. */
	code->cmds[0].addr = intr0_iman;
	code->cmds[1].value = host2xhci(32, 1);
	code->cmds[3].addr = usbsts;
	code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
	code->cmds[5].addr = usbsts;
	code->cmds[6].addr = intr0_iman;

	return hw_res->irqs.irqs[0];
}
350
351/**
352 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
353 */
354int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
355{
356 /* No legacy support capability, the controller is solely for us */
357 if (!hc->legsup)
358 return EOK;
359
360 /* TODO: Test this with USB3-aware BIOS */
361 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
362 XHCI_REG_WR(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
363 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
364 usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
365 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
366 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
367 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
368 assert(XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1);
369 return EOK;
370 }
371 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
372 }
373 usb_log_error("BIOS did not release XHCI legacy hold!\n");
374
375 return ENOTSUP;
376}
377
/**
 * Ask the xHC to reset its state. Implements sequence
 * of halting the controller and asserting HCRST.
 */
static int hc_reset(xhci_hc_t *hc)
{
	/* Stop the HC: set R/S to 0 */
	XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);

	/* Wait 16 ms until the HC is halted */
	async_usleep(16000);
	assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH));

	/* Reset */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);

	/* Wait until the reset is complete */
	/* NOTE(review): no timeout — this spins forever if the HC never
	 * clears HCRST. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST))
		async_usleep(1000);

	return EOK;
}
399
/**
 * Initialize the HC: section 4.2
 *
 * Resets the controller, programs the DCBAA pointer, the command ring,
 * and interrupter 0's event ring registers, optionally enables
 * interrupts, and finally sets Run/Stop.
 */
int hc_start(xhci_hc_t *hc, bool irq)
{
	int err;

	if ((err = hc_reset(hc)))
		return err;

	// FIXME: Waiting forever.
	/* Registers may not be written while Controller Not Ready is set. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR))
		async_usleep(1000);

	/* Device Context Base Address Array and enabled slot count. */
	uint64_t dcbaaptr = hc->dcbaa_dma.phys;
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);

	/* Command ring pointer, carrying the current Ring Cycle State. */
	uint64_t crcr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
	if (hc->cr.trb_ring.pcs)
		crcr |= XHCI_REG_MASK(XHCI_OP_RCS);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));

	/* Interrupter 0: segment table size, dequeue pointer, table base. */
	xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
	uint64_t erdp = hc->event_ring.dequeue_ptr;
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
	XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
	uint64_t erstptr = hc->event_ring.erst.phys;
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
	XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));

	if (irq) {
		XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
		XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
	}

	/* Host System Error Enable. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);

	/* Run. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);

	/* The reset changed status of all ports, and SW originated reason does
	 * not cause an interrupt.
	 */
	xhci_rh_handle_port_change(&hc->rh);

	return EOK;
}
450
/**
 * Used only when polling. Shall supplement the irq_commands.
 *
 * Software mirror of the irq_commands sequence: tests IP of interrupter 0,
 * acknowledges the RW1C bits of USBSTS, and clears IP.
 *
 * @param status Out: USBSTS in xhci byte order when IP was set.
 *        NOTE(review): left untouched (but still logged) when IP is
 *        clear — relies on the caller pre-initializing it.
 */
int hc_status(bus_t *bus, uint32_t *status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
	if (ip) {
		*status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
		/* Writing the RW1C bits back acknowledges them. */
		XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
		XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);

		/* interrupt handler expects status from irq_commands, which is
		 * in xhci order. */
		*status = host2xhci(32, *status);
	}

	usb_log_debug2("HC(%p): Polled status: %x", hc, *status);
	return EOK;
}
471
/** Handler signature for one TRB dequeued from the event ring. */
typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);

/* Dispatch table indexed by TRB type. Types without an entry are NULL
 * and rejected with ENOTSUP by the dispatcher. */
static event_handler event_handlers [] = {
	[XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
	[XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &xhci_rh_handle_port_status_change_event,
	[XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
};
479
480static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
481{
482 unsigned type = TRB_TYPE(*trb);
483 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
484 return ENOTSUP;
485
486 return event_handlers[type](hc, trb);
487}
488
489/**
490 * Dequeue from event ring and handle dequeued events.
491 *
492 * As there can be events, that blocks on waiting for subsequent events,
493 * we solve this problem by first copying the event TRBs from the event ring,
494 * then asserting EHB and only after, handling the events.
495 *
496 * Whenever the event handling blocks, it switches fibril, and incoming
497 * IPC notification will create new event handling fibril for us.
498 */
499static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
500{
501 int err;
502 ssize_t size = 16;
503 xhci_trb_t *queue = malloc(sizeof(xhci_trb_t) * size);
504 if (!queue) {
505 usb_log_error("Not enough memory to run the event ring.");
506 return;
507 }
508
509 xhci_trb_t *head = queue;
510
511 while ((err = xhci_event_ring_dequeue(event_ring, head)) != ENOENT) {
512 if (err != EOK) {
513 usb_log_warning("Error while accessing event ring: %s", str_error(err));
514 break;
515 }
516
517 usb_log_debug2("Dequeued trb from event ring: %s", xhci_trb_str_type(TRB_TYPE(*head)));
518 head++;
519
520 /* Expand the array if needed. */
521 if (head - queue >= size) {
522 size *= 2;
523 xhci_trb_t *new_queue = realloc(queue, size);
524 if (new_queue == NULL)
525 break; /* Will process only those TRBs we have memory for. */
526
527 head = new_queue + (head - queue);
528 }
529
530 uint64_t erdp = hc->event_ring.dequeue_ptr;
531 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
532 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
533 }
534
535 /* Update the ERDP to make room in the ring. */
536 usb_log_debug2("Copying from ring finished, updating ERDP.");
537 uint64_t erdp = hc->event_ring.dequeue_ptr;
538 erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
539 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
540 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
541
542 /* Handle all of the collected events if possible. */
543 if (head == queue)
544 usb_log_warning("No events to be handled!");
545
546 for (xhci_trb_t *tail = queue; tail != head; tail++) {
547 if ((err = hc_handle_event(hc, tail, intr)) != EOK) {
548 usb_log_error("Failed to handle event: %s", str_error(err));
549 }
550 }
551
552 free(queue);
553 usb_log_debug2("Event ring run finished.");
554}
555
/**
 * Handle an interrupt request from xHC. Resolve all situations that trigger an
 * interrupt separately.
 *
 * Note that all RW1C bits in USBSTS register are cleared at the time of
 * handling the interrupt in irq_code. This method is the top-half.
 *
 * @param status contents of USBSTS register at the time of the interrupt,
 *        in xhci byte order (as produced by irq_commands / hc_status).
 */
void hc_interrupt(bus_t *bus, uint32_t status)
{
	xhci_hc_t *hc = bus_to_hc(bus);
	status = xhci2host(32, status);

	/* Each recognized condition is handled and its bit cleared; any
	 * leftover bits are reported at the end. */
	if (status & XHCI_REG_MASK(XHCI_OP_PCD)) {
		usb_log_debug2("Root hub interrupt.");
		xhci_rh_handle_port_change(&hc->rh);
		status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
		usb_log_error("Host controller error occured. Bad things gonna happen...");
		status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
		usb_log_debug2("Event interrupt, running the event ring.");
		hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
		status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
	}

	if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
		usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
		status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
	}

	if (status) {
		usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
	}
}
596
/**
 * Tear down all in-memory structures.
 *
 * Counterpart of hc_init_memory/hc_init_mmio; also releases the PIO
 * mapping of the register space.
 */
void hc_fini(xhci_hc_t *hc)
{
	xhci_bus_fini(&hc->bus);
	xhci_event_ring_fini(&hc->event_ring);
	xhci_scratchpad_free(hc);
	dma_buffer_free(&hc->dcbaa_dma);
	xhci_fini_commands(hc);
	xhci_rh_fini(&hc->rh);
	pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
	usb_log_info("HC(%p): Finalized.", hc);
}
611
612/**
613 * Ring a xHC Doorbell. Implements section 4.7.
614 */
615void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
616{
617 assert(hc);
618 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
619 pio_write_32(&hc->db_arry[doorbell], v);
620 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
621}
622
623/**
624 * Issue an Enable Slot command, returning the obtained Slot ID.
625 *
626 * @param slot_id Pointer where to store the obtained Slot ID.
627 */
628int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
629{
630 assert(hc);
631
632 int err;
633 xhci_cmd_t cmd;
634 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
635
636 if ((err = xhci_cmd_sync(hc, &cmd))) {
637 goto end;
638 }
639
640 if (slot_id) {
641 *slot_id = cmd.slot_id;
642 }
643
644end:
645 xhci_cmd_fini(&cmd);
646 return err;
647}
648
649/**
650 * Issue a Disable Slot command for a slot occupied by device.
651 *
652 * Frees the device context
653 */
654int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
655{
656 int err;
657 assert(hc);
658
659 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
660 return err;
661 }
662
663 /* Free the device context. */
664 hc->dcbaa[dev->slot_id] = 0;
665 dma_buffer_free(&dev->dev_ctx);
666
667 /* Mark the slot as invalid. */
668 dev->slot_id = 0;
669
670 return EOK;
671}
672
673/**
674 * Prepare an empty Endpoint Input Context inside a dma buffer.
675 */
676static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
677{
678 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
679 if (err)
680 return err;
681
682 xhci_input_ctx_t *ictx = dma_buf->virt;
683 memset(ictx, 0, sizeof(xhci_input_ctx_t));
684
685 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
686 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
687
688 return EOK;
689}
690
/**
 * Initialize a device, assigning it an address. Implements section 4.3.4.
 *
 * Allocates and registers the device context in the DCBAA, prepares an
 * input context describing the slot and EP0, and issues the Address
 * Device command. On failure, the device context is released again.
 *
 * @param dev Device to assing an address (unconfigured yet)
 * @param ep0 EP0 of device TODO remove, can be fetched from dev
 */
int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
{
	int err = ENOMEM;

	/* Although we have the precise PSIV value on devices of tier 1,
	 * we have to rely on reverse mapping on others. */
	if (!hc->speed_to_psiv[dev->base.speed]) {
		usb_log_error("Device reported an USB speed that cannot be mapped to HC port speed.");
		return EINVAL;
	}

	/* Setup and register device context */
	if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
		goto err;
	memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));

	hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);

	/* Issue configure endpoint command (sec 4.3.5). */
	dma_buffer_t ictx_dma_buf;
	if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
		goto err_dev_ctx;
	}
	xhci_input_ctx_t *ictx = ictx_dma_buf.virt;

	/* Initialize slot_ctx according to section 4.3.3 point 3. */
	XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
	XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
	XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
	XHCI_SLOT_SPEED_SET(ictx->slot_ctx, hc->speed_to_psiv[dev->base.speed]);

	/* In a very specific case, we have to set also these. But before that,
	 * we need to refactor how TT is handled in libusbhost. */
	XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
	XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);

	/* Copy endpoint 0 context and set A1 flag. */
	XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
	xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

	/* Issue Address Device command. */
	if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
		goto err_dev_ctx;
	}

	/* The HC filled in the address it assigned into the output context. */
	xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
	dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
	usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);

	return EOK;

err_dev_ctx:
	hc->dcbaa[dev->slot_id] = 0;
	dma_buffer_free(&dev->dev_ctx);
err:
	return err;
}
755
756/**
757 * Issue a Configure Device command for a device in slot.
758 *
759 * @param slot_id Slot ID assigned to the device.
760 */
761int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
762{
763 /* Issue configure endpoint command (sec 4.3.5). */
764 dma_buffer_t ictx_dma_buf;
765 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
766 if (err)
767 return err;
768
769 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
770
771 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
772}
773
774/**
775 * Issue a Deconfigure Device command for a device in slot.
776 *
777 * @param slot_id Slot ID assigned to the device.
778 */
779int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
780{
781 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
782 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
783}
784
785/**
786 * Instruct xHC to add an endpoint with supplied endpoint context.
787 *
788 * @param slot_id Slot ID assigned to the device.
789 * @param ep_idx Endpoint index (number + direction) in question
790 * @param ep_ctx Endpoint context of the endpoint
791 */
792int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
793{
794 /* Issue configure endpoint command (sec 4.3.5). */
795 dma_buffer_t ictx_dma_buf;
796 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
797 if (err)
798 return err;
799
800 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
801 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
802 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
803 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
804
805 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
806}
807
808/**
809 * Instruct xHC to drop an endpoint.
810 *
811 * @param slot_id Slot ID assigned to the device.
812 * @param ep_idx Endpoint index (number + direction) in question
813 */
814int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
815{
816 /* Issue configure endpoint command (sec 4.3.5). */
817 dma_buffer_t ictx_dma_buf;
818 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
819 if (err)
820 return err;
821
822 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
823 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
824 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
825
826 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
827}
828
829/**
830 * Instruct xHC to update information about an endpoint, using supplied
831 * endpoint context.
832 *
833 * @param slot_id Slot ID assigned to the device.
834 * @param ep_idx Endpoint index (number + direction) in question
835 * @param ep_ctx Endpoint context of the endpoint
836 */
837int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
838{
839 dma_buffer_t ictx_dma_buf;
840 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
841 if (err)
842 return err;
843
844 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
845 memset(ictx, 0, sizeof(xhci_input_ctx_t));
846
847 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
848 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
849
850 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
851}
852
853/**
854 * Instruct xHC to stop running a transfer ring on an endpoint.
855 *
856 * @param slot_id Slot ID assigned to the device.
857 * @param ep_idx Endpoint index (number + direction) in question
858 */
859int hc_stop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
860{
861
862 return xhci_cmd_sync_inline(hc, STOP_ENDPOINT, .slot_id = slot_id, .endpoint_id = ep_idx);
863}
864
865/**
866 * @}
867 */
Note: See TracBrowser for help on using the repository browser.