source: mainline/uspace/drv/bus/usb/xhci/hc.c@889146e

Last change on this file since 889146e was 889146e, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: commands shall not just timeout

The previous behavior broke the semantics: if a command was successful
but just took too long to complete, we returned an error, and the caller
had no way to know whether the command's effect had taken place.

This commit implements command aborting. wait_for_command now cannot
simply time out - instead, it aborts the currently running (probably
blocked) command and then goes back to waiting. So now, if command_sync
returns an error, it means the command really was unsuccessful.

If aborting the command takes too long, we should reset the whole HC.
This is not yet implemented.
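
For illustration, a minimal sketch of the abort-then-wait loop described above. This is not the actual commands.c code; the helper names wait_for_command_completion() and abort_command_ring() and the timeout constants are hypothetical placeholders:

static int command_sync_sketch(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	/* Hypothetical sketch only; helpers and timeouts are illustrative. */
	int err = wait_for_command_completion(hc, cmd, COMMAND_TIMEOUT_US);
	if (err != ETIMEOUT)
		return err;	/* Completed (successfully or not) in time. */

	/* The command is probably blocked - abort it on the command ring... */
	abort_command_ring(hc);

	/* ...and wait for the Command Completion Event again. An error now
	 * means the command really failed. If even the abort takes too long,
	 * the whole HC should be reset (not yet implemented). */
	return wait_for_command_completion(hc, cmd, ABORT_TIMEOUT_US);
}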

  • Property mode set to 100644
File size: 21.7 KB
1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/endpoint.h>
40#include "debug.h"
41#include "hc.h"
42#include "rh.h"
43#include "hw_struct/trb.h"
44#include "hw_struct/context.h"
45#include "endpoint.h"
46#include "transfers.h"
47#include "trb_ring.h"
48
49/**
50 * Default USB Speed ID mapping: Table 157
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
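/* Example: psie = 2, psim = 480 gives 480 << 20 bits/s, i.e. the nominal 480 Mb/s of USB 2.0 high speed (1024-based units). */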
53#define PORT_SPEED(usb, mjr, psie, psim) { \
54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
57 .usb_speed = USB_SPEED_##usb, \
58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
61static const xhci_port_speed_t ps_default_full = PORT_SPEED(FULL, 2, 2, 12);
62static const xhci_port_speed_t ps_default_low = PORT_SPEED(LOW, 2, 1, 1500);
63static const xhci_port_speed_t ps_default_high = PORT_SPEED(HIGH, 2, 2, 480);
64static const xhci_port_speed_t ps_default_super = PORT_SPEED(SUPER, 3, 3, 5);
65
66/**
67 * Walk the list of extended capabilities.
68 */
69static int hc_parse_ec(xhci_hc_t *hc)
70{
71 unsigned psic, major, minor;
72 xhci_sp_name_t name;
73
74 xhci_port_speed_t *speeds = hc->speeds;
75
76 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
77 xhci_dump_extcap(ec);
78 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
79 case XHCI_EC_USB_LEGACY:
80 assert(hc->legsup == NULL);
81 hc->legsup = (xhci_legsup_t *) ec;
82 break;
83 case XHCI_EC_SUPPORTED_PROTOCOL:
84 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
85 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
86 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
87 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
88
89 if (name.packed != xhci_name_usb.packed) {
90 /**
91 * Detecting such a protocol would work,
92 * but the rest of the implementation is made
93 * for the USB protocol only.
94 */
95 usb_log_error("Unknown protocol %.4s.", name.str);
96 return ENOTSUP;
97 }
98
99 // "Implied" speed
100 if (psic == 0) {
101 assert(minor == 0);
102
103 if (major == 2) {
104 speeds[1] = ps_default_full;
105 speeds[2] = ps_default_low;
106 speeds[3] = ps_default_high;
107
108 hc->speed_to_psiv[USB_SPEED_FULL] = 1;
109 hc->speed_to_psiv[USB_SPEED_LOW] = 2;
110 hc->speed_to_psiv[USB_SPEED_HIGH] = 3;
111 } else if (major == 3) {
112 speeds[4] = ps_default_super;
113 hc->speed_to_psiv[USB_SPEED_SUPER] = 4;
114 } else {
115 return EINVAL;
116 }
117
118 usb_log_debug2("Implied speed of USB %u.0 set up.", major);
119 } else {
120 for (unsigned i = 0; i < psic; i++) {
121 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
122 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
123 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
124 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
125 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
126
127 speeds[psiv].major = major;
128 speeds[psiv].minor = minor;
129 str_ncpy(speeds[psiv].name, 4, name.str, 4);
130 speeds[psiv].usb_speed = USB_SPEED_MAX;
131
132 uint64_t bps = PSI_TO_BPS(psie, psim);
133
134 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
135 speeds[psiv].rx_bps = bps;
136 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
137 speeds[psiv].tx_bps = bps;
138 usb_log_debug2("Speed %u set up for bps %" PRIu64 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps, speeds[psiv].tx_bps);
139 }
140 }
141 }
142 }
143 }
144 return EOK;
145}
146
147int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
148{
149 int err;
150
151 if (hw_res->mem_ranges.count != 1) {
152 usb_log_error("Unexpected MMIO area, bailing out.");
153 return EINVAL;
154 }
155
156 hc->mmio_range = hw_res->mem_ranges.ranges[0];
157
158 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n",
159 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
160
161 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
162 return EOVERFLOW;
163
164 void *base;
165 if ((err = pio_enable_range(&hc->mmio_range, &base)))
166 return err;
167
168 hc->reg_base = base;
169 hc->cap_regs = (xhci_cap_regs_t *) base;
170 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
171 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
172 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
173
174 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
175 if (xec_offset > 0)
176 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
177
178 usb_log_debug2("Initialized MMIO reg areas:");
179 usb_log_debug2("\tCapability regs: %p", hc->cap_regs);
180 usb_log_debug2("\tOperational regs: %p", hc->op_regs);
181 usb_log_debug2("\tRuntime regs: %p", hc->rt_regs);
182 usb_log_debug2("\tDoorbell array base: %p", hc->db_arry);
183
184 xhci_dump_cap_regs(hc->cap_regs);
185
186 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
187 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
188
189 if ((err = hc_parse_ec(hc))) {
190 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
191 return err;
192 }
193
194 return EOK;
195}
196
197int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
198{
199 int err;
200
201 if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
202 return ENOMEM;
203 hc->dcbaa = hc->dcbaa_dma.virt;
204
205 if ((err = xhci_event_ring_init(&hc->event_ring)))
206 goto err_dcbaa;
207
208 if ((err = xhci_scratchpad_alloc(hc)))
209 goto err_event_ring;
210
211 if ((err = xhci_init_commands(hc)))
212 goto err_scratch;
213
214 if ((err = xhci_rh_init(&hc->rh, hc, device)))
215 goto err_cmd;
216
217 if ((err = xhci_bus_init(&hc->bus, hc)))
218 goto err_rh;
219
220
221 return EOK;
222
223err_rh:
224 xhci_rh_fini(&hc->rh);
225err_cmd:
226 xhci_fini_commands(hc);
227err_scratch:
228 xhci_scratchpad_free(hc);
229err_event_ring:
230 xhci_event_ring_fini(&hc->event_ring);
231err_dcbaa:
232 hc->dcbaa = NULL;
233 dma_buffer_free(&hc->dcbaa_dma);
234 return err;
235}
236
237/*
238 * Pseudocode:
239 * ip = read(intr[0].iman)
240 * if (ip) {
241 * status = read(usbsts)
242 * assert status
243 * assert ip
244 * accept (passing status)
245 * }
246 * decline
247 */
248static const irq_cmd_t irq_commands[] = {
249 {
250 .cmd = CMD_PIO_READ_32,
251 .dstarg = 3,
252 .addr = NULL /* intr[0].iman */
253 },
254 {
255 .cmd = CMD_AND,
256 .srcarg = 3,
257 .dstarg = 4,
258 .value = 0 /* host2xhci(32, 1) */
259 },
260 {
261 .cmd = CMD_PREDICATE,
262 .srcarg = 4,
263 .value = 5
264 },
265 {
266 .cmd = CMD_PIO_READ_32,
267 .dstarg = 1,
268 .addr = NULL /* usbsts */
269 },
270 {
271 .cmd = CMD_AND,
272 .srcarg = 1,
273 .dstarg = 2,
274 .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
275 },
276 {
277 .cmd = CMD_PIO_WRITE_A_32,
278 .srcarg = 2,
279 .addr = NULL /* usbsts */
280 },
281 {
282 .cmd = CMD_PIO_WRITE_A_32,
283 .srcarg = 3,
284 .addr = NULL /* intr[0].iman */
285 },
286 {
287 .cmd = CMD_ACCEPT
288 },
289 {
290 .cmd = CMD_DECLINE
291 }
292};
293
294
295/**
296 * Generates code to accept interrupts. The xHCI is designed primarily for
297 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
298 * (except 0) are disabled.
299 */
300int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
301{
302 assert(code);
303 assert(hw_res);
304
305 if (hw_res->irqs.count != 1) {
306 usb_log_info("Unexpected HW resources to enable interrupts.");
307 return EINVAL;
308 }
309
310 code->ranges = malloc(sizeof(irq_pio_range_t));
311 if (code->ranges == NULL)
312 return ENOMEM;
313
314 code->cmds = malloc(sizeof(irq_commands));
315 if (code->cmds == NULL) {
316 free(code->ranges);
317 return ENOMEM;
318 }
319
320 code->rangecount = 1;
321 code->ranges[0] = (irq_pio_range_t) {
322 .base = RNGABS(hc->mmio_range),
323 .size = RNGSZ(hc->mmio_range),
324 };
325
326 code->cmdcount = ARRAY_SIZE(irq_commands);
327 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
328
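	/* Patch in the actual register addresses and byte-order-converted values that could not be filled in statically (the NULL / 0 placeholders above). */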
329 void *intr0_iman = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]);
330 void *usbsts = RNGABSPTR(hc->mmio_range) + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH) + offsetof(xhci_op_regs_t, usbsts);
331 code->cmds[0].addr = intr0_iman;
332 code->cmds[1].value = host2xhci(32, 1);
333 code->cmds[3].addr = usbsts;
334 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
335 code->cmds[5].addr = usbsts;
336 code->cmds[6].addr = intr0_iman;
337
338 return hw_res->irqs.irqs[0];
339}
340
341int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
342{
343 /* No legacy support capability, the controller is solely for us */
344 if (!hc->legsup)
345 return EOK;
346
347 /* Section 4.22.1 */
348 /* TODO: Test this with USB3-aware BIOS */
349 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
350 XHCI_REG_WR(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
351 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
352 usb_log_debug2("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
353 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
354 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
355 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
356 assert(XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1);
357 return EOK;
358 }
359 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
360 }
361 usb_log_error("BIOS did not release XHCI legacy hold!\n");
362
363 return ENOTSUP;
364}
365
366static int hc_reset(xhci_hc_t *hc)
367{
368 /* Stop the HC: set R/S to 0 */
369 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);
370
371 /* Wait 16 ms until the HC is halted */
372 async_usleep(16000);
373 assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH));
374
375 /* Reset */
376 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);
377
378 /* Wait until the reset is complete */
379 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST))
380 async_usleep(1000);
381
382 return EOK;
383}
384
385/**
386 * Initialize the HC: section 4.2
387 */
388int hc_start(xhci_hc_t *hc, bool irq)
389{
390 int err;
391
392 if ((err = hc_reset(hc)))
393 return err;
394
395 // FIXME: Waiting forever.
396 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR))
397 async_usleep(1000);
398
399 uint64_t dcbaaptr = hc->dcbaa_dma.phys;
400 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
401 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
402 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, 0);
403
404 uint64_t crptr = xhci_trb_ring_get_dequeue_ptr(&hc->cr.trb_ring);
405 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crptr) >> 6);
406 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crptr));
407
408 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
409 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
410 uint64_t erdp = hc->event_ring.dequeue_ptr;
411 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
412 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
413 uint64_t erstptr = hc->event_ring.erst.phys;
414 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
415 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));
416
417 if (irq) {
418 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
419 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
420 }
421
422 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
423
424 	/* The reset changed the status of all ports, and a SW-originated
425 	 * change does not cause an interrupt.
426 	 */
427 xhci_rh_handle_port_change(&hc->rh);
428
429 return EOK;
430}
431
432/**
433 * Used only when polling. Shall supplement the irq_commands.
434 */
435int hc_status(xhci_hc_t *hc, uint32_t *status)
436{
437 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
438 if (ip) {
439 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
440 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
441 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
442
443 /* interrupt handler expects status from irq_commands, which is
444 * in xhci order. */
445 *status = host2xhci(32, *status);
446 }
447
448 usb_log_debug2("HC(%p): Polled status: %x", hc, *status);
449 return EOK;
450}
451
452int hc_schedule(xhci_hc_t *hc, usb_transfer_batch_t *batch)
453{
454 assert(batch);
455 assert(batch->ep);
456
457 if (!batch->target.address) {
458 usb_log_error("Attempted to schedule transfer to address 0.");
459 return EINVAL;
460 }
461
462 return xhci_transfer_schedule(hc, batch);
463}
464
465typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);
466
467static event_handler event_handlers [] = {
468 [XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
469 [XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT] = &xhci_rh_handle_port_status_change_event,
470 [XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
471};
472
473static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb, xhci_interrupter_regs_t *intr)
474{
475 unsigned type = TRB_TYPE(*trb);
476 if (type >= ARRAY_SIZE(event_handlers) || !event_handlers[type])
477 return ENOTSUP;
478
479 return event_handlers[type](hc, trb);
480}
481
482static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr)
483{
484 int err;
485 ssize_t size = 16;
486 xhci_trb_t *queue = malloc(sizeof(xhci_trb_t) * size);
487 if (!queue) {
488 usb_log_error("Not enough memory to run the event ring.");
489 return;
490 }
491
492 xhci_trb_t *head = queue;
493
494 while ((err = xhci_event_ring_dequeue(event_ring, head)) != ENOENT) {
495 if (err != EOK) {
496 usb_log_warning("Error while accessing event ring: %s", str_error(err));
497 break;
498 }
499
500 usb_log_debug2("Dequeued trb from event ring: %s", xhci_trb_str_type(TRB_TYPE(*head)));
501 head++;
502
503 /* Expand the array if needed. */
504 if (head - queue >= size) {
505 size *= 2;
506 			xhci_trb_t *new_queue = realloc(queue, sizeof(xhci_trb_t) * size);
507 if (new_queue == NULL)
508 break; /* Will process only those TRBs we have memory for. */
509
510 head = new_queue + (head - queue);
511 }
512 }
513
514 /* Update the ERDP to make room in the ring. */
515 usb_log_debug2("Copying from ring finished, updating ERDP.");
516 uint64_t erdp = hc->event_ring.dequeue_ptr;
517 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
518 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
519 XHCI_REG_SET(intr, XHCI_INTR_ERDP_EHB, 1);
520
521 /* Handle all of the collected events if possible. */
522 if (head == queue)
523 usb_log_warning("No events to be handled!");
524
525 for (xhci_trb_t *tail = queue; tail != head; tail++) {
526 if ((err = hc_handle_event(hc, tail, intr)) != EOK) {
527 usb_log_error("Failed to handle event: %s", str_error(err));
528 }
529 }
530
531 free(queue);
532 usb_log_debug2("Event ring run finished.");
533}
534
535void hc_interrupt(xhci_hc_t *hc, uint32_t status)
536{
537 status = xhci2host(32, status);
538
539 if (status & XHCI_REG_MASK(XHCI_OP_PCD)) {
540 usb_log_debug2("Root hub interrupt.");
541 xhci_rh_handle_port_change(&hc->rh);
542 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
543 }
544
545 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
546 usb_log_error("Host controller error occured. Bad things gonna happen...");
547 status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
548 }
549
550 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
551 usb_log_debug2("Event interrupt, running the event ring.");
552 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
553 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
554 }
555
556 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
557 usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!");
558 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
559 }
560
561 if (status) {
562 usb_log_error("Non-zero status after interrupt handling (%08x) - missing something?", status);
563 }
564}
565
566static void hc_dcbaa_fini(xhci_hc_t *hc)
567{
568 xhci_scratchpad_free(hc);
569 dma_buffer_free(&hc->dcbaa_dma);
570}
571
572void hc_fini(xhci_hc_t *hc)
573{
574 xhci_bus_fini(&hc->bus);
575 xhci_event_ring_fini(&hc->event_ring);
576 hc_dcbaa_fini(hc);
577 xhci_fini_commands(hc);
578 xhci_rh_fini(&hc->rh);
579 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
580 usb_log_info("HC(%p): Finalized.", hc);
581}
582
583int hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
584{
585 assert(hc);
586 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
587 pio_write_32(&hc->db_arry[doorbell], v);
588 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
589 return EOK;
590}
591
592int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
593{
594 assert(hc);
595
596 int err;
597 xhci_cmd_t cmd;
598 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
599
600 if ((err = xhci_cmd_sync(hc, &cmd))) {
601 goto end;
602 }
603
604 if (slot_id) {
605 *slot_id = cmd.slot_id;
606 }
607
608end:
609 xhci_cmd_fini(&cmd);
610 return err;
611}
612
613int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev)
614{
615 int err;
616 assert(hc);
617
618 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
619 return err;
620 }
621
622 /* Free the device context. */
623 hc->dcbaa[dev->slot_id] = 0;
624 dma_buffer_free(&dev->dev_ctx);
625
626 /* Mark the slot as invalid. */
627 dev->slot_id = 0;
628
629 return EOK;
630}
631
632static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf)
633{
634 const int err = dma_buffer_alloc(dma_buf, sizeof(xhci_input_ctx_t));
635 if (err)
636 return err;
637
638 xhci_input_ctx_t *ictx = dma_buf->virt;
639 memset(ictx, 0, sizeof(xhci_input_ctx_t));
640
641 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
642 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);
643
644 return EOK;
645}
646
647int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0)
648{
649 int err = ENOMEM;
650
651 /* Although we have the precise PSIV value on devices of tier 1,
652 * we have to rely on reverse mapping on others. */
653 if (!hc->speed_to_psiv[dev->base.speed]) {
654 usb_log_error("Device reported an USB speed that cannot be mapped to HC port speed.");
655 return EINVAL;
656 }
657
658 /* Setup and register device context */
659 if (dma_buffer_alloc(&dev->dev_ctx, sizeof(xhci_device_ctx_t)))
660 goto err;
661 memset(dev->dev_ctx.virt, 0, sizeof(xhci_device_ctx_t));
662
663 hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
664
665 /* Issue configure endpoint command (sec 4.3.5). */
666 dma_buffer_t ictx_dma_buf;
667 if ((err = create_configure_ep_input_ctx(&ictx_dma_buf))) {
668 goto err_dev_ctx;
669 }
670 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
671
672 /* Initialize slot_ctx according to section 4.3.3 point 3. */
673 XHCI_SLOT_ROOT_HUB_PORT_SET(ictx->slot_ctx, dev->rh_port);
674 XHCI_SLOT_CTX_ENTRIES_SET(ictx->slot_ctx, 1);
675 XHCI_SLOT_ROUTE_STRING_SET(ictx->slot_ctx, dev->route_str);
676 XHCI_SLOT_SPEED_SET(ictx->slot_ctx, hc->speed_to_psiv[dev->base.speed]);
677
678 	/* In a very specific case, we also have to set these. But before that,
679 	 * we need to refactor how TT is handled in libusbhost. */
680 XHCI_SLOT_TT_HUB_SLOT_ID_SET(ictx->slot_ctx, 0);
681 XHCI_SLOT_TT_HUB_PORT_SET(ictx->slot_ctx, 0);
682 XHCI_SLOT_MTT_SET(ictx->slot_ctx, 0);
683
684 /* Copy endpoint 0 context and set A1 flag. */
685 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);
686 xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);
687
688 /* Issue Address Device command. */
689 if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx_dma_buf))) {
690 goto err_dev_ctx;
691 }
692
693 xhci_device_ctx_t *dev_ctx = dev->dev_ctx.virt;
694 dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev_ctx->slot_ctx);
695 usb_log_debug2("Obtained USB address: %d.\n", dev->base.address);
696
697 /* From now on, the device is officially online, yay! */
698 fibril_mutex_lock(&dev->base.guard);
699 dev->online = true;
700 fibril_mutex_unlock(&dev->base.guard);
701
702 return EOK;
703
704err_dev_ctx:
705 hc->dcbaa[dev->slot_id] = 0;
706 dma_buffer_free(&dev->dev_ctx);
707err:
708 return err;
709}
710
711int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id)
712{
713 /* Issue configure endpoint command (sec 4.3.5). */
714 dma_buffer_t ictx_dma_buf;
715 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
716 if (err)
717 return err;
718
719 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
720
721 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
722}
723
724int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id)
725{
726 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
727 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .deconfigure = true);
728}
729
730int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
731{
732 /* Issue configure endpoint command (sec 4.3.5). */
733 dma_buffer_t ictx_dma_buf;
734 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
735 if (err)
736 return err;
737
738 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
739 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
740 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
741 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
742
743 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
744}
745
746int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx)
747{
748 /* Issue configure endpoint command (sec 4.3.5). */
749 dma_buffer_t ictx_dma_buf;
750 const int err = create_configure_ep_input_ctx(&ictx_dma_buf);
751 if (err)
752 return err;
753
754 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
755 XHCI_INPUT_CTRL_CTX_DROP_SET(ictx->ctrl_ctx, ep_idx + 1); /* Preceded by slot ctx */
756 // TODO: Set slot context and other flags. (probably forgot a lot of 'em)
757
758 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
759}
760
761int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx)
762{
763 dma_buffer_t ictx_dma_buf;
764 const int err = dma_buffer_alloc(&ictx_dma_buf, sizeof(xhci_input_ctx_t));
765 if (err)
766 return err;
767
768 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
769 memset(ictx, 0, sizeof(xhci_input_ctx_t));
770
771 XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, ep_idx + 1);
772 memcpy(&ictx->endpoint_ctx[ep_idx], ep_ctx, sizeof(xhci_ep_ctx_t));
773
774 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT, .slot_id = slot_id, .input_ctx = ictx_dma_buf);
775}
776
777/**
778 * @}
779 */