source: mainline/uspace/drv/bus/usb/xhci/hc.c@ 3038d51

Last change on this file since 3038d51 was 047fbc8, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci rh: have standalone buffer for events

[5cbccd4]1/*
2 * Copyright (c) 2017 Ondrej Hlavaty
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief The host controller data bookkeeping.
34 */
35
36#include <errno.h>
[cb89430]37#include <str_error.h>
[5cbccd4]38#include <usb/debug.h>
[5fd9c30]39#include <usb/host/endpoint.h>
[5cbccd4]40#include "debug.h"
41#include "hc.h"
[7bd99bf]42#include "rh.h"
[cb89430]43#include "hw_struct/trb.h"
[0206d35]44#include "hw_struct/context.h"
45#include "endpoint.h"
[e9e24f2]46#include "transfers.h"
47#include "trb_ring.h"
[5cbccd4]48
[91ca111]49/**
50 * Default USB Speed ID mapping: Table 157
51 */
52#define PSI_TO_BPS(psie, psim) (((uint64_t) psim) << (10 * psie))
[f668d60]53#define PORT_SPEED(usb, mjr, psie, psim) { \
[816335c]54 .name = "USB ", \
55 .major = mjr, \
56 .minor = 0, \
[f668d60]57 .usb_speed = USB_SPEED_##usb, \
[91ca111]58 .rx_bps = PSI_TO_BPS(psie, psim), \
59 .tx_bps = PSI_TO_BPS(psie, psim) \
60}
[a75f9cbc]61
62static const xhci_port_speed_t default_psiv_to_port_speed [] = {
63 [1] = PORT_SPEED(FULL, 2, 2, 12),
64 [2] = PORT_SPEED(LOW, 2, 1, 1500),
65 [3] = PORT_SPEED(HIGH, 2, 2, 480),
66 [4] = PORT_SPEED(SUPER, 3, 3, 5),
67};
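/*
 * A worked example of the mapping above: the default High-speed entry has
 * psie = 2 and psim = 480, so PSI_TO_BPS(2, 480) = 480 << (10 * 2)
 * = 503 316 480 b/s, i.e. the nominal 480 Mb/s (the macro substitutes
 * powers of two for the multipliers selected by PSIE). Likewise the
 * SuperSpeed entry yields 5 << 30, roughly 5 Gb/s.
 */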
68
69static const unsigned usb_speed_to_psiv [] = {
70 [USB_SPEED_FULL] = 1,
71 [USB_SPEED_LOW] = 2,
72 [USB_SPEED_HIGH] = 3,
73 [USB_SPEED_SUPER] = 4,
74};
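/*
 * Reverse of the default mapping above, used by hc_speed_to_psiv() and
 * hc_address_device() when only a usb_speed_t is known - typically for
 * devices behind hubs, where the precise PSIV reported by the root port is
 * not available.
 */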
[91ca111]75
76/**
77 * Walk the list of extended capabilities.
[eb928c4]78 *
79 * The most interesting thing hidden in extended capabilities is the mapping of
80 * ports to protocol versions and speeds.
[91ca111]81 */
82static int hc_parse_ec(xhci_hc_t *hc)
83{
[816335c]84 unsigned psic, major, minor;
85 xhci_sp_name_t name;
86
[f668d60]87 xhci_port_speed_t *speeds = hc->speeds;
[91ca111]88
89 for (xhci_extcap_t *ec = hc->xecp; ec; ec = xhci_extcap_next(ec)) {
90 xhci_dump_extcap(ec);
91 switch (XHCI_REG_RD(ec, XHCI_EC_CAP_ID)) {
92 case XHCI_EC_USB_LEGACY:
93 assert(hc->legsup == NULL);
94 hc->legsup = (xhci_legsup_t *) ec;
95 break;
96 case XHCI_EC_SUPPORTED_PROTOCOL:
97 psic = XHCI_REG_RD(ec, XHCI_EC_SP_PSIC);
98 major = XHCI_REG_RD(ec, XHCI_EC_SP_MAJOR);
[816335c]99 minor = XHCI_REG_RD(ec, XHCI_EC_SP_MINOR);
100 name.packed = host2uint32_t_le(XHCI_REG_RD(ec, XHCI_EC_SP_NAME));
101
102 if (name.packed != xhci_name_usb.packed) {
103 /*
104 * Detection of such a protocol would work,
105 * but the rest of the implementation supports
106 * the USB protocol only.
107 */
108 usb_log_error("Unknown protocol %.4s.", name.str);
109 return ENOTSUP;
110 }
[91ca111]111
[a9fcd73]112 unsigned offset = XHCI_REG_RD(ec, XHCI_EC_SP_CP_OFF);
113 unsigned count = XHCI_REG_RD(ec, XHCI_EC_SP_CP_COUNT);
114 xhci_rh_set_ports_protocol(&hc->rh, offset, count, major);
115
[91ca111]116 // "Implied" speed
117 if (psic == 0) {
[816335c]118 assert(minor == 0);
[370a1c8]119
[91ca111]120 if (major == 2) {
[a75f9cbc]121 speeds[1] = default_psiv_to_port_speed[1];
122 speeds[2] = default_psiv_to_port_speed[2];
123 speeds[3] = default_psiv_to_port_speed[3];
[91ca111]124 } else if (major == 3) {
[a75f9cbc]125 speeds[4] = default_psiv_to_port_speed[4];
[91ca111]126 } else {
127 return EINVAL;
128 }
129
[defaab2]130 usb_log_debug("Implied speed of USB %u.0 set up.", major);
[91ca111]131 } else {
132 for (unsigned i = 0; i < psic; i++) {
133 xhci_psi_t *psi = xhci_extcap_psi(ec, i);
134 unsigned sim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
135 unsigned psiv = XHCI_REG_RD(psi, XHCI_PSI_PSIV);
136 unsigned psie = XHCI_REG_RD(psi, XHCI_PSI_PSIE);
137 unsigned psim = XHCI_REG_RD(psi, XHCI_PSI_PSIM);
[a75f9cbc]138 uint64_t bps = PSI_TO_BPS(psie, psim);
139
140 /*
[8033f89]141 * The speed is not implied, but one of the default PSIVs is
142 * used. This is not clearly stated in the xHCI spec. There is
143 * a clear intention to allow the xHC to specify its own speed
144 * parameters, but throughout the document fixed values are
145 * used for e.g. High-speed (3), without stating that the
146 * controller shall have implied default speeds - and for
147 * instance Intel controllers do not. So let's check whether
148 * the values match, and if so, accept the implied USB speed too.
[a75f9cbc]149 *
150 * The main reason we need this is to have a usb_speed
151 * mapping also for devices connected to hubs.
152 */
153 if (psiv < ARRAY_SIZE(default_psiv_to_port_speed)
154 && default_psiv_to_port_speed[psiv].major == major
155 && default_psiv_to_port_speed[psiv].minor == minor
156 && default_psiv_to_port_speed[psiv].rx_bps == bps
157 && default_psiv_to_port_speed[psiv].tx_bps == bps) {
158 speeds[psiv] = default_psiv_to_port_speed[psiv];
[8033f89]159 usb_log_debug("Assumed default %s speed of USB %u.",
160 usb_str_speed(speeds[psiv].usb_speed), major);
[a75f9cbc]161 continue;
162 }
[91ca111]163
[a75f9cbc]164 // Custom speed
[816335c]165 speeds[psiv].major = major;
166 speeds[psiv].minor = minor;
167 str_ncpy(speeds[psiv].name, 4, name.str, 4);
[f668d60]168 speeds[psiv].usb_speed = USB_SPEED_MAX;
[816335c]169
[91ca111]170 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_RX)
[816335c]171 speeds[psiv].rx_bps = bps;
[91ca111]172 if (sim == XHCI_PSI_PLT_SYMM || sim == XHCI_PSI_PLT_TX) {
[816335c]173 speeds[psiv].tx_bps = bps;
[8033f89]174 usb_log_debug("Speed %u set up for bps %" PRIu64
175 " / %" PRIu64 ".", psiv, speeds[psiv].rx_bps,
176 speeds[psiv].tx_bps);
[91ca111]177 }
178 }
179 }
180 }
181 }
182 return EOK;
183}
184
[eb928c4]185/**
186 * Initialize MMIO spaces of xHC.
187 */
[e4d7363]188int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
189{
190 int err;
191
192 if (hw_res->mem_ranges.count != 1) {
193 usb_log_error("Unexpected MMIO area, bailing out.");
194 return EINVAL;
195 }
196
197 hc->mmio_range = hw_res->mem_ranges.ranges[0];
198
[a1732929]199 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.",
[e4d7363]200 RNGABSPTR(hc->mmio_range), RNGSZ(hc->mmio_range), hw_res->irqs.irqs[0]);
201
202 if (RNGSZ(hc->mmio_range) < sizeof(xhci_cap_regs_t))
203 return EOVERFLOW;
204
205 void *base;
206 if ((err = pio_enable_range(&hc->mmio_range, &base)))
207 return err;
208
[20eaa82]209 hc->reg_base = base;
[e4d7363]210 hc->cap_regs = (xhci_cap_regs_t *) base;
211 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH));
212 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF));
213 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF));
214
[91ca111]215 uintptr_t xec_offset = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_XECP) * sizeof(xhci_dword_t);
216 if (xec_offset > 0)
217 hc->xecp = (xhci_extcap_t *) (base + xec_offset);
218
[defaab2]219 usb_log_debug("Initialized MMIO reg areas:");
220 usb_log_debug("\tCapability regs: %p", hc->cap_regs);
221 usb_log_debug("\tOperational regs: %p", hc->op_regs);
222 usb_log_debug("\tRuntime regs: %p", hc->rt_regs);
223 usb_log_debug("\tDoorbell array base: %p", hc->db_arry);
[e4d7363]224
225 xhci_dump_cap_regs(hc->cap_regs);
226
227 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64);
[7ec7b7e]228 hc->csz = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_CSZ);
[e4d7363]229 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
[94e9c29]230
231 struct timeval tv;
232 getuptime(&tv);
233 hc->wrap_time = tv.tv_sec * 1000000 + tv.tv_usec;
[665368c]234 hc->wrap_count = 0;
[94e9c29]235
[708d8fcd]236 unsigned ist = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_IST);
237 hc->ist = (ist & 0x10 >> 1) * (ist & 0xf);
[e4d7363]238
[a9fcd73]239 if ((err = xhci_rh_init(&hc->rh, hc)))
240 goto err_pio;
241
242 if ((err = hc_parse_ec(hc)))
243 goto err_rh;
[91ca111]244
[e4d7363]245 return EOK;
[a9fcd73]246
247err_rh:
248 xhci_rh_fini(&hc->rh);
249err_pio:
250 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
251 return err;
[e4d7363]252}
253
[2c0564c]254static int event_worker(void *arg);
255
[eb928c4]256/**
257 * Initialize structures kept in allocated memory.
258 */
[0f6b50f]259int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device)
[e4d7363]260{
261 int err;
262
[b80c1ab]263 if (dma_buffer_alloc(&hc->dcbaa_dma, (1 + hc->max_slots) * sizeof(uint64_t)))
[e4d7363]264 return ENOMEM;
[b80c1ab]265 hc->dcbaa = hc->dcbaa_dma.virt;
[e4d7363]266
[998773d]267 if ((err = xhci_event_ring_init(&hc->event_ring, 1)))
[889146e]268 goto err_dcbaa;
[e4d7363]269
[b19131c5]270 if ((err = xhci_scratchpad_alloc(hc)))
[5a9ae994]271 goto err_event_ring;
[e4d7363]272
[aee352c]273 if ((err = xhci_init_commands(hc)))
[ee28ae66]274 goto err_scratch;
[aee352c]275
[2b61945]276 if ((err = xhci_bus_init(&hc->bus, hc)))
[6832245]277 goto err_cmd;
[e6b9182]278
[2c0564c]279 fid_t fid = fibril_create(&event_worker, hc);
280 if (!fid)
281 goto err_bus;
282
283 // TODO: completion_reset
284 hc->event_fibril_completion.active = true;
285 fibril_mutex_initialize(&hc->event_fibril_completion.guard);
286 fibril_condvar_initialize(&hc->event_fibril_completion.cv);
287
288 xhci_sw_ring_init(&hc->sw_ring, PAGE_SIZE / sizeof(xhci_trb_t));
289
290 fibril_add_ready(fid);
291
[e4d7363]292 return EOK;
293
[2c0564c]294err_bus:
295 xhci_bus_fini(&hc->bus);
[ee28ae66]296err_cmd:
[d271f78]297 xhci_fini_commands(hc);
[ee28ae66]298err_scratch:
299 xhci_scratchpad_free(hc);
[5a9ae994]300err_event_ring:
[e4d7363]301 xhci_event_ring_fini(&hc->event_ring);
302err_dcbaa:
[b80c1ab]303 hc->dcbaa = NULL;
304 dma_buffer_free(&hc->dcbaa_dma);
[e4d7363]305 return err;
306}
307
[ab5a0830]308/*
309 * Pseudocode:
310 * ip = read(intr[0].iman)
311 * if (ip) {
312 * status = read(usbsts)
313 * assert status
314 * assert ip
315 * accept (passing status)
316 * }
317 * decline
318 */
319static const irq_cmd_t irq_commands[] = {
320 {
321 .cmd = CMD_PIO_READ_32,
322 .dstarg = 3,
323 .addr = NULL /* intr[0].iman */
324 },
325 {
326 .cmd = CMD_AND,
327 .srcarg = 3,
328 .dstarg = 4,
329 .value = 0 /* host2xhci(32, 1) */
330 },
331 {
332 .cmd = CMD_PREDICATE,
333 .srcarg = 4,
334 .value = 5
335 },
336 {
337 .cmd = CMD_PIO_READ_32,
338 .dstarg = 1,
339 .addr = NULL /* usbsts */
340 },
341 {
342 .cmd = CMD_AND,
343 .srcarg = 1,
344 .dstarg = 2,
345 .value = 0 /* host2xhci(32, XHCI_STATUS_ACK_MASK) */
346 },
347 {
348 .cmd = CMD_PIO_WRITE_A_32,
349 .srcarg = 2,
350 .addr = NULL /* usbsts */
351 },
352 {
353 .cmd = CMD_PIO_WRITE_A_32,
[efe9463]354 .srcarg = 3,
[ab5a0830]355 .addr = NULL /* intr[0].iman */
356 },
357 {
358 .cmd = CMD_ACCEPT
359 },
360 {
361 .cmd = CMD_DECLINE
362 }
363};
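/*
 * A note on the encoding above: the CMD_PREDICATE value of 5 makes the
 * handler skip the five following commands (the USBSTS read/ack and the IMAN
 * write-back, ending with CMD_ACCEPT) when the masked IP bit is zero, so
 * control falls through to CMD_DECLINE - matching the pseudocode above. The
 * NULL addresses and zero masks are placeholders; hc_irq_code_gen() below
 * patches in the real register addresses and values.
 */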
364
[e4d7363]365
[cb89430]366/**
367 * Generates code to accept interrupts. The xHCI is designed primarily for
368 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters
369 * (except 0) are disabled.
370 */
[e4d7363]371int hc_irq_code_gen(irq_code_t *code, xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res)
[cb89430]372{
373 assert(code);
374 assert(hw_res);
375
[e4d7363]376 if (hw_res->irqs.count != 1) {
[cb89430]377 usb_log_info("Unexpected HW resources to enable interrupts.");
378 return EINVAL;
379 }
380
381 code->ranges = malloc(sizeof(irq_pio_range_t));
382 if (code->ranges == NULL)
383 return ENOMEM;
384
385 code->cmds = malloc(sizeof(irq_commands));
386 if (code->cmds == NULL) {
387 free(code->ranges);
388 return ENOMEM;
389 }
390
391 code->rangecount = 1;
392 code->ranges[0] = (irq_pio_range_t) {
[91ca111]393 .base = RNGABS(hc->mmio_range),
394 .size = RNGSZ(hc->mmio_range),
[cb89430]395 };
396
397 code->cmdcount = ARRAY_SIZE(irq_commands);
398 memcpy(code->cmds, irq_commands, sizeof(irq_commands));
399
[8033f89]400 void *intr0_iman = RNGABSPTR(hc->mmio_range)
401 + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF)
402 + offsetof(xhci_rt_regs_t, ir[0]);
403 void *usbsts = RNGABSPTR(hc->mmio_range)
404 + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH)
405 + offsetof(xhci_op_regs_t, usbsts);
406
[cb89430]407 code->cmds[0].addr = intr0_iman;
408 code->cmds[1].value = host2xhci(32, 1);
[ab5a0830]409 code->cmds[3].addr = usbsts;
410 code->cmds[4].value = host2xhci(32, XHCI_STATUS_ACK_MASK);
411 code->cmds[5].addr = usbsts;
412 code->cmds[6].addr = intr0_iman;
[cb89430]413
414 return hw_res->irqs.irqs[0];
415}
416
[eb928c4]417/**
418 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec.
419 */
[e4d7363]420int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev)
[cb89430]421{
[91ca111]422 /* No legacy support capability, the controller is solely for us */
423 if (!hc->legsup)
424 return EOK;
425
[0e7380f]426 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
427 return ETIMEOUT;
428
[defaab2]429 usb_log_debug("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os);
[0e7380f]430 XHCI_REG_SET(hc->legsup, XHCI_LEGSUP_SEM_OS, 1);
[4d28d86]431 for (int i = 0; i <= (XHCI_LEGSUP_BIOS_TIMEOUT_US / XHCI_LEGSUP_POLLING_DELAY_1MS); i++) {
[defaab2]432 usb_log_debug("LEGSUP: elapsed: %i ms, bios: %x, os: %x", i,
[e6b0dba]433 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS),
434 XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS));
435 if (XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_BIOS) == 0) {
[0e7380f]436 return XHCI_REG_RD(hc->legsup, XHCI_LEGSUP_SEM_OS) == 1 ? EOK : EIO;
[e6b0dba]437 }
[c9d905f]438 async_usleep(XHCI_LEGSUP_POLLING_DELAY_1MS);
[e6b0dba]439 }
[a1732929]440 usb_log_error("BIOS did not release XHCI legacy hold!");
[e6b0dba]441
[91ca111]442 return ENOTSUP;
[cb89430]443}
444
[eb928c4]445/**
[665368c]446 * Ask the xHC to reset its state. Implements the reset sequence.
[eb928c4]447 */
[cb89430]448static int hc_reset(xhci_hc_t *hc)
449{
[0e7380f]450 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
451 return ETIMEOUT;
452
[cb89430]453 /* Stop the HC: set R/S to 0 */
454 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1);
455
[0e7380f]456 /* Wait until the HC is halted - it shall take at most 16 ms */
[8033f89]457 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_HCH),
458 XHCI_REG_MASK(XHCI_OP_HCH)))
[0e7380f]459 return ETIMEOUT;
[cb89430]460
461 /* Reset */
462 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1);
463
464 /* Wait until the reset is complete */
[0e7380f]465 if (xhci_reg_wait(&hc->op_regs->usbcmd, XHCI_REG_MASK(XHCI_OP_HCRST), 0))
466 return ETIMEOUT;
[cb89430]467
468 return EOK;
469}
470
471/**
472 * Initialize the HC: section 4.2
473 */
[e4d7363]474int hc_start(xhci_hc_t *hc, bool irq)
[cb89430]475{
476 int err;
477
478 if ((err = hc_reset(hc)))
479 return err;
480
[0e7380f]481 if (xhci_reg_wait(&hc->op_regs->usbsts, XHCI_REG_MASK(XHCI_OP_CNR), 0))
482 return ETIMEOUT;
[cb89430]483
[b80c1ab]484 uint64_t dcbaaptr = hc->dcbaa_dma.phys;
[cb89430]485 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr));
486 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr));
[15f8079]487 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
[cb89430]488
[fb28cde]489 uintptr_t crcr;
490 xhci_trb_ring_reset_dequeue_state(&hc->cr.trb_ring, &crcr);
[4abb134]491 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crcr));
492 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crcr));
[cb89430]493
[665368c]494 XHCI_REG_SET(hc->op_regs, XHCI_OP_EWE, 1);
495
[cb89430]496 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
497 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
[b80c1ab]498 uint64_t erdp = hc->event_ring.dequeue_ptr;
[12fba858]499 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erdp));
500 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[b80c1ab]501 uint64_t erstptr = hc->event_ring.erst.phys;
[cb89430]502 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr));
503 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr));
504
505 if (irq) {
506 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1);
507 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1);
508 }
509
[503086d8]510 XHCI_REG_SET(hc->op_regs, XHCI_OP_HSEE, 1);
511
[cb89430]512 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1);
513
[05770666]514 xhci_rh_startup(&hc->rh);
[dcf0597]515
[cb89430]516 return EOK;
517}
518
[ab5a0830]519/**
520 * Used only when polling. Shall supplement the irq_commands.
521 */
[32fb6bce]522int hc_status(bus_t *bus, uint32_t *status)
[5cbccd4]523{
[32fb6bce]524 xhci_hc_t *hc = bus_to_hc(bus);
[ab5a0830]525 int ip = XHCI_REG_RD(hc->rt_regs->ir, XHCI_INTR_IP);
526 if (ip) {
527 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
528 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK);
529 XHCI_REG_WR(hc->rt_regs->ir, XHCI_INTR_IP, 1);
530
531 /* interrupt handler expects status from irq_commands, which is
532 * in xhci order. */
533 *status = host2xhci(32, *status);
534 }
[62ba2cbe]535
[defaab2]536 usb_log_debug("Polled status: %x", *status);
[cb89430]537 return EOK;
538}
539
[665368c]540static int xhci_handle_mfindex_wrap_event(xhci_hc_t *hc, xhci_trb_t *trb)
541{
[94e9c29]542 struct timeval tv;
543 getuptime(&tv);
[8033f89]544 usb_log_debug("Microframe index wrapped (@%lu.%li, %"PRIu64" total).",
545 tv.tv_sec, tv.tv_usec, hc->wrap_count);
[94e9c29]546 hc->wrap_time = ((uint64_t) tv.tv_sec) * 1000000 + ((uint64_t) tv.tv_usec);
[665368c]547 ++hc->wrap_count;
548 return EOK;
549}
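/*
 * Assuming the standard 14-bit MFINDEX register counting 125 us microframes,
 * this wrap event fires roughly every 2 seconds (16384 * 125 us); wrap_time
 * and wrap_count together thus provide a coarse absolute microframe clock.
 */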
550
[472235a]551typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb);
552
[2c0564c]553/**
554 * These events are handled by a separate event handling fibril.
555 */
[472235a]556static event_handler event_handlers [] = {
[629255a]557 [XHCI_TRB_TYPE_TRANSFER_EVENT] = &xhci_handle_transfer_event,
[2c0564c]558};
559
560/**
561 * These events are handled directly in the interrupt handler, thus they must
562 * not block waiting for another interrupt.
563 */
564static event_handler event_handlers_fast [] = {
565 [XHCI_TRB_TYPE_COMMAND_COMPLETION_EVENT] = &xhci_handle_command_completion,
[665368c]566 [XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT] = &xhci_handle_mfindex_wrap_event,
[472235a]567};
568
[2c0564c]569static int hc_handle_event(xhci_hc_t *hc, xhci_trb_t *trb)
570{
571 const unsigned type = TRB_TYPE(*trb);
572
573 if (type < ARRAY_SIZE(event_handlers_fast) && event_handlers_fast[type])
574 return event_handlers_fast[type](hc, trb);
575
576 if (type < ARRAY_SIZE(event_handlers) && event_handlers[type])
577 return xhci_sw_ring_enqueue(&hc->sw_ring, trb);
578
[047fbc8]579 if (type == XHCI_TRB_TYPE_PORT_STATUS_CHANGE_EVENT)
580 return xhci_sw_ring_enqueue(&hc->rh.event_ring, trb);
581
[2c0564c]582 return ENOTSUP;
583}
584
585static int event_worker(void *arg)
[7ee5408]586{
[2c0564c]587 int err;
588 xhci_trb_t trb;
589 xhci_hc_t * const hc = arg;
590 assert(hc);
591
592 while (xhci_sw_ring_dequeue(&hc->sw_ring, &trb) != EINTR) {
593 const unsigned type = TRB_TYPE(trb);
[472235a]594
[2c0564c]595 if ((err = event_handlers[type](hc, &trb)))
596 usb_log_error("Failed to handle event: %s", str_error(err));
597 }
598
599 // TODO: completion_complete
600 fibril_mutex_lock(&hc->event_fibril_completion.guard);
601 hc->event_fibril_completion.active = false;
[047fbc8]602 fibril_condvar_broadcast(&hc->event_fibril_completion.cv);
[2c0564c]603 fibril_mutex_unlock(&hc->event_fibril_completion.guard);
604
605 return EOK;
[7ee5408]606}
607
[eb928c4]608/**
609 * Dequeue from event ring and handle dequeued events.
610 *
611 * As there can be events that block waiting for subsequent events,
612 * we solve this problem by first copying the event TRBs from the event ring,
613 * then asserting EHB, and only then handling the events.
614 *
615 * Whenever the event handling blocks, it switches fibrils, and an incoming
616 * IPC notification will create a new event handling fibril for us.
617 */
[8033f89]618static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring,
619 xhci_interrupter_regs_t *intr)
[62ba2cbe]620{
[cb89430]621 int err;
[472235a]622
[f3baab1]623 xhci_trb_t trb;
624 hc->event_handler = fibril_get_id();
[e50bdd92]625
[f3baab1]626 while ((err = xhci_event_ring_dequeue(event_ring, &trb)) != ENOENT) {
[2c0564c]627 if ((err = hc_handle_event(hc, &trb)) != EOK) {
628 usb_log_error("Failed to handle event in interrupt: %s", str_error(err));
[adb4e683]629 }
[f543804]630
631 uint64_t erdp = hc->event_ring.dequeue_ptr;
632 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
633 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[cb89430]634 }
635
[f3baab1]636 hc->event_handler = 0;
637
[adb4e683]638 /* Update the ERDP to make room in the ring. */
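/*
 * Presumably the EHB (Event Handler Busy) bit ORed in below is RW1C: the
 * controller sets it when it raises the interrupt, and writing it back as 1
 * clears it, signalling that event processing on this interrupter is done.
 */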
[12fba858]639 uint64_t erdp = hc->event_ring.dequeue_ptr;
[f543804]640 erdp |= XHCI_REG_MASK(XHCI_INTR_ERDP_EHB);
[12fba858]641 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erdp));
642 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erdp));
[adb4e683]643
[472235a]644 usb_log_debug2("Event ring run finished.");
[cb89430]645}
646
[eb928c4]647/**
648 * Handle an interrupt request from xHC. Resolve all situations that trigger an
649 * interrupt separately.
650 *
651 * Note that all RW1C bits in USBSTS register are cleared at the time of
652 * handling the interrupt in irq_code. This method is the top-half.
653 *
654 * @param status contents of USBSTS register at the time of the interrupt.
655 */
[32fb6bce]656void hc_interrupt(bus_t *bus, uint32_t status)
[cb89430]657{
[32fb6bce]658 xhci_hc_t *hc = bus_to_hc(bus);
[ab5a0830]659 status = xhci2host(32, status);
[aee352c]660
[cb89430]661 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
662 usb_log_error("Host controller error occured. Bad things gonna happen...");
[ab5a0830]663 status &= ~XHCI_REG_MASK(XHCI_OP_HSE);
[cb89430]664 }
665
666 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
[472235a]667 usb_log_debug2("Event interrupt, running the event ring.");
[ab5a0830]668 hc_run_event_ring(hc, &hc->event_ring, &hc->rt_regs->ir[0]);
669 status &= ~XHCI_REG_MASK(XHCI_OP_EINT);
[cb89430]670 }
[275f529]671
[cb89430]672 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) {
[8033f89]673 usb_log_error("Save/Restore error occured. WTF, "
674 "S/R mechanism not implemented!");
[ab5a0830]675 status &= ~XHCI_REG_MASK(XHCI_OP_SRE);
676 }
677
[fb154e13]678 /* According to Note on p. 302, we may safely ignore the PCD bit. */
679 status &= ~XHCI_REG_MASK(XHCI_OP_PCD);
680
[ab5a0830]681 if (status) {
[8033f89]682 usb_log_error("Non-zero status after interrupt handling (%08x) "
683 " - missing something?", status);
[cb89430]684 }
685}
686
[eb928c4]687/**
688 * Tear down all in-memory structures.
689 */
[e4d7363]690void hc_fini(xhci_hc_t *hc)
[cb89430]691{
[2c0564c]692 xhci_sw_ring_stop(&hc->sw_ring);
693
694 // TODO: completion_wait
695 fibril_mutex_lock(&hc->event_fibril_completion.guard);
696 while (hc->event_fibril_completion.active)
[8033f89]697 fibril_condvar_wait(&hc->event_fibril_completion.cv,
698 &hc->event_fibril_completion.guard);
[2c0564c]699 fibril_mutex_unlock(&hc->event_fibril_completion.guard);
700 xhci_sw_ring_fini(&hc->sw_ring);
701
[e6b9182]702 xhci_bus_fini(&hc->bus);
[cb89430]703 xhci_event_ring_fini(&hc->event_ring);
[b60944b]704 xhci_scratchpad_free(hc);
705 dma_buffer_free(&hc->dcbaa_dma);
[c46c356]706 xhci_fini_commands(hc);
[d32d51d]707 xhci_rh_fini(&hc->rh);
[20eaa82]708 pio_disable(hc->reg_base, RNGSZ(hc->mmio_range));
[837581fd]709 usb_log_info("Finalized.");
[62ba2cbe]710}
711
[51c1d500]712unsigned hc_speed_to_psiv(usb_speed_t speed)
713{
714 assert(speed < ARRAY_SIZE(usb_speed_to_psiv));
715 return usb_speed_to_psiv[speed];
716}
717
[eb928c4]718/**
719 * Ring an xHC Doorbell. Implements section 4.7.
720 */
[708d8fcd]721void hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target)
[a0be5d0]722{
723 assert(hc);
724 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7));
725 pio_write_32(&hc->db_arry[doorbell], v);
[2896ff6]726 usb_log_debug2("Ringing doorbell %d (target: %d)", doorbell, target);
[a0be5d0]727}
[5cbccd4]728
[51c1d500]729/**
730 * Return the Device Context Index (DCI) of an endpoint.
731 */
732static uint8_t endpoint_dci(xhci_endpoint_t *ep)
733{
734 return (2 * ep->base.endpoint) +
735 (ep->base.transfer_type == USB_TRANSFER_CONTROL
736 || ep->base.direction == USB_DIRECTION_IN);
737}
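/*
 * For illustration: the default control endpoint 0 maps to DCI 1, a bulk OUT
 * endpoint 2 to DCI 4 and a bulk IN endpoint 2 to DCI 5 - matching the
 * Device Context layout, in which index 0 holds the slot context and every
 * endpoint number owns an OUT/IN pair of entries.
 */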
738
739void hc_ring_ep_doorbell(xhci_endpoint_t *ep, uint32_t stream_id)
740{
741 xhci_device_t * const dev = xhci_ep_to_dev(ep);
742 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
743 const uint8_t dci = endpoint_dci(ep);
744 const uint32_t target = (stream_id << 16) | (dci & 0x1ff);
745 hc_ring_doorbell(hc, dev->slot_id, target);
746}
747
[eb928c4]748/**
[7e5a12b]749 * Issue an Enable Slot command. Allocate memory for the slot and fill the
750 * DCBAA with the newly created slot.
[eb928c4]751 */
[7e5a12b]752int hc_enable_slot(xhci_device_t *dev)
[8ea7459]753{
754 int err;
[7e5a12b]755 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
756
757 /* Prepare memory for the context */
[7ec7b7e]758 if ((err = dma_buffer_alloc(&dev->dev_ctx, XHCI_DEVICE_CTX_SIZE(hc))))
[7e5a12b]759 return err;
[7ec7b7e]760 memset(dev->dev_ctx.virt, 0, XHCI_DEVICE_CTX_SIZE(hc));
[7e5a12b]761
762 /* Get the slot number */
[8ea7459]763 xhci_cmd_t cmd;
[c3d926f3]764 xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
[8ea7459]765
[7e5a12b]766 err = xhci_cmd_sync(hc, &cmd);
[8ea7459]767
[7e5a12b]768 /* Link them together */
769 if (err == EOK) {
770 dev->slot_id = cmd.slot_id;
771 hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
[c3d926f3]772 }
[8ea7459]773
774 xhci_cmd_fini(&cmd);
[abb5d08]775
776 if (err)
777 dma_buffer_free(&dev->dev_ctx);
778
[c3d926f3]779 return err;
[8ea7459]780}
781
[eb928c4]782/**
783 * Issue a Disable Slot command for a slot occupied by device.
[7e5a12b]784 * Frees the device context.
[eb928c4]785 */
[7e5a12b]786int hc_disable_slot(xhci_device_t *dev)
[f270ecb]787{
[9620a54]788 int err;
[7e5a12b]789 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[9620a54]790
791 if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = dev->slot_id))) {
792 return err;
793 }
794
795 /* Free the device context. */
796 hc->dcbaa[dev->slot_id] = 0;
[b80c1ab]797 dma_buffer_free(&dev->dev_ctx);
[9620a54]798
799 /* Mark the slot as invalid. */
800 dev->slot_id = 0;
801
802 return EOK;
[f270ecb]803}
804
[eb928c4]805/**
806 * Prepare an empty Endpoint Input Context inside a dma buffer.
807 */
[a4e7e6e1]808static int create_configure_ep_input_ctx(xhci_device_t *dev, dma_buffer_t *dma_buf)
[b724494]809{
[7ec7b7e]810 const xhci_hc_t * hc = bus_to_hc(dev->base.bus);
811 const int err = dma_buffer_alloc(dma_buf, XHCI_INPUT_CTX_SIZE(hc));
[b80c1ab]812 if (err)
813 return err;
[b724494]814
[b80c1ab]815 xhci_input_ctx_t *ictx = dma_buf->virt;
[7ec7b7e]816 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
[b724494]817
[e76c0ea]818 // Quoting sec. 4.6.5 and 4.6.6: A1, D0, D1 are down (already zeroed), A0 is up.
[7ec7b7e]819 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 0);
820 xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
821 xhci_setup_slot_context(dev, slot_ctx);
[001778c]822
[b724494]823 return EOK;
824}
825
[eb928c4]826/**
827 * Initialize a device, assigning it an address. Implements section 4.3.4.
828 *
829 * @param dev Device to assign an address to (not yet configured)
830 */
[51c1d500]831int hc_address_device(xhci_device_t *dev)
[b724494]832{
[0206d35]833 int err = ENOMEM;
[a4e7e6e1]834 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[51c1d500]835 xhci_endpoint_t *ep0 = xhci_endpoint_get(dev->base.endpoints[0]);
[0206d35]836
[2cf28b9]837 /* Although we have the precise PSIV value on devices of tier 1,
838 * we have to rely on reverse mapping on others. */
[a75f9cbc]839 if (!usb_speed_to_psiv[dev->base.speed]) {
[8033f89]840 usb_log_error("Device reported an USB speed (%s) that cannot be mapped "
841 "to HC port speed.", usb_str_speed(dev->base.speed));
[2cf28b9]842 return EINVAL;
843 }
844
[b724494]845 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]846 dma_buffer_t ictx_dma_buf;
[7e5a12b]847 if ((err = create_configure_ep_input_ctx(dev, &ictx_dma_buf)))
848 return err;
[b80c1ab]849 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[b724494]850
851 /* Copy endpoint 0 context and set A1 flag. */
[7ec7b7e]852 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), 1);
[51c1d500]853 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, 1);
[7ec7b7e]854 xhci_setup_endpoint_context(ep0, ep_ctx);
[51c1d500]855
[69b2dfee]856 /* Address device needs Ctx entries set to 1 only */
[7ec7b7e]857 xhci_slot_ctx_t *slot_ctx = XHCI_GET_SLOT_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc);
[69b2dfee]858 XHCI_SLOT_CTX_ENTRIES_SET(*slot_ctx, 1);
859
[c3d926f3]860 /* Issue Address Device command. */
[8033f89]861 if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE,
862 .slot_id = dev->slot_id,
863 .input_ctx = ictx_dma_buf
864 )))
[7e5a12b]865 return err;
[b724494]866
[7ec7b7e]867 xhci_device_ctx_t *device_ctx = dev->dev_ctx.virt;
868 dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(*XHCI_GET_SLOT_CTX(device_ctx, hc));
[defaab2]869 usb_log_debug("Obtained USB address: %d.", dev->base.address);
[0206d35]870
[b724494]871 return EOK;
872}
873
[eb928c4]874/**
875 * Issue a Configure Device command for a device in slot.
876 *
877 * @param dev The device to be configured.
878 */
[a4e7e6e1]879int hc_configure_device(xhci_device_t *dev)
[b724494]880{
[a4e7e6e1]881 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
882
[b724494]883 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]884 dma_buffer_t ictx_dma_buf;
[a4e7e6e1]885 const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
[928afc8d]886 if (err)
[c3d926f3]887 return err;
[b724494]888
[8033f89]889 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
890 .slot_id = dev->slot_id,
891 .input_ctx = ictx_dma_buf
892 );
[b724494]893}
894
[eb928c4]895/**
896 * Issue a Deconfigure Device command for a device in slot.
897 *
[a4e7e6e1]898 * @param dev The device to be deconfigured
[eb928c4]899 */
[a4e7e6e1]900int hc_deconfigure_device(xhci_device_t *dev)
[b724494]901{
[a4e7e6e1]902 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
903
[b724494]904 /* Issue configure endpoint command (sec 4.3.5) with the DC flag. */
[8033f89]905 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
906 .slot_id = dev->slot_id,
907 .deconfigure = true
908 );
[b724494]909}
910
[eb928c4]911/**
912 * Instruct xHC to add an endpoint with supplied endpoint context.
913 *
[a4e7e6e1]914 * @param ep The endpoint to be added
917 */
[51c1d500]918int hc_add_endpoint(xhci_endpoint_t *ep)
[b724494]919{
[51c1d500]920 xhci_device_t * const dev = xhci_ep_to_dev(ep);
921 const unsigned dci = endpoint_dci(ep);
922
[b724494]923 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]924 dma_buffer_t ictx_dma_buf;
[a4e7e6e1]925 const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
[928afc8d]926 if (err)
[c3d926f3]927 return err;
[b724494]928
[b80c1ab]929 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[001778c]930
[a4e7e6e1]931 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[51c1d500]932 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[7ec7b7e]933
[51c1d500]934 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
935 xhci_setup_endpoint_context(ep, ep_ctx);
[7ec7b7e]936
[8033f89]937 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
938 .slot_id = dev->slot_id,
939 .input_ctx = ictx_dma_buf
940 );
[b724494]941}
942
[eb928c4]943/**
944 * Instruct xHC to drop an endpoint.
945 *
[a4e7e6e1]946 * @param ep The endpoint to be dropped
[eb928c4]948 */
[51c1d500]949int hc_drop_endpoint(xhci_endpoint_t *ep)
[b724494]950{
[51c1d500]951 xhci_device_t * const dev = xhci_ep_to_dev(ep);
952 const unsigned dci = endpoint_dci(ep);
953
[b724494]954 /* Issue configure endpoint command (sec 4.3.5). */
[b80c1ab]955 dma_buffer_t ictx_dma_buf;
[a4e7e6e1]956 const int err = create_configure_ep_input_ctx(dev, &ictx_dma_buf);
[928afc8d]957 if (err)
[c3d926f3]958 return err;
[b724494]959
[7ec7b7e]960 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[b80c1ab]961 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[51c1d500]962 XHCI_INPUT_CTRL_CTX_DROP_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
[b724494]963
[8033f89]964 return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
965 .slot_id = dev->slot_id,
966 .input_ctx = ictx_dma_buf
967 );
[b724494]968}
969
[eb928c4]970/**
971 * Instruct xHC to update information about an endpoint, using supplied
972 * endpoint context.
973 *
[a4e7e6e1]974 * @param ep The endpoint whose context is to be updated
977 */
[51c1d500]978int hc_update_endpoint(xhci_endpoint_t *ep)
[306a36d]979{
[51c1d500]980 xhci_device_t * const dev = xhci_ep_to_dev(ep);
981 const unsigned dci = endpoint_dci(ep);
982
[306a36d]983 dma_buffer_t ictx_dma_buf;
[7ec7b7e]984 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
985
986 const int err = dma_buffer_alloc(&ictx_dma_buf, XHCI_INPUT_CTX_SIZE(hc));
[306a36d]987 if (err)
988 return err;
989
990 xhci_input_ctx_t *ictx = ictx_dma_buf.virt;
[7ec7b7e]991 memset(ictx, 0, XHCI_INPUT_CTX_SIZE(hc));
[306a36d]992
[51c1d500]993 XHCI_INPUT_CTRL_CTX_ADD_SET(*XHCI_GET_CTRL_CTX(ictx, hc), dci);
994 xhci_ep_ctx_t *ep_ctx = XHCI_GET_EP_CTX(XHCI_GET_DEVICE_CTX(ictx, hc), hc, dci);
995 xhci_setup_endpoint_context(ep, ep_ctx);
[306a36d]996
[8033f89]997 return xhci_cmd_sync_inline(hc, EVALUATE_CONTEXT,
998 .slot_id = dev->slot_id,
999 .input_ctx = ictx_dma_buf
1000 );
[306a36d]1001}
1002
[30fc56f]1003/**
1004 * Instruct xHC to stop running a transfer ring on an endpoint.
1005 *
[a4e7e6e1]1006 * @param ep The endpoint whose transfer ring is to be stopped
[30fc56f]1008 */
[51c1d500]1009int hc_stop_endpoint(xhci_endpoint_t *ep)
[30fc56f]1010{
[51c1d500]1011 xhci_device_t * const dev = xhci_ep_to_dev(ep);
1012 const unsigned dci = endpoint_dci(ep);
[a4e7e6e1]1013 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[8033f89]1014 return xhci_cmd_sync_inline(hc, STOP_ENDPOINT,
1015 .slot_id = dev->slot_id,
1016 .endpoint_id = dci
1017 );
[30fc56f]1018}
1019
[feabe163]1020/**
1021 * Instruct xHC to reset halted endpoint.
1022 *
[a4e7e6e1]1023 * @param ep The halted endpoint to be reset
[feabe163]1025 */
[51c1d500]1026int hc_reset_endpoint(xhci_endpoint_t *ep)
[feabe163]1027{
[51c1d500]1028 xhci_device_t * const dev = xhci_ep_to_dev(ep);
1029 const unsigned dci = endpoint_dci(ep);
[a4e7e6e1]1030 xhci_hc_t * const hc = bus_to_hc(dev->base.bus);
[8033f89]1031 return xhci_cmd_sync_inline(hc, RESET_ENDPOINT,
1032 .slot_id = dev->slot_id,
1033 .endpoint_id = dci
1034 );
[51c1d500]1035}
1036
1037/**
1038 * Reset a ring position in both software and hardware.
1039 *
1040 * @param ep The endpoint whose ring (for the given stream_id) is to be reset
1041 */
1042int hc_reset_ring(xhci_endpoint_t *ep, uint32_t stream_id)
1043{
1044 xhci_device_t * const dev = xhci_ep_to_dev(ep);
1045 const unsigned dci = endpoint_dci(ep);
1046 uintptr_t addr;
1047
1048 xhci_trb_ring_t *ring = xhci_endpoint_get_ring(ep, stream_id);
1049 xhci_trb_ring_reset_dequeue_state(ring, &addr);
1050
1051 xhci_hc_t * const hc = bus_to_hc(endpoint_get_bus(&ep->base));
1052 return xhci_cmd_sync_inline(hc, SET_TR_DEQUEUE_POINTER,
[8033f89]1053 .slot_id = dev->slot_id,
1054 .endpoint_id = dci,
1055 .stream_id = stream_id,
1056 .dequeue_ptr = addr,
1057 );
[feabe163]1058}
1059
[5cbccd4]1060/**
1061 * @}
1062 */