source: mainline/uspace/drv/bus/usb/xhci/commands.c

Last change on this file was 7c3fb9b, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Fix block comment formatting (ccheck).

  • Property mode set to 100644
File size: 22.2 KB
RevLine 
[c9c0e41]1/*
[e0a5d4c]2 * Copyright (c) 2018 Jaroslav Jindrak, Ondrej Hlavaty, Petr Manek, Michal Staruch, Jan Hrach
[c9c0e41]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
[8db42f7]42#include "hw_struct/context.h"
[c9c0e41]43#include "hw_struct/trb.h"
44
/*
 * Helpers that OR individual fields into a (previously zeroed) TRB.
 * Field positions follow xHCI spec, section 6.4.3 (command TRBs).
 */
#define TRB_SET_TSP(trb, tsp) (trb).control |= host2xhci(32, (((tsp) & 0x1) << 9))
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/*
 * Endpoint ID is a 5-bit field (bits 20:16, xHCI 6.4.3.8/6.4.3.9). The
 * previous mask 0x5 was non-contiguous and silently dropped bits 1, 3
 * and 4 of the endpoint ID, corrupting most endpoint IDs > 1.
 */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
/* Parenthesize the argument — macro hygiene (was: (speed & 0xF)). */
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, (((speed) & 0xF)) << 16)
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
/* Input context pointer must be 16-byte aligned; low 4 bits are flags. */
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
59
[c3d926f3]60/* Control functions */
61
/** Return the command ring embedded in the host controller structure. */
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}
67
[eb928c4]68/**
69 * Initialize the command subsystem. Allocates the comand ring.
70 *
71 * Does not configure the CR pointer to the hardware, because the xHC will be
72 * reset before starting.
73 */
[45457265]74errno_t xhci_init_commands(xhci_hc_t *hc)
[889146e]75{
76 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[45457265]77 errno_t err;
[889146e]78
[998773d]79 if ((err = xhci_trb_ring_init(&cr->trb_ring, 0)))
[889146e]80 return err;
[110d795]81
[889146e]82 fibril_mutex_initialize(&cr->guard);
83 fibril_condvar_initialize(&cr->state_cv);
84 fibril_condvar_initialize(&cr->stopped_cv);
[74b852b]85
[889146e]86 list_initialize(&cr->cmd_list);
87
[110d795]88 return EOK;
89}
90
[eb928c4]91/**
92 * Finish the command subsystem. Stops the hardware from running commands, then
93 * deallocates the ring.
94 */
[c46c356]95void xhci_fini_commands(xhci_hc_t *hc)
96{
97 assert(hc);
[eb928c4]98 xhci_stop_command_ring(hc);
99
100 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
101
102 fibril_mutex_lock(&cr->guard);
103 xhci_trb_ring_fini(&cr->trb_ring);
104 fibril_mutex_unlock(&cr->guard);
[c46c356]105}
106
[eb928c4]107/**
108 * Initialize a command structure for the given command.
109 */
[c3d926f3]110void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
[110d795]111{
[c3d926f3]112 memset(cmd, 0, sizeof(*cmd));
[110d795]113
[c3d926f3]114 link_initialize(&cmd->_header.link);
[110d795]115
[c3d926f3]116 fibril_mutex_initialize(&cmd->_header.completed_mtx);
117 fibril_condvar_initialize(&cmd->_header.completed_cv);
[04df063]118
[c3d926f3]119 cmd->_header.cmd = type;
[4688350b]120}
121
[eb928c4]122/**
123 * Finish the command structure. Some command invocation includes allocating
124 * a context structure. To have the convenience in calling commands, this
125 * method deallocates all resources.
126 */
[c3d926f3]127void xhci_cmd_fini(xhci_cmd_t *cmd)
[4688350b]128{
[c3d926f3]129 list_remove(&cmd->_header.link);
[110d795]130
[b80c1ab]131 dma_buffer_free(&cmd->input_ctx);
132 dma_buffer_free(&cmd->bandwidth_ctx);
[9304b66]133
[c3d926f3]134 if (cmd->_header.async) {
135 free(cmd);
136 }
[110d795]137}
138
[eb928c4]139/**
140 * Find a command issued by TRB at @c phys inside the command list.
141 *
142 * Call with guard locked only.
143 */
[889146e]144static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
[110d795]145{
[889146e]146 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
147 assert(fibril_mutex_is_locked(&cr->guard));
[74b852b]148
[889146e]149 link_t *cmd_link = list_first(&cr->cmd_list);
[110d795]150
[2fa43d1]151 while (cmd_link != NULL) {
[ae3a941]152 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t,
153 _header.link);
[2fa43d1]154
[c3d926f3]155 if (cmd->_header.trb_phys == phys)
[2fa43d1]156 break;
157
[889146e]158 cmd_link = list_next(cmd_link, &cr->cmd_list);
[2fa43d1]159 }
160
[3bacee1]161 return cmd_link ?
162 list_get_instance(cmd_link, xhci_cmd_t, _header.link) :
163 NULL;
[110d795]164}
165
[d2c3dcd]166static void cr_set_state(xhci_cmd_ring_t *cr, xhci_cr_state_t state)
167{
168 assert(fibril_mutex_is_locked(&cr->guard));
169
170 cr->state = state;
[ae3a941]171 if (state == XHCI_CR_STATE_OPEN || state == XHCI_CR_STATE_CLOSED)
[d2c3dcd]172 fibril_condvar_broadcast(&cr->state_cv);
173}
174
[45457265]175static errno_t wait_for_ring_open(xhci_cmd_ring_t *cr)
[d2c3dcd]176{
177 assert(fibril_mutex_is_locked(&cr->guard));
178
179 while (true) {
180 switch (cr->state) {
181 case XHCI_CR_STATE_CHANGING:
182 case XHCI_CR_STATE_FULL:
183 fibril_condvar_wait(&cr->state_cv, &cr->guard);
184 break;
185 case XHCI_CR_STATE_OPEN:
186 return EOK;
187 case XHCI_CR_STATE_CLOSED:
188 return ENAK;
189 }
190 }
191}
192
/**
 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
 * Register the command as waiting for completion inside the command list.
 *
 * May block: first until the ring is open, then (repeatedly) while the TRB
 * ring itself is full. Returns ENAK if the ring is or becomes closed.
 */
static inline errno_t enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(cmd);

	fibril_mutex_lock(&cr->guard);

	if (wait_for_ring_open(cr)) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_debug("Sending command %s",
	    xhci_trb_str_type(TRB_TYPE(cmd->_header.trb)));

	/* Register before ringing the doorbell, so the completion handler
	 * can find this command by its TRB physical address. */
	list_append(&cmd->_header.link, &cr->cmd_list);

	errno_t err = EOK;
	while (err == EOK) {
		err = xhci_trb_ring_enqueue(&cr->trb_ring,
		    &cmd->_header.trb, &cmd->_header.trb_phys);
		if (err != EAGAIN)
			break;

		/* TRB ring full — mark it so and sleep until a completion
		 * reopens it, then retry the enqueue. */
		cr_set_state(cr, XHCI_CR_STATE_FULL);
		err = wait_for_ring_open(cr);
	}

	if (err == EOK)
		hc_ring_doorbell(hc, 0, 0);

	fibril_mutex_unlock(&cr->guard);

	return err;
}
232
/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr_set_state(cr, XHCI_CR_STATE_CLOSED);

	/* Request the HC to stop the command ring (CS bit). */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);

	/* CRR stays asserted until the HC actually stops; the completion
	 * handler broadcasts stopped_cv on COMMAND_RING_STOPPED. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
253
/**
 * Mark the command ring as stopped. NAK new commands, abort running, do not
 * touch the HC as it's probably broken.
 */
void xhci_nuke_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	fibril_mutex_lock(&cr->guard);
	// Prevent others from starting CR again.
	cr_set_state(cr, XHCI_CR_STATE_CLOSED);

	/* Still request the stop (CS bit), but unlike
	 * xhci_stop_command_ring(), do not wait for acknowledgement. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	fibril_mutex_unlock(&cr->guard);
}
268
/**
 * Mark the command ring as working again.
 */
void xhci_start_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	fibril_mutex_lock(&cr->guard);
	/* Reopen the ring — wakes fibrils blocked in wait_for_ring_open().
	 * (Previous comment was a copy-paste from the stop path.) */
	cr_set_state(cr, XHCI_CR_STATE_OPEN);
	fibril_mutex_unlock(&cr->guard);
}
280
/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	/* Set the Command Abort bit; the HC answers with a
	 * COMMAND_RING_STOPPED completion event. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CA, 1);
}
289
/*
 * Human-readable names of command completion codes, indexed by the numeric
 * code value. Entries left NULL (and index 30) are reserved by the spec.
 */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
332
[eb928c4]333/**
334 * Report an error according to command completion code.
335 */
[4fa5342]336static void report_error(int code)
337{
338 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
339 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
340 else
[ae3a941]341 usb_log_error("Command resulted in reserved or "
342 "vendor specific error.");
[4fa5342]343}
344
[eb928c4]345/**
346 * Handle a command completion. Feed the fibril waiting for result.
347 *
348 * @param trb The COMMAND_COMPLETION TRB found in event ring.
349 */
[45457265]350errno_t xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
[c9c0e41]351{
[889146e]352 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[c3d926f3]353 assert(trb);
354
[889146e]355 fibril_mutex_lock(&cr->guard);
356
357 int code = TRB_GET_CODE(*trb);
358
359 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
[7c3fb9b]360 /*
361 * This can either mean that the ring is being stopped, or
[889146e]362 * a command was aborted. In either way, wake threads waiting
363 * on stopped_cv.
364 *
365 * Note that we need to hold mutex, because we must be sure the
366 * requesting thread is waiting inside the CV.
367 */
[defaab2]368 usb_log_debug("Command ring stopped.");
[889146e]369 fibril_condvar_broadcast(&cr->stopped_cv);
370 fibril_mutex_unlock(&cr->guard);
371 return EOK;
372 }
373
[d2c3dcd]374 const uint64_t phys = TRB_GET_PHYS(*trb);
375 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
376
377 if (cr->state == XHCI_CR_STATE_FULL)
378 cr_set_state(cr, XHCI_CR_STATE_OPEN);
379
[889146e]380 xhci_cmd_t *command = find_command(hc, phys);
[c3d926f3]381 if (command == NULL) {
[837581fd]382 usb_log_error("No command struct for completion event found.");
[c3d926f3]383
384 if (code != XHCI_TRBC_SUCCESS)
385 report_error(code);
386
387 return EOK;
388 }
[c058a388]389
[889146e]390 list_remove(&command->_header.link);
391
[3cbc138]392 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
393 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
394 code = XHCI_TRBC_SUCCESS;
395
[c3d926f3]396 command->status = code;
397 command->slot_id = TRB_GET_SLOT(*trb);
[c9c0e41]398
[8033f89]399 usb_log_debug("Completed command %s",
400 xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
[3cbc138]401
402 if (code != XHCI_TRBC_SUCCESS) {
403 report_error(code);
404 xhci_dump_trb(&command->_header.trb);
[c3d926f3]405 }
406
[889146e]407 fibril_mutex_unlock(&cr->guard);
408
[c3d926f3]409 fibril_mutex_lock(&command->_header.completed_mtx);
410 command->_header.completed = true;
411 fibril_condvar_broadcast(&command->_header.completed_cv);
412 fibril_mutex_unlock(&command->_header.completed_mtx);
413
414 if (command->_header.async) {
415 /* Free the command and other DS upon completion. */
416 xhci_cmd_fini(command);
417 }
418
419 return EOK;
420}
421
422/* Command-issuing functions */
423
[45457265]424static errno_t no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c3d926f3]425{
426 assert(hc);
427
428 xhci_trb_clean(&cmd->_header.trb);
429
430 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
[110d795]431
[eb928c4]432 return enqueue_command(hc, cmd);
[c9c0e41]433}
434
[45457265]435static errno_t enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9c0e41]436{
[c058a388]437 assert(hc);
438
[c3d926f3]439 xhci_trb_clean(&cmd->_header.trb);
[c9c0e41]440
[c3d926f3]441 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
[8033f89]442 cmd->_header.trb.control |=
443 host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
[110d795]444
[eb928c4]445 return enqueue_command(hc, cmd);
[5ac5eb1]446}
447
[45457265]448static errno_t disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[5ac5eb1]449{
[c058a388]450 assert(hc);
[110d795]451 assert(cmd);
[c058a388]452
[c3d926f3]453 xhci_trb_clean(&cmd->_header.trb);
[5ac5eb1]454
[c3d926f3]455 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
456 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]457
[eb928c4]458 return enqueue_command(hc, cmd);
[c9c0e41]459}
460
[45457265]461static errno_t address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[8db42f7]462{
[c058a388]463 assert(hc);
[110d795]464 assert(cmd);
[b80c1ab]465 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]466
[8db42f7]467 /**
468 * TODO: Requirements for this command:
469 * dcbaa[slot_id] is properly sized and initialized
470 * ictx has valids slot context and endpoint 0, all
471 * other should be ignored at this point (see section 4.6.5).
472 */
[04df063]473
[c3d926f3]474 xhci_trb_clean(&cmd->_header.trb);
[8db42f7]475
[1d758fc]476 const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
477 TRB_SET_ICTX(cmd->_header.trb, phys);
[8db42f7]478
479 /**
480 * Note: According to section 6.4.3.4, we can set the 9th bit
481 * of the control field of the trb (BSR) to 1 and then the xHC
482 * will not issue the SET_ADDRESS request to the USB device.
483 * This can be used to provide compatibility with legacy USB devices
484 * that require their device descriptor to be read before such request.
485 */
[c3d926f3]486 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
487 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[8db42f7]488
[eb928c4]489 return enqueue_command(hc, cmd);
[8db42f7]490}
491
[45457265]492static errno_t configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[665bf3c]493{
[c058a388]494 assert(hc);
[110d795]495 assert(cmd);
[c058a388]496
[c3d926f3]497 xhci_trb_clean(&cmd->_header.trb);
[665bf3c]498
[b724494]499 if (!cmd->deconfigure) {
500 /* If the DC flag is on, input context is not evaluated. */
[b80c1ab]501 assert(dma_buffer_is_set(&cmd->input_ctx));
[b724494]502
[1d758fc]503 const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
504 TRB_SET_ICTX(cmd->_header.trb, phys);
[b724494]505 }
[110d795]506
[c3d926f3]507 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
509 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
[665bf3c]510
[eb928c4]511 return enqueue_command(hc, cmd);
[665bf3c]512}
513
[45457265]514static errno_t evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9ce62ae]515{
[c058a388]516 assert(hc);
[110d795]517 assert(cmd);
[b80c1ab]518 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]519
[c9ce62ae]520 /**
521 * Note: All Drop Context flags of the input context shall be 0,
522 * all Add Context flags shall be initialize to indicate IDs
523 * of the contexts affected by the command.
524 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
525 */
[c3d926f3]526 xhci_trb_clean(&cmd->_header.trb);
[c9ce62ae]527
[1d758fc]528 const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
529 TRB_SET_ICTX(cmd->_header.trb, phys);
[c9ce62ae]530
[c3d926f3]531 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
532 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]533
[eb928c4]534 return enqueue_command(hc, cmd);
[c9ce62ae]535}
536
[45457265]537static errno_t reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]538{
[c058a388]539 assert(hc);
[110d795]540 assert(cmd);
[c058a388]541
[c3d926f3]542 xhci_trb_clean(&cmd->_header.trb);
[05aeee0e]543
[c3d926f3]544 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
[e7f21884]545 TRB_SET_TSP(cmd->_header.trb, cmd->tsp);
[c3d926f3]546 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
547 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]548
[eb928c4]549 return enqueue_command(hc, cmd);
[05aeee0e]550}
551
[45457265]552static errno_t stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]553{
[c058a388]554 assert(hc);
[110d795]555 assert(cmd);
[c058a388]556
[c3d926f3]557 xhci_trb_clean(&cmd->_header.trb);
[110d795]558
[c3d926f3]559 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
560 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
561 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
562 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[05aeee0e]563
[eb928c4]564 return enqueue_command(hc, cmd);
[c058a388]565}
[05aeee0e]566
[45457265]567static errno_t set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[0cabd10]568{
569 assert(hc);
570 assert(cmd);
571
[c3d926f3]572 xhci_trb_clean(&cmd->_header.trb);
[0cabd10]573
[c3d926f3]574 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
575 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
576 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
577 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
578 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
[0cabd10]579
[eb928c4]580 return enqueue_command(hc, cmd);
[0cabd10]581}
582
[45457265]583static errno_t reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c058a388]584{
585 assert(hc);
[110d795]586 assert(cmd);
[c058a388]587
[c3d926f3]588 xhci_trb_clean(&cmd->_header.trb);
[c058a388]589
[c3d926f3]590 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
591 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]592
[eb928c4]593 return enqueue_command(hc, cmd);
[05aeee0e]594}
595
[45457265]596static errno_t get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[60af4cdb]597{
598 assert(hc);
599 assert(cmd);
600
[c3d926f3]601 xhci_trb_clean(&cmd->_header.trb);
[60af4cdb]602
[1d758fc]603 const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
604 TRB_SET_ICTX(cmd->_header.trb, phys);
[60af4cdb]605
[c3d926f3]606 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
607 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
608 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
[60af4cdb]609
[eb928c4]610 return enqueue_command(hc, cmd);
[60af4cdb]611}
612
/* The table of command-issuing functions. */

/* Common signature of all command-issuing functions above. */
typedef errno_t (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/* Indexed by xhci_cmd_type_t; NULL marks commands not implemented. */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
634
/**
 * Try to abort currently processed command. This is tricky, because
 * calling fibril is not necessarily the one which issued the blocked command.
 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by
 * event, which is again handled in different fibril. but, once we go to sleep
 * on waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied by
 * COMMAND_ABORTED event.
 */
static errno_t try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	/* Ring already shut down — nothing to abort, NAK the caller. */
	if (cr->state == XHCI_CR_STATE_CLOSED) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	/* Another fibril is already performing the abort — let it. */
	if (cr->state == XHCI_CR_STATE_CHANGING) {
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("Timeout while waiting for command: "
	    "aborting current command.");

	/* Claim the abort for ourselves. */
	cr_set_state(cr, XHCI_CR_STATE_CHANGING);

	abort_command_ring(hc);

	/* Woken by the COMMAND_RING_STOPPED completion event (or timeout). */
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard,
	    XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/*
		 * 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("Command didn't abort.");

		cr_set_state(cr, XHCI_CR_STATE_CLOSED);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	/* Abort succeeded — reopen the ring and restart processing. */
	cr_set_state(cr, XHCI_CR_STATE_OPEN);

	fibril_mutex_unlock(&cr->guard);

	usb_log_error("Command ring stopped. Starting again.");
	hc_ring_doorbell(hc, 0, 0);

	return EOK;
}
699
/**
 * Wait, until the command is completed. The completion is triggered by
 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the
 * command in timely manner, we timeout. Note that we can't just return an
 * error after the timeout pass - it may be other command blocking the ring,
 * and ours can be completed afterwards. Therefore, it is not guaranteed that
 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting
 * until COMMAND_COMPLETION event arrives.
 */
static errno_t wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	errno_t rv = EOK;

	/* Completion events are delivered by the event-handler fibril;
	 * blocking it here could never be satisfied. */
	if (fibril_get_id() == hc->event_handler) {
		usb_log_error("Deadlock detected in waiting for command.");
		abort();
	}

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv,
		    &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/*
		 * The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			/* Drop the lock — the abort path takes cr->guard. */
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}
[2fa43d1]742
[eb928c4]743/**
744 * Issue command and block the current fibril until it is completed or timeout
745 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
[c3d926f3]746 */
[45457265]747errno_t xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c3d926f3]748{
749 assert(hc);
750 assert(cmd);
[2fa43d1]751
[45457265]752 errno_t err;
[c3d926f3]753
754 if (!cmd_handlers[cmd->_header.cmd]) {
755 /* Handler not implemented. */
756 return ENOTSUP;
[2fa43d1]757 }
[110d795]758
[c3d926f3]759 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
760 /* Command could not be issued. */
761 return err;
762 }
[110d795]763
[889146e]764 if ((err = wait_for_cmd_completion(hc, cmd))) {
765 /* Command failed. */
[c3d926f3]766 return err;
[665bf3c]767 }
[c362127]768
[e7e99bf]769 switch (cmd->status) {
770 case XHCI_TRBC_SUCCESS:
771 return EOK;
772 case XHCI_TRBC_USB_TRANSACTION_ERROR:
773 return ESTALL;
[feabe163]774 case XHCI_TRBC_RESOURCE_ERROR:
775 case XHCI_TRBC_BANDWIDTH_ERROR:
776 case XHCI_TRBC_NO_SLOTS_ERROR:
777 return ELIMIT;
778 case XHCI_TRBC_SLOT_NOT_ENABLED_ERROR:
779 return ENOENT;
[e7e99bf]780 default:
781 return EINVAL;
782 }
[c3d926f3]783}
[110d795]784
[eb928c4]785/**
786 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
787 * is a useful shorthand for issuing commands without out parameters.
[c3d926f3]788 */
[45457265]789errno_t xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c3d926f3]790{
[45457265]791 const errno_t err = xhci_cmd_sync(hc, cmd);
[c3d926f3]792 xhci_cmd_fini(cmd);
793
794 return err;
795}
796
[eb928c4]797/**
798 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
799 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
[c3d926f3]800 */
[45457265]801errno_t xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
[c3d926f3]802{
803 assert(hc);
804 assert(stack_cmd);
805
806 /* Save the command for later. */
807 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
808 if (!heap_cmd) {
809 return ENOMEM;
810 }
811
812 /* TODO: Is this good for the mutex and the condvar? */
813 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
814 heap_cmd->_header.async = true;
815
816 /* Issue the command. */
[45457265]817 errno_t err;
[c3d926f3]818
819 if (!cmd_handlers[heap_cmd->_header.cmd]) {
820 /* Handler not implemented. */
821 err = ENOTSUP;
822 goto err_heap_cmd;
[f711f06]823 }
[110d795]824
[c3d926f3]825 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
826 /* Command could not be issued. */
827 goto err_heap_cmd;
828 }
[4688350b]829
[110d795]830 return EOK;
[c9c0e41]831
[c3d926f3]832err_heap_cmd:
833 free(heap_cmd);
834 return err;
835}
[c9c0e41]836
837/**
838 * @}
839 */
Note: See TracBrowser for help on using the repository browser.