source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 17c5e62

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 17c5e62 was eb928c4, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: documentation & cleanup

Also, a simple refactoring to remove functions that only wraps another
functions unused anywhere else.

  • Property mode set to 100644
File size: 21.3 KB
RevLine 
[c9c0e41]1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
[8db42f7]42#include "hw_struct/context.h"
[c9c0e41]43#include "hw_struct/trb.h"
44
/*
 * Helpers to assemble fields of a command TRB. All setters OR into the
 * destination word, so the TRB must be zeroed (xhci_trb_clean) first.
 * Macro arguments are fully parenthesized to avoid expansion surprises.
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/* Endpoint ID (DCI) is a 5-bit field (bits 20:16), hence the 0x1F mask. */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
[c3d926f3]64/* Control functions */
65
/** Return the (single) command ring embedded in the host controller. */
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}
71
[eb928c4]72/**
73 * Initialize the command subsystem. Allocates the comand ring.
74 *
75 * Does not configure the CR pointer to the hardware, because the xHC will be
76 * reset before starting.
77 */
[889146e]78int xhci_init_commands(xhci_hc_t *hc)
79{
80 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
81 int err;
82
83 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
84 return err;
[110d795]85
[889146e]86 fibril_mutex_initialize(&cr->guard);
87 fibril_condvar_initialize(&cr->state_cv);
88 fibril_condvar_initialize(&cr->stopped_cv);
[74b852b]89
[889146e]90 list_initialize(&cr->cmd_list);
91
92 cr->state = XHCI_CR_STATE_OPEN;
[74b852b]93
[110d795]94 return EOK;
95}
96
[eb928c4]97/**
98 * Finish the command subsystem. Stops the hardware from running commands, then
99 * deallocates the ring.
100 */
[c46c356]101void xhci_fini_commands(xhci_hc_t *hc)
102{
103 assert(hc);
[eb928c4]104 xhci_stop_command_ring(hc);
105
106 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
107
108 fibril_mutex_lock(&cr->guard);
109 xhci_trb_ring_fini(&cr->trb_ring);
110 fibril_mutex_unlock(&cr->guard);
[c46c356]111}
112
[eb928c4]113/**
114 * Initialize a command structure for the given command.
115 */
[c3d926f3]116void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
[110d795]117{
[c3d926f3]118 memset(cmd, 0, sizeof(*cmd));
[110d795]119
[c3d926f3]120 link_initialize(&cmd->_header.link);
[110d795]121
[c3d926f3]122 fibril_mutex_initialize(&cmd->_header.completed_mtx);
123 fibril_condvar_initialize(&cmd->_header.completed_cv);
[04df063]124
[c3d926f3]125 cmd->_header.cmd = type;
[4688350b]126}
127
[eb928c4]128/**
129 * Finish the command structure. Some command invocation includes allocating
130 * a context structure. To have the convenience in calling commands, this
131 * method deallocates all resources.
132 */
[c3d926f3]133void xhci_cmd_fini(xhci_cmd_t *cmd)
[4688350b]134{
[c3d926f3]135 list_remove(&cmd->_header.link);
[110d795]136
[b80c1ab]137 dma_buffer_free(&cmd->input_ctx);
138 dma_buffer_free(&cmd->bandwidth_ctx);
[9304b66]139
[c3d926f3]140 if (cmd->_header.async) {
141 free(cmd);
142 }
[110d795]143}
144
[eb928c4]145/**
146 * Find a command issued by TRB at @c phys inside the command list.
147 *
148 * Call with guard locked only.
149 */
[889146e]150static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
[110d795]151{
[889146e]152 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
153 assert(fibril_mutex_is_locked(&cr->guard));
[74b852b]154
[889146e]155 link_t *cmd_link = list_first(&cr->cmd_list);
[110d795]156
[2fa43d1]157 while (cmd_link != NULL) {
[c3d926f3]158 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
[2fa43d1]159
[c3d926f3]160 if (cmd->_header.trb_phys == phys)
[2fa43d1]161 break;
162
[889146e]163 cmd_link = list_next(cmd_link, &cr->cmd_list);
[2fa43d1]164 }
165
[889146e]166 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
167 : NULL;
[110d795]168}
169
[eb928c4]170/**
171 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
172 * Register the command as waiting for completion inside the command list.
173 */
174static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
[481af21e]175{
[889146e]176 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[548c123]177 assert(cmd);
178
[889146e]179 fibril_mutex_lock(&cr->guard);
[c058a388]180
[889146e]181 while (cr->state == XHCI_CR_STATE_CHANGING)
182 fibril_condvar_wait(&cr->state_cv, &cr->guard);
[481af21e]183
[889146e]184 if (cr->state != XHCI_CR_STATE_OPEN) {
185 fibril_mutex_unlock(&cr->guard);
186 return ENAK;
187 }
188
189 usb_log_debug2("HC(%p): Sending command:", hc);
[c3d926f3]190 xhci_dump_trb(&cmd->_header.trb);
[481af21e]191
[889146e]192 list_append(&cmd->_header.link, &cr->cmd_list);
193
194 xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
195 hc_ring_doorbell(hc, 0, 0);
196
197 fibril_mutex_unlock(&cr->guard);
198
[481af21e]199 return EOK;
200}
201
/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr->state = XHCI_CR_STATE_CLOSED;
	fibril_condvar_broadcast(&cr->state_cv);

	/* Request the stop via the Command Stop (CS) bit of CRCR. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	/*
	 * Wait until the Command Ring Running (CRR) bit clears. The wakeup
	 * comes from the completion handler, which broadcasts stopped_cv on
	 * the COMMAND_RING_STOPPED completion code.
	 */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
224
/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 *
 * NOTE(review): the only visible caller (try_abort_current_command) holds
 * cr->guard across this call — presumably that is part of the contract.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	/* Set the Command Abort (CA) bit of CRCR. */
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}
234
/*
 * Human-readable names for TRB completion codes, indexed by the code value.
 * Unlisted indices are implicitly NULL and reported as reserved/vendor
 * specific by report_error().
 */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
277
[eb928c4]278/**
279 * Report an error according to command completion code.
280 */
[4fa5342]281static void report_error(int code)
282{
283 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
284 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
285 else
286 usb_log_error("Command resulted in reserved or vendor specific error.");
287}
288
[eb928c4]289/**
290 * Handle a command completion. Feed the fibril waiting for result.
291 *
292 * @param trb The COMMAND_COMPLETION TRB found in event ring.
293 */
[c3d926f3]294int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
[c9c0e41]295{
[889146e]296 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[c3d926f3]297 assert(trb);
298
299 usb_log_debug2("HC(%p) Command completed.", hc);
300
[889146e]301 fibril_mutex_lock(&cr->guard);
302
303 int code = TRB_GET_CODE(*trb);
304 const uint64_t phys = TRB_GET_PHYS(*trb);
[c3d926f3]305
[889146e]306 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
307
308 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
309 /* This can either mean that the ring is being stopped, or
310 * a command was aborted. In either way, wake threads waiting
311 * on stopped_cv.
312 *
313 * Note that we need to hold mutex, because we must be sure the
314 * requesting thread is waiting inside the CV.
315 */
316 fibril_condvar_broadcast(&cr->stopped_cv);
317 fibril_mutex_unlock(&cr->guard);
318 return EOK;
319 }
320
321 xhci_cmd_t *command = find_command(hc, phys);
[c3d926f3]322 if (command == NULL) {
[889146e]323 usb_log_error("No command struct for this completion event found.");
[c3d926f3]324
325 if (code != XHCI_TRBC_SUCCESS)
326 report_error(code);
327
328 return EOK;
329 }
[c058a388]330
[889146e]331 list_remove(&command->_header.link);
332
[3cbc138]333 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
334 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
335 code = XHCI_TRBC_SUCCESS;
336
[c3d926f3]337 command->status = code;
338 command->slot_id = TRB_GET_SLOT(*trb);
[c9c0e41]339
[c3d926f3]340 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
[3cbc138]341
342 if (code != XHCI_TRBC_SUCCESS) {
343 report_error(code);
344 xhci_dump_trb(&command->_header.trb);
[c3d926f3]345 }
346
347 switch (TRB_TYPE(command->_header.trb)) {
348 case XHCI_TRB_TYPE_NO_OP_CMD:
349 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
350 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
351 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
352 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
353 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
354 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
355 break;
356 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
357 // Note: If the endpoint was in the middle of a transfer, then the xHC
358 // will add a Transfer TRB before the Event TRB, research that and
359 // handle it appropriately!
360 break;
361 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
362 break;
363 default:
364 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
365 return ENAK;
366 }
367
[889146e]368 fibril_mutex_unlock(&cr->guard);
369
[c3d926f3]370 fibril_mutex_lock(&command->_header.completed_mtx);
371 command->_header.completed = true;
372 fibril_condvar_broadcast(&command->_header.completed_cv);
373 fibril_mutex_unlock(&command->_header.completed_mtx);
374
375 if (command->_header.async) {
376 /* Free the command and other DS upon completion. */
377 xhci_cmd_fini(command);
378 }
379
380 return EOK;
381}
382
383/* Command-issuing functions */
384
385static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
386{
387 assert(hc);
388
389 xhci_trb_clean(&cmd->_header.trb);
390
391 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
[110d795]392
[eb928c4]393 return enqueue_command(hc, cmd);
[c9c0e41]394}
395
[c3d926f3]396static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9c0e41]397{
[c058a388]398 assert(hc);
399
[c3d926f3]400 xhci_trb_clean(&cmd->_header.trb);
[c9c0e41]401
[c3d926f3]402 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
403 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
[110d795]404
[eb928c4]405 return enqueue_command(hc, cmd);
[5ac5eb1]406}
407
[c3d926f3]408static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[5ac5eb1]409{
[c058a388]410 assert(hc);
[110d795]411 assert(cmd);
[c058a388]412
[c3d926f3]413 xhci_trb_clean(&cmd->_header.trb);
[5ac5eb1]414
[c3d926f3]415 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
416 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]417
[eb928c4]418 return enqueue_command(hc, cmd);
[c9c0e41]419}
420
[c3d926f3]421static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[8db42f7]422{
[c058a388]423 assert(hc);
[110d795]424 assert(cmd);
[b80c1ab]425 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]426
[8db42f7]427 /**
428 * TODO: Requirements for this command:
429 * dcbaa[slot_id] is properly sized and initialized
430 * ictx has valids slot context and endpoint 0, all
431 * other should be ignored at this point (see section 4.6.5).
432 */
[04df063]433
[c3d926f3]434 xhci_trb_clean(&cmd->_header.trb);
[8db42f7]435
[b80c1ab]436 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[8db42f7]437
438 /**
439 * Note: According to section 6.4.3.4, we can set the 9th bit
440 * of the control field of the trb (BSR) to 1 and then the xHC
441 * will not issue the SET_ADDRESS request to the USB device.
442 * This can be used to provide compatibility with legacy USB devices
443 * that require their device descriptor to be read before such request.
444 */
[c3d926f3]445 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
446 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[8db42f7]447
[eb928c4]448 return enqueue_command(hc, cmd);
[8db42f7]449}
450
[c3d926f3]451static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[665bf3c]452{
[c058a388]453 assert(hc);
[110d795]454 assert(cmd);
[c058a388]455
[c3d926f3]456 xhci_trb_clean(&cmd->_header.trb);
[665bf3c]457
[b724494]458 if (!cmd->deconfigure) {
459 /* If the DC flag is on, input context is not evaluated. */
[b80c1ab]460 assert(dma_buffer_is_set(&cmd->input_ctx));
[b724494]461
[b80c1ab]462 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[b724494]463 }
[110d795]464
[c3d926f3]465 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
466 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
467 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
[665bf3c]468
[eb928c4]469 return enqueue_command(hc, cmd);
[665bf3c]470}
471
[c3d926f3]472static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9ce62ae]473{
[c058a388]474 assert(hc);
[110d795]475 assert(cmd);
[b80c1ab]476 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]477
[c9ce62ae]478 /**
479 * Note: All Drop Context flags of the input context shall be 0,
480 * all Add Context flags shall be initialize to indicate IDs
481 * of the contexts affected by the command.
482 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
483 */
[c3d926f3]484 xhci_trb_clean(&cmd->_header.trb);
[c9ce62ae]485
[b80c1ab]486 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[c9ce62ae]487
[c3d926f3]488 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
489 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]490
[eb928c4]491 return enqueue_command(hc, cmd);
[c9ce62ae]492}
493
[c3d926f3]494static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]495{
[c058a388]496 assert(hc);
[110d795]497 assert(cmd);
[c058a388]498
[05aeee0e]499 /**
500 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
501 * information about this flag.
502 */
[c3d926f3]503 xhci_trb_clean(&cmd->_header.trb);
[05aeee0e]504
[c3d926f3]505 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
506 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
507 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]509
[eb928c4]510 return enqueue_command(hc, cmd);
[05aeee0e]511}
512
[c3d926f3]513static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]514{
[c058a388]515 assert(hc);
[110d795]516 assert(cmd);
[c058a388]517
[c3d926f3]518 xhci_trb_clean(&cmd->_header.trb);
[110d795]519
[c3d926f3]520 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
521 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
522 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
523 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[05aeee0e]524
[eb928c4]525 return enqueue_command(hc, cmd);
[c058a388]526}
[05aeee0e]527
[c3d926f3]528static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[0cabd10]529{
530 assert(hc);
531 assert(cmd);
532
[c3d926f3]533 xhci_trb_clean(&cmd->_header.trb);
[0cabd10]534
[c3d926f3]535 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
536 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
537 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
538 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
539 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
[0cabd10]540
541 /**
542 * TODO: Set DCS (see section 4.6.10).
543 */
544
[eb928c4]545 return enqueue_command(hc, cmd);
[0cabd10]546}
547
[c3d926f3]548static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c058a388]549{
550 assert(hc);
[110d795]551 assert(cmd);
[c058a388]552
[c3d926f3]553 xhci_trb_clean(&cmd->_header.trb);
[c058a388]554
[c3d926f3]555 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
556 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]557
[eb928c4]558 return enqueue_command(hc, cmd);
[05aeee0e]559}
560
[c3d926f3]561static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[60af4cdb]562{
563 assert(hc);
564 assert(cmd);
565
[c3d926f3]566 xhci_trb_clean(&cmd->_header.trb);
[60af4cdb]567
[b80c1ab]568 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
[60af4cdb]569
[c3d926f3]570 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
571 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
572 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
[60af4cdb]573
[eb928c4]574 return enqueue_command(hc, cmd);
[60af4cdb]575}
576
/* The table of command-issuing functions. */

/* Signature shared by all command-issuing functions above. */
typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/*
 * Dispatch table indexed by xhci_cmd_type_t. NULL entries are commands
 * without an implemented handler; xhci_cmd_sync() reports them as ENOTSUP.
 */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
598
/**
 * Try to abort currently processed command. This is tricky, because
 * calling fibril is not necessarily the one which issued the blocked command.
 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by
 * event, which is again handled in different fibril. but, once we go to sleep
 * on waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied by
 * COMMAND_ABORTED event.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		// The CR is either stopped, or different fibril is already
		// restarting it.
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("HC(%p): Timeout while waiting for command: aborting current command.", hc);

	/* Claim the restart; enqueue_command() blocks while CHANGING. */
	cr->state = XHCI_CR_STATE_CHANGING;
	fibril_condvar_broadcast(&cr->state_cv);

	abort_command_ring(hc);

	/* Woken by the COMMAND_RING_STOPPED completion event. */
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("HC(%p): Command didn't abort.", hc);

		cr->state = XHCI_CR_STATE_CLOSED;
		fibril_condvar_broadcast(&cr->state_cv);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_error("HC(%p): Command ring stopped. Starting again.", hc);
	hc_ring_doorbell(hc, 0, 0);

	/* Reopen the ring and release fibrils blocked in enqueue_command(). */
	cr->state = XHCI_CR_STATE_OPEN;
	fibril_condvar_broadcast(&cr->state_cv);

	fibril_mutex_unlock(&cr->guard);
	return EOK;
}
659
/**
 * Wait, until the command is completed. The completion is triggered by
 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the
 * command in timely manner, we timeout. Note that we can't just return an
 * error after the timeout pass - it may be other command blocking the ring,
 * and ours can be completed afterwards. Therefore, it is not guaranteed that
 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting
 * until COMMAND_COMPLETION event arrives.
 */
static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	int rv = EOK;

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/* The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			/* Drop the lock: the abort path takes cr->guard, and
			 * the completion handler needs completed_mtx. */
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			/* Re-acquire and re-check the completion flag. */
			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}
[2fa43d1]695
[eb928c4]696/**
697 * Issue command and block the current fibril until it is completed or timeout
698 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
[c3d926f3]699 */
700int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
701{
702 assert(hc);
703 assert(cmd);
[2fa43d1]704
[c3d926f3]705 int err;
706
707 if (!cmd_handlers[cmd->_header.cmd]) {
708 /* Handler not implemented. */
709 return ENOTSUP;
[2fa43d1]710 }
[110d795]711
[c3d926f3]712 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
713 /* Command could not be issued. */
714 return err;
715 }
[110d795]716
[889146e]717 if ((err = wait_for_cmd_completion(hc, cmd))) {
718 /* Command failed. */
[c3d926f3]719 return err;
[665bf3c]720 }
[c362127]721
[3cbc138]722 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
[c3d926f3]723}
[110d795]724
[eb928c4]725/**
726 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
727 * is a useful shorthand for issuing commands without out parameters.
[c3d926f3]728 */
729int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
730{
731 const int err = xhci_cmd_sync(hc, cmd);
732 xhci_cmd_fini(cmd);
733
734 return err;
735}
736
[eb928c4]737/**
738 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
739 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
[c3d926f3]740 */
741int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
742{
743 assert(hc);
744 assert(stack_cmd);
745
746 /* Save the command for later. */
747 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
748 if (!heap_cmd) {
749 return ENOMEM;
750 }
751
752 /* TODO: Is this good for the mutex and the condvar? */
753 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
754 heap_cmd->_header.async = true;
755
756 /* Issue the command. */
757 int err;
758
759 if (!cmd_handlers[heap_cmd->_header.cmd]) {
760 /* Handler not implemented. */
761 err = ENOTSUP;
762 goto err_heap_cmd;
[f711f06]763 }
[110d795]764
[c3d926f3]765 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
766 /* Command could not be issued. */
767 goto err_heap_cmd;
768 }
[4688350b]769
[110d795]770 return EOK;
[c9c0e41]771
[c3d926f3]772err_heap_cmd:
773 free(heap_cmd);
774 return err;
775}
[c9c0e41]776
777/**
778 * @}
779 */
Note: See TracBrowser for help on using the repository browser.