source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 2aaba7e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 2aaba7e was fb28cde, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: changed api to get dequeue state of trb ring

  • Property mode set to 100644
File size: 21.3 KB
RevLine 
[c9c0e41]1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
[8db42f7]42#include "hw_struct/context.h"
[c9c0e41]43#include "hw_struct/trb.h"
44
/*
 * Helpers to fill fields of a command TRB. All of them OR bits into the TRB,
 * so the TRB must be zeroed (xhci_trb_clean) before the first use.
 */
#define TRB_SET_TSP(trb, tsp) (trb).control |= host2xhci(32, (((tsp) & 0x1) << 9))
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/* The Endpoint ID field is 5 bits wide (xHCI spec 6.4.3.7, DCI 1-31). The
 * previous mask 0x5 dropped bits 1, 3 and 4, corrupting most endpoint IDs. */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

/* Extractors for the fields of a command completion event TRB. */
#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
59
[c3d926f3]60/* Control functions */
61
[889146e]62static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
[110d795]63{
64 assert(hc);
[889146e]65 return &hc->cr;
66}
67
[eb928c4]68/**
69 * Initialize the command subsystem. Allocates the comand ring.
70 *
71 * Does not configure the CR pointer to the hardware, because the xHC will be
72 * reset before starting.
73 */
[889146e]74int xhci_init_commands(xhci_hc_t *hc)
75{
76 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
77 int err;
78
79 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
80 return err;
[110d795]81
[889146e]82 fibril_mutex_initialize(&cr->guard);
83 fibril_condvar_initialize(&cr->state_cv);
84 fibril_condvar_initialize(&cr->stopped_cv);
[74b852b]85
[889146e]86 list_initialize(&cr->cmd_list);
87
88 cr->state = XHCI_CR_STATE_OPEN;
[74b852b]89
[110d795]90 return EOK;
91}
92
[eb928c4]93/**
94 * Finish the command subsystem. Stops the hardware from running commands, then
95 * deallocates the ring.
96 */
[c46c356]97void xhci_fini_commands(xhci_hc_t *hc)
98{
99 assert(hc);
[eb928c4]100 xhci_stop_command_ring(hc);
101
102 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
103
104 fibril_mutex_lock(&cr->guard);
105 xhci_trb_ring_fini(&cr->trb_ring);
106 fibril_mutex_unlock(&cr->guard);
[c46c356]107}
108
[eb928c4]109/**
110 * Initialize a command structure for the given command.
111 */
[c3d926f3]112void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
[110d795]113{
[c3d926f3]114 memset(cmd, 0, sizeof(*cmd));
[110d795]115
[c3d926f3]116 link_initialize(&cmd->_header.link);
[110d795]117
[c3d926f3]118 fibril_mutex_initialize(&cmd->_header.completed_mtx);
119 fibril_condvar_initialize(&cmd->_header.completed_cv);
[04df063]120
[c3d926f3]121 cmd->_header.cmd = type;
[4688350b]122}
123
[eb928c4]124/**
125 * Finish the command structure. Some command invocation includes allocating
126 * a context structure. To have the convenience in calling commands, this
127 * method deallocates all resources.
128 */
[c3d926f3]129void xhci_cmd_fini(xhci_cmd_t *cmd)
[4688350b]130{
[c3d926f3]131 list_remove(&cmd->_header.link);
[110d795]132
[b80c1ab]133 dma_buffer_free(&cmd->input_ctx);
134 dma_buffer_free(&cmd->bandwidth_ctx);
[9304b66]135
[c3d926f3]136 if (cmd->_header.async) {
137 free(cmd);
138 }
[110d795]139}
140
[eb928c4]141/**
142 * Find a command issued by TRB at @c phys inside the command list.
143 *
144 * Call with guard locked only.
145 */
[889146e]146static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
[110d795]147{
[889146e]148 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
149 assert(fibril_mutex_is_locked(&cr->guard));
[74b852b]150
[889146e]151 link_t *cmd_link = list_first(&cr->cmd_list);
[110d795]152
[2fa43d1]153 while (cmd_link != NULL) {
[c3d926f3]154 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
[2fa43d1]155
[c3d926f3]156 if (cmd->_header.trb_phys == phys)
[2fa43d1]157 break;
158
[889146e]159 cmd_link = list_next(cmd_link, &cr->cmd_list);
[2fa43d1]160 }
161
[889146e]162 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
163 : NULL;
[110d795]164}
165
[d2c3dcd]166static void cr_set_state(xhci_cmd_ring_t *cr, xhci_cr_state_t state)
167{
168 assert(fibril_mutex_is_locked(&cr->guard));
169
170 cr->state = state;
171 if (state == XHCI_CR_STATE_OPEN
172 || state == XHCI_CR_STATE_CLOSED)
173 fibril_condvar_broadcast(&cr->state_cv);
174}
175
176static int wait_for_ring_open(xhci_cmd_ring_t *cr)
177{
178 assert(fibril_mutex_is_locked(&cr->guard));
179
180 while (true) {
181 switch (cr->state) {
182 case XHCI_CR_STATE_CHANGING:
183 case XHCI_CR_STATE_FULL:
184 fibril_condvar_wait(&cr->state_cv, &cr->guard);
185 break;
186 case XHCI_CR_STATE_OPEN:
187 return EOK;
188 case XHCI_CR_STATE_CLOSED:
189 return ENAK;
190 }
191 }
192}
193
/**
 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
 * Register the command as waiting for completion inside the command list.
 *
 * @param hc  Host controller owning the command ring.
 * @param cmd Command whose _header.trb has been filled by the caller.
 * @return EOK on success, ENAK if the ring is (or becomes) closed, or an
 *         error propagated from the TRB ring.
 */
static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(cmd);

	fibril_mutex_lock(&cr->guard);

	if (wait_for_ring_open(cr)) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_debug("Sending command %s", xhci_trb_str_type(TRB_TYPE(cmd->_header.trb)));

	/* Register the command first, so the completion handler can find it
	 * by the TRB physical address once the event arrives. */
	list_append(&cmd->_header.link, &cr->cmd_list);

	int err = EOK;
	while (err == EOK) {
		err = xhci_trb_ring_enqueue(&cr->trb_ring,
		    &cmd->_header.trb, &cmd->_header.trb_phys);
		if (err != EAGAIN)
			break;

		/* The ring is full. Mark it so, wait for completions to make
		 * room (or for the ring to close), then retry. */
		cr_set_state(cr, XHCI_CR_STATE_FULL);
		err = wait_for_ring_open(cr);
	}

	if (err == EOK)
		hc_ring_doorbell(hc, 0, 0);

	fibril_mutex_unlock(&cr->guard);

	return err;
}
232
/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr_set_state(cr, XHCI_CR_STATE_CLOSED);

	/* Set the Command Stop bit. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	/* The completion handler broadcasts stopped_cv when the xHC delivers
	 * the COMMAND_RING_STOPPED event; recheck CRR until it clears. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
254
/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 *
 * Caller is expected to wait for the COMMAND_RING_STOPPED event afterwards.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	/* Set the Command Abort bit. */
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}
264
/* Human-readable names of TRB completion codes, indexed by the code value.
 * NULL/absent entries denote reserved or vendor-specific codes. */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
307
[eb928c4]308/**
309 * Report an error according to command completion code.
310 */
[4fa5342]311static void report_error(int code)
312{
313 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
314 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
315 else
316 usb_log_error("Command resulted in reserved or vendor specific error.");
317}
318
[eb928c4]319/**
320 * Handle a command completion. Feed the fibril waiting for result.
321 *
322 * @param trb The COMMAND_COMPLETION TRB found in event ring.
323 */
[c3d926f3]324int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
[c9c0e41]325{
[889146e]326 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[c3d926f3]327 assert(trb);
328
[889146e]329 fibril_mutex_lock(&cr->guard);
330
331 int code = TRB_GET_CODE(*trb);
332
333 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
334 /* This can either mean that the ring is being stopped, or
335 * a command was aborted. In either way, wake threads waiting
336 * on stopped_cv.
337 *
338 * Note that we need to hold mutex, because we must be sure the
339 * requesting thread is waiting inside the CV.
340 */
[837581fd]341 usb_log_debug2("Command ring stopped.");
[889146e]342 fibril_condvar_broadcast(&cr->stopped_cv);
343 fibril_mutex_unlock(&cr->guard);
344 return EOK;
345 }
346
[d2c3dcd]347 const uint64_t phys = TRB_GET_PHYS(*trb);
348 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
349
350 if (cr->state == XHCI_CR_STATE_FULL)
351 cr_set_state(cr, XHCI_CR_STATE_OPEN);
352
[889146e]353 xhci_cmd_t *command = find_command(hc, phys);
[c3d926f3]354 if (command == NULL) {
[837581fd]355 usb_log_error("No command struct for completion event found.");
[c3d926f3]356
357 if (code != XHCI_TRBC_SUCCESS)
358 report_error(code);
359
360 return EOK;
361 }
[c058a388]362
[889146e]363 list_remove(&command->_header.link);
364
[3cbc138]365 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
366 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
367 code = XHCI_TRBC_SUCCESS;
368
[c3d926f3]369 command->status = code;
370 command->slot_id = TRB_GET_SLOT(*trb);
[c9c0e41]371
[837581fd]372 usb_log_debug("Completed command %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
[3cbc138]373
374 if (code != XHCI_TRBC_SUCCESS) {
375 report_error(code);
376 xhci_dump_trb(&command->_header.trb);
[c3d926f3]377 }
378
[889146e]379 fibril_mutex_unlock(&cr->guard);
380
[c3d926f3]381 fibril_mutex_lock(&command->_header.completed_mtx);
382 command->_header.completed = true;
383 fibril_condvar_broadcast(&command->_header.completed_cv);
384 fibril_mutex_unlock(&command->_header.completed_mtx);
385
386 if (command->_header.async) {
387 /* Free the command and other DS upon completion. */
388 xhci_cmd_fini(command);
389 }
390
391 return EOK;
392}
393
394/* Command-issuing functions */
395
396static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
397{
398 assert(hc);
399
400 xhci_trb_clean(&cmd->_header.trb);
401
402 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
[110d795]403
[eb928c4]404 return enqueue_command(hc, cmd);
[c9c0e41]405}
406
[c3d926f3]407static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9c0e41]408{
[c058a388]409 assert(hc);
410
[c3d926f3]411 xhci_trb_clean(&cmd->_header.trb);
[c9c0e41]412
[c3d926f3]413 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
414 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
[110d795]415
[eb928c4]416 return enqueue_command(hc, cmd);
[5ac5eb1]417}
418
[c3d926f3]419static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[5ac5eb1]420{
[c058a388]421 assert(hc);
[110d795]422 assert(cmd);
[c058a388]423
[c3d926f3]424 xhci_trb_clean(&cmd->_header.trb);
[5ac5eb1]425
[c3d926f3]426 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
427 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]428
[eb928c4]429 return enqueue_command(hc, cmd);
[c9c0e41]430}
431
/** Issue an ADDRESS_DEVICE command using the caller-prepared input context. */
static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * TODO: Requirements for this command:
	 *         dcbaa[slot_id] is properly sized and initialized
	 *         ictx has valids slot context and endpoint 0, all
	 *         other should be ignored at this point (see section 4.6.5).
	 */

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	/**
	 * Note: According to section 6.4.3.4, we can set the 9th bit
	 * of the control field of the trb (BSR) to 1 and then the xHC
	 * will not issue the SET_ADDRESS request to the USB device.
	 * This can be used to provide compatibility with legacy USB devices
	 * that require their device descriptor to be read before such request.
	 */
	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}
461
[c3d926f3]462static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[665bf3c]463{
[c058a388]464 assert(hc);
[110d795]465 assert(cmd);
[c058a388]466
[c3d926f3]467 xhci_trb_clean(&cmd->_header.trb);
[665bf3c]468
[b724494]469 if (!cmd->deconfigure) {
470 /* If the DC flag is on, input context is not evaluated. */
[b80c1ab]471 assert(dma_buffer_is_set(&cmd->input_ctx));
[b724494]472
[b80c1ab]473 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[b724494]474 }
[110d795]475
[c3d926f3]476 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
477 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
478 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
[665bf3c]479
[eb928c4]480 return enqueue_command(hc, cmd);
[665bf3c]481}
482
/** Issue an EVALUATE_CONTEXT command using the caller-prepared input context. */
static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * Note: All Drop Context flags of the input context shall be 0,
	 *       all Add Context flags shall be initialize to indicate IDs
	 *       of the contexts affected by the command.
	 *       Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
	 */
	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}
504
[c3d926f3]505static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]506{
[c058a388]507 assert(hc);
[110d795]508 assert(cmd);
[c058a388]509
[c3d926f3]510 xhci_trb_clean(&cmd->_header.trb);
[05aeee0e]511
[c3d926f3]512 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
[e7f21884]513 TRB_SET_TSP(cmd->_header.trb, cmd->tsp);
[c3d926f3]514 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
515 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]516
[eb928c4]517 return enqueue_command(hc, cmd);
[05aeee0e]518}
519
[c3d926f3]520static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]521{
[c058a388]522 assert(hc);
[110d795]523 assert(cmd);
[c058a388]524
[c3d926f3]525 xhci_trb_clean(&cmd->_header.trb);
[110d795]526
[c3d926f3]527 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
528 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
529 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
530 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[05aeee0e]531
[eb928c4]532 return enqueue_command(hc, cmd);
[c058a388]533}
[05aeee0e]534
[c3d926f3]535static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[0cabd10]536{
537 assert(hc);
538 assert(cmd);
539
[c3d926f3]540 xhci_trb_clean(&cmd->_header.trb);
[0cabd10]541
[c3d926f3]542 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
543 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
544 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
545 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
546 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
[0cabd10]547
[eb928c4]548 return enqueue_command(hc, cmd);
[0cabd10]549}
550
[c3d926f3]551static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c058a388]552{
553 assert(hc);
[110d795]554 assert(cmd);
[c058a388]555
[c3d926f3]556 xhci_trb_clean(&cmd->_header.trb);
[c058a388]557
[c3d926f3]558 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
559 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]560
[eb928c4]561 return enqueue_command(hc, cmd);
[05aeee0e]562}
563
[c3d926f3]564static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[60af4cdb]565{
566 assert(hc);
567 assert(cmd);
568
[c3d926f3]569 xhci_trb_clean(&cmd->_header.trb);
[60af4cdb]570
[b80c1ab]571 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
[60af4cdb]572
[c3d926f3]573 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
574 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
575 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
[60af4cdb]576
[eb928c4]577 return enqueue_command(hc, cmd);
[60af4cdb]578}
579
/* The table of command-issuing functions, indexed by xhci_cmd_type_t.
 * A NULL entry means the command is not (yet) implemented; callers return
 * ENOTSUP for those. */

typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
601
/**
 * Try to abort currently processed command. This is tricky, because
 * calling fibril is not necessarily the one which issued the blocked command.
 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by
 * event, which is again handled in different fibril. but, once we go to sleep
 * on waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied by
 * COMMAND_ABORTED event.
 *
 * @return EOK if the ring was restarted (or another fibril is already doing
 *         so), ENAK if the ring is closed or the abort timed out.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	/* Once closed, the ring never restarts. */
	if (cr->state == XHCI_CR_STATE_CLOSED) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	/* Another fibril is already aborting/restarting; let it finish. */
	if (cr->state == XHCI_CR_STATE_CHANGING) {
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("Timeout while waiting for command: aborting current command.");

	cr_set_state(cr, XHCI_CR_STATE_CHANGING);

	abort_command_ring(hc);

	/* Woken by the COMMAND_RING_STOPPED completion handler. */
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("Command didn't abort.");

		cr_set_state(cr, XHCI_CR_STATE_CLOSED);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	cr_set_state(cr, XHCI_CR_STATE_OPEN);

	fibril_mutex_unlock(&cr->guard);

	usb_log_error("Command ring stopped. Starting again.");
	hc_ring_doorbell(hc, 0, 0);

	return EOK;
}
663
/**
 * Wait, until the command is completed. The completion is triggered by
 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the
 * command in timely manner, we timeout. Note that we can't just return an
 * error after the timeout pass - it may be other command blocking the ring,
 * and ours can be completed afterwards. Therefore, it is not guaranteed that
 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting
 * until COMMAND_COMPLETION event arrives.
 *
 * @return EOK once the command completed, or an error from the abort path.
 */
static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	int rv = EOK;

	/* Completions are delivered by the event-handler fibril; waiting on
	 * it from itself could never be satisfied. */
	if (fibril_get_id() == hc->event_handler) {
		usb_log_error("Deadlock detected in waiting for command.");
		abort();
	}

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/* The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			/* Ring restarted; resume waiting for our command. */
			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}
[2fa43d1]704
[eb928c4]705/**
706 * Issue command and block the current fibril until it is completed or timeout
707 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
[c3d926f3]708 */
709int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
710{
711 assert(hc);
712 assert(cmd);
[2fa43d1]713
[c3d926f3]714 int err;
715
716 if (!cmd_handlers[cmd->_header.cmd]) {
717 /* Handler not implemented. */
718 return ENOTSUP;
[2fa43d1]719 }
[110d795]720
[c3d926f3]721 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
722 /* Command could not be issued. */
723 return err;
724 }
[110d795]725
[889146e]726 if ((err = wait_for_cmd_completion(hc, cmd))) {
727 /* Command failed. */
[c3d926f3]728 return err;
[665bf3c]729 }
[c362127]730
[e7e99bf]731 switch (cmd->status) {
732 case XHCI_TRBC_SUCCESS:
733 return EOK;
734 case XHCI_TRBC_USB_TRANSACTION_ERROR:
735 return ESTALL;
[feabe163]736 case XHCI_TRBC_RESOURCE_ERROR:
737 case XHCI_TRBC_BANDWIDTH_ERROR:
738 case XHCI_TRBC_NO_SLOTS_ERROR:
739 return ELIMIT;
740 case XHCI_TRBC_SLOT_NOT_ENABLED_ERROR:
741 return ENOENT;
[e7e99bf]742 default:
743 return EINVAL;
744 }
[c3d926f3]745}
[110d795]746
[eb928c4]747/**
748 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
749 * is a useful shorthand for issuing commands without out parameters.
[c3d926f3]750 */
751int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
752{
753 const int err = xhci_cmd_sync(hc, cmd);
754 xhci_cmd_fini(cmd);
755
756 return err;
757}
758
[eb928c4]759/**
760 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
761 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
[c3d926f3]762 */
763int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
764{
765 assert(hc);
766 assert(stack_cmd);
767
768 /* Save the command for later. */
769 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
770 if (!heap_cmd) {
771 return ENOMEM;
772 }
773
774 /* TODO: Is this good for the mutex and the condvar? */
775 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
776 heap_cmd->_header.async = true;
777
778 /* Issue the command. */
779 int err;
780
781 if (!cmd_handlers[heap_cmd->_header.cmd]) {
782 /* Handler not implemented. */
783 err = ENOTSUP;
784 goto err_heap_cmd;
[f711f06]785 }
[110d795]786
[c3d926f3]787 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
788 /* Command could not be issued. */
789 goto err_heap_cmd;
790 }
[4688350b]791
[110d795]792 return EOK;
[c9c0e41]793
[c3d926f3]794err_heap_cmd:
795 free(heap_cmd);
796 return err;
797}
[c9c0e41]798
799/**
800 * @}
801 */
Note: See TracBrowser for help on using the repository browser.