source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 889146e

Last change on this file since 889146e was 889146e, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: commands shall not just timeout

The previous behavior broke the semantics: if a command was successful
but just took too long to complete, we returned an error, and the caller
had no way to know whether the command's effect had taken place.

This commit implements command aborting. wait_for_cmd_completion can no
longer simply time out; instead it aborts the currently running (probably
blocked) command and then goes back to waiting. So now, if command_sync
returns an error, it means the command really was unsuccessful.

If aborting the command takes too long, we should reset the whole HC.
This is not yet implemented.
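
To make the new contract concrete, here is a minimal sketch of a caller. The helper names and the cmd fields are the ones defined in this file; the wrapping function example_enable_slot is illustrative only:

    static int example_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
    {
        xhci_cmd_t cmd;
        xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

        const int err = xhci_cmd_sync(hc, &cmd);
        if (err != EOK) {
            /* With aborting in place, an error means the command has not
             * taken effect, so there is no slot to clean up. */
            xhci_cmd_fini(&cmd);
            return err;
        }

        *slot_id = cmd.slot_id;   /* out parameter filled on completion */
        xhci_cmd_fini(&cmd);
        return EOK;
    }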

[c9c0e41]1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
[8db42f7]42#include "hw_struct/context.h"
[c9c0e41]43#include "hw_struct/trb.h"
44
[1b78a7c1]45#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, (((tcs) & 0x1) << 9))
46#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
[b724494]47#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
[1b78a7c1]48#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
[0cabd10]49#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
[1b78a7c1]50#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
51#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
[60af4cdb]52#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)
[1b78a7c1]53
[0cabd10]54/**
55 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
56 */
57#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
[b80c1ab]58#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))
[1b78a7c1]59
60#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
61#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
62#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
[c3d926f3]64/* Control functions */
65
[889146e]66static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
[110d795]67{
68 assert(hc);
[889146e]69 return &hc->cr;
70}
71
72int xhci_init_commands(xhci_hc_t *hc)
73{
74 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
75 int err;
76
77 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
78 return err;
[110d795]79
[889146e]80 fibril_mutex_initialize(&cr->guard);
81 fibril_condvar_initialize(&cr->state_cv);
82 fibril_condvar_initialize(&cr->stopped_cv);
[74b852b]83
[889146e]84 list_initialize(&cr->cmd_list);
85
86 cr->state = XHCI_CR_STATE_OPEN;
[74b852b]87
[110d795]88 return EOK;
89}
90
[c46c356]91void xhci_fini_commands(xhci_hc_t *hc)
92{
[c46c356]93 assert(hc);
[889146e]94 xhci_stop_command_ring(hc);
95}
96
[c3d926f3]97void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
[110d795]98{
[c3d926f3]99 memset(cmd, 0, sizeof(*cmd));
[110d795]100
[c3d926f3]101 link_initialize(&cmd->_header.link);
[110d795]102
[c3d926f3]103 fibril_mutex_initialize(&cmd->_header.completed_mtx);
104 fibril_condvar_initialize(&cmd->_header.completed_cv);
[04df063]105
[c3d926f3]106 cmd->_header.cmd = type;
[4688350b]107}
108
[c3d926f3]109void xhci_cmd_fini(xhci_cmd_t *cmd)
[4688350b]110{
[c3d926f3]111 list_remove(&cmd->_header.link);
[110d795]112
[b80c1ab]113 dma_buffer_free(&cmd->input_ctx);
114 dma_buffer_free(&cmd->bandwidth_ctx);
[9304b66]115
[c3d926f3]116 if (cmd->_header.async) {
117 free(cmd);
118 }
[110d795]119}
120
[889146e]121/** Call with guard locked. */
122static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
[110d795]123{
[889146e]124 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
125 assert(fibril_mutex_is_locked(&cr->guard));
[74b852b]126
[889146e]127 link_t *cmd_link = list_first(&cr->cmd_list);
[110d795]128
[2fa43d1]129 while (cmd_link != NULL) {
[c3d926f3]130 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
[2fa43d1]131
[c3d926f3]132 if (cmd->_header.trb_phys == phys)
[2fa43d1]133 break;
134
[889146e]135 cmd_link = list_next(cmd_link, &cr->cmd_list);
[2fa43d1]136 }
137
[889146e]138 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
139 : NULL;
[110d795]140}
141
[548c123]142static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd, unsigned doorbell, unsigned target)
[481af21e]143{
[889146e]144 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[548c123]145 assert(cmd);
146
[889146e]147 fibril_mutex_lock(&cr->guard);
[c058a388]148
[889146e]149 while (cr->state == XHCI_CR_STATE_CHANGING)
150 fibril_condvar_wait(&cr->state_cv, &cr->guard);
[481af21e]151
[889146e]152 if (cr->state != XHCI_CR_STATE_OPEN) {
153 fibril_mutex_unlock(&cr->guard);
154 return ENAK;
155 }
156
157 usb_log_debug2("HC(%p): Sending command:", hc);
[c3d926f3]158 xhci_dump_trb(&cmd->_header.trb);
[481af21e]159
[889146e]160 list_append(&cmd->_header.link, &cr->cmd_list);
161
162 xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
163 hc_ring_doorbell(hc, 0, 0);
164
165 fibril_mutex_unlock(&cr->guard);
166
[481af21e]167 return EOK;
168}
169
[3dc519f]170void xhci_stop_command_ring(xhci_hc_t *hc)
171{
[889146e]172 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[3dc519f]173
[889146e]174 fibril_mutex_lock(&cr->guard);
[3dc519f]175
[889146e]176 // Prevent others from starting CR again.
177 cr->state = XHCI_CR_STATE_CLOSED;
178 fibril_condvar_broadcast(&cr->state_cv);
[3dc519f]179
[889146e]180 XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
181 XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
[3dc519f]182
[889146e]183 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
184 fibril_condvar_wait(&cr->stopped_cv, &cr->guard);
185
186 fibril_mutex_unlock(&cr->guard);
[3dc519f]187}
188
[889146e]189static void abort_command_ring(xhci_hc_t *hc)
[3dc519f]190{
[889146e]191 XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
192 XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
[3dc519f]193}
194
[4fa5342]195static const char *trb_codes [] = {
196#define TRBC(t) [XHCI_TRBC_##t] = #t
197 TRBC(INVALID),
198 TRBC(SUCCESS),
199 TRBC(DATA_BUFFER_ERROR),
200 TRBC(BABBLE_DETECTED_ERROR),
201 TRBC(USB_TRANSACTION_ERROR),
202 TRBC(TRB_ERROR),
203 TRBC(STALL_ERROR),
204 TRBC(RESOURCE_ERROR),
205 TRBC(BANDWIDTH_ERROR),
206 TRBC(NO_SLOTS_ERROR),
207 TRBC(INVALID_STREAM_ERROR),
208 TRBC(SLOT_NOT_ENABLED_ERROR),
209 TRBC(EP_NOT_ENABLED_ERROR),
210 TRBC(SHORT_PACKET),
211 TRBC(RING_UNDERRUN),
212 TRBC(RING_OVERRUN),
213 TRBC(VF_EVENT_RING_FULL),
214 TRBC(PARAMETER_ERROR),
215 TRBC(BANDWIDTH_OVERRUN_ERROR),
216 TRBC(CONTEXT_STATE_ERROR),
217 TRBC(NO_PING_RESPONSE_ERROR),
218 TRBC(EVENT_RING_FULL_ERROR),
219 TRBC(INCOMPATIBLE_DEVICE_ERROR),
220 TRBC(MISSED_SERVICE_ERROR),
221 TRBC(COMMAND_RING_STOPPED),
222 TRBC(COMMAND_ABORTED),
223 TRBC(STOPPED),
224 TRBC(STOPPED_LENGTH_INVALID),
225 TRBC(STOPPED_SHORT_PACKET),
226 TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
227 [30] = "<reserved>",
228 TRBC(ISOCH_BUFFER_OVERRUN),
229 TRBC(EVENT_LOST_ERROR),
230 TRBC(UNDEFINED_ERROR),
231 TRBC(INVALID_STREAM_ID_ERROR),
232 TRBC(SECONDARY_BANDWIDTH_ERROR),
233 TRBC(SPLIT_TRANSACTION_ERROR),
234 [XHCI_TRBC_MAX] = NULL
235#undef TRBC
236};
237
238static void report_error(int code)
239{
240 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
241 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
242 else
243 usb_log_error("Command resulted in reserved or vendor specific error.");
244}
245
[c3d926f3]246int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
[c9c0e41]247{
[889146e]248 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
[c3d926f3]249 assert(trb);
250
251 usb_log_debug2("HC(%p) Command completed.", hc);
252
[889146e]253 fibril_mutex_lock(&cr->guard);
254
255 int code = TRB_GET_CODE(*trb);
256 const uint64_t phys = TRB_GET_PHYS(*trb);
[c3d926f3]257
[889146e]258 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
259
260 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
261 /* This can either mean that the ring is being stopped, or
262 * a command was aborted. In either way, wake threads waiting
263 * on stopped_cv.
264 *
265 * Note that we need to hold mutex, because we must be sure the
266 * requesting thread is waiting inside the CV.
267 */
268 fibril_condvar_broadcast(&cr->stopped_cv);
269 fibril_mutex_unlock(&cr->guard);
270 return EOK;
271 }
272
273 xhci_cmd_t *command = find_command(hc, phys);
[c3d926f3]274 if (command == NULL) {
[889146e]275 usb_log_error("No command struct found for this completion event.");
[c3d926f3]276
277 if (code != XHCI_TRBC_SUCCESS)
278 report_error(code);
279 fibril_mutex_unlock(&cr->guard);
280 return EOK;
281 }
[c058a388]282
[889146e]283 list_remove(&command->_header.link);
284
[3cbc138]285 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
286 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
287 code = XHCI_TRBC_SUCCESS;
288
[c3d926f3]289 command->status = code;
290 command->slot_id = TRB_GET_SLOT(*trb);
[c9c0e41]291
[c3d926f3]292 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
[3cbc138]293
294 if (code != XHCI_TRBC_SUCCESS) {
295 report_error(code);
296 xhci_dump_trb(&command->_header.trb);
[c3d926f3]297 }
298
299 switch (TRB_TYPE(command->_header.trb)) {
300 case XHCI_TRB_TYPE_NO_OP_CMD:
301 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
302 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
303 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
304 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
305 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
306 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
307 break;
308 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
309 // Note: If the endpoint was in the middle of a transfer, then the xHC
310 // will add a Transfer TRB before the Event TRB, research that and
311 // handle it appropriately!
312 break;
313 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
314 break;
315 default:
316 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
317 fibril_mutex_unlock(&cr->guard);
return ENAK;
318 }
319
[889146e]320 fibril_mutex_unlock(&cr->guard);
321
[c3d926f3]322 fibril_mutex_lock(&command->_header.completed_mtx);
323 command->_header.completed = true;
324 fibril_condvar_broadcast(&command->_header.completed_cv);
325 fibril_mutex_unlock(&command->_header.completed_mtx);
326
327 if (command->_header.async) {
328 /* Free the command and other DS upon completion. */
329 xhci_cmd_fini(command);
330 }
331
332 return EOK;
333}
334
335/* Command-issuing functions */
336
337static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
338{
339 assert(hc);
340
341 xhci_trb_clean(&cmd->_header.trb);
342
343 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
[110d795]344
[548c123]345 return enqueue_command(hc, cmd, 0, 0);
[c9c0e41]346}
347
[c3d926f3]348static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9c0e41]349{
[c058a388]350 assert(hc);
351
[c3d926f3]352 xhci_trb_clean(&cmd->_header.trb);
[c9c0e41]353
[c3d926f3]354 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
355 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
[110d795]356
[548c123]357 return enqueue_command(hc, cmd, 0, 0);
[5ac5eb1]358}
359
[c3d926f3]360static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[5ac5eb1]361{
[c058a388]362 assert(hc);
[110d795]363 assert(cmd);
[c058a388]364
[c3d926f3]365 xhci_trb_clean(&cmd->_header.trb);
[5ac5eb1]366
[c3d926f3]367 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
368 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]369
[548c123]370 return enqueue_command(hc, cmd, 0, 0);
[c9c0e41]371}
372
[c3d926f3]373static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[8db42f7]374{
[c058a388]375 assert(hc);
[110d795]376 assert(cmd);
[b80c1ab]377 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]378
[8db42f7]379 /**
380 * TODO: Requirements for this command:
381 * dcbaa[slot_id] is properly sized and initialized
382 * ictx has a valid slot context and endpoint 0 context; all
383 * others should be ignored at this point (see section 4.6.5).
384 */
[04df063]385
[c3d926f3]386 xhci_trb_clean(&cmd->_header.trb);
[8db42f7]387
[b80c1ab]388 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[8db42f7]389
390 /**
391 * Note: According to section 6.4.3.4, we can set the 9th bit
392 * of the control field of the trb (BSR) to 1 and then the xHC
393 * will not issue the SET_ADDRESS request to the USB device.
394 * This provides compatibility with legacy USB devices that require their
395 * device descriptor to be read before such a request (see the sketch after this function).
396 */
[c3d926f3]397 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
398 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[8db42f7]399
[548c123]400 return enqueue_command(hc, cmd, 0, 0);
[8db42f7]401}
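/*
 * Sketch for the BSR note above (hypothetical; neither the setter nor an
 * xhci_cmd_t field for it exists in this file). A TRB_SET_BSR macro in the
 * style of the setters at the top of this file would request Address Device
 * with BSR = 1, making the xHC skip the SET_ADDRESS request:
 *
 *   #define TRB_SET_BSR(trb, bsr) (trb).control |= host2xhci(32, ((bsr) & 0x1) << 9)
 *
 *   TRB_SET_BSR(cmd->_header.trb, cmd->bsr ? 1 : 0);
 */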
402
[c3d926f3]403static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[665bf3c]404{
[c058a388]405 assert(hc);
[110d795]406 assert(cmd);
[c058a388]407
[c3d926f3]408 xhci_trb_clean(&cmd->_header.trb);
[665bf3c]409
[b724494]410 if (!cmd->deconfigure) {
411 /* If the DC flag is on, input context is not evaluated. */
[b80c1ab]412 assert(dma_buffer_is_set(&cmd->input_ctx));
[b724494]413
[b80c1ab]414 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[b724494]415 }
[110d795]416
[c3d926f3]417 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
418 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
419 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
[665bf3c]420
[548c123]421 return enqueue_command(hc, cmd, 0, 0);
[665bf3c]422}
423
[c3d926f3]424static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c9ce62ae]425{
[c058a388]426 assert(hc);
[110d795]427 assert(cmd);
[b80c1ab]428 assert(dma_buffer_is_set(&cmd->input_ctx));
[c058a388]429
[c9ce62ae]430 /**
431 * Note: All Drop Context flags of the input context shall be 0,
432 * all Add Context flags shall be initialized to indicate the IDs
433 * of the contexts affected by the command.
434 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info; a sketch follows this function.
435 */
[c3d926f3]436 xhci_trb_clean(&cmd->_header.trb);
[c9ce62ae]437
[b80c1ab]438 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
[c9ce62ae]439
[c3d926f3]440 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
441 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[110d795]442
[548c123]443 return enqueue_command(hc, cmd, 0, 0);
[c9ce62ae]444}
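/*
 * Sketch for the note above (hypothetical helper names; the real input
 * context layout lives in hw_struct/context.h). Before issuing Evaluate
 * Context for the slot context and EP0 context, a caller would prepare the
 * input control context roughly like this:
 *
 *   xhci_input_ctx_t *ictx = cmd->input_ctx.virt;
 *   // all Drop Context flags are left 0
 *   XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 0);   // A0: slot context
 *   XHCI_INPUT_CTRL_CTX_ADD_SET(ictx->ctrl_ctx, 1);   // A1: EP0 context
 */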
445
[c3d926f3]446static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]447{
[c058a388]448 assert(hc);
[110d795]449 assert(cmd);
[c058a388]450
[05aeee0e]451 /**
452 * Note: TCS can have values 0 or 1. If it is set to 0, see section 4.5.8 for
453 * information about this flag.
454 */
[c3d926f3]455 xhci_trb_clean(&cmd->_header.trb);
[05aeee0e]456
[c3d926f3]457 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
458 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
459 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
460 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]461
[548c123]462 return enqueue_command(hc, cmd, 0, 0);
[05aeee0e]463}
464
[c3d926f3]465static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[05aeee0e]466{
[c058a388]467 assert(hc);
[110d795]468 assert(cmd);
[c058a388]469
[c3d926f3]470 xhci_trb_clean(&cmd->_header.trb);
[110d795]471
[c3d926f3]472 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
473 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
474 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
475 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[05aeee0e]476
[548c123]477 return enqueue_command(hc, cmd, 0, 0);
[c058a388]478}
[05aeee0e]479
[c3d926f3]480static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[0cabd10]481{
482 assert(hc);
483 assert(cmd);
484
[c3d926f3]485 xhci_trb_clean(&cmd->_header.trb);
[0cabd10]486
[c3d926f3]487 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
488 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
489 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
490 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
491 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
[0cabd10]492
493 /**
494 * TODO: Set DCS (see section 4.6.10).
495 */
496
[548c123]497 return enqueue_command(hc, cmd, 0, 0);
[0cabd10]498}
499
[c3d926f3]500static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[c058a388]501{
502 assert(hc);
[110d795]503 assert(cmd);
[c058a388]504
[c3d926f3]505 xhci_trb_clean(&cmd->_header.trb);
[c058a388]506
[c3d926f3]507 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
[c9bec1c]509
[548c123]510 return enqueue_command(hc, cmd, 0, 0);
[05aeee0e]511}
512
[c3d926f3]513static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
[60af4cdb]514{
515 assert(hc);
516 assert(cmd);
517
[c3d926f3]518 xhci_trb_clean(&cmd->_header.trb);
[60af4cdb]519
[b80c1ab]520 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
[60af4cdb]521
[c3d926f3]522 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
523 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
524 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
[60af4cdb]525
526 return enqueue_command(hc, cmd, 0, 0);
527}
528
[c3d926f3]529/* The table of command-issuing functions. */
530
531typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);
532
533static cmd_handler cmd_handlers [] = {
534 [XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
535 [XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
536 [XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
537 [XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
538 [XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
539 [XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
540 [XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
541 [XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
542 [XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
543 // TODO: Force event (optional normative, for VMM, section 4.6.12).
544 [XHCI_CMD_FORCE_EVENT] = NULL,
545 // TODO: Negotiate bandwidth (optional normative, section 4.6.13).
546 [XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
547 // TODO: Set latency tolerance value (optional normative, section 4.6.14).
548 [XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
549 // TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
550 [XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
551 // TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
552 [XHCI_CMD_FORCE_HEADER] = NULL,
553 [XHCI_CMD_NO_OP] = no_op_cmd
554};
555
[889146e]556static int try_abort_current_command(xhci_hc_t *hc)
557{
558 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
559
560 fibril_mutex_lock(&cr->guard);
561
562 if (cr->state != XHCI_CR_STATE_OPEN) {
563 // The CR is either stopped, or a different fibril is already
564 // restarting it.
565 fibril_mutex_unlock(&cr->guard);
566 return EOK;
567 }
568
569 usb_log_error("HC(%p): Timeout while waiting for command: aborting current command.", hc);
570
571 cr->state = XHCI_CR_STATE_CHANGING;
572 fibril_condvar_broadcast(&cr->state_cv);
573
574 abort_command_ring(hc);
575
576 fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);
577
578 if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
579 /* 4.6.1.2, implementation note
580 * Assume there are larger problems with the HC and
581 * reset it.
582 */
583 usb_log_error("HC(%p): Command didn't abort.", hc);
584
585 cr->state = XHCI_CR_STATE_CLOSED;
586 fibril_condvar_broadcast(&cr->state_cv);
587
588 // TODO: Reset HC completely.
589 // Don't forget to somehow complete all commands with error.
590
591 fibril_mutex_unlock(&cr->guard);
592 return ENAK;
593 }
594
595 usb_log_error("HC(%p): Command ring stopped. Starting again.", hc);
596 hc_ring_doorbell(hc, 0, 0);
597
598 cr->state = XHCI_CR_STATE_OPEN;
599 fibril_condvar_broadcast(&cr->state_cv);
600
601 fibril_mutex_unlock(&cr->guard);
602 return EOK;
603}
604
605static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
[f9e7fe8]606{
[c3d926f3]607 int rv = EOK;
[c058a388]608
[c3d926f3]609 fibril_mutex_lock(&cmd->_header.completed_mtx);
610 while (!cmd->_header.completed) {
[f9e7fe8]611
[889146e]612 rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);
613
614 /* The wait timed out. The current command (not necessarily
615 * ours) is probably blocked.
616 */
617 if (!cmd->_header.completed && rv == ETIMEOUT) {
618 fibril_mutex_unlock(&cmd->_header.completed_mtx);
619
620 rv = try_abort_current_command(hc);
621 if (rv)
622 return rv;
623
624 fibril_mutex_lock(&cmd->_header.completed_mtx);
[c3d926f3]625 }
626 }
627 fibril_mutex_unlock(&cmd->_header.completed_mtx);
[f711f06]628
[c3d926f3]629 return rv;
630}
[2fa43d1]631
[c3d926f3]632/** Issue a command and block the current fibril until it either completes or is
633 * aborted after a timeout. Nothing is deallocated; the caller should always execute `xhci_cmd_fini`.
634 */
635int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
636{
637 assert(hc);
638 assert(cmd);
[2fa43d1]639
[c3d926f3]640 int err;
641
642 if (!cmd_handlers[cmd->_header.cmd]) {
643 /* Handler not implemented. */
644 return ENOTSUP;
[2fa43d1]645 }
[110d795]646
[c3d926f3]647 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
648 /* Command could not be issued. */
649 return err;
650 }
[110d795]651
[889146e]652 if ((err = wait_for_cmd_completion(hc, cmd))) {
653 /* Command failed. */
[c3d926f3]654 return err;
[665bf3c]655 }
[c362127]656
[3cbc138]657 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
[c3d926f3]658}
[110d795]659
[c3d926f3]660/** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
661 * is a useful shorthand for issuing commands without out parameters.
662 */
663int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
664{
665 const int err = xhci_cmd_sync(hc, cmd);
666 xhci_cmd_fini(cmd);
667
668 return err;
669}
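/*
 * Typical use of the shorthand above for a command that has inputs but no
 * out parameters (a sketch; slot_id and dci stand in for the caller's
 * values):
 *
 *   xhci_cmd_t cmd;
 *   xhci_cmd_init(&cmd, XHCI_CMD_RESET_ENDPOINT);
 *   cmd.slot_id = slot_id;
 *   cmd.endpoint_id = dci;
 *   const int err = xhci_cmd_sync_fini(hc, &cmd);
 */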
670
671/** Does the same thing as `xhci_cmd_sync_fini`, but without blocking the current
672 * fibril. The command is copied to the heap and `xhci_cmd_fini` is called upon its completion.
673 */
674int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
675{
676 assert(hc);
677 assert(stack_cmd);
678
679 /* Save the command for later. */
680 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
681 if (!heap_cmd) {
682 return ENOMEM;
683 }
684
685 /* TODO: Is this good for the mutex and the condvar? */
686 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
687 heap_cmd->_header.async = true;
688
689 /* Issue the command. */
690 int err;
691
692 if (!cmd_handlers[heap_cmd->_header.cmd]) {
693 /* Handler not implemented. */
694 err = ENOTSUP;
695 goto err_heap_cmd;
[f711f06]696 }
[110d795]697
[c3d926f3]698 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
699 /* Command could not be issued. */
700 goto err_heap_cmd;
701 }
[4688350b]702
[110d795]703 return EOK;
[c9c0e41]704
[c3d926f3]705err_heap_cmd:
706 free(heap_cmd);
707 return err;
708}
[c9c0e41]709
710/**
711 * @}
712 */