source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 889146e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 889146e was 889146e, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: commands shall not just timeout

Previous behavior was breaking semantic: if a command was successful,
but just took too long to complete, we returned an error, and the caller
had no way to know if the command's effect has taken place.

This commit implements command aborting. The wait_for_command now cannot
just timeout - instead it aborts currently running (probably blocked)
command, and then gets back to waiting. So now, if command_sync returns
an error, it means the command was really unsuccessful.

If aborting the command takes too long, we should reset the whole HC.
This is not yet implemented.

  • Property mode set to 100644
File size: 19.3 KB
Line 
1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
42#include "hw_struct/context.h"
43#include "hw_struct/trb.h"
44
/*
 * Helpers for filling TRB fields. All of these OR the new value in, so the
 * TRB must be zeroed (xhci_trb_clean) before the first setter is applied.
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/*
 * NOTE(review): the Endpoint ID field is 5 bits wide (xHCI spec, section
 * 6.4.3.9); the previous mask 0x5 dropped bits 1, 3 and 4 of the endpoint
 * ID, corrupting most endpoint-targeted commands. Fixed to 0x1F.
 */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
64/* Control functions */
65
/** Return the (single) command ring of the given host controller. */
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}
71
72int xhci_init_commands(xhci_hc_t *hc)
73{
74 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
75 int err;
76
77 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
78 return err;
79
80 fibril_mutex_initialize(&cr->guard);
81 fibril_condvar_initialize(&cr->state_cv);
82 fibril_condvar_initialize(&cr->stopped_cv);
83
84 list_initialize(&cr->cmd_list);
85
86 cr->state = XHCI_CR_STATE_OPEN;
87
88 return EOK;
89}
90
91void xhci_fini_commands(xhci_hc_t *hc)
92{
93 xhci_stop_command_ring(hc);
94 assert(hc);
95}
96
97void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
98{
99 memset(cmd, 0, sizeof(*cmd));
100
101 link_initialize(&cmd->_header.link);
102
103 fibril_mutex_initialize(&cmd->_header.completed_mtx);
104 fibril_condvar_initialize(&cmd->_header.completed_cv);
105
106 cmd->_header.cmd = type;
107}
108
109void xhci_cmd_fini(xhci_cmd_t *cmd)
110{
111 list_remove(&cmd->_header.link);
112
113 dma_buffer_free(&cmd->input_ctx);
114 dma_buffer_free(&cmd->bandwidth_ctx);
115
116 if (cmd->_header.async) {
117 free(cmd);
118 }
119}
120
121/** Call with guard locked. */
122static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
123{
124 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
125 assert(fibril_mutex_is_locked(&cr->guard));
126
127 link_t *cmd_link = list_first(&cr->cmd_list);
128
129 while (cmd_link != NULL) {
130 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
131
132 if (cmd->_header.trb_phys == phys)
133 break;
134
135 cmd_link = list_next(cmd_link, &cr->cmd_list);
136 }
137
138 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
139 : NULL;
140}
141
142static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd, unsigned doorbell, unsigned target)
143{
144 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
145 assert(cmd);
146
147 fibril_mutex_lock(&cr->guard);
148
149 while (cr->state == XHCI_CR_STATE_CHANGING)
150 fibril_condvar_wait(&cr->state_cv, &cr->guard);
151
152 if (cr->state != XHCI_CR_STATE_OPEN) {
153 fibril_mutex_unlock(&cr->guard);
154 return ENAK;
155 }
156
157 usb_log_debug2("HC(%p): Sending command:", hc);
158 xhci_dump_trb(&cmd->_header.trb);
159
160 list_append(&cmd->_header.link, &cr->cmd_list);
161
162 xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
163 hc_ring_doorbell(hc, 0, 0);
164
165 fibril_mutex_unlock(&cr->guard);
166
167 return EOK;
168}
169
/** Stop the command ring permanently.
 *
 * Closes the ring (so no new commands are accepted), requests Command Stop
 * via the CRCR register, and blocks until the HC reports the ring is no
 * longer running. The stopped_cv is signalled by the completion handler
 * when it sees a COMMAND_RING_STOPPED event.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr->state = XHCI_CR_STATE_CLOSED;
	fibril_condvar_broadcast(&cr->state_cv);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	// Wait until the completion handler observes COMMAND_RING_STOPPED.
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
188
/** Request the HC to abort the currently executing command.
 *
 * Sets the Command Abort bit in CRCR. The HC will post a
 * COMMAND_RING_STOPPED event once the abort takes effect; the caller is
 * expected to wait for it (see try_abort_current_command).
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}
194
/** Human-readable names of TRB completion codes, indexed by code value.
 * Entries not covered below stay NULL (reserved / vendor specific).
 */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
237
238static void report_error(int code)
239{
240 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
241 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
242 else
243 usb_log_error("Command resulted in reserved or vendor specific error.");
244}
245
246int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
247{
248 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
249 assert(trb);
250
251 usb_log_debug2("HC(%p) Command completed.", hc);
252
253 fibril_mutex_lock(&cr->guard);
254
255 int code = TRB_GET_CODE(*trb);
256 const uint64_t phys = TRB_GET_PHYS(*trb);
257
258 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
259
260 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
261 /* This can either mean that the ring is being stopped, or
262 * a command was aborted. In either way, wake threads waiting
263 * on stopped_cv.
264 *
265 * Note that we need to hold mutex, because we must be sure the
266 * requesting thread is waiting inside the CV.
267 */
268 fibril_condvar_broadcast(&cr->stopped_cv);
269 fibril_mutex_unlock(&cr->guard);
270 return EOK;
271 }
272
273 xhci_cmd_t *command = find_command(hc, phys);
274 if (command == NULL) {
275 usb_log_error("No command struct for this completion event found.");
276
277 if (code != XHCI_TRBC_SUCCESS)
278 report_error(code);
279
280 return EOK;
281 }
282
283 list_remove(&command->_header.link);
284
285 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
286 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
287 code = XHCI_TRBC_SUCCESS;
288
289 command->status = code;
290 command->slot_id = TRB_GET_SLOT(*trb);
291
292 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
293
294 if (code != XHCI_TRBC_SUCCESS) {
295 report_error(code);
296 xhci_dump_trb(&command->_header.trb);
297 }
298
299 switch (TRB_TYPE(command->_header.trb)) {
300 case XHCI_TRB_TYPE_NO_OP_CMD:
301 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
302 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
303 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
304 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
305 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
306 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
307 break;
308 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
309 // Note: If the endpoint was in the middle of a transfer, then the xHC
310 // will add a Transfer TRB before the Event TRB, research that and
311 // handle it appropriately!
312 break;
313 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
314 break;
315 default:
316 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
317 return ENAK;
318 }
319
320 fibril_mutex_unlock(&cr->guard);
321
322 fibril_mutex_lock(&command->_header.completed_mtx);
323 command->_header.completed = true;
324 fibril_condvar_broadcast(&command->_header.completed_cv);
325 fibril_mutex_unlock(&command->_header.completed_mtx);
326
327 if (command->_header.async) {
328 /* Free the command and other DS upon completion. */
329 xhci_cmd_fini(command);
330 }
331
332 return EOK;
333}
334
335/* Command-issuing functions */
336
337static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
338{
339 assert(hc);
340
341 xhci_trb_clean(&cmd->_header.trb);
342
343 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
344
345 return enqueue_command(hc, cmd, 0, 0);
346}
347
348static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
349{
350 assert(hc);
351
352 xhci_trb_clean(&cmd->_header.trb);
353
354 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
355 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
356
357 return enqueue_command(hc, cmd, 0, 0);
358}
359
360static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
361{
362 assert(hc);
363 assert(cmd);
364
365 xhci_trb_clean(&cmd->_header.trb);
366
367 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
368 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
369
370 return enqueue_command(hc, cmd, 0, 0);
371}
372
373static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
374{
375 assert(hc);
376 assert(cmd);
377 assert(dma_buffer_is_set(&cmd->input_ctx));
378
379 /**
380 * TODO: Requirements for this command:
381 * dcbaa[slot_id] is properly sized and initialized
382 * ictx has valids slot context and endpoint 0, all
383 * other should be ignored at this point (see section 4.6.5).
384 */
385
386 xhci_trb_clean(&cmd->_header.trb);
387
388 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
389
390 /**
391 * Note: According to section 6.4.3.4, we can set the 9th bit
392 * of the control field of the trb (BSR) to 1 and then the xHC
393 * will not issue the SET_ADDRESS request to the USB device.
394 * This can be used to provide compatibility with legacy USB devices
395 * that require their device descriptor to be read before such request.
396 */
397 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
398 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
399
400 return enqueue_command(hc, cmd, 0, 0);
401}
402
403static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
404{
405 assert(hc);
406 assert(cmd);
407
408 xhci_trb_clean(&cmd->_header.trb);
409
410 if (!cmd->deconfigure) {
411 /* If the DC flag is on, input context is not evaluated. */
412 assert(dma_buffer_is_set(&cmd->input_ctx));
413
414 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
415 }
416
417 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
418 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
419 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
420
421 return enqueue_command(hc, cmd, 0, 0);
422}
423
424static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
425{
426 assert(hc);
427 assert(cmd);
428 assert(dma_buffer_is_set(&cmd->input_ctx));
429
430 /**
431 * Note: All Drop Context flags of the input context shall be 0,
432 * all Add Context flags shall be initialize to indicate IDs
433 * of the contexts affected by the command.
434 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
435 */
436 xhci_trb_clean(&cmd->_header.trb);
437
438 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
439
440 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
441 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
442
443 return enqueue_command(hc, cmd, 0, 0);
444}
445
446static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
447{
448 assert(hc);
449 assert(cmd);
450
451 /**
452 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
453 * information about this flag.
454 */
455 xhci_trb_clean(&cmd->_header.trb);
456
457 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
458 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
459 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
460 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
461
462 return enqueue_command(hc, cmd, 0, 0);
463}
464
465static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
466{
467 assert(hc);
468 assert(cmd);
469
470 xhci_trb_clean(&cmd->_header.trb);
471
472 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
473 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
474 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
475 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
476
477 return enqueue_command(hc, cmd, 0, 0);
478}
479
480static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
481{
482 assert(hc);
483 assert(cmd);
484
485 xhci_trb_clean(&cmd->_header.trb);
486
487 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
488 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
489 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
490 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
491 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
492
493 /**
494 * TODO: Set DCS (see section 4.6.10).
495 */
496
497 return enqueue_command(hc, cmd, 0, 0);
498}
499
500static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
501{
502 assert(hc);
503 assert(cmd);
504
505 xhci_trb_clean(&cmd->_header.trb);
506
507 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
509
510 return enqueue_command(hc, cmd, 0, 0);
511}
512
513static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
514{
515 assert(hc);
516 assert(cmd);
517
518 xhci_trb_clean(&cmd->_header.trb);
519
520 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
521
522 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
523 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
524 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
525
526 return enqueue_command(hc, cmd, 0, 0);
527}
528
529/* The table of command-issuing functions. */
530
/** Signature of a command-issuing function. */
typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/** Dispatch table indexed by xhci_cmd_type_t; NULL means not implemented
 * (xhci_cmd_sync / xhci_cmd_async_fini return ENOTSUP for those).
 */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	// TODO: Force event (optional normative, for VMM, section 4.6.12).
	[XHCI_CMD_FORCE_EVENT] = NULL,
	// TODO: Negotiate bandwidth (optional normative, section 4.6.13).
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	// TODO: Set latency tolerance value (optional normative, section 4.6.14).
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	// TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	// TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
555
/** Abort the command currently being executed by the HC, then restart the
 * command ring.
 *
 * Called when waiting for a command completion timed out. Moves the ring
 * through the CHANGING state so that enqueue_command blocks concurrent
 * issuers, requests the abort, and waits (bounded by XHCI_CR_ABORT_TIMEOUT)
 * for the COMMAND_RING_STOPPED event to signal stopped_cv.
 *
 * @return EOK when the ring was aborted and restarted (or was not OPEN to
 *         begin with), ENAK when the abort itself timed out and the ring
 *         had to be closed.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		// The CR is either stopped, or different fibril is already
		// restarting it.
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("HC(%p): Timeout while waiting for command: aborting current command.", hc);

	cr->state = XHCI_CR_STATE_CHANGING;
	fibril_condvar_broadcast(&cr->state_cv);

	abort_command_ring(hc);

	// Bounded wait: the completion handler signals stopped_cv on
	// COMMAND_RING_STOPPED.
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("HC(%p): Command didn't abort.", hc);

		cr->state = XHCI_CR_STATE_CLOSED;
		fibril_condvar_broadcast(&cr->state_cv);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_error("HC(%p): Command ring stopped. Starting again.", hc);
	hc_ring_doorbell(hc, 0, 0);

	cr->state = XHCI_CR_STATE_OPEN;
	fibril_condvar_broadcast(&cr->state_cv);

	fibril_mutex_unlock(&cr->guard);
	return EOK;
}
604
605static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
606{
607 int rv = EOK;
608
609 fibril_mutex_lock(&cmd->_header.completed_mtx);
610 while (!cmd->_header.completed) {
611
612 rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);
613
614 /* The waiting timed out. Current command (not necessarily
615 * ours) is probably blocked.
616 */
617 if (!cmd->_header.completed && rv == ETIMEOUT) {
618 fibril_mutex_unlock(&cmd->_header.completed_mtx);
619
620 rv = try_abort_current_command(hc);
621 if (rv)
622 return rv;
623
624 fibril_mutex_lock(&cmd->_header.completed_mtx);
625 }
626 }
627 fibril_mutex_unlock(&cmd->_header.completed_mtx);
628
629 return rv;
630}
631
632/** Issue command and block the current fibril until it is completed or timeout
633 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
634 */
635int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
636{
637 assert(hc);
638 assert(cmd);
639
640 int err;
641
642 if (!cmd_handlers[cmd->_header.cmd]) {
643 /* Handler not implemented. */
644 return ENOTSUP;
645 }
646
647 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
648 /* Command could not be issued. */
649 return err;
650 }
651
652 if ((err = wait_for_cmd_completion(hc, cmd))) {
653 /* Command failed. */
654 return err;
655 }
656
657 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
658}
659
660/** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
661 * is a useful shorthand for issuing commands without out parameters.
662 */
663int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
664{
665 const int err = xhci_cmd_sync(hc, cmd);
666 xhci_cmd_fini(cmd);
667
668 return err;
669}
670
671/** Does the same thing as `xhci_cmd_sync_fini` without blocking the current
672 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
673 */
674int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
675{
676 assert(hc);
677 assert(stack_cmd);
678
679 /* Save the command for later. */
680 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
681 if (!heap_cmd) {
682 return ENOMEM;
683 }
684
685 /* TODO: Is this good for the mutex and the condvar? */
686 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
687 heap_cmd->_header.async = true;
688
689 /* Issue the command. */
690 int err;
691
692 if (!cmd_handlers[heap_cmd->_header.cmd]) {
693 /* Handler not implemented. */
694 err = ENOTSUP;
695 goto err_heap_cmd;
696 }
697
698 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
699 /* Command could not be issued. */
700 goto err_heap_cmd;
701 }
702
703 return EOK;
704
705err_heap_cmd:
706 free(heap_cmd);
707 return err;
708}
709
710/**
711 * @}
712 */
Note: See TracBrowser for help on using the repository browser.