source: mainline/uspace/drv/bus/usb/xhci/commands.c@ f3baab1

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since f3baab1 was f3baab1, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: do not rely on internal fibril quirks

Previously, we abused the fact that new fibrils are spawned for handling
notifications, so we could afford blocking the event handler. We were
told this is subject to change and we should stop doing it.

This commit removes the abuse, but newly requires event handlers not to
block waiting for another event (e.g. commands do wait for events). To
quickly detect this situation, deadlock detection was added.

This commit breaks current functionality. Our current job is to identify
processes which do block and have them moved to separate fibril / spawn
fibril for the process alone.

  • Property mode set to 100644
File size: 21.2 KB
Line 
1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
42#include "hw_struct/context.h"
43#include "hw_struct/trb.h"
44
/*
 * Helpers to compose the fields of a command TRB. All of them OR the shifted
 * value in, so the TRB has to be zeroed first (see xhci_trb_clean).
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/* The Endpoint ID is a 5-bit field (DCI 1-31), hence the 0x1F mask.
 * The previous 0x5 mask corrupted most endpoint IDs (e.g. 3 & 0x5 == 1). */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

/* Extractors for the fields of a COMMAND_COMPLETION event TRB. */
#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
64/* Control functions */
65
/** Return the command ring structure owned by the given host controller. */
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}
71
72/**
73 * Initialize the command subsystem. Allocates the comand ring.
74 *
75 * Does not configure the CR pointer to the hardware, because the xHC will be
76 * reset before starting.
77 */
78int xhci_init_commands(xhci_hc_t *hc)
79{
80 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
81 int err;
82
83 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
84 return err;
85
86 fibril_mutex_initialize(&cr->guard);
87 fibril_condvar_initialize(&cr->state_cv);
88 fibril_condvar_initialize(&cr->stopped_cv);
89
90 list_initialize(&cr->cmd_list);
91
92 cr->state = XHCI_CR_STATE_OPEN;
93
94 return EOK;
95}
96
/**
 * Finish the command subsystem. Stops the hardware from running commands, then
 * deallocates the ring.
 */
void xhci_fini_commands(xhci_hc_t *hc)
{
	assert(hc);
	/* Make sure the hardware no longer touches the ring before freeing it. */
	xhci_stop_command_ring(hc);

	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	/* Hold the guard so no fibril observes a half-torn-down ring. */
	fibril_mutex_lock(&cr->guard);
	xhci_trb_ring_fini(&cr->trb_ring);
	fibril_mutex_unlock(&cr->guard);
}
112
113/**
114 * Initialize a command structure for the given command.
115 */
116void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
117{
118 memset(cmd, 0, sizeof(*cmd));
119
120 link_initialize(&cmd->_header.link);
121
122 fibril_mutex_initialize(&cmd->_header.completed_mtx);
123 fibril_condvar_initialize(&cmd->_header.completed_cv);
124
125 cmd->_header.cmd = type;
126}
127
128/**
129 * Finish the command structure. Some command invocation includes allocating
130 * a context structure. To have the convenience in calling commands, this
131 * method deallocates all resources.
132 */
133void xhci_cmd_fini(xhci_cmd_t *cmd)
134{
135 list_remove(&cmd->_header.link);
136
137 dma_buffer_free(&cmd->input_ctx);
138 dma_buffer_free(&cmd->bandwidth_ctx);
139
140 if (cmd->_header.async) {
141 free(cmd);
142 }
143}
144
145/**
146 * Find a command issued by TRB at @c phys inside the command list.
147 *
148 * Call with guard locked only.
149 */
150static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
151{
152 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
153 assert(fibril_mutex_is_locked(&cr->guard));
154
155 link_t *cmd_link = list_first(&cr->cmd_list);
156
157 while (cmd_link != NULL) {
158 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
159
160 if (cmd->_header.trb_phys == phys)
161 break;
162
163 cmd_link = list_next(cmd_link, &cr->cmd_list);
164 }
165
166 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
167 : NULL;
168}
169
/**
 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
 * Register the command as waiting for completion inside the command list.
 *
 * Blocks while the ring is CHANGING (being restarted after an abort, see
 * try_abort_current_command); returns ENAK once the ring is closed for good.
 */
static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(cmd);

	fibril_mutex_lock(&cr->guard);

	/* Wait out a transient restart of the ring. */
	while (cr->state == XHCI_CR_STATE_CHANGING)
		fibril_condvar_wait(&cr->state_cv, &cr->guard);

	/* Anything but OPEN at this point means the ring was closed. */
	if (cr->state != XHCI_CR_STATE_OPEN) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_debug2("HC(%p): Sending command:", hc);
	xhci_dump_trb(&cmd->_header.trb);

	/* Track the command so the completion handler can find it by TRB address. */
	list_append(&cmd->_header.link, &cr->cmd_list);

	xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
	/* Doorbell 0 notifies the xHC that the command ring has work. */
	hc_ring_doorbell(hc, 0, 0);

	fibril_mutex_unlock(&cr->guard);

	return EOK;
}
201
/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr->state = XHCI_CR_STATE_CLOSED;
	fibril_condvar_broadcast(&cr->state_cv);

	/* Request the stop through the Command Stop (CS) bit of CRCR. */
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	/* The completion handler broadcasts stopped_cv when it receives the
	 * COMMAND_RING_STOPPED event; re-check CRR in case of spurious wakeup. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
224
/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 *
 * Only writes the Command Abort (CA) bit; the caller must wait for the
 * resulting COMMAND_RING_STOPPED event under the command ring guard.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}
234
/*
 * Human-readable names of TRB completion codes, indexed by code value.
 * Codes without a name (reserved / vendor-defined) map to NULL and are
 * reported generically by report_error().
 */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
277
278/**
279 * Report an error according to command completion code.
280 */
281static void report_error(int code)
282{
283 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
284 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
285 else
286 usb_log_error("Command resulted in reserved or vendor specific error.");
287}
288
289/**
290 * Handle a command completion. Feed the fibril waiting for result.
291 *
292 * @param trb The COMMAND_COMPLETION TRB found in event ring.
293 */
294int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
295{
296 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
297 assert(trb);
298
299 usb_log_debug2("HC(%p) Command completed.", hc);
300
301 fibril_mutex_lock(&cr->guard);
302
303 int code = TRB_GET_CODE(*trb);
304 const uint64_t phys = TRB_GET_PHYS(*trb);
305
306 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
307
308 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
309 /* This can either mean that the ring is being stopped, or
310 * a command was aborted. In either way, wake threads waiting
311 * on stopped_cv.
312 *
313 * Note that we need to hold mutex, because we must be sure the
314 * requesting thread is waiting inside the CV.
315 */
316 fibril_condvar_broadcast(&cr->stopped_cv);
317 fibril_mutex_unlock(&cr->guard);
318 return EOK;
319 }
320
321 xhci_cmd_t *command = find_command(hc, phys);
322 if (command == NULL) {
323 usb_log_error("No command struct for this completion event found.");
324
325 if (code != XHCI_TRBC_SUCCESS)
326 report_error(code);
327
328 return EOK;
329 }
330
331 list_remove(&command->_header.link);
332
333 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
334 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
335 code = XHCI_TRBC_SUCCESS;
336
337 command->status = code;
338 command->slot_id = TRB_GET_SLOT(*trb);
339
340 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
341
342 if (code != XHCI_TRBC_SUCCESS) {
343 report_error(code);
344 xhci_dump_trb(&command->_header.trb);
345 }
346
347 switch (TRB_TYPE(command->_header.trb)) {
348 case XHCI_TRB_TYPE_NO_OP_CMD:
349 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
350 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
351 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
352 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
353 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
354 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
355 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
356 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
357 break;
358 default:
359 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
360 return ENAK;
361 }
362
363 fibril_mutex_unlock(&cr->guard);
364
365 fibril_mutex_lock(&command->_header.completed_mtx);
366 command->_header.completed = true;
367 fibril_condvar_broadcast(&command->_header.completed_cv);
368 fibril_mutex_unlock(&command->_header.completed_mtx);
369
370 if (command->_header.async) {
371 /* Free the command and other DS upon completion. */
372 xhci_cmd_fini(command);
373 }
374
375 return EOK;
376}
377
378/* Command-issuing functions */
379
380static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
381{
382 assert(hc);
383
384 xhci_trb_clean(&cmd->_header.trb);
385
386 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
387
388 return enqueue_command(hc, cmd);
389}
390
391static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
392{
393 assert(hc);
394
395 xhci_trb_clean(&cmd->_header.trb);
396
397 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
398 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
399
400 return enqueue_command(hc, cmd);
401}
402
403static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
404{
405 assert(hc);
406 assert(cmd);
407
408 xhci_trb_clean(&cmd->_header.trb);
409
410 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
411 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
412
413 return enqueue_command(hc, cmd);
414}
415
416static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
417{
418 assert(hc);
419 assert(cmd);
420 assert(dma_buffer_is_set(&cmd->input_ctx));
421
422 /**
423 * TODO: Requirements for this command:
424 * dcbaa[slot_id] is properly sized and initialized
425 * ictx has valids slot context and endpoint 0, all
426 * other should be ignored at this point (see section 4.6.5).
427 */
428
429 xhci_trb_clean(&cmd->_header.trb);
430
431 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
432
433 /**
434 * Note: According to section 6.4.3.4, we can set the 9th bit
435 * of the control field of the trb (BSR) to 1 and then the xHC
436 * will not issue the SET_ADDRESS request to the USB device.
437 * This can be used to provide compatibility with legacy USB devices
438 * that require their device descriptor to be read before such request.
439 */
440 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
441 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
442
443 return enqueue_command(hc, cmd);
444}
445
446static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
447{
448 assert(hc);
449 assert(cmd);
450
451 xhci_trb_clean(&cmd->_header.trb);
452
453 if (!cmd->deconfigure) {
454 /* If the DC flag is on, input context is not evaluated. */
455 assert(dma_buffer_is_set(&cmd->input_ctx));
456
457 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
458 }
459
460 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
461 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
462 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
463
464 return enqueue_command(hc, cmd);
465}
466
467static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
468{
469 assert(hc);
470 assert(cmd);
471 assert(dma_buffer_is_set(&cmd->input_ctx));
472
473 /**
474 * Note: All Drop Context flags of the input context shall be 0,
475 * all Add Context flags shall be initialize to indicate IDs
476 * of the contexts affected by the command.
477 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
478 */
479 xhci_trb_clean(&cmd->_header.trb);
480
481 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
482
483 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
484 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
485
486 return enqueue_command(hc, cmd);
487}
488
489static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
490{
491 assert(hc);
492 assert(cmd);
493
494 /**
495 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
496 * information about this flag.
497 */
498 xhci_trb_clean(&cmd->_header.trb);
499
500 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
501 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
502 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
503 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
504
505 return enqueue_command(hc, cmd);
506}
507
508static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
509{
510 assert(hc);
511 assert(cmd);
512
513 xhci_trb_clean(&cmd->_header.trb);
514
515 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
516 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
517 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
518 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
519
520 return enqueue_command(hc, cmd);
521}
522
523static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
524{
525 assert(hc);
526 assert(cmd);
527
528 xhci_trb_clean(&cmd->_header.trb);
529
530 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
531 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
532 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
533 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
534 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
535
536 /**
537 * TODO: Set DCS (see section 4.6.10).
538 */
539
540 return enqueue_command(hc, cmd);
541}
542
543static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
544{
545 assert(hc);
546 assert(cmd);
547
548 xhci_trb_clean(&cmd->_header.trb);
549
550 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
551 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
552
553 return enqueue_command(hc, cmd);
554}
555
556static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
557{
558 assert(hc);
559 assert(cmd);
560
561 xhci_trb_clean(&cmd->_header.trb);
562
563 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
564
565 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
566 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
567 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
568
569 return enqueue_command(hc, cmd);
570}
571
/* The table of command-issuing functions. */

typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/*
 * Maps xhci_cmd_type_t to the function that builds and enqueues the TRB.
 * NULL entries are commands that are not implemented; callers report them
 * as ENOTSUP.
 */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
593
/**
 * Try to abort currently processed command. This is tricky, because
 * calling fibril is not necessarily the one which issued the blocked command.
 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by
 * event, which is again handled in different fibril. but, once we go to sleep
 * on waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied by
 * COMMAND_ABORTED event.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		// The CR is either stopped, or different fibril is already
		// restarting it.
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("HC(%p): Timeout while waiting for command: aborting current command.", hc);

	/* Block newcomers in enqueue_command until the restart finishes. */
	cr->state = XHCI_CR_STATE_CHANGING;
	fibril_condvar_broadcast(&cr->state_cv);

	abort_command_ring(hc);

	/* Bounded wait: the completion handler signals stopped_cv when the
	 * COMMAND_RING_STOPPED event arrives. */
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("HC(%p): Command didn't abort.", hc);

		/* Close the ring for good; no fibril may enqueue anymore. */
		cr->state = XHCI_CR_STATE_CLOSED;
		fibril_condvar_broadcast(&cr->state_cv);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_error("HC(%p): Command ring stopped. Starting again.", hc);
	/* Ringing doorbell 0 resumes command processing. */
	hc_ring_doorbell(hc, 0, 0);

	/* Reopen the ring and release the fibrils blocked in enqueue_command. */
	cr->state = XHCI_CR_STATE_OPEN;
	fibril_condvar_broadcast(&cr->state_cv);

	fibril_mutex_unlock(&cr->guard);
	return EOK;
}
654
/**
 * Wait, until the command is completed. The completion is triggered by
 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the
 * command in timely manner, we timeout. Note that we can't just return an
 * error after the timeout pass - it may be other command blocking the ring,
 * and ours can be completed afterwards. Therefore, it is not guaranteed that
 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting
 * until COMMAND_COMPLETION event arrives.
 */
static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	int rv = EOK;

	/* Waiting for an event inside the event-handler fibril could never be
	 * satisfied (the handler would be blocked on itself) -- fail loudly. */
	if (fibril_get_id() == hc->event_handler) {
		usb_log_error("Deadlock detected in waiting for command.");
		abort();
	}

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/* The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			/* Release our mutex before taking the ring guard in
			 * the abort path, then re-take it and re-check. */
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}
695
696/**
697 * Issue command and block the current fibril until it is completed or timeout
698 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
699 */
700int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
701{
702 assert(hc);
703 assert(cmd);
704
705 int err;
706
707 if (!cmd_handlers[cmd->_header.cmd]) {
708 /* Handler not implemented. */
709 return ENOTSUP;
710 }
711
712 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
713 /* Command could not be issued. */
714 return err;
715 }
716
717 if ((err = wait_for_cmd_completion(hc, cmd))) {
718 /* Command failed. */
719 return err;
720 }
721
722 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
723}
724
725/**
726 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
727 * is a useful shorthand for issuing commands without out parameters.
728 */
729int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
730{
731 const int err = xhci_cmd_sync(hc, cmd);
732 xhci_cmd_fini(cmd);
733
734 return err;
735}
736
737/**
738 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
739 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
740 */
741int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
742{
743 assert(hc);
744 assert(stack_cmd);
745
746 /* Save the command for later. */
747 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
748 if (!heap_cmd) {
749 return ENOMEM;
750 }
751
752 /* TODO: Is this good for the mutex and the condvar? */
753 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
754 heap_cmd->_header.async = true;
755
756 /* Issue the command. */
757 int err;
758
759 if (!cmd_handlers[heap_cmd->_header.cmd]) {
760 /* Handler not implemented. */
761 err = ENOTSUP;
762 goto err_heap_cmd;
763 }
764
765 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
766 /* Command could not be issued. */
767 goto err_heap_cmd;
768 }
769
770 return EOK;
771
772err_heap_cmd:
773 free(heap_cmd);
774 return err;
775}
776
777/**
778 * @}
779 */
Note: See TracBrowser for help on using the repository browser.