source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 17c5e62

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 17c5e62 was eb928c4, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: documentation & cleanup

Also, a simple refactoring to remove functions that only wrap other
functions and are unused anywhere else.

  • Property mode set to 100644
File size: 21.3 KB
Line 
1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
42#include "hw_struct/context.h"
43#include "hw_struct/trb.h"
44
/*
 * Helpers that OR individual fields into a command TRB. They all assume the
 * target field is currently zero (the TRB was cleaned beforehand).
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/*
 * Endpoint ID is a 5-bit field with valid values 1-31 (xHCI spec, sections
 * 6.4.3.6 - 6.4.3.9), so the mask must be 0x1F. The former mask 0x5 silently
 * corrupted every endpoint ID that had bit 1 or bit 3 set.
 */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

/* Extractors for fields of a command completion event TRB. */
#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
64/* Control functions */
65
/** Return the command ring owned by the given host controller. */
static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}
71
72/**
73 * Initialize the command subsystem. Allocates the comand ring.
74 *
75 * Does not configure the CR pointer to the hardware, because the xHC will be
76 * reset before starting.
77 */
78int xhci_init_commands(xhci_hc_t *hc)
79{
80 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
81 int err;
82
83 if ((err = xhci_trb_ring_init(&cr->trb_ring)))
84 return err;
85
86 fibril_mutex_initialize(&cr->guard);
87 fibril_condvar_initialize(&cr->state_cv);
88 fibril_condvar_initialize(&cr->stopped_cv);
89
90 list_initialize(&cr->cmd_list);
91
92 cr->state = XHCI_CR_STATE_OPEN;
93
94 return EOK;
95}
96
97/**
98 * Finish the command subsystem. Stops the hardware from running commands, then
99 * deallocates the ring.
100 */
101void xhci_fini_commands(xhci_hc_t *hc)
102{
103 assert(hc);
104 xhci_stop_command_ring(hc);
105
106 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
107
108 fibril_mutex_lock(&cr->guard);
109 xhci_trb_ring_fini(&cr->trb_ring);
110 fibril_mutex_unlock(&cr->guard);
111}
112
113/**
114 * Initialize a command structure for the given command.
115 */
116void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
117{
118 memset(cmd, 0, sizeof(*cmd));
119
120 link_initialize(&cmd->_header.link);
121
122 fibril_mutex_initialize(&cmd->_header.completed_mtx);
123 fibril_condvar_initialize(&cmd->_header.completed_cv);
124
125 cmd->_header.cmd = type;
126}
127
128/**
129 * Finish the command structure. Some command invocation includes allocating
130 * a context structure. To have the convenience in calling commands, this
131 * method deallocates all resources.
132 */
133void xhci_cmd_fini(xhci_cmd_t *cmd)
134{
135 list_remove(&cmd->_header.link);
136
137 dma_buffer_free(&cmd->input_ctx);
138 dma_buffer_free(&cmd->bandwidth_ctx);
139
140 if (cmd->_header.async) {
141 free(cmd);
142 }
143}
144
145/**
146 * Find a command issued by TRB at @c phys inside the command list.
147 *
148 * Call with guard locked only.
149 */
150static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
151{
152 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
153 assert(fibril_mutex_is_locked(&cr->guard));
154
155 link_t *cmd_link = list_first(&cr->cmd_list);
156
157 while (cmd_link != NULL) {
158 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
159
160 if (cmd->_header.trb_phys == phys)
161 break;
162
163 cmd_link = list_next(cmd_link, &cr->cmd_list);
164 }
165
166 return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
167 : NULL;
168}
169
170/**
171 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
172 * Register the command as waiting for completion inside the command list.
173 */
174static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
175{
176 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
177 assert(cmd);
178
179 fibril_mutex_lock(&cr->guard);
180
181 while (cr->state == XHCI_CR_STATE_CHANGING)
182 fibril_condvar_wait(&cr->state_cv, &cr->guard);
183
184 if (cr->state != XHCI_CR_STATE_OPEN) {
185 fibril_mutex_unlock(&cr->guard);
186 return ENAK;
187 }
188
189 usb_log_debug2("HC(%p): Sending command:", hc);
190 xhci_dump_trb(&cmd->_header.trb);
191
192 list_append(&cmd->_header.link, &cr->cmd_list);
193
194 xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
195 hc_ring_doorbell(hc, 0, 0);
196
197 fibril_mutex_unlock(&cr->guard);
198
199 return EOK;
200}
201
/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr->state = XHCI_CR_STATE_CLOSED;
	fibril_condvar_broadcast(&cr->state_cv);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	/* The stop is acknowledged by a COMMAND_RING_STOPPED completion event,
	 * whose handler broadcasts stopped_cv. Re-check CRR after each wakeup:
	 * it reads nonzero until the xHC really stops fetching commands. */
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}
224
/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 *
 * NOTE(review): the only visible caller (try_abort_current_command) holds
 * cr->guard across this call; the abort is acknowledged asynchronously via a
 * COMMAND_RING_STOPPED event.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}
234
/* Human-readable names of TRB completion codes, indexed by the code value.
 * Codes without a name (reserved / vendor specific) map to NULL. */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
277
278/**
279 * Report an error according to command completion code.
280 */
281static void report_error(int code)
282{
283 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
284 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
285 else
286 usb_log_error("Command resulted in reserved or vendor specific error.");
287}
288
289/**
290 * Handle a command completion. Feed the fibril waiting for result.
291 *
292 * @param trb The COMMAND_COMPLETION TRB found in event ring.
293 */
294int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
295{
296 xhci_cmd_ring_t *cr = get_cmd_ring(hc);
297 assert(trb);
298
299 usb_log_debug2("HC(%p) Command completed.", hc);
300
301 fibril_mutex_lock(&cr->guard);
302
303 int code = TRB_GET_CODE(*trb);
304 const uint64_t phys = TRB_GET_PHYS(*trb);
305
306 xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);
307
308 if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
309 /* This can either mean that the ring is being stopped, or
310 * a command was aborted. In either way, wake threads waiting
311 * on stopped_cv.
312 *
313 * Note that we need to hold mutex, because we must be sure the
314 * requesting thread is waiting inside the CV.
315 */
316 fibril_condvar_broadcast(&cr->stopped_cv);
317 fibril_mutex_unlock(&cr->guard);
318 return EOK;
319 }
320
321 xhci_cmd_t *command = find_command(hc, phys);
322 if (command == NULL) {
323 usb_log_error("No command struct for this completion event found.");
324
325 if (code != XHCI_TRBC_SUCCESS)
326 report_error(code);
327
328 return EOK;
329 }
330
331 list_remove(&command->_header.link);
332
333 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
334 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
335 code = XHCI_TRBC_SUCCESS;
336
337 command->status = code;
338 command->slot_id = TRB_GET_SLOT(*trb);
339
340 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
341
342 if (code != XHCI_TRBC_SUCCESS) {
343 report_error(code);
344 xhci_dump_trb(&command->_header.trb);
345 }
346
347 switch (TRB_TYPE(command->_header.trb)) {
348 case XHCI_TRB_TYPE_NO_OP_CMD:
349 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
350 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
351 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
352 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
353 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
354 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
355 break;
356 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
357 // Note: If the endpoint was in the middle of a transfer, then the xHC
358 // will add a Transfer TRB before the Event TRB, research that and
359 // handle it appropriately!
360 break;
361 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
362 break;
363 default:
364 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
365 return ENAK;
366 }
367
368 fibril_mutex_unlock(&cr->guard);
369
370 fibril_mutex_lock(&command->_header.completed_mtx);
371 command->_header.completed = true;
372 fibril_condvar_broadcast(&command->_header.completed_cv);
373 fibril_mutex_unlock(&command->_header.completed_mtx);
374
375 if (command->_header.async) {
376 /* Free the command and other DS upon completion. */
377 xhci_cmd_fini(command);
378 }
379
380 return EOK;
381}
382
383/* Command-issuing functions */
384
385static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
386{
387 assert(hc);
388
389 xhci_trb_clean(&cmd->_header.trb);
390
391 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
392
393 return enqueue_command(hc, cmd);
394}
395
396static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
397{
398 assert(hc);
399
400 xhci_trb_clean(&cmd->_header.trb);
401
402 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
403 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
404
405 return enqueue_command(hc, cmd);
406}
407
408static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
409{
410 assert(hc);
411 assert(cmd);
412
413 xhci_trb_clean(&cmd->_header.trb);
414
415 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
416 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
417
418 return enqueue_command(hc, cmd);
419}
420
421static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
422{
423 assert(hc);
424 assert(cmd);
425 assert(dma_buffer_is_set(&cmd->input_ctx));
426
427 /**
428 * TODO: Requirements for this command:
429 * dcbaa[slot_id] is properly sized and initialized
430 * ictx has valids slot context and endpoint 0, all
431 * other should be ignored at this point (see section 4.6.5).
432 */
433
434 xhci_trb_clean(&cmd->_header.trb);
435
436 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
437
438 /**
439 * Note: According to section 6.4.3.4, we can set the 9th bit
440 * of the control field of the trb (BSR) to 1 and then the xHC
441 * will not issue the SET_ADDRESS request to the USB device.
442 * This can be used to provide compatibility with legacy USB devices
443 * that require their device descriptor to be read before such request.
444 */
445 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
446 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
447
448 return enqueue_command(hc, cmd);
449}
450
451static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
452{
453 assert(hc);
454 assert(cmd);
455
456 xhci_trb_clean(&cmd->_header.trb);
457
458 if (!cmd->deconfigure) {
459 /* If the DC flag is on, input context is not evaluated. */
460 assert(dma_buffer_is_set(&cmd->input_ctx));
461
462 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
463 }
464
465 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
466 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
467 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
468
469 return enqueue_command(hc, cmd);
470}
471
472static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
473{
474 assert(hc);
475 assert(cmd);
476 assert(dma_buffer_is_set(&cmd->input_ctx));
477
478 /**
479 * Note: All Drop Context flags of the input context shall be 0,
480 * all Add Context flags shall be initialize to indicate IDs
481 * of the contexts affected by the command.
482 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
483 */
484 xhci_trb_clean(&cmd->_header.trb);
485
486 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
487
488 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
489 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
490
491 return enqueue_command(hc, cmd);
492}
493
494static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
495{
496 assert(hc);
497 assert(cmd);
498
499 /**
500 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
501 * information about this flag.
502 */
503 xhci_trb_clean(&cmd->_header.trb);
504
505 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
506 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
507 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
509
510 return enqueue_command(hc, cmd);
511}
512
513static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
514{
515 assert(hc);
516 assert(cmd);
517
518 xhci_trb_clean(&cmd->_header.trb);
519
520 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
521 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
522 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
523 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
524
525 return enqueue_command(hc, cmd);
526}
527
528static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
529{
530 assert(hc);
531 assert(cmd);
532
533 xhci_trb_clean(&cmd->_header.trb);
534
535 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
536 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
537 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
538 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
539 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
540
541 /**
542 * TODO: Set DCS (see section 4.6.10).
543 */
544
545 return enqueue_command(hc, cmd);
546}
547
548static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
549{
550 assert(hc);
551 assert(cmd);
552
553 xhci_trb_clean(&cmd->_header.trb);
554
555 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
556 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
557
558 return enqueue_command(hc, cmd);
559}
560
561static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
562{
563 assert(hc);
564 assert(cmd);
565
566 xhci_trb_clean(&cmd->_header.trb);
567
568 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
569
570 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
571 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
572 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
573
574 return enqueue_command(hc, cmd);
575}
576
/* The table of command-issuing functions. */

/** Signature of a function that fills in and enqueues one command type. */
typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/* Indexed by xhci_cmd_type_t. NULL entries are commands without an
 * implemented handler; callers report ENOTSUP for those. */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
598
/**
 * Try to abort currently processed command. This is tricky, because
 * calling fibril is not necessarily the one which issued the blocked command.
 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by
 * event, which is again handled in different fibril. but, once we go to sleep
 * on waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied by
 * COMMAND_ABORTED event.
 *
 * @return EOK if the ring was restarted (or needed no restart),
 *         ENAK if the abort was not acknowledged and the ring stays closed.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		// The CR is either stopped, or different fibril is already
		// restarting it.
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("HC(%p): Timeout while waiting for command: aborting current command.", hc);

	/* CHANGING blocks enqueue_command() until we are done here. */
	cr->state = XHCI_CR_STATE_CHANGING;
	fibril_condvar_broadcast(&cr->state_cv);

	abort_command_ring(hc);

	/* The abort is confirmed by a COMMAND_RING_STOPPED event, whose
	 * handler broadcasts stopped_cv. The wait is bounded in case the
	 * event never arrives; the CRR check below detects that. */
	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("HC(%p): Command didn't abort.", hc);

		cr->state = XHCI_CR_STATE_CLOSED;
		fibril_condvar_broadcast(&cr->state_cv);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_error("HC(%p): Command ring stopped. Starting again.", hc);
	hc_ring_doorbell(hc, 0, 0);

	/* Reopen the ring and release any fibrils parked in enqueue_command. */
	cr->state = XHCI_CR_STATE_OPEN;
	fibril_condvar_broadcast(&cr->state_cv);

	fibril_mutex_unlock(&cr->guard);
	return EOK;
}
659
660/**
661 * Wait, until the command is completed. The completion is triggered by
662 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the
663 * command in timely manner, we timeout. Note that we can't just return an
664 * error after the timeout pass - it may be other command blocking the ring,
665 * and ours can be completed afterwards. Therefore, it is not guaranteed that
666 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting
667 * until COMMAND_COMPLETION event arrives.
668 */
669static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
670{
671 int rv = EOK;
672
673 fibril_mutex_lock(&cmd->_header.completed_mtx);
674 while (!cmd->_header.completed) {
675
676 rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);
677
678 /* The waiting timed out. Current command (not necessarily
679 * ours) is probably blocked.
680 */
681 if (!cmd->_header.completed && rv == ETIMEOUT) {
682 fibril_mutex_unlock(&cmd->_header.completed_mtx);
683
684 rv = try_abort_current_command(hc);
685 if (rv)
686 return rv;
687
688 fibril_mutex_lock(&cmd->_header.completed_mtx);
689 }
690 }
691 fibril_mutex_unlock(&cmd->_header.completed_mtx);
692
693 return rv;
694}
695
696/**
697 * Issue command and block the current fibril until it is completed or timeout
698 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
699 */
700int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
701{
702 assert(hc);
703 assert(cmd);
704
705 int err;
706
707 if (!cmd_handlers[cmd->_header.cmd]) {
708 /* Handler not implemented. */
709 return ENOTSUP;
710 }
711
712 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
713 /* Command could not be issued. */
714 return err;
715 }
716
717 if ((err = wait_for_cmd_completion(hc, cmd))) {
718 /* Command failed. */
719 return err;
720 }
721
722 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
723}
724
725/**
726 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
727 * is a useful shorthand for issuing commands without out parameters.
728 */
729int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
730{
731 const int err = xhci_cmd_sync(hc, cmd);
732 xhci_cmd_fini(cmd);
733
734 return err;
735}
736
737/**
738 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
739 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
740 */
741int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
742{
743 assert(hc);
744 assert(stack_cmd);
745
746 /* Save the command for later. */
747 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
748 if (!heap_cmd) {
749 return ENOMEM;
750 }
751
752 /* TODO: Is this good for the mutex and the condvar? */
753 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
754 heap_cmd->_header.async = true;
755
756 /* Issue the command. */
757 int err;
758
759 if (!cmd_handlers[heap_cmd->_header.cmd]) {
760 /* Handler not implemented. */
761 err = ENOTSUP;
762 goto err_heap_cmd;
763 }
764
765 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
766 /* Command could not be issued. */
767 goto err_heap_cmd;
768 }
769
770 return EOK;
771
772err_heap_cmd:
773 free(heap_cmd);
774 return err;
775}
776
777/**
778 * @}
779 */
Note: See TracBrowser for help on using the repository browser.