source: mainline/uspace/drv/bus/usb/xhci/commands.c@ 9ff99e8

/*
 * Copyright (c) 2017 Jaroslav Jindrak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief Command sending functions.
 */

#include <errno.h>
#include <str_error.h>
#include <usb/debug.h>
#include "commands.h"
#include "debug.h"
#include "hc.h"
#include "hw_struct/context.h"
#include "hw_struct/trb.h"

#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

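/*
 * The TRB_SET_* macros above OR one field of a command TRB into place,
 * following the layouts in section 6.4.3 of the xHCI spec: e.g. the TRB
 * type occupies bits 15:10 of the control dword, the endpoint ID bits
 * 20:16 (a 5-bit field, hence the 0x1F mask), and the slot ID bits 31:24.
 * Every value passes through host2xhci(), as TRBs are little-endian in
 * memory.
 */
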
/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)

/* Control functions */

static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}

/**
 * Initialize the command subsystem. Allocates the command ring.
 *
 * Does not write the CR pointer to the hardware, because the xHC will be
 * reset before starting.
 */
int xhci_init_commands(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	int err;

	if ((err = xhci_trb_ring_init(&cr->trb_ring)))
		return err;

	fibril_mutex_initialize(&cr->guard);
	fibril_condvar_initialize(&cr->state_cv);
	fibril_condvar_initialize(&cr->stopped_cv);

	list_initialize(&cr->cmd_list);

	cr->state = XHCI_CR_STATE_OPEN;

	return EOK;
}

/**
 * Finish the command subsystem. Stops the hardware from running commands,
 * then deallocates the ring.
 */
void xhci_fini_commands(xhci_hc_t *hc)
{
	assert(hc);
	xhci_stop_command_ring(hc);

	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);
	xhci_trb_ring_fini(&cr->trb_ring);
	fibril_mutex_unlock(&cr->guard);
}

/**
 * Initialize a command structure for the given command.
 */
void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
{
	memset(cmd, 0, sizeof(*cmd));

	link_initialize(&cmd->_header.link);

	fibril_mutex_initialize(&cmd->_header.completed_mtx);
	fibril_condvar_initialize(&cmd->_header.completed_cv);

	cmd->_header.cmd = type;
}

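/*
 * Example (sketch): the typical lifecycle of a synchronous command that
 * carries an out parameter. ENABLE_SLOT returns the newly assigned slot ID
 * in cmd.slot_id:
 *
 *     xhci_cmd_t cmd;
 *     xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
 *     const int err = xhci_cmd_sync(hc, &cmd);
 *     if (err == EOK)
 *         slot_id = cmd.slot_id;
 *     xhci_cmd_fini(&cmd);
 */
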
/**
 * Finish the command structure. Some command invocations include allocating
 * a context structure; for convenience, this function deallocates all such
 * resources.
 */
void xhci_cmd_fini(xhci_cmd_t *cmd)
{
	list_remove(&cmd->_header.link);

	dma_buffer_free(&cmd->input_ctx);
	dma_buffer_free(&cmd->bandwidth_ctx);

	if (cmd->_header.async) {
		free(cmd);
	}
}

/**
 * Find a command issued by TRB at @c phys inside the command list.
 *
 * Call with guard locked only.
 */
static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(fibril_mutex_is_locked(&cr->guard));

	link_t *cmd_link = list_first(&cr->cmd_list);

	while (cmd_link != NULL) {
		xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);

		if (cmd->_header.trb_phys == phys)
			break;

		cmd_link = list_next(cmd_link, &cr->cmd_list);
	}

	return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
	    : NULL;
}

/**
 * Enqueue a command on the TRB ring. Ring the doorbell to initiate
 * processing. Register the command as waiting for completion inside the
 * command list.
 */
static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(cmd);

	fibril_mutex_lock(&cr->guard);

	while (cr->state == XHCI_CR_STATE_CHANGING)
		fibril_condvar_wait(&cr->state_cv, &cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_debug("Sending command %s", xhci_trb_str_type(TRB_TYPE(cmd->_header.trb)));

	list_append(&cmd->_header.link, &cr->cmd_list);

	xhci_trb_ring_enqueue(&cr->trb_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
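	/* Doorbell 0 is the host controller's own doorbell; writing target 0
	 * to it notifies the xHC that new command TRBs are on the ring
	 * (xHCI spec, section 5.6). */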
	hc_ring_doorbell(hc, 0, 0);

	fibril_mutex_unlock(&cr->guard);

	return EOK;
}

/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until hardware acknowledges it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr->state = XHCI_CR_STATE_CLOSED;
	fibril_condvar_broadcast(&cr->state_cv);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write

	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}

/**
 * Abort currently processed command. Note that it is only aborted when the
 * command is "blocking" - see section 4.6.1.2 of xHCI spec.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CRCR_HI, 0); // Some systems (incl. QEMU) require 64-bit write
}

static const char *trb_codes[] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};

/**
 * Report an error according to command completion code.
 */
static void report_error(int code)
{
	if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
		usb_log_error("Command resulted in error: %s.", trb_codes[code]);
	else
		usb_log_error("Command resulted in reserved or vendor specific error.");
}

/**
 * Handle a command completion. Feed the fibril waiting for result.
 *
 * @param trb The COMMAND_COMPLETION TRB found in event ring.
 */
int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(trb);

	fibril_mutex_lock(&cr->guard);

	int code = TRB_GET_CODE(*trb);
	const uint64_t phys = TRB_GET_PHYS(*trb);

	xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);

	if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
		/* This can mean either that the ring is being stopped, or
		 * that a command was aborted. Either way, wake threads
		 * waiting on stopped_cv.
		 *
		 * Note that we need to hold the mutex, because we must be
		 * sure the requesting thread is waiting inside the CV.
		 */
		usb_log_debug2("Command ring stopped.");
		fibril_condvar_broadcast(&cr->stopped_cv);
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	xhci_cmd_t *command = find_command(hc, phys);
	if (command == NULL) {
		usb_log_error("No command struct for completion event found.");

		if (code != XHCI_TRBC_SUCCESS)
			report_error(code);

		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	list_remove(&command->_header.link);

	/* The semantics of NO_OP_CMD is that its success is reported as a
	 * TRB error. */
	if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
		code = XHCI_TRBC_SUCCESS;

	command->status = code;
	command->slot_id = TRB_GET_SLOT(*trb);

	usb_log_debug("Completed command %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));

	if (code != XHCI_TRBC_SUCCESS) {
		report_error(code);
		xhci_dump_trb(&command->_header.trb);
	}

	fibril_mutex_unlock(&cr->guard);

	fibril_mutex_lock(&command->_header.completed_mtx);
	command->_header.completed = true;
	fibril_condvar_broadcast(&command->_header.completed_cv);
	fibril_mutex_unlock(&command->_header.completed_mtx);

	if (command->_header.async) {
		/* Free the command and other DS upon completion. */
		xhci_cmd_fini(command);
	}

	return EOK;
}
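
/*
 * A note on the handoff above: this handler runs in the event-handling
 * fibril. It marks the command completed under completed_mtx and broadcasts
 * completed_cv; the issuing fibril sleeps on that condvar in
 * wait_for_cmd_completion() further below.
 */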

/* Command-issuing functions */

static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);

	return enqueue_command(hc, cmd);
}

static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
	cmd->_header.trb.control |= host2xhci(32,
	    XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);

	return enqueue_command(hc, cmd);
}

static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * TODO: Requirements for this command:
	 *    dcbaa[slot_id] is properly sized and initialized
	 *    ictx has a valid slot context and endpoint 0; all
	 *    others should be ignored at this point (see section 4.6.5).
	 */

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	/**
	 * Note: According to section 6.4.3.4, we can set the 9th bit
	 * of the control field of the TRB (BSR) to 1, and then the xHC
	 * will not issue the SET_ADDRESS request to the USB device.
	 * This can be used to provide compatibility with legacy USB devices
	 * that require their device descriptor to be read before such a
	 * request.
	 */
	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	if (!cmd->deconfigure) {
		/* If the DC flag is on, input context is not evaluated. */
		assert(dma_buffer_is_set(&cmd->input_ctx));

		TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
	}

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);

	return enqueue_command(hc, cmd);
}

static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * Note: All Drop Context flags of the input context shall be 0,
	 * and all Add Context flags shall be initialized to indicate the
	 * IDs of the contexts affected by the command.
	 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
	 */
	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	/**
	 * Note: TCS can have values 0 or 1. If it is set to 0, see
	 * section 4.5.8 for information about this flag.
	 */
	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
	TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);

	/**
	 * TODO: Set DCS (see section 4.6.10).
	 */

	return enqueue_command(hc, cmd);
}

static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	/* The bandwidth context must be allocated by the caller. */
	assert(dma_buffer_is_set(&cmd->bandwidth_ctx));

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);

	return enqueue_command(hc, cmd);
}

/* The table of command-issuing functions. */

typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

static cmd_handler cmd_handlers[] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
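
/*
 * NULL entries above are commands this driver does not implement;
 * xhci_cmd_sync() and xhci_cmd_async_fini() below map them to ENOTSUP
 * when invoked.
 */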

/**
 * Try to abort the currently processed command. This is tricky, because the
 * calling fibril is not necessarily the one which issued the blocked command.
 * The trickiness intensifies with the fact that stopping the CR is signaled
 * by an event, which is again handled in a different fibril. But once we go
 * to sleep waiting for that event, another fibril may wake up and try to
 * abort the blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied
 * by a COMMAND_ABORTED event.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state != XHCI_CR_STATE_OPEN) {
		// The CR is either stopped, or a different fibril is already
		// restarting it.
		usb_log_debug2("Command ring already being stopped.");
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("Timeout while waiting for command: aborting current command.");

	cr->state = XHCI_CR_STATE_CHANGING;
	fibril_condvar_broadcast(&cr->state_cv);

	abort_command_ring(hc);

	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with the HC and
		 * reset it.
		 */
		usb_log_error("Command didn't abort.");

		cr->state = XHCI_CR_STATE_CLOSED;
		fibril_condvar_broadcast(&cr->state_cv);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_error("Command ring stopped. Starting again.");
	hc_ring_doorbell(hc, 0, 0);

	cr->state = XHCI_CR_STATE_OPEN;
	fibril_condvar_broadcast(&cr->state_cv);

	fibril_mutex_unlock(&cr->guard);
	return EOK;
}

/**
 * Wait until the command is completed. The completion is triggered by a
 * COMMAND_COMPLETION event. As we do not want to rely on the HW completing
 * the command in a timely manner, we time out. Note that we can't just
 * return an error after the timeout passes - it may be another command
 * blocking the ring, and ours can be completed afterwards. Therefore, it is
 * not guaranteed that this function will return within XHCI_COMMAND_TIMEOUT.
 * It will continue waiting until a COMMAND_COMPLETION event arrives.
 */
static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	int rv = EOK;

	if (fibril_get_id() == hc->event_handler) {
		usb_log_error("Deadlock detected in waiting for command.");
		abort();
	}

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv,
		    &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/* The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}

/**
 * Issue a command and block the current fibril until it completes or the
 * timeout expires. Nothing is deallocated. The caller should always execute
 * `xhci_cmd_fini`.
 */
int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	int err;

	if (!cmd_handlers[cmd->_header.cmd]) {
		/* Handler not implemented. */
		return ENOTSUP;
	}

	if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
		/* Command could not be issued. */
		return err;
	}

	if ((err = wait_for_cmd_completion(hc, cmd))) {
		/* Command failed. */
		return err;
	}

	switch (cmd->status) {
	case XHCI_TRBC_SUCCESS:
		return EOK;
	case XHCI_TRBC_USB_TRANSACTION_ERROR:
		return ESTALL;
	default:
		return EINVAL;
	}
}

/**
 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
 * is a useful shorthand for issuing commands without out parameters.
 */
int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	const int err = xhci_cmd_sync(hc, cmd);
	xhci_cmd_fini(cmd);

	return err;
}
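
/*
 * Example (sketch): disabling a slot when no out parameters are needed.
 * `slot_id` is assumed to come from an earlier ENABLE_SLOT command:
 *
 *     xhci_cmd_t cmd;
 *     xhci_cmd_init(&cmd, XHCI_CMD_DISABLE_SLOT);
 *     cmd.slot_id = slot_id;
 *     const int err = xhci_cmd_sync_fini(hc, &cmd);
 */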

/**
 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
 * fibril. The command is copied to the heap and `xhci_cmd_fini` is called
 * upon its completion.
 */
int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
{
	assert(hc);
	assert(stack_cmd);

	/* Save the command for later. */
	xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
	if (!heap_cmd) {
		return ENOMEM;
	}

	/* TODO: Is this good for the mutex and the condvar? */
	memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
	heap_cmd->_header.async = true;

	/* Issue the command. */
	int err;

	if (!cmd_handlers[heap_cmd->_header.cmd]) {
		/* Handler not implemented. */
		err = ENOTSUP;
		goto err_heap_cmd;
	}

	if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
		/* Command could not be issued. */
		goto err_heap_cmd;
	}

	return EOK;

err_heap_cmd:
	free(heap_cmd);
	return err;
}
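
/*
 * Note on lifetime: for an async command, the heap copy above is owned by
 * the command subsystem from this point on. xhci_handle_command_completion()
 * calls xhci_cmd_fini() on it once the completion event arrives, which also
 * frees it (see the _header.async branch there).
 */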

/**
 * @}
 */