source: mainline/uspace/drv/bus/usb/xhci/commands.c@961a5ee

Last change on this file since 961a5ee was 77ded647, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

xhci: do not avoid 64-bit writes

/*
 * Copyright (c) 2017 Jaroslav Jindrak
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup drvusbxhci
 * @{
 */
/** @file
 * @brief Command sending functions.
 */

#include <errno.h>
#include <str_error.h>
#include <usb/debug.h>
#include "commands.h"
#include "debug.h"
#include "hc.h"
#include "hw_struct/context.h"
#include "hw_struct/trb.h"

#define TRB_SET_TSP(trb, tsp) (trb).control |= host2xhci(32, (((tsp) & 0x1) << 9))
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/* The Endpoint ID field is 5 bits wide, hence the 0x1F mask. */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
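
/*
 * Worked example (illustrative only, not used by the driver): TRB_GET_CODE
 * extracts bits 31:24 of the status dword, so a completion event whose
 * status reads 0x01000000 yields completion code 1 (XHCI_TRBC_SUCCESS).
 * Likewise, TRB_GET_SLOT applied to a control dword of 0x05000000 yields
 * slot ID 5.
 */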

/* Control functions */

static xhci_cmd_ring_t *get_cmd_ring(xhci_hc_t *hc)
{
	assert(hc);
	return &hc->cr;
}

/**
 * Initialize the command subsystem. Allocates the command ring.
 *
 * Does not set the CR pointer in the hardware, because the xHC will be
 * reset before starting.
 */
int xhci_init_commands(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	int err;

	if ((err = xhci_trb_ring_init(&cr->trb_ring, 0)))
		return err;

	fibril_mutex_initialize(&cr->guard);
	fibril_condvar_initialize(&cr->state_cv);
	fibril_condvar_initialize(&cr->stopped_cv);

	list_initialize(&cr->cmd_list);

	return EOK;
}

/**
 * Finish the command subsystem. Stops the hardware from running commands, then
 * deallocates the ring.
 */
void xhci_fini_commands(xhci_hc_t *hc)
{
	assert(hc);
	xhci_stop_command_ring(hc);

	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);
	xhci_trb_ring_fini(&cr->trb_ring);
	fibril_mutex_unlock(&cr->guard);
}

/**
 * Initialize a command structure for the given command.
 */
void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
{
	memset(cmd, 0, sizeof(*cmd));

	link_initialize(&cmd->_header.link);

	fibril_mutex_initialize(&cmd->_header.completed_mtx);
	fibril_condvar_initialize(&cmd->_header.completed_cv);

	cmd->_header.cmd = type;
}

/**
 * Finish the command structure. Some command invocations include allocating
 * a context structure. For convenience in calling commands, this function
 * deallocates all such resources.
 */
void xhci_cmd_fini(xhci_cmd_t *cmd)
{
	list_remove(&cmd->_header.link);

	dma_buffer_free(&cmd->input_ctx);
	dma_buffer_free(&cmd->bandwidth_ctx);

	if (cmd->_header.async) {
		free(cmd);
	}
}
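
/*
 * Typical lifecycle of a command, as a minimal sketch (assumes an
 * initialized xhci_hc_t *hc; the concrete field setup depends on the
 * command type):
 *
 *	xhci_cmd_t cmd;
 *	xhci_cmd_init(&cmd, XHCI_CMD_NO_OP);
 *	const int err = xhci_cmd_sync(hc, &cmd);
 *	xhci_cmd_fini(&cmd);
 */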

/**
 * Find a command issued by TRB at @c phys inside the command list.
 *
 * Call with guard locked only.
 */
static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(fibril_mutex_is_locked(&cr->guard));

	link_t *cmd_link = list_first(&cr->cmd_list);

	while (cmd_link != NULL) {
		xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);

		if (cmd->_header.trb_phys == phys)
			break;

		cmd_link = list_next(cmd_link, &cr->cmd_list);
	}

	return cmd_link ? list_get_instance(cmd_link, xhci_cmd_t, _header.link)
	    : NULL;
}

static void cr_set_state(xhci_cmd_ring_t *cr, xhci_cr_state_t state)
{
	assert(fibril_mutex_is_locked(&cr->guard));

	cr->state = state;
	if (state == XHCI_CR_STATE_OPEN
	    || state == XHCI_CR_STATE_CLOSED)
		fibril_condvar_broadcast(&cr->state_cv);
}

static int wait_for_ring_open(xhci_cmd_ring_t *cr)
{
	assert(fibril_mutex_is_locked(&cr->guard));

	while (true) {
		switch (cr->state) {
		case XHCI_CR_STATE_CHANGING:
		case XHCI_CR_STATE_FULL:
			fibril_condvar_wait(&cr->state_cv, &cr->guard);
			break;
		case XHCI_CR_STATE_OPEN:
			return EOK;
		case XHCI_CR_STATE_CLOSED:
			return ENAK;
		}
	}
}
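
/*
 * Summary of the command ring states used above:
 *
 *   XHCI_CR_STATE_OPEN     - commands may be enqueued
 *   XHCI_CR_STATE_FULL     - the TRB ring is out of space; issuers sleep
 *                            on state_cv until a completion frees a slot
 *   XHCI_CR_STATE_CHANGING - the ring is being stopped or restarted
 *   XHCI_CR_STATE_CLOSED   - the ring is shut down; new commands get ENAK
 */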

/**
 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing.
 * Register the command as waiting for completion inside the command list.
 */
static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(cmd);

	fibril_mutex_lock(&cr->guard);

	if (wait_for_ring_open(cr)) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	usb_log_debug("Sending command %s", xhci_trb_str_type(TRB_TYPE(cmd->_header.trb)));

	list_append(&cmd->_header.link, &cr->cmd_list);

	int err = EOK;
	while (err == EOK) {
		err = xhci_trb_ring_enqueue(&cr->trb_ring,
		    &cmd->_header.trb, &cmd->_header.trb_phys);
		if (err != EAGAIN)
			break;

		cr_set_state(cr, XHCI_CR_STATE_FULL);
		err = wait_for_ring_open(cr);
	}

	if (err == EOK)
		hc_ring_doorbell(hc, 0, 0);

	fibril_mutex_unlock(&cr->guard);

	return err;
}

/**
 * Stop the command ring. Stop processing commands, block issuing new ones.
 * Wait until the hardware acknowledges that it is stopped.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	// Prevent others from starting CR again.
	cr_set_state(cr, XHCI_CR_STATE_CLOSED);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);

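	/* The wakeup comes from xhci_handle_command_completion(), which
	 * broadcasts stopped_cv when the COMMAND_RING_STOPPED completion
	 * code arrives.
	 */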
	while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR))
		fibril_condvar_wait(&cr->stopped_cv, &cr->guard);

	fibril_mutex_unlock(&cr->guard);
}

/**
 * Mark the command ring as stopped. NAK new commands and abort running ones;
 * do not touch the HC, as it is probably broken.
 */
void xhci_nuke_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	fibril_mutex_lock(&cr->guard);
	// Prevent others from starting CR again.
	cr_set_state(cr, XHCI_CR_STATE_CLOSED);
	fibril_mutex_unlock(&cr->guard);
}

/**
 * Mark the command ring as working again.
 */
void xhci_start_command_ring(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	fibril_mutex_lock(&cr->guard);
	// Allow others to issue commands again.
	cr_set_state(cr, XHCI_CR_STATE_OPEN);
	fibril_mutex_unlock(&cr->guard);
}

/**
 * Abort the currently processed command. Note that it is only aborted if the
 * command is "blocking" - see section 4.6.1.2 of the xHCI spec.
 */
static void abort_command_ring(xhci_hc_t *hc)
{
	XHCI_REG_SET(hc->op_regs, XHCI_OP_CA, 1);
}

static const char *trb_codes[] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};

/**
 * Report an error according to command completion code.
 */
static void report_error(int code)
{
	if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
		usb_log_error("Command resulted in error: %s.", trb_codes[code]);
	else
		usb_log_error("Command resulted in reserved or vendor specific error.");
}

/**
 * Handle a command completion. Feed the fibril waiting for the result.
 *
 * @param trb The COMMAND_COMPLETION TRB found in the event ring.
 */
int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);
	assert(trb);

	fibril_mutex_lock(&cr->guard);

	int code = TRB_GET_CODE(*trb);

	if (code == XHCI_TRBC_COMMAND_RING_STOPPED) {
		/* This can either mean that the ring is being stopped, or
		 * a command was aborted. Either way, wake threads waiting
		 * on stopped_cv.
		 *
		 * Note that we need to hold the mutex, because we must be sure
		 * the requesting thread is waiting inside the CV.
		 */
		usb_log_debug("Command ring stopped.");
		fibril_condvar_broadcast(&cr->stopped_cv);
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	const uint64_t phys = TRB_GET_PHYS(*trb);
	xhci_trb_ring_update_dequeue(&cr->trb_ring, phys);

	if (cr->state == XHCI_CR_STATE_FULL)
		cr_set_state(cr, XHCI_CR_STATE_OPEN);

	xhci_cmd_t *command = find_command(hc, phys);
	if (command == NULL) {
		usb_log_error("No command struct for completion event found.");

		if (code != XHCI_TRBC_SUCCESS)
			report_error(code);

		/* Do not leak the guard on this early return. */
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	list_remove(&command->_header.link);

	/* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
	if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
		code = XHCI_TRBC_SUCCESS;

	command->status = code;
	command->slot_id = TRB_GET_SLOT(*trb);

	usb_log_debug("Completed command %s",
	    xhci_trb_str_type(TRB_TYPE(command->_header.trb)));

	if (code != XHCI_TRBC_SUCCESS) {
		report_error(code);
		xhci_dump_trb(&command->_header.trb);
	}

	fibril_mutex_unlock(&cr->guard);

	fibril_mutex_lock(&command->_header.completed_mtx);
	command->_header.completed = true;
	fibril_condvar_broadcast(&command->_header.completed_cv);
	fibril_mutex_unlock(&command->_header.completed_mtx);

	if (command->_header.async) {
		/* Free the command and other DS upon completion. */
		xhci_cmd_fini(command);
	}

	return EOK;
}

/* Command-issuing functions */

static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);

	return enqueue_command(hc, cmd);
}

static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
	cmd->_header.trb.control |=
	    host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);

	return enqueue_command(hc, cmd);
}

static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * TODO: Requirements for this command:
	 *   dcbaa[slot_id] is properly sized and initialized,
	 *   ictx has a valid slot context and endpoint 0; all
	 *   others should be ignored at this point (see section 4.6.5).
	 */

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	/**
	 * Note: According to section 6.4.3.4, we can set the 9th bit
	 * of the control field of the TRB (BSR) to 1 and then the xHC
	 * will not issue the SET_ADDRESS request to the USB device.
	 * This can be used to provide compatibility with legacy USB devices
	 * that require their device descriptor to be read before such a request.
	 */
	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	if (!cmd->deconfigure) {
		/* If the DC flag is on, input context is not evaluated. */
		assert(dma_buffer_is_set(&cmd->input_ctx));

		TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
	}

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);

	return enqueue_command(hc, cmd);
}

static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);
	assert(dma_buffer_is_set(&cmd->input_ctx));

	/**
	 * Note: All Drop Context flags of the input context shall be 0,
	 * and all Add Context flags shall be initialized to indicate the IDs
	 * of the contexts affected by the command.
	 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
	 */
	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
	TRB_SET_TSP(cmd->_header.trb, cmd->tsp);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
	TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
	TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);

	return enqueue_command(hc, cmd);
}

static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);

	return enqueue_command(hc, cmd);
}

static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	xhci_trb_clean(&cmd->_header.trb);

	TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);

	TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
	TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
	TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);

	return enqueue_command(hc, cmd);
}

/* The table of command-issuing functions. */

typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

static cmd_handler cmd_handlers[] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	[XHCI_CMD_FORCE_EVENT] = NULL,
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
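
/*
 * NULL entries denote commands with no handler implemented; both
 * xhci_cmd_sync() and xhci_cmd_async_fini() below refuse them with ENOTSUP.
 */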

/**
 * Try to abort the currently processed command. This is tricky, because the
 * calling fibril is not necessarily the one which issued the blocked command.
 * The trickiness is intensified by the fact that stopping the CR is signaled
 * by an event, which is handled in yet another fibril. Once we go to sleep
 * waiting for that event, another fibril may wake up and try to abort the
 * blocked command.
 *
 * So, we mark the command ring as being restarted, wait for it to stop, and
 * then start it again. If there was a blocked command, it will be satisfied
 * by a COMMAND_ABORTED event.
 */
static int try_abort_current_command(xhci_hc_t *hc)
{
	xhci_cmd_ring_t *cr = get_cmd_ring(hc);

	fibril_mutex_lock(&cr->guard);

	if (cr->state == XHCI_CR_STATE_CLOSED) {
		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	if (cr->state == XHCI_CR_STATE_CHANGING) {
		fibril_mutex_unlock(&cr->guard);
		return EOK;
	}

	usb_log_error("Timeout while waiting for command: aborting current command.");

	cr_set_state(cr, XHCI_CR_STATE_CHANGING);

	abort_command_ring(hc);

	fibril_condvar_wait_timeout(&cr->stopped_cv, &cr->guard, XHCI_CR_ABORT_TIMEOUT);

	if (XHCI_REG_RD(hc->op_regs, XHCI_OP_CRR)) {
		/* 4.6.1.2, implementation note
		 * Assume there are larger problems with HC and
		 * reset it.
		 */
		usb_log_error("Command didn't abort.");

		cr_set_state(cr, XHCI_CR_STATE_CLOSED);

		// TODO: Reset HC completely.
		// Don't forget to somehow complete all commands with error.

		fibril_mutex_unlock(&cr->guard);
		return ENAK;
	}

	cr_set_state(cr, XHCI_CR_STATE_OPEN);

	fibril_mutex_unlock(&cr->guard);

	usb_log_error("Command ring stopped. Starting again.");
	hc_ring_doorbell(hc, 0, 0);

	return EOK;
}

/**
 * Wait until the command is completed. The completion is triggered by the
 * COMMAND_COMPLETION event. As we do not want to rely on the HW completing
 * the command in a timely manner, we time out. Note that we can't just return
 * an error after the timeout passes - another command may be blocking the
 * ring, and ours can be completed afterwards. Therefore, it is not guaranteed
 * that this function will return within XHCI_COMMAND_TIMEOUT. It will continue
 * waiting until a COMMAND_COMPLETION event arrives.
 */
static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	int rv = EOK;

	if (fibril_get_id() == hc->event_handler) {
		usb_log_error("Deadlock detected in waiting for command.");
		abort();
	}

	fibril_mutex_lock(&cmd->_header.completed_mtx);
	while (!cmd->_header.completed) {

		rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv,
		    &cmd->_header.completed_mtx, XHCI_COMMAND_TIMEOUT);

		/* The waiting timed out. Current command (not necessarily
		 * ours) is probably blocked.
		 */
		if (!cmd->_header.completed && rv == ETIMEOUT) {
			fibril_mutex_unlock(&cmd->_header.completed_mtx);

			rv = try_abort_current_command(hc);
			if (rv)
				return rv;

			fibril_mutex_lock(&cmd->_header.completed_mtx);
		}
	}
	fibril_mutex_unlock(&cmd->_header.completed_mtx);

	return rv;
}

/**
 * Issue a command and block the current fibril until it is completed or the
 * timeout expires. Nothing is deallocated. The caller should always execute
 * `xhci_cmd_fini`.
 */
int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	assert(hc);
	assert(cmd);

	int err;

	if (!cmd_handlers[cmd->_header.cmd]) {
		/* Handler not implemented. */
		return ENOTSUP;
	}

	if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
		/* Command could not be issued. */
		return err;
	}

	if ((err = wait_for_cmd_completion(hc, cmd))) {
		/* Command failed. */
		return err;
	}

	switch (cmd->status) {
	case XHCI_TRBC_SUCCESS:
		return EOK;
	case XHCI_TRBC_USB_TRANSACTION_ERROR:
		return ESTALL;
	case XHCI_TRBC_RESOURCE_ERROR:
	case XHCI_TRBC_BANDWIDTH_ERROR:
	case XHCI_TRBC_NO_SLOTS_ERROR:
		return ELIMIT;
	case XHCI_TRBC_SLOT_NOT_ENABLED_ERROR:
		return ENOENT;
	default:
		return EINVAL;
	}
}
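
/*
 * Example (hypothetical usage sketch): disabling a slot synchronously and
 * reacting to the mapped error codes above. The variable `slot_id` stands
 * for an ID previously obtained from an Enable Slot command.
 *
 *	xhci_cmd_t cmd;
 *	xhci_cmd_init(&cmd, XHCI_CMD_DISABLE_SLOT);
 *	cmd.slot_id = slot_id;
 *	const int err = xhci_cmd_sync(hc, &cmd);
 *	if (err == ENOENT)
 *		usb_log_error("Slot was not enabled.");
 *	xhci_cmd_fini(&cmd);
 */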

/**
 * Does the same thing as `xhci_cmd_sync`, and also executes `xhci_cmd_fini`.
 * This is a useful shorthand for issuing commands without output parameters.
 */
int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
{
	const int err = xhci_cmd_sync(hc, cmd);
	xhci_cmd_fini(cmd);

	return err;
}

/**
 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current
 * fibril. The command is copied to the heap, and `xhci_cmd_fini` is called
 * upon its completion.
 */
int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
{
	assert(hc);
	assert(stack_cmd);

	/* Save the command for later. */
	xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
	if (!heap_cmd) {
		return ENOMEM;
	}

	/* TODO: Is this good for the mutex and the condvar? */
	memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
	heap_cmd->_header.async = true;

	/* Issue the command. */
	int err;

	if (!cmd_handlers[heap_cmd->_header.cmd]) {
		/* Handler not implemented. */
		err = ENOTSUP;
		goto err_heap_cmd;
	}

	if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
		/* Command could not be issued. */
		goto err_heap_cmd;
	}

	return EOK;

err_heap_cmd:
	free(heap_cmd);
	return err;
}
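
/*
 * Example (hypothetical usage sketch): issuing a No Op command
 * asynchronously. The stack-allocated structure is only a template;
 * xhci_cmd_async_fini() copies it to the heap and frees that copy upon
 * completion, leaving the stack template itself untouched.
 *
 *	xhci_cmd_t cmd;
 *	xhci_cmd_init(&cmd, XHCI_CMD_NO_OP);
 *	if (xhci_cmd_async_fini(hc, &cmd) != EOK)
 *		usb_log_error("Failed to issue the command.");
 */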

/**
 * @}
 */