source: mainline/uspace/drv/bus/usb/xhci/commands.c@ c3d926f3

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since c3d926f3 was c3d926f3, checked in by Petr Manek <petr.manek@…>, 8 years ago

Big command refactoring. Unified and encapsulated command function API. Removed explicit heap command (de)allocation functions. Added three functions for (a)synchronous command issuing and neat inline macro with syntax sugar.

  • Property mode set to 100644
File size: 17.3 KB
Line 
1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include <usb/host/utils/malloc32.h>
40#include "commands.h"
41#include "debug.h"
42#include "hc.h"
43#include "hw_struct/context.h"
44#include "hw_struct/trb.h"
45
/*
 * Helpers for filling command TRB fields. Each macro ORs the (already
 * byte-swapped) field value into the TRB; callers must clean the TRB first.
 * All arguments are parenthesized to keep the macros expansion-safe.
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, ((tcs) & 0x1) << 9)
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
/* Endpoint ID is a 5-bit field (valid IDs 1-31, see xHCI spec 6.4.3);
 * the previous mask 0x5 silently corrupted most endpoint IDs. */
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
/* Input-context pointer must be 16-byte aligned; low 4 bits are masked off.
 * Fixed to use the macro parameter instead of the caller-scope name
 * `phys_addr` the original happened to rely on. */
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))
64
65/* Control functions */
66
67int xhci_init_commands(xhci_hc_t *hc)
68{
69 assert(hc);
70
71 list_initialize(&hc->commands);
72
73 fibril_mutex_initialize(&hc->commands_mtx);
74
75 return EOK;
76}
77
/** Tear down the command subsystem.
 *
 * Currently a no-op apart from the sanity check; there is nothing to
 * release because xhci_init_commands() allocates no resources.
 */
void xhci_fini_commands(xhci_hc_t *hc)
{
    // Note: Untested.
    assert(hc);
}
83
84void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
85{
86 memset(cmd, 0, sizeof(*cmd));
87
88 link_initialize(&cmd->_header.link);
89
90 fibril_mutex_initialize(&cmd->_header.completed_mtx);
91 fibril_condvar_initialize(&cmd->_header.completed_cv);
92
93 cmd->_header.cmd = type;
94 cmd->_header.timeout = XHCI_DEFAULT_TIMEOUT;
95}
96
97void xhci_cmd_fini(xhci_cmd_t *cmd)
98{
99 list_remove(&cmd->_header.link);
100
101 if (cmd->input_ctx) {
102 free32(cmd->input_ctx);
103 };
104
105 if (cmd->bandwidth_ctx) {
106 free32(cmd->bandwidth_ctx);
107 }
108
109 if (cmd->_header.async) {
110 free(cmd);
111 }
112}
113
114static inline xhci_cmd_t *get_command(xhci_hc_t *hc, uint64_t phys)
115{
116 fibril_mutex_lock(&hc->commands_mtx);
117
118 link_t *cmd_link = list_first(&hc->commands);
119
120 while (cmd_link != NULL) {
121 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
122
123 if (cmd->_header.trb_phys == phys)
124 break;
125
126 cmd_link = list_next(cmd_link, &hc->commands);
127 }
128
129 if (cmd_link != NULL) {
130 list_remove(cmd_link);
131 fibril_mutex_unlock(&hc->commands_mtx);
132
133 return list_get_instance(cmd_link, xhci_cmd_t, _header.link);
134 }
135
136 fibril_mutex_unlock(&hc->commands_mtx);
137 return NULL;
138}
139
/** Track a command, place its TRB on the command ring and ring a doorbell.
 *
 * @param hc       Host controller.
 * @param cmd      Command with its TRB already filled in.
 * @param doorbell Doorbell register index (0 = host controller doorbell).
 * @param target   Doorbell target value.
 * @return Always EOK.
 */
static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd, unsigned doorbell, unsigned target)
{
    assert(hc);
    assert(cmd);

    /* Register the command first so the completion handler can match it
     * by TRB physical address (see get_command()). */
    fibril_mutex_lock(&hc->commands_mtx);
    list_append(&cmd->_header.link, &hc->commands);
    fibril_mutex_unlock(&hc->commands_mtx);

    /* NOTE(review): the return value of xhci_trb_ring_enqueue is ignored
     * here — confirm the command ring cannot legitimately fail/fill up. */
    xhci_trb_ring_enqueue(&hc->command_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
    hc_ring_doorbell(hc, doorbell, target);

    usb_log_debug2("HC(%p): Sent command:", hc);
    xhci_dump_trb(&cmd->_header.trb);

    return EOK;
}
157
/** Request the xHC to stop the command ring (CRCR.CS).
 *
 * The extra CRCR_HI read-back/write-back works around a qemu quirk
 * described in the note below.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
    assert(hc);

    XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);

    /**
     * Note: There is a bug in qemu that checks CS only when CRCR_HI
     * is written, this (and the read/write in abort) ensures
     * the command rings stops.
     */
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, XHCI_REG_RD(hc->op_regs, XHCI_OP_CRCR_HI));
}
171
/** Request the xHC to abort the currently executing command (CRCR.CA).
 *
 * The CRCR_HI read-back/write-back is the same qemu workaround as in
 * xhci_stop_command_ring().
 */
void xhci_abort_command_ring(xhci_hc_t *hc)
{
    assert(hc);

    XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, XHCI_REG_RD(hc->op_regs, XHCI_OP_CRCR_HI));
}
179
/** (Re)start the command ring and kick the host controller doorbell. */
void xhci_start_command_ring(xhci_hc_t *hc)
{
    assert(hc);

    XHCI_REG_WR(hc->op_regs, XHCI_OP_CRR, 1);
    hc_ring_doorbell(hc, 0, 0);
}
187
/** Human-readable names for TRB completion codes, indexed by code value.
 * Entries not covered here (reserved/vendor-specific) stay NULL. */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
    TRBC(INVALID),
    TRBC(SUCCESS),
    TRBC(DATA_BUFFER_ERROR),
    TRBC(BABBLE_DETECTED_ERROR),
    TRBC(USB_TRANSACTION_ERROR),
    TRBC(TRB_ERROR),
    TRBC(STALL_ERROR),
    TRBC(RESOURCE_ERROR),
    TRBC(BANDWIDTH_ERROR),
    TRBC(NO_SLOTS_ERROR),
    TRBC(INVALID_STREAM_ERROR),
    TRBC(SLOT_NOT_ENABLED_ERROR),
    TRBC(EP_NOT_ENABLED_ERROR),
    TRBC(SHORT_PACKET),
    TRBC(RING_UNDERRUN),
    TRBC(RING_OVERRUN),
    TRBC(VF_EVENT_RING_FULL),
    TRBC(PARAMETER_ERROR),
    TRBC(BANDWIDTH_OVERRUN_ERROR),
    TRBC(CONTEXT_STATE_ERROR),
    TRBC(NO_PING_RESPONSE_ERROR),
    TRBC(EVENT_RING_FULL_ERROR),
    TRBC(INCOMPATIBLE_DEVICE_ERROR),
    TRBC(MISSED_SERVICE_ERROR),
    TRBC(COMMAND_RING_STOPPED),
    TRBC(COMMAND_ABORTED),
    TRBC(STOPPED),
    TRBC(STOPPED_LENGTH_INVALID),
    TRBC(STOPPED_SHORT_PACKET),
    TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
    [30] = "<reserved>",
    TRBC(ISOCH_BUFFER_OVERRUN),
    TRBC(EVENT_LOST_ERROR),
    TRBC(UNDEFINED_ERROR),
    TRBC(INVALID_STREAM_ID_ERROR),
    TRBC(SECONDARY_BANDWIDTH_ERROR),
    TRBC(SPLIT_TRANSACTION_ERROR),
    [XHCI_TRBC_MAX] = NULL
#undef TRBC
};
230
231static void report_error(int code)
232{
233 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
234 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
235 else
236 usb_log_error("Command resulted in reserved or vendor specific error.");
237}
238
239int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
240{
241 // TODO: Update dequeue ptrs.
242 assert(hc);
243 assert(trb);
244
245 usb_log_debug2("HC(%p) Command completed.", hc);
246
247 int code;
248 uint64_t phys;
249 xhci_cmd_t *command;
250
251 code = TRB_GET_CODE(*trb);
252 phys = TRB_GET_PHYS(*trb);;
253 command = get_command(hc, phys);
254 if (command == NULL) {
255 // TODO: STOP & ABORT may not have command structs in the list!
256 usb_log_warning("No command struct for this completion event found.");
257
258 if (code != XHCI_TRBC_SUCCESS)
259 report_error(code);
260
261 return EOK;
262 }
263
264 command->status = code;
265 command->slot_id = TRB_GET_SLOT(*trb);
266
267 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
268 if (TRB_TYPE(command->_header.trb) != XHCI_TRB_TYPE_NO_OP_CMD) {
269 if (code != XHCI_TRBC_SUCCESS) {
270 report_error(code);
271 xhci_dump_trb(&command->_header.trb);
272 }
273 }
274
275 switch (TRB_TYPE(command->_header.trb)) {
276 case XHCI_TRB_TYPE_NO_OP_CMD:
277 assert(code == XHCI_TRBC_TRB_ERROR);
278 break;
279 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
280 break;
281 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
282 break;
283 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
284 break;
285 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
286 break;
287 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
288 break;
289 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
290 break;
291 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
292 // Note: If the endpoint was in the middle of a transfer, then the xHC
293 // will add a Transfer TRB before the Event TRB, research that and
294 // handle it appropriately!
295 break;
296 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
297 break;
298 default:
299 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
300
301 command->_header.completed = true;
302 return ENAK;
303 }
304
305 fibril_mutex_lock(&command->_header.completed_mtx);
306 command->_header.completed = true;
307 fibril_condvar_broadcast(&command->_header.completed_cv);
308 fibril_mutex_unlock(&command->_header.completed_mtx);
309
310 if (command->_header.async) {
311 /* Free the command and other DS upon completion. */
312 xhci_cmd_fini(command);
313 }
314
315 return EOK;
316}
317
318/* Command-issuing functions */
319
320static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
321{
322 assert(hc);
323
324 xhci_trb_clean(&cmd->_header.trb);
325
326 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
327
328 return enqueue_command(hc, cmd, 0, 0);
329}
330
331static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
332{
333 assert(hc);
334
335 xhci_trb_clean(&cmd->_header.trb);
336
337 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
338 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
339
340 return enqueue_command(hc, cmd, 0, 0);
341}
342
343static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
344{
345 assert(hc);
346 assert(cmd);
347
348 xhci_trb_clean(&cmd->_header.trb);
349
350 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
351 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
352
353 return enqueue_command(hc, cmd, 0, 0);
354}
355
356static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
357{
358 assert(hc);
359 assert(cmd);
360 assert(cmd->input_ctx);
361
362 /**
363 * TODO: Requirements for this command:
364 * dcbaa[slot_id] is properly sized and initialized
365 * ictx has valids slot context and endpoint 0, all
366 * other should be ignored at this point (see section 4.6.5).
367 */
368
369 xhci_trb_clean(&cmd->_header.trb);
370
371 uint64_t phys_addr = (uint64_t) addr_to_phys(cmd->input_ctx);
372 TRB_SET_ICTX(cmd->_header.trb, phys_addr);
373
374 /**
375 * Note: According to section 6.4.3.4, we can set the 9th bit
376 * of the control field of the trb (BSR) to 1 and then the xHC
377 * will not issue the SET_ADDRESS request to the USB device.
378 * This can be used to provide compatibility with legacy USB devices
379 * that require their device descriptor to be read before such request.
380 */
381 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
382 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
383
384 return enqueue_command(hc, cmd, 0, 0);
385}
386
387static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
388{
389 assert(hc);
390 assert(cmd);
391
392 xhci_trb_clean(&cmd->_header.trb);
393
394 if (!cmd->deconfigure) {
395 /* If the DC flag is on, input context is not evaluated. */
396 assert(cmd->input_ctx);
397
398 uint64_t phys_addr = (uint64_t) addr_to_phys(cmd->input_ctx);
399 TRB_SET_ICTX(cmd->_header.trb, phys_addr);
400 }
401
402 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
403 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
404 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
405
406 return enqueue_command(hc, cmd, 0, 0);
407}
408
409static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
410{
411 assert(hc);
412 assert(cmd);
413 assert(cmd->input_ctx);
414
415 /**
416 * Note: All Drop Context flags of the input context shall be 0,
417 * all Add Context flags shall be initialize to indicate IDs
418 * of the contexts affected by the command.
419 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
420 */
421 xhci_trb_clean(&cmd->_header.trb);
422
423 uint64_t phys_addr = (uint64_t) addr_to_phys(cmd->input_ctx);
424 TRB_SET_ICTX(cmd->_header.trb, phys_addr);
425
426 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
427 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
428
429 return enqueue_command(hc, cmd, 0, 0);
430}
431
432static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
433{
434 assert(hc);
435 assert(cmd);
436
437 /**
438 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
439 * information about this flag.
440 */
441 xhci_trb_clean(&cmd->_header.trb);
442
443 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
444 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
445 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
446 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
447
448 return enqueue_command(hc, cmd, 0, 0);
449}
450
451static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
452{
453 assert(hc);
454 assert(cmd);
455
456 xhci_trb_clean(&cmd->_header.trb);
457
458 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
459 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
460 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
461 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
462
463 return enqueue_command(hc, cmd, 0, 0);
464}
465
466static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
467{
468 assert(hc);
469 assert(cmd);
470
471 xhci_trb_clean(&cmd->_header.trb);
472
473 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
474 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
475 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
476 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
477 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
478
479 /**
480 * TODO: Set DCS (see section 4.6.10).
481 */
482
483 return enqueue_command(hc, cmd, 0, 0);
484}
485
486static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
487{
488 assert(hc);
489 assert(cmd);
490
491 xhci_trb_clean(&cmd->_header.trb);
492
493 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
494 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
495
496 return enqueue_command(hc, cmd, 0, 0);
497}
498
499static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
500{
501 assert(hc);
502 assert(cmd);
503
504 xhci_trb_clean(&cmd->_header.trb);
505
506 uint64_t phys_addr = (uint64_t) addr_to_phys(cmd->bandwidth_ctx);
507 TRB_SET_ICTX(cmd->_header.trb, phys_addr);
508
509 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
510 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
511 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
512
513 return enqueue_command(hc, cmd, 0, 0);
514}
515
/* The table of command-issuing functions, indexed by xhci_cmd_type_t.
 * NULL entries mark commands without an implemented issuer; callers
 * (xhci_cmd_sync, xhci_cmd_async_fini) translate NULL to ENOTSUP. */

typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

static cmd_handler cmd_handlers [] = {
    [XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
    [XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
    [XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
    [XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
    [XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
    [XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
    [XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
    [XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
    [XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
    // TODO: Force event (optional normative, for VMM, section 4.6.12).
    [XHCI_CMD_FORCE_EVENT] = NULL,
    // TODO: Negotiate bandwidth (optional normative, section 4.6.13).
    [XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
    // TODO: Set latency tolerance value (optional normative, section 4.6.14).
    [XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
    // TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
    [XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
    // TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
    [XHCI_CMD_FORCE_HEADER] = NULL,
    [XHCI_CMD_NO_OP] = no_op_cmd
};
542
543static int wait_for_cmd_completion(xhci_cmd_t *cmd)
544{
545 int rv = EOK;
546
547 fibril_mutex_lock(&cmd->_header.completed_mtx);
548 while (!cmd->_header.completed) {
549 usb_log_debug2("Waiting for event completion: going to sleep.");
550 rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, cmd->_header.timeout);
551
552 usb_log_debug2("Waiting for event completion: woken: %s", str_error(rv));
553 if (rv == ETIMEOUT) {
554 break;
555 }
556 }
557 fibril_mutex_unlock(&cmd->_header.completed_mtx);
558
559 return rv;
560}
561
562/** Issue command and block the current fibril until it is completed or timeout
563 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
564 */
565int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
566{
567 assert(hc);
568 assert(cmd);
569
570 int err;
571
572 if (!cmd_handlers[cmd->_header.cmd]) {
573 /* Handler not implemented. */
574 return ENOTSUP;
575 }
576
577 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
578 /* Command could not be issued. */
579 return err;
580 }
581
582 if ((err = wait_for_cmd_completion(cmd))) {
583 /* Timeout expired or command failed. */
584 return err;
585 }
586
587 return EOK;
588}
589
590/** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
591 * is a useful shorthand for issuing commands without out parameters.
592 */
593int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
594{
595 const int err = xhci_cmd_sync(hc, cmd);
596 xhci_cmd_fini(cmd);
597
598 return err;
599}
600
601/** Does the same thing as `xhci_cmd_sync_fini` without blocking the current
602 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
603 */
604int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
605{
606 assert(hc);
607 assert(stack_cmd);
608
609 /* Save the command for later. */
610 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
611 if (!heap_cmd) {
612 return ENOMEM;
613 }
614
615 /* TODO: Is this good for the mutex and the condvar? */
616 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
617 heap_cmd->_header.async = true;
618
619 /* Issue the command. */
620 int err;
621
622 if (!cmd_handlers[heap_cmd->_header.cmd]) {
623 /* Handler not implemented. */
624 err = ENOTSUP;
625 goto err_heap_cmd;
626 }
627
628 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
629 /* Command could not be issued. */
630 goto err_heap_cmd;
631 }
632
633 return EOK;
634
635err_heap_cmd:
636 free(heap_cmd);
637 return err;
638}
639
640/**
641 * @}
642 */
Note: See TracBrowser for help on using the repository browser.