source: mainline/uspace/drv/bus/usb/xhci/commands.c@ b80c1ab

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b80c1ab was b80c1ab, checked in by Aearsis <Hlavaty.Ondrej@…>, 8 years ago

xhci: use dma_buffers instead of malloc32 util

A bit of refactoring was needed to adapt scratchpad buffers.

  • Property mode set to 100644
File size: 17.2 KB
Line 
1/*
2 * Copyright (c) 2017 Jaroslav Jindrak
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup drvusbxhci
30 * @{
31 */
32/** @file
33 * @brief Command sending functions.
34 */
35
36#include <errno.h>
37#include <str_error.h>
38#include <usb/debug.h>
39#include "commands.h"
40#include "debug.h"
41#include "hc.h"
42#include "hw_struct/context.h"
43#include "hw_struct/trb.h"
44
/*
 * Helpers for filling individual command-TRB fields. All of them OR the
 * (byte-swapped) value into the TRB, so the TRB must be zeroed first
 * (see xhci_trb_clean in the command-issuing functions below).
 *
 * Fixes vs. previous revision:
 *  - TRB_SET_EP: the Endpoint ID is a 5-bit field (bits 20:16, xHCI spec
 *    section 6.4.3), so the mask must be 0x1F. The old mask 0x5 corrupted
 *    most endpoint IDs (e.g. 2 -> 0, 3 -> 1).
 *  - TRB_SET_TCS / TRB_SET_DEV_SPEED: macro arguments are now parenthesized.
 */
#define TRB_SET_TCS(trb, tcs) (trb).control |= host2xhci(32, (((tcs) & 0x1) << 9))
#define TRB_SET_TYPE(trb, type) (trb).control |= host2xhci(32, (type) << 10)
#define TRB_SET_DC(trb, dc) (trb).control |= host2xhci(32, (dc) << 9)
#define TRB_SET_EP(trb, ep) (trb).control |= host2xhci(32, ((ep) & 0x1F) << 16)
#define TRB_SET_STREAM(trb, st) (trb).control |= host2xhci(32, ((st) & 0xFFFF) << 16)
#define TRB_SET_SUSP(trb, susp) (trb).control |= host2xhci(32, ((susp) & 0x1) << 23)
#define TRB_SET_SLOT(trb, slot) (trb).control |= host2xhci(32, (slot) << 24)
#define TRB_SET_DEV_SPEED(trb, speed) (trb).control |= host2xhci(32, ((speed) & 0xF) << 16)

/**
 * TODO: Not sure about SCT and DCS (see section 6.4.3.9).
 */
#define TRB_SET_DEQUEUE_PTR(trb, dptr) (trb).parameter |= host2xhci(64, (dptr))
#define TRB_SET_ICTX(trb, phys) (trb).parameter |= host2xhci(64, (phys) & (~0xF))

#define TRB_GET_CODE(trb) XHCI_DWORD_EXTRACT((trb).status, 31, 24)
#define TRB_GET_SLOT(trb) XHCI_DWORD_EXTRACT((trb).control, 31, 24)
#define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)
63
64/* Control functions */
65
66int xhci_init_commands(xhci_hc_t *hc)
67{
68 assert(hc);
69
70 list_initialize(&hc->commands);
71
72 fibril_mutex_initialize(&hc->commands_mtx);
73
74 return EOK;
75}
76
/** Tear down the command subsystem.
 *
 * Currently a no-op: the pending-command list and its mutex need no
 * explicit deallocation.
 */
void xhci_fini_commands(xhci_hc_t *hc)
{
	// Note: Untested.
	assert(hc);
}
82
83void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
84{
85 memset(cmd, 0, sizeof(*cmd));
86
87 link_initialize(&cmd->_header.link);
88
89 fibril_mutex_initialize(&cmd->_header.completed_mtx);
90 fibril_condvar_initialize(&cmd->_header.completed_cv);
91
92 cmd->_header.cmd = type;
93 cmd->_header.timeout = XHCI_DEFAULT_TIMEOUT;
94}
95
96void xhci_cmd_fini(xhci_cmd_t *cmd)
97{
98 list_remove(&cmd->_header.link);
99
100 dma_buffer_free(&cmd->input_ctx);
101 dma_buffer_free(&cmd->bandwidth_ctx);
102
103 if (cmd->_header.async) {
104 free(cmd);
105 }
106}
107
108static inline xhci_cmd_t *get_command(xhci_hc_t *hc, uint64_t phys)
109{
110 fibril_mutex_lock(&hc->commands_mtx);
111
112 link_t *cmd_link = list_first(&hc->commands);
113
114 while (cmd_link != NULL) {
115 xhci_cmd_t *cmd = list_get_instance(cmd_link, xhci_cmd_t, _header.link);
116
117 if (cmd->_header.trb_phys == phys)
118 break;
119
120 cmd_link = list_next(cmd_link, &hc->commands);
121 }
122
123 if (cmd_link != NULL) {
124 list_remove(cmd_link);
125 fibril_mutex_unlock(&hc->commands_mtx);
126
127 return list_get_instance(cmd_link, xhci_cmd_t, _header.link);
128 }
129
130 fibril_mutex_unlock(&hc->commands_mtx);
131 return NULL;
132}
133
134static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd, unsigned doorbell, unsigned target)
135{
136 assert(hc);
137 assert(cmd);
138
139 fibril_mutex_lock(&hc->commands_mtx);
140 list_append(&cmd->_header.link, &hc->commands);
141 fibril_mutex_unlock(&hc->commands_mtx);
142
143 xhci_trb_ring_enqueue(&hc->command_ring, &cmd->_header.trb, &cmd->_header.trb_phys);
144 hc_ring_doorbell(hc, doorbell, target);
145
146 usb_log_debug2("HC(%p): Sent command:", hc);
147 xhci_dump_trb(&cmd->_header.trb);
148
149 return EOK;
150}
151
/** Ask the xHC to stop the command ring by setting the Command Stop (CS)
 * flag in CRCR.
 */
void xhci_stop_command_ring(xhci_hc_t *hc)
{
	assert(hc);

	XHCI_REG_SET(hc->op_regs, XHCI_OP_CS, 1);

	/**
	 * Note: There is a bug in qemu that checks CS only when CRCR_HI
	 * is written, this (and the read/write in abort) ensures
	 * the command rings stops.
	 */
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, XHCI_REG_RD(hc->op_regs, XHCI_OP_CRCR_HI));
}
165
/** Ask the xHC to abort the currently executing command by setting the
 * Command Abort (CA) flag in CRCR.
 *
 * The read-back/write of CRCR_HI works around the same qemu quirk as in
 * xhci_stop_command_ring above.
 */
void xhci_abort_command_ring(xhci_hc_t *hc)
{
	assert(hc);

	XHCI_REG_WR(hc->op_regs, XHCI_OP_CA, 1);
	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, XHCI_REG_RD(hc->op_regs, XHCI_OP_CRCR_HI));
}
173
/** Restart the command ring: set the Command Ring Running (CRR) flag and
 * ring doorbell 0 (the host controller doorbell).
 */
void xhci_start_command_ring(xhci_hc_t *hc)
{
	assert(hc);

	XHCI_REG_WR(hc->op_regs, XHCI_OP_CRR, 1);
	hc_ring_doorbell(hc, 0, 0);
}
181
/* Human-readable names for TRB completion codes, indexed by the code value.
 * Entries not covered by the TRBC() expansions (reserved / vendor-defined
 * codes) remain NULL and are reported generically by report_error(). */
static const char *trb_codes [] = {
#define TRBC(t) [XHCI_TRBC_##t] = #t
	TRBC(INVALID),
	TRBC(SUCCESS),
	TRBC(DATA_BUFFER_ERROR),
	TRBC(BABBLE_DETECTED_ERROR),
	TRBC(USB_TRANSACTION_ERROR),
	TRBC(TRB_ERROR),
	TRBC(STALL_ERROR),
	TRBC(RESOURCE_ERROR),
	TRBC(BANDWIDTH_ERROR),
	TRBC(NO_SLOTS_ERROR),
	TRBC(INVALID_STREAM_ERROR),
	TRBC(SLOT_NOT_ENABLED_ERROR),
	TRBC(EP_NOT_ENABLED_ERROR),
	TRBC(SHORT_PACKET),
	TRBC(RING_UNDERRUN),
	TRBC(RING_OVERRUN),
	TRBC(VF_EVENT_RING_FULL),
	TRBC(PARAMETER_ERROR),
	TRBC(BANDWIDTH_OVERRUN_ERROR),
	TRBC(CONTEXT_STATE_ERROR),
	TRBC(NO_PING_RESPONSE_ERROR),
	TRBC(EVENT_RING_FULL_ERROR),
	TRBC(INCOMPATIBLE_DEVICE_ERROR),
	TRBC(MISSED_SERVICE_ERROR),
	TRBC(COMMAND_RING_STOPPED),
	TRBC(COMMAND_ABORTED),
	TRBC(STOPPED),
	TRBC(STOPPED_LENGTH_INVALID),
	TRBC(STOPPED_SHORT_PACKET),
	TRBC(MAX_EXIT_LATENCY_TOO_LARGE_ERROR),
	[30] = "<reserved>",
	TRBC(ISOCH_BUFFER_OVERRUN),
	TRBC(EVENT_LOST_ERROR),
	TRBC(UNDEFINED_ERROR),
	TRBC(INVALID_STREAM_ID_ERROR),
	TRBC(SECONDARY_BANDWIDTH_ERROR),
	TRBC(SPLIT_TRANSACTION_ERROR),
	[XHCI_TRBC_MAX] = NULL
#undef TRBC
};
224
225static void report_error(int code)
226{
227 if (code < XHCI_TRBC_MAX && trb_codes[code] != NULL)
228 usb_log_error("Command resulted in error: %s.", trb_codes[code]);
229 else
230 usb_log_error("Command resulted in reserved or vendor specific error.");
231}
232
233int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb)
234{
235 // TODO: Update dequeue ptrs.
236 assert(hc);
237 assert(trb);
238
239 usb_log_debug2("HC(%p) Command completed.", hc);
240
241 int code;
242 uint64_t phys;
243 xhci_cmd_t *command;
244
245 code = TRB_GET_CODE(*trb);
246 phys = TRB_GET_PHYS(*trb);;
247 command = get_command(hc, phys);
248 if (command == NULL) {
249 // TODO: STOP & ABORT may not have command structs in the list!
250 usb_log_warning("No command struct for this completion event found.");
251
252 if (code != XHCI_TRBC_SUCCESS)
253 report_error(code);
254
255 return EOK;
256 }
257
258 /* Semantics of NO_OP_CMD is that success is marked as a TRB error. */
259 if (command->_header.cmd == XHCI_CMD_NO_OP && code == XHCI_TRBC_TRB_ERROR)
260 code = XHCI_TRBC_SUCCESS;
261
262 command->status = code;
263 command->slot_id = TRB_GET_SLOT(*trb);
264
265 usb_log_debug2("Completed command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
266
267 if (code != XHCI_TRBC_SUCCESS) {
268 report_error(code);
269 xhci_dump_trb(&command->_header.trb);
270 }
271
272 switch (TRB_TYPE(command->_header.trb)) {
273 case XHCI_TRB_TYPE_NO_OP_CMD:
274 break;
275 case XHCI_TRB_TYPE_ENABLE_SLOT_CMD:
276 break;
277 case XHCI_TRB_TYPE_DISABLE_SLOT_CMD:
278 break;
279 case XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD:
280 break;
281 case XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD:
282 break;
283 case XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD:
284 break;
285 case XHCI_TRB_TYPE_RESET_ENDPOINT_CMD:
286 break;
287 case XHCI_TRB_TYPE_STOP_ENDPOINT_CMD:
288 // Note: If the endpoint was in the middle of a transfer, then the xHC
289 // will add a Transfer TRB before the Event TRB, research that and
290 // handle it appropriately!
291 break;
292 case XHCI_TRB_TYPE_RESET_DEVICE_CMD:
293 break;
294 default:
295 usb_log_debug2("Unsupported command trb: %s", xhci_trb_str_type(TRB_TYPE(command->_header.trb)));
296
297 command->_header.completed = true;
298 return ENAK;
299 }
300
301 fibril_mutex_lock(&command->_header.completed_mtx);
302 command->_header.completed = true;
303 fibril_condvar_broadcast(&command->_header.completed_cv);
304 fibril_mutex_unlock(&command->_header.completed_mtx);
305
306 if (command->_header.async) {
307 /* Free the command and other DS upon completion. */
308 xhci_cmd_fini(command);
309 }
310
311 return EOK;
312}
313
314/* Command-issuing functions */
315
316static int no_op_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
317{
318 assert(hc);
319
320 xhci_trb_clean(&cmd->_header.trb);
321
322 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD);
323
324 return enqueue_command(hc, cmd, 0, 0);
325}
326
327static int enable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
328{
329 assert(hc);
330
331 xhci_trb_clean(&cmd->_header.trb);
332
333 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ENABLE_SLOT_CMD);
334 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16);
335
336 return enqueue_command(hc, cmd, 0, 0);
337}
338
339static int disable_slot_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
340{
341 assert(hc);
342 assert(cmd);
343
344 xhci_trb_clean(&cmd->_header.trb);
345
346 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_DISABLE_SLOT_CMD);
347 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
348
349 return enqueue_command(hc, cmd, 0, 0);
350}
351
352static int address_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
353{
354 assert(hc);
355 assert(cmd);
356 assert(dma_buffer_is_set(&cmd->input_ctx));
357
358 /**
359 * TODO: Requirements for this command:
360 * dcbaa[slot_id] is properly sized and initialized
361 * ictx has valids slot context and endpoint 0, all
362 * other should be ignored at this point (see section 4.6.5).
363 */
364
365 xhci_trb_clean(&cmd->_header.trb);
366
367 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
368
369 /**
370 * Note: According to section 6.4.3.4, we can set the 9th bit
371 * of the control field of the trb (BSR) to 1 and then the xHC
372 * will not issue the SET_ADDRESS request to the USB device.
373 * This can be used to provide compatibility with legacy USB devices
374 * that require their device descriptor to be read before such request.
375 */
376 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_ADDRESS_DEVICE_CMD);
377 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
378
379 return enqueue_command(hc, cmd, 0, 0);
380}
381
382static int configure_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
383{
384 assert(hc);
385 assert(cmd);
386
387 xhci_trb_clean(&cmd->_header.trb);
388
389 if (!cmd->deconfigure) {
390 /* If the DC flag is on, input context is not evaluated. */
391 assert(dma_buffer_is_set(&cmd->input_ctx));
392
393 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
394 }
395
396 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_CONFIGURE_ENDPOINT_CMD);
397 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
398 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure);
399
400 return enqueue_command(hc, cmd, 0, 0);
401}
402
403static int evaluate_context_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
404{
405 assert(hc);
406 assert(cmd);
407 assert(dma_buffer_is_set(&cmd->input_ctx));
408
409 /**
410 * Note: All Drop Context flags of the input context shall be 0,
411 * all Add Context flags shall be initialize to indicate IDs
412 * of the contexts affected by the command.
413 * Refer to sections 6.2.2.3 and 6.3.3.3 for further info.
414 */
415 xhci_trb_clean(&cmd->_header.trb);
416
417 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
418
419 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
420 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
421
422 return enqueue_command(hc, cmd, 0, 0);
423}
424
425static int reset_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
426{
427 assert(hc);
428 assert(cmd);
429
430 /**
431 * Note: TCS can have values 0 or 1. If it is set to 0, see sectuon 4.5.8 for
432 * information about this flag.
433 */
434 xhci_trb_clean(&cmd->_header.trb);
435
436 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_ENDPOINT_CMD);
437 TRB_SET_TCS(cmd->_header.trb, cmd->tcs);
438 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
439 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
440
441 return enqueue_command(hc, cmd, 0, 0);
442}
443
444static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
445{
446 assert(hc);
447 assert(cmd);
448
449 xhci_trb_clean(&cmd->_header.trb);
450
451 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
452 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
453 TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
454 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
455
456 return enqueue_command(hc, cmd, 0, 0);
457}
458
459static int set_tr_dequeue_pointer_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
460{
461 assert(hc);
462 assert(cmd);
463
464 xhci_trb_clean(&cmd->_header.trb);
465
466 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_SET_TR_DEQUEUE_POINTER_CMD);
467 TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
468 TRB_SET_STREAM(cmd->_header.trb, cmd->stream_id);
469 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
470 TRB_SET_DEQUEUE_PTR(cmd->_header.trb, cmd->dequeue_ptr);
471
472 /**
473 * TODO: Set DCS (see section 4.6.10).
474 */
475
476 return enqueue_command(hc, cmd, 0, 0);
477}
478
479static int reset_device_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
480{
481 assert(hc);
482 assert(cmd);
483
484 xhci_trb_clean(&cmd->_header.trb);
485
486 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_RESET_DEVICE_CMD);
487 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
488
489 return enqueue_command(hc, cmd, 0, 0);
490}
491
492static int get_port_bandwidth_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
493{
494 assert(hc);
495 assert(cmd);
496
497 xhci_trb_clean(&cmd->_header.trb);
498
499 TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
500
501 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
502 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
503 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed);
504
505 return enqueue_command(hc, cmd, 0, 0);
506}
507
508/* The table of command-issuing functions. */
509
/* A command handler builds the command TRB and enqueues it on the ring. */
typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);

/* Dispatch table indexed by xhci_cmd_type_t; NULL means not implemented
 * (callers translate that to ENOTSUP). */
static cmd_handler cmd_handlers [] = {
	[XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
	[XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
	[XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
	[XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
	[XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
	[XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
	[XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
	[XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
	[XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
	// TODO: Force event (optional normative, for VMM, section 4.6.12).
	[XHCI_CMD_FORCE_EVENT] = NULL,
	// TODO: Negotiate bandwidth (optional normative, section 4.6.13).
	[XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
	// TODO: Set latency tolerance value (optional normative, section 4.6.14).
	[XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
	// TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
	[XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
	// TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
	[XHCI_CMD_FORCE_HEADER] = NULL,
	[XHCI_CMD_NO_OP] = no_op_cmd
};
534
535static int wait_for_cmd_completion(xhci_cmd_t *cmd)
536{
537 int rv = EOK;
538
539 fibril_mutex_lock(&cmd->_header.completed_mtx);
540 while (!cmd->_header.completed) {
541 usb_log_debug2("Waiting for event completion: going to sleep.");
542 rv = fibril_condvar_wait_timeout(&cmd->_header.completed_cv, &cmd->_header.completed_mtx, cmd->_header.timeout);
543
544 usb_log_debug2("Waiting for event completion: woken: %s", str_error(rv));
545 if (rv == ETIMEOUT) {
546 break;
547 }
548 }
549 fibril_mutex_unlock(&cmd->_header.completed_mtx);
550
551 return rv;
552}
553
554/** Issue command and block the current fibril until it is completed or timeout
555 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
556 */
557int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
558{
559 assert(hc);
560 assert(cmd);
561
562 int err;
563
564 if (!cmd_handlers[cmd->_header.cmd]) {
565 /* Handler not implemented. */
566 return ENOTSUP;
567 }
568
569 if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
570 /* Command could not be issued. */
571 return err;
572 }
573
574 if ((err = wait_for_cmd_completion(cmd))) {
575 /* Timeout expired or command failed. */
576 return err;
577 }
578
579 return cmd->status == XHCI_TRBC_SUCCESS ? EOK : EINVAL;
580}
581
582/** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
583 * is a useful shorthand for issuing commands without out parameters.
584 */
585int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
586{
587 const int err = xhci_cmd_sync(hc, cmd);
588 xhci_cmd_fini(cmd);
589
590 return err;
591}
592
593/** Does the same thing as `xhci_cmd_sync_fini` without blocking the current
594 * fibril. The command is copied to stack memory and `fini` is called upon its completion.
595 */
596int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
597{
598 assert(hc);
599 assert(stack_cmd);
600
601 /* Save the command for later. */
602 xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
603 if (!heap_cmd) {
604 return ENOMEM;
605 }
606
607 /* TODO: Is this good for the mutex and the condvar? */
608 memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
609 heap_cmd->_header.async = true;
610
611 /* Issue the command. */
612 int err;
613
614 if (!cmd_handlers[heap_cmd->_header.cmd]) {
615 /* Handler not implemented. */
616 err = ENOTSUP;
617 goto err_heap_cmd;
618 }
619
620 if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
621 /* Command could not be issued. */
622 goto err_heap_cmd;
623 }
624
625 return EOK;
626
627err_heap_cmd:
628 free(heap_cmd);
629 return err;
630}
631
632/**
633 * @}
634 */
Note: See TracBrowser for help on using the repository browser.