Changeset c3d926f in mainline
- Timestamp: 2017-10-25T08:03:13Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: a5b3de6
- Parents: 0206d35
- Location: uspace/drv/bus/usb/xhci
- Files: 4 edited
Legend:
- Unmodified lines are shown unprefixed ("…" marks skipped unmodified code)
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
uspace/drv/bus/usb/xhci/commands.c
(r0206d35 → rc3d926f)

The bookkeeping fields of xhci_cmd_t move into an internal _header sub-struct, so every reference to cmd->link, cmd->trb, cmd->trb_phys, cmd->completed and the completion mutex/condvar in this file becomes the corresponding cmd->_header.* field. The heap-allocation helpers are dropped and initialization now takes the command type:

    #define TRB_GET_PHYS(trb) (XHCI_QWORD_EXTRACT((trb).parameter, 63, 4) << 4)

  + /* Control functions */
  +
    int xhci_init_commands(xhci_hc_t *hc)
    {
    …
    }

  - int xhci_cmd_wait(xhci_cmd_t *cmd, suseconds_t timeout)
  - {
  -     int rv = EOK;
  -
  -     fibril_mutex_lock(&cmd->completed_mtx);
  -     while (!cmd->completed) {
  -         usb_log_debug2("Waiting for event completion: going to sleep.");
  -         rv = fibril_condvar_wait_timeout(&cmd->completed_cv, &cmd->completed_mtx, timeout);
  -
  -         usb_log_debug2("Waiting for event completion: woken: %s", str_error(rv));
  -         if (rv == ETIMEOUT)
  -             break;
  -     }
  -     fibril_mutex_unlock(&cmd->completed_mtx);
  -
  -     return rv;
  - }
  -
  - xhci_cmd_t *xhci_cmd_alloc(void)
  - {
  -     xhci_cmd_t *cmd = malloc(sizeof(xhci_cmd_t));
  -     xhci_cmd_init(cmd);
  -
  -     usb_log_debug2("Allocating cmd on the heap. Don't forget to deallocate it!");
  -     return cmd;
  - }
  -
  - void xhci_cmd_init(xhci_cmd_t *cmd)
  + void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type)
    {
        memset(cmd, 0, sizeof(*cmd));

  -     link_initialize(&cmd->link);
  -
  -     fibril_mutex_initialize(&cmd->completed_mtx);
  -     fibril_condvar_initialize(&cmd->completed_cv);
  +     link_initialize(&cmd->_header.link);
  +
  +     fibril_mutex_initialize(&cmd->_header.completed_mtx);
  +     fibril_condvar_initialize(&cmd->_header.completed_cv);
  +
  +     cmd->_header.cmd = type;
  +     cmd->_header.timeout = XHCI_DEFAULT_TIMEOUT;
    }

    void xhci_cmd_fini(xhci_cmd_t *cmd)
    {
  -     list_remove(&cmd->link);
  - }
  -
  - void xhci_cmd_free(xhci_cmd_t *cmd)
  - {
  -     xhci_cmd_fini(cmd);
  -     free(cmd);
  +     list_remove(&cmd->_header.link);
  +
  +     if (cmd->input_ctx) {
  +         free32(cmd->input_ctx);
  +     }
  +
  +     if (cmd->bandwidth_ctx) {
  +         free32(cmd->bandwidth_ctx);
  +     }
  +
  +     if (cmd->_header.async) {
  +         free(cmd);
  +     }
    }

get_command() and enqueue_command() are only touched by the cmd->_header.* rename.

xhci_handle_command_completion() moves above the command-issuing functions; apart from the rename, it now finalizes asynchronously issued commands once they complete:

        fibril_mutex_lock(&command->_header.completed_mtx);
        command->_header.completed = true;
        fibril_condvar_broadcast(&command->_header.completed_cv);
        fibril_mutex_unlock(&command->_header.completed_mtx);

  +     if (command->_header.async) {
  +         /* Free the command and other DS upon completion. */
  +         xhci_cmd_fini(command);
  +     }

        return EOK;
    }

The public per-command senders are removed:

  - int xhci_send_no_op_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
  - int xhci_send_enable_slot_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
  - int xhci_send_disable_slot_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
  - int xhci_send_address_device_command(xhci_hc_t *hc, xhci_cmd_t *cmd, xhci_input_ctx_t *ictx)
  - int xhci_send_configure_endpoint_command(xhci_hc_t *hc, xhci_cmd_t *cmd, xhci_input_ctx_t *ictx)
  - int xhci_send_evaluate_context_command(xhci_hc_t *hc, xhci_cmd_t *cmd, xhci_input_ctx_t *ictx)
  - int xhci_send_reset_endpoint_command(xhci_hc_t *hc, xhci_cmd_t *cmd, uint32_t ep_id, uint8_t tcs)
  - int xhci_send_stop_endpoint_command(xhci_hc_t *hc, xhci_cmd_t *cmd, uint32_t ep_id, uint8_t susp)
  - int xhci_send_set_dequeue_ptr_command(xhci_hc_t *hc, xhci_cmd_t *cmd, uintptr_t dequeue_ptr, uint16_t stream_id, uint32_t ep_id)
  - int xhci_send_reset_device_command(xhci_hc_t *hc, xhci_cmd_t *cmd)
  - int xhci_get_port_bandwidth_command(xhci_hc_t *hc, xhci_cmd_t *cmd, xhci_port_bandwidth_ctx_t *ctx, uint8_t device_speed)

and replaced by file-local equivalents (no_op_cmd, enable_slot_cmd, disable_slot_cmd, address_device_cmd, configure_endpoint_cmd, evaluate_context_cmd, reset_endpoint_cmd, stop_endpoint_cmd, set_tr_dequeue_pointer_cmd, reset_device_cmd, get_port_bandwidth_cmd) whose bodies carry over, except that all arguments are now read from the command structure (cmd->slot_id, cmd->endpoint_id, cmd->stream_id, cmd->input_ctx, cmd->bandwidth_ctx, cmd->dequeue_ptr, cmd->tcs, cmd->susp, cmd->device_speed). For example:

  + /* Command-issuing functions */
  +
  + static int stop_endpoint_cmd(xhci_hc_t *hc, xhci_cmd_t *cmd)
  + {
  +     assert(hc);
  +     assert(cmd);
  +
  +     xhci_trb_clean(&cmd->_header.trb);
  +
  +     TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_STOP_ENDPOINT_CMD);
  +     TRB_SET_EP(cmd->_header.trb, cmd->endpoint_id);
  +     TRB_SET_SUSP(cmd->_header.trb, cmd->susp);
  +     TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id);
  +
  +     return enqueue_command(hc, cmd, 0, 0);
  + }

The issuers are collected in a dispatch table indexed by command type:

  + /* The table of command-issuing functions. */
  +
  + typedef int (*cmd_handler) (xhci_hc_t *hc, xhci_cmd_t *cmd);
  +
  + static cmd_handler cmd_handlers [] = {
  +     [XHCI_CMD_ENABLE_SLOT] = enable_slot_cmd,
  +     [XHCI_CMD_DISABLE_SLOT] = disable_slot_cmd,
  +     [XHCI_CMD_ADDRESS_DEVICE] = address_device_cmd,
  +     [XHCI_CMD_CONFIGURE_ENDPOINT] = configure_endpoint_cmd,
  +     [XHCI_CMD_EVALUATE_CONTEXT] = evaluate_context_cmd,
  +     [XHCI_CMD_RESET_ENDPOINT] = reset_endpoint_cmd,
  +     [XHCI_CMD_STOP_ENDPOINT] = stop_endpoint_cmd,
  +     [XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd,
  +     [XHCI_CMD_RESET_DEVICE] = reset_device_cmd,
  +     // TODO: Force event (optional normative, for VMM, section 4.6.12).
  +     [XHCI_CMD_FORCE_EVENT] = NULL,
  +     // TODO: Negotiate bandwidth (optional normative, section 4.6.13).
  +     [XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL,
  +     // TODO: Set latency tolerance value (optional normative, section 4.6.14).
  +     [XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL,
  +     // TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
  +     [XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd,
  +     // TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
  +     [XHCI_CMD_FORCE_HEADER] = NULL,
  +     [XHCI_CMD_NO_OP] = no_op_cmd
  + };

The former xhci_cmd_wait() becomes the static wait_for_cmd_completion(), which uses the per-command cmd->_header.timeout, and three new entry points are built on top of it:

  + /** Issue command and block the current fibril until it is completed or timeout
  +  * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`.
  +  */
  + int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd)
  + {
  +     assert(hc);
  +     assert(cmd);
  +
  +     int err;
  +
  +     if (!cmd_handlers[cmd->_header.cmd]) {
  +         /* Handler not implemented. */
  +         return ENOTSUP;
  +     }
  +
  +     if ((err = cmd_handlers[cmd->_header.cmd](hc, cmd))) {
  +         /* Command could not be issued. */
  +         return err;
  +     }
  +
  +     if ((err = wait_for_cmd_completion(cmd))) {
  +         /* Timeout expired or command failed. */
  +         return err;
  +     }
  +
  +     return EOK;
  + }
  +
  + /** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This
  +  * is a useful shorthand for issuing commands without out parameters.
  +  */
  + int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd)
  + {
  +     const int err = xhci_cmd_sync(hc, cmd);
  +     xhci_cmd_fini(cmd);
  +
  +     return err;
  + }
  +
  + /** Does the same thing as `xhci_cmd_sync_fini` without blocking the current
  +  * fibril. The command is copied to the heap and `fini` is called upon its completion.
  +  */
  + int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd)
  + {
  +     assert(hc);
  +     assert(stack_cmd);
  +
  +     /* Save the command for later. */
  +     xhci_cmd_t *heap_cmd = (xhci_cmd_t *) malloc(sizeof(xhci_cmd_t));
  +     if (!heap_cmd) {
  +         return ENOMEM;
  +     }
  +
  +     /* TODO: Is this good for the mutex and the condvar? */
  +     memcpy(heap_cmd, stack_cmd, sizeof(xhci_cmd_t));
  +     heap_cmd->_header.async = true;
  +
  +     /* Issue the command. */
  +     int err;
  +
  +     if (!cmd_handlers[heap_cmd->_header.cmd]) {
  +         /* Handler not implemented. */
  +         err = ENOTSUP;
  +         goto err_heap_cmd;
  +     }
  +
  +     if ((err = cmd_handlers[heap_cmd->_header.cmd](hc, heap_cmd))) {
  +         /* Command could not be issued. */
  +         goto err_heap_cmd;
  +     }
  +
  +     return EOK;
  +
  + err_heap_cmd:
  +     free(heap_cmd);
  +     return err;
  + }
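For orientation, a minimal, hypothetical caller sketch (not part of the changeset; the helper name is made up) showing how the new asynchronous path introduced above is meant to be driven:

    /* Hypothetical example: issue a No-Op command without blocking the fibril.
     * xhci_cmd_async_fini() copies the stack command to the heap and
     * xhci_handle_command_completion() later runs xhci_cmd_fini() on it. */
    static int ping_command_ring(xhci_hc_t *hc)
    {
        xhci_cmd_t cmd;
        xhci_cmd_init(&cmd, XHCI_CMD_NO_OP);

        /* Returns as soon as the TRB is enqueued and the doorbell is rung. */
        return xhci_cmd_async_fini(hc, &cmd);
    }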
uspace/drv/bus/usb/xhci/commands.h
(r0206d35 → rc3d926f)

    #include "hw_struct/trb.h"

  - /* Useful timeouts for `xhci_cmd_wait()` */
  - #define XHCI_DEFAULT_TIMEOUT 1000000
  - #define XHCI_BLOCK_INDEFINITELY 0
  + #define XHCI_DEFAULT_TIMEOUT 1000000
  + #define XHCI_BLOCK_INDEFINITELY 0

    typedef struct xhci_hc xhci_hc_t;
    …
    typedef struct xhci_port_bandwidth_ctx xhci_port_bandwidth_ctx_t;

  + typedef enum xhci_cmd_type {
  +     XHCI_CMD_ENABLE_SLOT,
  +     XHCI_CMD_DISABLE_SLOT,
  +     XHCI_CMD_ADDRESS_DEVICE,
  +     XHCI_CMD_CONFIGURE_ENDPOINT,
  +     XHCI_CMD_EVALUATE_CONTEXT,
  +     XHCI_CMD_RESET_ENDPOINT,
  +     XHCI_CMD_STOP_ENDPOINT,
  +     XHCI_CMD_SET_TR_DEQUEUE_POINTER,
  +     XHCI_CMD_RESET_DEVICE,
  +     XHCI_CMD_FORCE_EVENT,
  +     XHCI_CMD_NEGOTIATE_BANDWIDTH,
  +     XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE,
  +     XHCI_CMD_GET_PORT_BANDWIDTH,
  +     XHCI_CMD_FORCE_HEADER,
  +     XHCI_CMD_NO_OP,
  + } xhci_cmd_type_t;
  +
  - typedef struct xhci_command {
  -     link_t link;
  -
  -     xhci_trb_t trb;
  -     uintptr_t trb_phys;
  -
  -     uint32_t slot_id;
  -     uint32_t status;
  -     bool deconfigure;
  -
  -     bool completed;
  -
  -     /* Will broadcast after command completes. */
  -     fibril_mutex_t completed_mtx;
  -     fibril_condvar_t completed_cv;
  - } xhci_cmd_t;
  + typedef struct xhci_command {
  +     /** Internal fields used for bookkeeping. Need not worry about these. */
  +     struct {
  +         link_t link;
  +
  +         xhci_cmd_type_t cmd;
  +         suseconds_t timeout;
  +
  +         xhci_trb_t trb;
  +         uintptr_t trb_phys;
  +
  +         bool async;
  +         bool completed;
  +
  +         /* Will broadcast after command completes. */
  +         fibril_mutex_t completed_mtx;
  +         fibril_condvar_t completed_cv;
  +     } _header;
  +
  +     /** Below are arguments of all commands mixed together.
  +      * Be sure to know which command accepts what arguments. */
  +
  +     uint32_t slot_id;
  +     uint32_t endpoint_id;
  +     uint16_t stream_id;
  +
  +     xhci_input_ctx_t *input_ctx;
  +     xhci_port_bandwidth_ctx_t *bandwidth_ctx;
  +     uintptr_t dequeue_ptr;
  +
  +     uint8_t tcs;
  +     uint8_t susp;
  +     uint8_t device_speed;
  +     uint32_t status;
  +     bool deconfigure;
  + } xhci_cmd_t;

  + /* Command handling control */
    int xhci_init_commands(xhci_hc_t *);
    void xhci_fini_commands(xhci_hc_t *);
  -
  - xhci_cmd_t *xhci_cmd_alloc(void);
  - void xhci_cmd_init(xhci_cmd_t *);
  - int xhci_cmd_wait(xhci_cmd_t *, suseconds_t);
  - void xhci_cmd_fini(xhci_cmd_t *);
  - void xhci_cmd_free(xhci_cmd_t *);

    void xhci_stop_command_ring(xhci_hc_t *);
    …
    void xhci_start_command_ring(xhci_hc_t *);

  - int xhci_send_no_op_command(xhci_hc_t *, xhci_cmd_t *);
  - int xhci_send_enable_slot_command(xhci_hc_t *, xhci_cmd_t *);
  - int xhci_send_disable_slot_command(xhci_hc_t *, xhci_cmd_t *);
  - int xhci_send_address_device_command(xhci_hc_t *, xhci_cmd_t *, xhci_input_ctx_t *);
  - int xhci_send_configure_endpoint_command(xhci_hc_t *, xhci_cmd_t *, xhci_input_ctx_t *);
  - int xhci_send_evaluate_context_command(xhci_hc_t *, xhci_cmd_t *, xhci_input_ctx_t *);
  - int xhci_send_reset_endpoint_command(xhci_hc_t *, xhci_cmd_t *, uint32_t, uint8_t);
  - int xhci_send_stop_endpoint_command(xhci_hc_t *, xhci_cmd_t *, uint32_t, uint8_t);
  - int xhci_send_set_dequeue_ptr_command(xhci_hc_t *, xhci_cmd_t *, uintptr_t, uint16_t, uint32_t);
  - int xhci_send_reset_device_command(xhci_hc_t *, xhci_cmd_t *);
  - // TODO: Force event (optional normative, for VMM, section 4.6.12).
  - // TODO: Negotiate bandwidth (optional normative, section 4.6.13).
  - // TODO: Set latency tolerance value (optional normative, section 4.6.14).
  - int xhci_get_port_bandwidth_command(xhci_hc_t *, xhci_cmd_t *, xhci_port_bandwidth_ctx_t *, uint8_t);
  - // TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).
  - // TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).
  -
    int xhci_handle_command_completion(xhci_hc_t *, xhci_trb_t *);

  + /* Command lifecycle */
  + void xhci_cmd_init(xhci_cmd_t *, xhci_cmd_type_t);
  + void xhci_cmd_fini(xhci_cmd_t *);
  +
  + /* Issuing commands */
  + int xhci_cmd_sync(xhci_hc_t *, xhci_cmd_t *);
  + int xhci_cmd_sync_fini(xhci_hc_t *, xhci_cmd_t *);
  + int xhci_cmd_async_fini(xhci_hc_t *, xhci_cmd_t *);
  +
  + static inline int xhci_cmd_sync_inline_wrapper(xhci_hc_t *hc, xhci_cmd_t cmd)
  + {
  +     /* Poor man's xhci_cmd_init (everything else is zeroed) */
  +     link_initialize(&cmd._header.link);
  +     fibril_mutex_initialize(&cmd._header.completed_mtx);
  +     fibril_condvar_initialize(&cmd._header.completed_cv);
  +
  +     if (!cmd._header.timeout) {
  +         cmd._header.timeout = XHCI_DEFAULT_TIMEOUT;
  +     }
  +
  +     /* Issue the command */
  +     const int err = xhci_cmd_sync(hc, &cmd);
  +     xhci_cmd_fini(&cmd);
  +
  +     return err;
  + }
  +
  + /** The inline macro expects:
  +  * - hc - HC to schedule command on (xhci_hc_t *).
  +  * - command - Member of `xhci_cmd_type_t` without the "XHCI_CMD_" prefix.
  +  * - VA_ARGS - (optional) Command arguments in struct initialization notation.
  +  *
  +  * The return code and semantics match those of `xhci_cmd_sync_fini`.
  +  *
  +  * Example:
  +  * int err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = 42);
  +  */
  + #define xhci_cmd_sync_inline(hc, command, ...) \
  +     xhci_cmd_sync_inline_wrapper(hc, \
  +         (xhci_cmd_t) { ._header.cmd = XHCI_CMD_##command, ##__VA_ARGS__ })

    #endif
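As a usage illustration of the API declared above, here is a minimal sketch of the manual init/sync/fini path, which is still needed whenever a command has an out parameter such as the slot ID. It is a hypothetical caller, not part of the changeset, and simply mirrors hc_enable_slot() in hc.c below:

    static int example_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
    {
        xhci_cmd_t cmd;
        xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

        /* Blocks the current fibril until completion or timeout. */
        const int err = xhci_cmd_sync(hc, &cmd);
        if (err == EOK && slot_id)
            *slot_id = cmd.slot_id;

        /* Always finalize; this also frees any attached contexts. */
        xhci_cmd_fini(&cmd);
        return err;
    }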
uspace/drv/bus/usb/xhci/hc.c
(r0206d35 → rc3d926f)

The callers switch from the init/send/wait/fini sequence to xhci_cmd_sync() or the xhci_cmd_sync_inline() wrapper. In hc_enable_slot():

    int err;
    xhci_cmd_t cmd;
  - xhci_cmd_init(&cmd);
  -
  - if ((err = xhci_send_enable_slot_command(hc, &cmd)) != EOK)
  -     return err;
  -
  - if ((err = xhci_cmd_wait(&cmd, XHCI_DEFAULT_TIMEOUT)) != EOK)
  -     return err;
  -
  - if (slot_id)
  + xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);
  +
  + if ((err = xhci_cmd_sync(hc, &cmd))) {
  +     goto end;
  + }
  +
  + if (slot_id) {
        *slot_id = cmd.slot_id;
  + }
  +
  + end:
    xhci_cmd_fini(&cmd);
  - return EOK;
  + return err;
    }

In hc_disable_slot():

    int err;
  - xhci_cmd_t cmd;
  - xhci_cmd_init(&cmd);
  -
  - cmd.slot_id = slot_id;
  -
  - if ((err = xhci_send_disable_slot_command(hc, &cmd)) != EOK)
  -     return err;
  -
  - if ((err = xhci_cmd_wait(&cmd, XHCI_DEFAULT_TIMEOUT)) != EOK)
  -     return err;
  -
  - xhci_cmd_fini(&cmd);
  + if ((err = xhci_cmd_sync_inline(hc, DISABLE_SLOT, .slot_id = slot_id))) {
  +     return err;
  + }

    return EOK;
    }

In the address-device path:

    xhci_setup_endpoint_context(ep0, &ictx->endpoint_ctx[0]);

  - xhci_cmd_t cmd;
  - xhci_cmd_init(&cmd);
  -
  - cmd.slot_id = dev->slot_id;
  -
  - if ((err = xhci_send_address_device_command(hc, &cmd, ictx)) != EOK)
  -     goto err_cmd;
  -
  - if ((err = xhci_cmd_wait(&cmd, XHCI_DEFAULT_TIMEOUT)) != EOK)
  -     goto err_cmd;
  + /* Issue Address Device command. */
  + if ((err = xhci_cmd_sync_inline(hc, ADDRESS_DEVICE, .slot_id = dev->slot_id, .input_ctx = ictx))) {
  +     goto err_dev_ctx;
  + }

    dev->base.address = XHCI_SLOT_DEVICE_ADDRESS(dev->dev_ctx->slot_ctx);
    …
    fibril_mutex_unlock(&dev->base.guard);

  - xhci_cmd_fini(&cmd);
  - free32(ictx);
  - return EOK;
  -
  - err_cmd:
  - xhci_cmd_fini(&cmd);
  - free32(ictx);
  + return EOK;

    err_dev_ctx:
    free32(dev->dev_ctx);
    …

hc_configure_device(), hc_deconfigure_device(), hc_add_endpoint() and hc_drop_endpoint() lose the same boilerplate, their explicit free32(ictx) clean-up and the err/err_cmd labels; each now issues its command through the inline wrapper:

  + if ((err = xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT, .slot_id = slot_id, .input_ctx = ictx))) {
  +     return err;
  + }
  +
  + return EOK;

with .deconfigure = true taking the place of .input_ctx in the deconfigure case.
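One consequence of the hc.c changes worth spelling out: because xhci_cmd_fini() now releases cmd->input_ctx with free32(), passing an input context through the inline wrapper transfers ownership of it to the command, which is why the explicit free32(ictx) calls disappeared from both the success and error paths. A minimal sketch of that pattern, under the assumption of a hypothetical helper name (it mirrors hc_configure_device() above):

    static int example_configure(xhci_hc_t *hc, uint32_t slot_id)
    {
        xhci_input_ctx_t *ictx;
        int err = create_valid_input_ctx(&ictx);
        if (err)
            return err;

        /* Whether the command succeeds or fails, ictx is freed by the
         * command machinery via xhci_cmd_fini() -> free32(cmd->input_ctx). */
        return xhci_cmd_sync_inline(hc, CONFIGURE_ENDPOINT,
            .slot_id = slot_id, .input_ctx = ictx);
    }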
uspace/drv/bus/usb/xhci/rh.c
(r0206d35 → rc3d926f)

    // ctx is an out parameter as of now
    assert(dev);
  + assert(ctx);

  - ctx = malloc(sizeof(xhci_port_bandwidth_ctx_t));
  - if(!ctx)
  + xhci_port_bandwidth_ctx_t *in_ctx = malloc32(sizeof(xhci_port_bandwidth_ctx_t));
  + if (!in_ctx) {
        return ENOMEM;
  + }

    xhci_cmd_t cmd;
  - xhci_cmd_init(&cmd);
  -
  - xhci_get_port_bandwidth_command(dev->hc, &cmd, ctx, speed);
  -
  - int err = xhci_cmd_wait(&cmd, XHCI_DEFAULT_TIMEOUT);
  - if(err != EOK) {
  -     free(ctx);
  -     ctx = NULL;
  - }
  + xhci_cmd_init(&cmd, XHCI_CMD_GET_PORT_BANDWIDTH);
  +
  + cmd.bandwidth_ctx = in_ctx;
  + cmd.device_speed = speed;
  +
  + int err;
  + if ((err = xhci_cmd_sync(dev->hc, &cmd))) {
  +     goto end;
  + }
  +
  + memcpy(ctx, in_ctx, sizeof(xhci_port_bandwidth_ctx_t));
  +
  + end:
  + xhci_cmd_fini(&cmd);

    return EOK;
    }