Changeset 4db49344 in mainline


Ignore:
Timestamp:
2018-01-23T21:52:28Z (6 years ago)
Author:
Ondřej Hlavatý <aearsis@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
3dd80f8
Parents:
a6afb4c
git-author:
Ondřej Hlavatý <aearsis@…> (2018-01-23 20:49:35)
git-committer:
Ondřej Hlavatý <aearsis@…> (2018-01-23 21:52:28)
Message:

usb: fix wrong design of transfer aborting

Apparently, we didn't do a good job in thinking through the problem.
In older HCs, it was done just wrong - the UHCI implementation committed
a batch that could have been already aborted, and EHCI+OHCI might miss
an interrupt because they committed the batch sooner than they added it
to their checked list.

This commit takes everything from the other end, which is probably the
only right one. Instead of an endpoint having an extra mutex, it
inherits a mutex from the outside. It never locks it though, it just
checks if the mutex is locked and uses it for waiting on condition
variables.

This mutex is supposed to be the one which the HC driver uses for
locking its structures in scheduling. This way, we avoid the ABBA
deadlock completely, while preserving the synchronization on an
endpoint.

The good thing is that this implementation is much easier to extend with
multiple active batches per endpoint.

Location:
uspace
Files:
14 edited

Legend:

Unmodified
Added
Removed
  • uspace/drv/bus/usb/ehci/ehci_bus.c

    ra6afb4c r4db49344  
    107107        qh_init(ehci_ep->qh, ep);
    108108        hc_enqueue_endpoint(bus->hc, ep);
    109 
     109        endpoint_set_online(ep, &bus->hc->guard);
    110110        return EOK;
    111111}
     
    121121        usb2_bus_ops.endpoint_unregister(ep);
    122122        hc_dequeue_endpoint(hc, ep);
     123        /*
     124         * Now we can be sure the active transfer will not be completed,
     125         * as it's out of the schedule, and HC acknowledged it.
     126         */
    123127
    124128        ehci_endpoint_t *ehci_ep = ehci_endpoint_get(ep);
    125129
    126         /*
    127          * Now we can be sure the active transfer will not be completed. But first,
    128          * make sure that the handling fibril won't use its link in pending list.
    129          */
    130130        fibril_mutex_lock(&hc->guard);
    131         if (link_in_use(&ehci_ep->pending_link))
    132                 /* pending list reference */
    133                 endpoint_del_ref(ep);
     131        endpoint_set_offline_locked(ep);
    134132        list_remove(&ehci_ep->pending_link);
    135         fibril_mutex_unlock(&hc->guard);
    136 
    137         /*
    138          * Finally, the endpoint shall not be used anywhere else. Finish the
    139          * pending batch.
    140          */
    141         fibril_mutex_lock(&ep->guard);
    142133        usb_transfer_batch_t * const batch = ep->active_batch;
    143134        endpoint_deactivate_locked(ep);
    144         fibril_mutex_unlock(&ep->guard);
     135        fibril_mutex_unlock(&hc->guard);
    145136
    146137        if (batch) {
  • uspace/drv/bus/usb/ehci/hc.c

    ra6afb4c r4db49344  
    302302        endpoint_t * const ep = batch->ep;
    303303        ehci_endpoint_t * const ehci_ep = ehci_endpoint_get(ep);
    304 
    305         /* creating local reference */
    306         endpoint_add_ref(ep);
    307 
    308         fibril_mutex_lock(&ep->guard);
    309         endpoint_activate_locked(ep, batch);
    310 
    311304        ehci_transfer_batch_t *ehci_batch = ehci_transfer_batch_get(batch);
    312         const int err = ehci_transfer_batch_prepare(ehci_batch);
    313         if (err) {
    314                 endpoint_deactivate_locked(ep);
    315                 fibril_mutex_unlock(&ep->guard);
    316                 /* dropping local reference */
    317                 endpoint_del_ref(ep);
     305
     306        int err;
     307
     308        if ((err = ehci_transfer_batch_prepare(ehci_batch)))
     309                return err;
     310
     311        fibril_mutex_lock(&hc->guard);
     312
     313        if ((err = endpoint_activate_locked(ep, batch))) {
     314                fibril_mutex_unlock(&hc->guard);
    318315                return err;
    319316        }
     
    321318        usb_log_debug("HC(%p): Committing BATCH(%p)", hc, batch);
    322319        ehci_transfer_batch_commit(ehci_batch);
    323         fibril_mutex_unlock(&ep->guard);
    324320
    325321        /* Enqueue endpoint to the checked list */
    326         fibril_mutex_lock(&hc->guard);
    327322        usb_log_debug2("HC(%p): Appending BATCH(%p)", hc, batch);
    328 
    329         /* local reference -> pending list reference */
    330323        list_append(&ehci_ep->pending_link, &hc->pending_endpoints);
     324
    331325        fibril_mutex_unlock(&hc->guard);
    332 
    333326        return EOK;
    334327}
     
    368361                                = list_get_instance(current, ehci_endpoint_t, pending_link);
    369362
    370                         fibril_mutex_lock(&ep->base.guard);
    371363                        ehci_transfer_batch_t *batch
    372364                                = ehci_transfer_batch_get(ep->base.active_batch);
     
    376368                                endpoint_deactivate_locked(&ep->base);
    377369                                list_remove(current);
    378                                 endpoint_del_ref(&ep->base);
    379370                                hc_reset_toggles(&batch->base, &ehci_ep_toggle_reset);
    380371                                usb_transfer_batch_finish(&batch->base);
    381372                        }
    382                         fibril_mutex_unlock(&ep->base.guard);
    383373                }
    384374                fibril_mutex_unlock(&hc->guard);
  • uspace/drv/bus/usb/ohci/hc.c

    ra6afb4c r4db49344  
    297297                return ohci_rh_schedule(&hc->rh, batch);
    298298        }
    299         ohci_transfer_batch_t *ohci_batch = ohci_transfer_batch_get(batch);
    300         if (!ohci_batch)
    301                 return ENOMEM;
    302 
    303         const int err = ohci_transfer_batch_prepare(ohci_batch);
    304         if (err)
    305                 return err;
    306299
    307300        endpoint_t *ep = batch->ep;
    308301        ohci_endpoint_t * const ohci_ep = ohci_endpoint_get(ep);
    309 
    310         /* creating local reference */
    311         endpoint_add_ref(ep);
    312 
    313         fibril_mutex_lock(&ep->guard);
    314         endpoint_activate_locked(ep, batch);
     302        ohci_transfer_batch_t *ohci_batch = ohci_transfer_batch_get(batch);
     303
     304        int err;
     305        if ((err = ohci_transfer_batch_prepare(ohci_batch)))
     306                return err;
     307
     308        fibril_mutex_lock(&hc->guard);
     309        if ((err = endpoint_activate_locked(ep, batch))) {
     310                fibril_mutex_unlock(&hc->guard);
     311                return err;
     312        }
     313
    315314        ohci_transfer_batch_commit(ohci_batch);
    316         fibril_mutex_unlock(&ep->guard);
     315        list_append(&ohci_ep->pending_link, &hc->pending_endpoints);
     316        fibril_mutex_unlock(&hc->guard);
    317317
    318318        /* Control and bulk schedules need a kick to start working */
     
    328328                break;
    329329        }
    330 
    331         fibril_mutex_lock(&hc->guard);
    332         list_append(&ohci_ep->pending_link, &hc->pending_endpoints);
    333         fibril_mutex_unlock(&hc->guard);
    334330
    335331        return EOK;
     
    369365                                = list_get_instance(current, ohci_endpoint_t, pending_link);
    370366
    371                         fibril_mutex_lock(&ep->base.guard);
    372367                        ohci_transfer_batch_t *batch
    373368                                = ohci_transfer_batch_get(ep->base.active_batch);
     
    377372                                endpoint_deactivate_locked(&ep->base);
    378373                                list_remove(current);
    379                                 endpoint_del_ref(&ep->base);
    380374                                hc_reset_toggles(&batch->base, &ohci_ep_toggle_reset);
    381375                                usb_transfer_batch_finish(&batch->base);
    382376                        }
    383                         fibril_mutex_unlock(&ep->base.guard);
    384377                }
    385378                fibril_mutex_unlock(&hc->guard);
  • uspace/drv/bus/usb/ohci/ohci_bus.c

    ra6afb4c r4db49344  
    115115        ed_init(ohci_ep->ed, ep, ohci_ep->td);
    116116        hc_enqueue_endpoint(bus->hc, ep);
     117        endpoint_set_online(ep, &bus->hc->guard);
    117118
    118119        return EOK;
     
    128129        hc_dequeue_endpoint(bus->hc, ep);
    129130
    130         ohci_endpoint_t * const ohci_ep = ohci_endpoint_get(ep);
     131        /*
     132         * Now we can be sure the active transfer will not be completed,
     133         * as it's out of the schedule, and HC acknowledged it.
     134         */
    131135
    132         /*
    133          * Now we can be sure the active transfer will not be completed. But first,
    134          * make sure that the handling fibril won't use its link in pending list.
    135          */
     136        ohci_endpoint_t *ohci_ep = ohci_endpoint_get(ep);
     137
    136138        fibril_mutex_lock(&hc->guard);
    137         if (link_in_use(&ohci_ep->pending_link))
    138                 /* pending list reference */
    139                 endpoint_del_ref(ep);
     139        endpoint_set_offline_locked(ep);
    140140        list_remove(&ohci_ep->pending_link);
    141         fibril_mutex_unlock(&hc->guard);
    142 
    143         /*
    144          * Finally, the endpoint shall not be used anywhere else. Finish the
    145          * pending batch.
    146          */
    147         fibril_mutex_lock(&ep->guard);
    148141        usb_transfer_batch_t * const batch = ep->active_batch;
    149142        endpoint_deactivate_locked(ep);
    150         fibril_mutex_unlock(&ep->guard);
     143        fibril_mutex_unlock(&hc->guard);
    151144
    152145        if (batch) {
  • uspace/drv/bus/usb/uhci/hc.c

    ra6afb4c r4db49344  
    9797
    9898static void hc_init_hw(const hc_t *instance);
    99 static int hc_init_mem_structures(hc_t *instance, hc_device_t *);
     99static int hc_init_mem_structures(hc_t *instance);
    100100static int hc_init_transfer_lists(hc_t *instance);
    101101
     
    164164        /* Lower 2 bits are transaction error and transaction complete */
    165165        if (status & (UHCI_STATUS_INTERRUPT | UHCI_STATUS_ERROR_INTERRUPT)) {
    166                 LIST_INITIALIZE(done);
    167                 transfer_list_remove_finished(
    168                     &instance->transfers_interrupt, &done);
    169                 transfer_list_remove_finished(
    170                     &instance->transfers_control_slow, &done);
    171                 transfer_list_remove_finished(
    172                     &instance->transfers_control_full, &done);
    173                 transfer_list_remove_finished(
    174                     &instance->transfers_bulk_full, &done);
    175 
    176                 list_foreach_safe(done, current, next) {
    177                         list_remove(current);
    178                         uhci_transfer_batch_t *batch =
    179                             uhci_transfer_batch_from_link(current);
    180                         usb_transfer_batch_finish(&batch->base);
    181                 }
    182         }
     166                transfer_list_check_finished(&instance->transfers_interrupt);
     167                transfer_list_check_finished(&instance->transfers_control_slow);
     168                transfer_list_check_finished(&instance->transfers_control_full);
     169                transfer_list_check_finished(&instance->transfers_bulk_full);
     170        }
     171
    183172        /* Resume interrupts are not supported */
    184173        if (status & UHCI_STATUS_RESUME) {
     
    239228            hw_res->io_ranges.ranges[0].size);
    240229
    241         ret = hc_init_mem_structures(instance, hcd);
     230        ret = hc_init_mem_structures(instance);
    242231        if (ret != EOK) {
    243232                usb_log_error("Failed to init UHCI memory structures: %s.",
     
    328317}
    329318
     319static int endpoint_register(endpoint_t *ep)
     320{
     321        hc_t * const hc = bus_to_hc(endpoint_get_bus(ep));
     322
     323        const int err = usb2_bus_ops.endpoint_register(ep);
     324        if (err)
     325                return err;
     326
     327        transfer_list_t *list = hc->transfers[ep->device->speed][ep->transfer_type];
     328        if (!list)
     329                /*
     330                 * We don't support this combination (e.g. isochronous). Do not
     331                 * fail early, because that would block any device with these
     332                 * endpoints from connecting. Instead, make sure these transfers
     333                 * are denied soon enough with ENOTSUP not to fail on asserts.
     334                 */
     335                return EOK;
     336
     337        endpoint_set_online(ep, &list->guard);
     338        return EOK;
     339}
     340
    330341static void endpoint_unregister(endpoint_t *ep)
    331342{
    332343        hc_t * const hc = bus_to_hc(endpoint_get_bus(ep));
    333344        usb2_bus_ops.endpoint_unregister(ep);
    334 
    335         uhci_transfer_batch_t *batch = NULL;
    336345
    337346        // Check for the roothub, as it does not schedule into lists
     
    344353
    345354        transfer_list_t *list = hc->transfers[ep->device->speed][ep->transfer_type];
    346 
    347355        if (!list)
    348356                /*
     
    352360                return;
    353361
    354         // To avoid ABBA deadlock, we need to take the list first
    355362        fibril_mutex_lock(&list->guard);
    356         fibril_mutex_lock(&ep->guard);
    357         if (ep->active_batch) {
    358                 batch = uhci_transfer_batch_get(ep->active_batch);
    359                 endpoint_deactivate_locked(ep);
    360                 transfer_list_remove_batch(list, batch);
    361         }
    362         fibril_mutex_unlock(&ep->guard);
     363
     364        endpoint_set_offline_locked(ep);
     365        /* From now on, no other transfer will be scheduled. */
     366
     367        if (!ep->active_batch) {
     368                fibril_mutex_unlock(&list->guard);
     369                return;
     370        }
     371
     372        /* First, offer the batch a short chance to be finished. */
     373        endpoint_wait_timeout_locked(ep, 10000);
     374
     375        if (!ep->active_batch) {
     376                fibril_mutex_unlock(&list->guard);
     377                return;
     378        }
     379
     380        uhci_transfer_batch_t * const batch =
     381                uhci_transfer_batch_get(ep->active_batch);
     382
     383        /* Remove the batch from the schedule to stop it from being finished. */
     384        endpoint_deactivate_locked(ep);
     385        transfer_list_remove_batch(list, batch);
     386
    363387        fibril_mutex_unlock(&list->guard);
    364388
    365         if (batch) {
    366                 // The HW could have been looking at the batch.
    367                 // Better wait two frames before we release the buffers.
    368                 async_usleep(2000);
    369                 batch->base.error = EINTR;
    370                 batch->base.transferred_size = 0;
    371                 usb_transfer_batch_finish(&batch->base);
    372         }
     389        /*
     390         * We removed the batch from software schedule only, it's still possible
     391         * that HC has it in its caches. Better wait a while before we release
     392         * the buffers.
     393         */
     394        async_usleep(20000);
     395        batch->base.error = EINTR;
     396        batch->base.transferred_size = 0;
     397        usb_transfer_batch_finish(&batch->base);
    373398}
    374399
     
    382407        .status = hc_status,
    383408
     409        .endpoint_register = endpoint_register,
    384410        .endpoint_unregister = endpoint_unregister,
    385411        .endpoint_count_bw = bandwidth_count_usb11,
     
    400426 *  - frame list page (needs to be one UHCI hw accessible 4K page)
    401427 */
    402 int hc_init_mem_structures(hc_t *instance, hc_device_t *hcd)
     428int hc_init_mem_structures(hc_t *instance)
    403429{
    404430        assert(instance);
     
    425451                return ENOMEM;
    426452        }
     453        list_initialize(&instance->pending_endpoints);
    427454        usb_log_debug("Initialized transfer lists.");
    428455
     
    514541}
    515542
    516 /** Schedule batch for execution.
     543/**
     544 * Schedule batch for execution.
    517545 *
    518546 * @param[in] instance UHCI structure to use.
    519547 * @param[in] batch Transfer batch to schedule.
    520548 * @return Error code
    521  *
    522  * Checks for bandwidth availability and appends the batch to the proper queue.
    523549 */
    524550static int hc_schedule(usb_transfer_batch_t *batch)
     
    531557                return uhci_rh_schedule(&hc->rh, batch);
    532558
    533 
    534         const int err = uhci_transfer_batch_prepare(uhci_batch);
    535         if (err)
     559        transfer_list_t * const list =
     560            hc->transfers[ep->device->speed][ep->transfer_type];
     561
     562        if (!list)
     563                return ENOTSUP;
     564
     565        int err;
     566        if ((err = uhci_transfer_batch_prepare(uhci_batch)))
    536567                return err;
    537568
    538         transfer_list_t *list = hc->transfers[ep->device->speed][ep->transfer_type];
    539         assert(list);
    540         transfer_list_add_batch(list, uhci_batch);
    541 
    542         return EOK;
    543 }
    544 
    545 int hc_unschedule_batch(usb_transfer_batch_t *batch)
    546 {
    547 
    548         return EOK;
     569        return transfer_list_add_batch(list, uhci_batch);
    549570}
    550571
  • uspace/drv/bus/usb/uhci/hc.h

    ra6afb4c r4db49344  
    123123        transfer_list_t *transfers[2][4];
    124124
     125        /**
     126         * Guard for the pending list. Can be locked under EP guard, but not
     127         * vice versa.
     128         */
     129        fibril_mutex_t guard;
     130        /** List of endpoints with a transfer scheduled */
     131        list_t pending_endpoints;
     132
    125133        /** Number of hw failures detected. */
    126134        unsigned hw_failures;
  • uspace/drv/bus/usb/uhci/transfer_list.c

    ra6afb4c r4db49344  
    102102}
    103103
    104 /** Add transfer batch to the list and queue.
    105  *
    106  * @param[in] instance List to use.
    107  * @param[in] batch Transfer batch to submit.
     104/**
     105 * Add transfer batch to the list and queue.
    108106 *
    109107 * The batch is added to the end of the list and queue.
    110  */
    111 void transfer_list_add_batch(
     108 *
     109 * @param[in] instance List to use.
     110 * @param[in] batch Transfer batch to submit. After return, the batch must
     111 *                  not be used further.
     112 */
     113int transfer_list_add_batch(
    112114    transfer_list_t *instance, uhci_transfer_batch_t *uhci_batch)
    113115{
     
    117119        endpoint_t *ep = uhci_batch->base.ep;
    118120
    119         /* First, wait until the endpoint is free to use */
    120         fibril_mutex_lock(&ep->guard);
    121         endpoint_activate_locked(ep, &uhci_batch->base);
    122         fibril_mutex_unlock(&ep->guard);
     121        fibril_mutex_lock(&instance->guard);
     122
     123        const int err = endpoint_activate_locked(ep, &uhci_batch->base);
     124        if (err) {
     125                fibril_mutex_unlock(&instance->guard);
     126                return err;
     127        }
    123128
    124129        usb_log_debug2("Batch %p adding to queue %s.",
    125130            uhci_batch, instance->name);
    126 
    127         fibril_mutex_lock(&instance->guard);
    128131
    129132        /* Assume there is nothing scheduled */
     
    155158            USB_TRANSFER_BATCH_ARGS(uhci_batch->base), instance->name);
    156159        fibril_mutex_unlock(&instance->guard);
     160        return EOK;
    157161}
    158162
     
    171175 * @param[in] done list to fill
    172176 */
    173 void transfer_list_remove_finished(transfer_list_t *instance, list_t *done)
    174 {
    175         assert(instance);
    176         assert(done);
     177void transfer_list_check_finished(transfer_list_t *instance)
     178{
     179        assert(instance);
    177180
    178181        fibril_mutex_lock(&instance->guard);
    179         link_t *current = list_first(&instance->batch_list);
    180         while (current && current != &instance->batch_list.head) {
    181                 link_t * const next = current->next;
    182                 uhci_transfer_batch_t *batch =
    183                     uhci_transfer_batch_from_link(current);
     182        list_foreach_safe(instance->batch_list, current, next) {
     183                uhci_transfer_batch_t *batch = uhci_transfer_batch_from_link(current);
    184184
    185185                if (uhci_transfer_batch_check_completed(batch)) {
    186                         /* Remove from schedule, save for processing */
    187                         fibril_mutex_lock(&batch->base.ep->guard);
    188186                        assert(batch->base.ep->active_batch == &batch->base);
     187                        endpoint_deactivate_locked(batch->base.ep);
    189188                        hc_reset_toggles(&batch->base, &uhci_reset_toggle);
    190                         endpoint_deactivate_locked(batch->base.ep);
    191189                        transfer_list_remove_batch(instance, batch);
    192                         fibril_mutex_unlock(&batch->base.ep->guard);
    193 
    194                         list_append(current, done);
     190                        usb_transfer_batch_finish(&batch->base);
    195191                }
    196                 current = next;
    197192        }
    198193        fibril_mutex_unlock(&instance->guard);
  • uspace/drv/bus/usb/uhci/transfer_list.h

    ra6afb4c r4db49344  
    5959int transfer_list_init(transfer_list_t *, const char *);
    6060void transfer_list_set_next(transfer_list_t *, transfer_list_t *);
    61 void transfer_list_add_batch(transfer_list_t *, uhci_transfer_batch_t *);
     61int transfer_list_add_batch(transfer_list_t *, uhci_transfer_batch_t *);
    6262void transfer_list_remove_batch(transfer_list_t *, uhci_transfer_batch_t *);
    63 void transfer_list_remove_finished(transfer_list_t *, list_t *);
     63void transfer_list_check_finished(transfer_list_t *);
    6464void transfer_list_abort_all(transfer_list_t *);
    6565
  • uspace/drv/bus/usb/xhci/device.c

    ra6afb4c r4db49344  
    8181        usb_log_debug("Obtained slot ID: %u.", dev->slot_id);
    8282
    83         /* Create and configure control endpoint. */
    84         endpoint_t *ep0_base = xhci_endpoint_create(&dev->base, &ep0_initial_desc);
    85         if (!ep0_base)
     83        endpoint_t *ep0_base;
     84        if ((err = bus_endpoint_add(&dev->base, &ep0_initial_desc, &ep0_base)))
    8685                goto err_slot;
    87 
    88         /* Bus reference */
    89         endpoint_add_ref(ep0_base);
    90         dev->base.endpoints[0] = ep0_base;
    9186
    9287        usb_log_debug("Looking up new device initial MPS: %s",
  • uspace/drv/bus/usb/xhci/endpoint.c

    ra6afb4c r4db49344  
    6868        endpoint_init(ep, dev, desc);
    6969
     70        fibril_mutex_initialize(&xhci_ep->guard);
     71
    7072        xhci_ep->max_burst = desc->companion.max_burst + 1;
    7173
     
    177179        xhci_endpoint_t *ep = xhci_endpoint_get(ep_base);
    178180
    179         if ((err = hc_add_endpoint(ep)))
     181        if (ep_base->endpoint != 0 && (err = hc_add_endpoint(ep)))
    180182                return err;
    181183
     184        endpoint_set_online(ep_base, &ep->guard);
    182185        return EOK;
    183186}
     
    186189 * Abort a transfer on an endpoint.
    187190 */
    188 static int endpoint_abort(endpoint_t *ep)
     191static void endpoint_abort(endpoint_t *ep)
    189192{
    190193        xhci_device_t *dev = xhci_device_get(ep->device);
    191194        xhci_endpoint_t *xhci_ep = xhci_endpoint_get(ep);
    192195
    193         usb_transfer_batch_t *batch = NULL;
    194         fibril_mutex_lock(&ep->guard);
    195         if (ep->active_batch) {
    196                 if (dev->slot_id) {
    197                         const int err = hc_stop_endpoint(xhci_ep);
    198                         if (err) {
    199                                 usb_log_warning("Failed to stop endpoint %u of device "
    200                                     XHCI_DEV_FMT ": %s", ep->endpoint, XHCI_DEV_ARGS(*dev),
    201                                     str_error(err));
    202                         }
    203 
    204                         endpoint_wait_timeout_locked(ep, 2000);
    205                 }
    206 
    207                 batch = ep->active_batch;
    208                 if (batch) {
    209                         endpoint_deactivate_locked(ep);
    210                 }
    211         }
    212         fibril_mutex_unlock(&ep->guard);
    213 
    214         if (batch) {
    215                 batch->error = EINTR;
    216                 batch->transferred_size = 0;
    217                 usb_transfer_batch_finish(batch);
    218         }
    219         return EOK;
     196        /* This function can only abort endpoints without streams. */
     197        assert(xhci_ep->primary_stream_data_array == NULL);
     198
     199        fibril_mutex_lock(&xhci_ep->guard);
     200
     201        endpoint_set_offline_locked(ep);
     202
     203        if (!ep->active_batch) {
     204                fibril_mutex_unlock(&xhci_ep->guard);
     205                return;
     206        }
     207
     208        /* First, offer the batch a short chance to be finished. */
     209        endpoint_wait_timeout_locked(ep, 10000);
     210
     211        if (!ep->active_batch) {
     212                fibril_mutex_unlock(&xhci_ep->guard);
     213                return;
     214        }
     215
     216        usb_transfer_batch_t * const batch = ep->active_batch;
     217
     218        const int err = hc_stop_endpoint(xhci_ep);
     219        if (err) {
     220                usb_log_error("Failed to stop endpoint %u of device "
     221                    XHCI_DEV_FMT ": %s", ep->endpoint, XHCI_DEV_ARGS(*dev),
     222                    str_error(err));
     223        }
     224
     225        fibril_mutex_unlock(&xhci_ep->guard);
     226
     227        batch->error = EINTR;
     228        batch->transferred_size = 0;
     229        usb_transfer_batch_finish(batch);
     230        return;
    220231}
    221232
     
    235246
    236247        /* If device slot is still available, drop the endpoint. */
    237         if (dev->slot_id) {
     248        if (ep_base->endpoint != 0 && dev->slot_id) {
    238249
    239250                if ((err = hc_drop_endpoint(ep))) {
  • uspace/drv/bus/usb/xhci/endpoint.h

    ra6afb4c r4db49344  
    7070        endpoint_t base;        /**< Inheritance. Keep this first. */
    7171
     72        /** Guarding scheduling of this endpoint. */
     73        fibril_mutex_t guard;
     74
    7275        /** Main transfer ring (unused if streams are enabled) */
    7376        xhci_trb_ring_t ring;
  • uspace/drv/bus/usb/xhci/transfers.c

    ra6afb4c r4db49344  
    300300
    301301        if (TRB_EVENT_DATA(*trb)) {
    302                 assert(ep->base.transfer_type != USB_TRANSFER_ISOCHRONOUS);
     302                /* We schedule those only when streams are involved */
     303                assert(ep->primary_stream_ctx_array != NULL);
     304
    303305                /* We are received transfer pointer instead - work with that */
    304306                transfer = (xhci_transfer_t *) addr;
     
    306308                    transfer->interrupt_trb_phys);
    307309                batch = &transfer->batch;
    308 
    309                 fibril_mutex_lock(&ep->base.guard);
    310                 endpoint_deactivate_locked(&ep->base);
    311                 fibril_mutex_unlock(&ep->base.guard);
    312310        }
    313311        else {
     
    321319                }
    322320
    323                 fibril_mutex_lock(&ep->base.guard);
     321                fibril_mutex_lock(&ep->guard);
    324322                batch = ep->base.active_batch;
     323                endpoint_deactivate_locked(&ep->base);
     324                fibril_mutex_unlock(&ep->guard);
     325
    325326                if (!batch) {
    326                         fibril_mutex_unlock(&ep->base.guard);
    327327                        /* Dropping temporary reference */
    328328                        endpoint_del_ref(&ep->base);
     
    331331
    332332                transfer = xhci_transfer_from_batch(batch);
    333 
    334                 endpoint_deactivate_locked(&ep->base);
    335                 fibril_mutex_unlock(&ep->base.guard);
    336333        }
    337334
     
    482479
    483480
    484         fibril_mutex_lock(&ep->guard);
    485         endpoint_activate_locked(ep, batch);
    486         const int err = transfer_handlers[batch->ep->transfer_type](hc, transfer);
    487 
    488         if (err) {
     481        int err;
     482        fibril_mutex_lock(&xhci_ep->guard);
     483
     484        if ((err = endpoint_activate_locked(ep, batch))) {
     485                fibril_mutex_unlock(&xhci_ep->guard);
     486                return err;
     487        }
     488
     489        if ((err = transfer_handlers[batch->ep->transfer_type](hc, transfer))) {
    489490                endpoint_deactivate_locked(ep);
    490                 fibril_mutex_unlock(&ep->guard);
     491                fibril_mutex_unlock(&xhci_ep->guard);
    491492                return err;
    492493        }
    493494
    494495        hc_ring_ep_doorbell(xhci_ep, batch->target.stream);
    495         fibril_mutex_unlock(&ep->guard);
     496        fibril_mutex_unlock(&xhci_ep->guard);
    496497        return EOK;
    497498}
  • uspace/lib/usbhost/include/usb/host/endpoint.h

    ra6afb4c r4db49344  
    5353typedef struct usb_transfer_batch usb_transfer_batch_t;
    5454
    55 /** Host controller side endpoint structure. */
     55/**
     56 * Host controller side endpoint structure.
     57 *
     58 * This structure, though reference-counted, is very fragile. It is responsible
     59 * for synchronizing transfer batch scheduling and completion.
     60 *
     61 * To avoid situations, in which two locks must be obtained to schedule/finish
     62 * a transfer, the endpoint inherits a lock from the outside. Because the
     63 * concrete instance of mutex can be unknown at the time of initialization,
     64 * the HC shall pass the right lock at the time of onlining the endpoint.
     65 *
     66 * The fields used for scheduling (online, active_batch) are to be used only
     67 * under that guard and by functions designed for this purpose. The driver can
      68 * also completely avoid using this mechanism, in which case it is on its own
      69 * in the matter of transfer aborting.
     70 *
     71 * Relevant information can be found in the documentation of HelenOS xHCI
     72 * project.
     73 */
    5674typedef struct endpoint {
    5775        /** USB device */
     
    5977        /** Reference count. */
    6078        atomic_t refcnt;
    61         /** Reserved bandwidth. */
    62         size_t bandwidth;
    63         /** The currently active transfer batch. Write using methods, read under guard. */
     79
     80        /** An inherited guard */
     81        fibril_mutex_t *guard;
     82        /** Whether it's allowed to schedule on this endpoint */
     83        bool online;
     84        /** The currently active transfer batch. */
    6485        usb_transfer_batch_t *active_batch;
    65         /** Protects resources and active status changes. */
    66         fibril_mutex_t guard;
    6786        /** Signals change of active status. */
    6887        fibril_condvar_t avail;
    6988
    70         /** Enpoint number */
     89        /** Reserved bandwidth. Needed for USB2 bus. */
     90        size_t bandwidth;
     91        /** Endpoint number */
    7192        usb_endpoint_t endpoint;
    7293        /** Communication direction. */
     
    79100        /** Maximum size of one transfer */
    80101        size_t max_transfer_size;
    81         /** Number of packats that can be sent in one service interval (not necessarily uframe) */
     102        /**
     103         * Number of packets that can be sent in one service interval
     104         * (not necessarily uframe, despite its name)
     105         */
    82106        unsigned packets_per_uframe;
    83107
     
    90114extern void endpoint_del_ref(endpoint_t *);
    91115
     116extern void endpoint_set_online(endpoint_t *, fibril_mutex_t *);
     117extern void endpoint_set_offline_locked(endpoint_t *);
     118
    92119extern void endpoint_wait_timeout_locked(endpoint_t *ep, suseconds_t);
    93 extern void endpoint_activate_locked(endpoint_t *, usb_transfer_batch_t *);
     120extern int endpoint_activate_locked(endpoint_t *, usb_transfer_batch_t *);
    94121extern void endpoint_deactivate_locked(endpoint_t *);
    95122
  • uspace/lib/usbhost/src/endpoint.c

    ra6afb4c r4db49344  
    6161
    6262        atomic_set(&ep->refcnt, 0);
    63         fibril_mutex_initialize(&ep->guard);
    6463        fibril_condvar_initialize(&ep->avail);
    6564
     
    122121
    123122/**
    124  * Wait until the endpoint have no transfer scheduled.
     123 * Mark the endpoint as online. Supply a guard to be used for this endpoint
     124 * synchronization.
     125 */
     126void endpoint_set_online(endpoint_t *ep, fibril_mutex_t *guard)
     127{
     128        ep->guard = guard;
     129        ep->online = true;
     130}
     131
     132/**
     133 * Mark the endpoint as offline. All other fibrils waiting to activate this
     134 * endpoint will be interrupted.
     135 */
     136void endpoint_set_offline_locked(endpoint_t *ep)
     137{
     138        assert(ep);
     139        assert(fibril_mutex_is_locked(ep->guard));
     140
     141        ep->online = false;
     142        fibril_condvar_broadcast(&ep->avail);
     143}
     144
     145/**
     146 * Wait until a transfer finishes. Can be used even when the endpoint is
     147 * offline (and is interrupted by the endpoint going offline).
    125148 */
    126149void endpoint_wait_timeout_locked(endpoint_t *ep, suseconds_t timeout)
    127150{
    128         assert(fibril_mutex_is_locked(&ep->guard));
    129 
    130         if (ep->active_batch != NULL)
    131                 fibril_condvar_wait_timeout(&ep->avail, &ep->guard, timeout);
    132 
    133         while (timeout == 0 && ep->active_batch != NULL)
    134                 fibril_condvar_wait_timeout(&ep->avail, &ep->guard, timeout);
     151        assert(ep);
     152        assert(fibril_mutex_is_locked(ep->guard));
     153
     154        if (ep->active_batch == NULL)
     155                return;
     156
     157        fibril_condvar_wait_timeout(&ep->avail, ep->guard, timeout);
    135158}
    136159
     
    140163 *
    141164 * Call only under endpoint guard. After you activate the endpoint and release
    142  * the guard, you must assume that particular transfer is already finished/aborted.
    143  *
    144  * @param ep endpoint_t structure.
    145  * @param batch Transfer batch this endpoint is bocked by.
    146  */
    147 void endpoint_activate_locked(endpoint_t *ep, usb_transfer_batch_t *batch)
     165 * the guard, you must assume that particular transfer is already
     166 * finished/aborted.
     167 *
     168 * Activation and deactivation is not done by the library to maximize
      169 * performance. The HC might want to prepare some memory buffers prior to
      170 * interfering with the outside world.
     171 *
     172 * @param batch Transfer batch this endpoint is blocked by.
     173 */
     174int endpoint_activate_locked(endpoint_t *ep, usb_transfer_batch_t *batch)
    148175{
    149176        assert(ep);
    150177        assert(batch);
    151178        assert(batch->ep == ep);
    152 
    153         endpoint_wait_timeout_locked(ep, 0);
     179        assert(ep->guard);
     180        assert(fibril_mutex_is_locked(ep->guard));
     181
     182        while (ep->online && ep->active_batch != NULL)
     183                fibril_condvar_wait(&ep->avail, ep->guard);
     184
     185        if (!ep->online)
     186                return EINTR;
     187
     188        assert(ep->active_batch == NULL);
    154189        ep->active_batch = batch;
     190        return EOK;
    155191}
    156192
    157193/**
    158194 * Mark the endpoint as inactive and allow access for further fibrils.
    159  *
    160  * @param ep endpoint_t structure.
    161195 */
    162196void endpoint_deactivate_locked(endpoint_t *ep)
    163197{
    164198        assert(ep);
    165         assert(fibril_mutex_is_locked(&ep->guard));
     199        assert(fibril_mutex_is_locked(ep->guard));
     200
    166201        ep->active_batch = NULL;
    167202        fibril_condvar_signal(&ep->avail);
Note: See TracChangeset for help on using the changeset viewer.