  • kernel/generic/src/ipc/irq.c (diff r48bcf49 → r63e27ef)
@@ -37,8 +37,7 @@
  *
  * This framework allows applications to subscribe to receive a notification
- * when an interrupt is detected. The application may provide a simple
- * 'top-half' handler as part of its registration, which can perform simple
- * operations (read/write port/memory, add information to notification IPC
- * message).
+ * when interrupt is detected. The application may provide a simple 'top-half'
+ * handler as part of its registration, which can perform simple operations
+ * (read/write port/memory, add information to notification IPC message).
  *
  * The structure of a notification message is as follows:
     
@@ -51,4 +50,23 @@
  * - in_phone_hash: interrupt counter (may be needed to assure correct order
  *                  in multithreaded drivers)
+ *
+ * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
+ * ipc_irq_cleanup() and IRQ handlers:
+ *
+ *   By always taking all of the uspace IRQ hash table lock, IRQ structure lock
+ *   and answerbox lock, we can rule out race conditions between the
+ *   registration functions and also the cleanup function. Thus the observer can
+ *   either see the IRQ structure present in both the hash table and the
+ *   answerbox list or absent in both. Views in which the IRQ structure would be
+ *   linked in the hash table but not in the answerbox list, or vice versa, are
+ *   not possible.
+ *
+ *   By always taking the hash table lock and the IRQ structure lock, we can
+ *   rule out a scenario in which we would free up an IRQ structure, which is
+ *   still referenced by, for example, an IRQ handler. The locking scheme forces
+ *   us to lock the IRQ structure only after any progressing IRQs on that
+ *   structure are finished. Because we hold the hash table lock, we prevent new
+ *   IRQs from taking new references to the IRQ structure.
+ *
  */
 
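A minimal sketch of the lock ordering documented in the comment added above, assuming the irq_spinlock_lock()/irq_spinlock_unlock() primitives and the lock names that appear later in this diff (irq_uspace_hash_table_lock, irq->lock, box->irq_lock). The function name is hypothetical and the boolean arguments simply mirror how these locks are taken elsewhere in this file; this illustrates the documented discipline, it is not code from the changeset:

/* Illustration only: canonical acquisition order, outermost lock first. */
static void irq_locking_order_example(irq_t *irq, answerbox_t *box)
{
	/* 1. uspace IRQ hash table lock. */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	/* 2. per-IRQ structure lock. */
	irq_spinlock_lock(&irq->lock, false);
	/* 3. answerbox IRQ list lock. */
	irq_spinlock_lock(&box->irq_lock, false);

	/* ... link or unlink the IRQ in both the hash table and box->irq_list ... */

	/* Release in reverse order. */
	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

Following this single order on every registration and cleanup path is what makes the "present in both or absent in both" invariant described above hold for any observer.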
     
@@ -66,5 +84,4 @@
 #include <print.h>
 #include <macros.h>
-#include <cap/cap.h>
 
 static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
     
@@ -101,5 +118,5 @@
 	}
 
-	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
+	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
 	for (size_t i = 0; i < cmdcount; i++) {
 		uintptr_t addr;
     
@@ -159,7 +176,8 @@
 }
 
-/** Statically check the top-half IRQ code
- *
- * Check the top-half IRQ code for invalid or unsafe constructs.
+/** Statically check the top-half pseudocode
+ *
+ * Check the top-half pseudocode for invalid or unsafe
+ * constructs.
  *
  */
     
@@ -198,7 +216,7 @@
 }
 
-/** Free the top-half IRQ code.
- *
- * @param code Pointer to the top-half IRQ code.
+/** Free the top-half pseudocode.
+ *
+ * @param code Pointer to the top-half pseudocode.
  *
  */
     
@@ -213,9 +231,9 @@
 }
 
-/** Copy the top-half IRQ code from userspace into the kernel.
- *
- * @param ucode Userspace address of the top-half IRQ code.
- *
- * @return Kernel address of the copied IRQ code.
+/** Copy the top-half pseudocode from userspace into the kernel.
+ *
+ * @param ucode Userspace address of the top-half pseudocode.
+ *
+ * @return Kernel address of the copied pseudocode.
  *
  */
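For context on what this copy routine receives: a driver hands the kernel its top-half as a short command program wrapped in an irq_code_t. The sketch below is a hypothetical driver-side example; the cmdcount/cmds fields are the ones the kernel code in this file iterates over, but the command constants (CMD_PIO_READ_8, CMD_ACCEPT) and the irq_cmd_t member names are assumed from the wider HelenOS API and may differ in this revision. PIO ranges are omitted for brevity.

/* Hypothetical top-half: read one status byte from a PIO register,
 * stash it in the notification message, and accept the interrupt.
 * Constant and member names are assumptions, not part of this changeset. */
static irq_cmd_t example_cmds[] = {
	{
		.cmd = CMD_PIO_READ_8,     /* read port/memory */
		.addr = (void *) 0x64,     /* hypothetical status port */
		.dstarg = 1                /* add the value to the IPC message */
	},
	{
		.cmd = CMD_ACCEPT          /* claim the interrupt */
	}
};

static irq_code_t example_code = {
	.cmdcount = sizeof(example_cmds) / sizeof(irq_cmd_t),
	.cmds = example_cmds
};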
     
@@ -271,34 +289,22 @@
 }
 
-static void irq_destroy(void *arg)
-{
-	irq_t *irq = (irq_t *) arg;
-
-	/* Free up the IRQ code and associated structures. */
-	code_free(irq->notif_cfg.code);
-	slab_free(irq_slab, irq);
-}
-
-static kobject_ops_t irq_kobject_ops = {
-	.destroy = irq_destroy
-};
-
 /** Subscribe an answerbox as a receiving end for IRQ notifications.
  *
  * @param box     Receiving answerbox.
  * @param inr     IRQ number.
- * @param imethod Interface and method to be associated with the notification.
- * @param ucode   Uspace pointer to top-half IRQ code.
- *
- * @return  IRQ capability handle.
- * @return  Negative error code.
- *
- */
-int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
-    irq_code_t *ucode)
+ * @param devno   Device number.
+ * @param imethod Interface and method to be associated with the
+ *                notification.
+ * @param ucode   Uspace pointer to top-half pseudocode.
+ *
+ * @return EOK on success or a negative error code.
+ *
+ */
+int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
+    sysarg_t imethod, irq_code_t *ucode)
 {
 	sysarg_t key[] = {
-		[IRQ_HT_KEY_INR] = (sysarg_t) inr,
-		[IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM
+		(sysarg_t) inr,
+		(sysarg_t) devno
 	};
 
     
@@ -315,24 +321,10 @@
 
 	/*
-	 * Allocate and populate the IRQ kernel object.
+	 * Allocate and populate the IRQ structure.
 	 */
-	cap_handle_t handle = cap_alloc(TASK);
-	if (handle < 0)
-		return handle;
-
-	irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC);
-	if (!irq) {
-		cap_free(TASK, handle);
-		return ENOMEM;
-	}
-
-	kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC);
-	if (!kobject) {
-		cap_free(TASK, handle);
-		slab_free(irq_slab, irq);
-		return ENOMEM;
-	}
+	irq_t *irq = malloc(sizeof(irq_t), 0);
 
 	irq_initialize(irq);
+	irq->devno = devno;
 	irq->inr = inr;
 	irq->claim = ipc_irq_top_half_claim;
     
@@ -345,54 +337,164 @@
 
 	/*
-	 * Insert the IRQ structure into the uspace IRQ hash table.
+	 * Enlist the IRQ structure in the uspace IRQ hash table and the
+	 * answerbox's list.
 	 */
 	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+
+	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
+	if (hlp) {
+		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);
+
+		/* hirq is locked */
+		irq_spinlock_unlock(&hirq->lock, false);
+		code_free(code);
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+		free(irq);
+		return EEXIST;
+	}
+
+	/* Locking is not really necessary, but paranoid */
 	irq_spinlock_lock(&irq->lock, false);
-
-	irq->notif_cfg.hashed_in = true;
+	irq_spinlock_lock(&box->irq_lock, false);
+
 	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
-
+	list_append(&irq->notif_cfg.link, &box->irq_list);
+
+	irq_spinlock_unlock(&box->irq_lock, false);
 	irq_spinlock_unlock(&irq->lock, false);
 	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
-	kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
-	cap_publish(TASK, handle, kobject);
-
-	return handle;
+
+	return EOK;
 }
 
 /** Unsubscribe task from IRQ notification.
  *
- * @param box     Answerbox associated with the notification.
- * @param handle  IRQ capability handle.
+ * @param box   Answerbox associated with the notification.
+ * @param inr   IRQ number.
+ * @param devno Device number.
  *
  * @return EOK on success or a negative error code.
  *
  */
-int ipc_irq_unsubscribe(answerbox_t *box, int handle)
-{
-	kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
-	if (!kobj)
+int ipc_irq_unsubscribe(answerbox_t *box, inr_t inr, devno_t devno)
+{
+	sysarg_t key[] = {
+		(sysarg_t) inr,
+		(sysarg_t) devno
+	};
+
+	if ((inr < 0) || (inr > last_inr))
+		return ELIMIT;
+
+	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
+	if (!lnk) {
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
 		return ENOENT;
-
-	assert(kobj->irq->notif_cfg.answerbox == box);
-
+	}
+
+	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
+
+	/* irq is locked */
+	irq_spinlock_lock(&box->irq_lock, false);
+
+	assert(irq->notif_cfg.answerbox == box);
+
+	/* Remove the IRQ from the answerbox's list. */
+	list_remove(&irq->notif_cfg.link);
+
+	/*
+	 * We need to drop the IRQ lock now because hash_table_remove() will try
+	 * to reacquire it. That basically violates the natural locking order,
+	 * but a deadlock in hash_table_remove() is prevented by the fact that
+	 * we already held the IRQ lock and didn't drop the hash table lock in
+	 * the meantime.
+	 */
+	irq_spinlock_unlock(&irq->lock, false);
+
+	/* Remove the IRQ from the uspace IRQ hash table. */
+	hash_table_remove(&irq_uspace_hash_table, key, 2);
+
+	irq_spinlock_unlock(&box->irq_lock, false);
+	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+	/* Free up the pseudo code and associated structures. */
+	code_free(irq->notif_cfg.code);
+
+	/* Free up the IRQ structure. */
+	free(irq);
+
+	return EOK;
+}
+
+/** Disconnect all IRQ notifications from an answerbox.
+ *
+ * This function is effective because the answerbox contains
+ * list of all irq_t structures that are subscribed to
+ * send notifications to it.
+ *
+ * @param box Answerbox for which we want to carry out the cleanup.
+ *
+ */
+void ipc_irq_cleanup(answerbox_t *box)
+{
+loop:
 	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
-	irq_spinlock_lock(&kobj->irq->lock, false);
-
-	if (kobj->irq->notif_cfg.hashed_in) {
-		/* Remove the IRQ from the uspace IRQ hash table. */
-		hash_table_remove_item(&irq_uspace_hash_table,
-		    &kobj->irq->link);
-		kobj->irq->notif_cfg.hashed_in = false;
-	}
-
-	/* kobj->irq->lock unlocked by the hash table remove_callback */
+	irq_spinlock_lock(&box->irq_lock, false);
+
+	while (!list_empty(&box->irq_list)) {
+		DEADLOCK_PROBE_INIT(p_irqlock);
+
+		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
+		    notif_cfg.link);
+
+		if (!irq_spinlock_trylock(&irq->lock)) {
+			/*
+			 * Avoid deadlock by trying again.
+			 */
+			irq_spinlock_unlock(&box->irq_lock, false);
+			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
+			goto loop;
+		}
+
+		sysarg_t key[2];
+		key[0] = irq->inr;
+		key[1] = irq->devno;
+
+		assert(irq->notif_cfg.answerbox == box);
+
+		/* Unlist from the answerbox. */
+		list_remove(&irq->notif_cfg.link);
+
+		/*
+		 * We need to drop the IRQ lock now because hash_table_remove()
+		 * will try to reacquire it. That basically violates the natural
+		 * locking order, but a deadlock in hash_table_remove() is
+		 * prevented by the fact that we already held the IRQ lock and
+		 * didn't drop the hash table lock in the meantime.
		 */
+		irq_spinlock_unlock(&irq->lock, false);
+
+		/* Remove from the hash table. */
+		hash_table_remove(&irq_uspace_hash_table, key, 2);
+
+		/*
+		 * Release both locks so that we can free the pseudo code.
+		 */
+		irq_spinlock_unlock(&box->irq_lock, false);
+		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+		code_free(irq->notif_cfg.code);
+		free(irq);
+
+		/* Reacquire both locks before taking another round. */
+		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+		irq_spinlock_lock(&box->irq_lock, false);
+	}
+
+	irq_spinlock_unlock(&box->irq_lock, false);
 	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
-	kobject_put(kobj);
-	cap_free(TASK, handle);
-
-	return EOK;
 }
 
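The ipc_irq_cleanup() loop added above takes the per-IRQ lock after the answerbox lock, which inverts the documented order, so it must never block on it: it uses irq_spinlock_trylock() and, on failure, drops everything and restarts from the top. Distilled to the bare retry skeleton (same lock and type names as the code above, with the per-item work elided; this is a restatement for clarity, not additional changeset code):

/* Distilled retry pattern used by the cleanup path above. */
static void cleanup_retry_pattern(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		irq_t *irq = list_get_instance(list_first(&box->irq_list),
		    irq_t, notif_cfg.link);

		/* irq->lock is acquired out of the documented order here,
		 * so never block on it. */
		if (!irq_spinlock_trylock(&irq->lock)) {
			/* Back off completely and retry from scratch. */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			goto loop;
		}

		/* ... unlink the IRQ from both structures and free it,
		 *     dropping and reacquiring the outer locks as the
		 *     real code above does ... */
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}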
     
@@ -414,10 +516,10 @@
 }
 
-/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
+/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
  *
  * @param irq IRQ structure.
  *
- * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
- * @return IRQ_DECLINE if the interrupt is not accepted byt the IRQ code.
+ * @return IRQ_ACCEPT if the interrupt is accepted by the
+ *         pseudocode, IRQ_DECLINE otherwise.
  *
  */