Changeset 91b60499 in mainline for kernel/generic/src/ipc/irq.c
- Timestamp: 2017-09-30T06:29:42Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 300f4c4
- Parents: d076f16 (diff), 6636fb19 (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
Legend:
- Unmodified (context lines)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
kernel/generic/src/ipc/irq.c
--- kernel/generic/src/ipc/irq.c (rd076f16)
+++ kernel/generic/src/ipc/irq.c (r91b60499)
@@ -37,7 +37,8 @@
  *
  * This framework allows applications to subscribe to receive a notification
- * when interrupt is detected. The application may provide a simple 'top-half'
- * handler as part of its registration, which can perform simple operations
- * (read/write port/memory, add information to notification IPC message).
+ * when an interrupt is detected. The application may provide a simple
+ * 'top-half' handler as part of its registration, which can perform simple
+ * operations (read/write port/memory, add information to notification IPC
+ * message).
  *
  * The structure of a notification message is as follows:
@@ -50,23 +51,4 @@
  * - in_phone_hash: interrupt counter (may be needed to assure correct order
  *   in multithreaded drivers)
- *
- * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
- * ipc_irq_cleanup() and IRQ handlers:
- *
- * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
- * and answerbox lock, we can rule out race conditions between the
- * registration functions and also the cleanup function. Thus the observer can
- * either see the IRQ structure present in both the hash table and the
- * answerbox list or absent in both. Views in which the IRQ structure would be
- * linked in the hash table but not in the answerbox list, or vice versa, are
- * not possible.
- *
- * By always taking the hash table lock and the IRQ structure lock, we can
- * rule out a scenario in which we would free up an IRQ structure, which is
- * still referenced by, for example, an IRQ handler. The locking scheme forces
- * us to lock the IRQ structure only after any progressing IRQs on that
- * structure are finished. Because we hold the hash table lock, we prevent new
- * IRQs from taking new references to the IRQ structure.
- *
  */
 
@@ -84,4 +66,5 @@
 #include <print.h>
 #include <macros.h>
+#include <cap/cap.h>
 
 static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
@@ -118,5 +101,5 @@
     }
 
-    /* Rewrite the pseudocode addresses from physical to kernel virtual. */
+    /* Rewrite the IRQ code addresses from physical to kernel virtual. */
     for (size_t i = 0; i < cmdcount; i++) {
         uintptr_t addr;
@@ -176,8 +159,7 @@
 }
 
-/** Statically check the top-half pseudocode
- *
- * Check the top-half pseudocode for invalid or unsafe
- * constructs.
+/** Statically check the top-half IRQ code
+ *
+ * Check the top-half IRQ code for invalid or unsafe constructs.
  *
  */
@@ -216,7 +198,7 @@
 }
 
-/** Free the top-half pseudocode.
+/** Free the top-half IRQ code.
  *
- * @param code Pointer to the top-half pseudocode.
+ * @param code Pointer to the top-half IRQ code.
  *
  */
@@ -231,9 +213,9 @@
 }
 
-/** Copy the top-half pseudocode from userspace into the kernel.
+/** Copy the top-half IRQ code from userspace into the kernel.
  *
- * @param ucode Userspace address of the top-half pseudocode.
+ * @param ucode Userspace address of the top-half IRQ code.
  *
- * @return Kernel address of the copied pseudocode.
+ * @return Kernel address of the copied IRQ code.
  *
  */
@@ -289,22 +271,34 @@
 }
 
+static void irq_destroy(void *arg)
+{
+    irq_t *irq = (irq_t *) arg;
+
+    /* Free up the IRQ code and associated structures. */
+    code_free(irq->notif_cfg.code);
+    slab_free(irq_slab, irq);
+}
+
+static kobject_ops_t irq_kobject_ops = {
+    .destroy = irq_destroy
+};
+
 /** Subscribe an answerbox as a receiving end for IRQ notifications.
  *
  * @param box      Receiving answerbox.
  * @param inr      IRQ number.
- * @param devno    Device number.
- * @param imethod  Interface and method to be associated with the
- *                 notification.
- * @param ucode    Uspace pointer to top-half pseudocode.
- *
- * @return EOK on success or a negative error code.
- *
- */
-int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
-    sysarg_t imethod, irq_code_t *ucode)
+ * @param imethod  Interface and method to be associated with the notification.
+ * @param ucode    Uspace pointer to top-half IRQ code.
+ *
+ * @return IRQ capability handle.
+ * @return Negative error code.
+ *
+ */
+int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
+    irq_code_t *ucode)
 {
     sysarg_t key[] = {
-        (sysarg_t) inr,
-        (sysarg_t) devno
+        [IRQ_HT_KEY_INR] = (sysarg_t) inr,
+        [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM
     };
 
@@ -321,10 +315,24 @@
 
     /*
-     * Allocate and populate the IRQ structure.
+     * Allocate and populate the IRQ kernel object.
      */
-    irq_t *irq = malloc(sizeof(irq_t), 0);
+    cap_handle_t handle = cap_alloc(TASK);
+    if (handle < 0)
+        return handle;
+
+    irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC);
+    if (!irq) {
+        cap_free(TASK, handle);
+        return ENOMEM;
+    }
+
+    kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC);
+    if (!kobject) {
+        cap_free(TASK, handle);
+        slab_free(irq_slab, irq);
+        return ENOMEM;
+    }
 
     irq_initialize(irq);
-    irq->devno = devno;
     irq->inr = inr;
     irq->claim = ipc_irq_top_half_claim;
@@ -337,164 +345,54 @@
 
     /*
-     * Enlist the IRQ structure in the uspace IRQ hash table and the
-     * answerbox's list.
+     * Insert the IRQ structure into the uspace IRQ hash table.
      */
     irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
-
-    link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
-    if (hlp) {
-        irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);
-
-        /* hirq is locked */
-        irq_spinlock_unlock(&hirq->lock, false);
-        code_free(code);
-        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
-        free(irq);
-        return EEXIST;
-    }
-
-    /* Locking is not really necessary, but paranoid */
     irq_spinlock_lock(&irq->lock, false);
-    irq_spinlock_lock(&box->irq_lock, false);
 
+    irq->notif_cfg.hashed_in = true;
     hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
-    list_append(&irq->notif_cfg.link, &box->irq_list);
-
-    irq_spinlock_unlock(&box->irq_lock, false);
+
     irq_spinlock_unlock(&irq->lock, false);
     irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+    kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
+    cap_publish(TASK, handle, kobject);
+
+    return handle;
+}
+
+/** Unsubscribe task from IRQ notification.
+ *
+ * @param box     Answerbox associated with the notification.
+ * @param handle  IRQ capability handle.
+ *
+ * @return EOK on success or a negative error code.
+ *
+ */
+int ipc_irq_unsubscribe(answerbox_t *box, int handle)
+{
+    kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
+    if (!kobj)
+        return ENOENT;
+
+    assert(kobj->irq->notif_cfg.answerbox == box);
+
+    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
+    irq_spinlock_lock(&kobj->irq->lock, false);
+
+    if (kobj->irq->notif_cfg.hashed_in) {
+        /* Remove the IRQ from the uspace IRQ hash table. */
+        hash_table_remove_item(&irq_uspace_hash_table,
+            &kobj->irq->link);
+        kobj->irq->notif_cfg.hashed_in = false;
+    }
+
+    /* kobj->irq->lock unlocked by the hash table remove_callback */
+    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+    kobject_put(kobj);
+    cap_free(TASK, handle);
 
     return EOK;
-}
-
-/** Unsubscribe task from IRQ notification.
- *
- * @param box   Answerbox associated with the notification.
- * @param inr   IRQ number.
- * @param devno Device number.
- *
- * @return EOK on success or a negative error code.
- *
- */
-int ipc_irq_unsubscribe(answerbox_t *box, inr_t inr, devno_t devno)
-{
-    sysarg_t key[] = {
-        (sysarg_t) inr,
-        (sysarg_t) devno
-    };
-
-    if ((inr < 0) || (inr > last_inr))
-        return ELIMIT;
-
-    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
-    link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
-    if (!lnk) {
-        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-        return ENOENT;
-    }
-
-    irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
-
-    /* irq is locked */
-    irq_spinlock_lock(&box->irq_lock, false);
-
-    assert(irq->notif_cfg.answerbox == box);
-
-    /* Remove the IRQ from the answerbox's list. */
-    list_remove(&irq->notif_cfg.link);
-
-    /*
-     * We need to drop the IRQ lock now because hash_table_remove() will try
-     * to reacquire it. That basically violates the natural locking order,
-     * but a deadlock in hash_table_remove() is prevented by the fact that
-     * we already held the IRQ lock and didn't drop the hash table lock in
-     * the meantime.
-     */
-    irq_spinlock_unlock(&irq->lock, false);
-
-    /* Remove the IRQ from the uspace IRQ hash table. */
-    hash_table_remove(&irq_uspace_hash_table, key, 2);
-
-    irq_spinlock_unlock(&box->irq_lock, false);
-    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
-    /* Free up the pseudo code and associated structures. */
-    code_free(irq->notif_cfg.code);
-
-    /* Free up the IRQ structure. */
-    free(irq);
-
-    return EOK;
-}
-
-/** Disconnect all IRQ notifications from an answerbox.
- *
- * This function is effective because the answerbox contains
- * list of all irq_t structures that are subscribed to
- * send notifications to it.
- *
- * @param box Answerbox for which we want to carry out the cleanup.
- *
- */
-void ipc_irq_cleanup(answerbox_t *box)
-{
-loop:
-    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
-    irq_spinlock_lock(&box->irq_lock, false);
-
-    while (!list_empty(&box->irq_list)) {
-        DEADLOCK_PROBE_INIT(p_irqlock);
-
-        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
-            notif_cfg.link);
-
-        if (!irq_spinlock_trylock(&irq->lock)) {
-            /*
-             * Avoid deadlock by trying again.
-             */
-            irq_spinlock_unlock(&box->irq_lock, false);
-            irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-            DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
-            goto loop;
-        }
-
-        sysarg_t key[2];
-        key[0] = irq->inr;
-        key[1] = irq->devno;
-
-        assert(irq->notif_cfg.answerbox == box);
-
-        /* Unlist from the answerbox. */
-        list_remove(&irq->notif_cfg.link);
-
-        /*
-         * We need to drop the IRQ lock now because hash_table_remove()
-         * will try to reacquire it. That basically violates the natural
-         * locking order, but a deadlock in hash_table_remove() is
-         * prevented by the fact that we already held the IRQ lock and
-         * didn't drop the hash table lock in the meantime.
-         */
-        irq_spinlock_unlock(&irq->lock, false);
-
-        /* Remove from the hash table. */
-        hash_table_remove(&irq_uspace_hash_table, key, 2);
-
-        /*
-         * Release both locks so that we can free the pseudo code.
-         */
-        irq_spinlock_unlock(&box->irq_lock, false);
-        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
-        code_free(irq->notif_cfg.code);
-        free(irq);
-
-        /* Reacquire both locks before taking another round. */
-        irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
-        irq_spinlock_lock(&box->irq_lock, false);
-    }
-
-    irq_spinlock_unlock(&box->irq_lock, false);
-    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
-
 }
 
@@ -516,10 +414,10 @@
 }
 
-/** Apply the top-half pseudocode to find out whether to accept the IRQ or not.
+/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
  *
  * @param irq IRQ structure.
  *
- * @return IRQ_ACCEPT if the interrupt is accepted by the
- *         pseudocode, IRQ_DECLINE otherwise.
+ * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
+ * @return IRQ_DECLINE if the interrupt is not accepted byt the IRQ code.
  *
  */
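The heart of this changeset is the new calling convention of ipc_irq_subscribe() and ipc_irq_unsubscribe(): a subscription is no longer identified by an (inr, devno) pair, but by a capability handle returned from ipc_irq_subscribe(). The kernel-side sketch below is illustrative only and is not part of the changeset; the wrapper function name and error handling are hypothetical, while the two calls and their signatures are taken directly from the hunks above.

/*
 * Illustrative sketch, not part of changeset 91b60499. Assumes the
 * declarations from kernel/generic/src/ipc/irq.c as of r91b60499:
 * ipc_irq_subscribe() returns an IRQ capability handle (or a negative
 * error code) and ipc_irq_unsubscribe() takes that handle.
 */
static int example_irq_notification(answerbox_t *box, inr_t inr,
    sysarg_t imethod, irq_code_t *ucode)
{
    /* A non-negative return value is the IRQ capability handle. */
    int handle = ipc_irq_subscribe(box, inr, imethod, ucode);
    if (handle < 0)
        return handle;

    /*
     * Notifications for inr are now delivered to box; the top-half
     * IRQ code from ucode was copied into the kernel by the call above.
     */

    /* The handle, not the former (inr, devno) pair, ends the subscription. */
    return ipc_irq_unsubscribe(box, handle);
}

This mirrors the rest of the diff: the uspace IRQ hash table is now keyed by IRQ_HT_KEY_INR and IRQ_HT_KEY_MODE (with IRQ_HT_MODE_NO_CLAIM), and per-task ownership is tracked through the capability and kobject machinery (cap_alloc(), cap_publish(), cap_unpublish(), kobject_put()), which also makes the old list-based ipc_irq_cleanup() unnecessary.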