/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as registered by
 *            the SYS_IRQ_REGISTER syscall
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: payload modified by a 'top-half' handler
 * - ARG4: payload modified by a 'top-half' handler
 * - ARG5: payload modified by a 'top-half' handler
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *                  ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking all three of the uspace IRQ hash table lock, the IRQ
 * structure lock and the answerbox lock, we can rule out race conditions
 * between the registration functions and also the cleanup function. Thus an
 * observer can either see the IRQ structure present in both the hash table
 * and the answerbox list, or absent in both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure which is
 * still referenced by, for example, an IRQ handler. The locking scheme forces
 * us to lock the IRQ structure only after any in-progress IRQs on that
 * structure are finished. Because we hold the hash table lock, we prevent new
 * IRQs from taking new references to the IRQ structure.
 */
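
/*
 * For illustration only: a driver could describe its top-half as a small
 * pseudocode program along the lines of the sketch below. The register
 * address, range size and status-bit mask are made up for this example;
 * only the structure mirrors what code_from_uspace() and
 * ipc_irq_top_half_claim() expect.
 *
 *	static irq_pio_range_t example_ranges[] = {
 *		{ .base = 0x81000000, .size = 4 }   // hypothetical MMIO register
 *	};
 *
 *	static irq_cmd_t example_cmds[] = {
 *		// Read the device status register into scratch[1] (-> ARG1).
 *		{ .cmd = CMD_PIO_READ_8, .addr = (void *) 0x81000000, .dstarg = 1 },
 *		// Keep only the hypothetical 'interrupt pending' bit in scratch[2].
 *		{ .cmd = CMD_BTEST, .srcarg = 1, .value = 0x01, .dstarg = 2 },
 *		// If the bit is clear, skip the next command and decline the IRQ.
 *		{ .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *		{ .cmd = CMD_ACCEPT },
 *		{ .cmd = CMD_DECLINE }
 *	};
 *
 *	static irq_code_t example_code = {
 *		.rangecount = 1,
 *		.ranges = example_ranges,
 *		.cmdcount = 5,
 *		.cmds = example_cmds
 *	};
 */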

#include <arch.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>

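/** Unmap PIO ranges previously mapped into the kernel address space.
 *
 * Only memory-mapped ranges (i.e. those above IO_SPACE_BOUNDARY) are
 * actually unmapped; I/O port ranges need no unmapping.
 *
 * @param ranges     Array of PIO ranges.
 * @param rangecount Number of ranges to process.
 *
 */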
static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	size_t i;

	for (i = 0; i < rangecount; i++) {
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

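/** Map memory-mapped PIO ranges and rewrite the pseudocode accordingly.
 *
 * Maps each memory-mapped PIO range into the kernel virtual address space
 * and rewrites the addresses used by the pseudocode commands from physical
 * addresses to the corresponding kernel virtual addresses.
 *
 * @param ranges     Array of PIO ranges.
 * @param rangecount Number of ranges.
 * @param cmds       Array of pseudocode commands.
 * @param cmdcount   Number of commands.
 *
 * @return EOK on success, ENOMEM if a range cannot be mapped or EINVAL if
 *         a command references an address outside of all defined ranges.
 *
 */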
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	uintptr_t *pbase;
	size_t i, j;

	/* Copy the physical base addresses aside. */
	pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (i = 0; i < rangecount; i++) {
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (i = 0; i < cmdcount; i++) {
		uintptr_t addr;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_READ_16:
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_8:
		case CMD_PIO_WRITE_A_16:
		case CMD_PIO_WRITE_A_32:
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		/* Process only memory-mapped PIO addresses. */
		if ((void *) addr < IO_SPACE_BOUNDARY)
			continue;

		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, 1))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode or NULL on failure.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

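	/* Reject pseudocode that exceeds the kernel-imposed size limits. */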
	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);
	if (ranges)
		free(ranges);
	free(code);
	return NULL;
}

/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}

/** Unregister a task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will
	 * try to reacquire it. That basically violates the natural locking
	 * order, but a deadlock in hash_table_remove() is prevented by the
	 * fact that we already held the IRQ lock and didn't drop the hash
	 * table lock in the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a list of all
 * irq_t structures that are registered to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudocode and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assumes that irq->lock is held and interrupts are disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

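	/*
	 * Execute the pseudocode. The scratch array serves as the program's
	 * register file; scratch[1] through scratch[5] later become ARG1
	 * through ARG5 of the notification message (see
	 * ipc_irq_top_half_handler()).
	 */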
	for (size_t i = 0; i < code->cmdcount; i++) {
		uint32_t dstval;

		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		if (srcarg >= IPC_CALL_LEN)
			break;

		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			if (srcarg) {
				pio_write_8((ioport8_t *) code->cmds[i].addr,
				    (uint8_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_16:
			if (srcarg) {
				pio_write_16((ioport16_t *) code->cmds[i].addr,
				    (uint16_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_32:
			if (srcarg) {
				pio_write_32((ioport32_t *) code->cmds[i].addr,
				    (uint32_t) scratch[srcarg]);
			}
			break;
		case CMD_BTEST:
			if ((srcarg) && (dstarg)) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			if ((srcarg) && (!scratch[srcarg])) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
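		/*
		 * We are running in interrupt context, so the allocation must
		 * not block. If it fails, the notification is silently
		 * dropped.
		 */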
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Attach a counter to the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Attach a counter to the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */