source: mainline/kernel/generic/src/ipc/irq.c @ 35b8bfe

Last change on this file since 35b8bfe was 8820544, checked in by Martin Decky <martin@…>, 11 years ago

support for kernel notification multiplexing in the async framework

  • rename SYS_EVENT_* and SYS_IRQ_* syscalls to unify the terminology
  • add SYS_IPC_EVENT_UNSUBSCRIBE
  • remove IRQ handler multiplexing from DDF; the generic mechanism replaces it (unfortunately, the order of arguments used by interrupt_handler_t needs to be permuted to align with the async framework conventions)
/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to subscribe to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its subscription, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking the uspace IRQ hash table lock, the IRQ structure lock
 * and the answerbox lock together, we can rule out race conditions between
 * the registration functions and also the cleanup function. Thus an observer
 * can either see the IRQ structure present in both the hash table and the
 * answerbox list, or absent from both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme
 * forces us to lock the IRQ structure only after any in-progress IRQs on
 * that structure are finished. Because we hold the hash table lock, we
 * prevent new IRQs from taking new references to the IRQ structure.
 */
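
/*
 * Purely illustrative sketch of a top-half pseudocode program of the kind
 * this framework interprets. The port address 0x3f8 and the
 * interrupt-pending mask 0x01 are hypothetical; the fields follow the
 * irq_code_t/irq_cmd_t usage elsewhere in this file. Guarded out so it is
 * never compiled.
 */
#if 0
static irq_pio_range_t example_ranges[] = {
	{
		.base = 0x3f8,	/* hypothetical device register block */
		.size = 8
	}
};

static irq_cmd_t example_cmds[] = {
	{
		/* Read the status register into scratch[1] (becomes ARG1). */
		.cmd = CMD_PIO_READ_8,
		.addr = (void *) 0x3f8,
		.dstarg = 1
	},
	{
		/* Keep only the interrupt-pending bit in scratch[2]. */
		.cmd = CMD_AND,
		.srcarg = 1,
		.dstarg = 2,
		.value = 0x01
	},
	{
		/* If scratch[2] is zero, skip one command (the accept). */
		.cmd = CMD_PREDICATE,
		.srcarg = 2,
		.value = 1
	},
	{
		.cmd = CMD_ACCEPT
	},
	{
		.cmd = CMD_DECLINE
	}
};

static irq_code_t example_code = {
	.rangecount = 1,
	.ranges = example_ranges,
	.cmdcount = 5,
	.cmds = example_cmds
};
#endif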

#include <arch.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>

static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
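
/*
 * Worked example of the rewrite above (all addresses hypothetical):
 * suppose pbase[0] == 0x80001000 with ranges[0].size == 0x100, and
 * km_map() returns the kernel virtual address 0xffff2000, which is
 * stored back into ranges[0].base. A command whose addr is the physical
 * address 0x80001010 is then rewritten as
 *
 *	addr = 0x80001010 - pbase[0] + ranges[0].base == 0xffff2010
 *
 * so the top-half handler can later execute it on the kernel mapping.
 */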

/** Statically check the top-half pseudocode
 *
 * Check the top-half pseudocode for invalid or unsafe
 * constructs.
 *
 */
static int code_check(irq_cmd_t *cmds, size_t cmdcount)
{
	for (size_t i = 0; i < cmdcount; i++) {
		/*
		 * Check for accepted ranges.
		 */
		if (cmds[i].cmd >= CMD_LAST)
			return EINVAL;

		if (cmds[i].srcarg >= IPC_CALL_LEN)
			return EINVAL;

		if (cmds[i].dstarg >= IPC_CALL_LEN)
			return EINVAL;

		switch (cmds[i].cmd) {
		case CMD_PREDICATE:
			/*
			 * Check for control flow overflow.
			 * Note that jumping just beyond the last
			 * command is correct behaviour.
			 */
			if (i + cmds[i].value > cmdcount)
				return EINVAL;

			break;
		default:
			break;
		}
	}

	return EOK;
}
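
/*
 * Example of the CMD_PREDICATE bound above (indices hypothetical): in a
 * program with cmdcount == 4, a predicate at i == 2 may use value == 2,
 * because i + value == 4 == cmdcount jumps just beyond the last command
 * and merely terminates the program; value == 3 would jump past the end
 * and is rejected with EINVAL.
 */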

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}

/** Subscribe an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}
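
/*
 * A minimal usage sketch (the surrounding syscall plumbing is assumed
 * and not shown in this file): the SYS_IPC_IRQ_SUBSCRIBE syscall handler
 * would subscribe the calling task's answerbox along these lines:
 *
 *	irq_code_t *ucode;  // userspace pointer taken from the syscall
 *	int rc = ipc_irq_subscribe(&TASK->answerbox, inr, devno,
 *	    imethod, ucode);
 *
 * After EOK, every interrupt accepted by the top-half pseudocode
 * produces one IPC_CALL_NOTIF message in the answerbox, carrying
 * scratch[1..5] as ARG1..ARG5.
 */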

/** Unsubscribe a task from an IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unsubscribe(answerbox_t *box, inr_t inr, devno_t devno)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will
	 * try to reacquire it. That basically violates the natural locking
	 * order, but a deadlock in hash_table_remove() is prevented by the
	 * fact that we already held the IRQ lock and did not drop the hash
	 * table lock in the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a
 * list of all irq_t structures that are subscribed to send
 * notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list),
		    irq_t, notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * did not drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		/*
		 * Release both locks so that we can free the pseudocode.
		 */
		irq_spinlock_unlock(&box->irq_lock, false);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		code_free(irq->notif_cfg.code);
		free(irq);

		/* Reacquire both locks before taking another round. */
		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
		irq_spinlock_lock(&box->irq_lock, false);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}
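
/*
 * Trace of the illustrative program near the top of this file
 * (hypothetical device): CMD_PIO_READ_8 loads the status byte into
 * scratch[1], CMD_AND leaves the pending bit in scratch[2], and
 * CMD_PREDICATE with value == 1 skips the following CMD_ACCEPT when
 * scratch[2] == 0, so the interpreter falls through to CMD_DECLINE;
 * with the bit set the predicate does not skip and CMD_ACCEPT returns
 * IRQ_ACCEPT immediately.
 */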

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */