source: mainline/kernel/generic/src/ipc/irq.c @ e9d15d9

Last change on this file was e9d15d9, checked in by Jakub Jermar <jakub@…>, 8 years ago

Turn IRQ structures into kernel objects

ipc_irq_subscribe() now returns a capability for the underlying IRQ kernel
object. ipc_irq_unsubscribe() can now be done only with a valid IRQ capability.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to subscribe to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking the uspace IRQ hash table lock, the IRQ structure lock
 * and the answerbox lock together, we can rule out race conditions between
 * the registration functions and also the cleanup function. Thus an observer
 * can either see the IRQ structure present in both the hash table and the
 * answerbox list, or absent from both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme
 * forces us to lock the IRQ structure only after any in-progress IRQs on
 * that structure are finished. Because we hold the hash table lock, we
 * prevent new IRQs from taking new references to the IRQ structure.
 */

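/*
 * For illustration only (a hedged sketch, not part of this file's API): a
 * userspace driver for a hypothetical device with an interrupt status
 * register at PIO address 0x3f8 + 5 might submit a top-half program like
 * the one below. The port base, offset and mask are invented values.
 *
 * @code
 *	static irq_cmd_t demo_cmds[] = {
 *		{
 *			// scratch[1] = pio_read_8(status register)
 *			.cmd = CMD_PIO_READ_8,
 *			.addr = (void *) 0x3fd,
 *			.dstarg = 1
 *		},
 *		{
 *			// scratch[2] = scratch[1] & interrupt-pending mask
 *			.cmd = CMD_AND,
 *			.srcarg = 1,
 *			.dstarg = 2,
 *			.value = 0x01
 *		},
 *		{
 *			// if (scratch[2] == 0) skip the next command
 *			.cmd = CMD_PREDICATE,
 *			.srcarg = 2,
 *			.value = 1
 *		},
 *		{
 *			// the interrupt is ours, send the notification
 *			.cmd = CMD_ACCEPT
 *		}
 *	};
 *
 *	static irq_code_t demo_code = {
 *		.rangecount = 1,
 *		.ranges = (irq_pio_range_t[]) {
 *			{ .base = 0x3f8, .size = 8 }
 *		},
 *		.cmdcount = 4,
 *		.cmds = demo_cmds
 *	};
 * @endcode
 *
 * Falling off the end of the program (here: when the predicate skips
 * CMD_ACCEPT) declines the interrupt, see ipc_irq_top_half_claim().
 */
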
#include <arch.h>
#include <assert.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>
#include <kobject/kobject.h>

static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}

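/*
 * A worked example of the rewrite above (invented numbers): if a range has
 * physical base pbase[j] = 0x100000 with size 0x2000, and km_map() placed it
 * at kernel virtual address ranges[j].base = 0xffff2000, then a command
 * addressing physical 0x101008 is rewritten to
 * 0x101008 - 0x100000 + 0xffff2000 = 0xffff3008.
 */
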
/** Statically check the top-half pseudocode.
 *
 * Check the top-half pseudocode for invalid or unsafe
 * constructs.
 *
 */
static int code_check(irq_cmd_t *cmds, size_t cmdcount)
{
	for (size_t i = 0; i < cmdcount; i++) {
		/*
		 * Check for accepted ranges.
		 */
		if (cmds[i].cmd >= CMD_LAST)
			return EINVAL;

		if (cmds[i].srcarg >= IPC_CALL_LEN)
			return EINVAL;

		if (cmds[i].dstarg >= IPC_CALL_LEN)
			return EINVAL;

		switch (cmds[i].cmd) {
		case CMD_PREDICATE:
			/*
			 * Check for control flow overflow.
			 * Note that jumping just beyond the last
			 * command is correct behaviour.
			 */
			if (i + cmds[i].value > cmdcount)
				return EINVAL;

			break;
		default:
			break;
		}
	}

	return EOK;
}

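/*
 * For instance (illustrative numbers only): in a four-command program, a
 * CMD_PREDICATE at index 2 may carry a value of at most 2, since
 * 2 + 2 == cmdcount jumps just beyond the last command; a value of 3 would
 * overflow the program and is rejected with EINVAL.
 */
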
/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}

/** Subscribe an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to the top-half pseudocode.
 *
 * @return IRQ capability on success.
 * @return Negative error code otherwise.
 *
 */
int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ kernel object.
	 */
	int cap = kobject_alloc(TASK);
	if (cap < 0) {
		/* Do not leak the already copied-in pseudocode. */
		code_free(code);
		return cap;
	}
	kobject_t *kobj = kobject_get_current(cap, KOBJECT_TYPE_ALLOCATED);
	assert(kobj);
	kobj->type = KOBJECT_TYPE_IRQ;

	irq_t *irq = &kobj->irq;
	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		kobject_free(TASK, cap);
		return EEXIST;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return cap;
}

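/*
 * A hedged usage sketch from the driver's side, assuming a userspace
 * wrapper of the same name around SYS_IPC_IRQ_SUBSCRIBE (the wrapper name
 * and the method constant are illustrative):
 *
 * @code
 *	int cap = ipc_irq_subscribe(inr, devno, DEMO_IRQ_METHOD, &demo_code);
 *	if (cap < 0) {
 *		// negative error code: ELIMIT, EBADMEM, EEXIST, ...
 *	}
 *
 *	// ... receive notifications on the answerbox ...
 *
 *	ipc_irq_unsubscribe(cap);
 * @endcode
 */
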
/** Unsubscribe a task from IRQ notifications.
 *
 * @param box     Answerbox associated with the notification.
 * @param irq_cap IRQ capability.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unsubscribe(answerbox_t *box, int irq_cap)
{
	kobject_t *kobj = kobject_get_current(irq_cap, KOBJECT_TYPE_IRQ);
	if (!kobj)
		return ENOENT;
	irq_t *irq = &kobj->irq;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	assert(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

	irq_spinlock_unlock(&box->irq_lock, false);
	/* irq->lock unlocked by the hash table remove_callback */
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ kernel object. */
	kobject_free(TASK, irq_cap);

	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is efficient because the answerbox maintains a list of all
 * irq_t structures that are subscribed to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		assert(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Remove from the hash table. */
		hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

		/*
		 * Release both locks so that we can free the pseudocode.
		 */
		irq_spinlock_unlock(&box->irq_lock, false);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		code_free(irq->notif_cfg.code);

		// XXX: what to do about the IRQ capability? The task is in
		// clean-up anyway.

		/* Reacquire both locks before taking another round. */
		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
		irq_spinlock_lock(&box->irq_lock, false);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

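/*
 * Tracing the illustrative demo_cmds program from the top of this file
 * through the interpreter above: CMD_PIO_READ_8 loads the status register
 * into scratch[1], CMD_AND leaves the pending bit in scratch[2], and
 * CMD_PREDICATE either falls through to CMD_ACCEPT (bit set) or skips it
 * and runs off the end of the program, which returns IRQ_DECLINE (bit
 * clear).
 */
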
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	assert(irq);

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

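/*
 * A hedged sketch of the intended caller: a kernel-side driver that has
 * already claimed an interrupt elsewhere can push a notification carrying
 * driver-specific payload, e.g. (argument meanings are invented):
 *
 * @code
 *	ipc_irq_send_msg(irq, bytes_ready, 0, 0, 0, 0);
 * @endcode
 */
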
/** @}
 */