source: mainline/kernel/generic/src/ipc/irq.c@ 4b1c7c6f

Last change on this file was 4b1c7c6f, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

Clean up PAGE_* flags.

Remove "nop flags", they are confusing for readers and any benefit they would
gain in self-documentation they lose in being used inconsistently.

Remove "PAGE_READ", the only place it's used meaningfully is the incomplete
RISC-V implementation, and most call sites assume read is implied.

[162f919]1/*
[df4ed85]2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
[162f919]4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
[cc73a8a1]30/** @addtogroup genericipc
[b45c443]31 * @{
32 */
[da1bafb]33
[cc73a8a1]34/**
35 * @file
36 * @brief IRQ notification framework.
[bdc5c516]37 *
[8820544]38 * This framework allows applications to subscribe to receive a notification
[a5d0143]39 * when an interrupt is detected. The application may provide a simple
40 * 'top-half' handler as part of its subscription, which can perform basic
41 * operations (read/write a port or memory, add information to the
42 * notification IPC message).
[bdc5c516]43 *
44 * The structure of a notification message is as follows:
[8820544]45 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
[56c167c]46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
[43752b6]51 * - in_phone_hash: interrupt counter (may be needed to ensure correct order
[228e490]52 * in multithreaded drivers)
[bdc5c516]53 */
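
/*
 * Illustrative example (not part of the original file; the port addresses,
 * the mask and the demo_* names are hypothetical): a serial-like driver
 * could subscribe with a top-half program such as the following, built from
 * the structures and commands interpreted by ipc_irq_top_half_claim() below.
 *
 *   static irq_pio_range_t demo_ranges[] = {
 *       { .base = 0x3f8, .size = 8 }    // covers the status port used below
 *   };
 *
 *   static irq_cmd_t demo_cmds[] = {
 *       // read the interrupt status register into scratch[1] (-> ARG1)
 *       { .cmd = CMD_PIO_READ_8, .addr = (void *) 0x3fd, .dstarg = 1 },
 *       // isolate the assumed "interrupt pending" bit into scratch[2]
 *       { .cmd = CMD_AND, .srcarg = 1, .dstarg = 2, .value = 0x01 },
 *       // if the bit is clear, skip the next command (CMD_ACCEPT)
 *       { .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *       { .cmd = CMD_ACCEPT },
 *       { .cmd = CMD_DECLINE }
 *   };
 *
 *   static irq_code_t demo_code = {
 *       .rangecount = 1, .ranges = demo_ranges,
 *       .cmdcount = 5, .cmds = demo_cmds
 *   };
 */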
54
[162f919]55#include <arch.h>
[63e27ef]56#include <assert.h>
[162f919]57#include <mm/slab.h>
[a996ae31]58#include <mm/page.h>
59#include <mm/km.h>
[162f919]60#include <errno.h>
[2b017ba]61#include <ddi/irq.h>
[162f919]62#include <ipc/ipc.h>
63#include <ipc/irq.h>
[e3c762cd]64#include <syscall/copy.h>
[d0c5901]65#include <console/console.h>
[253f35a1]66#include <print.h>
[a996ae31]67#include <macros.h>
[3f74275]68#include <cap/cap.h>
[a996ae31]69
70static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
71{
[56c167c]72 for (size_t i = 0; i < rangecount; i++) {
[472d813]73#ifdef IO_SPACE_BOUNDARY
[a996ae31]74 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
[472d813]75#endif
[a996ae31]76 km_unmap(ranges[i].base, ranges[i].size);
77 }
78}
79
[b7fd2a0]80static errno_t ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
[a996ae31]81 irq_cmd_t *cmds, size_t cmdcount)
82{
83 /* Copy the physical base addresses aside. */
[56c167c]84 uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
85 for (size_t i = 0; i < rangecount; i++)
[a996ae31]86 pbase[i] = ranges[i].base;
[a35b458]87
[a996ae31]88 /* Map the PIO ranges into the kernel virtual address space. */
[56c167c]89 for (size_t i = 0; i < rangecount; i++) {
[472d813]90#ifdef IO_SPACE_BOUNDARY
[a996ae31]91 if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
92 continue;
[472d813]93#endif
[a996ae31]94 ranges[i].base = km_map(pbase[i], ranges[i].size,
[4b1c7c6f]95 PAGE_WRITE);
[a996ae31]96 if (!ranges[i].base) {
97 ranges_unmap(ranges, i);
98 free(pbase);
99 return ENOMEM;
100 }
101 }
[a35b458]102
[a5d0143]103 /* Rewrite the IRQ code addresses from physical to kernel virtual. */
[56c167c]104 for (size_t i = 0; i < cmdcount; i++) {
[a996ae31]105 uintptr_t addr;
[f2bbe8c]106 size_t size;
[a35b458]107
[a996ae31]108 /* Process only commands that use an address. */
109 switch (cmds[i].cmd) {
110 case CMD_PIO_READ_8:
[56c167c]111 case CMD_PIO_WRITE_8:
112 case CMD_PIO_WRITE_A_8:
[f2bbe8c]113 size = 1;
114 break;
[56c167c]115 case CMD_PIO_READ_16:
116 case CMD_PIO_WRITE_16:
117 case CMD_PIO_WRITE_A_16:
[f2bbe8c]118 size = 2;
119 break;
[56c167c]120 case CMD_PIO_READ_32:
121 case CMD_PIO_WRITE_32:
122 case CMD_PIO_WRITE_A_32:
[f2bbe8c]123 size = 4;
[a996ae31]124 break;
125 default:
126 /* Move on to the next command. */
127 continue;
128 }
[a35b458]129
[a996ae31]130 addr = (uintptr_t) cmds[i].addr;
[a35b458]131
[56c167c]132 size_t j;
[a996ae31]133 for (j = 0; j < rangecount; j++) {
134 /* Find the matching range. */
[f2bbe8c]135 if (!iswithin(pbase[j], ranges[j].size, addr, size))
[a996ae31]136 continue;
[a35b458]137
[a996ae31]138 /* Switch the command to a kernel virtual address. */
139 addr -= pbase[j];
140 addr += ranges[j].base;
[a35b458]141
[a996ae31]142 cmds[i].addr = (void *) addr;
143 break;
[bd8c6537]144 }
[a35b458]145
[bd8c6537]146 if (j == rangecount) {
147 /*
148 * The address used in this command is outside of all
149 * defined ranges.
150 */
151 ranges_unmap(ranges, rangecount);
152 free(pbase);
153 return EINVAL;
154 }
[a996ae31]155 }
[a35b458]156
[a996ae31]157 free(pbase);
158 return EOK;
159}
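
/*
 * Worked example of the rewriting above (addresses are hypothetical): if a
 * range has physical base 0xfee00000 and size 0x100, and km_map() placed it
 * at kernel virtual address 0xffff800000400000, then a CMD_PIO_READ_32
 * command whose addr is the physical address 0xfee00010 is rewritten to
 * 0xffff800000400010, i.e. addr - pbase[j] + ranges[j].base.
 */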
[162f919]160
[a5d0143]161/** Statically check the top-half IRQ code.
[8486c07]162 *
[a5d0143]163 * Check the top-half IRQ code for invalid or unsafe constructs.
[8486c07]164 *
165 */
[b7fd2a0]166static errno_t code_check(irq_cmd_t *cmds, size_t cmdcount)
[8486c07]167{
168 for (size_t i = 0; i < cmdcount; i++) {
169 /*
170 * Check for accepted ranges.
171 */
172 if (cmds[i].cmd >= CMD_LAST)
173 return EINVAL;
[a35b458]174
[8486c07]175 if (cmds[i].srcarg >= IPC_CALL_LEN)
176 return EINVAL;
[a35b458]177
[8486c07]178 if (cmds[i].dstarg >= IPC_CALL_LEN)
179 return EINVAL;
[a35b458]180
[8486c07]181 switch (cmds[i].cmd) {
182 case CMD_PREDICATE:
183 /*
184 * Check for control flow overflow.
185 * Note that jumping just beyond the last
186 * command is a correct behaviour.
187 */
188 if (i + cmds[i].value > cmdcount)
189 return EINVAL;
[a35b458]190
[8486c07]191 break;
192 default:
193 break;
194 }
195 }
[a35b458]196
[8486c07]197 return EOK;
198}
199
[a5d0143]200/** Free the top-half IRQ code.
[8b243f2]201 *
[a5d0143]202 * @param code Pointer to the top-half IRQ code.
[da1bafb]203 *
[8b243f2]204 */
[162f919]205static void code_free(irq_code_t *code)
206{
207 if (code) {
[a996ae31]208 ranges_unmap(code->ranges, code->rangecount);
209 free(code->ranges);
[162f919]210 free(code->cmds);
211 free(code);
212 }
213}
214
[a5d0143]215/** Copy the top-half IRQ code from userspace into the kernel.
[8b243f2]216 *
[a5d0143]217 * @param ucode Userspace address of the top-half IRQ code.
[da1bafb]218 *
[a5d0143]219 * @return Kernel address of the copied IRQ code.
[8b243f2]220 *
221 */
222static irq_code_t *code_from_uspace(irq_code_t *ucode)
[162f919]223{
[a996ae31]224 irq_pio_range_t *ranges = NULL;
225 irq_cmd_t *cmds = NULL;
[a35b458]226
[da1bafb]227 irq_code_t *code = malloc(sizeof(*code), 0);
[b7fd2a0]228 errno_t rc = copy_from_uspace(code, ucode, sizeof(*code));
[a996ae31]229 if (rc != EOK)
230 goto error;
[a35b458]231
[a996ae31]232 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
233 (code->cmdcount > IRQ_MAX_PROG_SIZE))
234 goto error;
[a35b458]235
[a996ae31]236 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
237 rc = copy_from_uspace(ranges, code->ranges,
238 sizeof(code->ranges[0]) * code->rangecount);
239 if (rc != EOK)
240 goto error;
[a35b458]241
[a996ae31]242 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
243 rc = copy_from_uspace(cmds, code->cmds,
[8b243f2]244 sizeof(code->cmds[0]) * code->cmdcount);
[a996ae31]245 if (rc != EOK)
246 goto error;
[a35b458]247
[8486c07]248 rc = code_check(cmds, code->cmdcount);
249 if (rc != EOK)
250 goto error;
[a35b458]251
[a996ae31]252 rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
253 code->cmdcount);
254 if (rc != EOK)
255 goto error;
[a35b458]256
[a996ae31]257 code->ranges = ranges;
258 code->cmds = cmds;
[a35b458]259
[162f919]260 return code;
[a35b458]261
[a996ae31]262error:
263 if (cmds)
264 free(cmds);
[a35b458]265
[a996ae31]266 if (ranges)
267 free(ranges);
[a35b458]268
[a996ae31]269 free(code);
270 return NULL;
[162f919]271}
272
[c1f68b0]273static void irq_hash_out(irq_t *irq)
274{
275 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
276 irq_spinlock_lock(&irq->lock, false);
[a35b458]277
[c1f68b0]278 if (irq->notif_cfg.hashed_in) {
279 /* Remove the IRQ from the uspace IRQ hash table. */
280 hash_table_remove_item(&irq_uspace_hash_table, &irq->link);
281 irq->notif_cfg.hashed_in = false;
282 }
283
284 irq_spinlock_unlock(&irq->lock, false);
285 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
286}
287
[48bcf49]288static void irq_destroy(void *arg)
289{
290 irq_t *irq = (irq_t *) arg;
291
[c1f68b0]292 irq_hash_out(irq);
293
[48bcf49]294 /* Free up the IRQ code and associated structures. */
295 code_free(irq->notif_cfg.code);
[82d515e9]296 slab_free(irq_cache, irq);
[48bcf49]297}
298
299static kobject_ops_t irq_kobject_ops = {
300 .destroy = irq_destroy
301};
302
[8820544]303/** Subscribe an answerbox as a receiving end for IRQ notifications.
[2b017ba]304 *
[56c167c]305 * @param box Receiving answerbox.
306 * @param inr IRQ number.
[a5d0143]307 * @param imethod Interface and method to be associated with the notification.
308 * @param ucode Uspace pointer to top-half IRQ code.
[56c167c]309 *
[9233e9d]310 * @param[out] uspace_handle Uspace pointer to IRQ capability handle
311 *
312 * @return Error code.
[2b017ba]313 *
314 */
[b7fd2a0]315errno_t ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
[9233e9d]316 irq_code_t *ucode, cap_handle_t *uspace_handle)
[162f919]317{
[78ffb70]318 if ((inr < 0) || (inr > last_inr))
319 return ELIMIT;
[a35b458]320
[da1bafb]321 irq_code_t *code;
[162f919]322 if (ucode) {
323 code = code_from_uspace(ucode);
324 if (!code)
325 return EBADMEM;
[da1bafb]326 } else
[162f919]327 code = NULL;
[a35b458]328
[cecb0789]329 /*
[e9d15d9]330 * Allocate and populate the IRQ kernel object.
[cecb0789]331 */
[09d01f2]332 cap_handle_t handle;
[b7fd2a0]333 errno_t rc = cap_alloc(TASK, &handle);
[09d01f2]334 if (rc != EOK)
335 return rc;
[a35b458]336
[09d01f2]337 rc = copy_to_uspace(uspace_handle, &handle, sizeof(cap_handle_t));
[9233e9d]338 if (rc != EOK) {
339 cap_free(TASK, handle);
340 return rc;
341 }
342
[82d515e9]343 irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
[63d8f43]344 if (!irq) {
345 cap_free(TASK, handle);
346 return ENOMEM;
347 }
[48bcf49]348
349 kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC);
350 if (!kobject) {
351 cap_free(TASK, handle);
[82d515e9]352 slab_free(irq_cache, irq);
[48bcf49]353 return ENOMEM;
354 }
[a35b458]355
[cecb0789]356 irq_initialize(irq);
357 irq->inr = inr;
358 irq->claim = ipc_irq_top_half_claim;
[691eb52]359 irq->handler = ipc_irq_top_half_handler;
[4874c2d]360 irq->notif_cfg.notify = true;
[2b017ba]361 irq->notif_cfg.answerbox = box;
[228e490]362 irq->notif_cfg.imethod = imethod;
[2b017ba]363 irq->notif_cfg.code = code;
364 irq->notif_cfg.counter = 0;
[a35b458]365
[cecb0789]366 /*
[9e87562]367 * Insert the IRQ structure into the uspace IRQ hash table.
[cecb0789]368 */
[da1bafb]369 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
370 irq_spinlock_lock(&irq->lock, false);
[a35b458]371
[48bcf49]372 irq->notif_cfg.hashed_in = true;
[82cbf8c6]373 hash_table_insert(&irq_uspace_hash_table, &irq->link);
[a35b458]374
[da1bafb]375 irq_spinlock_unlock(&irq->lock, false);
376 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
[9e87562]377
[48bcf49]378 kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
379 cap_publish(TASK, handle, kobject);
[a35b458]380
[9233e9d]381 return EOK;
[cecb0789]382}
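
/*
 * Illustrative sketch (hypothetical caller; box, MY_INR, MY_IMETHOD, ucode
 * and uhandle are assumptions): a caller, e.g. the SYS_IPC_IRQ_SUBSCRIBE
 * syscall handler, supplies an answerbox (typically the subscribing task's),
 * the IRQ number, the notification method, a userspace pointer to the
 * top-half program and a userspace pointer that receives the IRQ capability
 * handle.
 *
 *   errno_t rc = ipc_irq_subscribe(box, MY_INR, MY_IMETHOD, ucode, uhandle);
 *   if (rc != EOK)
 *       return rc;    // e.g. ELIMIT, EBADMEM or ENOMEM as above
 */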
383
[8820544]384/** Unsubscribe a task from IRQ notifications.
[cecb0789]385 *
[3f74275]386 * @param box Answerbox associated with the notification.
387 * @param handle IRQ capability handle.
[56c167c]388 *
[cde999a]389 * @return EOK on success or an error code.
[56c167c]390 *
[cecb0789]391 */
[b7fd2a0]392errno_t ipc_irq_unsubscribe(answerbox_t *box, int handle)
[cecb0789]393{
[48bcf49]394 kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
395 if (!kobj)
[cecb0789]396 return ENOENT;
[a35b458]397
[48bcf49]398 assert(kobj->irq->notif_cfg.answerbox == box);
399
[c1f68b0]400 irq_hash_out(kobj->irq);
[48bcf49]401
402 kobject_put(kobj);
[3f74275]403 cap_free(TASK, handle);
[a35b458]404
[cecb0789]405 return EOK;
406}
407
[8b243f2]408/** Add a call to the proper answerbox queue.
[2b017ba]409 *
[da1bafb]410 * Assume that irq->lock is held and interrupts are disabled.
411 *
412 * @param irq IRQ structure referencing the target answerbox.
413 * @param call IRQ notification call.
[874621f]414 *
[2b017ba]415 */
416static void send_call(irq_t *irq, call_t *call)
[874621f]417{
[da1bafb]418 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
[cfaa35a]419 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
[da1bafb]420 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
[a35b458]421
[2b017ba]422 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
[874621f]423}
424
[a5d0143]425/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
[874621f]426 *
[da1bafb]427 * @param irq IRQ structure.
428 *
[a5d0143]429 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
430 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
[cecb0789]431 *
[874621f]432 */
[cecb0789]433irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
[874621f]434{
[cecb0789]435 irq_code_t *code = irq->notif_cfg.code;
[da1bafb]436 uint32_t *scratch = irq->notif_cfg.scratch;
[a35b458]437
[cecb0789]438 if (!irq->notif_cfg.notify)
439 return IRQ_DECLINE;
[a35b458]440
[cecb0789]441 if (!code)
442 return IRQ_DECLINE;
[a35b458]443
[01e39cbe]444 for (size_t i = 0; i < code->cmdcount; i++) {
[da1bafb]445 uintptr_t srcarg = code->cmds[i].srcarg;
446 uintptr_t dstarg = code->cmds[i].dstarg;
[a35b458]447
[cecb0789]448 switch (code->cmds[i].cmd) {
449 case CMD_PIO_READ_8:
[8486c07]450 scratch[dstarg] =
451 pio_read_8((ioport8_t *) code->cmds[i].addr);
[cecb0789]452 break;
453 case CMD_PIO_READ_16:
[8486c07]454 scratch[dstarg] =
455 pio_read_16((ioport16_t *) code->cmds[i].addr);
[cecb0789]456 break;
457 case CMD_PIO_READ_32:
[8486c07]458 scratch[dstarg] =
459 pio_read_32((ioport32_t *) code->cmds[i].addr);
[cecb0789]460 break;
461 case CMD_PIO_WRITE_8:
462 pio_write_8((ioport8_t *) code->cmds[i].addr,
463 (uint8_t) code->cmds[i].value);
464 break;
465 case CMD_PIO_WRITE_16:
466 pio_write_16((ioport16_t *) code->cmds[i].addr,
467 (uint16_t) code->cmds[i].value);
468 break;
469 case CMD_PIO_WRITE_32:
470 pio_write_32((ioport32_t *) code->cmds[i].addr,
471 (uint32_t) code->cmds[i].value);
472 break;
[9cdac5a]473 case CMD_PIO_WRITE_A_8:
[8486c07]474 pio_write_8((ioport8_t *) code->cmds[i].addr,
475 (uint8_t) scratch[srcarg]);
[9cdac5a]476 break;
477 case CMD_PIO_WRITE_A_16:
[8486c07]478 pio_write_16((ioport16_t *) code->cmds[i].addr,
479 (uint16_t) scratch[srcarg]);
[9cdac5a]480 break;
481 case CMD_PIO_WRITE_A_32:
[8486c07]482 pio_write_32((ioport32_t *) code->cmds[i].addr,
483 (uint32_t) scratch[srcarg]);
484 break;
485 case CMD_LOAD:
486 scratch[dstarg] = code->cmds[i].value;
[9cdac5a]487 break;
[8486c07]488 case CMD_AND:
489 scratch[dstarg] = scratch[srcarg] &
490 code->cmds[i].value;
[cecb0789]491 break;
492 case CMD_PREDICATE:
[8486c07]493 if (scratch[srcarg] == 0)
[cecb0789]494 i += code->cmds[i].value;
[a35b458]495
[cecb0789]496 break;
497 case CMD_ACCEPT:
498 return IRQ_ACCEPT;
499 case CMD_DECLINE:
500 default:
501 return IRQ_DECLINE;
502 }
[874621f]503 }
[a35b458]504
[cecb0789]505 return IRQ_DECLINE;
[874621f]506}
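
/*
 * Worked trace (using the illustrative top-half program near the top of this
 * file; the device behaviour is hypothetical): CMD_PIO_READ_8 stores the
 * status byte in scratch[1], CMD_AND leaves the "pending" bit in scratch[2],
 * and CMD_PREDICATE skips CMD_ACCEPT when that bit is zero, so interrupts
 * raised by other devices sharing the line are declined. On accept,
 * ipc_irq_top_half_handler() below copies scratch[1]..scratch[5] into
 * ARG1..ARG5 of the notification, so the driver sees the status byte in ARG1.
 */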
507
[cecb0789]508/** IRQ top-half handler.
[162f919]509 *
[2b017ba]510 * We expect interrupts to be disabled and the irq->lock already held.
[8b243f2]511 *
[da1bafb]512 * @param irq IRQ structure.
513 *
[162f919]514 */
[cecb0789]515void ipc_irq_top_half_handler(irq_t *irq)
[162f919]516{
[63e27ef]517 assert(irq);
[a35b458]518
[63e27ef]519 assert(interrupts_disabled());
520 assert(irq_spinlock_locked(&irq->lock));
[a35b458]521
[2b017ba]522 if (irq->notif_cfg.answerbox) {
[da1bafb]523 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
[cecb0789]524 if (!call)
[d8f7362]525 return;
[a35b458]526
[162f919]527 call->flags |= IPC_CALL_NOTIF;
[43752b6]528 /* Attach a counter to the message */
[0c1a5d8a]529 call->priv = ++irq->notif_cfg.counter;
[a35b458]530
[43752b6]531 /* Set up args */
[228e490]532 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
[cecb0789]533 IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
534 IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
535 IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
536 IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
537 IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
[a35b458]538
[2b017ba]539 send_call(irq, call);
[162f919]540 }
541}
542
[cecb0789]543/** Send notification message.
[874621f]544 *
[da1bafb]545 * @param irq IRQ structure.
546 * @param a1 Driver-specific payload argument.
547 * @param a2 Driver-specific payload argument.
548 * @param a3 Driver-specific payload argument.
549 * @param a4 Driver-specific payload argument.
550 * @param a5 Driver-specific payload argument.
551 *
[162f919]552 */
[96b02eb9]553void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
554 sysarg_t a4, sysarg_t a5)
[162f919]555{
[da1bafb]556 irq_spinlock_lock(&irq->lock, true);
[a35b458]557
[cecb0789]558 if (irq->notif_cfg.answerbox) {
[da1bafb]559 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
[cecb0789]560 if (!call) {
[da1bafb]561 irq_spinlock_unlock(&irq->lock, true);
[cecb0789]562 return;
[b14e35f2]563 }
[a35b458]564
[cecb0789]565 call->flags |= IPC_CALL_NOTIF;
566 /* Put a counter to the message */
567 call->priv = ++irq->notif_cfg.counter;
[a35b458]568
[228e490]569 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
[cecb0789]570 IPC_SET_ARG1(call->data, a1);
571 IPC_SET_ARG2(call->data, a2);
572 IPC_SET_ARG3(call->data, a3);
573 IPC_SET_ARG4(call->data, a4);
574 IPC_SET_ARG5(call->data, a5);
[a35b458]575
[cecb0789]576 send_call(irq, call);
[b14e35f2]577 }
[a35b458]578
[da1bafb]579 irq_spinlock_unlock(&irq->lock, true);
[162f919]580}
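
/*
 * Illustrative sketch (hypothetical kernel-side driver; the payload values
 * are assumptions): a driver that handles the interrupt in the kernel can
 * still notify the subscribed task with driver-specific payload arguments:
 *
 *   ipc_irq_send_msg(irq, scancode, 0, 0, 0, 0);
 */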
[b45c443]581
[cc73a8a1]582/** @}
[b45c443]583 */