source: mainline/kernel/generic/src/ipc/irq.c@ 76e17d7c

Last change on this file since 76e17d7c was fc0de8c, checked in by Jakub Jermar <jakub@…>, 6 years ago

Move kobject's ops out of kobject

Kobject ops is a property of the kobject type rather than the individual
kernel objects. There is no need to remember the ops in every single
instance.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_ipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to subscribe to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
 * - request_label: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 */

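/*
 * Illustrative sketch only (not part of the kernel sources): a userspace
 * driver could describe its 'top-half' handler with an irq_code_t along the
 * following lines before handing it to SYS_IPC_IRQ_SUBSCRIBE. The structure
 * and command names match those used below; the port address 0x60, the
 * status bit mask and the overall device logic are made up for the example.
 *
 * @code
 * static irq_pio_range_t demo_ranges[] = {
 *     { .base = 0x60, .size = 1 }
 * };
 *
 * static irq_cmd_t demo_cmds[] = {
 *     // Read the (hypothetical) status register into scratch[1].
 *     { .cmd = CMD_PIO_READ_8, .addr = (void *) 0x60, .dstarg = 1 },
 *     // Mask out the interrupt-pending bit into scratch[2].
 *     { .cmd = CMD_AND, .srcarg = 1, .dstarg = 2, .value = 0x01 },
 *     // If the bit is clear, skip the next command (and thus decline).
 *     { .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *     { .cmd = CMD_ACCEPT }
 * };
 *
 * static irq_code_t demo_code = {
 *     .rangecount = 1,
 *     .ranges = demo_ranges,
 *     .cmdcount = 4,
 *     .cmds = demo_cmds
 * };
 * @endcode
 *
 * ARG1 and ARG2 of the resulting notification then carry scratch[1] and
 * scratch[2] as filled in by ipc_irq_top_half_claim() below.
 */
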
#include <arch.h>
#include <assert.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <macros.h>
#include <cap/cap.h>
#include <stdlib.h>

static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

static errno_t ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t));
	if (!pbase)
		return ENOMEM;
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    KM_NATURAL_ALIGNMENT,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}

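/*
 * Worked example with made-up numbers: if a range was registered as
 * { .base = 0xfee00000, .size = 0x1000 } and km_map() above returned the
 * kernel virtual address 0xffff800000100000 for it, then a command whose
 * addr is the physical address 0xfee00020 is rewritten to
 * 0xffff800000100000 + (0xfee00020 - 0xfee00000) = 0xffff800000100020.
 */
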
/** Statically check the top-half IRQ code.
 *
 * Check the top-half IRQ code for invalid or unsafe constructs.
 *
 */
static errno_t code_check(irq_cmd_t *cmds, size_t cmdcount)
{
	for (size_t i = 0; i < cmdcount; i++) {
		/*
		 * Check for accepted ranges.
		 */
		if (cmds[i].cmd >= CMD_LAST)
			return EINVAL;

		if (cmds[i].srcarg >= IPC_CALL_LEN)
			return EINVAL;

		if (cmds[i].dstarg >= IPC_CALL_LEN)
			return EINVAL;

		switch (cmds[i].cmd) {
		case CMD_PREDICATE:
			/*
			 * Check for control flow overflow.
			 * Note that jumping just beyond the last
			 * command is correct behaviour.
			 */
			if (i + cmds[i].value > cmdcount)
				return EINVAL;

			break;
		default:
			break;
		}
	}

	return EOK;
}

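/*
 * Illustrative sketch only: code_check() would reject the following
 * hypothetical two-command program, because the predicate at index 0 jumps
 * three commands ahead and 0 + 3 > 2 == cmdcount. A value of 2, i.e. a jump
 * to just past the last command, would still be accepted.
 *
 * @code
 * static irq_cmd_t bad_cmds[] = {
 *     { .cmd = CMD_PREDICATE, .srcarg = 1, .value = 3 },
 *     { .cmd = CMD_ACCEPT }
 * };
 * @endcode
 */
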
/** Free the top-half IRQ code.
 *
 * @param code Pointer to the top-half IRQ code.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half IRQ code from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half IRQ code.
 *
 * @return Kernel address of the copied IRQ code.
 *
 */
static irq_code_t *code_from_uspace(uspace_ptr_irq_code_t ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code));
	if (!code)
		return NULL;
	errno_t rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount);
	if (!ranges)
		goto error;
	rc = copy_from_uspace(ranges, (uintptr_t) code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount);
	if (!cmds)
		goto error;
	rc = copy_from_uspace(cmds, (uintptr_t) code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}

static void irq_hash_out(irq_t *irq)
{
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	if (irq->notif_cfg.hashed_in) {
		/* Remove the IRQ from the uspace IRQ hash table. */
		hash_table_remove_item(&irq_uspace_hash_table, &irq->link);
		irq->notif_cfg.hashed_in = false;
	}

	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

static void irq_destroy(void *arg)
{
	irq_t *irq = (irq_t *) arg;

	irq_hash_out(irq);

	/* Free up the IRQ code and associated structures. */
	code_free(irq->notif_cfg.code);
	slab_free(irq_cache, irq);
}

kobject_ops_t irq_kobject_ops = {
	.destroy = irq_destroy
};

/** Subscribe an answerbox as a receiving end for IRQ notifications.
 *
 * @param box      Receiving answerbox.
 * @param inr      IRQ number.
 * @param imethod  Interface and method to be associated with the notification.
 * @param ucode    Uspace pointer to top-half IRQ code.
 *
 * @param[out] uspace_handle Uspace pointer to IRQ capability handle.
 *
 * @return Error code.
 *
 */
errno_t ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
    uspace_ptr_irq_code_t ucode, uspace_ptr_cap_irq_handle_t uspace_handle)
{
	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ kernel object.
	 */
	cap_handle_t handle;
	errno_t rc = cap_alloc(TASK, &handle);
	if (rc != EOK)
		return rc;

	rc = copy_to_uspace(uspace_handle, &handle, sizeof(cap_handle_t));
	if (rc != EOK) {
		cap_free(TASK, handle);
		return rc;
	}

	irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
	if (!irq) {
		cap_free(TASK, handle);
		return ENOMEM;
	}

	kobject_t *kobject = kobject_alloc(FRAME_ATOMIC);
	if (!kobject) {
		cap_free(TASK, handle);
		slab_free(irq_cache, irq);
		return ENOMEM;
	}

	irq_initialize(irq);
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Insert the IRQ structure into the uspace IRQ hash table.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	irq->notif_cfg.hashed_in = true;
	hash_table_insert(&irq_uspace_hash_table, &irq->link);

	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq);
	cap_publish(TASK, handle, kobject);

	return EOK;
}

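/*
 * Illustrative sketch only: the function above backs the
 * SYS_IPC_IRQ_SUBSCRIBE syscall mentioned in the file header. A userspace
 * caller conceptually passes the IRQ number, the chosen interface/method,
 * a pointer to its irq_code_t and a place to store the returned capability
 * handle; the exact libc wrapper and argument order are not defined in this
 * file, so the pseudo-call below is an assumption, not an API reference.
 *
 * @code
 * cap_irq_handle_t ihandle;
 * errno_t rc = ipc_irq_subscribe_from_uspace(inr, imethod, &demo_code,
 *     &ihandle);  // hypothetical wrapper name
 * @endcode
 */
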
/** Unsubscribe a task from IRQ notifications.
 *
 * @param box     Answerbox associated with the notification.
 * @param handle  IRQ capability handle.
 *
 * @return EOK on success or an error code.
 *
 */
errno_t ipc_irq_unsubscribe(answerbox_t *box, cap_irq_handle_t handle)
{
	kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
	if (!kobj)
		return ENOENT;

	assert(kobj->irq->notif_cfg.answerbox == box);

	irq_hash_out(kobj->irq);

	kobject_put(kobj);
	cap_free(TASK, handle);

	return EOK;
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

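/*
 * Continuing the hypothetical demo_code example from the top of this file:
 * on an interrupt, CMD_PIO_READ_8 stores the device status into scratch[1],
 * CMD_AND masks the pending bit into scratch[2], and CMD_PREDICATE skips the
 * following CMD_ACCEPT whenever scratch[2] == 0, so the loop above runs off
 * the end of the program and the interrupt is declined.
 */
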
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	assert(irq);

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc();
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		ipc_set_imethod(&call->data, irq->notif_cfg.imethod);
		ipc_set_arg1(&call->data, irq->notif_cfg.scratch[1]);
		ipc_set_arg2(&call->data, irq->notif_cfg.scratch[2]);
		ipc_set_arg3(&call->data, irq->notif_cfg.scratch[3]);
		ipc_set_arg4(&call->data, irq->notif_cfg.scratch[4]);
		ipc_set_arg5(&call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc();
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		ipc_set_imethod(&call->data, irq->notif_cfg.imethod);
		ipc_set_arg1(&call->data, a1);
		ipc_set_arg2(&call->data, a2);
		ipc_set_arg3(&call->data, a3);
		ipc_set_arg4(&call->data, a4);
		ipc_set_arg5(&call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */