source: mainline/kernel/generic/src/ipc/irq.c@ cfaa35a

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as registered by
 *   the SYS_IRQ_REGISTER syscall
 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
 * and answerbox lock, we can rule out race conditions between the
 * registration functions and also the cleanup function. Thus an observer can
 * either see the IRQ structure present in both the hash table and the
 * answerbox list, or absent from both. Views in which the IRQ structure would
 * be linked in the hash table but not in the answerbox list, or vice versa,
 * are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme forces
 * us to lock the IRQ structure only after any IRQs in progress on that
 * structure have finished. Because we hold the hash table lock, we prevent
 * new IRQs from taking new references to the IRQ structure.
 */

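/*
 * Illustrative sketch only: the kind of top-half pseudocode a userspace
 * driver might pass to SYS_IRQ_REGISTER. The types (irq_pio_range_t,
 * irq_cmd_t, irq_code_t) and the commands are the ones interpreted by
 * ipc_irq_top_half_claim() below; the register offsets and the mask value
 * are made up for the example.
 *
 *     static irq_pio_range_t demo_ranges[] = {
 *         { .base = 0x3f8, .size = 8 }    // hypothetical PIO window
 *     };
 *
 *     static irq_cmd_t demo_cmds[] = {
 *         // Read a (made-up) status register into scratch[1].
 *         { .cmd = CMD_PIO_READ_8, .addr = (void *) 0x3fa, .dstarg = 1 },
 *         // Keep only a (made-up) "interrupt pending" bit in scratch[2].
 *         { .cmd = CMD_AND, .srcarg = 1, .dstarg = 2, .value = 0x1 },
 *         // If the bit is clear, skip the accept and decline instead.
 *         { .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *         { .cmd = CMD_ACCEPT },
 *         { .cmd = CMD_DECLINE }
 *     };
 *
 *     static irq_code_t demo_code = {
 *         .rangecount = 1,
 *         .ranges = demo_ranges,
 *         .cmdcount = 5,
 *         .cmds = demo_cmds
 *     };
 *
 * After registration, the scratch[1] and scratch[2] values computed by this
 * program arrive in the notification as ARG1 and ARG2.
 */
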
#include <arch.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>

static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
    for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
        if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
            km_unmap(ranges[i].base, ranges[i].size);
    }
}

static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
    /* Copy the physical base addresses aside. */
    uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
    for (size_t i = 0; i < rangecount; i++)
        pbase[i] = ranges[i].base;

    /* Map the PIO ranges into the kernel virtual address space. */
    for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
        if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
            continue;
#endif
        ranges[i].base = km_map(pbase[i], ranges[i].size,
            PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
        if (!ranges[i].base) {
            ranges_unmap(ranges, i);
            free(pbase);
            return ENOMEM;
        }
    }

    /* Rewrite the pseudocode addresses from physical to kernel virtual. */
    for (size_t i = 0; i < cmdcount; i++) {
        uintptr_t addr;
        size_t size;

        /* Process only commands that use an address. */
        switch (cmds[i].cmd) {
        case CMD_PIO_READ_8:
        case CMD_PIO_WRITE_8:
        case CMD_PIO_WRITE_A_8:
            size = 1;
            break;
        case CMD_PIO_READ_16:
        case CMD_PIO_WRITE_16:
        case CMD_PIO_WRITE_A_16:
            size = 2;
            break;
        case CMD_PIO_READ_32:
        case CMD_PIO_WRITE_32:
        case CMD_PIO_WRITE_A_32:
            size = 4;
            break;
        default:
            /* Move on to the next command. */
            continue;
        }

        addr = (uintptr_t) cmds[i].addr;

        size_t j;
        for (j = 0; j < rangecount; j++) {
            /* Find the matching range. */
            if (!iswithin(pbase[j], ranges[j].size, addr, size))
                continue;

            /* Switch the command to a kernel virtual address. */
            addr -= pbase[j];
            addr += ranges[j].base;

            cmds[i].addr = (void *) addr;
            break;
        }

        if (j == rangecount) {
            /*
             * The address used in this command is outside of all
             * defined ranges.
             */
            ranges_unmap(ranges, rangecount);
            free(pbase);
            return EINVAL;
        }
    }

    free(pbase);
    return EOK;
}
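/*
 * Worked example with made-up numbers: if a range arrives with base
 * 0xfe003000 and size 0x1000, and km_map() happens to return 0xffff4000
 * for it, then a command whose addr is 0xfe003010 is rewritten to
 * 0xfe003010 - 0xfe003000 + 0xffff4000 = 0xffff4010. A command whose
 * address falls outside every supplied range makes the whole registration
 * fail with EINVAL.
 */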

/** Statically check the top-half pseudocode.
 *
 * Check the top-half pseudocode for invalid or unsafe
 * constructs.
 *
 */
static int code_check(irq_cmd_t *cmds, size_t cmdcount)
{
    for (size_t i = 0; i < cmdcount; i++) {
        /*
         * Check for accepted ranges.
         */
        if (cmds[i].cmd >= CMD_LAST)
            return EINVAL;

        if (cmds[i].srcarg >= IPC_CALL_LEN)
            return EINVAL;

        if (cmds[i].dstarg >= IPC_CALL_LEN)
            return EINVAL;

        switch (cmds[i].cmd) {
        case CMD_PREDICATE:
            /*
             * Check for control flow overflow.
             * Note that jumping just beyond the last
             * command is correct behaviour.
             */
            if (i + cmds[i].value > cmdcount)
                return EINVAL;

            break;
        default:
            break;
        }
    }

    return EOK;
}
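/*
 * Example of the CMD_PREDICATE bound above (hypothetical numbers): with
 * cmdcount == 3, a predicate at index 1 may carry value 2, because the
 * skip then lands just beyond the last command and simply terminates the
 * program, whereas value 3 would jump past that point and is rejected
 * with EINVAL.
 */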

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
    if (code) {
        ranges_unmap(code->ranges, code->rangecount);
        free(code->ranges);
        free(code->cmds);
        free(code);
    }
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
    irq_pio_range_t *ranges = NULL;
    irq_cmd_t *cmds = NULL;

    irq_code_t *code = malloc(sizeof(*code), 0);
    int rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != EOK)
        goto error;

    if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
        (code->cmdcount > IRQ_MAX_PROG_SIZE))
        goto error;

    ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
    rc = copy_from_uspace(ranges, code->ranges,
        sizeof(code->ranges[0]) * code->rangecount);
    if (rc != EOK)
        goto error;

    cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
    rc = copy_from_uspace(cmds, code->cmds,
        sizeof(code->cmds[0]) * code->cmdcount);
    if (rc != EOK)
        goto error;

    rc = code_check(cmds, code->cmdcount);
    if (rc != EOK)
        goto error;

    rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
        code->cmdcount);
    if (rc != EOK)
        goto error;

    code->ranges = ranges;
    code->cmds = cmds;

    return code;

error:
    if (cmds)
        free(cmds);

    if (ranges)
        free(ranges);

    free(code);
    return NULL;
}
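/*
 * Note on the copies above: the userspace irq_code_t is a small header
 * whose 'ranges' and 'cmds' members point to arrays living elsewhere in
 * userspace, hence the three separate copy_from_uspace() calls (header,
 * range array, command array). On any failure the partially copied
 * buffers are freed and NULL is returned to the caller.
 */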

/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
    sysarg_t key[] = {
        (sysarg_t) inr,
        (sysarg_t) devno
    };

    if ((inr < 0) || (inr > last_inr))
        return ELIMIT;

    irq_code_t *code;
    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    /*
     * Allocate and populate the IRQ structure.
     */
    irq_t *irq = malloc(sizeof(irq_t), 0);

    irq_initialize(irq);
    irq->devno = devno;
    irq->inr = inr;
    irq->claim = ipc_irq_top_half_claim;
    irq->handler = ipc_irq_top_half_handler;
    irq->notif_cfg.notify = true;
    irq->notif_cfg.answerbox = box;
    irq->notif_cfg.imethod = imethod;
    irq->notif_cfg.code = code;
    irq->notif_cfg.counter = 0;

    /*
     * Enlist the IRQ structure in the uspace IRQ hash table and the
     * answerbox's list.
     */
    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

    link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
    if (hlp) {
        irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

        /* hirq is locked */
        irq_spinlock_unlock(&hirq->lock, false);
        code_free(code);
        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

        free(irq);
        return EEXISTS;
    }

    /* Locking is not really necessary, but paranoid */
    irq_spinlock_lock(&irq->lock, false);
    irq_spinlock_lock(&box->irq_lock, false);

    hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
    list_append(&irq->notif_cfg.link, &box->irq_list);

    irq_spinlock_unlock(&box->irq_lock, false);
    irq_spinlock_unlock(&irq->lock, false);
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

    return EOK;
}
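/*
 * Usage sketch (assumption: this mirrors how the SYS_IRQ_REGISTER syscall
 * handler is expected to call in here, passing the calling task's
 * answerbox):
 *
 *     rc = ipc_irq_register(&TASK->answerbox, inr, devno, imethod, ucode);
 *
 * A second registration for the same (inr, devno) pair fails with EEXISTS;
 * the top-half pseudocode, if any, is validated and its PIO ranges mapped
 * before the IRQ structure becomes visible in the hash table.
 */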

/** Unregister a task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
    sysarg_t key[] = {
        (sysarg_t) inr,
        (sysarg_t) devno
    };

    if ((inr < 0) || (inr > last_inr))
        return ELIMIT;

    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
    link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
    if (!lnk) {
        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
        return ENOENT;
    }

    irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

    /* irq is locked */
    irq_spinlock_lock(&box->irq_lock, false);

    ASSERT(irq->notif_cfg.answerbox == box);

    /* Remove the IRQ from the answerbox's list. */
    list_remove(&irq->notif_cfg.link);

    /*
     * We need to drop the IRQ lock now because hash_table_remove() will try
     * to reacquire it. That basically violates the natural locking order,
     * but a deadlock in hash_table_remove() is prevented by the fact that
     * we already held the IRQ lock and didn't drop the hash table lock in
     * the meantime.
     */
    irq_spinlock_unlock(&irq->lock, false);

    /* Remove the IRQ from the uspace IRQ hash table. */
    hash_table_remove(&irq_uspace_hash_table, key, 2);

    irq_spinlock_unlock(&box->irq_lock, false);
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

    /* Free up the pseudocode and associated structures. */
    code_free(irq->notif_cfg.code);

    /* Free up the IRQ structure. */
    free(irq);

    return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a
 * list of all irq_t structures that are registered to
 * send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
    irq_spinlock_lock(&box->irq_lock, false);

    while (!list_empty(&box->irq_list)) {
        DEADLOCK_PROBE_INIT(p_irqlock);

        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
            notif_cfg.link);

        if (!irq_spinlock_trylock(&irq->lock)) {
            /*
             * Avoid deadlock by trying again.
             */
            irq_spinlock_unlock(&box->irq_lock, false);
            irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
            DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
            goto loop;
        }

        sysarg_t key[2];
        key[0] = irq->inr;
        key[1] = irq->devno;

        ASSERT(irq->notif_cfg.answerbox == box);

        /* Unlist from the answerbox. */
        list_remove(&irq->notif_cfg.link);

        /*
         * We need to drop the IRQ lock now because hash_table_remove()
         * will try to reacquire it. That basically violates the natural
         * locking order, but a deadlock in hash_table_remove() is
         * prevented by the fact that we already held the IRQ lock and
         * didn't drop the hash table lock in the meantime.
         */
        irq_spinlock_unlock(&irq->lock, false);

        /* Remove from the hash table. */
        hash_table_remove(&irq_uspace_hash_table, key, 2);

        /*
         * Release both locks so that we can free the pseudocode.
         */
        irq_spinlock_unlock(&box->irq_lock, false);
        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

        code_free(irq->notif_cfg.code);
        free(irq);

        /* Reacquire both locks before taking another round. */
        irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
        irq_spinlock_lock(&box->irq_lock, false);
    }

    irq_spinlock_unlock(&box->irq_lock, false);
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
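/*
 * Why the trylock-and-restart dance above: the notification path takes the
 * IRQ lock first and the answerbox irq_lock second (ipc_irq_send_msg()
 * locks irq->lock and then send_call() locks the answerbox irq_lock).
 * Cleanup walks the answerbox list, so it already holds box->irq_lock when
 * it goes for irq->lock; taking it unconditionally could deadlock against
 * that path, hence the trylock and the retry from 'loop:'.
 */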

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
    irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
    list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
    irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

    waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
    irq_code_t *code = irq->notif_cfg.code;
    uint32_t *scratch = irq->notif_cfg.scratch;

    if (!irq->notif_cfg.notify)
        return IRQ_DECLINE;

    if (!code)
        return IRQ_DECLINE;

    for (size_t i = 0; i < code->cmdcount; i++) {
        uintptr_t srcarg = code->cmds[i].srcarg;
        uintptr_t dstarg = code->cmds[i].dstarg;

        switch (code->cmds[i].cmd) {
        case CMD_PIO_READ_8:
            scratch[dstarg] =
                pio_read_8((ioport8_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_READ_16:
            scratch[dstarg] =
                pio_read_16((ioport16_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_READ_32:
            scratch[dstarg] =
                pio_read_32((ioport32_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_WRITE_8:
            pio_write_8((ioport8_t *) code->cmds[i].addr,
                (uint8_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_16:
            pio_write_16((ioport16_t *) code->cmds[i].addr,
                (uint16_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_32:
            pio_write_32((ioport32_t *) code->cmds[i].addr,
                (uint32_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_A_8:
            pio_write_8((ioport8_t *) code->cmds[i].addr,
                (uint8_t) scratch[srcarg]);
            break;
        case CMD_PIO_WRITE_A_16:
            pio_write_16((ioport16_t *) code->cmds[i].addr,
                (uint16_t) scratch[srcarg]);
            break;
        case CMD_PIO_WRITE_A_32:
            pio_write_32((ioport32_t *) code->cmds[i].addr,
                (uint32_t) scratch[srcarg]);
            break;
        case CMD_LOAD:
            scratch[dstarg] = code->cmds[i].value;
            break;
        case CMD_AND:
            scratch[dstarg] = scratch[srcarg] &
                code->cmds[i].value;
            break;
        case CMD_PREDICATE:
            if (scratch[srcarg] == 0)
                i += code->cmds[i].value;

            break;
        case CMD_ACCEPT:
            return IRQ_ACCEPT;
        case CMD_DECLINE:
        default:
            return IRQ_DECLINE;
        }
    }

    return IRQ_DECLINE;
}
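/*
 * Tracing the illustrative demo_cmds program from the top of this file
 * through the interpreter above: CMD_PIO_READ_8 latches the hypothetical
 * status register into scratch[1], CMD_AND leaves the masked pending bit
 * in scratch[2], and CMD_PREDICATE skips the CMD_ACCEPT when scratch[2]
 * is zero, so the claim handler returns IRQ_DECLINE for interrupts that
 * are not ours and IRQ_ACCEPT otherwise. The scratch values then surface
 * as ARG1 and ARG2 of the notification built in ipc_irq_top_half_handler().
 */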

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
    ASSERT(irq);

    ASSERT(interrupts_disabled());
    ASSERT(irq_spinlock_locked(&irq->lock));

    if (irq->notif_cfg.answerbox) {
        call_t *call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call)
            return;

        call->flags |= IPC_CALL_NOTIF;
        /* Attach a counter to the message. */
        call->priv = ++irq->notif_cfg.counter;

        /* Set up the arguments. */
        IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
        IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
        IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
        IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
        IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
        IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

        send_call(irq, call);
    }
}
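/*
 * The counter stored in call->priv above is what the file header describes
 * as the in_phone_hash interrupt counter of the delivered notification;
 * multithreaded drivers can use it to reconstruct the order of interrupts.
 * (How priv is surfaced to userspace is handled by the IPC delivery path,
 * outside this file.)
 */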

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
    irq_spinlock_lock(&irq->lock, true);

    if (irq->notif_cfg.answerbox) {
        call_t *call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            irq_spinlock_unlock(&irq->lock, true);
            return;
        }

        call->flags |= IPC_CALL_NOTIF;
        /* Attach a counter to the message. */
        call->priv = ++irq->notif_cfg.counter;

        IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        IPC_SET_ARG4(call->data, a4);
        IPC_SET_ARG5(call->data, a5);

        send_call(irq, call);
    }

    irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */