source: mainline/kernel/generic/src/ipc/irq.c @ 087768f

Last change on this file since 087768f was 472d813, checked in by Jakub Jermar <jakub@…>, 13 years ago

Test for IO_SPACE_BOUNDARY hit only on platforms with separate I/O space.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as registered by
 *            the SYS_IRQ_REGISTER syscall
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: payload modified by a 'top-half' handler
 * - ARG4: payload modified by a 'top-half' handler
 * - ARG5: payload modified by a 'top-half' handler
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *                  ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking the uspace IRQ hash table lock, the IRQ structure lock
 * and the answerbox lock together, we can rule out race conditions between
 * the registration functions and also the cleanup function. Thus an observer
 * can either see the IRQ structure present in both the hash table and the
 * answerbox list, or absent from both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme
 * forces us to lock the IRQ structure only after any in-progress IRQs on
 * that structure have finished. Because we hold the hash table lock, we
 * prevent new IRQs from taking new references to the IRQ structure.
 */

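/*
 * Example: a minimal top-half program for the interpreter in
 * ipc_irq_top_half_claim() below. This is only an illustrative sketch;
 * the port address 0x3fa and the bit mask 0x01 are made-up placeholders
 * for some device's interrupt status register.
 *
 *	irq_pio_range_t example_ranges[] = {
 *		{ .base = 0x3fa, .size = 1 }
 *	};
 *
 *	irq_cmd_t example_cmds[] = {
 *		// Latch the status register into scratch[1].
 *		{ .cmd = CMD_PIO_READ_8, .addr = (void *) 0x3fa, .dstarg = 1 },
 *		// Isolate the pending bit into scratch[2] (delivered as ARG2).
 *		{ .cmd = CMD_BTEST, .srcarg = 1, .value = 0x01, .dstarg = 2 },
 *		// If the bit is clear, skip the next command...
 *		{ .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *		// ...otherwise claim the interrupt for this answerbox.
 *		{ .cmd = CMD_ACCEPT }
 *	};
 *
 * Falling off the end of the program declines the interrupt.
 */
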
#include <arch.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>

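/** Unmap PIO ranges that are mapped in the kernel address space.
 *
 * On platforms with a separate I/O space, ranges that fall below
 * IO_SPACE_BOUNDARY were never mapped and are therefore skipped.
 *
 * @param ranges     Array of PIO ranges.
 * @param rangecount Number of ranges to unmap.
 *
 */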
static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	size_t i;

	for (i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

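/** Map the PIO ranges and rewrite the pseudocode addresses.
 *
 * Memory-mapped PIO ranges are mapped into the kernel virtual address
 * space; on platforms with a separate I/O space, ranges below
 * IO_SPACE_BOUNDARY are left untouched. The addresses used by the PIO
 * commands are then rewritten from physical addresses to the
 * corresponding kernel virtual addresses.
 *
 * @param ranges     Array of PIO ranges.
 * @param rangecount Number of ranges.
 * @param cmds       Array of top-half pseudocode commands.
 * @param cmdcount   Number of commands.
 *
 * @return EOK on success, ENOMEM if a range cannot be mapped or EINVAL
 *         if a command references an address outside of all ranges.
 *
 */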
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	uintptr_t *pbase;
	size_t i, j;

	/* Copy the physical base addresses aside. */
	pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode or NULL on failure.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);
	if (ranges)
		free(ranges);
	free(code);
	return NULL;
}

/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}

/** Unregister a task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will
	 * try to reacquire it. That basically violates the natural locking
	 * order, but a deadlock in hash_table_remove() is prevented by the
	 * fact that we already held the IRQ lock and didn't drop the hash
	 * table lock in the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a list of
 * all irq_t structures that are registered to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list),
		    irq_t, notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudocode and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uint32_t dstval;

		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		if (srcarg >= IPC_CALL_LEN)
			break;

		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			if (srcarg) {
				pio_write_8((ioport8_t *) code->cmds[i].addr,
				    (uint8_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_16:
			if (srcarg) {
				pio_write_16((ioport16_t *) code->cmds[i].addr,
				    (uint16_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_32:
			if (srcarg) {
				pio_write_32((ioport32_t *) code->cmds[i].addr,
				    (uint32_t) scratch[srcarg]);
			}
			break;
		case CMD_BTEST:
			if ((srcarg) && (dstarg)) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
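		/*
		 * Conditional skip: if the tested scratch value is zero,
		 * the next 'value' commands are skipped.
		 */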
		case CMD_PREDICATE:
			if ((srcarg) && (!scratch[srcarg])) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter into the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */