source: mainline/kernel/generic/src/ipc/irq.c@ 6a75c134

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 6a75c134 was 8a637a4, checked in by Martin Decky <martin@…>, 10 years ago

remove EEXISTS in favor of EEXIST

  • Property mode set to 100644
File size: 17.9 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when interrupt is detected. The application may provide a simple 'top-half'
40 * handler as part of its registration, which can perform simple operations
41 * (read/write port/memory, add information to notification IPC message).
42 *
43 * The structure of a notification message is as follows:
44 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
45 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
46 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
47 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
48 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
49 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
50 * - in_phone_hash: interrupt counter (may be needed to assure correct order
51 * in multithreaded drivers)
52 *
53 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
54 * ipc_irq_cleanup() and IRQ handlers:
55 *
56 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
57 * and answerbox lock, we can rule out race conditions between the
58 * registration functions and also the cleanup function. Thus the observer can
59 * either see the IRQ structure present in both the hash table and the
60 * answerbox list or absent in both. Views in which the IRQ structure would be
61 * linked in the hash table but not in the answerbox list, or vice versa, are
62 * not possible.
63 *
64 * By always taking the hash table lock and the IRQ structure lock, we can
65 * rule out a scenario in which we would free up an IRQ structure, which is
66 * still referenced by, for example, an IRQ handler. The locking scheme forces
67 * us to lock the IRQ structure only after any progressing IRQs on that
68 * structure are finished. Because we hold the hash table lock, we prevent new
69 * IRQs from taking new references to the IRQ structure.
70 *
71 */
72
73#include <arch.h>
74#include <mm/slab.h>
75#include <mm/page.h>
76#include <mm/km.h>
77#include <errno.h>
78#include <ddi/irq.h>
79#include <ipc/ipc.h>
80#include <ipc/irq.h>
81#include <syscall/copy.h>
82#include <console/console.h>
83#include <print.h>
84#include <macros.h>
85
86static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
87{
88 for (size_t i = 0; i < rangecount; i++) {
89#ifdef IO_SPACE_BOUNDARY
90 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
91#endif
92 km_unmap(ranges[i].base, ranges[i].size);
93 }
94}
95
/** Map PIO ranges and rewrite the pseudocode to use the mappings.
 *
 * Maps each PIO range into the kernel virtual address space (except for
 * ranges below IO_SPACE_BOUNDARY, where defined) and rewrites the address
 * of every PIO command from physical to the corresponding kernel virtual
 * address.
 *
 * @param ranges     Array of PIO ranges; base fields are rewritten in place
 *                   from physical to kernel virtual addresses.
 * @param rangecount Number of entries in @a ranges.
 * @param cmds       Array of top-half pseudocode commands; addr fields of
 *                   PIO commands are rewritten in place.
 * @param cmdcount   Number of entries in @a cmds.
 *
 * @return EOK on success, ENOMEM if a mapping failed, EINVAL if a PIO
 *         command addresses memory outside all defined ranges.
 *
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Ranges in the I/O space need no memory mapping. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Roll back only the ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/*
			 * Find the matching range. Compare against the saved
			 * physical bases, since ranges[].base may already
			 * have been rewritten to a virtual address above.
			 */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
176
177/** Statically check the top-half pseudocode
178 *
179 * Check the top-half pseudocode for invalid or unsafe
180 * constructs.
181 *
182 */
183static int code_check(irq_cmd_t *cmds, size_t cmdcount)
184{
185 for (size_t i = 0; i < cmdcount; i++) {
186 /*
187 * Check for accepted ranges.
188 */
189 if (cmds[i].cmd >= CMD_LAST)
190 return EINVAL;
191
192 if (cmds[i].srcarg >= IPC_CALL_LEN)
193 return EINVAL;
194
195 if (cmds[i].dstarg >= IPC_CALL_LEN)
196 return EINVAL;
197
198 switch (cmds[i].cmd) {
199 case CMD_PREDICATE:
200 /*
201 * Check for control flow overflow.
202 * Note that jumping just beyond the last
203 * command is a correct behaviour.
204 */
205 if (i + cmds[i].value > cmdcount)
206 return EINVAL;
207
208 break;
209 default:
210 break;
211 }
212 }
213
214 return EOK;
215}
216
217/** Free the top-half pseudocode.
218 *
219 * @param code Pointer to the top-half pseudocode.
220 *
221 */
222static void code_free(irq_code_t *code)
223{
224 if (code) {
225 ranges_unmap(code->ranges, code->rangecount);
226 free(code->ranges);
227 free(code->cmds);
228 free(code);
229 }
230}
231
232/** Copy the top-half pseudocode from userspace into the kernel.
233 *
234 * @param ucode Userspace address of the top-half pseudocode.
235 *
236 * @return Kernel address of the copied pseudocode.
237 *
238 */
239static irq_code_t *code_from_uspace(irq_code_t *ucode)
240{
241 irq_pio_range_t *ranges = NULL;
242 irq_cmd_t *cmds = NULL;
243
244 irq_code_t *code = malloc(sizeof(*code), 0);
245 int rc = copy_from_uspace(code, ucode, sizeof(*code));
246 if (rc != EOK)
247 goto error;
248
249 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
250 (code->cmdcount > IRQ_MAX_PROG_SIZE))
251 goto error;
252
253 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
254 rc = copy_from_uspace(ranges, code->ranges,
255 sizeof(code->ranges[0]) * code->rangecount);
256 if (rc != EOK)
257 goto error;
258
259 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
260 rc = copy_from_uspace(cmds, code->cmds,
261 sizeof(code->cmds[0]) * code->cmdcount);
262 if (rc != EOK)
263 goto error;
264
265 rc = code_check(cmds, code->cmdcount);
266 if (rc != EOK)
267 goto error;
268
269 rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
270 code->cmdcount);
271 if (rc != EOK)
272 goto error;
273
274 code->ranges = ranges;
275 code->cmds = cmds;
276
277 return code;
278
279error:
280 if (cmds)
281 free(cmds);
282
283 if (ranges)
284 free(ranges);
285
286 free(code);
287 return NULL;
288}
289
290/** Subscribe an answerbox as a receiving end for IRQ notifications.
291 *
292 * @param box Receiving answerbox.
293 * @param inr IRQ number.
294 * @param devno Device number.
295 * @param imethod Interface and method to be associated with the
296 * notification.
297 * @param ucode Uspace pointer to top-half pseudocode.
298 *
299 * @return EOK on success or a negative error code.
300 *
301 */
int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	/* Hash table key: (IRQ number, device number). */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	/* Copy in and validate the top-half pseudocode, if supplied. */
	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		/* An IRQ with the same (inr, devno) is already subscribed. */
		free(irq);
		return EEXIST;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}
369
370/** Unsubscribe task from IRQ notification.
371 *
372 * @param box Answerbox associated with the notification.
373 * @param inr IRQ number.
374 * @param devno Device number.
375 *
376 * @return EOK on success or a negative error code.
377 *
378 */
int ipc_irq_unsubscribe(answerbox_t *box, inr_t inr, devno_t devno)
{
	/* Hash table key: (IRQ number, device number). */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked (by hash_table_find()) */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will try
	 * to reacquire it. That basically violates the natural locking order,
	 * but a deadlock in hash_table_remove() is prevented by the fact that
	 * we already held the IRQ lock and didn't drop the hash table lock in
	 * the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the pseudo code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}
429
430/** Disconnect all IRQ notifications from an answerbox.
431 *
432 * This function is effective because the answerbox contains
433 * list of all irq_t structures that are subscribed to
434 * send notifications to it.
435 *
436 * @param box Answerbox for which we want to carry out the cleanup.
437 *
438 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	/* Detach and destroy one subscribed IRQ per iteration. */
	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 * (We cannot block on irq->lock here without
			 * violating the locking order against a concurrent
			 * IRQ handler; drop everything and restart.)
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		/* Reconstruct the hash table key for this IRQ. */
		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		/*
		 * Release both locks so that we can free the pseudo code.
		 */
		irq_spinlock_unlock(&box->irq_lock, false);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		code_free(irq->notif_cfg.code);
		free(irq);

		/* Reacquire both locks before taking another round. */
		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
		irq_spinlock_lock(&box->irq_lock, false);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
499
500/** Add a call to the proper answerbox queue.
501 *
502 * Assume irq->lock is locked and interrupts disabled.
503 *
504 * @param irq IRQ structure referencing the target answerbox.
505 * @param call IRQ notification call.
506 *
507 */
508static void send_call(irq_t *irq, call_t *call)
509{
510 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
511 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
512 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
513
514 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
515}
516
517/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
518 *
519 * @param irq IRQ structure.
520 *
521 * @return IRQ_ACCEPT if the interrupt is accepted by the
522 * pseudocode, IRQ_DECLINE otherwise.
523 *
524 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* No notification requested or no pseudocode: not our interrupt. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	/* Interpret the pseudocode program, one command per iteration. */
	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/*
			 * Skip the next 'value' commands when the predicate
			 * argument is zero. The jump target was bounds-checked
			 * by code_check() at subscription time.
			 */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
599
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		/*
		 * FRAME_ATOMIC: the allocation may fail in interrupt
		 * context; the notification is silently dropped then.
		 */
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up args from the top-half pseudocode scratch space */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}
634
635/** Send notification message.
636 *
637 * @param irq IRQ structure.
638 * @param a1 Driver-specific payload argument.
639 * @param a2 Driver-specific payload argument.
640 * @param a3 Driver-specific payload argument.
641 * @param a4 Driver-specific payload argument.
642 * @param a5 Driver-specific payload argument.
643 *
644 */
645void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
646 sysarg_t a4, sysarg_t a5)
647{
648 irq_spinlock_lock(&irq->lock, true);
649
650 if (irq->notif_cfg.answerbox) {
651 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
652 if (!call) {
653 irq_spinlock_unlock(&irq->lock, true);
654 return;
655 }
656
657 call->flags |= IPC_CALL_NOTIF;
658 /* Put a counter to the message */
659 call->priv = ++irq->notif_cfg.counter;
660
661 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
662 IPC_SET_ARG1(call->data, a1);
663 IPC_SET_ARG2(call->data, a2);
664 IPC_SET_ARG3(call->data, a3);
665 IPC_SET_ARG4(call->data, a4);
666 IPC_SET_ARG5(call->data, a5);
667
668 send_call(irq, call);
669 }
670
671 irq_spinlock_unlock(&irq->lock, true);
672}
673
674/** @}
675 */
Note: See TracBrowser for help on using the repository browser.