source: mainline/kernel/generic/src/ipc/irq.c@ e8a1530

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since e8a1530 was cfaa35a, checked in by Jakub Jermar <jakub@…>, 13 years ago

Rename call_t's link to ab_link as this link is exclusively used for
linking the call into one of the answerbox lists.

  • Property mode set to 100644
File size: 17.9 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to register to receive a notification
39 * when interrupt is detected. The application may provide a simple 'top-half'
40 * handler as part of its registration, which can perform simple operations
41 * (read/write port/memory, add information to notification IPC message).
42 *
43 * The structure of a notification message is as follows:
44 * - IMETHOD: interface and method as registered by
45 * the SYS_IRQ_REGISTER syscall
46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 *
54 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
55 * ipc_irq_cleanup() and IRQ handlers:
56 *
57 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
58 * and answerbox lock, we can rule out race conditions between the
59 * registration functions and also the cleanup function. Thus the observer can
60 * either see the IRQ structure present in both the hash table and the
61 * answerbox list or absent in both. Views in which the IRQ structure would be
62 * linked in the hash table but not in the answerbox list, or vice versa, are
63 * not possible.
64 *
65 * By always taking the hash table lock and the IRQ structure lock, we can
66 * rule out a scenario in which we would free up an IRQ structure, which is
67 * still referenced by, for example, an IRQ handler. The locking scheme forces
68 * us to lock the IRQ structure only after any progressing IRQs on that
69 * structure are finished. Because we hold the hash table lock, we prevent new
70 * IRQs from taking new references to the IRQ structure.
71 *
72 */
73
74#include <arch.h>
75#include <mm/slab.h>
76#include <mm/page.h>
77#include <mm/km.h>
78#include <errno.h>
79#include <ddi/irq.h>
80#include <ipc/ipc.h>
81#include <ipc/irq.h>
82#include <syscall/copy.h>
83#include <console/console.h>
84#include <print.h>
85#include <macros.h>
86
87static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
88{
89 for (size_t i = 0; i < rangecount; i++) {
90#ifdef IO_SPACE_BOUNDARY
91 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
92#endif
93 km_unmap(ranges[i].base, ranges[i].size);
94 }
95}
96
/** Map PIO ranges and rewrite pseudocode addresses to kernel virtual.
 *
 * Maps each PIO range into the kernel virtual address space and then
 * rewrites the address operand of every PIO command from physical to the
 * corresponding kernel virtual address. On failure, all mappings created
 * by this call are undone.
 *
 * @param ranges     Array of PIO ranges; base addresses are physical on
 *                   entry and rewritten to kernel virtual on success.
 * @param rangecount Number of PIO ranges.
 * @param cmds       Array of top-half pseudocode commands (rewritten in
 *                   place).
 * @param cmdcount   Number of commands.
 *
 * @return EOK on success, ENOMEM if a range cannot be mapped, EINVAL if a
 *         command addresses memory outside all defined ranges.
 *
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Port I/O below the boundary needs no memory mapping. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Undo only the mappings created so far (first i). */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/*
			 * Find the matching range; match against the saved
			 * physical base, since ranges[j].base may already
			 * hold a virtual address.
			 */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
177
178/** Statically check the top-half pseudocode
179 *
180 * Check the top-half pseudocode for invalid or unsafe
181 * constructs.
182 *
183 */
184static int code_check(irq_cmd_t *cmds, size_t cmdcount)
185{
186 for (size_t i = 0; i < cmdcount; i++) {
187 /*
188 * Check for accepted ranges.
189 */
190 if (cmds[i].cmd >= CMD_LAST)
191 return EINVAL;
192
193 if (cmds[i].srcarg >= IPC_CALL_LEN)
194 return EINVAL;
195
196 if (cmds[i].dstarg >= IPC_CALL_LEN)
197 return EINVAL;
198
199 switch (cmds[i].cmd) {
200 case CMD_PREDICATE:
201 /*
202 * Check for control flow overflow.
203 * Note that jumping just beyond the last
204 * command is a correct behaviour.
205 */
206 if (i + cmds[i].value > cmdcount)
207 return EINVAL;
208
209 break;
210 default:
211 break;
212 }
213 }
214
215 return EOK;
216}
217
218/** Free the top-half pseudocode.
219 *
220 * @param code Pointer to the top-half pseudocode.
221 *
222 */
223static void code_free(irq_code_t *code)
224{
225 if (code) {
226 ranges_unmap(code->ranges, code->rangecount);
227 free(code->ranges);
228 free(code->cmds);
229 free(code);
230 }
231}
232
233/** Copy the top-half pseudocode from userspace into the kernel.
234 *
235 * @param ucode Userspace address of the top-half pseudocode.
236 *
237 * @return Kernel address of the copied pseudocode.
238 *
239 */
240static irq_code_t *code_from_uspace(irq_code_t *ucode)
241{
242 irq_pio_range_t *ranges = NULL;
243 irq_cmd_t *cmds = NULL;
244
245 irq_code_t *code = malloc(sizeof(*code), 0);
246 int rc = copy_from_uspace(code, ucode, sizeof(*code));
247 if (rc != EOK)
248 goto error;
249
250 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
251 (code->cmdcount > IRQ_MAX_PROG_SIZE))
252 goto error;
253
254 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
255 rc = copy_from_uspace(ranges, code->ranges,
256 sizeof(code->ranges[0]) * code->rangecount);
257 if (rc != EOK)
258 goto error;
259
260 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
261 rc = copy_from_uspace(cmds, code->cmds,
262 sizeof(code->cmds[0]) * code->cmdcount);
263 if (rc != EOK)
264 goto error;
265
266 rc = code_check(cmds, code->cmdcount);
267 if (rc != EOK)
268 goto error;
269
270 rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
271 code->cmdcount);
272 if (rc != EOK)
273 goto error;
274
275 code->ranges = ranges;
276 code->cmds = cmds;
277
278 return code;
279
280error:
281 if (cmds)
282 free(cmds);
283
284 if (ranges)
285 free(ranges);
286
287 free(code);
288 return NULL;
289}
290
/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success, ELIMIT for an out-of-range IRQ number, EBADMEM
 *         if the pseudocode cannot be copied in, EEXISTS if the (inr,
 *         devno) pair is already registered.
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	/* Hash table key: (IRQ number, device number). */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		/* Duplicate registration; roll back everything. */
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}
370
/** Unregister task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success, ELIMIT for an out-of-range IRQ number, ENOENT if
 *         no matching registration exists.
 *
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	/* Hash table key: (IRQ number, device number). */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will try
	 * to reacquire it. That basically violates the natural locking order,
	 * but a deadlock in hash_table_remove() is prevented by the fact that
	 * we already held the IRQ lock and didn't drop the hash table lock in
	 * the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the pseudo code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}
430
/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains
 * list of all irq_t structures that are registered to
 * send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		/*
		 * Only trylock here: an IRQ handler may hold irq->lock while
		 * waiting for box->irq_lock, which we already hold.
		 */
		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		/* Reconstruct the hash table key from the IRQ structure. */
		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		/*
		 * Release both locks so that we can free the pseudo code.
		 */
		irq_spinlock_unlock(&box->irq_lock, false);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		code_free(irq->notif_cfg.code);
		free(irq);

		/* Reacquire both locks before taking another round. */
		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
		irq_spinlock_lock(&box->irq_lock, false);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
500
501/** Add a call to the proper answerbox queue.
502 *
503 * Assume irq->lock is locked and interrupts disabled.
504 *
505 * @param irq IRQ structure referencing the target answerbox.
506 * @param call IRQ notification call.
507 *
508 */
509static void send_call(irq_t *irq, call_t *call)
510{
511 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
512 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
513 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
514
515 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
516}
517
/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Decline if notifications are off or no program is installed. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	/*
	 * Interpret the program. srcarg/dstarg were range-checked by
	 * code_check() at registration time.
	 */
	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/* Skip the next 'value' commands if srcarg is zero. */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
600
/** IRQ top-half handler.
 *
 * Sends an IRQ notification message carrying the scratch values produced by
 * the top-half pseudocode to the registered answerbox.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		/*
		 * FRAME_ATOMIC: we are in interrupt context and must not
		 * block; silently drop the notification if allocation fails.
		 */
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up args */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}
635
636/** Send notification message.
637 *
638 * @param irq IRQ structure.
639 * @param a1 Driver-specific payload argument.
640 * @param a2 Driver-specific payload argument.
641 * @param a3 Driver-specific payload argument.
642 * @param a4 Driver-specific payload argument.
643 * @param a5 Driver-specific payload argument.
644 *
645 */
646void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
647 sysarg_t a4, sysarg_t a5)
648{
649 irq_spinlock_lock(&irq->lock, true);
650
651 if (irq->notif_cfg.answerbox) {
652 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
653 if (!call) {
654 irq_spinlock_unlock(&irq->lock, true);
655 return;
656 }
657
658 call->flags |= IPC_CALL_NOTIF;
659 /* Put a counter to the message */
660 call->priv = ++irq->notif_cfg.counter;
661
662 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
663 IPC_SET_ARG1(call->data, a1);
664 IPC_SET_ARG2(call->data, a2);
665 IPC_SET_ARG3(call->data, a3);
666 IPC_SET_ARG4(call->data, a4);
667 IPC_SET_ARG5(call->data, a5);
668
669 send_call(irq, call);
670 }
671
672 irq_spinlock_unlock(&irq->lock, true);
673}
674
675/** @}
676 */
Note: See TracBrowser for help on using the repository browser.