source: mainline/kernel/generic/src/ipc/irq.c@ bd8c6537

Last change on this file was bd8c6537, checked in by Jakub Jermar <jakub@…>, 13 years ago

Check whether the IRQ code provides PIO ranges for all used memory
mapped addresses.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to register to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its registration, which can perform simple
 * operations (read/write a port or memory, add information to the
 * notification IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as registered by
 *            the SYS_IRQ_REGISTER syscall
 * - ARG1: payload modified by a 'top-half' handler
 * - ARG2: payload modified by a 'top-half' handler
 * - ARG3: payload modified by a 'top-half' handler
 * - ARG4: payload modified by a 'top-half' handler
 * - ARG5: payload modified by a 'top-half' handler
 * - in_phone_hash: interrupt counter (may be needed to ensure correct order
 *                  in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking the uspace IRQ hash table lock, the IRQ structure lock
 * and the answerbox lock together, we can rule out race conditions between
 * the registration functions and also the cleanup function. Thus the observer
 * can either see the IRQ structure present in both the hash table and the
 * answerbox list, or absent in both. Views in which the IRQ structure would
 * be linked in the hash table but not in the answerbox list, or vice versa,
 * are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we can
 * rule out a scenario in which we would free up an IRQ structure that is
 * still referenced by, for example, an IRQ handler. The locking scheme forces
 * us to lock the IRQ structure only after any in-progress IRQs on that
 * structure are finished. Because we hold the hash table lock, we prevent new
 * IRQs from taking new references to the IRQ structure.
 */

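/*
 * Illustrative sketch, not part of this file's logic: a driver-side top-half
 * program as it might be passed to the SYS_IRQ_REGISTER syscall. The device,
 * its register layout and the physical address 0x81000000 are hypothetical;
 * only the irq_code_t/irq_cmd_t fields and the CMD_* opcodes interpreted by
 * ipc_irq_top_half_claim() below come from the real interface. The program
 * reads a status byte into scratch[2], masks the interrupt-pending bit into
 * scratch[3], skips the CMD_ACCEPT (and thus declines the IRQ) if the bit is
 * clear, and accepts it otherwise; scratch[2] and scratch[3] then arrive in
 * ARG2 and ARG3 of the notification message.
 *
 *	static irq_pio_range_t hypo_ranges[] = {
 *		{ .base = 0x81000000, .size = 4096 }
 *	};
 *
 *	static irq_cmd_t hypo_cmds[] = {
 *		{ .cmd = CMD_PIO_READ_8, .addr = (void *) 0x81000000, .dstarg = 2 },
 *		{ .cmd = CMD_BTEST, .value = 0x01, .srcarg = 2, .dstarg = 3 },
 *		{ .cmd = CMD_PREDICATE, .value = 1, .srcarg = 3 },
 *		{ .cmd = CMD_ACCEPT }
 *	};
 *
 *	static irq_code_t hypo_code = {
 *		.rangecount = 1,
 *		.ranges = hypo_ranges,
 *		.cmdcount = 4,
 *		.cmds = hypo_cmds
 *	};
 *
 * ranges_map_and_apply() rewrites the physical address in each PIO command
 * to the kernel virtual address at which the range was mapped by km_map().
 */
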
#include <arch.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>

static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
	size_t i;

	for (i = 0; i < rangecount; i++) {
		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
			km_unmap(ranges[i].base, ranges[i].size);
	}
}

static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	uintptr_t *pbase;
	size_t i, j;

	/* Copy the physical base addresses aside. */
	pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (i = 0; i < rangecount; i++) {
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (i = 0; i < cmdcount; i++) {
		uintptr_t addr;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_READ_16:
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_8:
		case CMD_PIO_WRITE_A_16:
		case CMD_PIO_WRITE_A_32:
			break;
		default:
			/* Move on to the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		/* Process only memory-mapped PIO addresses. */
		if ((void *) addr < IO_SPACE_BOUNDARY)
			continue;

		for (j = 0; j < rangecount; j++) {

			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, 1))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
	if (code) {
		ranges_unmap(code->ranges, code->rangecount);
		free(code->ranges);
		free(code->cmds);
		free(code);
	}
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);
	if (ranges)
		free(ranges);
	free(code);
	return NULL;
}

/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box      Receiving answerbox.
 * @param inr      IRQ number.
 * @param devno    Device number.
 * @param imethod  Interface and method to be associated with the
 *                 notification.
 * @param ucode    Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}

/** Unregister a task from IRQ notification.
 *
 * @param box    Answerbox associated with the notification.
 * @param inr    IRQ number.
 * @param devno  Device number.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudocode and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will
	 * try to reacquire it. That basically violates the natural locking
	 * order, but a deadlock in hash_table_remove() is prevented by the
	 * fact that we already held the IRQ lock and didn't drop the hash
	 * table lock in the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a list of all
 * irq_t structures that are registered to send notifications to it.
 *
 * @param box  Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudocode and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq   IRQ structure referencing the target answerbox.
 * @param call  IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ.
 *
 * @param irq  IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uint32_t dstval;

		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		if (srcarg >= IPC_CALL_LEN)
			break;

		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			if (srcarg) {
				pio_write_8((ioport8_t *) code->cmds[i].addr,
				    (uint8_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_16:
			if (srcarg) {
				pio_write_16((ioport16_t *) code->cmds[i].addr,
				    (uint16_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_32:
			if (srcarg) {
				pio_write_32((ioport32_t *) code->cmds[i].addr,
				    (uint32_t) scratch[srcarg]);
			}
			break;
		case CMD_BTEST:
			if ((srcarg) && (dstarg)) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			if ((srcarg) && (!scratch[srcarg])) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	return IRQ_DECLINE;
}

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq  IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Attach the interrupt counter to the message. */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up the arguments. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}

/** Send a notification message.
 *
 * @param irq  IRQ structure.
 * @param a1   Driver-specific payload argument.
 * @param a2   Driver-specific payload argument.
 * @param a3   Driver-specific payload argument.
 * @param a4   Driver-specific payload argument.
 * @param a5   Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Attach the interrupt counter to the message. */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */