source: mainline/kernel/generic/src/ipc/irq.c@ f2bbe8c

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since f2bbe8c was f2bbe8c, checked in by Jakub Jermar <jakub@…>, 14 years ago
  • Enforce PIO ranges no matter whether the address is in memory or in a separate I/O space.

  • Be more exact when checking whether the accessed PIO register fits within one PIO range.
  • Property mode set to 100644
File size: 17.0 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to register to receive a notification
39 * when interrupt is detected. The application may provide a simple 'top-half'
40 * handler as part of its registration, which can perform simple operations
41 * (read/write port/memory, add information to notification ipc message).
42 *
43 * The structure of a notification message is as follows:
44 * - IMETHOD: interface and method as registered by
45 * the SYS_IRQ_REGISTER syscall
46 * - ARG1: payload modified by a 'top-half' handler
47 * - ARG2: payload modified by a 'top-half' handler
48 * - ARG3: payload modified by a 'top-half' handler
49 * - ARG4: payload modified by a 'top-half' handler
50 * - ARG5: payload modified by a 'top-half' handler
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 *
54 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
55 * ipc_irq_cleanup() and IRQ handlers:
56 *
57 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
58 * and answerbox lock, we can rule out race conditions between the
59 * registration functions and also the cleanup function. Thus the observer can
60 * either see the IRQ structure present in both the hash table and the
61 * answerbox list or absent in both. Views in which the IRQ structure would be
62 * linked in the hash table but not in the answerbox list, or vice versa, are
63 * not possible.
64 *
65 * By always taking the hash table lock and the IRQ structure lock, we can
66 * rule out a scenario in which we would free up an IRQ structure, which is
67 * still referenced by, for example, an IRQ handler. The locking scheme forces
68 * us to lock the IRQ structure only after any progressing IRQs on that
69 * structure are finished. Because we hold the hash table lock, we prevent new
70 * IRQs from taking new references to the IRQ structure.
71 *
72 */
73
74#include <arch.h>
75#include <mm/slab.h>
76#include <mm/page.h>
77#include <mm/km.h>
78#include <errno.h>
79#include <ddi/irq.h>
80#include <ipc/ipc.h>
81#include <ipc/irq.h>
82#include <syscall/copy.h>
83#include <console/console.h>
84#include <print.h>
85#include <macros.h>
86
87static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
88{
89 size_t i;
90
91 for (i = 0; i < rangecount; i++) {
92 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
93 km_unmap(ranges[i].base, ranges[i].size);
94 }
95}
96
/** Map PIO ranges and rewrite the pseudocode to use the mappings.
 *
 * Memory-mapped ranges (base at or above IO_SPACE_BOUNDARY) are mapped
 * into the kernel virtual address space; ranges in a separate I/O space
 * are left untouched. The addresses of PIO commands, which arrive as
 * physical addresses, are then translated to the corresponding kernel
 * virtual addresses. Each accessed PIO register must fit entirely within
 * one declared range.
 *
 * @param ranges     Array of PIO ranges; base addresses of memory-mapped
 *                   entries are rewritten in place from physical to
 *                   kernel virtual addresses.
 * @param rangecount Number of entries in @a ranges.
 * @param cmds       Array of top-half pseudocode commands; addresses of
 *                   PIO commands are rewritten in place.
 * @param cmdcount   Number of entries in @a cmds.
 *
 * @return EOK on success, ENOMEM if a mapping failed, EINVAL if a PIO
 *         command accesses an address outside of all declared ranges.
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	uintptr_t *pbase;
	size_t i, j;

	/*
	 * Copy the physical base addresses aside, because ranges[i].base is
	 * overwritten with the kernel virtual address below but the physical
	 * address is still needed for matching command addresses.
	 */
	pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (i = 0; i < rangecount; i++) {
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Undo only the i ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		for (j = 0; j < rangecount; j++) {

			/*
			 * Find the matching range: the whole size-byte
			 * register must lie within it.
			 */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
178
179/** Free the top-half pseudocode.
180 *
181 * @param code Pointer to the top-half pseudocode.
182 *
183 */
184static void code_free(irq_code_t *code)
185{
186 if (code) {
187 ranges_unmap(code->ranges, code->rangecount);
188 free(code->ranges);
189 free(code->cmds);
190 free(code);
191 }
192}
193
/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * The range and command counts are validated against IRQ_MAX_RANGE_COUNT
 * and IRQ_MAX_PROG_SIZE before the variable-sized arrays are copied in,
 * and the PIO ranges are mapped and applied to the commands.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode, or NULL on failure
 *         (bad userspace pointer, counts too large, mapping failure, or
 *         a command address outside all declared ranges).
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	/* Reject oversized programs before allocating for them. */
	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	/* Copy in the PIO ranges; code->ranges still points to uspace here. */
	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	/* Copy in the commands; code->cmds still points to uspace here. */
	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	/*
	 * On failure this unmaps anything it mapped itself, so the error
	 * path below only needs to free the kernel copies.
	 */
	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Retarget the structure at the kernel copies. */
	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);
	if (ranges)
		free(ranges);
	free(code);
	return NULL;
}
245
/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode.
 *
 * @return EOK on success, ELIMIT if @a inr is out of range, EBADMEM if
 *         the pseudocode could not be copied from userspace, or EEXISTS
 *         if the (inr, devno) pair is already registered.
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	/* The (inr, devno) pair keys the uspace IRQ hash table. */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);

	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

		/* hirq is locked */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		free(irq);
		return EEXISTS;
	}

	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	return EOK;
}
324
/** Unregister task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success, ELIMIT if @a inr is out of range, or ENOENT
 *         if no IRQ is registered under the (inr, devno) key.
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};

	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}

	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);

	/* irq is locked (hash_table_find() returned it locked) */
	irq_spinlock_lock(&box->irq_lock, false);

	ASSERT(irq->notif_cfg.answerbox == box);

	/* Free up the pseudo code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will try
	 * to reacquire it. That basically violates the natural locking order,
	 * but a deadlock in hash_table_remove() is prevented by the fact that
	 * we already held the IRQ lock and didn't drop the hash table lock in
	 * the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ structure. */
	free(irq);

	return EOK;
}
382
/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains
 * list of all irq_t structures that are registered to
 * send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		/*
		 * Trylock only: a concurrent IRQ handler may hold irq->lock
		 * and be waiting for one of the locks we already hold.
		 */
		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		/* Reconstruct the hash table key from the IRQ structure. */
		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;

		ASSERT(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/* Free up the pseudo code and associated structures. */
		code_free(irq->notif_cfg.code);

		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);

		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);

		free(irq);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
444
445/** Add a call to the proper answerbox queue.
446 *
447 * Assume irq->lock is locked and interrupts disabled.
448 *
449 * @param irq IRQ structure referencing the target answerbox.
450 * @param call IRQ notification call.
451 *
452 */
453static void send_call(irq_t *irq, call_t *call)
454{
455 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
456 list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
457 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
458
459 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
460}
461
/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
 *
 * Interprets the registered pseudocode program command by command. PIO
 * read results may be stored into the per-IRQ scratch registers; a zero
 * destination argument discards the value (scratch[0] is never written),
 * and a zero source argument skips the corresponding write/test.
 * Arguments are bounds-checked against IPC_CALL_LEN; an out-of-bounds
 * argument terminates the program and declines the interrupt.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Without notification or pseudocode there is nothing to claim. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uint32_t dstval;

		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		/* Out-of-bounds argument: abort the program (declines). */
		if (srcarg >= IPC_CALL_LEN)
			break;

		if (dstarg >= IPC_CALL_LEN)
			break;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			/* Write the value of a scratch register to the port. */
			if (srcarg) {
				pio_write_8((ioport8_t *) code->cmds[i].addr,
				    (uint8_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_16:
			if (srcarg) {
				pio_write_16((ioport16_t *) code->cmds[i].addr,
				    (uint16_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_32:
			if (srcarg) {
				pio_write_32((ioport32_t *) code->cmds[i].addr,
				    (uint32_t) scratch[srcarg]);
			}
			break;
		case CMD_BTEST:
			/* Bit-test: AND a scratch register with a mask. */
			if ((srcarg) && (dstarg)) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			/* Skip the next 'value' commands if srcarg is zero. */
			if ((srcarg) && (!scratch[srcarg])) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
561
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * Sends a notification message carrying the pseudocode scratch registers
 * to the configured answerbox, if any. If the call cannot be allocated,
 * the notification is silently dropped.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);

	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		/*
		 * FRAME_ATOMIC: presumably a non-blocking allocation suited
		 * to interrupt context — confirm against ipc_call_alloc().
		 */
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up args from scratch registers 1-5 (0 is unused). */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}
596
597/** Send notification message.
598 *
599 * @param irq IRQ structure.
600 * @param a1 Driver-specific payload argument.
601 * @param a2 Driver-specific payload argument.
602 * @param a3 Driver-specific payload argument.
603 * @param a4 Driver-specific payload argument.
604 * @param a5 Driver-specific payload argument.
605 *
606 */
607void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
608 sysarg_t a4, sysarg_t a5)
609{
610 irq_spinlock_lock(&irq->lock, true);
611
612 if (irq->notif_cfg.answerbox) {
613 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
614 if (!call) {
615 irq_spinlock_unlock(&irq->lock, true);
616 return;
617 }
618
619 call->flags |= IPC_CALL_NOTIF;
620 /* Put a counter to the message */
621 call->priv = ++irq->notif_cfg.counter;
622
623 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
624 IPC_SET_ARG1(call->data, a1);
625 IPC_SET_ARG2(call->data, a2);
626 IPC_SET_ARG3(call->data, a3);
627 IPC_SET_ARG4(call->data, a4);
628 IPC_SET_ARG5(call->data, a5);
629
630 send_call(irq, call);
631 }
632
633 irq_spinlock_unlock(&irq->lock, true);
634}
635
636/** @}
637 */
Note: See TracBrowser for help on using the repository browser.