source: mainline/kernel/generic/src/ipc/irq.c@e9d15d9

Last change on this file since e9d15d9 was e9d15d9, checked in by Jakub Jermar <jakub@…>, 8 years ago

Turn IRQ structures into kernel objects

ipc_irq_subscribe() now returns a capability for the underlying IRQ kernel
object. ipc_irq_unsubscribe() can now be done only with a valid IRQ capability.

/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericipc
 * @{
 */

/**
 * @file
 * @brief IRQ notification framework.
 *
 * This framework allows applications to subscribe to receive a notification
 * when an interrupt is detected. The application may provide a simple
 * 'top-half' handler as part of its subscription, which can perform simple
 * operations (read/write port/memory, add information to the notification
 * IPC message).
 *
 * The structure of a notification message is as follows:
 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
 * - in_phone_hash: interrupt counter (may be needed to ensure correct
 *   ordering in multithreaded drivers)
 *
 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
 * ipc_irq_cleanup() and IRQ handlers:
 *
 * By always taking the uspace IRQ hash table lock, the IRQ structure lock
 * and the answerbox lock together, we rule out race conditions between the
 * subscription functions and also the cleanup function. An observer can
 * therefore see the IRQ structure either present in both the hash table and
 * the answerbox list, or absent from both. Views in which the IRQ structure
 * would be linked in the hash table but not in the answerbox list, or vice
 * versa, are not possible.
 *
 * By always taking the hash table lock and the IRQ structure lock, we rule
 * out a scenario in which we would free up an IRQ structure that is still
 * referenced by, for example, an IRQ handler. The locking scheme forces us
 * to lock the IRQ structure only after any in-progress IRQs on that
 * structure have finished. Because we hold the hash table lock, we prevent
 * new IRQs from taking new references to the IRQ structure.
 */

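/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver's top-half program that reads a made-up device status register,
 * masks an assumed interrupt-pending bit and accepts the interrupt only
 * when the bit is set. All addresses, values and names below are invented
 * for illustration; only the irq_code_t, irq_pio_range_t and irq_cmd_t
 * fields are taken from this file.
 *
 *    static irq_pio_range_t demo_ranges[] = {
 *        { .base = 0x3f8, .size = 8 }    // hypothetical register window
 *    };
 *
 *    static irq_cmd_t demo_cmds[] = {
 *        // scratch[1] = status byte read from the device
 *        { .cmd = CMD_PIO_READ_8, .addr = (void *) 0x3fd, .dstarg = 1 },
 *        // scratch[2] = scratch[1] & 0x01 (assumed pending bit)
 *        { .cmd = CMD_AND, .srcarg = 1, .dstarg = 2, .value = 0x01 },
 *        // if scratch[2] == 0, skip the next command (CMD_ACCEPT)
 *        { .cmd = CMD_PREDICATE, .srcarg = 2, .value = 1 },
 *        { .cmd = CMD_ACCEPT },
 *        { .cmd = CMD_DECLINE }
 *    };
 *
 *    static irq_code_t demo_code = {
 *        .rangecount = 1, .ranges = demo_ranges,
 *        .cmdcount = 5, .cmds = demo_cmds
 *    };
 *
 * After CMD_ACCEPT, scratch[1] and scratch[2] are delivered to the driver
 * as ARG1 and ARG2 of the notification message.
 */
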
#include <arch.h>
#include <assert.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/km.h>
#include <errno.h>
#include <ddi/irq.h>
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <syscall/copy.h>
#include <console/console.h>
#include <print.h>
#include <macros.h>
#include <kobject/kobject.h>

/** Unmap the PIO ranges from the kernel virtual address space. */
static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
{
    for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
        if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
#endif
            km_unmap(ranges[i].base, ranges[i].size);
    }
}

/** Map the PIO ranges and rewrite the pseudocode addresses accordingly. */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
    /* Copy the physical base addresses aside. */
    uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
    for (size_t i = 0; i < rangecount; i++)
        pbase[i] = ranges[i].base;

    /* Map the PIO ranges into the kernel virtual address space. */
    for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
        if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
            continue;
#endif
        ranges[i].base = km_map(pbase[i], ranges[i].size,
            PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
        if (!ranges[i].base) {
            ranges_unmap(ranges, i);
            free(pbase);
            return ENOMEM;
        }
    }

    /* Rewrite the pseudocode addresses from physical to kernel virtual. */
    for (size_t i = 0; i < cmdcount; i++) {
        uintptr_t addr;
        size_t size;

        /* Process only commands that use an address. */
        switch (cmds[i].cmd) {
        case CMD_PIO_READ_8:
        case CMD_PIO_WRITE_8:
        case CMD_PIO_WRITE_A_8:
            size = 1;
            break;
        case CMD_PIO_READ_16:
        case CMD_PIO_WRITE_16:
        case CMD_PIO_WRITE_A_16:
            size = 2;
            break;
        case CMD_PIO_READ_32:
        case CMD_PIO_WRITE_32:
        case CMD_PIO_WRITE_A_32:
            size = 4;
            break;
        default:
            /* Move on to the next command. */
            continue;
        }

        addr = (uintptr_t) cmds[i].addr;

        size_t j;
        for (j = 0; j < rangecount; j++) {
            /* Find the matching range. */
            if (!iswithin(pbase[j], ranges[j].size, addr, size))
                continue;

            /* Switch the command to a kernel virtual address. */
            addr -= pbase[j];
            addr += ranges[j].base;

            cmds[i].addr = (void *) addr;
            break;
        }

        if (j == rangecount) {
            /*
             * The address used in this command is outside of all
             * defined ranges.
             */
            ranges_unmap(ranges, rangecount);
            free(pbase);
            return EINVAL;
        }
    }

    free(pbase);
    return EOK;
}
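
/*
 * Worked example with hypothetical numbers: suppose a range's physical base
 * pbase[j] == 0x80000000 was mapped by km_map() to the kernel virtual base
 * ranges[j].base == 0xffff0000. A command referring to physical address
 * 0x80000004 is then rewritten as follows:
 *
 *    addr = 0x80000004;
 *    addr -= pbase[j];        // 0x80000004 - 0x80000000 == 0x4
 *    addr += ranges[j].base;  // 0xffff0000 + 0x4 == 0xffff0004
 */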

/** Statically check the top-half pseudocode.
 *
 * Check the top-half pseudocode for invalid or unsafe constructs.
 *
 * @param cmds     Commands of the top-half pseudocode.
 * @param cmdcount Number of commands.
 *
 * @return EOK if the pseudocode is valid, EINVAL otherwise.
 *
 */
static int code_check(irq_cmd_t *cmds, size_t cmdcount)
{
    for (size_t i = 0; i < cmdcount; i++) {
        /*
         * Check for accepted ranges.
         */
        if (cmds[i].cmd >= CMD_LAST)
            return EINVAL;

        if (cmds[i].srcarg >= IPC_CALL_LEN)
            return EINVAL;

        if (cmds[i].dstarg >= IPC_CALL_LEN)
            return EINVAL;

        switch (cmds[i].cmd) {
        case CMD_PREDICATE:
            /*
             * Check for control flow overflow.
             * Note that jumping just beyond the last
             * command is correct behaviour.
             */
            if (i + cmds[i].value > cmdcount)
                return EINVAL;

            break;
        default:
            break;
        }
    }

    return EOK;
}
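
/*
 * Example of the CMD_PREDICATE bound (hypothetical numbers): in a program
 * with cmdcount == 3, a predicate at index i == 2 may use value == 1, since
 * 2 + 1 == 3 jumps just beyond the last command and merely ends the program.
 * A value of 2 is rejected, because 2 + 2 > 3 would jump out of the program.
 */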

/** Free the top-half pseudocode.
 *
 * @param code Pointer to the top-half pseudocode.
 *
 */
static void code_free(irq_code_t *code)
{
    if (code) {
        ranges_unmap(code->ranges, code->rangecount);
        free(code->ranges);
        free(code->cmds);
        free(code);
    }
}

/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode or NULL on failure.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
    irq_pio_range_t *ranges = NULL;
    irq_cmd_t *cmds = NULL;

    irq_code_t *code = malloc(sizeof(*code), 0);
    int rc = copy_from_uspace(code, ucode, sizeof(*code));
    if (rc != EOK)
        goto error;

    if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
        (code->cmdcount > IRQ_MAX_PROG_SIZE))
        goto error;

    ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
    rc = copy_from_uspace(ranges, code->ranges,
        sizeof(code->ranges[0]) * code->rangecount);
    if (rc != EOK)
        goto error;

    cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
    rc = copy_from_uspace(cmds, code->cmds,
        sizeof(code->cmds[0]) * code->cmdcount);
    if (rc != EOK)
        goto error;

    rc = code_check(cmds, code->cmdcount);
    if (rc != EOK)
        goto error;

    rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
        code->cmdcount);
    if (rc != EOK)
        goto error;

    code->ranges = ranges;
    code->cmds = cmds;

    return code;

error:
    if (cmds)
        free(cmds);

    if (ranges)
        free(ranges);

    free(code);
    return NULL;
}

/** Subscribe an answerbox as a receiving end for IRQ notifications.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to the top-half pseudocode.
 *
 * @return IRQ capability on success.
 * @return Negative error code otherwise.
 *
 */
int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
    sysarg_t key[] = {
        (sysarg_t) inr,
        (sysarg_t) devno
    };

    if ((inr < 0) || (inr > last_inr))
        return ELIMIT;

    irq_code_t *code;
    if (ucode) {
        code = code_from_uspace(ucode);
        if (!code)
            return EBADMEM;
    } else
        code = NULL;

    /*
     * Allocate and populate the IRQ kernel object.
     */
    int cap = kobject_alloc(TASK);
    if (cap < 0) {
        /* Do not leak the copied pseudocode. */
        code_free(code);
        return cap;
    }
    kobject_t *kobj = kobject_get_current(cap, KOBJECT_TYPE_ALLOCATED);
    assert(kobj);
    kobj->type = KOBJECT_TYPE_IRQ;

    irq_t *irq = &kobj->irq;
    irq_initialize(irq);
    irq->devno = devno;
    irq->inr = inr;
    irq->claim = ipc_irq_top_half_claim;
    irq->handler = ipc_irq_top_half_handler;
    irq->notif_cfg.notify = true;
    irq->notif_cfg.answerbox = box;
    irq->notif_cfg.imethod = imethod;
    irq->notif_cfg.code = code;
    irq->notif_cfg.counter = 0;

    /*
     * Enlist the IRQ structure in the uspace IRQ hash table and the
     * answerbox's list.
     */
    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);

    link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
    if (hlp) {
        irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);

        /* hirq is locked */
        irq_spinlock_unlock(&hirq->lock, false);
        code_free(code);
        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

        kobject_free(TASK, cap);
        return EEXIST;
    }

    /* Locking is not strictly necessary here, but let us be paranoid. */
    irq_spinlock_lock(&irq->lock, false);
    irq_spinlock_lock(&box->irq_lock, false);

    hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
    list_append(&irq->notif_cfg.link, &box->irq_list);

    irq_spinlock_unlock(&box->irq_lock, false);
    irq_spinlock_unlock(&irq->lock, false);
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

    return cap;
}
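
/*
 * Illustrative flow (hypothetical names): a userspace driver reaches this
 * function through the SYS_IPC_IRQ_SUBSCRIBE syscall, passing the inr,
 * devno, a chosen imethod and a pointer to its irq_code_t program (such as
 * demo_code sketched near the top of this file):
 *
 *    int cap = ipc_irq_subscribe(box, inr, devno, imethod, ucode);
 *    if (cap < 0)
 *        // negative error code: ELIMIT, EBADMEM, EEXIST, ...
 *    else
 *        // non-negative IRQ capability; pass it to ipc_irq_unsubscribe()
 */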

/** Unsubscribe a task from an IRQ notification.
 *
 * @param box     Answerbox associated with the notification.
 * @param irq_cap IRQ capability.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unsubscribe(answerbox_t *box, int irq_cap)
{
    kobject_t *kobj = kobject_get_current(irq_cap, KOBJECT_TYPE_IRQ);
    if (!kobj)
        return ENOENT;
    irq_t *irq = &kobj->irq;

    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
    irq_spinlock_lock(&irq->lock, false);
    irq_spinlock_lock(&box->irq_lock, false);

    assert(irq->notif_cfg.answerbox == box);

    /* Remove the IRQ from the answerbox's list. */
    list_remove(&irq->notif_cfg.link);

    /* Remove the IRQ from the uspace IRQ hash table. */
    hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

    irq_spinlock_unlock(&box->irq_lock, false);
    /* irq->lock is unlocked by the hash table remove_callback. */
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

    /* Free up the pseudocode and associated structures. */
    code_free(irq->notif_cfg.code);

    /* Free up the IRQ kernel object. */
    kobject_free(TASK, irq_cap);

    return EOK;
}

/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains a list of all
 * irq_t structures that are subscribed to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
    irq_spinlock_lock(&box->irq_lock, false);

    while (!list_empty(&box->irq_list)) {
        DEADLOCK_PROBE_INIT(p_irqlock);

        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
            notif_cfg.link);

        if (!irq_spinlock_trylock(&irq->lock)) {
            /*
             * Avoid deadlock by trying again.
             */
            irq_spinlock_unlock(&box->irq_lock, false);
            irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
            DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
            goto loop;
        }

        assert(irq->notif_cfg.answerbox == box);

        /* Unlist from the answerbox. */
        list_remove(&irq->notif_cfg.link);

        /* Remove from the hash table. */
        hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

        /*
         * Release both locks so that we can free the pseudocode.
         */
        irq_spinlock_unlock(&box->irq_lock, false);
        irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

        code_free(irq->notif_cfg.code);

        // XXX: what to do about the irq capability? The task is in
        // clean-up anyway.

        /* Reacquire both locks before taking another round. */
        irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
        irq_spinlock_lock(&box->irq_lock, false);
    }

    irq_spinlock_unlock(&box->irq_lock, false);
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}

/** Add a call to the proper answerbox queue.
 *
 * Assume irq->lock is locked and interrupts are disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
    irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
    list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
    irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

    waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}

/** Apply the top-half pseudocode to find out whether to accept the IRQ.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
    irq_code_t *code = irq->notif_cfg.code;
    uint32_t *scratch = irq->notif_cfg.scratch;

    if (!irq->notif_cfg.notify)
        return IRQ_DECLINE;

    if (!code)
        return IRQ_DECLINE;

    for (size_t i = 0; i < code->cmdcount; i++) {
        uintptr_t srcarg = code->cmds[i].srcarg;
        uintptr_t dstarg = code->cmds[i].dstarg;

        switch (code->cmds[i].cmd) {
        case CMD_PIO_READ_8:
            scratch[dstarg] =
                pio_read_8((ioport8_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_READ_16:
            scratch[dstarg] =
                pio_read_16((ioport16_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_READ_32:
            scratch[dstarg] =
                pio_read_32((ioport32_t *) code->cmds[i].addr);
            break;
        case CMD_PIO_WRITE_8:
            pio_write_8((ioport8_t *) code->cmds[i].addr,
                (uint8_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_16:
            pio_write_16((ioport16_t *) code->cmds[i].addr,
                (uint16_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_32:
            pio_write_32((ioport32_t *) code->cmds[i].addr,
                (uint32_t) code->cmds[i].value);
            break;
        case CMD_PIO_WRITE_A_8:
            pio_write_8((ioport8_t *) code->cmds[i].addr,
                (uint8_t) scratch[srcarg]);
            break;
        case CMD_PIO_WRITE_A_16:
            pio_write_16((ioport16_t *) code->cmds[i].addr,
                (uint16_t) scratch[srcarg]);
            break;
        case CMD_PIO_WRITE_A_32:
            pio_write_32((ioport32_t *) code->cmds[i].addr,
                (uint32_t) scratch[srcarg]);
            break;
        case CMD_LOAD:
            scratch[dstarg] = code->cmds[i].value;
            break;
        case CMD_AND:
            scratch[dstarg] = scratch[srcarg] &
                code->cmds[i].value;
            break;
        case CMD_PREDICATE:
            if (scratch[srcarg] == 0)
                i += code->cmds[i].value;

            break;
        case CMD_ACCEPT:
            return IRQ_ACCEPT;
        case CMD_DECLINE:
        default:
            return IRQ_DECLINE;
        }
    }

    return IRQ_DECLINE;
}
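
/*
 * Worked trace (hypothetical): running the demo_cmds program sketched near
 * the top of this file against a status byte of 0x05:
 *
 *    i == 0: CMD_PIO_READ_8  -> scratch[1] = 0x05
 *    i == 1: CMD_AND         -> scratch[2] = 0x05 & 0x01 == 0x01
 *    i == 2: CMD_PREDICATE   -> scratch[2] != 0, nothing is skipped
 *    i == 3: CMD_ACCEPT      -> return IRQ_ACCEPT
 *
 * Had the pending bit been clear, CMD_PREDICATE would have skipped the
 * CMD_ACCEPT command and the program would have fallen through to
 * CMD_DECLINE.
 */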

/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
    assert(irq);

    assert(interrupts_disabled());
    assert(irq_spinlock_locked(&irq->lock));

    if (irq->notif_cfg.answerbox) {
        call_t *call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call)
            return;

        call->flags |= IPC_CALL_NOTIF;
        /* Attach a counter to the message. */
        call->priv = ++irq->notif_cfg.counter;

        /* Set up the arguments. */
        IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
        IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
        IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
        IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
        IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
        IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

        send_call(irq, call);
    }
}

/** Send a notification message.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
    irq_spinlock_lock(&irq->lock, true);

    if (irq->notif_cfg.answerbox) {
        call_t *call = ipc_call_alloc(FRAME_ATOMIC);
        if (!call) {
            irq_spinlock_unlock(&irq->lock, true);
            return;
        }

        call->flags |= IPC_CALL_NOTIF;
        /* Attach a counter to the message. */
        call->priv = ++irq->notif_cfg.counter;

        IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
        IPC_SET_ARG1(call->data, a1);
        IPC_SET_ARG2(call->data, a2);
        IPC_SET_ARG3(call->data, a3);
        IPC_SET_ARG4(call->data, a4);
        IPC_SET_ARG5(call->data, a5);

        send_call(irq, call);
    }

    irq_spinlock_unlock(&irq->lock, true);
}

/** @}
 */