source: mainline/kernel/generic/src/ipc/irq.c@ 8a45bf09

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 8a45bf09 was 8a45bf09, checked in by Jakub Jermar <jakub@…>, 8 years ago

Improve comments

  • Property mode set to 100644
File size: 16.9 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when interrupt is detected. The application may provide a simple 'top-half'
40 * handler as part of its registration, which can perform simple operations
41 * (read/write port/memory, add information to notification IPC message).
42 *
43 * The structure of a notification message is as follows:
44 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
45 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
46 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
47 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
48 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
49 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
50 * - in_phone_hash: interrupt counter (may be needed to assure correct order
51 * in multithreaded drivers)
52 *
53 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),
54 * ipc_irq_cleanup() and IRQ handlers:
55 *
56 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
57 * and answerbox lock, we can rule out race conditions between the
58 * registration functions and also the cleanup function. Thus the observer can
59 * either see the IRQ structure present in both the hash table and the
60 * answerbox list or absent in both. Views in which the IRQ structure would be
61 * linked in the hash table but not in the answerbox list, or vice versa, are
62 * not possible.
63 *
64 * By always taking the hash table lock and the IRQ structure lock, we can
65 * rule out a scenario in which we would free up an IRQ structure, which is
66 * still referenced by, for example, an IRQ handler. The locking scheme forces
67 * us to lock the IRQ structure only after any progressing IRQs on that
68 * structure are finished. Because we hold the hash table lock, we prevent new
69 * IRQs from taking new references to the IRQ structure.
70 *
71 */
72
73#include <arch.h>
74#include <assert.h>
75#include <mm/slab.h>
76#include <mm/page.h>
77#include <mm/km.h>
78#include <errno.h>
79#include <ddi/irq.h>
80#include <ipc/ipc.h>
81#include <ipc/irq.h>
82#include <syscall/copy.h>
83#include <console/console.h>
84#include <print.h>
85#include <macros.h>
86#include <cap/cap.h>
87
88static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
89{
90 for (size_t i = 0; i < rangecount; i++) {
91#ifdef IO_SPACE_BOUNDARY
92 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
93#endif
94 km_unmap(ranges[i].base, ranges[i].size);
95 }
96}
97
/** Map PIO ranges and rewrite pseudocode addresses to kernel virtual.
 *
 * Maps each memory-mapped PIO range into the kernel virtual address space
 * (rewriting ranges[i].base in place from physical to virtual) and then
 * rewrites the address operand of every PIO command from physical to the
 * corresponding kernel virtual address.
 *
 * @param ranges     PIO ranges; bases are rewritten in place on success.
 * @param rangecount Number of ranges.
 * @param cmds       Pseudocode commands; PIO command addresses are
 *                   rewritten in place.
 * @param cmdcount   Number of commands.
 *
 * @return EOK on success.
 * @return ENOMEM if a range could not be mapped (partial mappings undone).
 * @return EINVAL if a PIO command address falls outside all ranges.
 *
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/*
	 * Copy the physical base addresses aside.
	 * Needed later because ranges[i].base is overwritten by km_map()
	 * below, but the commands still carry physical addresses.
	 *
	 * NOTE(review): malloc(..., 0) result is not checked — presumably
	 * flags 0 means a blocking allocation that cannot fail; confirm
	 * against the kernel allocator's contract.
	 */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* I/O-port ranges need no mapping; keep their base as-is. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Undo only the i ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/*
			 * Find the matching range. The full access
			 * [addr, addr + size) must lie within the range's
			 * original physical extent.
			 */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/*
			 * Switch the command to a kernel virtual address.
			 * For unmapped port ranges, base == pbase[j], so
			 * this rewrite is a no-op.
			 */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
178
179/** Statically check the top-half pseudocode
180 *
181 * Check the top-half pseudocode for invalid or unsafe
182 * constructs.
183 *
184 */
185static int code_check(irq_cmd_t *cmds, size_t cmdcount)
186{
187 for (size_t i = 0; i < cmdcount; i++) {
188 /*
189 * Check for accepted ranges.
190 */
191 if (cmds[i].cmd >= CMD_LAST)
192 return EINVAL;
193
194 if (cmds[i].srcarg >= IPC_CALL_LEN)
195 return EINVAL;
196
197 if (cmds[i].dstarg >= IPC_CALL_LEN)
198 return EINVAL;
199
200 switch (cmds[i].cmd) {
201 case CMD_PREDICATE:
202 /*
203 * Check for control flow overflow.
204 * Note that jumping just beyond the last
205 * command is a correct behaviour.
206 */
207 if (i + cmds[i].value > cmdcount)
208 return EINVAL;
209
210 break;
211 default:
212 break;
213 }
214 }
215
216 return EOK;
217}
218
219/** Free the top-half pseudocode.
220 *
221 * @param code Pointer to the top-half pseudocode.
222 *
223 */
224static void code_free(irq_code_t *code)
225{
226 if (code) {
227 ranges_unmap(code->ranges, code->rangecount);
228 free(code->ranges);
229 free(code->cmds);
230 free(code);
231 }
232}
233
/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * Copies the irq_code_t structure and its ranges/commands arrays, verifies
 * the program with code_check() and maps its PIO ranges. On success the
 * returned structure is fully kernel-resident; the caller owns it and must
 * eventually release it with code_free().
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode.
 * @return NULL on failure (bad userspace address, counts exceeding
 *         IRQ_MAX_RANGE_COUNT / IRQ_MAX_PROG_SIZE, invalid program, or
 *         unmappable ranges).
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	/* Copy the top-level structure first to learn the array sizes. */
	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	/* Refuse oversized programs before allocating for them. */
	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	/* code->ranges still points into userspace here. */
	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	/* code->cmds still points into userspace here. */
	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Statically verify the program before activating it. */
	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	/*
	 * Map the PIO ranges and rewrite command addresses to kernel
	 * virtual addresses. On failure the callee unmaps any partial
	 * mappings itself, so the plain frees below suffice.
	 */
	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Replace the userspace pointers with the kernel copies. */
	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}
291
292/** Subscribe an answerbox as a receiving end for IRQ notifications.
293 *
294 * @param box Receiving answerbox.
295 * @param inr IRQ number.
296 * @param imethod Interface and method to be associated with the
297 * notification.
298 * @param ucode Uspace pointer to top-half pseudocode.
299 *
300 * @return IRQ capability handle.
301 * @return Negative error code.
302 *
303 */
304int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
305 irq_code_t *ucode)
306{
307 sysarg_t key[] = {
308 [IRQ_HT_KEY_INR] = (sysarg_t) inr,
309 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM
310 };
311
312 if ((inr < 0) || (inr > last_inr))
313 return ELIMIT;
314
315 irq_code_t *code;
316 if (ucode) {
317 code = code_from_uspace(ucode);
318 if (!code)
319 return EBADMEM;
320 } else
321 code = NULL;
322
323 /*
324 * Allocate and populate the IRQ kernel object.
325 */
326 int handle = cap_alloc(TASK);
327 if (handle < 0)
328 return handle;
329 cap_t *cap = cap_get_current(handle, CAP_TYPE_ALLOCATED);
330 assert(cap);
331 cap->type = CAP_TYPE_IRQ;
332
333 irq_t *irq = &cap->irq;
334 irq_initialize(irq);
335 irq->inr = inr;
336 irq->claim = ipc_irq_top_half_claim;
337 irq->handler = ipc_irq_top_half_handler;
338 irq->notif_cfg.notify = true;
339 irq->notif_cfg.answerbox = box;
340 irq->notif_cfg.imethod = imethod;
341 irq->notif_cfg.code = code;
342 irq->notif_cfg.counter = 0;
343
344 /*
345 * Enlist the IRQ structure in the uspace IRQ hash table and the
346 * answerbox's list.
347 */
348 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
349 irq_spinlock_lock(&irq->lock, false);
350 irq_spinlock_lock(&box->irq_lock, false);
351
352 hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
353 list_append(&irq->notif_cfg.link, &box->irq_list);
354
355 irq_spinlock_unlock(&box->irq_lock, false);
356 irq_spinlock_unlock(&irq->lock, false);
357 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
358
359 return handle;
360}
361
/** Unsubscribe task from IRQ notification.
 *
 * @param box    Answerbox associated with the notification.
 * @param handle IRQ capability handle.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unsubscribe(answerbox_t *box, int handle)
{
	cap_t *cap = cap_get_current(handle, CAP_TYPE_IRQ);
	if (!cap)
		return ENOENT;
	irq_t *irq = &cap->irq;

	/*
	 * Take all three locks (hash table, IRQ, answerbox — in that
	 * order) so the IRQ is removed from the hash table and the
	 * answerbox list atomically with respect to observers, and so we
	 * do not race with an in-progress IRQ handler holding irq->lock.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);

	assert(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

	irq_spinlock_unlock(&box->irq_lock, false);
	/* irq->lock unlocked by the hash table remove_callback */
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/*
	 * Free up the pseudo code and associated structures. Safe now:
	 * the IRQ is unreachable, so no new handler can reference it.
	 */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ capability and the underlying kernel object. */
	cap_free(TASK, handle);

	return EOK;
}
401
/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains list of all irq_t
 * structures that are subscribed to send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);

	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);

		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);

		/*
		 * irq->lock is normally taken before box->irq_lock (see
		 * ipc_irq_subscribe()), but here we already hold
		 * box->irq_lock. Use trylock and back off completely on
		 * contention to avoid a lock-order deadlock.
		 */
		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}

		assert(irq->notif_cfg.answerbox == box);

		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);

		/*
		 * Remove from the hash table. Note: the hash table's
		 * remove callback is what releases irq->lock (same
		 * convention as in ipc_irq_unsubscribe()).
		 */
		hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

		/*
		 * Release both locks so that we can free the IRQ code.
		 */
		irq_spinlock_unlock(&box->irq_lock, false);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

		code_free(irq->notif_cfg.code);

		// XXX: what to do about the irq capability? The task is in
		// clean-up anyway.

		/* Reacquire both locks before taking another round. */
		irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
		irq_spinlock_lock(&box->irq_lock, false);
	}

	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
459
460/** Add a call to the proper answerbox queue.
461 *
462 * Assume irq->lock is locked and interrupts disabled.
463 *
464 * @param irq IRQ structure referencing the target answerbox.
465 * @param call IRQ notification call.
466 *
467 */
468static void send_call(irq_t *irq, call_t *call)
469{
470 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
471 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
472 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
473
474 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
475}
476
/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
 *
 * Interprets the subscribed pseudocode program command by command. PIO
 * reads store into the scratch registers; PIO writes output either an
 * immediate value or a scratch register; CMD_PREDICATE conditionally
 * skips the following commands. Runs in interrupt context.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Without notification or a program, the IRQ cannot be ours. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			/* Write from a scratch register, not an immediate. */
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/*
			 * Skip the next `value` commands when the source
			 * scratch register is zero. code_check() verified
			 * the skip cannot jump past the program's end.
			 */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
559
560/* IRQ top-half handler.
561 *
562 * We expect interrupts to be disabled and the irq->lock already held.
563 *
564 * @param irq IRQ structure.
565 *
566 */
567void ipc_irq_top_half_handler(irq_t *irq)
568{
569 assert(irq);
570
571 assert(interrupts_disabled());
572 assert(irq_spinlock_locked(&irq->lock));
573
574 if (irq->notif_cfg.answerbox) {
575 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
576 if (!call)
577 return;
578
579 call->flags |= IPC_CALL_NOTIF;
580 /* Put a counter to the message */
581 call->priv = ++irq->notif_cfg.counter;
582
583 /* Set up args */
584 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
585 IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
586 IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
587 IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
588 IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
589 IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
590
591 send_call(irq, call);
592 }
593}
594
595/** Send notification message.
596 *
597 * @param irq IRQ structure.
598 * @param a1 Driver-specific payload argument.
599 * @param a2 Driver-specific payload argument.
600 * @param a3 Driver-specific payload argument.
601 * @param a4 Driver-specific payload argument.
602 * @param a5 Driver-specific payload argument.
603 *
604 */
605void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
606 sysarg_t a4, sysarg_t a5)
607{
608 irq_spinlock_lock(&irq->lock, true);
609
610 if (irq->notif_cfg.answerbox) {
611 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
612 if (!call) {
613 irq_spinlock_unlock(&irq->lock, true);
614 return;
615 }
616
617 call->flags |= IPC_CALL_NOTIF;
618 /* Put a counter to the message */
619 call->priv = ++irq->notif_cfg.counter;
620
621 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
622 IPC_SET_ARG1(call->data, a1);
623 IPC_SET_ARG2(call->data, a2);
624 IPC_SET_ARG3(call->data, a3);
625 IPC_SET_ARG4(call->data, a4);
626 IPC_SET_ARG5(call->data, a5);
627
628 send_call(irq, call);
629 }
630
631 irq_spinlock_unlock(&irq->lock, true);
632}
633
634/** @}
635 */
Note: See TracBrowser for help on using the repository browser.