source: mainline/kernel/generic/src/ipc/irq.c@ 30c27e9

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 30c27e9 was a5d0143, checked in by Jakub Jermar <jakub@…>, 8 years ago

Improve comments

  • Property mode set to 100644
File size: 14.4 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when an interrupt is detected. The application may provide a simple
40 * 'top-half' handler as part of its registration, which can perform simple
41 * operations (read/write port/memory, add information to notification IPC
42 * message).
43 *
44 * The structure of a notification message is as follows:
45 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 */
54
55#include <arch.h>
56#include <assert.h>
57#include <mm/slab.h>
58#include <mm/page.h>
59#include <mm/km.h>
60#include <errno.h>
61#include <ddi/irq.h>
62#include <ipc/ipc.h>
63#include <ipc/irq.h>
64#include <syscall/copy.h>
65#include <console/console.h>
66#include <print.h>
67#include <macros.h>
68#include <cap/cap.h>
69
70static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
71{
72 for (size_t i = 0; i < rangecount; i++) {
73#ifdef IO_SPACE_BOUNDARY
74 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
75#endif
76 km_unmap(ranges[i].base, ranges[i].size);
77 }
78}
79
/** Map PIO ranges and rewrite the IRQ code addresses accordingly.
 *
 * Maps the PIO ranges into the kernel virtual address space and rewrites
 * the addresses used by the PIO commands from physical to the corresponding
 * kernel virtual addresses.
 *
 * @param ranges     PIO ranges; base fields are rewritten in place from
 *                   physical to kernel virtual addresses.
 * @param cmds       Top-half IRQ commands; addr fields are rewritten in place.
 * @param rangecount Number of PIO ranges.
 * @param cmdcount   Number of commands.
 *
 * @return EOK on success.
 * @return ENOMEM if a range cannot be mapped.
 * @return EINVAL if a command references an address outside all ranges.
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Ports below the I/O space boundary are accessed directly. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Unmap only the ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
160
161/** Statically check the top-half IRQ code
162 *
163 * Check the top-half IRQ code for invalid or unsafe constructs.
164 *
165 */
166static int code_check(irq_cmd_t *cmds, size_t cmdcount)
167{
168 for (size_t i = 0; i < cmdcount; i++) {
169 /*
170 * Check for accepted ranges.
171 */
172 if (cmds[i].cmd >= CMD_LAST)
173 return EINVAL;
174
175 if (cmds[i].srcarg >= IPC_CALL_LEN)
176 return EINVAL;
177
178 if (cmds[i].dstarg >= IPC_CALL_LEN)
179 return EINVAL;
180
181 switch (cmds[i].cmd) {
182 case CMD_PREDICATE:
183 /*
184 * Check for control flow overflow.
185 * Note that jumping just beyond the last
186 * command is a correct behaviour.
187 */
188 if (i + cmds[i].value > cmdcount)
189 return EINVAL;
190
191 break;
192 default:
193 break;
194 }
195 }
196
197 return EOK;
198}
199
200/** Free the top-half IRQ code.
201 *
202 * @param code Pointer to the top-half IRQ code.
203 *
204 */
205static void code_free(irq_code_t *code)
206{
207 if (code) {
208 ranges_unmap(code->ranges, code->rangecount);
209 free(code->ranges);
210 free(code->cmds);
211 free(code);
212 }
213}
214
/** Copy the top-half IRQ code from userspace into the kernel.
 *
 * Copies in the code header, its PIO ranges and its command array,
 * statically checks the program and maps the PIO ranges into the kernel
 * virtual address space. The returned code is ready to be executed by
 * ipc_irq_top_half_claim() and must eventually be freed by code_free().
 *
 * @param ucode Userspace address of the top-half IRQ code.
 *
 * @return Kernel address of the copied IRQ code or NULL on failure.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	/* Cap the table sizes so the size computations below cannot overflow. */
	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	/* Copy in the PIO ranges. */
	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	/* Copy in the program commands. */
	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Reject invalid or unsafe programs. */
	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Map the PIO ranges and rewrite command addresses to kernel virtual. */
	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	/* From now on, the code references the kernel copies. */
	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}
272
273/** Subscribe an answerbox as a receiving end for IRQ notifications.
274 *
275 * @param box Receiving answerbox.
276 * @param inr IRQ number.
277 * @param imethod Interface and method to be associated with the notification.
278 * @param ucode Uspace pointer to top-half IRQ code.
279 *
280 * @return IRQ capability handle.
281 * @return Negative error code.
282 *
283 */
284int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
285 irq_code_t *ucode)
286{
287 sysarg_t key[] = {
288 [IRQ_HT_KEY_INR] = (sysarg_t) inr,
289 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM
290 };
291
292 if ((inr < 0) || (inr > last_inr))
293 return ELIMIT;
294
295 irq_code_t *code;
296 if (ucode) {
297 code = code_from_uspace(ucode);
298 if (!code)
299 return EBADMEM;
300 } else
301 code = NULL;
302
303 /*
304 * Allocate and populate the IRQ kernel object.
305 */
306 int handle = cap_alloc(TASK);
307 if (handle < 0)
308 return handle;
309 cap_t *cap = cap_get_current(handle, CAP_TYPE_ALLOCATED);
310 assert(cap);
311
312 irq_t *irq = &cap->irq;
313 irq_initialize(irq);
314 irq->inr = inr;
315 irq->claim = ipc_irq_top_half_claim;
316 irq->handler = ipc_irq_top_half_handler;
317 irq->notif_cfg.notify = true;
318 irq->notif_cfg.answerbox = box;
319 irq->notif_cfg.imethod = imethod;
320 irq->notif_cfg.code = code;
321 irq->notif_cfg.counter = 0;
322
323 /*
324 * Insert the IRQ structure into the uspace IRQ hash table and retype
325 * the capability. By retyping the capability inside the critical
326 * section, we make sure another thread cannot attempt to unregister the
327 * IRQ before it is inserted into the hash table.
328 */
329 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
330 irq_spinlock_lock(&irq->lock, false);
331
332 cap->type = CAP_TYPE_IRQ;
333 hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
334
335 irq_spinlock_unlock(&irq->lock, false);
336 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
337
338 return handle;
339}
340
341/** Unsubscribe task from IRQ notification.
342 *
343 * @param box Answerbox associated with the notification.
344 * @param handle IRQ capability handle.
345 *
346 * @return EOK on success or a negative error code.
347 *
348 */
int ipc_irq_unsubscribe(answerbox_t *box, int handle)
{
	/* Look up the IRQ capability under the task lock. */
	irq_spinlock_lock(&TASK->lock, true);
	cap_t *cap = cap_get_current(handle, CAP_TYPE_IRQ);
	if (!cap) {
		irq_spinlock_unlock(&TASK->lock, true);
		return ENOENT;
	}
	/* Make sure only one thread can win the race to unsubscribe. */
	cap->type = CAP_TYPE_ALLOCATED;
	irq_spinlock_unlock(&TASK->lock, true);

	irq_t *irq = &cap->irq;

	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	assert(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

	/* irq->lock unlocked by the hash table remove_callback */
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ capability and the underlying kernel object. */
	cap_free(TASK, handle);

	return EOK;
}
382
383/** Add a call to the proper answerbox queue.
384 *
385 * Assume irq->lock is locked and interrupts disabled.
386 *
387 * @param irq IRQ structure referencing the target answerbox.
388 * @param call IRQ notification call.
389 *
390 */
391static void send_call(irq_t *irq, call_t *call)
392{
393 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
394 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
395 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
396
397 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
398}
399
/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Decline if notifications are disabled or no code is attached. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	/*
	 * Interpret the top-half program one command at a time. The program
	 * was validated by code_check() at subscription time, so srcarg and
	 * dstarg are known to be within the bounds of the scratch array.
	 */
	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			/* Write the value of a scratch register to the port. */
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/*
			 * Skip the next 'value' commands when the tested
			 * scratch register is zero.
			 */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Running off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
482
483/* IRQ top-half handler.
484 *
485 * We expect interrupts to be disabled and the irq->lock already held.
486 *
487 * @param irq IRQ structure.
488 *
489 */
490void ipc_irq_top_half_handler(irq_t *irq)
491{
492 assert(irq);
493
494 assert(interrupts_disabled());
495 assert(irq_spinlock_locked(&irq->lock));
496
497 if (irq->notif_cfg.answerbox) {
498 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
499 if (!call)
500 return;
501
502 call->flags |= IPC_CALL_NOTIF;
503 /* Put a counter to the message */
504 call->priv = ++irq->notif_cfg.counter;
505
506 /* Set up args */
507 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
508 IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
509 IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
510 IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
511 IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
512 IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
513
514 send_call(irq, call);
515 }
516}
517
518/** Send notification message.
519 *
520 * @param irq IRQ structure.
521 * @param a1 Driver-specific payload argument.
522 * @param a2 Driver-specific payload argument.
523 * @param a3 Driver-specific payload argument.
524 * @param a4 Driver-specific payload argument.
525 * @param a5 Driver-specific payload argument.
526 *
527 */
528void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
529 sysarg_t a4, sysarg_t a5)
530{
531 irq_spinlock_lock(&irq->lock, true);
532
533 if (irq->notif_cfg.answerbox) {
534 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
535 if (!call) {
536 irq_spinlock_unlock(&irq->lock, true);
537 return;
538 }
539
540 call->flags |= IPC_CALL_NOTIF;
541 /* Put a counter to the message */
542 call->priv = ++irq->notif_cfg.counter;
543
544 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
545 IPC_SET_ARG1(call->data, a1);
546 IPC_SET_ARG2(call->data, a2);
547 IPC_SET_ARG3(call->data, a3);
548 IPC_SET_ARG4(call->data, a4);
549 IPC_SET_ARG5(call->data, a5);
550
551 send_call(irq, call);
552 }
553
554 irq_spinlock_unlock(&irq->lock, true);
555}
556
557/** @}
558 */
Note: See TracBrowser for help on using the repository browser.