source: mainline/kernel/generic/src/ipc/irq.c@ 9e87562

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 9e87562 was 9e87562, checked in by Jakub Jermar <jakub@…>, 8 years ago

Make all accesses to capabilities exclusive

This commit makes sure that all accesses to the capabilities array and other
metadata are protected by a mutex. This is necessary for future resizing of the
capabilities array.

Group task's capabilities by type so that it is possible to visit all
capabilities of the given type effectively.

Provide cap_publish() and cap_unpublish() to automate steps that make the
capability visible/invisible to userspace and insert/remove the capability from
the respective type list.

  • Property mode set to 100644
File size: 14.0 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when an interrupt is detected. The application may provide a simple
40 * 'top-half' handler as part of its registration, which can perform simple
41 * operations (read/write port/memory, add information to notification IPC
42 * message).
43 *
44 * The structure of a notification message is as follows:
45 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 */
54
55#include <arch.h>
56#include <assert.h>
57#include <mm/slab.h>
58#include <mm/page.h>
59#include <mm/km.h>
60#include <errno.h>
61#include <ddi/irq.h>
62#include <ipc/ipc.h>
63#include <ipc/irq.h>
64#include <syscall/copy.h>
65#include <console/console.h>
66#include <print.h>
67#include <macros.h>
68#include <cap/cap.h>
69
70static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
71{
72 for (size_t i = 0; i < rangecount; i++) {
73#ifdef IO_SPACE_BOUNDARY
74 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
75#endif
76 km_unmap(ranges[i].base, ranges[i].size);
77 }
78}
79
/** Map PIO ranges and rewrite IRQ code addresses to kernel virtual.
 *
 * Maps each PIO range into the kernel virtual address space, then translates
 * the physical addresses used by PIO commands in @a cmds to the matching
 * kernel virtual addresses.
 *
 * @param ranges     PIO ranges; bases are physical on entry and are rewritten
 *                   to kernel virtual addresses for the ranges that get mapped.
 * @param rangecount Number of entries in @a ranges.
 * @param cmds       IRQ code commands; addresses rewritten in place.
 * @param cmdcount   Number of entries in @a cmds.
 *
 * @return EOK on success.
 * @return ENOMEM if mapping a range failed.
 * @return EINVAL if a command address lies outside all ranges.
 *
 */
static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Port-space addresses need no mapping; keep their base. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Unmap only the i ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range (compare against the
			   saved physical base, not the rewritten one). */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
160
161/** Statically check the top-half IRQ code
162 *
163 * Check the top-half IRQ code for invalid or unsafe constructs.
164 *
165 */
166static int code_check(irq_cmd_t *cmds, size_t cmdcount)
167{
168 for (size_t i = 0; i < cmdcount; i++) {
169 /*
170 * Check for accepted ranges.
171 */
172 if (cmds[i].cmd >= CMD_LAST)
173 return EINVAL;
174
175 if (cmds[i].srcarg >= IPC_CALL_LEN)
176 return EINVAL;
177
178 if (cmds[i].dstarg >= IPC_CALL_LEN)
179 return EINVAL;
180
181 switch (cmds[i].cmd) {
182 case CMD_PREDICATE:
183 /*
184 * Check for control flow overflow.
185 * Note that jumping just beyond the last
186 * command is a correct behaviour.
187 */
188 if (i + cmds[i].value > cmdcount)
189 return EINVAL;
190
191 break;
192 default:
193 break;
194 }
195 }
196
197 return EOK;
198}
199
200/** Free the top-half IRQ code.
201 *
202 * @param code Pointer to the top-half IRQ code.
203 *
204 */
205static void code_free(irq_code_t *code)
206{
207 if (code) {
208 ranges_unmap(code->ranges, code->rangecount);
209 free(code->ranges);
210 free(code->cmds);
211 free(code);
212 }
213}
214
/** Copy the top-half IRQ code from userspace into the kernel.
 *
 * Copies the code header, its PIO ranges and its commands, statically
 * verifies the commands and maps the PIO ranges into kernel address space.
 *
 * @param ucode Userspace address of the top-half IRQ code.
 *
 * @return Kernel address of the copied IRQ code.
 * @return NULL on copy failure, invalid counts, invalid code or
 *         mapping failure.
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	irq_pio_range_t *ranges = NULL;
	irq_cmd_t *cmds = NULL;

	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != EOK)
		goto error;

	/* Bound the userspace-supplied counts before sizing allocations. */
	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
		goto error;

	/* Copy in the PIO ranges. */
	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
	rc = copy_from_uspace(ranges, code->ranges,
	    sizeof(code->ranges[0]) * code->rangecount);
	if (rc != EOK)
		goto error;

	/* Copy in the IRQ code commands. */
	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(cmds, code->cmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Reject invalid or unsafe programs before mapping anything. */
	rc = code_check(cmds, code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Map the ranges and rewrite command addresses to kernel virtual. */
	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
	    code->cmdcount);
	if (rc != EOK)
		goto error;

	/* Only now point the kernel copy at the kernel-side arrays. */
	code->ranges = ranges;
	code->cmds = cmds;

	return code;

error:
	if (cmds)
		free(cmds);

	if (ranges)
		free(ranges);

	free(code);
	return NULL;
}
272
273/** Subscribe an answerbox as a receiving end for IRQ notifications.
274 *
275 * @param box Receiving answerbox.
276 * @param inr IRQ number.
277 * @param imethod Interface and method to be associated with the notification.
278 * @param ucode Uspace pointer to top-half IRQ code.
279 *
280 * @return IRQ capability handle.
281 * @return Negative error code.
282 *
283 */
284int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
285 irq_code_t *ucode)
286{
287 sysarg_t key[] = {
288 [IRQ_HT_KEY_INR] = (sysarg_t) inr,
289 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM
290 };
291
292 if ((inr < 0) || (inr > last_inr))
293 return ELIMIT;
294
295 irq_code_t *code;
296 if (ucode) {
297 code = code_from_uspace(ucode);
298 if (!code)
299 return EBADMEM;
300 } else
301 code = NULL;
302
303 /*
304 * Allocate and populate the IRQ kernel object.
305 */
306 int handle = cap_alloc(TASK);
307 if (handle < 0)
308 return handle;
309
310 irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC);
311 if (!irq) {
312 cap_free(TASK, handle);
313 return ENOMEM;
314 }
315
316 irq_initialize(irq);
317 irq->inr = inr;
318 irq->claim = ipc_irq_top_half_claim;
319 irq->handler = ipc_irq_top_half_handler;
320 irq->notif_cfg.notify = true;
321 irq->notif_cfg.answerbox = box;
322 irq->notif_cfg.imethod = imethod;
323 irq->notif_cfg.code = code;
324 irq->notif_cfg.counter = 0;
325
326 /*
327 * Insert the IRQ structure into the uspace IRQ hash table.
328 */
329 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
330 irq_spinlock_lock(&irq->lock, false);
331
332 hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
333
334 irq_spinlock_unlock(&irq->lock, false);
335 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
336
337 cap_publish(TASK, handle, CAP_TYPE_IRQ, irq);
338
339 return handle;
340}
341
/** Unsubscribe task from IRQ notification.
 *
 * @param box    Answerbox associated with the notification.
 * @param handle IRQ capability handle.
 *
 * @return EOK on success or a negative error code.
 *
 */
int ipc_irq_unsubscribe(answerbox_t *box, int handle)
{
	/* Hide the capability from userspace and detach its kernel object. */
	cap_t *cap = cap_unpublish(TASK, handle, CAP_TYPE_IRQ);
	if (!cap)
		return ENOENT;

	irq_t *irq = (irq_t *) cap->kobject;

	/* Lock order: hash table lock first, then the IRQ lock. */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	assert(irq->notif_cfg.answerbox == box);

	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove_item(&irq_uspace_hash_table, &irq->link);

	/* irq->lock unlocked by the hash table remove_callback */
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Free up the IRQ code and associated structures. */
	code_free(irq->notif_cfg.code);

	/* Free up the IRQ capability and the underlying kernel object. */
	slab_free(irq_slab, cap->kobject);
	cap_free(TASK, handle);

	return EOK;
}
378
379/** Add a call to the proper answerbox queue.
380 *
381 * Assume irq->lock is locked and interrupts disabled.
382 *
383 * @param irq IRQ structure referencing the target answerbox.
384 * @param call IRQ notification call.
385 *
386 */
387static void send_call(irq_t *irq, call_t *call)
388{
389 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
390 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
391 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
392
393 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
394}
395
/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Without an active notification and code there is nothing to run. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	/* Interpret the pre-verified (code_check'd) command program. */
	for (size_t i = 0; i < code->cmdcount; i++) {
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/*
			 * Skip the next 'value' commands when the predicate
			 * argument is zero (the loop's i++ then moves past
			 * them); code_check bounds the jump target.
			 */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
478
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	assert(irq);

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		/*
		 * FRAME_ATOMIC: we are in interrupt context and must not
		 * block; silently drop the notification if allocation fails.
		 */
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up args from the scratch registers filled by the
		   top-half IRQ code. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}
513
514/** Send notification message.
515 *
516 * @param irq IRQ structure.
517 * @param a1 Driver-specific payload argument.
518 * @param a2 Driver-specific payload argument.
519 * @param a3 Driver-specific payload argument.
520 * @param a4 Driver-specific payload argument.
521 * @param a5 Driver-specific payload argument.
522 *
523 */
524void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
525 sysarg_t a4, sysarg_t a5)
526{
527 irq_spinlock_lock(&irq->lock, true);
528
529 if (irq->notif_cfg.answerbox) {
530 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
531 if (!call) {
532 irq_spinlock_unlock(&irq->lock, true);
533 return;
534 }
535
536 call->flags |= IPC_CALL_NOTIF;
537 /* Put a counter to the message */
538 call->priv = ++irq->notif_cfg.counter;
539
540 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
541 IPC_SET_ARG1(call->data, a1);
542 IPC_SET_ARG2(call->data, a2);
543 IPC_SET_ARG3(call->data, a3);
544 IPC_SET_ARG4(call->data, a4);
545 IPC_SET_ARG5(call->data, a5);
546
547 send_call(irq, call);
548 }
549
550 irq_spinlock_unlock(&irq->lock, true);
551}
552
553/** @}
554 */
Note: See TracBrowser for help on using the repository browser.