source: mainline/kernel/generic/src/ipc/irq.c@ 11b285d

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 11b285d was 11b285d, checked in by Jiří Zárevúcky <jiri.zarevucky@…>, 7 years ago

Use standard signature for malloc() in kernel.

The remaining instances of blocking allocation are replaced with
a new separate function named nfmalloc (short for non-failing malloc).

  • Property mode set to 100644
File size: 14.6 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when an interrupt is detected. The application may provide a simple
40 * 'top-half' handler as part of its registration, which can perform simple
41 * operations (read/write port/memory, add information to notification IPC
42 * message).
43 *
44 * The structure of a notification message is as follows:
45 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 */
54
55#include <arch.h>
56#include <assert.h>
57#include <mm/slab.h>
58#include <mm/page.h>
59#include <mm/km.h>
60#include <errno.h>
61#include <ddi/irq.h>
62#include <ipc/ipc.h>
63#include <ipc/irq.h>
64#include <syscall/copy.h>
65#include <console/console.h>
66#include <print.h>
67#include <macros.h>
68#include <cap/cap.h>
69
70static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
71{
72 for (size_t i = 0; i < rangecount; i++) {
73#ifdef IO_SPACE_BOUNDARY
74 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
75#endif
76 km_unmap(ranges[i].base, ranges[i].size);
77 }
78}
79
/** Map PIO ranges and rewrite the top-half IRQ code to use the mappings.
 *
 * Maps each PIO range into the kernel virtual address space and then
 * patches the address operand of every PIO command from a physical
 * address to the corresponding kernel virtual address.
 *
 * @param ranges     Array of PIO ranges; their base fields are rewritten
 *                   in place to kernel virtual addresses.
 * @param rangecount Number of elements in @a ranges.
 * @param cmds       Array of top-half IRQ commands; their addr fields are
 *                   rewritten in place.
 * @param cmdcount   Number of elements in @a cmds.
 *
 * @return EOK on success.
 * @return ENOMEM if allocation or mapping fails.
 * @return EINVAL if a command address falls outside all ranges.
 *
 */
static errno_t ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t));
	if (!pbase)
		return ENOMEM;
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Port I/O below the boundary needs no memory mapping. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Roll back only the ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/* Find the matching range. */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
162
163/** Statically check the top-half IRQ code
164 *
165 * Check the top-half IRQ code for invalid or unsafe constructs.
166 *
167 */
168static errno_t code_check(irq_cmd_t *cmds, size_t cmdcount)
169{
170 for (size_t i = 0; i < cmdcount; i++) {
171 /*
172 * Check for accepted ranges.
173 */
174 if (cmds[i].cmd >= CMD_LAST)
175 return EINVAL;
176
177 if (cmds[i].srcarg >= IPC_CALL_LEN)
178 return EINVAL;
179
180 if (cmds[i].dstarg >= IPC_CALL_LEN)
181 return EINVAL;
182
183 switch (cmds[i].cmd) {
184 case CMD_PREDICATE:
185 /*
186 * Check for control flow overflow.
187 * Note that jumping just beyond the last
188 * command is a correct behaviour.
189 */
190 if (i + cmds[i].value > cmdcount)
191 return EINVAL;
192
193 break;
194 default:
195 break;
196 }
197 }
198
199 return EOK;
200}
201
202/** Free the top-half IRQ code.
203 *
204 * @param code Pointer to the top-half IRQ code.
205 *
206 */
207static void code_free(irq_code_t *code)
208{
209 if (code) {
210 ranges_unmap(code->ranges, code->rangecount);
211 free(code->ranges);
212 free(code->cmds);
213 free(code);
214 }
215}
216
217/** Copy the top-half IRQ code from userspace into the kernel.
218 *
219 * @param ucode Userspace address of the top-half IRQ code.
220 *
221 * @return Kernel address of the copied IRQ code.
222 *
223 */
224static irq_code_t *code_from_uspace(irq_code_t *ucode)
225{
226 irq_pio_range_t *ranges = NULL;
227 irq_cmd_t *cmds = NULL;
228
229 irq_code_t *code = malloc(sizeof(*code));
230 if (!code)
231 return NULL;
232 errno_t rc = copy_from_uspace(code, ucode, sizeof(*code));
233 if (rc != EOK)
234 goto error;
235
236 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
237 (code->cmdcount > IRQ_MAX_PROG_SIZE))
238 goto error;
239
240 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount);
241 if (!ranges)
242 goto error;
243 rc = copy_from_uspace(ranges, code->ranges,
244 sizeof(code->ranges[0]) * code->rangecount);
245 if (rc != EOK)
246 goto error;
247
248 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount);
249 if (!cmds)
250 goto error;
251 rc = copy_from_uspace(cmds, code->cmds,
252 sizeof(code->cmds[0]) * code->cmdcount);
253 if (rc != EOK)
254 goto error;
255
256 rc = code_check(cmds, code->cmdcount);
257 if (rc != EOK)
258 goto error;
259
260 rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
261 code->cmdcount);
262 if (rc != EOK)
263 goto error;
264
265 code->ranges = ranges;
266 code->cmds = cmds;
267
268 return code;
269
270error:
271 if (cmds)
272 free(cmds);
273
274 if (ranges)
275 free(ranges);
276
277 free(code);
278 return NULL;
279}
280
/** Remove an IRQ from the uspace IRQ hash table, if it is hashed in.
 *
 * Takes the hash table lock before the IRQ lock, matching the lock
 * ordering used by the rest of this file. Safe to call repeatedly;
 * the hashed_in flag makes the removal idempotent.
 *
 * @param irq IRQ structure to remove from the hash table.
 *
 */
static void irq_hash_out(irq_t *irq)
{
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	if (irq->notif_cfg.hashed_in) {
		/* Remove the IRQ from the uspace IRQ hash table. */
		hash_table_remove_item(&irq_uspace_hash_table, &irq->link);
		irq->notif_cfg.hashed_in = false;
	}

	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
295
296static void irq_destroy(void *arg)
297{
298 irq_t *irq = (irq_t *) arg;
299
300 irq_hash_out(irq);
301
302 /* Free up the IRQ code and associated structures. */
303 code_free(irq->notif_cfg.code);
304 slab_free(irq_cache, irq);
305}
306
/** Kernel object operations for IRQ objects. */
static kobject_ops_t irq_kobject_ops = {
	.destroy = irq_destroy
};
310
311/** Subscribe an answerbox as a receiving end for IRQ notifications.
312 *
313 * @param box Receiving answerbox.
314 * @param inr IRQ number.
315 * @param imethod Interface and method to be associated with the notification.
316 * @param ucode Uspace pointer to top-half IRQ code.
317 *
318 * @param[out] uspace_handle Uspace pointer to IRQ capability handle
319 *
320 * @return Error code.
321 *
322 */
323errno_t ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
324 irq_code_t *ucode, cap_irq_handle_t *uspace_handle)
325{
326 if ((inr < 0) || (inr > last_inr))
327 return ELIMIT;
328
329 irq_code_t *code;
330 if (ucode) {
331 code = code_from_uspace(ucode);
332 if (!code)
333 return EBADMEM;
334 } else
335 code = NULL;
336
337 /*
338 * Allocate and populate the IRQ kernel object.
339 */
340 cap_handle_t handle;
341 errno_t rc = cap_alloc(TASK, &handle);
342 if (rc != EOK)
343 return rc;
344
345 rc = copy_to_uspace(uspace_handle, &handle, sizeof(cap_handle_t));
346 if (rc != EOK) {
347 cap_free(TASK, handle);
348 return rc;
349 }
350
351 irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
352 if (!irq) {
353 cap_free(TASK, handle);
354 return ENOMEM;
355 }
356
357 kobject_t *kobject = malloc(sizeof(kobject_t));
358 if (!kobject) {
359 cap_free(TASK, handle);
360 slab_free(irq_cache, irq);
361 return ENOMEM;
362 }
363
364 irq_initialize(irq);
365 irq->inr = inr;
366 irq->claim = ipc_irq_top_half_claim;
367 irq->handler = ipc_irq_top_half_handler;
368 irq->notif_cfg.notify = true;
369 irq->notif_cfg.answerbox = box;
370 irq->notif_cfg.imethod = imethod;
371 irq->notif_cfg.code = code;
372 irq->notif_cfg.counter = 0;
373
374 /*
375 * Insert the IRQ structure into the uspace IRQ hash table.
376 */
377 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
378 irq_spinlock_lock(&irq->lock, false);
379
380 irq->notif_cfg.hashed_in = true;
381 hash_table_insert(&irq_uspace_hash_table, &irq->link);
382
383 irq_spinlock_unlock(&irq->lock, false);
384 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
385
386 kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
387 cap_publish(TASK, handle, kobject);
388
389 return EOK;
390}
391
392/** Unsubscribe task from IRQ notification.
393 *
394 * @param box Answerbox associated with the notification.
395 * @param handle IRQ capability handle.
396 *
397 * @return EOK on success or an error code.
398 *
399 */
400errno_t ipc_irq_unsubscribe(answerbox_t *box, cap_irq_handle_t handle)
401{
402 kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
403 if (!kobj)
404 return ENOENT;
405
406 assert(kobj->irq->notif_cfg.answerbox == box);
407
408 irq_hash_out(kobj->irq);
409
410 kobject_put(kobj);
411 cap_free(TASK, handle);
412
413 return EOK;
414}
415
416/** Add a call to the proper answerbox queue.
417 *
418 * Assume irq->lock is locked and interrupts disabled.
419 *
420 * @param irq IRQ structure referencing the target answerbox.
421 * @param call IRQ notification call.
422 *
423 */
424static void send_call(irq_t *irq, call_t *call)
425{
426 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
427 list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
428 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
429
430 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
431}
432
/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
 *
 * Interprets the subscribed command stream. PIO commands read/write the
 * device; LOAD/AND manipulate the scratch registers that become the
 * notification payload; PREDICATE conditionally skips commands.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* Without notifications enabled or code installed, decline. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		/* srcarg/dstarg were range-checked by code_check(). */
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/*
			 * Skip the next 'value' commands when the source
			 * scratch register is zero; the jump target was
			 * bounds-checked by code_check().
			 */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
515
516/* IRQ top-half handler.
517 *
518 * We expect interrupts to be disabled and the irq->lock already held.
519 *
520 * @param irq IRQ structure.
521 *
522 */
523void ipc_irq_top_half_handler(irq_t *irq)
524{
525 assert(irq);
526
527 assert(interrupts_disabled());
528 assert(irq_spinlock_locked(&irq->lock));
529
530 if (irq->notif_cfg.answerbox) {
531 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
532 if (!call)
533 return;
534
535 call->flags |= IPC_CALL_NOTIF;
536 /* Put a counter to the message */
537 call->priv = ++irq->notif_cfg.counter;
538
539 /* Set up args */
540 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
541 IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
542 IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
543 IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
544 IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
545 IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
546
547 send_call(irq, call);
548 }
549}
550
551/** Send notification message.
552 *
553 * @param irq IRQ structure.
554 * @param a1 Driver-specific payload argument.
555 * @param a2 Driver-specific payload argument.
556 * @param a3 Driver-specific payload argument.
557 * @param a4 Driver-specific payload argument.
558 * @param a5 Driver-specific payload argument.
559 *
560 */
561void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
562 sysarg_t a4, sysarg_t a5)
563{
564 irq_spinlock_lock(&irq->lock, true);
565
566 if (irq->notif_cfg.answerbox) {
567 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
568 if (!call) {
569 irq_spinlock_unlock(&irq->lock, true);
570 return;
571 }
572
573 call->flags |= IPC_CALL_NOTIF;
574 /* Put a counter to the message */
575 call->priv = ++irq->notif_cfg.counter;
576
577 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
578 IPC_SET_ARG1(call->data, a1);
579 IPC_SET_ARG2(call->data, a2);
580 IPC_SET_ARG3(call->data, a3);
581 IPC_SET_ARG4(call->data, a4);
582 IPC_SET_ARG5(call->data, a5);
583
584 send_call(irq, call);
585 }
586
587 irq_spinlock_unlock(&irq->lock, true);
588}
589
590/** @}
591 */
Note: See TracBrowser for help on using the repository browser.