source: mainline/kernel/generic/src/ipc/irq.c@ a35b458

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply relevant conversions on load/save,
without affecting remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 14.5 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to subscribe to receive a notification
39 * when an interrupt is detected. The application may provide a simple
40 * 'top-half' handler as part of its registration, which can perform simple
41 * operations (read/write port/memory, add information to notification IPC
42 * message).
43 *
44 * The structure of a notification message is as follows:
45 * - IMETHOD: interface and method as set by the SYS_IPC_IRQ_SUBSCRIBE syscall
46 * - ARG1: payload modified by a 'top-half' handler (scratch[1])
47 * - ARG2: payload modified by a 'top-half' handler (scratch[2])
48 * - ARG3: payload modified by a 'top-half' handler (scratch[3])
49 * - ARG4: payload modified by a 'top-half' handler (scratch[4])
50 * - ARG5: payload modified by a 'top-half' handler (scratch[5])
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 */
54
55#include <arch.h>
56#include <assert.h>
57#include <mm/slab.h>
58#include <mm/page.h>
59#include <mm/km.h>
60#include <errno.h>
61#include <ddi/irq.h>
62#include <ipc/ipc.h>
63#include <ipc/irq.h>
64#include <syscall/copy.h>
65#include <console/console.h>
66#include <print.h>
67#include <macros.h>
68#include <cap/cap.h>
69
70static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
71{
72 for (size_t i = 0; i < rangecount; i++) {
73#ifdef IO_SPACE_BOUNDARY
74 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
75#endif
76 km_unmap(ranges[i].base, ranges[i].size);
77 }
78}
79
/** Map PIO ranges and rewrite the IRQ code to use kernel virtual addresses.
 *
 * Maps every PIO range into the kernel virtual address space (rewriting
 * ranges[i].base in place from physical to virtual) and then patches the
 * address of every PIO command in @a cmds from physical to the matching
 * kernel virtual address. On failure, any mappings created so far are
 * undone.
 *
 * @param ranges     PIO ranges; base addresses are rewritten in place.
 * @param rangecount Number of entries in @a ranges.
 * @param cmds       Top-half IRQ code commands; addresses rewritten in place.
 * @param cmdcount   Number of entries in @a cmds.
 *
 * @return EOK on success.
 * @return ENOMEM if a range could not be mapped.
 * @return EINVAL if a PIO command addresses memory outside all ranges.
 *
 */
static errno_t ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
    irq_cmd_t *cmds, size_t cmdcount)
{
	/* Copy the physical base addresses aside. */
	uintptr_t *pbase = malloc(rangecount * sizeof(uintptr_t), 0);
	for (size_t i = 0; i < rangecount; i++)
		pbase[i] = ranges[i].base;

	/* Map the PIO ranges into the kernel virtual address space. */
	for (size_t i = 0; i < rangecount; i++) {
#ifdef IO_SPACE_BOUNDARY
		/* Port-mapped I/O below the boundary needs no mapping. */
		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
			continue;
#endif
		ranges[i].base = km_map(pbase[i], ranges[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
		if (!ranges[i].base) {
			/* Undo only the i ranges mapped so far. */
			ranges_unmap(ranges, i);
			free(pbase);
			return ENOMEM;
		}
	}

	/* Rewrite the IRQ code addresses from physical to kernel virtual. */
	for (size_t i = 0; i < cmdcount; i++) {
		uintptr_t addr;
		size_t size;

		/* Process only commands that use an address. */
		switch (cmds[i].cmd) {
		case CMD_PIO_READ_8:
		case CMD_PIO_WRITE_8:
		case CMD_PIO_WRITE_A_8:
			size = 1;
			break;
		case CMD_PIO_READ_16:
		case CMD_PIO_WRITE_16:
		case CMD_PIO_WRITE_A_16:
			size = 2;
			break;
		case CMD_PIO_READ_32:
		case CMD_PIO_WRITE_32:
		case CMD_PIO_WRITE_A_32:
			size = 4;
			break;
		default:
			/* Move onto the next command. */
			continue;
		}

		addr = (uintptr_t) cmds[i].addr;

		size_t j;
		for (j = 0; j < rangecount; j++) {
			/*
			 * Find the matching range. Note that pbase[] holds
			 * the original physical bases; ranges[].base may
			 * already have been rewritten to a virtual address.
			 */
			if (!iswithin(pbase[j], ranges[j].size, addr, size))
				continue;

			/* Switch the command to a kernel virtual address. */
			addr -= pbase[j];
			addr += ranges[j].base;

			cmds[i].addr = (void *) addr;
			break;
		}

		if (j == rangecount) {
			/*
			 * The address used in this command is outside of all
			 * defined ranges.
			 */
			ranges_unmap(ranges, rangecount);
			free(pbase);
			return EINVAL;
		}
	}

	free(pbase);
	return EOK;
}
160
161/** Statically check the top-half IRQ code
162 *
163 * Check the top-half IRQ code for invalid or unsafe constructs.
164 *
165 */
166static errno_t code_check(irq_cmd_t *cmds, size_t cmdcount)
167{
168 for (size_t i = 0; i < cmdcount; i++) {
169 /*
170 * Check for accepted ranges.
171 */
172 if (cmds[i].cmd >= CMD_LAST)
173 return EINVAL;
174
175 if (cmds[i].srcarg >= IPC_CALL_LEN)
176 return EINVAL;
177
178 if (cmds[i].dstarg >= IPC_CALL_LEN)
179 return EINVAL;
180
181 switch (cmds[i].cmd) {
182 case CMD_PREDICATE:
183 /*
184 * Check for control flow overflow.
185 * Note that jumping just beyond the last
186 * command is a correct behaviour.
187 */
188 if (i + cmds[i].value > cmdcount)
189 return EINVAL;
190
191 break;
192 default:
193 break;
194 }
195 }
196
197 return EOK;
198}
199
200/** Free the top-half IRQ code.
201 *
202 * @param code Pointer to the top-half IRQ code.
203 *
204 */
205static void code_free(irq_code_t *code)
206{
207 if (code) {
208 ranges_unmap(code->ranges, code->rangecount);
209 free(code->ranges);
210 free(code->cmds);
211 free(code);
212 }
213}
214
215/** Copy the top-half IRQ code from userspace into the kernel.
216 *
217 * @param ucode Userspace address of the top-half IRQ code.
218 *
219 * @return Kernel address of the copied IRQ code.
220 *
221 */
222static irq_code_t *code_from_uspace(irq_code_t *ucode)
223{
224 irq_pio_range_t *ranges = NULL;
225 irq_cmd_t *cmds = NULL;
226
227 irq_code_t *code = malloc(sizeof(*code), 0);
228 errno_t rc = copy_from_uspace(code, ucode, sizeof(*code));
229 if (rc != EOK)
230 goto error;
231
232 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
233 (code->cmdcount > IRQ_MAX_PROG_SIZE))
234 goto error;
235
236 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
237 rc = copy_from_uspace(ranges, code->ranges,
238 sizeof(code->ranges[0]) * code->rangecount);
239 if (rc != EOK)
240 goto error;
241
242 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
243 rc = copy_from_uspace(cmds, code->cmds,
244 sizeof(code->cmds[0]) * code->cmdcount);
245 if (rc != EOK)
246 goto error;
247
248 rc = code_check(cmds, code->cmdcount);
249 if (rc != EOK)
250 goto error;
251
252 rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
253 code->cmdcount);
254 if (rc != EOK)
255 goto error;
256
257 code->ranges = ranges;
258 code->cmds = cmds;
259
260 return code;
261
262error:
263 if (cmds)
264 free(cmds);
265
266 if (ranges)
267 free(ranges);
268
269 free(code);
270 return NULL;
271}
272
/** Remove an IRQ from the uspace IRQ hash table, if it is hashed in.
 *
 * Idempotent: does nothing when the IRQ is not currently hashed in.
 *
 * @param irq IRQ structure to remove from the hash table.
 *
 */
static void irq_hash_out(irq_t *irq)
{
	/*
	 * Lock ordering: the table lock (taken with interrupt disabling)
	 * is acquired before the per-IRQ lock, matching the other users
	 * of these locks in this file.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	if (irq->notif_cfg.hashed_in) {
		/* Remove the IRQ from the uspace IRQ hash table. */
		hash_table_remove_item(&irq_uspace_hash_table, &irq->link);
		irq->notif_cfg.hashed_in = false;
	}

	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
287
288static void irq_destroy(void *arg)
289{
290 irq_t *irq = (irq_t *) arg;
291
292 irq_hash_out(irq);
293
294 /* Free up the IRQ code and associated structures. */
295 code_free(irq->notif_cfg.code);
296 slab_free(irq_cache, irq);
297}
298
/** Kernel object operations for IRQ objects; only destruction is needed. */
static kobject_ops_t irq_kobject_ops = {
	.destroy = irq_destroy
};
302
/** Subscribe an answerbox as a receiving end for IRQ notifications.
 *
 * Copies in the optional top-half IRQ code, allocates a capability and an
 * IRQ kernel object, hands the capability handle back to userspace, hashes
 * the IRQ into the uspace IRQ hash table and finally publishes the
 * capability. Each failure path releases everything acquired before it.
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param imethod Interface and method to be associated with the notification.
 * @param ucode   Uspace pointer to top-half IRQ code.
 *
 * @param[out] uspace_handle Uspace pointer to IRQ capability handle
 *
 * @return Error code.
 *
 */
errno_t ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod,
    irq_code_t *ucode, cap_handle_t *uspace_handle)
{
	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;

	/* Copy in and validate the top-half code, if any was supplied. */
	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;

	/*
	 * Allocate and populate the IRQ kernel object.
	 */
	cap_handle_t handle;
	errno_t rc = cap_alloc(TASK, &handle);
	if (rc != EOK)
		return rc;

	/*
	 * Hand the handle to userspace before publishing, so the syscall
	 * can still fail cleanly if the uspace buffer is bad.
	 */
	rc = copy_to_uspace(uspace_handle, &handle, sizeof(cap_handle_t));
	if (rc != EOK) {
		cap_free(TASK, handle);
		return rc;
	}

	irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
	if (!irq) {
		cap_free(TASK, handle);
		return ENOMEM;
	}

	kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC);
	if (!kobject) {
		cap_free(TASK, handle);
		slab_free(irq_cache, irq);
		return ENOMEM;
	}

	irq_initialize(irq);
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;

	/*
	 * Insert the IRQ structure into the uspace IRQ hash table.
	 * Lock ordering: table lock (with interrupt disabling) before
	 * the per-IRQ lock, as elsewhere in this file.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&irq->lock, false);

	irq->notif_cfg.hashed_in = true;
	hash_table_insert(&irq_uspace_hash_table, &irq->link);

	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);

	/* Make the capability live; ownership of irq passes to the kobject. */
	kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops);
	cap_publish(TASK, handle, kobject);

	return EOK;
}
383
/** Unsubscribe task from IRQ notification.
 *
 * Unpublishes the capability, removes the IRQ from the uspace hash table,
 * drops the kobject reference and frees the capability slot.
 *
 * @param box    Answerbox associated with the notification.
 * @param handle IRQ capability handle.
 *
 * @return EOK on success or an error code.
 *
 */
errno_t ipc_irq_unsubscribe(answerbox_t *box, int handle)
{
	kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ);
	if (!kobj)
		return ENOENT;

	/* The handle must belong to a subscription on this answerbox. */
	assert(kobj->irq->notif_cfg.answerbox == box);

	irq_hash_out(kobj->irq);

	/* May trigger irq_destroy() if this was the last reference. */
	kobject_put(kobj);
	cap_free(TASK, handle);

	return EOK;
}
407
/** Add a call to the proper answerbox queue.
 *
 * Appends the notification call to the answerbox's IRQ notification list
 * and wakes up one waiter on the answerbox wait queue.
 *
 * Assume irq->lock is locked and interrupts disabled.
 *
 * @param irq  IRQ structure referencing the target answerbox.
 * @param call IRQ notification call.
 *
 */
static void send_call(irq_t *irq, call_t *call)
{
	irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
	list_append(&call->ab_link, &irq->notif_cfg.answerbox->irq_notifs);
	irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

	waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
}
424
/** Apply the top-half IRQ code to find out whether to accept the IRQ or not.
 *
 * Interprets the pre-validated command program (see code_check()) against
 * the per-IRQ scratch registers. The program decides whether this IRQ
 * instance belongs to the subscribed driver.
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code.
 * @return IRQ_DECLINE if the interrupt is not accepted by the IRQ code.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;

	/* No notification requested or no program: nothing to claim. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;

	if (!code)
		return IRQ_DECLINE;

	for (size_t i = 0; i < code->cmdcount; i++) {
		/* srcarg/dstarg were range-checked by code_check(). */
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;

		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			scratch[dstarg] =
			    pio_read_8((ioport8_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_16:
			scratch[dstarg] =
			    pio_read_16((ioport16_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_READ_32:
			scratch[dstarg] =
			    pio_read_32((ioport32_t *) code->cmds[i].addr);
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_A_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) scratch[srcarg]);
			break;
		case CMD_PIO_WRITE_A_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) scratch[srcarg]);
			break;
		case CMD_LOAD:
			scratch[dstarg] = code->cmds[i].value;
			break;
		case CMD_AND:
			scratch[dstarg] = scratch[srcarg] &
			    code->cmds[i].value;
			break;
		case CMD_PREDICATE:
			/* Skip 'value' commands when the source is zero. */
			if (scratch[srcarg] == 0)
				i += code->cmds[i].value;

			break;
		case CMD_ACCEPT:
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			return IRQ_DECLINE;
		}
	}

	/* Falling off the end of the program declines the interrupt. */
	return IRQ_DECLINE;
}
507
/** IRQ top-half handler.
 *
 * Builds an IRQ notification message from the scratch registers filled in
 * by ipc_irq_top_half_claim() and queues it to the subscribed answerbox.
 * If the atomic call allocation fails, the notification is silently
 * dropped (we cannot block here).
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	assert(irq);

	assert(interrupts_disabled());
	assert(irq_spinlock_locked(&irq->lock));

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		/* Set up args: scratch[1..5] become ARG1..ARG5. */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);

		send_call(irq, call);
	}
}
542
/** Send notification message.
 *
 * Queues a driver-initiated IRQ notification carrying the given payload
 * to the subscribed answerbox. Silently drops the notification if the
 * atomic call allocation fails or no answerbox is subscribed.
 *
 * @param irq IRQ structure.
 * @param a1  Driver-specific payload argument.
 * @param a2  Driver-specific payload argument.
 * @param a3  Driver-specific payload argument.
 * @param a4  Driver-specific payload argument.
 * @param a5  Driver-specific payload argument.
 *
 */
void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
    sysarg_t a4, sysarg_t a5)
{
	/* Unlike the top-half handler, we must take irq->lock ourselves. */
	irq_spinlock_lock(&irq->lock, true);

	if (irq->notif_cfg.answerbox) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call) {
			irq_spinlock_unlock(&irq->lock, true);
			return;
		}

		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;

		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, a1);
		IPC_SET_ARG2(call->data, a2);
		IPC_SET_ARG3(call->data, a3);
		IPC_SET_ARG4(call->data, a4);
		IPC_SET_ARG5(call->data, a5);

		send_call(irq, call);
	}

	irq_spinlock_unlock(&irq->lock, true);
}
581
582/** @}
583 */
Note: See TracBrowser for help on using the repository browser.