source: mainline/kernel/generic/src/ipc/irq.c@ ad12b5ea

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since ad12b5ea was 55b77d9, checked in by Jiri Svoboda <jiri@…>, 14 years ago

Separate list_t typedef from link_t (kernel part).

  • list_t represents lists
  • Use list_first(), list_last(), list_empty() where appropriate
  • Use list_foreach() where possible
  • Replace improper uses of list_prepend() with list_insert_after()
  • Replace improper uses of list_append() with list_insert_before()
  • Property mode set to 100644
File size: 15.9 KB
Line 
1/*
2 * Copyright (c) 2006 Ondrej Palkovsky
3 * Copyright (c) 2006 Jakub Jermar
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup genericipc
31 * @{
32 */
33
34/**
35 * @file
36 * @brief IRQ notification framework.
37 *
38 * This framework allows applications to register to receive a notification
39 * when interrupt is detected. The application may provide a simple 'top-half'
40 * handler as part of its registration, which can perform simple operations
41 * (read/write port/memory, add information to notification ipc message).
42 *
43 * The structure of a notification message is as follows:
44 * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
45 * syscall
46 * - ARG1: payload modified by a 'top-half' handler
47 * - ARG2: payload modified by a 'top-half' handler
48 * - ARG3: payload modified by a 'top-half' handler
49 * - ARG4: payload modified by a 'top-half' handler
50 * - ARG5: payload modified by a 'top-half' handler
51 * - in_phone_hash: interrupt counter (may be needed to assure correct order
52 * in multithreaded drivers)
53 *
54 * Note on synchronization for ipc_irq_register(), ipc_irq_unregister(),
55 * ipc_irq_cleanup() and IRQ handlers:
56 *
57 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock
58 * and answerbox lock, we can rule out race conditions between the
59 * registration functions and also the cleanup function. Thus the observer can
60 * either see the IRQ structure present in both the hash table and the
61 * answerbox list or absent in both. Views in which the IRQ structure would be
62 * linked in the hash table but not in the answerbox list, or vice versa, are
63 * not possible.
64 *
65 * By always taking the hash table lock and the IRQ structure lock, we can
66 * rule out a scenario in which we would free up an IRQ structure, which is
67 * still referenced by, for example, an IRQ handler. The locking scheme forces
68 * us to lock the IRQ structure only after any progressing IRQs on that
69 * structure are finished. Because we hold the hash table lock, we prevent new
70 * IRQs from taking new references to the IRQ structure.
71 *
72 */
73
74#include <arch.h>
75#include <mm/slab.h>
76#include <errno.h>
77#include <ddi/irq.h>
78#include <ipc/ipc.h>
79#include <ipc/irq.h>
80#include <syscall/copy.h>
81#include <console/console.h>
82#include <print.h>
83
84/** Free the top-half pseudocode.
85 *
86 * @param code Pointer to the top-half pseudocode.
87 *
88 */
89static void code_free(irq_code_t *code)
90{
91 if (code) {
92 free(code->cmds);
93 free(code);
94 }
95}
96
/** Copy the top-half pseudocode from userspace into the kernel.
 *
 * @param ucode Userspace address of the top-half pseudocode.
 *
 * @return Kernel address of the copied pseudocode or NULL on failure
 *         (bad userspace address or over-sized program).
 *
 */
static irq_code_t *code_from_uspace(irq_code_t *ucode)
{
	/* Flag 0: blocking allocation — presumably cannot fail; verify slab semantics. */
	irq_code_t *code = malloc(sizeof(*code), 0);
	int rc = copy_from_uspace(code, ucode, sizeof(*code));
	if (rc != 0) {
		free(code);
		return NULL;
	}
	
	/* Bound the program size so userspace cannot force a huge allocation. */
	if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
		free(code);
		return NULL;
	}
	
	/*
	 * code->cmds still points into userspace at this point; stash that
	 * pointer and replace it with a kernel-side copy of the command array.
	 */
	irq_cmd_t *ucmds = code->cmds;
	code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
	rc = copy_from_uspace(code->cmds, ucmds,
	    sizeof(code->cmds[0]) * code->cmdcount);
	if (rc != 0) {
		free(code->cmds);
		free(code);
		return NULL;
	}
	
	return code;
}
130
/** Register an answerbox as a receiving end for IRQ notifications.
 *
 * Takes the uspace IRQ hash table lock, the IRQ structure lock and the
 * answerbox lock so that the IRQ is observed either in both the hash table
 * and the answerbox list, or in neither (see the locking discussion in the
 * file header).
 *
 * @param box     Receiving answerbox.
 * @param inr     IRQ number.
 * @param devno   Device number.
 * @param imethod Interface and method to be associated with the
 *                notification.
 * @param ucode   Uspace pointer to top-half pseudocode (may be NULL).
 *
 * @return EOK on success or a negative error code (ELIMIT for an
 *         out-of-range inr, EBADMEM for bad pseudocode, EEXISTS if the
 *         (inr, devno) pair is already registered).
 *
 */
int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
    sysarg_t imethod, irq_code_t *ucode)
{
	/* Hash table key is the (inr, devno) pair. */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};
	
	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;
	
	irq_code_t *code;
	if (ucode) {
		code = code_from_uspace(ucode);
		if (!code)
			return EBADMEM;
	} else
		code = NULL;
	
	/*
	 * Allocate and populate the IRQ structure.
	 */
	irq_t *irq = malloc(sizeof(irq_t), 0);
	
	irq_initialize(irq);
	irq->devno = devno;
	irq->inr = inr;
	irq->claim = ipc_irq_top_half_claim;
	irq->handler = ipc_irq_top_half_handler;
	irq->notif_cfg.notify = true;
	irq->notif_cfg.answerbox = box;
	irq->notif_cfg.imethod = imethod;
	irq->notif_cfg.code = code;
	irq->notif_cfg.counter = 0;
	/* Remember the caller's address space for the CMD_MEM_* top-half commands. */
	irq->driver_as = AS;
	
	/*
	 * Enlist the IRQ structure in the uspace IRQ hash table and the
	 * answerbox's list.
	 */
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	
	link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);
	if (hlp) {
		/* The (inr, devno) pair is already taken — roll back and bail out. */
		irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);
		
		/* hirq is locked (hash_table_find returned it locked) */
		irq_spinlock_unlock(&hirq->lock, false);
		code_free(code);
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		
		free(irq);
		return EEXISTS;
	}
	
	/* Locking is not really necessary, but paranoid */
	irq_spinlock_lock(&irq->lock, false);
	irq_spinlock_lock(&box->irq_lock, false);
	
	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
	list_append(&irq->notif_cfg.link, &box->irq_list);
	
	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq->lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
	
	return EOK;
}
210
/** Unregister task from IRQ notification.
 *
 * @param box   Answerbox associated with the notification.
 * @param inr   IRQ number.
 * @param devno Device number.
 *
 * @return EOK on success or a negative error code (ELIMIT for an
 *         out-of-range inr, ENOENT if the (inr, devno) pair is not
 *         registered).
 */
int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
{
	/* Hash table key is the (inr, devno) pair. */
	sysarg_t key[] = {
		(sysarg_t) inr,
		(sysarg_t) devno
	};
	
	if ((inr < 0) || (inr > last_inr))
		return ELIMIT;
	
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);
	if (!lnk) {
		irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
		return ENOENT;
	}
	
	irq_t *irq = hash_table_get_instance(lnk, irq_t, link);
	
	/* irq is locked (hash_table_find returned it locked) */
	irq_spinlock_lock(&box->irq_lock, false);
	
	ASSERT(irq->notif_cfg.answerbox == box);
	
	/* Free up the pseudo code and associated structures. */
	code_free(irq->notif_cfg.code);
	
	/* Remove the IRQ from the answerbox's list. */
	list_remove(&irq->notif_cfg.link);
	
	/*
	 * We need to drop the IRQ lock now because hash_table_remove() will try
	 * to reacquire it. That basically violates the natural locking order,
	 * but a deadlock in hash_table_remove() is prevented by the fact that
	 * we already held the IRQ lock and didn't drop the hash table lock in
	 * the meantime.
	 */
	irq_spinlock_unlock(&irq->lock, false);
	
	/* Remove the IRQ from the uspace IRQ hash table. */
	hash_table_remove(&irq_uspace_hash_table, key, 2);
	
	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
	
	/* Free up the IRQ structure. */
	free(irq);
	
	return EOK;
}
268
/** Disconnect all IRQ notifications from an answerbox.
 *
 * This function is effective because the answerbox contains
 * list of all irq_t structures that are registered to
 * send notifications to it.
 *
 * @param box Answerbox for which we want to carry out the cleanup.
 *
 */
void ipc_irq_cleanup(answerbox_t *box)
{
loop:
	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
	irq_spinlock_lock(&box->irq_lock, false);
	
	while (!list_empty(&box->irq_list)) {
		DEADLOCK_PROBE_INIT(p_irqlock);
		
		irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
		    notif_cfg.link);
		
		/*
		 * The natural order is IRQ lock first, then hash table /
		 * answerbox locks, so only trylock is safe here; on failure
		 * drop everything and restart from scratch.
		 */
		if (!irq_spinlock_trylock(&irq->lock)) {
			/*
			 * Avoid deadlock by trying again.
			 */
			irq_spinlock_unlock(&box->irq_lock, false);
			irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
			DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
			goto loop;
		}
		
		/* Reconstruct the (inr, devno) hash table key from the IRQ itself. */
		sysarg_t key[2];
		key[0] = irq->inr;
		key[1] = irq->devno;
		
		ASSERT(irq->notif_cfg.answerbox == box);
		
		/* Unlist from the answerbox. */
		list_remove(&irq->notif_cfg.link);
		
		/* Free up the pseudo code and associated structures. */
		code_free(irq->notif_cfg.code);
		
		/*
		 * We need to drop the IRQ lock now because hash_table_remove()
		 * will try to reacquire it. That basically violates the natural
		 * locking order, but a deadlock in hash_table_remove() is
		 * prevented by the fact that we already held the IRQ lock and
		 * didn't drop the hash table lock in the meantime.
		 */
		irq_spinlock_unlock(&irq->lock, false);
		
		/* Remove from the hash table. */
		hash_table_remove(&irq_uspace_hash_table, key, 2);
		
		free(irq);
	}
	
	irq_spinlock_unlock(&box->irq_lock, false);
	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
}
330
331/** Add a call to the proper answerbox queue.
332 *
333 * Assume irq->lock is locked and interrupts disabled.
334 *
335 * @param irq IRQ structure referencing the target answerbox.
336 * @param call IRQ notification call.
337 *
338 */
339static void send_call(irq_t *irq, call_t *call)
340{
341 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false);
342 list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs);
343 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);
344
345 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
346}
347
/** Apply the top-half pseudo code to find out whether to accept the IRQ or not.
 *
 * Interprets the registered pseudocode program command by command. Scratch
 * register 0 is reserved (a zero srcarg/dstarg means "no register"), which is
 * why the stores below are guarded by "if (dstarg)" / "if (srcarg)".
 *
 * @param irq IRQ structure.
 *
 * @return IRQ_ACCEPT if the interrupt is accepted by the
 *         pseudocode, IRQ_DECLINE otherwise.
 *
 */
irq_ownership_t ipc_irq_top_half_claim(irq_t *irq)
{
	irq_code_t *code = irq->notif_cfg.code;
	uint32_t *scratch = irq->notif_cfg.scratch;
	
	/* Without notifications enabled and a program to run, nothing to claim. */
	if (!irq->notif_cfg.notify)
		return IRQ_DECLINE;
	
	if (!code)
		return IRQ_DECLINE;
	
/*
 * The CMD_MEM_* commands access the driver's memory, so they may need to
 * temporarily switch to the driver's address space (restored before return).
 */
#define CMD_MEM_READ(target) \
do { \
	void *va = code->cmds[i].addr; \
	if (AS != irq->driver_as) \
		as_switch(AS, irq->driver_as); \
	memcpy_from_uspace(&target, va, (sizeof(target))); \
	if (dstarg) \
		scratch[dstarg] = target; \
} while(0)

#define CMD_MEM_WRITE(val) \
do { \
	void *va = code->cmds[i].addr; \
	if (AS != irq->driver_as) \
		as_switch(AS, irq->driver_as); \
	memcpy_to_uspace(va, &val, sizeof(val)); \
} while (0)

	/* Remember the interrupted address space so every exit path can restore it. */
	as_t *current_as = AS;
	size_t i;
	for (i = 0; i < code->cmdcount; i++) {
		uint32_t dstval;
		uintptr_t srcarg = code->cmds[i].srcarg;
		uintptr_t dstarg = code->cmds[i].dstarg;
		
		/* Reject programs with out-of-range scratch register indices. */
		if (srcarg >= IPC_CALL_LEN)
			break;
		
		if (dstarg >= IPC_CALL_LEN)
			break;
		
		switch (code->cmds[i].cmd) {
		case CMD_PIO_READ_8:
			dstval = pio_read_8((ioport8_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_16:
			dstval = pio_read_16((ioport16_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_READ_32:
			dstval = pio_read_32((ioport32_t *) code->cmds[i].addr);
			if (dstarg)
				scratch[dstarg] = dstval;
			break;
		case CMD_PIO_WRITE_8:
			pio_write_8((ioport8_t *) code->cmds[i].addr,
			    (uint8_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_16:
			pio_write_16((ioport16_t *) code->cmds[i].addr,
			    (uint16_t) code->cmds[i].value);
			break;
		case CMD_PIO_WRITE_32:
			pio_write_32((ioport32_t *) code->cmds[i].addr,
			    (uint32_t) code->cmds[i].value);
			break;
		/* The _A_ variants write a value taken from a scratch register. */
		case CMD_PIO_WRITE_A_8:
			if (srcarg) {
				pio_write_8((ioport8_t *) code->cmds[i].addr,
				    (uint8_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_16:
			if (srcarg) {
				pio_write_16((ioport16_t *) code->cmds[i].addr,
				    (uint16_t) scratch[srcarg]);
			}
			break;
		case CMD_PIO_WRITE_A_32:
			if (srcarg) {
				pio_write_32((ioport32_t *) code->cmds[i].addr,
				    (uint32_t) scratch[srcarg]);
			}
			break;
		case CMD_MEM_READ_8: {
			uint8_t val;
			CMD_MEM_READ(val);
			break;
		}
		case CMD_MEM_READ_16: {
			uint16_t val;
			CMD_MEM_READ(val);
			break;
		}
		case CMD_MEM_READ_32: {
			uint32_t val;
			CMD_MEM_READ(val);
			break;
		}
		case CMD_MEM_WRITE_8: {
			uint8_t val = code->cmds[i].value;
			CMD_MEM_WRITE(val);
			break;
		}
		case CMD_MEM_WRITE_16: {
			uint16_t val = code->cmds[i].value;
			CMD_MEM_WRITE(val);
			break;
		}
		case CMD_MEM_WRITE_32: {
			uint32_t val = code->cmds[i].value;
			CMD_MEM_WRITE(val);
			break;
		}
		case CMD_MEM_WRITE_A_8:
			if (srcarg) {
				uint8_t val = scratch[srcarg];
				CMD_MEM_WRITE(val);
			}
			break;
		case CMD_MEM_WRITE_A_16:
			if (srcarg) {
				uint16_t val = scratch[srcarg];
				CMD_MEM_WRITE(val);
			}
			break;
		case CMD_MEM_WRITE_A_32:
			if (srcarg) {
				uint32_t val = scratch[srcarg];
				CMD_MEM_WRITE(val);
			}
			break;
		case CMD_BTEST:
			/* Bit-test: AND a scratch register with an immediate mask. */
			if ((srcarg) && (dstarg)) {
				dstval = scratch[srcarg] & code->cmds[i].value;
				scratch[dstarg] = dstval;
			}
			break;
		case CMD_PREDICATE:
			/*
			 * Conditional forward skip: if the scratch register is
			 * zero, jump over the next 'value' commands (the loop
			 * condition bounds any overshoot).
			 */
			if ((srcarg) && (!scratch[srcarg])) {
				i += code->cmds[i].value;
				continue;
			}
			break;
		case CMD_ACCEPT:
			if (AS != current_as)
				as_switch(AS, current_as);
			return IRQ_ACCEPT;
		case CMD_DECLINE:
		default:
			if (AS != current_as)
				as_switch(AS, current_as);
			return IRQ_DECLINE;
		}
	}
	/* Falling off the end of the program (or a bad register index) declines. */
	if (AS != current_as)
		as_switch(AS, current_as);
	
	return IRQ_DECLINE;
}
520
/** IRQ top-half handler.
 *
 * We expect interrupts to be disabled and the irq->lock already held.
 *
 * Builds a notification call from scratch registers 1-5 (as filled in by
 * ipc_irq_top_half_claim()) and queues it on the registered answerbox.
 * Allocation failure silently drops the notification.
 *
 * @param irq IRQ structure.
 *
 */
void ipc_irq_top_half_handler(irq_t *irq)
{
	ASSERT(irq);
	
	ASSERT(interrupts_disabled());
	ASSERT(irq_spinlock_locked(&irq->lock));
	
	if (irq->notif_cfg.answerbox) {
		/* FRAME_ATOMIC: we are in interrupt context and must not block. */
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (!call)
			return;
		
		call->flags |= IPC_CALL_NOTIF;
		/* Put a counter to the message */
		call->priv = ++irq->notif_cfg.counter;
		
		/* Set up args */
		IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
		IPC_SET_ARG1(call->data, irq->notif_cfg.scratch[1]);
		IPC_SET_ARG2(call->data, irq->notif_cfg.scratch[2]);
		IPC_SET_ARG3(call->data, irq->notif_cfg.scratch[3]);
		IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]);
		IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]);
		
		send_call(irq, call);
	}
}
555
556/** Send notification message.
557 *
558 * @param irq IRQ structure.
559 * @param a1 Driver-specific payload argument.
560 * @param a2 Driver-specific payload argument.
561 * @param a3 Driver-specific payload argument.
562 * @param a4 Driver-specific payload argument.
563 * @param a5 Driver-specific payload argument.
564 *
565 */
566void ipc_irq_send_msg(irq_t *irq, sysarg_t a1, sysarg_t a2, sysarg_t a3,
567 sysarg_t a4, sysarg_t a5)
568{
569 irq_spinlock_lock(&irq->lock, true);
570
571 if (irq->notif_cfg.answerbox) {
572 call_t *call = ipc_call_alloc(FRAME_ATOMIC);
573 if (!call) {
574 irq_spinlock_unlock(&irq->lock, true);
575 return;
576 }
577
578 call->flags |= IPC_CALL_NOTIF;
579 /* Put a counter to the message */
580 call->priv = ++irq->notif_cfg.counter;
581
582 IPC_SET_IMETHOD(call->data, irq->notif_cfg.imethod);
583 IPC_SET_ARG1(call->data, a1);
584 IPC_SET_ARG2(call->data, a2);
585 IPC_SET_ARG3(call->data, a3);
586 IPC_SET_ARG4(call->data, a4);
587 IPC_SET_ARG5(call->data, a5);
588
589 send_call(irq, call);
590 }
591
592 irq_spinlock_unlock(&irq->lock, true);
593}
594
595/** @}
596 */
Note: See TracBrowser for help on using the repository browser.