- Timestamp:
- 2017-09-30T06:29:42Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 300f4c4
- Parents:
- d076f16 (diff), 6636fb19 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel
- Files:
- 2 added
- 4 deleted
- 44 edited
-
kernel/Makefile
rd076f16 r91b60499
@@ -209 +209 @@
 generic/src/ddi/ddi.c \
 generic/src/ddi/irq.c \
-generic/src/ddi/device.c \
 generic/src/debug/symtab.c \
 generic/src/debug/stacktrace.c \
@@ -272 +271 @@
 generic/src/ipc/sysipc.c \
 generic/src/ipc/sysipc_ops.c \
-generic/src/ipc/ops/clnestab.c \
 generic/src/ipc/ops/conctmeto.c \
 generic/src/ipc/ops/concttome.c \
-generic/src/ipc/ops/connclone.c \
 generic/src/ipc/ops/dataread.c \
 generic/src/ipc/ops/datawrite.c \
@@ -286 +283 @@
 generic/src/ipc/irq.c \
 generic/src/ipc/event.c \
+generic/src/cap/cap.c \
 generic/src/security/perm.c \
 generic/src/sysinfo/sysinfo.c \
-
kernel/arch/arm32/src/interrupt.c
rd076f16 r91b60499
@@ -38 +38 @@
 #include <arch/machine_func.h>
 #include <ddi/irq.h>
-#include <ddi/device.h>
 #include <interrupt.h>

-
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
rd076f16 r91b60499
@@ -44 +44 @@
 #include <mm/km.h>
 #include <ddi/ddi.h>
-#include <ddi/device.h>

 static void bbxm_init(void);
@@ -117 +116 @@
 static irq_t timer_irq;
 irq_initialize(&timer_irq);
-timer_irq.devno = device_assign_devno();
 timer_irq.inr = AMDM37x_GPT1_IRQ;
 timer_irq.claim = bb_timer_irq_claim;
-
kernel/arch/arm32/src/mach/beaglebone/beaglebone.c
rd076f16 r91b60499
@@ -46 +46 @@
 #include <interrupt.h>
 #include <ddi/ddi.h>
-#include <ddi/device.h>
 #include <mm/km.h>

@@ -129 +128 @@
 static irq_t timer_irq;
 irq_initialize(&timer_irq);
-timer_irq.devno = device_assign_devno();
 timer_irq.inr = AM335x_DMTIMER2_IRQ;
 timer_irq.claim = bbone_timer_irq_claim;
-
kernel/arch/arm32/src/mach/gta02/gta02.c
rd076f16 r91b60499
@@ -48 +48 @@
 #include <interrupt.h>
 #include <ddi/ddi.h>
-#include <ddi/device.h>
 #include <log.h>

@@ -241 +240 @@
 {
 irq_initialize(&gta02_timer_irq);
-gta02_timer_irq.devno = device_assign_devno();
 gta02_timer_irq.inr = GTA02_TIMER_IRQ;
 gta02_timer_irq.claim = gta02_timer_irq_claim;
-
kernel/arch/arm32/src/mach/integratorcp/integratorcp.c
rd076f16 r91b60499
@@ -43 +43 @@
 #include <console/console.h>
 #include <sysinfo/sysinfo.h>
-#include <ddi/device.h>
 #include <mm/page.h>
 #include <mm/frame.h>
@@ -207 +206 @@
 {
 irq_initialize(&icp.timer_irq);
-icp.timer_irq.devno = device_assign_devno();
 icp.timer_irq.inr = ICP_TIMER_IRQ;
 icp.timer_irq.claim = icp_timer_claim;
-
kernel/arch/arm32/src/mach/raspberrypi/raspberrypi.c
rd076f16 r91b60499
@@ -50 +50 @@
 #include <interrupt.h>
 #include <ddi/ddi.h>
-#include <ddi/device.h>

 #define RPI_DEFAULT_MEMORY_START 0
@@ -118 +117 @@
 static irq_t timer_irq;
 irq_initialize(&timer_irq);
-timer_irq.devno = device_assign_devno();
 timer_irq.inr = BCM2835_TIMER1_IRQ;
 timer_irq.claim = raspberrypi_timer_irq_claim;
-
kernel/arch/ia32/src/drivers/i8254.c
rd076f16 r91b60499
@@ -51 +51 @@
 #include <arch.h>
 #include <ddi/irq.h>
-#include <ddi/device.h>

 #define CLK_PORT1 ((ioport8_t *) 0x40U)
@@ -86 +85 @@
 irq_initialize(&i8254_irq);
 i8254_irq.preack = true;
-i8254_irq.devno = device_assign_devno();
 i8254_irq.inr = IRQ_CLK;
 i8254_irq.claim = i8254_claim;
-
kernel/arch/ia32/src/smp/apic.c
rd076f16 r91b60499
@@ -47 +47 @@
 #include <arch.h>
 #include <ddi/irq.h>
-#include <ddi/device.h>

 #ifdef CONFIG_SMP
@@ -190 +189 @@
 irq_initialize(&l_apic_timer_irq);
 l_apic_timer_irq.preack = true;
-l_apic_timer_irq.devno = device_assign_devno();
 l_apic_timer_irq.inr = IRQ_CLK;
 l_apic_timer_irq.claim = l_apic_timer_claim;
-
kernel/arch/ia64/src/drivers/it.c
rd076f16 r91b60499
@@ -42 +42 @@
 #include <time/clock.h>
 #include <ddi/irq.h>
-#include <ddi/device.h>
 #include <arch.h>

@@ -66 +65 @@
 irq_initialize(&it_irq);
 it_irq.inr = INTERRUPT_TIMER;
-it_irq.devno = device_assign_devno();
 it_irq.claim = it_claim;
 it_irq.handler = it_interrupt;
-
kernel/arch/mips32/src/interrupt.c
rd076f16 r91b60499
@@ -41 +41 @@
 #include <time/clock.h>
 #include <ipc/sysipc.h>
-#include <ddi/device.h>

 #define IRQ_COUNT 8
@@ -175 +174 @@

 irq_initialize(&timer_irq);
-timer_irq.devno = device_assign_devno();
 timer_irq.inr = TIMER_IRQ;
 timer_irq.claim = timer_claim;
@@ -186 +184 @@
 #ifdef MACHINE_msim
 irq_initialize(&dorder_irq);
-dorder_irq.devno = device_assign_devno();
 dorder_irq.inr = DORDER_IRQ;
 dorder_irq.claim = dorder_claim;
-
kernel/arch/sparc64/src/drivers/niagara.c
rd076f16 r91b60499
@@ -39 +39 @@
 #include <console/console.h>
 #include <ddi/ddi.h>
-#include <ddi/device.h>
 #include <arch/asm.h>
 #include <arch.h>
-
kernel/genarch/src/drivers/dsrln/dsrlnin.c
rd076f16 r91b60499
@@ -40 +40 @@
 #include <mm/slab.h>
 #include <arch/asm.h>
-#include <ddi/device.h>

 static irq_ownership_t dsrlnin_claim(irq_t *irq)
@@ -64 +63 @@

 irq_initialize(&instance->irq);
-instance->irq.devno = device_assign_devno();
 instance->irq.inr = inr;
 instance->irq.claim = dsrlnin_claim;
-
kernel/genarch/src/drivers/i8042/i8042.c
rd076f16 r91b60499
@@ -44 +44 @@
 #include <console/chardev.h>
 #include <mm/slab.h>
-#include <ddi/device.h>
 #include <time/delay.h>

@@ -113 +112 @@

 irq_initialize(&instance->irq);
-instance->irq.devno = device_assign_devno();
 instance->irq.inr = inr;
 instance->irq.claim = i8042_claim;
-
kernel/genarch/src/drivers/ns16550/ns16550.c
rd076f16 r91b60499
@@ -41 +41 @@
 #include <console/chardev.h>
 #include <mm/slab.h>
-#include <ddi/device.h>
 #include <str.h>

@@ -138 +137 @@

 irq_initialize(&instance->irq);
-instance->irq.devno = device_assign_devno();
 instance->irq.inr = inr;
 instance->irq.claim = ns16550_claim;
-
kernel/genarch/src/drivers/omap/uart.c
rd076f16 r91b60499
@@ -37 +37 @@
 #include <assert.h>
 #include <genarch/drivers/omap/uart.h>
-#include <ddi/device.h>
 #include <str.h>
 #include <mm/km.h>
@@ -161 +160 @@
 /* Initialize IRQ */
 irq_initialize(&uart->irq);
-uart->irq.devno = device_assign_devno();
 uart->irq.inr = interrupt;
 uart->irq.claim = omap_uart_claim;
-
kernel/genarch/src/drivers/pl011/pl011.c
rd076f16 r91b60499
@@ -39 +39 @@
 #include <console/chardev.h>
 #include <console/console.h>
-#include <ddi/device.h>
 #include <arch/asm.h>
 #include <mm/slab.h>
@@ -127 +126 @@
 /* Initialize IRQ */
 irq_initialize(&uart->irq);
-uart->irq.devno = device_assign_devno();
 uart->irq.inr = interrupt;
 uart->irq.claim = pl011_uart_claim;
-
kernel/genarch/src/drivers/pl050/pl050.c
rd076f16 r91b60499
@@ -41 +41 @@
 #include <console/chardev.h>
 #include <mm/slab.h>
-#include <ddi/device.h>

 #define PL050_KEY_RELEASE 0xF0
@@ -87 +86 @@

 irq_initialize(&instance->irq);
-instance->irq.devno = device_assign_devno();
 instance->irq.inr = inr;
 instance->irq.claim = pl050_claim;
-
kernel/genarch/src/drivers/s3c24xx/uart.c
rd076f16 r91b60499
@@ -42 +42 @@
 #include <console/chardev.h>
 #include <console/console.h>
-#include <ddi/device.h>
 #include <arch/asm.h>
 #include <mm/slab.h>
@@ -123 +122 @@
 /* Initialize IRQ structure. */
 irq_initialize(&uart->irq);
-uart->irq.devno = device_assign_devno();
 uart->irq.inr = inr;
 uart->irq.claim = s3c24xx_uart_claim;
-
kernel/genarch/src/drivers/via-cuda/cuda.c
rd076f16 r91b60499
@@ -40 +40 @@
 #include <arch/asm.h>
 #include <mm/slab.h>
-#include <ddi/device.h>
 #include <synch/spinlock.h>
 #include <mem.h>
@@ -106 +105 @@

 irq_initialize(&instance->irq);
-instance->irq.devno = device_assign_devno();
 instance->irq.inr = inr;
 instance->irq.claim = cuda_claim;
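All of the driver hunks above make the same two changes: the <ddi/device.h> include goes away and the drivers no longer assign a devno before registering their interrupt, so an IRQ is now identified by its inr alone. A minimal sketch of the resulting in-kernel registration sequence; the INR constant and the claim/handler routines are placeholders standing in for a driver's own code, and irq_register() is the existing registration call from <ddi/irq.h>, which these hunks do not show:

    /* Sketch, not changeset code: post-change IRQ registration in a driver. */
    static irq_t timer_irq;

    static void timer_irq_setup(void)
    {
        irq_initialize(&timer_irq);
        /* No timer_irq.devno = device_assign_devno() anymore. */
        timer_irq.inr = TIMER_INR;              /* placeholder INR constant */
        timer_irq.claim = timer_irq_claim;      /* driver's claim() routine */
        timer_irq.handler = timer_irq_handler;  /* driver's top-half handler */
        irq_register(&timer_irq);
    }
-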
kernel/generic/include/adt/hash_table.h
rd076f16 r91b60499
@@ -82 +82 @@
 extern link_t *hash_table_find(hash_table_t *h, sysarg_t key[]);
 extern void hash_table_remove(hash_table_t *h, sysarg_t key[], size_t keys);
+extern void hash_table_remove_item(hash_table_t *h, link_t *item);

 #endif
-
kernel/generic/include/ddi/irq.h
rd076f16 r91b60499
@@ -43 +43 @@
 #include <proc/task.h>
 #include <ipc/ipc.h>
+#include <mm/slab.h>
+
+typedef enum {
+	IRQ_HT_KEY_INR,
+	IRQ_HT_KEY_MODE
+} irq_ht_key_t;
+
+typedef enum {
+	IRQ_HT_MODE_CLAIM,
+	IRQ_HT_MODE_NO_CLAIM
+} irq_ht_mode_t;

 typedef enum {
@@ -70 +81 @@
 /** When false, notifications are not sent. */
 bool notify;
+/** True if the structure is in irq_uspace_hash_table */
+bool hashed_in;
 /** Answerbox for notifications. */
 answerbox_t *answerbox;
@@ -76 +89 @@
 /** Arguments that will be sent if the IRQ is claimed. */
 uint32_t scratch[IPC_CALL_LEN];
-/** Top-half pseudocode. */
+/** Top-half IRQ code. */
 irq_code_t *code;
 /** Counter. */
 size_t counter;
-
-/**
- * Link between IRQs that are notifying the same answerbox. The list is
- * protected by the answerbox irq_lock.
- */
-link_t link;
 } ipc_notif_cfg_t;

@@ -91 +98 @@
 *
 * If one device has multiple interrupts, there will be multiple irq_t
-* instantions with the same devno.
-*
+* instantions.
 */
 typedef struct irq {
@@ -112 +118 @@
 */
 bool preack;
-
-/** Unique device number. -1 if not yet assigned. */
-devno_t devno;

 /** Actual IRQ number. -1 if not yet assigned. */
@@ -139 +142 @@
 extern hash_table_t irq_uspace_hash_table;

+extern slab_cache_t *irq_slab;
+
 extern inr_t last_inr;

-
kernel/generic/include/ipc/ipc.h
rd076f16 r91b60499
@@ -42 +42 @@
 #include <abi/proc/task.h>
 #include <typedefs.h>
-
-#define IPC_MAX_PHONES  64
+#include <mm/slab.h>
+#include <cap/cap.h>

 struct answerbox;
@@ -62 +62 @@

 /** Structure identifying phone (in TASK structure) */
-typedef struct {
+typedef struct phone {
 mutex_t lock;
 link_t link;
@@ -69 +69 @@
 ipc_phone_state_t state;
 atomic_t active_calls;
+kobject_t *kobject;
 } phone_t;

@@ -94 +95 @@
 /** Notifications from IRQ handlers. */
 list_t irq_notifs;
-/** IRQs with notifications to this answerbox. */
-list_t irq_list;
 } answerbox_t;

@@ -170 +169 @@
 } call_t;

+extern slab_cache_t *phone_slab;
+
 extern answerbox_t *ipc_phone_0;

-
kernel/generic/include/ipc/ipcrsc.h
rd076f16 r91b60499
@@ -38 +38 @@
 #include <proc/task.h>
 #include <ipc/ipc.h>
+#include <cap/cap.h>

 extern call_t *get_call(sysarg_t);
-extern int phone_get(sysarg_t, phone_t **);
-extern int phone_alloc(task_t *);
-extern bool phone_connect(int, answerbox_t *);
-extern void phone_dealloc(int);
+extern cap_handle_t phone_alloc(task_t *);
+extern bool phone_connect(cap_handle_t, answerbox_t *);
+extern void phone_dealloc(cap_handle_t);

 #endif
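The phone-management interface now deals in capability handles rather than indices into a fixed per-task phone array. A rough sketch of the intended caller-side flow, based on the declarations above and on the kbox.c and conctmeto.c hunks later in this changeset; the wrapper function and its error handling are illustrative, not code from the changeset:

    /* Illustrative only: allocate a phone capability for the current task
     * and connect it to an answerbox. */
    static int example_phone_open(answerbox_t *box)
    {
        cap_handle_t handle = phone_alloc(TASK);
        if (handle < 0)
            return handle;      /* negative error code, e.g. ENOMEM */

        if (!phone_connect(handle, box))
            return ENOENT;      /* illustrative error value */

        return handle;          /* capability handle usable by the task */
    }
-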
kernel/generic/include/ipc/irq.h
rd076f16 r91b60499
@@ -47 +47 @@
 #include <adt/list.h>

-extern int ipc_irq_subscribe(answerbox_t *, inr_t, devno_t, sysarg_t,
-    irq_code_t *);

 extern irq_ownership_t ipc_irq_top_half_claim(irq_t *);
 extern void ipc_irq_top_half_handler(irq_t *);

-extern int ipc_irq_unsubscribe(answerbox_t *, inr_t, devno_t);
-extern void ipc_irq_cleanup(answerbox_t *);
+extern int ipc_irq_subscribe(answerbox_t *, inr_t, sysarg_t, irq_code_t *);
+extern int ipc_irq_unsubscribe(answerbox_t *, int);

 /*
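ipc_irq_subscribe() no longer takes a devno and, per the ipc/irq.c hunks later in this changeset, now returns an IRQ capability handle (or a negative error code); ipc_irq_unsubscribe() takes that handle instead of an (inr, devno) pair, and ipc_irq_cleanup() disappears because per-task capability cleanup covers it. A hedged sketch of the pairing, with placeholder argument values:

    /* Sketch: subscribe the current task's answerbox to an interrupt and
     * later unsubscribe via the returned capability handle. */
    static int example_irq_subscription(inr_t inr, sysarg_t imethod,
        irq_code_t *ucode)
    {
        int handle = ipc_irq_subscribe(&TASK->answerbox, inr, imethod, ucode);
        if (handle < 0)
            return handle;      /* negative error code */

        /* ... notifications now arrive in TASK->answerbox ... */

        return ipc_irq_unsubscribe(&TASK->answerbox, handle);
    }
-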
kernel/generic/include/ipc/sysipc.h
rd076f16 r91b60499
@@ -56 +56 @@
 extern sysarg_t sys_ipc_hangup(sysarg_t);

-extern sysarg_t sys_ipc_irq_subscribe(inr_t, devno_t, sysarg_t, irq_code_t *);
-extern sysarg_t sys_ipc_irq_unsubscribe(inr_t, devno_t);
+extern sysarg_t sys_ipc_irq_subscribe(inr_t, sysarg_t, irq_code_t *);
+extern sysarg_t sys_ipc_irq_unsubscribe(sysarg_t);

 #ifdef __32_BITS__
-
kernel/generic/include/proc/task.h
rd076f16 r91b60499
@@ -60 +60 @@
 #include <abi/sysinfo.h>
 #include <arch.h>
+#include <cap/cap.h>

 #define TASK THE->task
@@ -65 +66 @@

 struct thread;
+struct cap;

 /** Task structure. */
@@ -95 +97 @@
 /** Task permissions. */
 perm_t perms;
+
+/** Capabilities */
+cap_info_t *cap_info;

 /* IPC stuff */
@@ -100 +105 @@
 /** Receiving communication endpoint */
 answerbox_t answerbox;
-
-/** Sending communication endpoints */
-phone_t phones[IPC_MAX_PHONES];

 /** Spinlock protecting the active_calls list. */
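With the phones[IPC_MAX_PHONES] array removed from task_t, code that used to index or loop over a task's phones now walks its phone capabilities, as the ipc.c hunks later in this changeset do with caps_apply_to_kobject_type(). A sketch of that callback pattern; the iteration call and the cap->kobject->phone access follow those hunks, while the counting callback itself is an illustrative example:

    /* Illustrative callback: count a task's connected phones. Returning
     * true lets the walk continue, as the ipc.c callbacks below do. */
    static bool count_connected_cb(cap_t *cap, void *arg)
    {
        size_t *count = (size_t *) arg;
        phone_t *phone = cap->kobject->phone;

        mutex_lock(&phone->lock);
        if (phone->state == IPC_PHONE_CONNECTED)
            (*count)++;
        mutex_unlock(&phone->lock);

        return true;
    }

    /* ... */
    size_t connected = 0;
    caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_PHONE,
        count_connected_cb, &connected);
-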
kernel/generic/include/typedefs.h
rd076f16 r91b60499
@@ -49 +49 @@

 typedef int32_t inr_t;
-typedef int32_t devno_t;

 typedef volatile uint8_t ioport8_t;
-
kernel/generic/src/adt/hash_table.c
rd076f16 r91b60499
@@ -190 +190 @@
 }

+/** Remove an existing item from hash table.
+ *
+ * @param h    Hash table.
+ * @param item Item to remove from the hash table.
+ */
+void hash_table_remove_item(hash_table_t *h, link_t *item)
+{
+	assert(h);
+	assert(h->op);
+
+	list_remove(item);
+	if (h->op->remove_callback)
+		h->op->remove_callback(item);
+}
+
 /** @}
 */
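hash_table_remove_item() removes an entry identified by its link alone. This matters for the reworked IRQ hash table in ipc/irq.c below: the new compare operation only matches lookups made in IRQ_HT_MODE_CLAIM mode, so the key-based hash_table_remove() can no longer find uspace IRQ entries for removal. A short sketch of the new removal path, using the names from the IRQ code later in this changeset:

    /* Sketch: drop an IRQ object from the uspace IRQ hash table by its
     * link; the table's remove_callback still runs (in the IRQ code below
     * it unlocks irq->lock). */
    if (irq->notif_cfg.hashed_in) {
        hash_table_remove_item(&irq_uspace_hash_table, &irq->link);
        irq->notif_cfg.hashed_in = false;
    }
-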
kernel/generic/src/console/kconsole.c
rd076f16 r91b60499
@@ -55 +55 @@
 #include <str.h>
 #include <sysinfo/sysinfo.h>
-#include <ddi/device.h>
 #include <symtab.h>
 #include <errno.h>
-
kernel/generic/src/ddi/irq.c
rd076f16 r91b60499 32 32 /** 33 33 * @file 34 * @brief IRQ dispatcher. 35 * 36 * This file provides means of connecting IRQs with particular devices and logic 37 * for dispatching interrupts to IRQ handlers defined by those devices. 38 * 39 * This code is designed to support: 40 * - multiple devices sharing single IRQ 41 * - multiple IRQs per single device 42 * - multiple instances of the same device 43 * 44 * 45 * Note about architectures. 46 * 47 * Some architectures have the term IRQ well defined. Examples of such 48 * architectures include amd64, ia32 and mips32. Some other architectures, such 49 * as sparc64, don't use the term at all. In those cases, we boldly step forward 50 * and define what an IRQ is. 51 * 52 * The implementation is generic enough and still allows the architectures to 53 * use the hardware layout effectively. For instance, on amd64 and ia32, where 54 * there is only 16 IRQs, the irq_hash_table can be optimized to a 55 * one-dimensional array. Next, when it is known that the IRQ numbers (aka 56 * INR's) are unique, the claim functions can always return IRQ_ACCEPT. 57 * 58 * 59 * Note about the irq_hash_table. 60 * 61 * The hash table is configured to use two keys: inr and devno. However, the 62 * hash index is computed only from inr. Moreover, if devno is -1, the match is 63 * based on the return value of the claim() function instead of on devno. 34 * @brief IRQ dispatcher 35 * 36 * This file provides means of connecting IRQs with respective device drivers 37 * and logic for dispatching interrupts to IRQ handlers defined by those 38 * drivers. 64 39 */ 65 40 … … 74 49 #include <arch.h> 75 50 76 #define KEY_INR 0 77 #define KEY_DEVNO 1 78 79 /** Spinlock protecting the kernel IRQ hash table. 51 slab_cache_t *irq_slab = NULL; 52 53 /** Spinlock protecting the kernel IRQ hash table 80 54 * 81 55 * This lock must be taken only when interrupts are disabled. … … 87 61 static hash_table_t irq_kernel_hash_table; 88 62 89 /** Spinlock protecting the uspace IRQ hash table .63 /** Spinlock protecting the uspace IRQ hash table 90 64 * 91 65 * This lock must be taken only when interrupts are disabled. … … 94 68 IRQ_SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock); 95 69 96 /** The uspace IRQ hash table .*/70 /** The uspace IRQ hash table */ 97 71 hash_table_t irq_uspace_hash_table; 98 72 99 /**100 * Hash table operations for cases when we know that there will be collisions101 * between different keys.102 */103 73 static size_t irq_ht_hash(sysarg_t *key); 104 74 static bool irq_ht_compare(sysarg_t *key, size_t keys, link_t *item); … … 111 81 }; 112 82 113 /** 114 * Hash table operations for cases when we know that there will be no collisions 115 * between different keys. However, there might be still collisions among 116 * elements with single key (sharing of one IRQ). 117 */ 118 static size_t irq_lin_hash(sysarg_t *key); 119 static bool irq_lin_compare(sysarg_t *key, size_t keys, link_t *item); 120 static void irq_lin_remove(link_t *item); 121 122 static hash_table_operations_t irq_lin_ops = { 123 .hash = irq_lin_hash, 124 .compare = irq_lin_compare, 125 .remove_callback = irq_lin_remove, 126 }; 127 128 /** Number of buckets in either of the hash tables. 
*/ 83 /** Number of buckets in either of the hash tables */ 129 84 static size_t buckets; 130 85 131 /** Last valid INR .*/86 /** Last valid INR */ 132 87 inr_t last_inr = 0; 133 88 134 /** Initialize IRQ subsystem .135 * 136 * @param inrs Numbers of unique IRQ numbers or INRs.137 * @param chains Number of chains in the hash table.89 /** Initialize IRQ subsystem 90 * 91 * @param inrs Numbers of unique IRQ numbers or INRs. 92 * @param chains Number of buckets in the hash table. 138 93 * 139 94 */ … … 143 98 last_inr = inrs - 1; 144 99 145 /* 146 * Be smart about the choice of the hash table operations. In cases in 147 * which inrs equals the requested number of chains (i.e. where there is 148 * no collision between different keys), we can use optimized set of 149 * operations. 150 */ 151 if (inrs == chains) { 152 hash_table_create(&irq_uspace_hash_table, chains, 2, 153 &irq_lin_ops); 154 hash_table_create(&irq_kernel_hash_table, chains, 2, 155 &irq_lin_ops); 156 } else { 157 hash_table_create(&irq_uspace_hash_table, chains, 2, 158 &irq_ht_ops); 159 hash_table_create(&irq_kernel_hash_table, chains, 2, 160 &irq_ht_ops); 161 } 162 } 163 164 /** Initialize one IRQ structure. 165 * 166 * @param irq Pointer to the IRQ structure to be initialized. 100 irq_slab = slab_cache_create("irq_t", sizeof(irq_t), 0, NULL, NULL, 101 FRAME_ATOMIC); 102 assert(irq_slab); 103 104 hash_table_create(&irq_uspace_hash_table, chains, 2, &irq_ht_ops); 105 hash_table_create(&irq_kernel_hash_table, chains, 2, &irq_ht_ops); 106 } 107 108 /** Initialize one IRQ structure 109 * 110 * @param irq Pointer to the IRQ structure to be initialized. 167 111 * 168 112 */ … … 172 116 link_initialize(&irq->link); 173 117 irq_spinlock_initialize(&irq->lock, "irq.lock"); 174 link_initialize(&irq->notif_cfg.link);175 118 irq->inr = -1; 176 irq->devno = -1;177 119 178 120 irq_initialize_arch(irq); 179 121 } 180 122 181 /** Register IRQ for device .123 /** Register IRQ for device 182 124 * 183 125 * The irq structure must be filled with information about the interrupt source 184 126 * and with the claim() function pointer and handler() function pointer. 185 127 * 186 * @param irq IRQ structure belonging to a device.128 * @param irq IRQ structure belonging to a device. 187 129 * 188 130 */ … … 190 132 { 191 133 sysarg_t key[] = { 192 (sysarg_t) irq->inr,193 (sysarg_t) irq->devno134 [IRQ_HT_KEY_INR] = (sysarg_t) irq->inr, 135 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM 194 136 }; 195 137 … … 201 143 } 202 144 203 /** Search and lock the uspace IRQ hash table. 204 * 205 */ 145 /** Search and lock the uspace IRQ hash table */ 206 146 static irq_t *irq_dispatch_and_lock_uspace(inr_t inr) 207 147 { 208 148 link_t *lnk; 209 149 sysarg_t key[] = { 210 (sysarg_t) inr,211 (sysarg_t) -1 /* Search will use claim() instead of devno */150 [IRQ_HT_KEY_INR] = (sysarg_t) inr, 151 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_CLAIM 212 152 }; 213 153 … … 224 164 } 225 165 226 /** Search and lock the kernel IRQ hash table. 
227 * 228 */ 166 /** Search and lock the kernel IRQ hash table */ 229 167 static irq_t *irq_dispatch_and_lock_kernel(inr_t inr) 230 168 { 231 169 link_t *lnk; 232 170 sysarg_t key[] = { 233 (sysarg_t) inr,234 (sysarg_t) -1 /* Search will use claim() instead of devno */171 [IRQ_HT_KEY_INR] = (sysarg_t) inr, 172 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_CLAIM 235 173 }; 236 174 … … 247 185 } 248 186 249 /** Dispatch the IRQ .187 /** Dispatch the IRQ 250 188 * 251 189 * We assume this function is only called from interrupt context (i.e. that … … 255 193 * return with interrupts disabled and holding the respective structure. 256 194 * 257 * @param inr Interrupt number (aka inr or irq).195 * @param inr Interrupt number (aka inr or irq). 258 196 * 259 197 * @return IRQ structure of the respective device … … 285 223 } 286 224 287 /** Compute hash index for the key. 288 * 289 * This function computes hash index into the IRQ hash table for which there can 290 * be collisions between different INRs. 291 * 292 * The devno is not used to compute the hash. 293 * 294 * @param key The first of the keys is inr and the second is devno or -1. 225 /** Compute hash index for the key 226 * 227 * @param key The first of the keys is inr and the second is mode. Only inr is 228 * used to compute the hash. 295 229 * 296 230 * @return Index into the hash table. … … 299 233 size_t irq_ht_hash(sysarg_t key[]) 300 234 { 301 inr_t inr = (inr_t) key[ KEY_INR];235 inr_t inr = (inr_t) key[IRQ_HT_KEY_INR]; 302 236 return inr % buckets; 303 237 } 304 238 305 /** Compare hash table element with a key. 306 * 307 * There are two things to note about this function. First, it is used for the 308 * more complex architecture setup in which there are way too many interrupt 309 * numbers (i.e. inr's) to arrange the hash table so that collisions occur only 310 * among same inrs of different devnos. So the explicit check for inr match must 311 * be done. Second, if devno is -1, the second key (i.e. devno) is not used for 312 * the match and the result of the claim() function is used instead. 239 /** Compare hash table element with a key 240 * 241 * If mode is IRQ_HT_MODE_CLAIM, the result of the claim() function is used for 242 * the match. Otherwise the key does not match. 313 243 * 314 244 * This function assumes interrupts are already disabled. 315 245 * 316 * @param key Keys (i.e. inr and devno).317 * @param keys This is 2.318 * @param item The item to compare the key with.319 * 320 * @return true on match321 * @return false on no match246 * @param key Keys (i.e. inr and mode). 247 * @param keys This is 2. 248 * @param item The item to compare the key with. 249 * 250 * @return True on match 251 * @return False on no match 322 252 * 323 253 */ … … 325 255 { 326 256 irq_t *irq = hash_table_get_instance(item, irq_t, link); 327 inr_t inr = (inr_t) key[ KEY_INR];328 devno_t devno = (devno_t) key[KEY_DEVNO];257 inr_t inr = (inr_t) key[IRQ_HT_KEY_INR]; 258 irq_ht_mode_t mode = (irq_ht_mode_t) key[IRQ_HT_KEY_MODE]; 329 259 330 260 bool rv; 331 261 332 262 irq_spinlock_lock(&irq->lock, false); 333 if ( devno == -1) {263 if (mode == IRQ_HT_MODE_CLAIM) { 334 264 /* Invoked by irq_dispatch_and_lock(). */ 335 rv = ((irq->inr == inr) && 336 (irq->claim(irq) == IRQ_ACCEPT)); 265 rv = ((irq->inr == inr) && (irq->claim(irq) == IRQ_ACCEPT)); 337 266 } else { 338 267 /* Invoked by irq_find_and_lock(). 
*/ 339 rv = ((irq->inr == inr) && (irq->devno == devno));268 rv = false; 340 269 } 341 270 … … 347 276 } 348 277 349 /** Unlock IRQ structure after hash_table_remove() .350 * 351 * @param lnk Link in the removed and locked IRQ structure.278 /** Unlock IRQ structure after hash_table_remove() 279 * 280 * @param lnk Link in the removed and locked IRQ structure. 352 281 */ 353 282 void irq_ht_remove(link_t *lnk) … … 358 287 } 359 288 360 /** Compute hash index for the key.361 *362 * This function computes hash index into the IRQ hash table for which there are363 * no collisions between different INRs.364 *365 * @param key The first of the keys is inr and the second is devno or -1.366 *367 * @return Index into the hash table.368 *369 */370 size_t irq_lin_hash(sysarg_t key[])371 {372 inr_t inr = (inr_t) key[KEY_INR];373 return inr;374 }375 376 /** Compare hash table element with a key.377 *378 * There are two things to note about this function. First, it is used for the379 * less complex architecture setup in which there are not too many interrupt380 * numbers (i.e. inr's) to arrange the hash table so that collisions occur only381 * among same inrs of different devnos. So the explicit check for inr match is382 * not done. Second, if devno is -1, the second key (i.e. devno) is not used383 * for the match and the result of the claim() function is used instead.384 *385 * This function assumes interrupts are already disabled.386 *387 * @param key Keys (i.e. inr and devno).388 * @param keys This is 2.389 * @param item The item to compare the key with.390 *391 * @return true on match392 * @return false on no match393 *394 */395 bool irq_lin_compare(sysarg_t key[], size_t keys, link_t *item)396 {397 irq_t *irq = list_get_instance(item, irq_t, link);398 devno_t devno = (devno_t) key[KEY_DEVNO];399 bool rv;400 401 irq_spinlock_lock(&irq->lock, false);402 if (devno == -1) {403 /* Invoked by irq_dispatch_and_lock() */404 rv = (irq->claim(irq) == IRQ_ACCEPT);405 } else {406 /* Invoked by irq_find_and_lock() */407 rv = (irq->devno == devno);408 }409 410 /* unlock only on non-match */411 if (!rv)412 irq_spinlock_unlock(&irq->lock, false);413 414 return rv;415 }416 417 /** Unlock IRQ structure after hash_table_remove().418 *419 * @param lnk Link in the removed and locked IRQ structure.420 *421 */422 void irq_lin_remove(link_t *lnk)423 {424 irq_t *irq __attribute__((unused))425 = hash_table_get_instance(lnk, irq_t, link);426 irq_spinlock_unlock(&irq->lock, false);427 }428 429 289 /** @} 430 290 */ -
kernel/generic/src/ipc/ipc.c
rd076f16 r91b60499 43 43 #include <synch/waitq.h> 44 44 #include <ipc/ipc.h> 45 #include <ipc/ipcrsc.h> 45 46 #include <abi/ipc/methods.h> 46 47 #include <ipc/kbox.h> … … 58 59 #include <arch/interrupt.h> 59 60 #include <ipc/irq.h> 61 #include <cap/cap.h> 60 62 61 63 static void ipc_forget_call(call_t *); … … 64 66 answerbox_t *ipc_phone_0 = NULL; 65 67 66 static slab_cache_t *ipc_call_slab; 67 static slab_cache_t *ipc_answerbox_slab; 68 static slab_cache_t *call_slab; 69 static slab_cache_t *answerbox_slab; 70 71 slab_cache_t *phone_slab = NULL; 68 72 69 73 /** Initialize a call structure. … … 93 97 if (call->buffer) 94 98 free(call->buffer); 95 slab_free( ipc_call_slab, call);99 slab_free(call_slab, call); 96 100 } 97 101 } … … 110 114 call_t *ipc_call_alloc(unsigned int flags) 111 115 { 112 call_t *call = slab_alloc( ipc_call_slab, flags);116 call_t *call = slab_alloc(call_slab, flags); 113 117 if (call) { 114 118 _ipc_call_init(call); … … 145 149 list_initialize(&box->answers); 146 150 list_initialize(&box->irq_notifs); 147 list_initialize(&box->irq_list);148 151 box->task = task; 149 152 } … … 151 154 /** Connect a phone to an answerbox. 152 155 * 153 * @param phone Initialized phone structure. 154 * @param box Initialized answerbox structure. 155 * @return True if the phone was connected, false otherwise. 156 * This function must be passed a reference to phone->kobject. 157 * 158 * @param phone Initialized phone structure. 159 * @param box Initialized answerbox structure. 160 * @return True if the phone was connected, false otherwise. 156 161 */ 157 162 bool ipc_phone_connect(phone_t *phone, answerbox_t *box) … … 166 171 phone->state = IPC_PHONE_CONNECTED; 167 172 phone->callee = box; 173 /* Pass phone->kobject reference to box->connected_phones */ 168 174 list_append(&phone->link, &box->connected_phones); 169 175 } … … 171 177 irq_spinlock_unlock(&box->lock, true); 172 178 mutex_unlock(&phone->lock); 179 180 if (!active) { 181 /* We still have phone->kobject's reference; drop it */ 182 kobject_put(phone->kobject); 183 } 173 184 174 185 return active; … … 188 199 phone->state = IPC_PHONE_FREE; 189 200 atomic_set(&phone->active_calls, 0); 201 phone->kobject = NULL; 190 202 } 191 203 … … 200 212 int ipc_call_sync(phone_t *phone, call_t *request) 201 213 { 202 answerbox_t *mybox = slab_alloc( ipc_answerbox_slab, 0);214 answerbox_t *mybox = slab_alloc(answerbox_slab, 0); 203 215 ipc_answerbox_init(mybox, TASK); 204 216 … … 208 220 int rc = ipc_call(phone, request); 209 221 if (rc != EOK) { 210 slab_free( ipc_answerbox_slab, mybox);222 slab_free(answerbox_slab, mybox); 211 223 return rc; 212 224 } … … 255 267 assert(!answer || request == answer); 256 268 257 slab_free( ipc_answerbox_slab, mybox);269 slab_free(answerbox_slab, mybox); 258 270 return rc; 259 271 } … … 453 465 list_remove(&phone->link); 454 466 irq_spinlock_unlock(&box->lock, true); 467 468 /* Drop the answerbox reference */ 469 kobject_put(phone->kobject); 455 470 456 471 call_t *call = ipc_call_alloc(0); … … 655 670 656 671 task_release(phone->caller); 672 673 kobject_put(phone->kobject); 657 674 658 675 /* Must start again */ … … 661 678 662 679 mutex_unlock(&phone->lock); 680 kobject_put(phone->kobject); 663 681 } 664 682 … … 707 725 * Nota bene: there may still be answers waiting for pick up. 708 726 */ 709 spinlock_unlock(&TASK->active_calls_lock); 727 spinlock_unlock(&TASK->active_calls_lock); 710 728 return; 711 729 } … … 720 738 * call on the list. 
721 739 */ 722 spinlock_unlock(&TASK->active_calls_lock); 740 spinlock_unlock(&TASK->active_calls_lock); 723 741 goto restart; 724 742 } … … 727 745 728 746 goto restart; 747 } 748 749 static bool phone_cap_wait_cb(cap_t *cap, void *arg) 750 { 751 phone_t *phone = cap->kobject->phone; 752 bool *restart = (bool *) arg; 753 754 mutex_lock(&phone->lock); 755 if ((phone->state == IPC_PHONE_HUNGUP) && 756 (atomic_get(&phone->active_calls) == 0)) { 757 phone->state = IPC_PHONE_FREE; 758 phone->callee = NULL; 759 } 760 761 /* 762 * We might have had some IPC_PHONE_CONNECTING phones at the beginning 763 * of ipc_cleanup(). Depending on whether these were forgotten or 764 * answered, they will eventually enter the IPC_PHONE_FREE or 765 * IPC_PHONE_CONNECTED states, respectively. In the latter case, the 766 * other side may slam the open phones at any time, in which case we 767 * will get an IPC_PHONE_SLAMMED phone. 768 */ 769 if ((phone->state == IPC_PHONE_CONNECTED) || 770 (phone->state == IPC_PHONE_SLAMMED)) { 771 mutex_unlock(&phone->lock); 772 ipc_phone_hangup(phone); 773 /* 774 * Now there may be one extra active call, which needs to be 775 * forgotten. 776 */ 777 ipc_forget_all_active_calls(); 778 *restart = true; 779 return false; 780 } 781 782 /* 783 * If the hangup succeeded, it has sent a HANGUP message, the IPC is now 784 * in HUNGUP state, we wait for the reply to come 785 */ 786 if (phone->state != IPC_PHONE_FREE) { 787 mutex_unlock(&phone->lock); 788 return false; 789 } 790 791 mutex_unlock(&phone->lock); 792 return true; 729 793 } 730 794 … … 733 797 { 734 798 call_t *call; 735 size_t i;799 bool restart; 736 800 737 801 restart: … … 740 804 * Locking is needed as there may be connection handshakes in progress. 741 805 */ 742 for (i = 0; i < IPC_MAX_PHONES; i++) { 743 phone_t *phone = &TASK->phones[i]; 744 745 mutex_lock(&phone->lock); 746 if ((phone->state == IPC_PHONE_HUNGUP) && 747 (atomic_get(&phone->active_calls) == 0)) { 748 phone->state = IPC_PHONE_FREE; 749 phone->callee = NULL; 750 } 751 752 /* 753 * We might have had some IPC_PHONE_CONNECTING phones at the 754 * beginning of ipc_cleanup(). Depending on whether these were 755 * forgotten or answered, they will eventually enter the 756 * IPC_PHONE_FREE or IPC_PHONE_CONNECTED states, respectively. 757 * In the latter case, the other side may slam the open phones 758 * at any time, in which case we will get an IPC_PHONE_SLAMMED 759 * phone. 760 */ 761 if ((phone->state == IPC_PHONE_CONNECTED) || 762 (phone->state == IPC_PHONE_SLAMMED)) { 763 mutex_unlock(&phone->lock); 764 ipc_phone_hangup(phone); 765 /* 766 * Now there may be one extra active call, which needs 767 * to be forgotten. 
768 */ 769 ipc_forget_all_active_calls(); 770 goto restart; 771 } 772 773 /* 774 * If the hangup succeeded, it has sent a HANGUP message, the 775 * IPC is now in HUNGUP state, we wait for the reply to come 776 */ 777 if (phone->state != IPC_PHONE_FREE) { 778 mutex_unlock(&phone->lock); 779 break; 780 } 781 782 mutex_unlock(&phone->lock); 783 } 784 785 /* Got into cleanup */ 786 if (i == IPC_MAX_PHONES) 806 restart = false; 807 if (caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_PHONE, 808 phone_cap_wait_cb, &restart)) { 809 /* Got into cleanup */ 787 810 return; 788 811 } 812 if (restart) 813 goto restart; 814 789 815 call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT, 790 816 SYNCH_FLAGS_NONE); … … 795 821 ipc_call_free(call); 796 822 goto restart; 823 } 824 825 static bool phone_cap_cleanup_cb(cap_t *cap, void *arg) 826 { 827 ipc_phone_hangup(cap->kobject->phone); 828 return true; 829 } 830 831 static bool irq_cap_cleanup_cb(cap_t *cap, void *arg) 832 { 833 ipc_irq_unsubscribe(&TASK->answerbox, cap->handle); 834 return true; 797 835 } 798 836 … … 816 854 817 855 /* Disconnect all our phones ('ipc_phone_hangup') */ 818 for (size_t i = 0; i < IPC_MAX_PHONES; i++)819 ipc_phone_hangup(&TASK->phones[i]);856 caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_PHONE, 857 phone_cap_cleanup_cb, NULL); 820 858 821 859 /* Unsubscribe from any event notifications. */ 822 860 event_cleanup_answerbox(&TASK->answerbox); 823 861 824 /* Disconnect all connected irqs */ 825 ipc_irq_cleanup(&TASK->answerbox); 862 /* Disconnect all connected IRQs */ 863 caps_apply_to_kobject_type(TASK, KOBJECT_TYPE_IRQ, irq_cap_cleanup_cb, 864 NULL); 826 865 827 866 /* Disconnect all phones connected to our regular answerbox */ … … 847 886 void ipc_init(void) 848 887 { 849 ipc_call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL,888 call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL, 850 889 NULL, 0); 851 ipc_answerbox_slab = slab_cache_create("answerbox_t", 852 sizeof(answerbox_t), 0, NULL, NULL, 0); 890 phone_slab = slab_cache_create("phone_t", sizeof(phone_t), 0, NULL, 891 NULL, 0); 892 answerbox_slab = slab_cache_create("answerbox_t", sizeof(answerbox_t), 893 0, NULL, NULL, 0); 853 894 } 854 895 … … 885 926 } 886 927 928 static bool print_task_phone_cb(cap_t *cap, void *arg) 929 { 930 phone_t *phone = cap->kobject->phone; 931 932 mutex_lock(&phone->lock); 933 if (phone->state != IPC_PHONE_FREE) { 934 printf("%-11d %7" PRIun " ", cap->handle, 935 atomic_get(&phone->active_calls)); 936 937 switch (phone->state) { 938 case IPC_PHONE_CONNECTING: 939 printf("connecting"); 940 break; 941 case IPC_PHONE_CONNECTED: 942 printf("connected to %" PRIu64 " (%s)", 943 phone->callee->task->taskid, 944 phone->callee->task->name); 945 break; 946 case IPC_PHONE_SLAMMED: 947 printf("slammed by %p", phone->callee); 948 break; 949 case IPC_PHONE_HUNGUP: 950 printf("hung up by %p", phone->callee); 951 break; 952 default: 953 break; 954 } 955 956 printf("\n"); 957 } 958 mutex_unlock(&phone->lock); 959 960 return true; 961 } 962 887 963 /** List answerbox contents. 
888 964 * … … 894 970 irq_spinlock_lock(&tasks_lock, true); 895 971 task_t *task = task_find_by_id(taskid); 896 897 972 if (!task) { 898 973 irq_spinlock_unlock(&tasks_lock, true); 899 974 return; 900 975 } 901 902 /* Hand-over-hand locking */ 903 irq_spinlock_exchange(&tasks_lock, &task->lock); 904 905 printf("[phone id] [calls] [state\n"); 906 907 size_t i; 908 for (i = 0; i < IPC_MAX_PHONES; i++) { 909 if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { 910 printf("%-10zu (mutex busy)\n", i); 911 continue; 912 } 913 914 if (task->phones[i].state != IPC_PHONE_FREE) { 915 printf("%-10zu %7" PRIun " ", i, 916 atomic_get(&task->phones[i].active_calls)); 917 918 switch (task->phones[i].state) { 919 case IPC_PHONE_CONNECTING: 920 printf("connecting"); 921 break; 922 case IPC_PHONE_CONNECTED: 923 printf("connected to %" PRIu64 " (%s)", 924 task->phones[i].callee->task->taskid, 925 task->phones[i].callee->task->name); 926 break; 927 case IPC_PHONE_SLAMMED: 928 printf("slammed by %p", 929 task->phones[i].callee); 930 break; 931 case IPC_PHONE_HUNGUP: 932 printf("hung up by %p", 933 task->phones[i].callee); 934 break; 935 default: 936 break; 937 } 938 939 printf("\n"); 940 } 941 942 mutex_unlock(&task->phones[i].lock); 943 } 944 976 task_hold(task); 977 irq_spinlock_unlock(&tasks_lock, true); 978 979 printf("[phone cap] [calls] [state\n"); 980 981 caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE, 982 print_task_phone_cb, NULL); 983 984 irq_spinlock_lock(&task->lock, true); 945 985 irq_spinlock_lock(&task->answerbox.lock, false); 946 986 … … 964 1004 irq_spinlock_unlock(&task->answerbox.lock, false); 965 1005 irq_spinlock_unlock(&task->lock, true); 1006 1007 task_release(task); 966 1008 } 967 1009 -
kernel/generic/src/ipc/ipcrsc.c
rd076f16 r91b60499 39 39 * 40 40 * The pattern of usage of the resources is: 41 * - allocate empty phone slot, connect | deallocate slot41 * - allocate empty phone capability slot, connect | deallocate slot 42 42 * - disconnect connected phone (some messages might be on the fly) 43 43 * - find phone in slot and send a message using phone … … 53 53 * atomic on all platforms) 54 54 * 55 * - To find an empty phone slot, the TASK must be locked55 * - To find an empty phone capability slot, the TASK must be locked 56 56 * - To answer a message, the answerbox must be locked 57 57 * - The locking of phone and answerbox is done at the ipc_ level. … … 77 77 * 78 78 * *** Connect_to_me *** 79 * The caller sends IPC_M_CONNECT_TO_ME. 79 * The caller sends IPC_M_CONNECT_TO_ME. 80 80 * The server receives an automatically opened phoneid. If it accepts 81 * (RETVAL=0), it can use the phoneid immediately. 82 * Possible race condition can arise, when the client receives messages from new83 * connection before getting response for connect_to_me message. Userspace84 * should implement handshakeprotocol that would control it.81 * (RETVAL=0), it can use the phoneid immediately. Possible race condition can 82 * arise, when the client receives messages from new connection before getting 83 * response for connect_to_me message. Userspace should implement handshake 84 * protocol that would control it. 85 85 * 86 86 * Phone hangup … … 89 89 * - The phone is disconnected (no more messages can be sent over this phone), 90 90 * all in-progress messages are correctly handled. The answerbox receives 91 * IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async 92 * callsare answered, the phone is deallocated.91 * IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async calls 92 * are answered, the phone is deallocated. 93 93 * 94 94 * *** The answerbox hangs up (ipc_answer(EHANGUP)) 95 * - The phone is disconnected. EHANGUP response code is sent 96 * to the calling task. All new calls through this phone 97 * get a EHUNGUP error code, the task is expected to 98 * send an sys_ipc_hangup after cleaning up its internal structures. 95 * - The phone is disconnected. EHANGUP response code is sent to the calling 96 * task. All new calls through this phone get a EHUNGUP error code, the task 97 * is expected to send an sys_ipc_hangup after cleaning up its internal 98 * structures. 99 * 99 100 * 100 101 * Call forwarding 101 102 * 102 * The call can be forwarded, so that the answer to call is passed directly 103 * t o the original sender. However, this poses special problems regarding104 * routingof hangup messages.103 * The call can be forwarded, so that the answer to call is passed directly to 104 * the original sender. However, this poses special problems regarding routing 105 * of hangup messages. 105 106 * 106 107 * sys_ipc_hangup -> IPC_M_PHONE_HUNGUP … … 133 134 #include <assert.h> 134 135 #include <abi/errno.h> 136 #include <cap/cap.h> 137 #include <mm/slab.h> 135 138 136 139 /** Find call_t * in call table according to callid. … … 138 141 * @todo Some speedup (hash table?) 139 142 * 140 * @param callid Userspace hash of the call. Currently it is the call 141 * structure kernel address. 142 * 143 * @return NULL on not found, otherwise pointer to the call 144 * structure. 143 * @param callid Userspace hash of the call. Currently it is the call structure 144 * kernel address. 145 * 146 * @return NULL on not found, otherwise pointer to the call structure. 
145 147 * 146 148 */ … … 162 164 } 163 165 164 /** Get phone from the current task by ID. 165 * 166 * @param phoneid Phone ID. 167 * @param phone Place to store pointer to phone. 168 * 169 * @return EOK on success, EINVAL if ID is invalid. 170 * 171 */ 172 int phone_get(sysarg_t phoneid, phone_t **phone) 173 { 174 if (phoneid >= IPC_MAX_PHONES) 175 return EINVAL; 176 177 *phone = &TASK->phones[phoneid]; 178 return EOK; 179 } 180 181 /** Allocate new phone slot in the specified task. 182 * 183 * @param task Task for which to allocate a new phone. 184 * 185 * @return New phone handle or -1 if the phone handle limit is 186 * exceeded. 187 * 188 */ 189 int phone_alloc(task_t *task) 190 { 191 irq_spinlock_lock(&task->lock, true); 192 193 size_t i; 194 for (i = 0; i < IPC_MAX_PHONES; i++) { 195 phone_t *phone = &task->phones[i]; 196 197 if ((phone->state == IPC_PHONE_HUNGUP) && 198 (atomic_get(&phone->active_calls) == 0)) 199 phone->state = IPC_PHONE_FREE; 166 static bool phone_reclaim(kobject_t *kobj) 167 { 168 bool gc = false; 169 170 mutex_lock(&kobj->phone->lock); 171 if (kobj->phone->state == IPC_PHONE_HUNGUP && 172 atomic_get(&kobj->phone->active_calls) == 0) 173 gc = true; 174 mutex_unlock(&kobj->phone->lock); 175 176 return gc; 177 } 178 179 static void phone_destroy(void *arg) 180 { 181 phone_t *phone = (phone_t *) arg; 182 slab_free(phone_slab, phone); 183 } 184 185 static kobject_ops_t phone_kobject_ops = { 186 .reclaim = phone_reclaim, 187 .destroy = phone_destroy 188 }; 189 190 191 /** Allocate new phone in the specified task. 192 * 193 * @param task Task for which to allocate a new phone. 194 * 195 * @return New phone capability handle. 196 * @return Negative error code if a new capability cannot be allocated. 197 */ 198 cap_handle_t phone_alloc(task_t *task) 199 { 200 cap_handle_t handle = cap_alloc(task); 201 if (handle >= 0) { 202 phone_t *phone = slab_alloc(phone_slab, FRAME_ATOMIC); 203 if (!phone) { 204 cap_free(TASK, handle); 205 return ENOMEM; 206 } 207 kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC); 208 if (!kobject) { 209 cap_free(TASK, handle); 210 slab_free(phone_slab, phone); 211 return ENOMEM; 212 } 213 214 ipc_phone_init(phone, task); 215 phone->state = IPC_PHONE_CONNECTING; 216 217 kobject_initialize(kobject, KOBJECT_TYPE_PHONE, phone, 218 &phone_kobject_ops); 219 phone->kobject = kobject; 200 220 201 if (phone->state == IPC_PHONE_FREE) { 202 phone->state = IPC_PHONE_CONNECTING; 203 break; 204 } 221 cap_publish(task, handle, kobject); 205 222 } 206 223 207 irq_spinlock_unlock(&task->lock, true); 208 209 if (i == IPC_MAX_PHONES) 210 return -1; 211 212 return i; 213 } 214 215 /** Mark a phone structure free. 216 * 217 * @param phone Phone structure to be marked free. 218 * 219 */ 220 static void phone_deallocp(phone_t *phone) 221 { 222 assert(phone->state == IPC_PHONE_CONNECTING); 223 224 /* Atomic operation */ 225 phone->state = IPC_PHONE_FREE; 224 return handle; 226 225 } 227 226 … … 230 229 * All already sent messages will be correctly processed. 231 230 * 232 * @param phoneid Phone handle of the phone to be freed. 233 * 234 */ 235 void phone_dealloc(int phoneid) 236 { 237 phone_deallocp(&TASK->phones[phoneid]); 231 * @param handle Phone capability handle of the phone to be freed. 
232 * 233 */ 234 void phone_dealloc(cap_handle_t handle) 235 { 236 kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_PHONE); 237 if (!kobj) 238 return; 239 240 assert(kobj->phone); 241 assert(kobj->phone->state == IPC_PHONE_CONNECTING); 242 243 kobject_put(kobj); 244 cap_free(TASK, handle); 238 245 } 239 246 240 247 /** Connect phone to a given answerbox. 241 248 * 242 * @param phoneid Phone handle to be connected.243 * @param box Answerbox to which to connect the phone handle.249 * @param handle Capability handle of the phone to be connected. 250 * @param box Answerbox to which to connect the phone. 244 251 * @return True if the phone was connected, false otherwise. 245 * 246 * The procedure _enforces_ that the user first marks the phone 247 * busy (e.g. via phone_alloc) and then connects the phone, otherwise 248 * race condition may appear. 249 * 250 */ 251 bool phone_connect(int phoneid, answerbox_t *box) 252 { 253 phone_t *phone = &TASK->phones[phoneid]; 254 255 assert(phone->state == IPC_PHONE_CONNECTING); 256 return ipc_phone_connect(phone, box); 252 */ 253 bool phone_connect(cap_handle_t handle, answerbox_t *box) 254 { 255 kobject_t *phone_obj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 256 if (!phone_obj) 257 return false; 258 259 assert(phone_obj->phone->state == IPC_PHONE_CONNECTING); 260 261 /* Hand over phone_obj reference to the answerbox */ 262 return ipc_phone_connect(phone_obj->phone, box); 257 263 } 258 264 -
kernel/generic/src/ipc/irq.c
rd076f16 r91b60499 37 37 * 38 38 * This framework allows applications to subscribe to receive a notification 39 * when interrupt is detected. The application may provide a simple 'top-half' 40 * handler as part of its registration, which can perform simple operations 41 * (read/write port/memory, add information to notification IPC message). 39 * when an interrupt is detected. The application may provide a simple 40 * 'top-half' handler as part of its registration, which can perform simple 41 * operations (read/write port/memory, add information to notification IPC 42 * message). 42 43 * 43 44 * The structure of a notification message is as follows: … … 50 51 * - in_phone_hash: interrupt counter (may be needed to assure correct order 51 52 * in multithreaded drivers) 52 *53 * Note on synchronization for ipc_irq_subscribe(), ipc_irq_unsubscribe(),54 * ipc_irq_cleanup() and IRQ handlers:55 *56 * By always taking all of the uspace IRQ hash table lock, IRQ structure lock57 * and answerbox lock, we can rule out race conditions between the58 * registration functions and also the cleanup function. Thus the observer can59 * either see the IRQ structure present in both the hash table and the60 * answerbox list or absent in both. Views in which the IRQ structure would be61 * linked in the hash table but not in the answerbox list, or vice versa, are62 * not possible.63 *64 * By always taking the hash table lock and the IRQ structure lock, we can65 * rule out a scenario in which we would free up an IRQ structure, which is66 * still referenced by, for example, an IRQ handler. The locking scheme forces67 * us to lock the IRQ structure only after any progressing IRQs on that68 * structure are finished. Because we hold the hash table lock, we prevent new69 * IRQs from taking new references to the IRQ structure.70 *71 53 */ 72 54 … … 84 66 #include <print.h> 85 67 #include <macros.h> 68 #include <cap/cap.h> 86 69 87 70 static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount) … … 118 101 } 119 102 120 /* Rewrite the pseudocode addresses from physical to kernel virtual. */103 /* Rewrite the IRQ code addresses from physical to kernel virtual. */ 121 104 for (size_t i = 0; i < cmdcount; i++) { 122 105 uintptr_t addr; … … 176 159 } 177 160 178 /** Statically check the top-half pseudocode 179 * 180 * Check the top-half pseudocode for invalid or unsafe 181 * constructs. 161 /** Statically check the top-half IRQ code 162 * 163 * Check the top-half IRQ code for invalid or unsafe constructs. 182 164 * 183 165 */ … … 216 198 } 217 199 218 /** Free the top-half pseudocode.219 * 220 * @param code Pointer to the top-half pseudocode.200 /** Free the top-half IRQ code. 201 * 202 * @param code Pointer to the top-half IRQ code. 221 203 * 222 204 */ … … 231 213 } 232 214 233 /** Copy the top-half pseudocode from userspace into the kernel.234 * 235 * @param ucode Userspace address of the top-half pseudocode.236 * 237 * @return Kernel address of the copied pseudocode.215 /** Copy the top-half IRQ code from userspace into the kernel. 216 * 217 * @param ucode Userspace address of the top-half IRQ code. 218 * 219 * @return Kernel address of the copied IRQ code. 238 220 * 239 221 */ … … 289 271 } 290 272 273 static void irq_destroy(void *arg) 274 { 275 irq_t *irq = (irq_t *) arg; 276 277 /* Free up the IRQ code and associated structures. 
*/ 278 code_free(irq->notif_cfg.code); 279 slab_free(irq_slab, irq); 280 } 281 282 static kobject_ops_t irq_kobject_ops = { 283 .destroy = irq_destroy 284 }; 285 291 286 /** Subscribe an answerbox as a receiving end for IRQ notifications. 292 287 * 293 288 * @param box Receiving answerbox. 294 289 * @param inr IRQ number. 295 * @param devno Device number. 296 * @param imethod Interface and method to be associated with the 297 * notification. 298 * @param ucode Uspace pointer to top-half pseudocode. 299 * 300 * @return EOK on success or a negative error code. 301 * 302 */ 303 int ipc_irq_subscribe(answerbox_t *box, inr_t inr, devno_t devno, 304 sysarg_t imethod, irq_code_t *ucode) 290 * @param imethod Interface and method to be associated with the notification. 291 * @param ucode Uspace pointer to top-half IRQ code. 292 * 293 * @return IRQ capability handle. 294 * @return Negative error code. 295 * 296 */ 297 int ipc_irq_subscribe(answerbox_t *box, inr_t inr, sysarg_t imethod, 298 irq_code_t *ucode) 305 299 { 306 300 sysarg_t key[] = { 307 (sysarg_t) inr,308 (sysarg_t) devno301 [IRQ_HT_KEY_INR] = (sysarg_t) inr, 302 [IRQ_HT_KEY_MODE] = (sysarg_t) IRQ_HT_MODE_NO_CLAIM 309 303 }; 310 304 … … 321 315 322 316 /* 323 * Allocate and populate the IRQ structure.317 * Allocate and populate the IRQ kernel object. 324 318 */ 325 irq_t *irq = malloc(sizeof(irq_t), 0); 319 cap_handle_t handle = cap_alloc(TASK); 320 if (handle < 0) 321 return handle; 322 323 irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC); 324 if (!irq) { 325 cap_free(TASK, handle); 326 return ENOMEM; 327 } 328 329 kobject_t *kobject = malloc(sizeof(kobject_t), FRAME_ATOMIC); 330 if (!kobject) { 331 cap_free(TASK, handle); 332 slab_free(irq_slab, irq); 333 return ENOMEM; 334 } 326 335 327 336 irq_initialize(irq); 328 irq->devno = devno;329 337 irq->inr = inr; 330 338 irq->claim = ipc_irq_top_half_claim; … … 337 345 338 346 /* 339 * Enlist the IRQ structure in the uspace IRQ hash table and the 340 * answerbox's list. 347 * Insert the IRQ structure into the uspace IRQ hash table. 341 348 */ 342 349 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 343 344 link_t *hlp = hash_table_find(&irq_uspace_hash_table, key);345 if (hlp) {346 irq_t *hirq = hash_table_get_instance(hlp, irq_t, link);347 348 /* hirq is locked */349 irq_spinlock_unlock(&hirq->lock, false);350 code_free(code);351 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);352 353 free(irq);354 return EEXIST;355 }356 357 /* Locking is not really necessary, but paranoid */358 350 irq_spinlock_lock(&irq->lock, false); 359 irq_spinlock_lock(&box->irq_lock, false);360 351 352 irq->notif_cfg.hashed_in = true; 361 353 hash_table_insert(&irq_uspace_hash_table, key, &irq->link); 362 list_append(&irq->notif_cfg.link, &box->irq_list); 363 364 irq_spinlock_unlock(&box->irq_lock, false); 354 365 355 irq_spinlock_unlock(&irq->lock, false); 366 356 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 357 358 kobject_initialize(kobject, KOBJECT_TYPE_IRQ, irq, &irq_kobject_ops); 359 cap_publish(TASK, handle, kobject); 360 361 return handle; 362 } 363 364 /** Unsubscribe task from IRQ notification. 365 * 366 * @param box Answerbox associated with the notification. 367 * @param handle IRQ capability handle. 368 * 369 * @return EOK on success or a negative error code. 
370 * 371 */ 372 int ipc_irq_unsubscribe(answerbox_t *box, int handle) 373 { 374 kobject_t *kobj = cap_unpublish(TASK, handle, KOBJECT_TYPE_IRQ); 375 if (!kobj) 376 return ENOENT; 377 378 assert(kobj->irq->notif_cfg.answerbox == box); 379 380 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 381 irq_spinlock_lock(&kobj->irq->lock, false); 382 383 if (kobj->irq->notif_cfg.hashed_in) { 384 /* Remove the IRQ from the uspace IRQ hash table. */ 385 hash_table_remove_item(&irq_uspace_hash_table, 386 &kobj->irq->link); 387 kobj->irq->notif_cfg.hashed_in = false; 388 } 389 390 /* kobj->irq->lock unlocked by the hash table remove_callback */ 391 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 392 393 kobject_put(kobj); 394 cap_free(TASK, handle); 367 395 368 396 return EOK; 369 }370 371 /** Unsubscribe task from IRQ notification.372 *373 * @param box Answerbox associated with the notification.374 * @param inr IRQ number.375 * @param devno Device number.376 *377 * @return EOK on success or a negative error code.378 *379 */380 int ipc_irq_unsubscribe(answerbox_t *box, inr_t inr, devno_t devno)381 {382 sysarg_t key[] = {383 (sysarg_t) inr,384 (sysarg_t) devno385 };386 387 if ((inr < 0) || (inr > last_inr))388 return ELIMIT;389 390 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);391 link_t *lnk = hash_table_find(&irq_uspace_hash_table, key);392 if (!lnk) {393 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);394 return ENOENT;395 }396 397 irq_t *irq = hash_table_get_instance(lnk, irq_t, link);398 399 /* irq is locked */400 irq_spinlock_lock(&box->irq_lock, false);401 402 assert(irq->notif_cfg.answerbox == box);403 404 /* Remove the IRQ from the answerbox's list. */405 list_remove(&irq->notif_cfg.link);406 407 /*408 * We need to drop the IRQ lock now because hash_table_remove() will try409 * to reacquire it. That basically violates the natural locking order,410 * but a deadlock in hash_table_remove() is prevented by the fact that411 * we already held the IRQ lock and didn't drop the hash table lock in412 * the meantime.413 */414 irq_spinlock_unlock(&irq->lock, false);415 416 /* Remove the IRQ from the uspace IRQ hash table. */417 hash_table_remove(&irq_uspace_hash_table, key, 2);418 419 irq_spinlock_unlock(&box->irq_lock, false);420 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);421 422 /* Free up the pseudo code and associated structures. */423 code_free(irq->notif_cfg.code);424 425 /* Free up the IRQ structure. 
*/426 free(irq);427 428 return EOK;429 }430 431 /** Disconnect all IRQ notifications from an answerbox.432 *433 * This function is effective because the answerbox contains434 * list of all irq_t structures that are subscribed to435 * send notifications to it.436 *437 * @param box Answerbox for which we want to carry out the cleanup.438 *439 */440 void ipc_irq_cleanup(answerbox_t *box)441 {442 loop:443 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);444 irq_spinlock_lock(&box->irq_lock, false);445 446 while (!list_empty(&box->irq_list)) {447 DEADLOCK_PROBE_INIT(p_irqlock);448 449 irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,450 notif_cfg.link);451 452 if (!irq_spinlock_trylock(&irq->lock)) {453 /*454 * Avoid deadlock by trying again.455 */456 irq_spinlock_unlock(&box->irq_lock, false);457 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);458 DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);459 goto loop;460 }461 462 sysarg_t key[2];463 key[0] = irq->inr;464 key[1] = irq->devno;465 466 assert(irq->notif_cfg.answerbox == box);467 468 /* Unlist from the answerbox. */469 list_remove(&irq->notif_cfg.link);470 471 /*472 * We need to drop the IRQ lock now because hash_table_remove()473 * will try to reacquire it. That basically violates the natural474 * locking order, but a deadlock in hash_table_remove() is475 * prevented by the fact that we already held the IRQ lock and476 * didn't drop the hash table lock in the meantime.477 */478 irq_spinlock_unlock(&irq->lock, false);479 480 /* Remove from the hash table. */481 hash_table_remove(&irq_uspace_hash_table, key, 2);482 483 /*484 * Release both locks so that we can free the pseudo code.485 */486 irq_spinlock_unlock(&box->irq_lock, false);487 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);488 489 code_free(irq->notif_cfg.code);490 free(irq);491 492 /* Reacquire both locks before taking another round. */493 irq_spinlock_lock(&irq_uspace_hash_table_lock, true);494 irq_spinlock_lock(&box->irq_lock, false);495 }496 497 irq_spinlock_unlock(&box->irq_lock, false);498 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);499 397 } 500 398 … … 516 414 } 517 415 518 /** Apply the top-half pseudocode to find out whether to accept the IRQ or not.416 /** Apply the top-half IRQ code to find out whether to accept the IRQ or not. 519 417 * 520 418 * @param irq IRQ structure. 521 419 * 522 * @return IRQ_ACCEPT if the interrupt is accepted by the 523 * pseudocode, IRQ_DECLINE otherwise.420 * @return IRQ_ACCEPT if the interrupt is accepted by the IRQ code. 421 * @return IRQ_DECLINE if the interrupt is not accepted byt the IRQ code. 524 422 * 525 423 */ -
kernel/generic/src/ipc/kbox.c
rd076f16 r91b60499 206 206 * cleanup code. 207 207 * 208 * @return Phone idon success, or negative error code.208 * @return Phone capability handle on success, or negative error code. 209 209 * 210 210 */ … … 236 236 } 237 237 238 int newphid = phone_alloc(TASK); 239 if (newphid < 0) { 240 mutex_unlock(&task->kb.cleanup_lock); 241 return ELIMIT; 242 } 243 238 cap_handle_t phone_handle = phone_alloc(TASK); 239 if (phone_handle < 0) { 240 mutex_unlock(&task->kb.cleanup_lock); 241 return phone_handle; 242 } 243 244 kobject_t *phone_obj = kobject_get(TASK, phone_handle, 245 KOBJECT_TYPE_PHONE); 244 246 /* Connect the newly allocated phone to the kbox */ 245 (void) ipc_phone_connect(&TASK->phones[newphid], &task->kb.box); 247 /* Hand over phone_obj's reference to ipc_phone_connect() */ 248 (void) ipc_phone_connect(phone_obj->phone, &task->kb.box); 246 249 247 250 if (task->kb.thread != NULL) { 248 251 mutex_unlock(&task->kb.cleanup_lock); 249 return newphid;252 return phone_handle; 250 253 } 251 254 … … 263 266 mutex_unlock(&task->kb.cleanup_lock); 264 267 265 return newphid;268 return phone_handle; 266 269 } 267 270 -
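The kbox change above is an instance of the allocate-resolve-connect idiom that replaces direct indexing into TASK->phones: phone_alloc() now yields a capability handle, kobject_get() resolves it to the phone kobject, and ipc_phone_connect() consumes that reference. A minimal sketch of the idiom, with a hypothetical helper name and trimmed error handling:

static int connect_new_phone(task_t *task, answerbox_t *box)
{
    cap_handle_t handle = phone_alloc(task);
    if (handle < 0)
        return handle;    /* propagate the allocation error to the caller */

    kobject_t *obj = kobject_get(task, handle, KOBJECT_TYPE_PHONE);
    /* Hand over the reference obtained above to ipc_phone_connect(). */
    (void) ipc_phone_connect(obj->phone, box);

    return handle;        /* the handle is what eventually reaches userspace */
}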
kernel/generic/src/ipc/ops/conctmeto.c
rd076f16 r91b60499 1 1 /* 2 2 * Copyright (c) 2006 Ondrej Palkovsky 3 * Copyright (c) 2012 Jakub Jermar 3 * Copyright (c) 2012 Jakub Jermar 4 4 * All rights reserved. 5 5 * … … 42 42 static int request_preprocess(call_t *call, phone_t *phone) 43 43 { 44 int newphid= phone_alloc(TASK);44 cap_handle_t phone_handle = phone_alloc(TASK); 45 45 46 /* Remember the phone idor the error. */47 call->priv = newphid;48 if ( newphid< 0)49 return ELIMIT;50 46 /* Remember the phone capability or the error. */ 47 call->priv = phone_handle; 48 if (phone_handle < 0) 49 return phone_handle; 50 51 51 /* Set arg5 for server */ 52 IPC_SET_ARG5(call->data, (sysarg_t) &TASK->phones[newphid]); 52 kobject_t *phone_obj = kobject_get(TASK, phone_handle, 53 KOBJECT_TYPE_PHONE); 54 /* Hand over phone_obj's reference to ARG5 */ 55 IPC_SET_ARG5(call->data, (sysarg_t) phone_obj->phone); 53 56 54 57 return EOK; … … 57 60 static int request_forget(call_t *call) 58 61 { 59 phone_dealloc(call->priv); 62 cap_handle_t phone_handle = (cap_handle_t) call->priv; 63 phone_dealloc(phone_handle); 64 /* Hand over reference from ARG5 to phone->kobject */ 65 phone_t *phone = (phone_t *) IPC_GET_ARG5(call->data); 66 /* Drop phone_obj's reference */ 67 kobject_put(phone->kobject); 60 68 return EOK; 61 69 } … … 63 71 static int answer_preprocess(call_t *answer, ipc_data_t *olddata) 64 72 { 73 /* Hand over reference from ARG5 to phone */ 65 74 phone_t *phone = (phone_t *) IPC_GET_ARG5(*olddata); 66 75 67 76 /* If the user accepted call, connect */ 68 if (IPC_GET_RETVAL(answer->data) == EOK) 77 if (IPC_GET_RETVAL(answer->data) == EOK) { 78 /* Hand over reference from phone to the answerbox */ 69 79 (void) ipc_phone_connect(phone, &TASK->answerbox); 80 } else { 81 kobject_put(phone->kobject); 82 } 70 83 71 84 return EOK; … … 74 87 static int answer_process(call_t *answer) 75 88 { 76 int newphid = (int) answer->priv;89 cap_handle_t phone_handle = (cap_handle_t) answer->priv; 77 90 78 91 if (IPC_GET_RETVAL(answer->data)) { 79 if ( newphid>= 0) {92 if (phone_handle >= 0) { 80 93 /* 81 94 * The phone was indeed allocated and now needs 82 95 * to be deallocated. 83 96 */ 84 phone_dealloc( newphid);97 phone_dealloc(phone_handle); 85 98 } 86 99 } else { 87 IPC_SET_ARG5(answer->data, newphid);100 IPC_SET_ARG5(answer->data, phone_handle); 88 101 } 89 102 -
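A detail worth calling out in the conctmeto hunks: request_preprocess() stores the capability handle in call->priv and parks a counted reference to the phone in ARG5, so the answer side must settle that reference exactly once, either handing it to ipc_phone_connect() on acceptance or dropping it with kobject_put() on refusal. A sketch of that accept-or-release rule (the helper name is illustrative):

static void settle_arg5_reference(call_t *answer, ipc_data_t *olddata)
{
    phone_t *phone = (phone_t *) IPC_GET_ARG5(*olddata);

    if (IPC_GET_RETVAL(answer->data) == EOK) {
        /* Accepted: the reference travels on into the answerbox. */
        (void) ipc_phone_connect(phone, &TASK->answerbox);
    } else {
        /* Declined: release the reference taken in request_preprocess(). */
        kobject_put(phone->kobject);
    }
}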
kernel/generic/src/ipc/ops/concttome.c
rd076f16 r91b60499 1 1 /* 2 2 * Copyright (c) 2006 Ondrej Palkovsky 3 * Copyright (c) 2012 Jakub Jermar 3 * Copyright (c) 2012 Jakub Jermar 4 4 * All rights reserved. 5 5 * … … 42 42 static int request_process(call_t *call, answerbox_t *box) 43 43 { 44 int phoneid= phone_alloc(TASK);44 cap_handle_t phone_handle = phone_alloc(TASK); 45 45 46 IPC_SET_ARG5(call->data, phone id);46 IPC_SET_ARG5(call->data, phone_handle); 47 47 48 48 return EOK; … … 51 51 static int answer_cleanup(call_t *answer, ipc_data_t *olddata) 52 52 { 53 int phoneid = (int) IPC_GET_ARG5(*olddata);53 cap_handle_t phone_handle = (cap_handle_t) IPC_GET_ARG5(*olddata); 54 54 55 if (phone id>= 0)56 phone_dealloc(phone id);55 if (phone_handle >= 0) 56 phone_dealloc(phone_handle); 57 57 58 58 return EOK; … … 61 61 static int answer_preprocess(call_t *answer, ipc_data_t *olddata) 62 62 { 63 int phoneid = (int) IPC_GET_ARG5(*olddata);63 cap_handle_t phone_handle = (cap_handle_t) IPC_GET_ARG5(*olddata); 64 64 65 65 if (IPC_GET_RETVAL(answer->data) != EOK) { 66 66 /* The connection was not accepted */ 67 67 answer_cleanup(answer, olddata); 68 } else if (phone id>= 0) {68 } else if (phone_handle >= 0) { 69 69 /* The connection was accepted */ 70 if (phone_connect(phoneid, &answer->sender->answerbox)) { 71 /* Set 'phone hash' as arg5 of response */ 70 if (phone_connect(phone_handle, &answer->sender->answerbox)) { 71 /* Set 'phone hash' as ARG5 of response */ 72 kobject_t *phone_obj = kobject_get(TASK, phone_handle, 73 KOBJECT_TYPE_PHONE); 72 74 IPC_SET_ARG5(answer->data, 73 (sysarg_t) &TASK->phones[phoneid]); 75 (sysarg_t) phone_obj->phone); 76 kobject_put(phone_obj); 74 77 } else { 75 78 /* The answerbox is shutting down. */ -
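In concttome the handle is resolved only briefly, to publish the phone's kernel address (the "phone hash") in ARG5, and the reference is dropped straight away; the capability itself keeps the phone kobject alive. A sketch of that short-lived borrow (the helper name is illustrative):

static void publish_phone_hash(call_t *answer, cap_handle_t phone_handle)
{
    kobject_t *obj = kobject_get(TASK, phone_handle, KOBJECT_TYPE_PHONE);
    /* Export the stable phone pointer as the 'phone hash' in ARG5 ... */
    IPC_SET_ARG5(answer->data, (sysarg_t) obj->phone);
    /* ... and return the short-lived reference immediately. */
    kobject_put(obj);
}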
kernel/generic/src/ipc/ops/stchngath.c
rd076f16 r91b60499 43 43 static int request_preprocess(call_t *call, phone_t *phone) 44 44 { 45 phone_t *sender_phone;46 45 task_t *other_task_s; 47 46 48 if (phone_get(IPC_GET_ARG5(call->data), &sender_phone) != EOK) 47 kobject_t *sender_obj = kobject_get(TASK, IPC_GET_ARG5(call->data), 48 KOBJECT_TYPE_PHONE); 49 if (!sender_obj) 49 50 return ENOENT; 50 51 51 mutex_lock(&sender_phone->lock); 52 if (sender_phone->state != IPC_PHONE_CONNECTED) { 53 mutex_unlock(&sender_phone->lock); 52 mutex_lock(&sender_obj->phone->lock); 53 if (sender_obj->phone->state != IPC_PHONE_CONNECTED) { 54 mutex_unlock(&sender_obj->phone->lock); 55 kobject_put(sender_obj); 54 56 return EINVAL; 55 57 } 56 58 57 other_task_s = sender_ phone->callee->task;59 other_task_s = sender_obj->phone->callee->task; 58 60 59 mutex_unlock(&sender_ phone->lock);61 mutex_unlock(&sender_obj->phone->lock); 60 62 61 63 /* Remember the third party task hash. */ 62 64 IPC_SET_ARG5(call->data, (sysarg_t) other_task_s); 63 65 66 kobject_put(sender_obj); 64 67 return EOK; 65 68 } … … 71 74 if (!IPC_GET_RETVAL(answer->data)) { 72 75 /* The recipient authorized the change of state. */ 73 phone_t *recipient_phone;74 76 task_t *other_task_s; 75 77 task_t *other_task_r; 76 78 77 rc = phone_get(IPC_GET_ARG1(answer->data),78 &recipient_phone);79 if ( rc != EOK) {79 kobject_t *recipient_obj = kobject_get(TASK, 80 IPC_GET_ARG1(answer->data), KOBJECT_TYPE_PHONE); 81 if (!recipient_obj) { 80 82 IPC_SET_RETVAL(answer->data, ENOENT); 81 83 return ENOENT; 82 84 } 83 85 84 mutex_lock(&recipient_ phone->lock);85 if (recipient_ phone->state != IPC_PHONE_CONNECTED) {86 mutex_unlock(&recipient_ phone->lock);86 mutex_lock(&recipient_obj->phone->lock); 87 if (recipient_obj->phone->state != IPC_PHONE_CONNECTED) { 88 mutex_unlock(&recipient_obj->phone->lock); 87 89 IPC_SET_RETVAL(answer->data, EINVAL); 90 kobject_put(recipient_obj); 88 91 return EINVAL; 89 92 } 90 93 91 other_task_r = recipient_ phone->callee->task;94 other_task_r = recipient_obj->phone->callee->task; 92 95 other_task_s = (task_t *) IPC_GET_ARG5(*olddata); 93 96 … … 110 113 } 111 114 112 mutex_unlock(&recipient_phone->lock); 115 mutex_unlock(&recipient_obj->phone->lock); 116 kobject_put(recipient_obj); 113 117 } 114 118 -
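Both stchngath hunks follow the same access bracket for a phone reached through a handle: kobject_get(), lock the phone, check IPC_PHONE_CONNECTED, read what is needed, unlock, kobject_put(), with a matching put on every early exit. A condensed sketch of the bracket (the helper name is illustrative):

static task_t *task_behind_phone(cap_handle_t handle)
{
    kobject_t *obj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE);
    if (!obj)
        return NULL;                        /* stale or non-phone handle */

    task_t *peer = NULL;

    mutex_lock(&obj->phone->lock);
    if (obj->phone->state == IPC_PHONE_CONNECTED)
        peer = obj->phone->callee->task;    /* the third-party task */
    mutex_unlock(&obj->phone->lock);

    kobject_put(obj);                       /* always pairs with the get above */
    return peer;
}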
kernel/generic/src/ipc/sysipc.c
rd076f16 r91b60499 85 85 { 86 86 switch (imethod) { 87 case IPC_M_CONNECTION_CLONE:88 case IPC_M_CLONE_ESTABLISH:89 87 case IPC_M_PHONE_HUNGUP: 90 88 /* This message is meant only for the original recipient. */ … … 135 133 { 136 134 switch (IPC_GET_IMETHOD(call->data)) { 137 case IPC_M_CONNECTION_CLONE:138 case IPC_M_CLONE_ESTABLISH:139 135 case IPC_M_CONNECT_TO_ME: 140 136 case IPC_M_CONNECT_ME_TO: … … 264 260 /** Make a call over IPC and wait for reply. 265 261 * 266 * @param phoneid Phonehandle for the call.267 * @param data[inout] Structure with request/reply data.268 * @param priv Value to be stored in call->priv.262 * @param handle Phone capability handle for the call. 263 * @param data[inout] Structure with request/reply data. 264 * @param priv Value to be stored in call->priv. 269 265 * 270 266 * @return EOK on success. … … 272 268 * 273 269 */ 274 int ipc_req_internal( int phoneid, ipc_data_t *data, sysarg_t priv)275 { 276 phone_t *phone;277 if ( phone_get(phoneid, &phone) != EOK)270 int ipc_req_internal(cap_handle_t handle, ipc_data_t *data, sysarg_t priv) 271 { 272 kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 273 if (!kobj->phone) 278 274 return ENOENT; 279 275 … … 282 278 memcpy(call->data.args, data->args, sizeof(data->args)); 283 279 284 int rc = request_preprocess(call, phone);280 int rc = request_preprocess(call, kobj->phone); 285 281 if (!rc) { 286 282 #ifdef CONFIG_UDEBUG … … 289 285 290 286 ipc_call_hold(call); 291 rc = ipc_call_sync( phone, call);287 rc = ipc_call_sync(kobj->phone, call); 292 288 spinlock_lock(&call->forget_lock); 293 289 bool forgotten = call->forget; … … 316 312 assert(rc == EINTR); 317 313 } 318 return rc; 314 kobject_put(kobj); 315 return rc; 319 316 } 320 317 … … 325 322 memcpy(data->args, call->data.args, sizeof(data->args)); 326 323 ipc_call_free(call); 324 kobject_put(kobj); 327 325 328 326 return EOK; … … 350 348 * the generic function sys_ipc_call_async_slow(). 351 349 * 352 * @param phoneid Phonehandle for the call.353 * @param imethod Interface and method of the call.354 * @param arg1 Service-defined payload argument.355 * @param arg2 Service-defined payload argument.356 * @param arg3 Service-defined payload argument.357 * @param arg4 Service-defined payload argument.350 * @param handle Phone capability handle for the call. 351 * @param imethod Interface and method of the call. 352 * @param arg1 Service-defined payload argument. 353 * @param arg2 Service-defined payload argument. 354 * @param arg3 Service-defined payload argument. 355 * @param arg4 Service-defined payload argument. 358 356 * 359 357 * @return Call hash on success. 
… … 363 361 * 364 362 */ 365 sysarg_t sys_ipc_call_async_fast(sysarg_t phoneid, sysarg_t imethod,363 sysarg_t sys_ipc_call_async_fast(sysarg_t handle, sysarg_t imethod, 366 364 sysarg_t arg1, sysarg_t arg2, sysarg_t arg3, sysarg_t arg4) 367 365 { 368 phone_t *phone;369 if ( phone_get(phoneid, &phone) != EOK)366 kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 367 if (!kobj) 370 368 return IPC_CALLRET_FATAL; 371 369 372 if (check_call_limit(phone)) 370 if (check_call_limit(kobj->phone)) { 371 kobject_put(kobj); 373 372 return IPC_CALLRET_TEMPORARY; 373 } 374 374 375 375 call_t *call = ipc_call_alloc(0); … … 386 386 IPC_SET_ARG5(call->data, 0); 387 387 388 int res = request_preprocess(call, phone);388 int res = request_preprocess(call, kobj->phone); 389 389 390 390 if (!res) 391 ipc_call( phone, call);391 ipc_call(kobj->phone, call); 392 392 else 393 ipc_backsend_err(phone, call, res); 394 393 ipc_backsend_err(kobj->phone, call, res); 394 395 kobject_put(kobj); 395 396 return (sysarg_t) call; 396 397 } … … 398 399 /** Make an asynchronous IPC call allowing to transmit the entire payload. 399 400 * 400 * @param phoneid Phone handlefor the call.401 * @param handle Phone capability for the call. 401 402 * @param data Userspace address of call data with the request. 402 403 * … … 404 405 * 405 406 */ 406 sysarg_t sys_ipc_call_async_slow(sysarg_t phoneid, ipc_data_t *data)407 { 408 phone_t *phone;409 if ( phone_get(phoneid, &phone) != EOK)407 sysarg_t sys_ipc_call_async_slow(sysarg_t handle, ipc_data_t *data) 408 { 409 kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 410 if (!kobj) 410 411 return IPC_CALLRET_FATAL; 411 412 412 if (check_call_limit(phone)) 413 if (check_call_limit(kobj->phone)) { 414 kobject_put(kobj); 413 415 return IPC_CALLRET_TEMPORARY; 416 } 414 417 415 418 call_t *call = ipc_call_alloc(0); … … 418 421 if (rc != 0) { 419 422 ipc_call_free(call); 423 kobject_put(kobj); 420 424 return (sysarg_t) rc; 421 425 } 422 426 423 int res = request_preprocess(call, phone);427 int res = request_preprocess(call, kobj->phone); 424 428 425 429 if (!res) 426 ipc_call( phone, call);430 ipc_call(kobj->phone, call); 427 431 else 428 ipc_backsend_err(phone, call, res); 429 432 ipc_backsend_err(kobj->phone, call, res); 433 434 kobject_put(kobj); 430 435 return (sysarg_t) call; 431 436 } … … 435 440 * Common code for both the fast and the slow version. 436 441 * 437 * @param callid Hash of the call to forward.438 * @param phoneid Phone handleto use for forwarding.439 * @param imethod New interface and method to use for the forwarded call.440 * @param arg1 New value of the first argument for the forwarded call.441 * @param arg2 New value of the second argument for the forwarded call.442 * @param arg3 New value of the third argument for the forwarded call.443 * @param arg4 New value of the fourth argument for the forwarded call.444 * @param arg5 New value of the fifth argument for the forwarded call.445 * @param mode Flags that specify mode of the forward operation.446 * @param slow If true, arg3, arg4 and arg5 are considered. Otherwise447 * the function considers only the fast version arguments:448 * i.e. arg1 and arg2.442 * @param callid Hash of the call to forward. 443 * @param handle Phone capability to use for forwarding. 444 * @param imethod New interface and method to use for the forwarded call. 445 * @param arg1 New value of the first argument for the forwarded call. 446 * @param arg2 New value of the second argument for the forwarded call. 
447 * @param arg3 New value of the third argument for the forwarded call. 448 * @param arg4 New value of the fourth argument for the forwarded call. 449 * @param arg5 New value of the fifth argument for the forwarded call. 450 * @param mode Flags that specify mode of the forward operation. 451 * @param slow If true, arg3, arg4 and arg5 are considered. Otherwise 452 * the function considers only the fast version arguments: 453 * i.e. arg1 and arg2. 449 454 * 450 455 * @return 0 on succes, otherwise an error code. … … 453 458 * 454 459 */ 455 static sysarg_t sys_ipc_forward_common(sysarg_t callid, sysarg_t phoneid,460 static sysarg_t sys_ipc_forward_common(sysarg_t callid, sysarg_t handle, 456 461 sysarg_t imethod, sysarg_t arg1, sysarg_t arg2, sysarg_t arg3, 457 462 sysarg_t arg4, sysarg_t arg5, unsigned int mode, bool slow) … … 468 473 bool after_forward = false; 469 474 int rc; 470 phone_t *phone; 471 472 if ( phone_get(phoneid, &phone) != EOK) {475 476 kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 477 if (!kobj) { 473 478 rc = ENOENT; 474 479 goto error; … … 516 521 } 517 522 518 rc = ipc_forward(call, phone, &TASK->answerbox, mode);523 rc = ipc_forward(call, kobj->phone, &TASK->answerbox, mode); 519 524 if (rc != EOK) { 520 525 after_forward = true; … … 522 527 } 523 528 529 kobject_put(kobj); 524 530 return EOK; 525 531 … … 532 538 ipc_answer(&TASK->answerbox, call); 533 539 540 if (kobj) 541 kobject_put(kobj); 534 542 return rc; 535 543 } … … 544 552 * arguments are not set and these values are ignored. 545 553 * 546 * @param callid Hash of the call to forward.547 * @param phoneidPhone handle to use for forwarding.548 * @param imethod New interface and method to use for the forwarded call.549 * @param arg1 New value of the first argument for the forwarded call.550 * @param arg2 New value of the second argument for the forwarded call.551 * @param mode Flags that specify mode of the forward operation.554 * @param callid Hash of the call to forward. 555 * @param handle Phone handle to use for forwarding. 556 * @param imethod New interface and method to use for the forwarded call. 557 * @param arg1 New value of the first argument for the forwarded call. 558 * @param arg2 New value of the second argument for the forwarded call. 559 * @param mode Flags that specify mode of the forward operation. 552 560 * 553 561 * @return 0 on succes, otherwise an error code. 554 562 * 555 563 */ 556 sysarg_t sys_ipc_forward_fast(sysarg_t callid, sysarg_t phoneid,564 sysarg_t sys_ipc_forward_fast(sysarg_t callid, sysarg_t handle, 557 565 sysarg_t imethod, sysarg_t arg1, sysarg_t arg2, unsigned int mode) 558 566 { 559 return sys_ipc_forward_common(callid, phoneid, imethod, arg1, arg2, 0, 0,567 return sys_ipc_forward_common(callid, handle, imethod, arg1, arg2, 0, 0, 560 568 0, mode, false); 561 569 } … … 571 579 * 572 580 * @param callid Hash of the call to forward. 573 * @param phoneidPhone handle to use for forwarding.581 * @param handle Phone handle to use for forwarding. 574 582 * @param data Userspace address of the new IPC data. 575 583 * @param mode Flags that specify mode of the forward operation. 
… … 578 586 * 579 587 */ 580 sysarg_t sys_ipc_forward_slow(sysarg_t callid, sysarg_t phoneid,588 sysarg_t sys_ipc_forward_slow(sysarg_t callid, sysarg_t handle, 581 589 ipc_data_t *data, unsigned int mode) 582 590 { … … 587 595 return (sysarg_t) rc; 588 596 589 return sys_ipc_forward_common(callid, phoneid,597 return sys_ipc_forward_common(callid, handle, 590 598 IPC_GET_IMETHOD(newdata), IPC_GET_ARG1(newdata), 591 599 IPC_GET_ARG2(newdata), IPC_GET_ARG3(newdata), … … 685 693 /** Hang up a phone. 686 694 * 687 * @param Phonehandle of the phone to be hung up.695 * @param handle Phone capability handle of the phone to be hung up. 688 696 * 689 697 * @return 0 on success or an error code. 690 698 * 691 699 */ 692 sysarg_t sys_ipc_hangup(sysarg_t phoneid) 693 { 694 phone_t *phone; 695 696 if (phone_get(phoneid, &phone) != EOK) 700 sysarg_t sys_ipc_hangup(sysarg_t handle) 701 { 702 kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE); 703 if (!kobj) 697 704 return ENOENT; 698 705 699 if (ipc_phone_hangup(phone)) 706 if (ipc_phone_hangup(kobj->phone)) { 707 kobject_put(kobj); 700 708 return -1; 701 709 } 710 711 kobject_put(kobj); 702 712 return 0; 703 713 } … … 802 812 * 803 813 * @param inr IRQ number. 804 * @param devno Device number.805 814 * @param imethod Interface and method to be associated with the notification. 806 815 * @param ucode Uspace pointer to the top-half pseudocode. 807 816 * 808 * @return EPERM or a return code returned by ipc_irq_subscribe(). 809 * 810 */ 811 sysarg_t sys_ipc_irq_subscribe(inr_t inr, devno_t devno, sysarg_t imethod, 812 irq_code_t *ucode) 817 * @return IRQ kernel object capability 818 * @return EPERM 819 * @return Error code returned by ipc_irq_subscribe(). 820 * 821 */ 822 sysarg_t sys_ipc_irq_subscribe(inr_t inr, sysarg_t imethod, irq_code_t *ucode) 813 823 { 814 824 if (!(perm_get(TASK) & PERM_IRQ_REG)) 815 825 return EPERM; 816 826 817 return ipc_irq_subscribe(&TASK->answerbox, inr, devno,imethod, ucode);827 return ipc_irq_subscribe(&TASK->answerbox, inr, imethod, ucode); 818 828 } 819 829 … … 826 836 * 827 837 */ 828 sysarg_t sys_ipc_irq_unsubscribe( inr_t inr, devno_t devno)838 sysarg_t sys_ipc_irq_unsubscribe(sysarg_t cap) 829 839 { 830 840 if (!(perm_get(TASK) & PERM_IRQ_REG)) 831 841 return EPERM; 832 842 833 ipc_irq_unsubscribe(&TASK->answerbox, inr, devno);843 ipc_irq_unsubscribe(&TASK->answerbox, cap); 834 844 835 845 return 0; -
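The recurring pattern in the sysipc hunks is that every syscall now resolves its phone argument with kobject_get() at entry and pairs it with kobject_put() on every return path; the shortest complete example is sys_ipc_hangup(), reflowed here from the diff as a reading aid:

sysarg_t sys_ipc_hangup(sysarg_t handle)
{
    kobject_t *kobj = kobject_get(TASK, handle, KOBJECT_TYPE_PHONE);
    if (!kobj)
        return ENOENT;

    if (ipc_phone_hangup(kobj->phone)) {
        kobject_put(kobj);    /* the error path still drops the reference */
        return -1;
    }

    kobject_put(kobj);
    return 0;
}

Note also the userspace-visible part of the same conversion at the end of the hunk: sys_ipc_irq_subscribe() drops the devno argument and returns an IRQ capability, and sys_ipc_irq_unsubscribe() now takes that capability instead of an (inr, devno) pair.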
kernel/generic/src/ipc/sysipc_ops.c
rd076f16 r91b60499 38 38 39 39 /* Forward declarations. */ 40 sysipc_ops_t ipc_m_connection_clone_ops;41 sysipc_ops_t ipc_m_clone_establish_ops;42 40 sysipc_ops_t ipc_m_connect_to_me_ops; 43 41 sysipc_ops_t ipc_m_connect_me_to_ops; … … 51 49 52 50 static sysipc_ops_t *sysipc_ops[] = { 53 [IPC_M_CONNECTION_CLONE] = &ipc_m_connection_clone_ops,54 [IPC_M_CLONE_ESTABLISH] = &ipc_m_clone_establish_ops,55 51 [IPC_M_CONNECT_TO_ME] = &ipc_m_connect_to_me_ops, 56 52 [IPC_M_CONNECT_ME_TO] = &ipc_m_connect_me_to_ops, -
kernel/generic/src/proc/task.c
rd076f16 r91b60499 50 50 #include <adt/btree.h> 51 51 #include <adt/list.h> 52 #include <cap/cap.h> 52 53 #include <ipc/ipc.h> 53 54 #include <ipc/ipcrsc.h> … … 83 84 static void task_kill_internal(task_t *); 84 85 static int tsk_constructor(void *, unsigned int); 86 static size_t tsk_destructor(void *obj); 85 87 86 88 /** Initialize kernel tasks support. … … 92 94 avltree_create(&tasks_tree); 93 95 task_slab = slab_cache_create("task_t", sizeof(task_t), 0, 94 tsk_constructor, NULL, 0);96 tsk_constructor, tsk_destructor, 0); 95 97 } 96 98 … … 167 169 list_initialize(&task->threads); 168 170 171 caps_task_alloc(task); 172 169 173 ipc_answerbox_init(&task->answerbox, task); 170 174 171 size_t i;172 for (i = 0; i < IPC_MAX_PHONES; i++)173 ipc_phone_init(&task->phones[i], task);174 175 175 spinlock_initialize(&task->active_calls_lock, "active_calls_lock"); 176 176 list_initialize(&task->active_calls); … … 186 186 } 187 187 188 size_t tsk_destructor(void *obj) 189 { 190 task_t *task = (task_t *) obj; 191 192 caps_task_free(task); 193 return 0; 194 } 195 188 196 /** Create new task with no threads. 189 197 * … … 206 214 task->ucycles = 0; 207 215 task->kcycles = 0; 216 217 caps_task_init(task); 208 218 209 219 task->ipc_info.call_sent = 0; … … 228 238 229 239 if ((ipc_phone_0) && 230 (container_check(ipc_phone_0->task->container, task->container))) 231 (void) ipc_phone_connect(&task->phones[0], ipc_phone_0); 240 (container_check(ipc_phone_0->task->container, task->container))) { 241 cap_handle_t phone_handle = phone_alloc(task); 242 kobject_t *phone_obj = kobject_get(task, phone_handle, 243 KOBJECT_TYPE_PHONE); 244 (void) ipc_phone_connect(phone_obj->phone, ipc_phone_0); 245 } 232 246 233 247 futex_task_init(task); … … 603 617 if (*additional) 604 618 printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c " 605 "%9" PRIua , task->taskid, ucycles, usuffix, kcycles,619 "%9" PRIua "\n", task->taskid, ucycles, usuffix, kcycles, 606 620 ksuffix, atomic_get(&task->refcount)); 607 621 else … … 609 623 task->taskid, task->name, task->container, task, task->as); 610 624 #endif 611 612 if (*additional) {613 size_t i;614 for (i = 0; i < IPC_MAX_PHONES; i++) {615 if (task->phones[i].callee)616 printf(" %zu:%p", i, task->phones[i].callee);617 }618 printf("\n");619 }620 625 621 626 irq_spinlock_unlock(&task->lock, false); -
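Two things in the task.c hunks are worth spelling out: the capability space now follows the task_t slab lifecycle (caps_task_alloc() in the constructor, caps_task_init() in task_create(), caps_task_free() in the new destructor), and even the initial connection to ipc_phone_0 goes through a freshly allocated phone capability. The latter, reflowed from the diff as a reading aid:

if ((ipc_phone_0) &&
    (container_check(ipc_phone_0->task->container, task->container))) {
    cap_handle_t phone_handle = phone_alloc(task);
    kobject_t *phone_obj = kobject_get(task, phone_handle,
        KOBJECT_TYPE_PHONE);
    /* Hand over phone_obj's reference to ipc_phone_connect(). */
    (void) ipc_phone_connect(phone_obj->phone, ipc_phone_0);
}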
kernel/generic/src/proc/thread.c
rd076f16 r91b60499 228 228 #endif 229 229 230 return 1; /* One pagefreed */230 return STACK_FRAMES; /* number of frames freed */ 231 231 } 232 232 -
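The one-line thread.c change fixes reclaim accounting rather than behaviour: a slab destructor reports how many frames it returned to the allocator, and a kernel stack spans STACK_FRAMES frames, not a single page. A hedged sketch of that contract, assuming frame_free() takes a physical address plus frame count and a thread->kstack field as in current mainline:

static size_t thr_destructor_sketch(void *obj)
{
    thread_t *thread = (thread_t *) obj;

    /* ... FPU context and other per-architecture teardown elided ... */

    /* Assumption: frame_free(paddr, count) and thread->kstack as in mainline. */
    frame_free(KA2PA(thread->kstack), STACK_FRAMES);

    return STACK_FRAMES;    /* tell the slab reclaimer how many frames were freed */
}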
kernel/generic/src/syscall/syscall.c
rd076f16 r91b60499 45 45 #include <arch.h> 46 46 #include <debug.h> 47 #include <ddi/device.h>48 47 #include <interrupt.h> 49 48 #include <ipc/sysipc.h> … … 175 174 176 175 /* DDI related syscalls. */ 177 [SYS_DEVICE_ASSIGN_DEVNO] = (syshandler_t) sys_device_assign_devno,178 176 [SYS_PHYSMEM_MAP] = (syshandler_t) sys_physmem_map, 179 177 [SYS_PHYSMEM_UNMAP] = (syshandler_t) sys_physmem_unmap, -
kernel/generic/src/sysinfo/sysinfo.c
rd076f16 r91b60499 700 700 701 701 sysinfo_return_t ret; 702 ret.tag = SYSINFO_VAL_UNDEFINED; 702 703 703 704 if (subtree != NULL) { … … 730 731 ret.data.size = size; 731 732 } 732 } else {733 /* No item in the fixed sysinfo tree */734 ret.tag = SYSINFO_VAL_UNDEFINED;735 733 } 736 734
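The sysinfo.c change is the usual default-then-override cleanup: ret.tag starts out as SYSINFO_VAL_UNDEFINED, so a failed lookup no longer needs a dedicated else branch. A trimmed sketch of the resulting shape (only the relevant fields shown):

sysinfo_return_t ret;
ret.tag = SYSINFO_VAL_UNDEFINED;    /* the answer unless a lookup succeeds */

if (subtree != NULL) {
    /* ... walk the subtree; only successful matches overwrite ret.tag ... */
}

/* A miss simply leaves the UNDEFINED default in place. */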