- Timestamp: 2011-02-04T20:56:52Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0397e5a4, e29e09cf
- Parents: e778543 (diff), 0b37882 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 27 edited
kernel/arch/arm32/src/mach/gta02/gta02.c
@@ -174 +174 @@
 	fb_parea.pbase = GTA02_FB_BASE;
 	fb_parea.frames = 150;
+	fb_parea.unpriv = false;
 	ddi_parea_register(&fb_parea);
 }
kernel/arch/arm32/src/mach/integratorcp/integratorcp.c
@@ -300 +300 @@
 	fb_parea.pbase = ICP_FB;
 	fb_parea.frames = 300;
+	fb_parea.unpriv = false;
 	ddi_parea_register(&fb_parea);
 }
kernel/arch/ia32/_link.ld.in
@@ -49 +49 @@
 }
 
+#ifdef CONFIG_LINE_DEBUG
+	.comment 0 : { *(.comment); }
+	.debug_abbrev 0 : { *(.debug_abbrev); }
+	.debug_aranges 0 : { *(.debug_aranges); }
+	.debug_info 0 : { *(.debug_info); }
+	.debug_line 0 : { *(.debug_line); }
+	.debug_loc 0 : { *(.debug_loc); }
+	.debug_pubnames 0 : { *(.debug_pubnames); }
+	.debug_pubtypes 0 : { *(.debug_pubtypes); }
+	.debug_ranges 0 : { *(.debug_ranges); }
+	.debug_str 0 : { *(.debug_str); }
+#endif
+
 /DISCARD/ : {
-	*(.note.GNU-stack);
-	*(.comment);
+	*(*);
 }
 
kernel/arch/mips32/src/mm/tlb.c
@@ -557 +557 @@
 	entry_hi_t hi, hi_save;
 	tlb_index_t index;
 
-	ASSERT(asid != ASID_INVALID);
+	if (asid == ASID_INVALID)
+		return;
 
 	hi_save.value = cp0_entry_hi_read();
kernel/arch/sparc64/src/drivers/niagara.c
@@ -216 +216 @@
 	outbuf_parea.pbase = (uintptr_t) (KA2PA(&output_buffer));
 	outbuf_parea.frames = 1;
+	outbuf_parea.unpriv = false;
 	ddi_parea_register(&outbuf_parea);
 
@@ -221 +222 @@
 	inbuf_parea.pbase = (uintptr_t) (KA2PA(&input_buffer));
 	inbuf_parea.frames = 1;
+	inbuf_parea.unpriv = false;
 	ddi_parea_register(&inbuf_parea);
 
kernel/generic/include/ddi/ddi.h
@@ -43 +43 @@
 /** Structure representing contiguous physical memory area. */
 typedef struct {
-	uintptr_t pbase;  /**< Physical base of the area. */
-	pfn_t frames;     /**< Number of frames in the area. */
+	link_t link;      /**< Linked list link */
 
-	link_t link;      /**< Linked list link */
+	uintptr_t pbase;  /**< Physical base of the area. */
+	pfn_t frames;     /**< Number of frames in the area. */
+	bool unpriv;      /**< Allow mapping by unprivileged tasks. */
 } parea_t;
 
@@ -60 +61 @@
 extern int ddi_iospace_enable_arch(task_t *, uintptr_t, size_t);
 
-
 #endif
 
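The new unpriv flag is the pivot of this changeset: sys_physmem_map() (see kernel/generic/src/ddi/ddi.c below) now lets a task without CAP_MEM_MANAGER map a parea only if the area was registered with unpriv set. A minimal kernel-side sketch of how a driver could opt in; everything except the parea_t fields and ddi_parea_register() is hypothetical:

#include <ddi/ddi.h>

static parea_t sample_parea;  /* hypothetical device buffer descriptor */

static void sample_parea_init(uintptr_t pbase, pfn_t frames)
{
	sample_parea.pbase = pbase;    /* physical base, frame aligned */
	sample_parea.frames = frames;  /* area size in frames */
	sample_parea.unpriv = true;    /* mappable without CAP_MEM_MANAGER */
	ddi_parea_register(&sample_parea);
}

Note that every in-tree registration touched by this changeset sets unpriv = false, preserving the old privileged-only behaviour.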
kernel/generic/include/ipc/event_types.h
@@ -41 +41 @@
 	/** Returning from kernel console to userspace */
 	EVENT_KCONSOLE,
-	/** A thread has faulted and will be terminated */
+	/** A task/thread has faulted and will be terminated */
 	EVENT_FAULT,
 	EVENT_END
kernel/generic/include/ipc/sysipc.h
@@ -56 +56 @@
     unsigned int);
 extern sysarg_t sys_ipc_hangup(sysarg_t);
-extern sysarg_t sys_ipc_register_irq(inr_t, devno_t, sysarg_t, irq_code_t *);
-extern sysarg_t sys_ipc_unregister_irq(inr_t, devno_t);
+extern sysarg_t sys_register_irq(inr_t, devno_t, sysarg_t, irq_code_t *);
+extern sysarg_t sys_unregister_irq(inr_t, devno_t);
 
 #ifdef __32_BITS__
kernel/generic/include/mm/as.h
@@ -115 +115 @@
 
 	/**
-	 * Number of processors on wich is this address space active.
-	 * Protected by asidlock.
+	 * Number of processors on which this
+	 * address space is active. Protected by
+	 * asidlock.
 	 */
 	size_t cpu_refcount;
 
-	/**
-	 * Address space identifier.
-	 * Constant on architectures that do not support ASIDs.
-	 * Protected by asidlock.
+	/** Address space identifier.
+	 *
+	 * Constant on architectures that do not
+	 * support ASIDs. Protected by asidlock.
+	 *
 	 */
 	asid_t asid;
 
-	/** Number of references (i.e tasks that reference this as). */
+	/** Number of references (i.e. tasks that reference this as). */
 	atomic_t refcount;
 
@@ -199 +201 @@
 typedef struct {
 	mutex_t lock;
+
 	/** Containing address space. */
 	as_t *as;
 
-	/**
-	 * Flags related to the memory represented by the address space area.
-	 */
+	/** Memory flags. */
 	unsigned int flags;
 
-	/** Attributes related to the address space area itself. */
+	/** Address space area attributes. */
 	unsigned int attributes;
-	/** Size of this area in multiples of PAGE_SIZE. */
+
+	/** Number of pages in the area. */
 	size_t pages;
+
+	/** Number of resident pages in the area. */
+	size_t resident;
+
 	/** Base address of this area. */
 	uintptr_t base;
+
 	/** Map of used space. */
 	btree_t used_space;
 
 	/**
-	 * If the address space area has been shared, this pointer will
-	 * reference the share info structure.
+	 * If the address space area is shared. this is
+	 * a reference to the share info structure.
 	 */
 	share_info_t *sh_info;
@@ -261 +268 @@
 extern bool as_area_check_access(as_area_t *, pf_access_t);
 extern size_t as_area_get_size(uintptr_t);
-extern int used_space_insert(as_area_t *, uintptr_t, size_t);
-extern int used_space_remove(as_area_t *, uintptr_t, size_t);
-
+extern bool used_space_insert(as_area_t *, uintptr_t, size_t);
+extern bool used_space_remove(as_area_t *, uintptr_t, size_t);
 
 /* Interface to be implemented by architectures. */
@@ -307 +313 @@
 extern sysarg_t sys_as_area_change_flags(uintptr_t, unsigned int);
 extern sysarg_t sys_as_area_destroy(uintptr_t);
+extern sysarg_t sys_as_get_unmapped_area(uintptr_t, size_t);
 
 /* Introspection functions. */
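The new resident counter is kept in sync by used_space_insert() and used_space_remove(), which now return bool instead of an int status. A sketch of the resulting calling convention, assuming the caller already holds the area mutex (the helper itself is hypothetical, not part of the changeset):

#include <mm/as.h>
#include <debug.h>
#include <panic.h>

/* Hypothetical helper: mark one page of an area as used. On success
 * the callee also increments area->resident, so resident-memory
 * statistics become a simple field read. */
static void mark_page_used(as_area_t *area, uintptr_t page)
{
	ASSERT(mutex_locked(&area->lock));

	if (!used_space_insert(area, page, 1))
		panic("Cannot insert used space.");
}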
kernel/generic/include/proc/task.h
@@ -131 +131 @@
 extern task_t *task_find_by_id(task_id_t);
 extern int task_kill(task_id_t);
+extern void task_kill_self(bool) __attribute__((noreturn));
 extern void task_get_accounting(task_t *, uint64_t *, uint64_t *);
 extern void task_print_list(bool);
@@ -155 +156 @@
 extern sysarg_t sys_task_set_name(const char *, size_t);
 extern sysarg_t sys_task_kill(task_id_t *);
+extern sysarg_t sys_task_exit(sysarg_t);
 
 #endif
kernel/generic/include/syscall/syscall.h
@@ -48 +48 @@
 	SYS_TASK_SET_NAME,
 	SYS_TASK_KILL,
+	SYS_TASK_EXIT,
 	SYS_PROGRAM_SPAWN_LOADER,
 
@@ -58 +59 @@
 	SYS_AS_AREA_CHANGE_FLAGS,
 	SYS_AS_AREA_DESTROY,
+	SYS_AS_GET_UNMAPPED_AREA,
 
 	SYS_PAGE_FIND_MAPPING,
@@ -72 +74 @@
 	SYS_IPC_POKE,
 	SYS_IPC_HANGUP,
-	SYS_IPC_REGISTER_IRQ,
-	SYS_IPC_UNREGISTER_IRQ,
 	SYS_IPC_CONNECT_KBOX,
 
@@ -84 +84 @@
 	SYS_PHYSMEM_MAP,
 	SYS_IOSPACE_ENABLE,
+	SYS_REGISTER_IRQ,
+	SYS_UNREGISTER_IRQ,
 
 	SYS_SYSINFO_GET_TAG,
kernel/generic/include/udebug/udebug.h
@@ -36 +36 @@
 #define KERN_UDEBUG_H_
 
+#define UDEBUG_EVMASK(event)  (1 << ((event) - 1))
+
+typedef enum { /* udebug_method_t */
+
+	/** Start debugging the recipient.
+	 *
+	 * Causes all threads in the receiving task to stop. When they
+	 * are all stoped, an answer with retval 0 is generated.
+	 *
+	 */
+	UDEBUG_M_BEGIN = 1,
+
+	/** Finish debugging the recipient.
+	 *
+	 * Answers all pending GO and GUARD messages.
+	 *
+	 */
+	UDEBUG_M_END,
+
+	/** Set which events should be captured. */
+	UDEBUG_M_SET_EVMASK,
+
+	/** Make sure the debugged task is still there.
+	 *
+	 * This message is answered when the debugged task dies
+	 * or the debugging session ends.
+	 *
+	 */
+	UDEBUG_M_GUARD,
+
+	/** Run a thread until a debugging event occurs.
+	 *
+	 * This message is answered when the thread stops
+	 * in a debugging event.
+	 *
+	 * - ARG2 - id of the thread to run
+	 *
+	 */
+	UDEBUG_M_GO,
+
+	/** Stop a thread being debugged.
+	 *
+	 * Creates a special STOP event in the thread, causing
+	 * it to answer a pending GO message (if any).
+	 *
+	 */
+	UDEBUG_M_STOP,
+
+	/** Read arguments of a syscall.
+	 *
+	 * - ARG2 - thread identification
+	 * - ARG3 - destination address in the caller's address space
+	 *
+	 */
+	UDEBUG_M_ARGS_READ,
+
+	/** Read thread's userspace register state (istate_t).
+	 *
+	 * - ARG2 - thread identification
+	 * - ARG3 - destination address in the caller's address space
+	 *
+	 * or, on error, retval will be
+	 * - ENOENT - thread does not exist
+	 * - EBUSY - register state not available
+	 */
+	UDEBUG_M_REGS_READ,
+
+	/** Read the list of the debugged tasks's threads.
+	 *
+	 * - ARG2 - destination address in the caller's address space
+	 * - ARG3 - size of receiving buffer in bytes
+	 *
+	 * The kernel fills the buffer with a series of sysarg_t values
+	 * (thread ids). On answer, the kernel will set:
+	 *
+	 * - ARG2 - number of bytes that were actually copied
+	 * - ARG3 - number of bytes of the complete data
+	 *
+	 */
+	UDEBUG_M_THREAD_READ,
+
+	/** Read the name of the debugged task.
+	 *
+	 * - ARG2 - destination address in the caller's address space
+	 * - ARG3 - size of receiving buffer in bytes
+	 *
+	 * The kernel fills the buffer with a non-terminated string.
+	 *
+	 * - ARG2 - number of bytes that were actually copied
+	 * - ARG3 - number of bytes of the complete data
+	 *
+	 */
+	UDEBUG_M_NAME_READ,
+
+	/** Read the list of the debugged task's address space areas.
+	 *
+	 * - ARG2 - destination address in the caller's address space
+	 * - ARG3 - size of receiving buffer in bytes
+	 *
+	 * The kernel fills the buffer with a series of as_area_info_t structures.
+	 * Upon answer, the kernel will set:
+	 *
+	 * - ARG2 - number of bytes that were actually copied
+	 * - ARG3 - number of bytes of the complete data
+	 *
+	 */
+	UDEBUG_M_AREAS_READ,
+
+	/** Read the debugged tasks's memory.
+	 *
+	 * - ARG2 - destination address in the caller's address space
+	 * - ARG3 - source address in the recipient's address space
+	 * - ARG4 - size of receiving buffer in bytes
+	 *
+	 */
+	UDEBUG_M_MEM_READ
+} udebug_method_t;
+
+typedef enum {
+	UDEBUG_EVENT_FINISHED = 1,  /**< Debuging session has finished */
+	UDEBUG_EVENT_STOP,          /**< Stopped on DEBUG_STOP request */
+	UDEBUG_EVENT_SYSCALL_B,     /**< Before beginning syscall execution */
+	UDEBUG_EVENT_SYSCALL_E,     /**< After finishing syscall execution */
+	UDEBUG_EVENT_THREAD_B,      /**< The task created a new thread */
+	UDEBUG_EVENT_THREAD_E       /**< A thread exited */
+} udebug_event_t;
+
+typedef enum {
+	UDEBUG_EM_FINISHED = UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED),
+	UDEBUG_EM_STOP = UDEBUG_EVMASK(UDEBUG_EVENT_STOP),
+	UDEBUG_EM_SYSCALL_B = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B),
+	UDEBUG_EM_SYSCALL_E = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E),
+	UDEBUG_EM_THREAD_B = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B),
+	UDEBUG_EM_THREAD_E = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E),
+	UDEBUG_EM_ALL =
+	    (UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED) |
+	    UDEBUG_EVMASK(UDEBUG_EVENT_STOP) |
+	    UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B) |
+	    UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E) |
+	    UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B) |
+	    UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E))
+} udebug_evmask_t;
+
+#ifdef KERNEL
+
 #include <ipc/ipc.h>
-
-typedef enum { /* udebug_method_t */
-
-	/** Start debugging the recipient.
-	 * Causes all threads in the receiving task to stop. When they
-	 * are all stoped, an answer with retval 0 is generated.
-	 */
-	UDEBUG_M_BEGIN = 1,
-
-	/** Finish debugging the recipient.
-	 * Answers all pending GO and GUARD messages.
-	 */
-	UDEBUG_M_END,
-
-	/** Set which events should be captured.
-	 */
-	UDEBUG_M_SET_EVMASK,
-
-	/** Make sure the debugged task is still there.
-	 * This message is answered when the debugged task dies
-	 * or the debugging session ends.
-	 */
-	UDEBUG_M_GUARD,
-
-	/** Run a thread until a debugging event occurs.
-	 * This message is answered when the thread stops
-	 * in a debugging event.
-	 *
-	 * - ARG2 - id of the thread to run
-	 */
-	UDEBUG_M_GO,
-
-	/** Stop a thread being debugged.
-	 * Creates a special STOP event in the thread, causing
-	 * it to answer a pending GO message (if any).
-	 */
-	UDEBUG_M_STOP,
-
-	/** Read arguments of a syscall.
-	 *
-	 * - ARG2 - thread identification
-	 * - ARG3 - destination address in the caller's address space
-	 *
-	 */
-	UDEBUG_M_ARGS_READ,
-
-	/** Read thread's userspace register state (istate_t).
-	 *
-	 * - ARG2 - thread identification
-	 * - ARG3 - destination address in the caller's address space
-	 *
-	 * or, on error, retval will be
-	 * - ENOENT - thread does not exist
-	 * - EBUSY - register state not available
-	 */
-	UDEBUG_M_REGS_READ,
-
-	/** Read the list of the debugged tasks's threads.
-	 *
-	 * - ARG2 - destination address in the caller's address space
-	 * - ARG3 - size of receiving buffer in bytes
-	 *
-	 * The kernel fills the buffer with a series of sysarg_t values
-	 * (thread ids). On answer, the kernel will set:
-	 *
-	 * - ARG2 - number of bytes that were actually copied
-	 * - ARG3 - number of bytes of the complete data
-	 *
-	 */
-	UDEBUG_M_THREAD_READ,
-
-	/** Read the name of the debugged task.
-	 *
-	 * - ARG2 - destination address in the caller's address space
-	 * - ARG3 - size of receiving buffer in bytes
-	 *
-	 * The kernel fills the buffer with a non-terminated string.
-	 *
-	 * - ARG2 - number of bytes that were actually copied
-	 * - ARG3 - number of bytes of the complete data
-	 *
-	 */
-	UDEBUG_M_NAME_READ,
-
-	/** Read the list of the debugged task's address space areas.
-	 *
-	 * - ARG2 - destination address in the caller's address space
-	 * - ARG3 - size of receiving buffer in bytes
-	 *
-	 * The kernel fills the buffer with a series of as_area_info_t structures.
-	 * Upon answer, the kernel will set:
-	 *
-	 * - ARG2 - number of bytes that were actually copied
-	 * - ARG3 - number of bytes of the complete data
-	 *
-	 */
-	UDEBUG_M_AREAS_READ,
-
-	/** Read the debugged tasks's memory.
-	 *
-	 * - ARG2 - destination address in the caller's address space
-	 * - ARG3 - source address in the recipient's address space
-	 * - ARG4 - size of receiving buffer in bytes
-	 *
-	 */
-	UDEBUG_M_MEM_READ,
-
-} udebug_method_t;
-
-
-typedef enum {
-	UDEBUG_EVENT_FINISHED = 1, /**< Debuging session has finished */
-	UDEBUG_EVENT_STOP,         /**< Stopped on DEBUG_STOP request */
-	UDEBUG_EVENT_SYSCALL_B,    /**< Before beginning syscall execution */
-	UDEBUG_EVENT_SYSCALL_E,    /**< After finishing syscall execution */
-	UDEBUG_EVENT_THREAD_B,     /**< The task created a new thread */
-	UDEBUG_EVENT_THREAD_E      /**< A thread exited */
-} udebug_event_t;
-
-#define UDEBUG_EVMASK(event) (1 << ((event) - 1))
-
-typedef enum {
-	UDEBUG_EM_FINISHED = UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED),
-	UDEBUG_EM_STOP = UDEBUG_EVMASK(UDEBUG_EVENT_STOP),
-	UDEBUG_EM_SYSCALL_B = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B),
-	UDEBUG_EM_SYSCALL_E = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E),
-	UDEBUG_EM_THREAD_B = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B),
-	UDEBUG_EM_THREAD_E = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E),
-	UDEBUG_EM_ALL =
-	    UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED) |
-	    UDEBUG_EVMASK(UDEBUG_EVENT_STOP) |
-	    UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B) |
-	    UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E) |
-	    UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B) |
-	    UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E)
-} udebug_evmask_t;
-
-#ifdef KERNEL
-
 #include <synch/mutex.h>
 #include <synch/condvar.h>
@@ -196 +202 @@
 	mutex_t lock;
 	char *lock_owner;
 
 	udebug_task_state_t dt_state;
 	call_t *begin_call;
@@ -209 +215 @@
 	/** Synchronize debug ops on this thread / access to this structure. */
 	mutex_t lock;
 
 	waitq_t go_wq;
 	call_t *go_call;
 	sysarg_t syscall_args[6];
 	istate_t *uspace_state;
 
 	/** What type of event are we stopped in or 0 if none. */
 	udebug_event_t cur_event;
-	bool go;         /**< thread is GO */
-	bool stoppable;  /**< thread is stoppable */
-	bool active;     /**< thread is in a debugging session */
+	bool go;         /**< Thread is GO */
+	bool stoppable;  /**< Thread is stoppable */
+	bool active;     /**< Thread is in a debugging session */
 	condvar_t active_cv;
 } udebug_thread_t;
@@ -226 +232 @@
 struct thread;
 
-void udebug_task_init(udebug_task_t *ut);
-void udebug_thread_initialize(udebug_thread_t *ut);
-
-void udebug_syscall_event(sysarg_t a1, sysarg_t a2, sysarg_t a3,
-    sysarg_t a4, sysarg_t a5, sysarg_t a6, sysarg_t id, sysarg_t rc,
-    bool end_variant);
-
-void udebug_thread_b_event_attach(struct thread *t, struct task *ta);
+void udebug_task_init(udebug_task_t *);
+void udebug_thread_initialize(udebug_thread_t *);
+
+void udebug_syscall_event(sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t,
+    sysarg_t, sysarg_t, sysarg_t, bool);
+
+void udebug_thread_b_event_attach(struct thread *, struct task *);
 void udebug_thread_e_event(void);
@@ -241 +246 @@
 void udebug_before_thread_runs(void);
 
-int udebug_task_cleanup(struct task *ta);
+int udebug_task_cleanup(struct task *);
 void udebug_thread_fault(void);
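Since UDEBUG_EVMASK(event) expands to 1 << (event - 1), each event owns one bit of the mask and subscriptions can be combined freely via UDEBUG_M_SET_EVMASK. A small illustration (all values follow directly from the header above; the mask variable is just an example):

/* UDEBUG_EVMASK(event) = 1 << (event - 1), hence:
 *   UDEBUG_EM_FINISHED  = 1 << 0 = 0x01
 *   UDEBUG_EM_STOP      = 1 << 1 = 0x02
 *   UDEBUG_EM_SYSCALL_B = 1 << 2 = 0x04
 *   UDEBUG_EM_SYSCALL_E = 1 << 3 = 0x08
 *   UDEBUG_EM_THREAD_B  = 1 << 4 = 0x10
 *   UDEBUG_EM_THREAD_E  = 1 << 5 = 0x20
 * A debugger tracing only syscalls would therefore request: */
udebug_evmask_t mask = UDEBUG_EM_SYSCALL_B | UDEBUG_EM_SYSCALL_E;  /* == 0x0c */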
kernel/generic/src/console/cmd.c
@@ -553 +553 @@
 	for (i = 0; basic_commands[i]; i++) {
 		cmd_initialize(basic_commands[i]);
-		if (!cmd_register(basic_commands[i]))
-			printf("Cannot register command %s\n", basic_commands[i]->name);
+	}
+
+	for (i = 0; basic_commands[i]; i++) {
+		if (!cmd_register(basic_commands[i])) {
+			printf("Cannot register command %s\n",
+			    basic_commands[i]->name);
+		}
 	}
 }
kernel/generic/src/console/console.c
@@ -160 +160 @@
 	klog_parea.pbase = (uintptr_t) faddr;
 	klog_parea.frames = SIZE2FRAMES(sizeof(klog));
+	klog_parea.unpriv = false;
 	ddi_parea_register(&klog_parea);
 
kernel/generic/src/ddi/ddi.c
@@ -104 +104 @@
 {
 	ASSERT(TASK);
-	ASSERT((pf % FRAME_SIZE) == 0);
-	ASSERT((vp % PAGE_SIZE) == 0);
-
-	/*
-	 * Make sure the caller is authorised to make this syscall.
-	 */
-	cap_t caps = cap_get(TASK);
-	if (!(caps & CAP_MEM_MANAGER))
-		return EPERM;
+
+	if ((pf % FRAME_SIZE) != 0)
+		return EBADMEM;
+
+	if ((vp % PAGE_SIZE) != 0)
+		return EBADMEM;
+
+	/*
+	 * Unprivileged tasks are only allowed to map pareas
+	 * which are explicitly marked as such.
+	 */
+	bool priv =
+	    ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);
 
 	mem_backend_data_t backend_data;
@@ -123 +127 @@
 
 	if (znum == (size_t) -1) {
-		/* Frames not found in any zones
-		 * -> assume it is hardware device and allow mapping
+		/*
+		 * Frames not found in any zone
+		 * -> assume it is a hardware device and allow mapping
+		 * for privileged tasks.
 		 */
 		irq_spinlock_unlock(&zones.lock, true);
+
+		if (!priv)
+			return EPERM;
+
 		goto map;
 	}
 
 	if (zones.info[znum].flags & ZONE_FIRMWARE) {
-		/* Frames are part of firmware */
+		/*
+		 * Frames are part of firmware
+		 * -> allow mapping for privileged tasks.
+		 */
 		irq_spinlock_unlock(&zones.lock, true);
+
+		if (!priv)
+			return EPERM;
+
 		goto map;
 	}
@@ -138 +155 @@
 	if (zone_flags_available(zones.info[znum].flags)) {
 		/*
-		 * Frames are part of physical memory, check if the memory
-		 * region is enabled for mapping.
+		 * Frames are part of physical memory, check
+		 * if the memory region is enabled for mapping.
 		 */
 		irq_spinlock_unlock(&zones.lock, true);
@@ -150 +167 @@
 		if ((!parea) || (parea->frames < pages)) {
 			mutex_unlock(&parea_lock);
-			goto err;
+			return ENOENT;
+		}
+
+		if (!priv) {
+			if (!parea->unpriv) {
+				mutex_unlock(&parea_lock);
+				return EPERM;
+			}
 		}
 
@@ -158 +182 @@
 
 	irq_spinlock_unlock(&zones.lock, true);
-
-err:
 	return ENOENT;
 
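The rewritten sys_physmem_map() thus distinguishes three cases: frames outside any zone or inside a firmware zone remain mappable by privileged tasks only; registered pareas become mappable by unprivileged tasks exactly when parea->unpriv is set; and misaligned arguments now yield EBADMEM instead of tripping an assertion. A condensed, self-contained restatement of that decision logic (the helper function and its boolean parameters are illustrative stand-ins, not kernel code; only the ordering and error codes mirror the diff above):

#include <stdbool.h>

enum { OK = 0, PERM, BADMEM, NOENT };

/* priv = caller holds CAP_MEM_MANAGER */
static int physmem_map_policy(bool aligned, bool in_zone, bool firmware,
    bool parea_found, bool parea_unpriv, bool priv)
{
	if (!aligned)
		return BADMEM;              /* previously an ASSERT() */

	if (!in_zone || firmware)           /* hardware device or firmware */
		return priv ? OK : PERM;

	if (!parea_found)
		return NOENT;               /* zone memory without a parea */

	if (!priv && !parea_unpriv)
		return PERM;                /* unprivileged mapping refused */

	return OK;                          /* proceed with the mapping */
}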
kernel/generic/src/interrupt/interrupt.c
@@ -45 +45 @@
 #include <console/console.h>
 #include <console/cmd.h>
-#include <ipc/event.h>
 #include <synch/mutex.h>
 #include <time/delay.h>
@@ -188 +187 @@
 	printf("\n");
 
-	/*
-	 * Userspace can subscribe for FAULT events to take action
-	 * whenever a thread faults. (E.g. take a dump, run a debugger).
-	 * The notification is always available, but unless Udebug is enabled,
-	 * that's all you get.
-	 */
-	if (event_is_subscribed(EVENT_FAULT)) {
-		/* Notify the subscriber that a fault occurred. */
-		event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
-		    UPPER32(TASK->taskid), (sysarg_t) THREAD);
-
-#ifdef CONFIG_UDEBUG
-		/* Wait for a debugging session. */
-		udebug_thread_fault();
-#endif
-	}
-
-	task_kill(TASK->taskid);
-	thread_exit();
+	task_kill_self(true);
 }
kernel/generic/src/ipc/ipc.c
@@ -787 +787 @@
 	}
 
-	printf(" --- outgoing answers ---\n");
+	printf(" --- incoming answers ---\n");
 	for (cur = task->answerbox.answers.next;
 	    cur != &task->answerbox.answers;
kernel/generic/src/ipc/irq.c
@@ -42 +42 @@
  *
  * The structure of a notification message is as follows:
- * - IMETHOD: interface and method as registered by the SYS_IPC_REGISTER_IRQ
+ * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
  *   syscall
  * - ARG1: payload modified by a 'top-half' handler
kernel/generic/src/ipc/sysipc.c
@@ -1105 +1105 @@
  *
  */
-sysarg_t sys_ipc_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
+sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
     irq_code_t *ucode)
 {
@@ -1122 +1122 @@
  *
  */
-sysarg_t sys_ipc_unregister_irq(inr_t inr, devno_t devno)
+sysarg_t sys_unregister_irq(inr_t inr, devno_t devno)
 {
 	if (!(cap_get(TASK) & CAP_IRQ_REG))
kernel/generic/src/lib/elf.c
@@ -157 +157 @@
 	case PT_NULL:
 	case PT_PHDR:
+	case PT_NOTE:
 		break;
 	case PT_LOAD:
@@ -173 +174 @@
 		break;
 	case PT_SHLIB:
-	case PT_NOTE:
 	case PT_LOPROC:
 	case PT_HIPROC:
kernel/generic/src/lib/rd.c
@@ -90 +90 @@
 	    FRAME_SIZE);
 	rd_parea.frames = SIZE2FRAMES(dsize);
+	rd_parea.unpriv = false;
 	ddi_parea_register(&rd_parea);
 
kernel/generic/src/mm/as.c
@@ -71 +71 @@
 #include <memstr.h>
 #include <macros.h>
+#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
@@ -86 +87 @@
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
- *
  */
 as_operations_t *as_operations = NULL;
 
-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
@@ -107 +106 @@
 
 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
- *
+ * Inactive address spaces (on all processors)
+ * that have valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -123 +121 @@
 	mutex_initialize(&as->lock, MUTEX_PASSIVE);
 
-	int rc = as_constructor_arch(as, flags);
-
-	return rc;
+	return as_constructor_arch(as, flags);
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-	as_t *as = (as_t *) obj;
-	return as_destructor_arch(as);
+	return as_destructor_arch((as_t *) obj);
 }
 
@@ -146 +141 @@
 		panic("Cannot create kernel address space.");
 
-	/* Make sure the kernel address space
+	/*
+	 * Make sure the kernel address space
 	 * reference count never drops to zero.
 	 */
@@ -195 +191 @@
 {
 	DEADLOCK_PROBE_INIT(p_asidlock);
 
 	ASSERT(as != AS);
 	ASSERT(atomic_get(&as->refcount) == 0);
@@ -203 +199 @@
 	 * lock its mutex.
 	 */
 
 	/*
 	 * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -210 +206 @@
 	 * disabled to prevent nested context switches. We also depend on the
 	 * fact that so far no spinlocks are held.
-	 *
 	 */
 	preemption_disable();
@@ -235 +230 @@
 	spinlock_unlock(&asidlock);
 	interrupts_restore(ipl);
 
 
 	/*
@@ -241 +236 @@
 	 * The B+tree must be walked carefully because it is
 	 * also being destroyed.
-	 *
 	 */
 	bool cond = true;
@@ -268 +262 @@
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction of that address
- * space.
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
  *
  * @param as Address space to be held.
@@ -281 +275 @@
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space destroys the address
- * space.
+ * The last one to release a reference to an address space
+ * destroys the address space.
  *
  * @param as Address space to be released.
@@ -295 +289 @@
 /** Check area conflicts with other areas.
  *
- * @param as
- * @param va         Starting virtual address of the area being tested.
- * @param size       Size of the area being tested.
- * @param avoid_area Do not touch this area.
+ * @param as    Address space.
+ * @param addr  Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
-NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
-    as_area_t *avoid_area)
-{
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
+    size_t count, as_area_t *avoid)
+{
+	ASSERT((addr % PAGE_SIZE) == 0);
 	ASSERT(mutex_locked(&as->lock));
 
 	/*
 	 * We don't want any area to have conflicts with NULL page.
-	 *
 	 */
 	if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
 		return false;
@@ -321 +315 @@
 	 * record in the left neighbour, the leftmost record in the right
 	 * neighbour and all records in the leaf node itself.
-	 *
 	 */
 	btree_node_t *leaf;
 	as_area_t *area =
-	    (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+	    (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
 	if (area) {
-		if (area != avoid_area)
+		if (area != avoid)
 			return false;
 	}
@@ -337 +330 @@
 		area = (as_area_t *) node->value[node->keys - 1];
 
-		mutex_lock(&area->lock);
-
-		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+		if (area != avoid) {
+			mutex_lock(&area->lock);
+
+			if (overlaps(addr, count << PAGE_WIDTH,
+			    area->base, area->pages << PAGE_WIDTH)) {
+				mutex_unlock(&area->lock);
+				return false;
+			}
+
 			mutex_unlock(&area->lock);
-			return false;
-		}
-
-		mutex_unlock(&area->lock);
+		}
 	}
 
@@ -350 +346 @@
 		area = (as_area_t *) node->value[0];
 
-		mutex_lock(&area->lock);
-
-		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+		if (area != avoid) {
+			mutex_lock(&area->lock);
+
+			if (overlaps(addr, count << PAGE_WIDTH,
+			    area->base, area->pages << PAGE_WIDTH)) {
+				mutex_unlock(&area->lock);
+				return false;
+			}
+
 			mutex_unlock(&area->lock);
-			return false;
-		}
-
-		mutex_unlock(&area->lock);
+		}
 	}
 
@@ -366 +365 @@
 		area = (as_area_t *) leaf->value[i];
 
-		if (area == avoid_area)
+		if (area == avoid)
 			continue;
 
 		mutex_lock(&area->lock);
 
-		if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+		if (overlaps(addr, count << PAGE_WIDTH,
+		    area->base, area->pages << PAGE_WIDTH)) {
 			mutex_unlock(&area->lock);
 			return false;
@@ -382 +382 @@
 	 * So far, the area does not conflict with other areas.
 	 * Check if it doesn't conflict with kernel address space.
-	 *
 	 */
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-		return !overlaps(va, size,
+		return !overlaps(addr, count << PAGE_WIDTH,
 		    KERNEL_ADDRESS_SPACE_START,
 		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
@@ -412 +411 @@
     mem_backend_data_t *backend_data)
 {
-	if (base % PAGE_SIZE)
+	if ((base % PAGE_SIZE) != 0)
 		return NULL;
 
-	if (!size)
+	if (size == 0)
 		return NULL;
+
+	size_t pages = SIZE2FRAMES(size);
 
 	/* Writeable executable areas are not supported. */
@@ -424 +425 @@
 	mutex_lock(&as->lock);
 
-	if (!check_area_conflicts(as, base, size, NULL)) {
+	if (!check_area_conflicts(as, base, pages, NULL)) {
 		mutex_unlock(&as->lock);
 		return NULL;
@@ -436 +437 @@
 	area->flags = flags;
 	area->attributes = attrs;
-	area->pages = SIZE2FRAMES(size);
+	area->pages = pages;
+	area->resident = 0;
 	area->base = base;
 	area->sh_info = NULL;
@@ -479 +481 @@
 	 * to find out whether this is a miss or va belongs to an address
 	 * space area found there.
-	 *
 	 */
 
@@ -490 +491 @@
 	mutex_lock(&area->lock);
 
-	if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
+	if ((area->base <= va) &&
+	    (va < area->base + (area->pages << PAGE_WIDTH)))
 		return area;
 
@@ -499 +501 @@
 	 * Second, locate the left neighbour and test its last record.
 	 * Because of its position in the B+tree, it must have base < va.
-	 *
 	 */
 	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
@@ -507 +508 @@
 		mutex_lock(&area->lock);
 
-		if (va < area->base + area->pages * PAGE_SIZE)
+		if (va < area->base + (area->pages << PAGE_WIDTH))
 			return area;
 
@@ -534 +535 @@
 	/*
 	 * Locate the area.
-	 *
 	 */
 	as_area_t *area = find_area_and_lock(as, address);
@@ -546 +546 @@
 		 * Remapping of address space areas associated
 		 * with memory mapped devices is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -557 +556 @@
 		 * Remapping of shared address space areas
 		 * is not supported.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -568 +566 @@
 		/*
 		 * Zero size address space areas are not allowed.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -576 +573 @@
 
 	if (pages < area->pages) {
-		uintptr_t start_free = area->base + pages * PAGE_SIZE;
+		uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
 
 		/*
 		 * Shrinking the area.
 		 * No need to check for overlaps.
-		 *
 		 */
 
@@ -588 +584 @@
 		/*
 		 * Start TLB shootdown sequence.
-		 *
 		 */
 		ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-		    area->base + pages * PAGE_SIZE, area->pages - pages);
+		    area->base + (pages << PAGE_WIDTH), area->pages - pages);
 
 		/*
@@ -599 +594 @@
 		 * is also the right way to remove part of the used_space
 		 * B+tree leaf list.
-		 *
 		 */
 		bool cond = true;
@@ -615 +609 @@
 				size_t i = 0;
 
-				if (overlaps(ptr, size * PAGE_SIZE, area->base,
-				    pages * PAGE_SIZE)) {
+				if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+				    pages << PAGE_WIDTH)) {
 
-					if (ptr + size * PAGE_SIZE <= start_free) {
+					if (ptr + (size << PAGE_WIDTH) <= start_free) {
 						/*
 						 * The whole interval fits
 						 * completely in the resized
 						 * address space area.
-						 *
 						 */
 						break;
@@ -632 +625 @@
 					 * to b and c overlaps with the resized
 					 * address space area.
-					 *
 					 */
 
@@ -652 +644 @@
 				for (; i < size; i++) {
 					pte_t *pte = page_mapping_find(as, ptr +
-					    i * PAGE_SIZE);
+					    (i << PAGE_WIDTH));
 
 					ASSERT(pte);
@@ -661 +653 @@
 					    (area->backend->frame_free)) {
 						area->backend->frame_free(area,
-						    ptr + i * PAGE_SIZE,
+						    ptr + (i << PAGE_WIDTH),
 						    PTE_GET_FRAME(pte));
 					}
 
 					page_mapping_remove(as, ptr +
-					    i * PAGE_SIZE);
+					    (i << PAGE_WIDTH));
 				}
 			}
@@ -673 +665 @@
 		/*
 		 * Finish TLB shootdown sequence.
-		 *
 		 */
 
-		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+		tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
 		    area->pages - pages);
 
 		/*
 		 * Invalidate software translation caches (e.g. TSB on sparc64).
-		 *
 		 */
 		as_invalidate_translation_cache(as, area->base +
-		    pages * PAGE_SIZE, area->pages - pages);
+		    (pages << PAGE_WIDTH), area->pages - pages);
 		tlb_shootdown_finalize(ipl);
 
@@ -692 +682 @@
 		 * Growing the area.
 		 * Check for overlaps with other address space areas.
-		 *
 		 */
-		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
-		    area)) {
+		if (!check_area_conflicts(as, address, pages, area)) {
 			mutex_unlock(&area->lock);
 			mutex_unlock(&as->lock);
@@ -794 +782 @@
 
 		for (size = 0; size < (size_t) node->value[i]; size++) {
-			pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+			pte_t *pte =
+			    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
 			ASSERT(pte);
@@ -803 +792 @@
 			    (area->backend->frame_free)) {
 				area->backend->frame_free(area,
-				    ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
+				    ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
 			}
 
-			page_mapping_remove(as, ptr + size * PAGE_SIZE);
+			page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
 		}
 	}
@@ -813 +802 @@
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 
@@ -821 +809 @@
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
@@ -839 +826 @@
 	/*
 	 * Remove the empty area from address space.
-	 *
 	 */
 	btree_remove(&as->as_area_btree, base, NULL);
@@ -881 +867 @@
 		/*
 		 * Could not find the source address space area.
-		 *
 		 */
 		mutex_unlock(&src_as->lock);
@@ -891 +876 @@
 		 * There is no backend or the backend does not
 		 * know how to share the area.
-		 *
 		 */
 		mutex_unlock(&src_area->lock);
@@ -898 +882 @@
 	}
 
-	size_t src_size = src_area->pages * PAGE_SIZE;
+	size_t src_size = src_area->pages << PAGE_WIDTH;
 	unsigned int src_flags = src_area->flags;
 	mem_backend_t *src_backend = src_area->backend;
@@ -918 +902 @@
 	 * First, prepare the area for sharing.
 	 * Then it will be safe to unlock it.
-	 *
 	 */
 	share_info_t *sh_info = src_area->sh_info;
@@ -930 +913 @@
 		/*
 		 * Call the backend to setup sharing.
-		 *
 		 */
 		src_area->backend->share(src_area);
@@ -949 +931 @@
 	 * The flags of the source area are masked against dst_flags_mask
 	 * to support sharing in less privileged mode.
-	 *
 	 */
 	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
@@ -966 +947 @@
 	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
 	 * attribute and set the sh_info.
-	 *
 	 */
 	mutex_lock(&dst_as->lock);
@@ -989 +969 @@
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
+	ASSERT(mutex_locked(&area->lock));
+
 	int flagmap[] = {
 		[PF_ACCESS_READ] = AS_AREA_READ,
 		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
 		[PF_ACCESS_EXEC] = AS_AREA_EXEC
 	};
-
-	ASSERT(mutex_locked(&area->lock));
 
 	if (!(area->flags & flagmap[access]))
@@ -1066 +1046 @@
 	/*
 	 * Compute total number of used pages in the used_space B+tree
-	 *
 	 */
 	size_t used_pages = 0;
@@ -1088 +1067 @@
 	/*
 	 * Start TLB shootdown sequence.
-	 *
 	 */
 	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
@@ -1096 +1074 @@
 	 * Remove used pages from page tables and remember their frame
 	 * numbers.
-	 *
 	 */
 	size_t frame_idx = 0;
@@ -1111 +1088 @@
 
 		for (size = 0; size < (size_t) node->value[i]; size++) {
-			pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+			pte_t *pte =
+			    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
 			ASSERT(pte);
@@ -1120 +1098 @@
 
 			/* Remove old mapping */
-			page_mapping_remove(as, ptr + size * PAGE_SIZE);
+			page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
 		}
 	}
@@ -1127 +1105 @@
 	/*
 	 * Finish TLB shootdown sequence.
-	 *
 	 */
 
@@ -1135 +1112 @@
 	 * Invalidate potential software translation caches (e.g. TSB on
 	 * sparc64).
-	 *
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
@@ -1168 +1144 @@
 
 			/* Insert the new mapping */
-			page_mapping_insert(as, ptr + size * PAGE_SIZE,
+			page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
 			    old_frame[frame_idx++], page_flags);
 
@@ -1217 +1193 @@
 		 * No area contained mapping for 'page'.
 		 * Signal page fault to low-level handler.
-		 *
 		 */
 		mutex_unlock(&AS->lock);
@@ -1237 +1212 @@
 		 * The address space area is not backed by any backend
 		 * or the backend cannot handle page faults.
-		 *
 		 */
 		mutex_unlock(&area->lock);
@@ -1249 +1223 @@
 	 * To avoid race condition between two page faults on the same address,
 	 * we need to make sure the mapping has not been already inserted.
-	 *
 	 */
 	pte_t *pte;
@@ -1267 +1240 @@
 	/*
 	 * Resort to the backend page fault handler.
-	 *
 	 */
 	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
@@ -1322 +1294 @@
 		 * preemption is disabled. We should not be
 		 * holding any other lock.
-		 *
 		 */
 		(void) interrupts_enable();
@@ -1342 +1313 @@
 		 * list of inactive address spaces with assigned
 		 * ASID.
-		 *
 		 */
 		ASSERT(old_as->asid != ASID_INVALID);
@@ -1353 +1323 @@
 		 * Perform architecture-specific tasks when the address space
 		 * is being removed from the CPU.
-		 *
 		 */
 		as_deinstall_arch(old_as);
@@ -1360 +1329 @@
 	/*
 	 * Second, prepare the new address space.
-	 *
 	 */
 	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
@@ -1376 +1344 @@
 	 * Perform architecture-specific steps.
 	 * (e.g. write ASID to hardware register etc.)
-	 *
 	 */
 	as_install_arch(new_as);
@@ -1395 +1362 @@
 {
 	ASSERT(mutex_locked(&area->lock));
 
 	return area_flags_to_page_flags(area->flags);
 }
@@ -1499 +1466 @@
 
 	if (src_area) {
-		size = src_area->pages * PAGE_SIZE;
+		size = src_area->pages << PAGE_WIDTH;
 		mutex_unlock(&src_area->lock);
 	} else
@@ -1516 +1483 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
@@ -1530 +1497 @@
 		/*
 		 * We hit the beginning of some used space.
-		 *
 		 */
-		return 0;
+		return false;
 	}
 
 	if (!leaf->keys) {
 		btree_insert(&area->used_space, page, (void *) count, leaf);
-		return 1;
+		goto success;
 	}
@@ -1551 +1517 @@
 		 * somewhere between the rightmost interval of
 		 * the left neigbour and the first interval of the leaf.
-		 *
 		 */
 
 		if (page >= right_pg) {
 			/* Do nothing. */
-		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
-		    left_cnt * PAGE_SIZE)) {
+		} else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
-			return 0;
-		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
-		    right_cnt * PAGE_SIZE)) {
+			return false;
+		} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
-			return 0;
-		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-		    (page + count * PAGE_SIZE == right_pg)) {
+			return false;
+		} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+		    (page + (count << PAGE_WIDTH) == right_pg)) {
 			/*
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			node->value[node->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, leaf);
-			return 1;
-		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
+			goto success;
+		} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
 			node->value[node->keys - 1] += count;
-			return 1;
-		} else if (page + count * PAGE_SIZE == right_pg) {
+			goto success;
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be addded by simply moving base of
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->value[0] += count;
 			leaf->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page < leaf->key[0]) {
@@ -1609 +1570 @@
 		 * Investigate the border case in which the left neighbour does
 		 * not exist but the interval fits from the left.
-		 *
 		 */
 
-		if (overlaps(page, count * PAGE_SIZE, right_pg,
-		    right_cnt * PAGE_SIZE)) {
+		if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
-			return 0;
-		} else if (page + count * PAGE_SIZE == right_pg) {
+			return false;
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be added by moving the base of the
 			 * right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			leaf->key[0] = page;
 			leaf->value[0] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the right interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
@@ -1649 +1607 @@
 		 * somewhere between the leftmost interval of
 		 * the right neigbour and the last interval of the leaf.
-		 *
 		 */
 
 		if (page < left_pg) {
 			/* Do nothing. */
-		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
-		    left_cnt * PAGE_SIZE)) {
+		} else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
-			return 0;
-		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
-		    right_cnt * PAGE_SIZE)) {
+			return false;
+		} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
-			return 0;
-		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-		    (page + count * PAGE_SIZE == right_pg)) {
+			return false;
+		} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+		    (page + (count << PAGE_WIDTH) == right_pg)) {
 			/*
 			 * The interval can be added by merging the two already
 			 * present intervals.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, node);
-			return 1;
-		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
+			goto success;
+		} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval can be added by simply growing the left
 			 * interval.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
-			return 1;
-		} else if (page + count * PAGE_SIZE == right_pg) {
+			goto success;
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be addded by simply moving base of
 			 * the right interval down and increasing its size
 			 * accordingly.
-			 *
 			 */
 			node->value[0] += count;
 			node->key[0] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval is between both neigbouring intervals,
 			 * but cannot be merged with any of them.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	} else if (page >= leaf->key[leaf->keys - 1]) {
@@ -1707 +1660 @@
 		 * Investigate the border case in which the right neighbour
 		 * does not exist but the interval fits from the right.
-		 *
 		 */
 
-		if (overlaps(page, count * PAGE_SIZE, left_pg,
-		    left_cnt * PAGE_SIZE)) {
+		if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
-			return 0;
-		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
+			return false;
+		} else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
 			/*
 			 * The interval can be added by growing the left
 			 * interval.
-			 *
 			 */
 			leaf->value[leaf->keys - 1] += count;
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * The interval doesn't adjoin with the left interval.
 			 * It must be added individually.
-			 *
 			 */
 			btree_insert(&area->used_space, page, (void *) count,
 			    leaf);
-			return 1;
+			goto success;
 		}
 	}
@@ -1738 +1688 @@
 	 * only between two other intervals of the leaf. The two border cases
 	 * were already resolved.
-	 *
 	 */
 	btree_key_t i;
@@ -1750 +1699 @@
 			/*
 			 * The interval fits between left_pg and right_pg.
-			 *
 			 */
 
-			if (overlaps(page, count * PAGE_SIZE, left_pg,
-			    left_cnt * PAGE_SIZE)) {
+			if (overlaps(page, count << PAGE_WIDTH, left_pg,
+			    left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval intersects with the left
 				 * interval.
-				 *
 				 */
-				return 0;
-			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
-			    right_cnt * PAGE_SIZE)) {
+				return false;
+			} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+			    right_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval intersects with the right
 				 * interval.
-				 *
 				 */
-				return 0;
-			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-			    (page + count * PAGE_SIZE == right_pg)) {
+				return false;
+			} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+			    (page + (count << PAGE_WIDTH) == right_pg)) {
 				/*
 				 * The interval can be added by merging the two
 				 * already present intervals.
-				 *
 				 */
 				leaf->value[i - 1] += count + right_cnt;
 				btree_remove(&area->used_space, right_pg, leaf);
-				return 1;
-			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
+				goto success;
+			} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval can be added by simply growing
 				 * the left interval.
-				 *
 				 */
 				leaf->value[i - 1] += count;
-				return 1;
-			} else if (page + count * PAGE_SIZE == right_pg) {
+				goto success;
+			} else if (page + (count << PAGE_WIDTH) == right_pg) {
 				/*
 				 * The interval can be addded by simply moving
 				 * base of the right interval down and
 				 * increasing its size accordingly.
-				 *
 				 */
 				leaf->value[i] += count;
 				leaf->key[i] = page;
-				return 1;
+				goto success;
 			} else {
 				/*
@@ -1802 +1745 @@
 				 * intervals, but cannot be merged with any of
 				 * them.
-				 *
 				 */
 				btree_insert(&area->used_space, page,
 				    (void *) count, leaf);
-				return 1;
+				goto success;
 			}
 		}
@@ -1813 +1755 @@
 	panic("Inconsistency detected while adding %zu pages of used "
 	    "space at %p.", count, (void *) page);
+
+success:
+	area->resident += count;
+	return true;
 }
 
@@ -1823 +1769 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
@@ -1837 +1783 @@
 		/*
 		 * We are lucky, page is the beginning of some interval.
-		 *
 		 */
 		if (count > pages) {
-			return 0;
+			return false;
 		} else if (count == pages) {
 			btree_remove(&area->used_space, page, leaf);
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * Find the respective interval.
 			 * Decrease its size and relocate its start address.
-			 *
 			 */
 			btree_key_t i;
 			for (i = 0; i < leaf->keys; i++) {
 				if (leaf->key[i] == page) {
-					leaf->key[i] += count * PAGE_SIZE;
+					leaf->key[i] += count << PAGE_WIDTH;
 					leaf->value[i] -= count;
-					return 1;
+					goto success;
 				}
 			}
+
 			goto error;
 		}
@@ -1867 +1812 @@
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-		    count * PAGE_SIZE)) {
-			if (page + count * PAGE_SIZE ==
-			    left_pg + left_cnt * PAGE_SIZE) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
@@ -1876 +1821 @@
 				 * removed by updating the size of the bigger
 				 * interval.
-				 *
 				 */
 				node->value[node->keys - 1] -= count;
-				return 1;
-			} else if (page + count * PAGE_SIZE <
-			    left_pg + left_cnt*PAGE_SIZE) {
+				goto success;
+			} else if (page + (count << PAGE_WIDTH) <
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
@@ -1888 +1832 @@
 				 * the original interval and also inserting a
 				 * new interval.
-				 *
 				 */
-				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-				    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				node->value[node->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	} else if (page < leaf->key[0])
-		return 0;
+		return false;
 
 	if (page > leaf->key[leaf->keys - 1]) {
@@ -1906 +1850 @@
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-		    count * PAGE_SIZE)) {
-			if (page + count * PAGE_SIZE ==
-			    left_pg + left_cnt * PAGE_SIZE) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
 				 * interval of the leaf and can be removed by
 				 * updating the size of the bigger interval.
-				 *
 				 */
 				leaf->value[leaf->keys - 1] -= count;
-				return 1;
-			} else if (page + count * PAGE_SIZE < left_pg +
-			    left_cnt * PAGE_SIZE) {
+				goto success;
+			} else if (page + (count << PAGE_WIDTH) < left_pg +
+			    (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
@@ -1926 +1869 @@
 				 * original interval and also inserting a new
 				 * interval.
-				 *
 				 */
-				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				leaf->value[leaf->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	}
 
 	/*
 	 * The border cases have been already resolved.
 	 * Now the interval can be only between intervals of the leaf.
 	 */
 	btree_key_t i;
@@ -1953 +1896 @@
 			 * to (i - 1) and i.
 			 */
-			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-			    count * PAGE_SIZE)) {
-				if (page + count * PAGE_SIZE ==
-				    left_pg + left_cnt*PAGE_SIZE) {
+			if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+			    count << PAGE_WIDTH)) {
+				if (page + (count << PAGE_WIDTH) ==
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
@@ -1962 +1905 @@
 					 * be removed by updating the size of
 					 * the bigger interval.
-					 *
 					 */
 					leaf->value[i - 1] -= count;
-					return 1;
-				} else if (page + count * PAGE_SIZE <
-				    left_pg + left_cnt * PAGE_SIZE) {
+					goto success;
+				} else if (page + (count << PAGE_WIDTH) <
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
@@ -1976 +1918 @@
 					 */
 					size_t new_cnt = ((left_pg +
-					    left_cnt * PAGE_SIZE) -
-					    (page + count * PAGE_SIZE)) >>
+					    (left_cnt << PAGE_WIDTH)) -
+					    (page + (count << PAGE_WIDTH))) >>
 					    PAGE_WIDTH;
 					leaf->value[i - 1] -= count + new_cnt;
 					btree_insert(&area->used_space, page +
-					    count * PAGE_SIZE, (void *) new_cnt,
+					    (count << PAGE_WIDTH), (void *) new_cnt,
 					    leaf);
-					return 1;
+					goto success;
 				}
 			}
-			return 0;
+
+			return false;
 		}
 	}
@@ -1993 +1936 @@
 	panic("Inconsistency detected while removing %zu pages of used "
 	    "space from %p.", count, (void *) page);
+
+success:
+	area->resident -= count;
+	return true;
 }
 
@@ -2027 +1974 @@
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+	if (size == 0)
+		return 0;
+
+	/*
+	 * Make sure we allocate from page-aligned
+	 * address. Check for possible overflow in
+	 * each step.
+	 */
+
+	size_t pages = SIZE2FRAMES(size);
+	uintptr_t ret = 0;
+
+	/*
+	 * Find the lowest unmapped address aligned on the sz
+	 * boundary, not smaller than base and of the required size.
+	 */
+
+	mutex_lock(&AS->lock);
+
+	/* First check the base address itself */
+	uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+	if ((addr >= base) &&
+	    (check_area_conflicts(AS, addr, pages, NULL)))
+		ret = addr;
+
+	/* Eventually check the addresses behind each area */
+	link_t *cur;
+	for (cur = AS->as_area_btree.leaf_head.next;
+	    (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
+	    cur = cur->next) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		btree_key_t i;
+		for (i = 0; (ret == 0) && (i < node->keys); i++) {
+			as_area_t *area = (as_area_t *) node->value[i];
+
+			mutex_lock(&area->lock);
+
+			uintptr_t addr =
+			    ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
+			    PAGE_SIZE);
+
+			if ((addr >= base) && (addr >= area->base) &&
+			    (check_area_conflicts(AS, addr, pages, area)))
+				ret = addr;
+
+			mutex_unlock(&area->lock);
+		}
+	}
+
+	mutex_unlock(&AS->lock);
+
+	return (sysarg_t) ret;
+}
+
 /** Get list of adress space areas.
 
@@ -2093 +2106 @@
 	mutex_lock(&as->lock);
 
-	/* print out info about address space areas */
+	/* Print out info about address space areas */
 	link_t *cur;
 	for (cur = as->as_area_btree.leaf_head.next;
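The new sys_as_get_unmapped_area() walks the area B+tree and returns the lowest page-aligned address at or above base where size bytes would not conflict with any existing area (conflicts with the NULL page and the kernel address space are ruled out by check_area_conflicts()). A hypothetical userspace sketch of how this might be consumed; the __SYSCALL2 macro and the as_area_create() wrapper signature are assumptions, only SYS_AS_GET_UNMAPPED_AREA comes from the changeset:

/* Hypothetical libc-side helper: ask the kernel for a free spot,
 * then create an address space area there. */
static void *anon_map(size_t size, int flags)
{
	uintptr_t addr = (uintptr_t) __SYSCALL2(SYS_AS_GET_UNMAPPED_AREA,
	    (sysarg_t) NULL, (sysarg_t) size);
	if (addr == 0)
		return NULL;  /* size == 0 or no suitable gap found */

	return as_area_create((void *) addr, size, flags);
}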
kernel/generic/src/proc/program.c
re778543 r17aca1c
171 171 	void *loader = program_loader;
172 172 	if (!loader) {
173 		as_destroy(as);
173 174 		printf("Cannot spawn loader as none was registered\n");
174 175 		return ENOENT;
…
179 180 	if (rc != EE_OK) {
180 181 		as_destroy(as);
182 		printf("Cannot spawn loader (%s)\n", elf_error(rc));
181 183 		return ENOENT;
182 184 	}
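This hunk plugs an address-space leak: as_destroy(as) now runs on both early-return paths, not just the ELF failure path. A self-contained sketch of the pattern (make_as, destroy_as, load_elf are placeholder names, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int id; } toy_as_t;

    static toy_as_t *make_as(void) { return malloc(sizeof(toy_as_t)); }
    static void destroy_as(toy_as_t *as) { free(as); }
    static int load_elf(toy_as_t *as) { (void) as; return 0; /* 0 == EE_OK */ }

    static int spawn(int loader_registered)
    {
        toy_as_t *as = make_as();
        if (as == NULL)
            return -1;

        if (!loader_registered) {
            destroy_as(as);  /* the leak fixed above: release before bailing out */
            printf("Cannot spawn loader as none was registered\n");
            return -1;
        }

        if (load_elf(as) != 0) {
            destroy_as(as);  /* failure path already did this; now both do */
            return -1;
        }

        return 0;  /* on success, ownership of 'as' passes to the new task */
    }

    int main(void)
    {
        return spawn(1);
    }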
kernel/generic/src/proc/task.c
re778543 r17aca1c
384 384 {
385 385 	task_id_t taskid;
386 	int rc;
387 
388 	rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
386 	int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
389 387 	if (rc != 0)
390 388 		return (sysarg_t) rc;
391 389 
392 390 	return (sysarg_t) task_kill(taskid);
393 391 }
…
520 518 }
521 519 
520 /** Kill the currently running task.
521  *
522  * @param notify Send out fault notifications.
523  *
524  * @return Zero on success or an error code from errno.h.
525  *
526  */
527 void task_kill_self(bool notify)
528 {
529 	/*
530 	 * User space can subscribe for FAULT events to take action
531 	 * whenever a task faults (to take a dump, run a debugger, etc.).
532 	 * The notification is always available, but unless udebug is enabled,
533 	 * that's all you get.
534 	 */
535 	if (notify) {
536 		if (event_is_subscribed(EVENT_FAULT)) {
537 			/* Notify the subscriber that a fault occurred. */
538 			event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
539 			    UPPER32(TASK->taskid), (sysarg_t) THREAD);
540 
541 #ifdef CONFIG_UDEBUG
542 			/* Wait for a debugging session. */
543 			udebug_thread_fault();
544 #endif
545 		}
546 	}
547 
548 	irq_spinlock_lock(&tasks_lock, true);
549 	task_kill_internal(TASK);
550 	irq_spinlock_unlock(&tasks_lock, true);
551 
552 	thread_exit();
553 }
554 
555 /** Process syscall to terminate the current task.
556  *
557  * @param notify Send out fault notifications.
558  *
559  */
560 sysarg_t sys_task_exit(sysarg_t notify)
561 {
562 	task_kill_self(notify);
563 
564 	/* Unreachable */
565 	return EOK;
566 }
567 
522 568 static bool task_print_walker(avltree_node_t *node, void *arg)
523 569 {
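The key ordering in the new task_kill_self() is notify-then-die: an optional EVENT_FAULT notification (and, with udebug, a pause for a debugging session) precedes the actual teardown, so a dump service or debugger can inspect the task before it disappears. A runnable toy of that control flow, with the event plumbing mocked by plain functions:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for event_is_subscribed(EVENT_FAULT). */
    static bool fault_event_subscribed = true;

    static void notify_fault_subscriber(void)
    {
        /* In the kernel: event_notify_3(EVENT_FAULT, ...) followed,
         * under CONFIG_UDEBUG, by waiting for a debugging session. */
        printf("EVENT_FAULT delivered\n");
    }

    static void toy_task_kill_self(bool notify)
    {
        if (notify && fault_event_subscribed)
            notify_fault_subscriber();  /* debugger/dumper gets a look first */

        /* Only then is the task queued for destruction and the thread exits. */
        printf("task killed, thread exiting\n");
        exit(0);
    }

    int main(void)
    {
        toy_task_kill_self(true);  /* roughly what sys_task_exit(1) boils down to */
    }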
kernel/generic/src/syscall/syscall.c
re778543 r17aca1c
87 87 	} else {
88 88 		printf("Task %" PRIu64 ": Unknown syscall %#" PRIxn, TASK->taskid, id);
89 		task_kill(TASK->taskid);
90 		thread_exit();
89 		task_kill_self(true);
91 90 	}
92 91 
…
132 131 	(syshandler_t) sys_task_set_name,
133 132 	(syshandler_t) sys_task_kill,
133 	(syshandler_t) sys_task_exit,
134 134 	(syshandler_t) sys_program_spawn_loader,
135 135 
…
144 144 	(syshandler_t) sys_as_area_change_flags,
145 145 	(syshandler_t) sys_as_area_destroy,
146 	(syshandler_t) sys_as_get_unmapped_area,
146 147 
147 148 	/* Page mapping related syscalls. */
…
160 161 	(syshandler_t) sys_ipc_poke,
161 162 	(syshandler_t) sys_ipc_hangup,
162 	(syshandler_t) sys_ipc_register_irq,
163 	(syshandler_t) sys_ipc_unregister_irq,
164 163 	(syshandler_t) sys_ipc_connect_kbox,
…
175 174 	(syshandler_t) sys_physmem_map,
176 175 	(syshandler_t) sys_iospace_enable,
176 	(syshandler_t) sys_register_irq,
177 	(syshandler_t) sys_unregister_irq,
177 178 
178 179 	/* Sysinfo syscalls */
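Because userspace selects a handler purely by its index in this table, inserting sys_task_exit and sys_as_get_unmapped_area and moving the IRQ syscalls renumbers every later slot; kernel and userspace must be rebuilt together. A minimal runnable model of table-driven dispatch, with toy handlers standing in for the real entries:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sysarg_t;
    typedef sysarg_t (*syshandler_t)(sysarg_t, sysarg_t);

    static sysarg_t sys_hello(sysarg_t a, sysarg_t b) { (void) b; return a; }
    static sysarg_t sys_double(sysarg_t a, sysarg_t b) { (void) b; return 2 * a; }

    static const syshandler_t syscall_table[] = {
        sys_hello,   /* slot 0 */
        sys_double,  /* slot 1: meaning is fixed by position alone */
    };

    #define TABLE_LEN (sizeof(syscall_table) / sizeof(syscall_table[0]))

    static sysarg_t dispatch(sysarg_t a1, sysarg_t a2, unsigned id)
    {
        if (id < TABLE_LEN)
            return syscall_table[id](a1, a2);

        /* The kernel above now calls task_kill_self(true) at this point. */
        printf("Unknown syscall %u\n", id);
        return (sysarg_t) -1;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long) dispatch(21, 0, 1));  /* prints 42 */
        return 0;
    }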
kernel/generic/src/sysinfo/stats.c
re778543 r17aca1c
160 160 static size_t get_task_virtmem(as_t *as)
161 161 {
162 	size_t result = 0;
163 
164 162 	/*
165 	 * We are holding some spinlocks here and therefore are not allowed to
166 	 * block. Only attempt to lock the address space and address space area
167 	 * mutexes conditionally. If it is not possible to lock either object,
168 	 * allow the statistics to be inexact by skipping the respective object.
169 	 *
170 	 * Note that it may be infinitely better to let the address space
171 	 * management code compute these statistics as it proceeds instead of
172 	 * having them calculated over and over again here.
163 	 * We are holding spinlocks here and therefore are not allowed to
164 	 * block. Only attempt to lock the address space and address space
165 	 * area mutexes conditionally. If it is not possible to lock either
166 	 * object, return inexact statistics by skipping the respective object.
173 167 	 */
174 168 
175 169 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
176 		return result * PAGE_SIZE;
170 		return 0;
171 
172 	size_t pages = 0;
177 173 
178 174 	/* Walk the B+ tree and count pages */
179 	link_t *cur;
180 	for (cur = as->as_area_btree.leaf_head.next;
181 	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
182 		btree_node_t *node =
183 		    list_get_instance(cur, btree_node_t, leaf_link);
184 
185 		unsigned int i;
186 		for (i = 0; i < node->keys; i++) {
187 			as_area_t *area = node->value[i];
188 
189 			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
190 				continue;
191 			result += area->pages;
192 			mutex_unlock(&area->lock);
193 		}
194 	}
195 
196 	mutex_unlock(&as->lock);
197 
198 	return result * PAGE_SIZE;
199 }
200 
201 /** Get the resident (used) size of a virtual address space
202  *
203  * @param as Address space.
204  *
205  * @return Size of the resident (used) virtual address space (bytes).
206  *
207  */
208 static size_t get_task_resmem(as_t *as)
209 {
210 	size_t result = 0;
211 
212 	/*
213 	 * We are holding some spinlocks here and therefore are not allowed to
214 	 * block. Only attempt to lock the address space and address space area
215 	 * mutexes conditionally. If it is not possible to lock either object,
216 	 * allow the statistics to be inexact by skipping the respective object.
217 	 *
218 	 * Note that it may be infinitely better to let the address space
219 	 * management code compute these statistics as it proceeds instead of
220 	 * having them calculated over and over again here.
221 	 */
222 
223 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
224 		return result * PAGE_SIZE;
225 
226 	/* Walk the B+ tree of AS areas */
227 175 	link_t *cur;
228 176 	for (cur = as->as_area_btree.leaf_head.next;
…
238 186 				continue;
239 187 
240 			/* Walk the B+ tree of resident pages */
241 			link_t *rcur;
242 			for (rcur = area->used_space.leaf_head.next;
243 			    rcur != &area->used_space.leaf_head; rcur = rcur->next) {
244 				btree_node_t *rnode =
245 				    list_get_instance(rcur, btree_node_t, leaf_link);
246 
247 				unsigned int j;
248 				for (j = 0; j < rnode->keys; j++)
249 					result += (size_t) rnode->value[i];
250 			}
251 
188 			pages += area->pages;
252 189 			mutex_unlock(&area->lock);
253 190 		}
…
256 193 	mutex_unlock(&as->lock);
257 194 
258 	return result * PAGE_SIZE;
195 	return (pages << PAGE_WIDTH);
196 }
197 
198 /** Get the resident (used) size of a virtual address space
199  *
200  * @param as Address space.
201  *
202  * @return Size of the resident (used) virtual address space (bytes).
203  *
204  */
205 static size_t get_task_resmem(as_t *as)
206 {
207 	/*
208 	 * We are holding spinlocks here and therefore are not allowed to
209 	 * block. Only attempt to lock the address space and address space
210 	 * area mutexes conditionally. If it is not possible to lock either
211 	 * object, return inexact statistics by skipping the respective object.
212 	 */
213 
214 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
215 		return 0;
216 
217 	size_t pages = 0;
218 
219 	/* Walk the B+ tree and count pages */
220 	link_t *cur;
221 	for (cur = as->as_area_btree.leaf_head.next;
222 	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
223 		btree_node_t *node =
224 		    list_get_instance(cur, btree_node_t, leaf_link);
225 
226 		unsigned int i;
227 		for (i = 0; i < node->keys; i++) {
228 			as_area_t *area = node->value[i];
229 
230 			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
231 				continue;
232 
233 			pages += area->resident;
234 			mutex_unlock(&area->lock);
235 		}
236 	}
237 
238 	mutex_unlock(&as->lock);
239 
240 	return (pages << PAGE_WIDTH);
259 241 }
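The rewritten get_task_resmem() no longer walks each area's used_space B+ tree on every query; it reads the area->resident counter that used_space_insert()/used_space_remove() now keep up to date (see the as.c hunk above), turning an O(tree) walk into an O(1) read per area. A runnable model of that accounting invariant, with simplified stand-in types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct {
        size_t pages;     /* pages spanned by the area */
        size_t resident;  /* pages actually backed by frames */
    } toy_area_t;

    static bool toy_used_space_insert(toy_area_t *area, size_t count)
    {
        /* ... B+ tree interval bookkeeping elided ... */
        area->resident += count;   /* mirrors the new 'success:' path */
        return true;
    }

    static bool toy_used_space_remove(toy_area_t *area, size_t count)
    {
        /* ... B+ tree interval bookkeeping elided ... */
        area->resident -= count;
        return true;
    }

    int main(void)
    {
        toy_area_t area = { .pages = 64, .resident = 0 };
        toy_used_space_insert(&area, 16);
        toy_used_space_remove(&area, 4);
        printf("resident pages: %zu\n", area.resident);  /* prints 12 */
        return 0;
    }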
kernel/generic/src/time/clock.c
re778543 r17aca1c
93 93 	clock_parea.pbase = (uintptr_t) faddr;
94 94 	clock_parea.frames = 1;
95 	clock_parea.unpriv = true;
95 96 	ddi_parea_register(&clock_parea);
96 97 
…
100 101 	 *
101 102 	 */
102 	sysinfo_set_item_val("clock.cacheable", NULL, (sysarg_t) true);
103 103 	sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
104 104 }
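Setting unpriv = true on clock_parea is what the new parea_t field (declared in ddi.h above) is for: ordinary tasks may now map the kernel's clock page, found via the clock.faddr sysinfo item, so reading uptime becomes a shared-memory load rather than a syscall. A runnable toy of that reader/writer arrangement; the page layout shown is an assumption for illustration, not the actual kernel structure:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed, simplified stand-in for the kernel's clock page layout. */
    typedef struct {
        volatile uint64_t seconds;
    } clock_page_t;

    static clock_page_t shared_page;  /* in reality: the mapped physical frame */

    /* "Kernel" side: the timer interrupt bumps the counter in place. */
    static void kernel_tick(void) { shared_page.seconds++; }

    /* "Userspace" side: uptime is just a load from the shared page. */
    static uint64_t uptime_seconds(void) { return shared_page.seconds; }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            kernel_tick();
        printf("uptime: %llu s\n", (unsigned long long) uptime_seconds());
        return 0;
    }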