- Timestamp: 2011-02-03T05:11:01Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: ba38f72c
- Parents: 22027b6e (diff), 86d7bfa (diff)
- Location: kernel
- Files: 41 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
kernel/arch/amd64/include/interrupt.h
@@ 55,55 @@
  #define IRQ_PIC_SPUR  7
  #define IRQ_MOUSE     12
- #define IRQ_DP8390    9
+ #define IRQ_NE2000    9

  /* This one must have four least significant bits set to ones */
kernel/arch/amd64/src/amd64.c
@@ 238,238 @@
  sysinfo_set_item_val(irqs_info, NULL, true);

- sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+ sysinfo_set_item_val("netif.ne2000.inr", NULL, IRQ_NE2000);
  }
kernel/arch/arm32/src/mach/gta02/gta02.c
@@ 174,174 @@
  fb_parea.pbase = GTA02_FB_BASE;
  fb_parea.frames = 150;
+ fb_parea.unpriv = false;
  ddi_parea_register(&fb_parea);
  }
kernel/arch/arm32/src/mach/integratorcp/integratorcp.c
@@ 300,300 @@
  fb_parea.pbase = ICP_FB;
  fb_parea.frames = 300;
+ fb_parea.unpriv = false;
  ddi_parea_register(&fb_parea);
  }
kernel/arch/ia32/_link.ld.in
@@ 49,49 @@
  }

+ #ifdef CONFIG_LINE_DEBUG
+ 	.comment 0 : { *(.comment); }
+ 	.debug_abbrev 0 : { *(.debug_abbrev); }
+ 	.debug_aranges 0 : { *(.debug_aranges); }
+ 	.debug_info 0 : { *(.debug_info); }
+ 	.debug_line 0 : { *(.debug_line); }
+ 	.debug_loc 0 : { *(.debug_loc); }
+ 	.debug_pubnames 0 : { *(.debug_pubnames); }
+ 	.debug_pubtypes 0 : { *(.debug_pubtypes); }
+ 	.debug_ranges 0 : { *(.debug_ranges); }
+ 	.debug_str 0 : { *(.debug_str); }
+ #endif
+
  /DISCARD/ : {
- 	*(.note.GNU-stack);
- 	*(.comment);
+ 	*(*);
  }
kernel/arch/ia32/include/interrupt.h
@@ 55,55 @@
  #define IRQ_PIC_SPUR  7
  #define IRQ_MOUSE     12
- #define IRQ_DP8390    5
+ #define IRQ_NE2000    5

  /* This one must have four least significant bits set to ones */
kernel/arch/ia32/src/ia32.c
@@ 196,196 @@
  sysinfo_set_item_val(irqs_info, NULL, true);

- sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+ sysinfo_set_item_val("netif.ne2000.inr", NULL, IRQ_NE2000);
  }
kernel/arch/ia64/include/interrupt.h
@@ 61,61 @@
  #define IRQ_KBD    (0x01 + LEGACY_INTERRUPT_BASE)
  #define IRQ_MOUSE  (0x0c + LEGACY_INTERRUPT_BASE)
- #define IRQ_DP8390 (0x09 + LEGACY_INTERRUPT_BASE)
+ #define IRQ_NE2000 (0x09 + LEGACY_INTERRUPT_BASE)

  /** General Exception codes. */
kernel/arch/ia64/src/ia64.c
@@ 222,222 @@
  #endif

- sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+ sysinfo_set_item_val("netif.ne2000.inr", NULL, IRQ_NE2000);

  sysinfo_set_item_val("ia64_iospace", NULL, true);
kernel/arch/sparc64/src/drivers/niagara.c
@@ 216,216 @@
  outbuf_parea.pbase = (uintptr_t) (KA2PA(&output_buffer));
  outbuf_parea.frames = 1;
+ outbuf_parea.unpriv = false;
  ddi_parea_register(&outbuf_parea);

@@ 221,222 @@
  inbuf_parea.pbase = (uintptr_t) (KA2PA(&input_buffer));
  inbuf_parea.frames = 1;
+ inbuf_parea.unpriv = false;
  ddi_parea_register(&inbuf_parea);
kernel/generic/include/ddi/ddi.h
@@ 43,43 @@
  /** Structure representing contiguous physical memory area. */
  typedef struct {
- 	uintptr_t pbase;  /**< Physical base of the area. */
- 	pfn_t frames;     /**< Number of frames in the area. */
+ 	link_t link;      /**< Linked list link */

- 	link_t link;      /**< Linked list link */
+ 	uintptr_t pbase;  /**< Physical base of the area. */
+ 	pfn_t frames;     /**< Number of frames in the area. */
+ 	bool unpriv;      /**< Allow mapping by unprivileged tasks. */
  } parea_t;

@@ 54,55 @@
  extern sysarg_t sys_physmem_map(sysarg_t, sysarg_t, sysarg_t, sysarg_t);
  extern sysarg_t sys_iospace_enable(ddi_ioarg_t *);
- extern sysarg_t sys_interrupt_enable(int irq, int enable);

  /*

@@ 61,61 @@
  extern int ddi_iospace_enable_arch(task_t *, uintptr_t, size_t);

-
  #endif
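The new unpriv flag is what allows sys_physmem_map() (see kernel/generic/src/ddi/ddi.c below) to grant mappings to tasks that lack CAP_MEM_MANAGER. A minimal sketch of how a kernel component would publish such an area, modeled on the clock page in kernel/generic/src/time/clock.c; the my_parea and frame_addr names are illustrative only and not part of this changeset:

    /* Illustrative sketch only -- not part of this changeset. */
    static parea_t my_parea;

    static void my_area_init(uintptr_t frame_addr)
    {
    	my_parea.pbase = frame_addr;    /* physical base, frame aligned */
    	my_parea.frames = 1;            /* area is one frame long */
    	my_parea.unpriv = true;         /* mappable by unprivileged tasks */
    	ddi_parea_register(&my_parea);  /* enter it into the global parea list */
    }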
kernel/generic/include/debug.h
r22027b6e r8b5690f 55 55 do { \ 56 56 if (!(expr)) \ 57 panic_assert("%s", #expr); \ 57 panic_assert("%s() at %s:%u:\n%s", \ 58 __func__, __FILE__, __LINE__, #expr); \ 58 59 } while (0) 59 60 … … 72 73 do { \ 73 74 if (!(expr)) \ 74 panic_assert("%s, %s", #expr, msg); \ 75 panic_assert("%s() at %s:%u:\n%s, %s", \ 76 __func__, __FILE__, __LINE__, #expr, msg); \ 75 77 } while (0) 76 78 -
kernel/generic/include/ipc/event_types.h
@@ 41,41 @@
  /** Returning from kernel console to userspace */
  EVENT_KCONSOLE,
- /** A thread has faulted and will be terminated */
+ /** A task/thread has faulted and will be terminated */
  EVENT_FAULT,
  EVENT_END
kernel/generic/include/ipc/ipc.h
@@ 165,165 @@
   * error is sent back to caller. Otherwise
   * the call is accepted and the response is sent back.
-  * - the allocated phoneid is passed to userspace
+  * - the hash of the client task is passed to userspace
+  *   (on the receiving side) as ARG4 of the call.
+  * - the hash of the allocated phone is passed to userspace
   *   (on the receiving side) as ARG5 of the call.
   *

@@ 319,321 @@
  typedef struct {
  	sysarg_t args[IPC_CALL_LEN];
+ 	/** Task which made or forwarded the call with IPC_FF_ROUTE_FROM_ME. */
+ 	struct task *task;
+ 	/** Phone which made or last masqueraded this call. */
  	phone_t *phone;
  } ipc_data_t;

@@ 333,338 @@
   * The caller box is different from sender->answerbox
   * for synchronous calls.
-  *
   */
  answerbox_t *callerbox;

@@ 350,354 @@
   * cases, we must keep it aside so that the answer is processed
   * correctly.
-  *
   */
  phone_t *caller_phone;
kernel/generic/include/ipc/sysipc.h
@@ 56,56 @@
      unsigned int);
  extern sysarg_t sys_ipc_hangup(sysarg_t);
- extern sysarg_t sys_ipc_register_irq(inr_t, devno_t, sysarg_t, irq_code_t *);
- extern sysarg_t sys_ipc_unregister_irq(inr_t, devno_t);
+ extern sysarg_t sys_register_irq(inr_t, devno_t, sysarg_t, irq_code_t *);
+ extern sysarg_t sys_unregister_irq(inr_t, devno_t);

  #ifdef __32_BITS__
kernel/generic/include/proc/task.h
@@ 131,131 @@
  extern task_t *task_find_by_id(task_id_t);
  extern int task_kill(task_id_t);
+ extern void task_kill_self(bool) __attribute__((noreturn));
  extern void task_get_accounting(task_t *, uint64_t *, uint64_t *);
  extern void task_print_list(bool);

@@ 155,156 @@
  extern sysarg_t sys_task_set_name(const char *, size_t);
  extern sysarg_t sys_task_kill(task_id_t *);
+ extern sysarg_t sys_task_exit(sysarg_t);

  #endif
kernel/generic/include/proc/thread.h
r22027b6e r8b5690f 91 91 92 92 /** Function implementing the thread. */ 93 void (* 93 void (*thread_code)(void *); 94 94 /** Argument passed to thread_code() function. */ 95 95 void *thread_arg; 96 96 97 97 /** 98 * From here, the stored context is restored when the thread is99 * scheduled.98 * From here, the stored context is restored 99 * when the thread is scheduled. 100 100 */ 101 101 context_t saved_context; 102 /** 103 * From here, the stored timeout context is restored when sleep times 104 * out. 102 103 /** 104 * From here, the stored timeout context 105 * is restored when sleep times out. 105 106 */ 106 107 context_t sleep_timeout_context; 107 /** 108 * From here, the stored interruption context is restored when sleep is 109 * interrupted. 108 109 /** 110 * From here, the stored interruption context 111 * is restored when sleep is interrupted. 110 112 */ 111 113 context_t sleep_interruption_context; … … 125 127 */ 126 128 bool in_copy_from_uspace; 129 127 130 /** 128 131 * True if this thread is executing copy_to_uspace(). … … 187 190 188 191 #ifdef CONFIG_UDEBUG 192 /** 193 * If true, the scheduler will print a stack trace 194 * to the kernel console upon scheduling this thread. 195 */ 196 bool btrace; 197 189 198 /** Debugging stuff */ 190 199 udebug_thread_t udebug; … … 237 246 extern bool thread_exists(thread_t *); 238 247 248 #ifdef CONFIG_UDEBUG 249 extern void thread_stack_trace(thread_id_t); 250 #endif 251 239 252 /** Fpu context slab cache. */ 240 253 extern slab_cache_t *fpu_context_slab; -
kernel/generic/include/syscall/syscall.h
@@ 48,48 @@
  SYS_TASK_SET_NAME,
  SYS_TASK_KILL,
+ SYS_TASK_EXIT,
  SYS_PROGRAM_SPAWN_LOADER,

@@ 70,71 @@
  SYS_IPC_POKE,
  SYS_IPC_HANGUP,
- SYS_IPC_REGISTER_IRQ,
- SYS_IPC_UNREGISTER_IRQ,
  SYS_IPC_CONNECT_KBOX,

@@ 82,81 @@
  SYS_PHYSMEM_MAP,
  SYS_IOSPACE_ENABLE,
- SYS_INTERRUPT_ENABLE,
+ SYS_REGISTER_IRQ,
+ SYS_UNREGISTER_IRQ,

  SYS_SYSINFO_GET_TAG,
kernel/generic/include/sysinfo/abi.h
@@ 104,104 @@
  char name[TASK_NAME_BUFLEN];  /**< Task name (in kernel) */
  size_t virtmem;               /**< Size of VAS (bytes) */
+ size_t resmem;                /**< Size of resident (used) memory (bytes) */
  size_t threads;               /**< Number of threads */
  uint64_t ucycles;             /**< Number of CPU cycles in user space */
kernel/generic/include/sysinfo/sysinfo.h
@@ 148,148 @@
  extern sysarg_t sys_sysinfo_get_value(void *, size_t, void *);
  extern sysarg_t sys_sysinfo_get_data_size(void *, size_t, void *);
- extern sysarg_t sys_sysinfo_get_data(void *, size_t, void *, size_t);
+ extern sysarg_t sys_sysinfo_get_data(void *, size_t, void *, size_t, size_t *);
kernel/generic/include/udebug/udebug.h
r22027b6e r8b5690f 36 36 #define KERN_UDEBUG_H_ 37 37 38 #define UDEBUG_EVMASK(event) (1 << ((event) - 1)) 39 40 typedef enum { /* udebug_method_t */ 41 42 /** Start debugging the recipient. 43 * 44 * Causes all threads in the receiving task to stop. When they 45 * are all stoped, an answer with retval 0 is generated. 46 * 47 */ 48 UDEBUG_M_BEGIN = 1, 49 50 /** Finish debugging the recipient. 51 * 52 * Answers all pending GO and GUARD messages. 53 * 54 */ 55 UDEBUG_M_END, 56 57 /** Set which events should be captured. */ 58 UDEBUG_M_SET_EVMASK, 59 60 /** Make sure the debugged task is still there. 61 * 62 * This message is answered when the debugged task dies 63 * or the debugging session ends. 64 * 65 */ 66 UDEBUG_M_GUARD, 67 68 /** Run a thread until a debugging event occurs. 69 * 70 * This message is answered when the thread stops 71 * in a debugging event. 72 * 73 * - ARG2 - id of the thread to run 74 * 75 */ 76 UDEBUG_M_GO, 77 78 /** Stop a thread being debugged. 79 * 80 * Creates a special STOP event in the thread, causing 81 * it to answer a pending GO message (if any). 82 * 83 */ 84 UDEBUG_M_STOP, 85 86 /** Read arguments of a syscall. 87 * 88 * - ARG2 - thread identification 89 * - ARG3 - destination address in the caller's address space 90 * 91 */ 92 UDEBUG_M_ARGS_READ, 93 94 /** Read thread's userspace register state (istate_t). 95 * 96 * - ARG2 - thread identification 97 * - ARG3 - destination address in the caller's address space 98 * 99 * or, on error, retval will be 100 * - ENOENT - thread does not exist 101 * - EBUSY - register state not available 102 */ 103 UDEBUG_M_REGS_READ, 104 105 /** Read the list of the debugged tasks's threads. 106 * 107 * - ARG2 - destination address in the caller's address space 108 * - ARG3 - size of receiving buffer in bytes 109 * 110 * The kernel fills the buffer with a series of sysarg_t values 111 * (thread ids). On answer, the kernel will set: 112 * 113 * - ARG2 - number of bytes that were actually copied 114 * - ARG3 - number of bytes of the complete data 115 * 116 */ 117 UDEBUG_M_THREAD_READ, 118 119 /** Read the name of the debugged task. 120 * 121 * - ARG2 - destination address in the caller's address space 122 * - ARG3 - size of receiving buffer in bytes 123 * 124 * The kernel fills the buffer with a non-terminated string. 125 * 126 * - ARG2 - number of bytes that were actually copied 127 * - ARG3 - number of bytes of the complete data 128 * 129 */ 130 UDEBUG_M_NAME_READ, 131 132 /** Read the list of the debugged task's address space areas. 133 * 134 * - ARG2 - destination address in the caller's address space 135 * - ARG3 - size of receiving buffer in bytes 136 * 137 * The kernel fills the buffer with a series of as_area_info_t structures. 138 * Upon answer, the kernel will set: 139 * 140 * - ARG2 - number of bytes that were actually copied 141 * - ARG3 - number of bytes of the complete data 142 * 143 */ 144 UDEBUG_M_AREAS_READ, 145 146 /** Read the debugged tasks's memory. 
147 * 148 * - ARG2 - destination address in the caller's address space 149 * - ARG3 - source address in the recipient's address space 150 * - ARG4 - size of receiving buffer in bytes 151 * 152 */ 153 UDEBUG_M_MEM_READ 154 } udebug_method_t; 155 156 typedef enum { 157 UDEBUG_EVENT_FINISHED = 1, /**< Debuging session has finished */ 158 UDEBUG_EVENT_STOP, /**< Stopped on DEBUG_STOP request */ 159 UDEBUG_EVENT_SYSCALL_B, /**< Before beginning syscall execution */ 160 UDEBUG_EVENT_SYSCALL_E, /**< After finishing syscall execution */ 161 UDEBUG_EVENT_THREAD_B, /**< The task created a new thread */ 162 UDEBUG_EVENT_THREAD_E /**< A thread exited */ 163 } udebug_event_t; 164 165 typedef enum { 166 UDEBUG_EM_FINISHED = UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED), 167 UDEBUG_EM_STOP = UDEBUG_EVMASK(UDEBUG_EVENT_STOP), 168 UDEBUG_EM_SYSCALL_B = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B), 169 UDEBUG_EM_SYSCALL_E = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E), 170 UDEBUG_EM_THREAD_B = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B), 171 UDEBUG_EM_THREAD_E = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E), 172 UDEBUG_EM_ALL = 173 (UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED) | 174 UDEBUG_EVMASK(UDEBUG_EVENT_STOP) | 175 UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B) | 176 UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E) | 177 UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B) | 178 UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E)) 179 } udebug_evmask_t; 180 181 #ifdef KERNEL 182 38 183 #include <ipc/ipc.h> 39 40 typedef enum { /* udebug_method_t */41 42 /** Start debugging the recipient.43 * Causes all threads in the receiving task to stop. When they44 * are all stoped, an answer with retval 0 is generated.45 */46 UDEBUG_M_BEGIN = 1,47 48 /** Finish debugging the recipient.49 * Answers all pending GO and GUARD messages.50 */51 UDEBUG_M_END,52 53 /** Set which events should be captured.54 */55 UDEBUG_M_SET_EVMASK,56 57 /** Make sure the debugged task is still there.58 * This message is answered when the debugged task dies59 * or the debugging session ends.60 */61 UDEBUG_M_GUARD,62 63 /** Run a thread until a debugging event occurs.64 * This message is answered when the thread stops65 * in a debugging event.66 *67 * - ARG2 - id of the thread to run68 */69 UDEBUG_M_GO,70 71 /** Stop a thread being debugged.72 * Creates a special STOP event in the thread, causing73 * it to answer a pending GO message (if any).74 */75 UDEBUG_M_STOP,76 77 /** Read arguments of a syscall.78 *79 * - ARG2 - thread identification80 * - ARG3 - destination address in the caller's address space81 *82 */83 UDEBUG_M_ARGS_READ,84 85 /** Read thread's userspace register state (istate_t).86 *87 * - ARG2 - thread identification88 * - ARG3 - destination address in the caller's address space89 *90 * or, on error, retval will be91 * - ENOENT - thread does not exist92 * - EBUSY - register state not available93 */94 UDEBUG_M_REGS_READ,95 96 /** Read the list of the debugged tasks's threads.97 *98 * - ARG2 - destination address in the caller's address space99 * - ARG3 - size of receiving buffer in bytes100 *101 * The kernel fills the buffer with a series of sysarg_t values102 * (thread ids). 
On answer, the kernel will set:103 *104 * - ARG2 - number of bytes that were actually copied105 * - ARG3 - number of bytes of the complete data106 *107 */108 UDEBUG_M_THREAD_READ,109 110 /** Read the name of the debugged task.111 *112 * - ARG2 - destination address in the caller's address space113 * - ARG3 - size of receiving buffer in bytes114 *115 * The kernel fills the buffer with a non-terminated string.116 *117 * - ARG2 - number of bytes that were actually copied118 * - ARG3 - number of bytes of the complete data119 *120 */121 UDEBUG_M_NAME_READ,122 123 /** Read the list of the debugged task's address space areas.124 *125 * - ARG2 - destination address in the caller's address space126 * - ARG3 - size of receiving buffer in bytes127 *128 * The kernel fills the buffer with a series of as_area_info_t structures.129 * Upon answer, the kernel will set:130 *131 * - ARG2 - number of bytes that were actually copied132 * - ARG3 - number of bytes of the complete data133 *134 */135 UDEBUG_M_AREAS_READ,136 137 /** Read the debugged tasks's memory.138 *139 * - ARG2 - destination address in the caller's address space140 * - ARG3 - source address in the recipient's address space141 * - ARG4 - size of receiving buffer in bytes142 *143 */144 UDEBUG_M_MEM_READ,145 146 } udebug_method_t;147 148 149 typedef enum {150 UDEBUG_EVENT_FINISHED = 1, /**< Debuging session has finished */151 UDEBUG_EVENT_STOP, /**< Stopped on DEBUG_STOP request */152 UDEBUG_EVENT_SYSCALL_B, /**< Before beginning syscall execution */153 UDEBUG_EVENT_SYSCALL_E, /**< After finishing syscall execution */154 UDEBUG_EVENT_THREAD_B, /**< The task created a new thread */155 UDEBUG_EVENT_THREAD_E /**< A thread exited */156 } udebug_event_t;157 158 #define UDEBUG_EVMASK(event) (1 << ((event) - 1))159 160 typedef enum {161 UDEBUG_EM_FINISHED = UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED),162 UDEBUG_EM_STOP = UDEBUG_EVMASK(UDEBUG_EVENT_STOP),163 UDEBUG_EM_SYSCALL_B = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B),164 UDEBUG_EM_SYSCALL_E = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E),165 UDEBUG_EM_THREAD_B = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B),166 UDEBUG_EM_THREAD_E = UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E),167 UDEBUG_EM_ALL =168 UDEBUG_EVMASK(UDEBUG_EVENT_FINISHED) |169 UDEBUG_EVMASK(UDEBUG_EVENT_STOP) |170 UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B) |171 UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E) |172 UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_B) |173 UDEBUG_EVMASK(UDEBUG_EVENT_THREAD_E)174 } udebug_evmask_t;175 176 #ifdef KERNEL177 178 184 #include <synch/mutex.h> 179 185 #include <synch/condvar.h> … … 196 202 mutex_t lock; 197 203 char *lock_owner; 198 204 199 205 udebug_task_state_t dt_state; 200 206 call_t *begin_call; … … 209 215 /** Synchronize debug ops on this thread / access to this structure. */ 210 216 mutex_t lock; 211 217 212 218 waitq_t go_wq; 213 219 call_t *go_call; 214 220 sysarg_t syscall_args[6]; 215 221 istate_t *uspace_state; 216 222 217 223 /** What type of event are we stopped in or 0 if none. 
*/ 218 224 udebug_event_t cur_event; 219 bool go; /**< thread is GO */220 bool stoppable; /**< thread is stoppable */221 bool active; /**< thread is in a debugging session */225 bool go; /**< Thread is GO */ 226 bool stoppable; /**< Thread is stoppable */ 227 bool active; /**< Thread is in a debugging session */ 222 228 condvar_t active_cv; 223 229 } udebug_thread_t; … … 226 232 struct thread; 227 233 228 void udebug_task_init(udebug_task_t *ut); 229 void udebug_thread_initialize(udebug_thread_t *ut); 230 231 void udebug_syscall_event(sysarg_t a1, sysarg_t a2, sysarg_t a3, 232 sysarg_t a4, sysarg_t a5, sysarg_t a6, sysarg_t id, sysarg_t rc, 233 bool end_variant); 234 235 void udebug_thread_b_event_attach(struct thread *t, struct task *ta); 234 void udebug_task_init(udebug_task_t *); 235 void udebug_thread_initialize(udebug_thread_t *); 236 237 void udebug_syscall_event(sysarg_t, sysarg_t, sysarg_t, sysarg_t, sysarg_t, 238 sysarg_t, sysarg_t, sysarg_t, bool); 239 240 void udebug_thread_b_event_attach(struct thread *, struct task *); 236 241 void udebug_thread_e_event(void); 237 242 … … 241 246 void udebug_before_thread_runs(void); 242 247 243 int udebug_task_cleanup(struct task * ta);248 int udebug_task_cleanup(struct task *); 244 249 void udebug_thread_fault(void); 245 250 -
kernel/generic/src/adt/avl.c
@@ 723,723 @@
  void avltree_walk(avltree_t *t, avltree_walker_t walker, void *arg)
  {
- 	_avltree_walk(t->root, walker, arg);
+ 	if (t->root)
+ 		_avltree_walk(t->root, walker, arg);
  }
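The added check makes walking an empty tree a harmless no-op instead of descending into a NULL root. A small sketch, assuming the usual avltree_create() initializer; my_walker is a hypothetical callback:

    avltree_t tree;
    avltree_create(&tree);                 /* fresh tree, tree.root == NULL */
    avltree_walk(&tree, my_walker, NULL);  /* with this change: does nothing */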
kernel/generic/src/console/cmd.c
r22027b6e r8b5690f 78 78 static cmd_info_t help_info = { 79 79 .name = "help", 80 .description = "List ofsupported commands.",80 .description = "List supported commands.", 81 81 .func = cmd_help, 82 82 .argc = 0 83 83 }; 84 84 85 /* Data and methods for 'reboot' command. */ 85 86 static int cmd_reboot(cmd_arg_t *argv); 86 87 static cmd_info_t reboot_info = { 87 88 .name = "reboot", 88 .description = "Reboot .",89 .description = "Reboot system.", 89 90 .func = cmd_reboot, 90 91 .argc = 0 91 92 }; 92 93 94 /* Data and methods for 'uptime' command. */ 93 95 static int cmd_uptime(cmd_arg_t *argv); 94 96 static cmd_info_t uptime_info = { 95 97 .name = "uptime", 96 .description = " Print uptime information.",98 .description = "Show system uptime.", 97 99 .func = cmd_uptime, 98 100 .argc = 0 99 101 }; 100 102 103 /* Data and methods for 'continue' command. */ 101 104 static int cmd_continue(cmd_arg_t *argv); 102 105 static cmd_info_t continue_info = { … … 108 111 109 112 #ifdef CONFIG_TEST 113 114 /* Data and methods for 'test' command. */ 110 115 static char test_buf[MAX_CMDLINE + 1]; 111 116 static int cmd_test(cmd_arg_t *argv); … … 119 124 static cmd_info_t test_info = { 120 125 .name = "test", 121 .description = " Print list ofkernel tests or run a test.",126 .description = "<test> List kernel tests or run a test.", 122 127 .func = cmd_test, 123 128 .argc = 1, … … 125 130 }; 126 131 132 /* Data and methods for 'bench' command. */ 127 133 static int cmd_bench(cmd_arg_t *argv); 128 134 static cmd_arg_t bench_argv[] = { … … 138 144 static cmd_info_t bench_info = { 139 145 .name = "bench", 140 .description = " Run kernel test as benchmark.",146 .description = "<test> <count> Run kernel test as benchmark.", 141 147 .func = cmd_bench, 142 148 .argc = 2, 143 149 .argv = bench_argv 144 150 }; 145 #endif 151 152 #endif /* CONFIG_TEST */ 146 153 147 154 /* Data and methods for 'description' command. */ 148 155 static int cmd_desc(cmd_arg_t *argv); 149 156 static void desc_help(void); 150 static char desc_buf[MAX_CMDLINE +1];157 static char desc_buf[MAX_CMDLINE + 1]; 151 158 static cmd_arg_t desc_argv = { 152 159 .type = ARG_TYPE_STRING, … … 156 163 static cmd_info_t desc_info = { 157 164 .name = "describe", 158 .description = " Describe specified command.",165 .description = "<command> Describe specified command.", 159 166 .help = desc_help, 160 167 .func = cmd_desc, … … 165 172 /* Data and methods for 'symaddr' command. */ 166 173 static int cmd_symaddr(cmd_arg_t *argv); 167 static char symaddr_buf[MAX_CMDLINE +1];174 static char symaddr_buf[MAX_CMDLINE + 1]; 168 175 static cmd_arg_t symaddr_argv = { 169 176 .type = ARG_TYPE_STRING, … … 173 180 static cmd_info_t symaddr_info = { 174 181 .name = "symaddr", 175 .description = " Return symbol address.",182 .description = "<symbol> Return symbol address.", 176 183 .func = cmd_symaddr, 177 184 .argc = 1, … … 179 186 }; 180 187 181 static char set_buf[MAX_CMDLINE+1]; 188 /* Data and methods for 'set4' command. 
*/ 189 static char set_buf[MAX_CMDLINE + 1]; 182 190 static int cmd_set4(cmd_arg_t *argv); 183 191 static cmd_arg_t set4_argv[] = { … … 193 201 static cmd_info_t set4_info = { 194 202 .name = "set4", 195 .description = " set <dest_addr> <value> - 4byte version",203 .description = "<addr> <value> Set 4B memory location to a value.", 196 204 .func = cmd_set4, 197 205 .argc = 2, … … 213 221 static cmd_info_t call0_info = { 214 222 .name = "call0", 215 .description = " call0 <function> -> call function().",223 .description = "<function> Call function().", 216 224 .func = cmd_call0, 217 225 .argc = 1, … … 228 236 static cmd_info_t mcall0_info = { 229 237 .name = "mcall0", 230 .description = " mcall0 <function> -> call function() on each CPU.",238 .description = "<function> Call function() on each CPU.", 231 239 .func = cmd_mcall0, 232 240 .argc = 1, … … 250 258 static cmd_info_t call1_info = { 251 259 .name = "call1", 252 .description = " call1 <function> <arg1> -> call function(arg1).",260 .description = "<function> <arg1> Call function(arg1).", 253 261 .func = cmd_call1, 254 262 .argc = 2, … … 277 285 static cmd_info_t call2_info = { 278 286 .name = "call2", 279 .description = " call2 <function> <arg1> <arg2> -> call function(arg1,arg2).",287 .description = "<function> <arg1> <arg2> Call function(arg1, arg2).", 280 288 .func = cmd_call2, 281 289 .argc = 3, … … 310 318 static cmd_info_t call3_info = { 311 319 .name = "call3", 312 .description = " call3 <function> <arg1> <arg2> <arg3> -> call function(arg1,arg2,arg3).",320 .description = "<function> <arg1> <arg2> <arg3> Call function(arg1, arg2, arg3).", 313 321 .func = cmd_call3, 314 322 .argc = 4, … … 340 348 cmd_info_t tlb_info = { 341 349 .name = "tlb", 342 .description = "Print TLB of current processor.",350 .description = "Print TLB of the current CPU.", 343 351 .help = NULL, 344 352 .func = cmd_tlb, … … 377 385 }; 378 386 387 #ifdef CONFIG_UDEBUG 388 389 /* Data and methods for 'btrace' command */ 390 static int cmd_btrace(cmd_arg_t *argv); 391 static cmd_arg_t btrace_argv = { 392 .type = ARG_TYPE_INT, 393 }; 394 static cmd_info_t btrace_info = { 395 .name = "btrace", 396 .description = "<threadid> Show thread stack trace.", 397 .func = cmd_btrace, 398 .argc = 1, 399 .argv = &btrace_argv 400 }; 401 402 #endif /* CONFIG_UDEBUG */ 379 403 380 404 static int cmd_sched(cmd_arg_t *argv); 381 405 static cmd_info_t sched_info = { 382 406 .name = "scheduler", 383 .description = " List allscheduler information.",407 .description = "Show scheduler information.", 384 408 .func = cmd_sched, 385 409 .argc = 0 … … 406 430 static cmd_info_t zones_info = { 407 431 .name = "zones", 408 .description = "List ofmemory zones.",432 .description = "List memory zones.", 409 433 .func = cmd_zones, 410 434 .argc = 0 435 }; 436 437 /* Data and methods for 'zone' command */ 438 static int cmd_zone(cmd_arg_t *argv); 439 static cmd_arg_t zone_argv = { 440 .type = ARG_TYPE_INT, 441 }; 442 443 static cmd_info_t zone_info = { 444 .name = "zone", 445 .description = "<zone> Show memory zone structure.", 446 .func = cmd_zone, 447 .argc = 1, 448 .argv = &zone_argv 411 449 }; 412 450 … … 418 456 static cmd_info_t ipc_info = { 419 457 .name = "ipc", 420 .description = " ipc <taskid> Show IPC information of giventask.",458 .description = "<taskid> Show IPC information of a task.", 421 459 .func = cmd_ipc, 422 460 .argc = 1, … … 431 469 static cmd_info_t kill_info = { 432 470 .name = "kill", 433 .description = " kill<taskid> Kill a task.",471 .description = "<taskid> Kill a 
task.", 434 472 .func = cmd_kill, 435 473 .argc = 1, 436 474 .argv = &kill_argv 437 };438 439 /* Data and methods for 'zone' command */440 static int cmd_zone(cmd_arg_t *argv);441 static cmd_arg_t zone_argv = {442 .type = ARG_TYPE_INT,443 };444 445 static cmd_info_t zone_info = {446 .name = "zone",447 .description = "Show memory zone structure.",448 .func = cmd_zone,449 .argc = 1,450 .argv = &zone_argv451 475 }; 452 476 … … 482 506 &cpus_info, 483 507 &desc_info, 484 &reboot_info,485 &uptime_info,486 508 &halt_info, 487 509 &help_info, 488 510 &ipc_info, 489 511 &kill_info, 512 &physmem_info, 513 &reboot_info, 514 &sched_info, 490 515 &set4_info, 491 516 &slabs_info, 517 &symaddr_info, 492 518 &sysinfo_info, 493 &symaddr_info, 494 &sched_info, 519 &tasks_info, 495 520 &threads_info, 496 &tasks_info,497 &physmem_info,498 521 &tlb_info, 522 &uptime_info, 499 523 &version_info, 500 524 &zones_info, … … 504 528 &bench_info, 505 529 #endif 530 #ifdef CONFIG_UDEBUG 531 &btrace_info, 532 #endif 506 533 NULL 507 534 }; … … 526 553 for (i = 0; basic_commands[i]; i++) { 527 554 cmd_initialize(basic_commands[i]); 528 if (!cmd_register(basic_commands[i])) 529 printf("Cannot register command %s\n", basic_commands[i]->name); 530 } 531 } 532 555 } 556 557 for (i = 0; basic_commands[i]; i++) { 558 if (!cmd_register(basic_commands[i])) { 559 printf("Cannot register command %s\n", 560 basic_commands[i]->name); 561 } 562 } 563 } 533 564 534 565 /** List supported commands. … … 574 605 } 575 606 576 577 607 /** Reboot the system. 578 608 * … … 588 618 return 1; 589 619 } 590 591 620 592 621 /** Print system uptime information. … … 824 853 } 825 854 826 827 855 /** Print detailed description of 'describe' command. */ 828 856 void desc_help(void) … … 911 939 * @return Always 1 912 940 */ 913 int cmd_slabs(cmd_arg_t * 941 int cmd_slabs(cmd_arg_t *argv) 914 942 { 915 943 slab_print_list(); … … 923 951 * @return Always 1 924 952 */ 925 int cmd_sysinfo(cmd_arg_t * 953 int cmd_sysinfo(cmd_arg_t *argv) 926 954 { 927 955 sysinfo_dump(NULL); … … 929 957 } 930 958 931 932 /** Command for listings Thread information 959 /** Command for listing thread information 933 960 * 934 961 * @param argv Ignored … … 948 975 } 949 976 950 /** Command for listing s Task information977 /** Command for listing task information 951 978 * 952 979 * @param argv Ignored … … 966 993 } 967 994 968 /** Command for listings Thread information 995 #ifdef CONFIG_UDEBUG 996 997 /** Command for printing thread stack trace 998 * 999 * @param argv Integer argument from cmdline expected 1000 * 1001 * return Always 1 1002 * 1003 */ 1004 int cmd_btrace(cmd_arg_t *argv) 1005 { 1006 thread_stack_trace(argv[0].intval); 1007 return 1; 1008 } 1009 1010 #endif /* CONFIG_UDEBUG */ 1011 1012 /** Command for printing scheduler information 969 1013 * 970 1014 * @param argv Ignores … … 972 1016 * @return Always 1 973 1017 */ 974 int cmd_sched(cmd_arg_t * 1018 int cmd_sched(cmd_arg_t *argv) 975 1019 { 976 1020 sched_print_list(); … … 984 1028 * return Always 1 985 1029 */ 986 int cmd_zones(cmd_arg_t * 1030 int cmd_zones(cmd_arg_t *argv) 987 1031 { 988 1032 zones_print_list(); … … 996 1040 * return Always 1 997 1041 */ 998 int cmd_zone(cmd_arg_t * 1042 int cmd_zone(cmd_arg_t *argv) 999 1043 { 1000 1044 zone_print_one(argv[0].intval); … … 1002 1046 } 1003 1047 1004 /** Command for printing task ipcdetails1048 /** Command for printing task IPC details 1005 1049 * 1006 1050 * @param argv Integer argument from cmdline expected … … 1008 1052 * return Always 1 1009 1053 
*/ 1010 int cmd_ipc(cmd_arg_t * 1054 int cmd_ipc(cmd_arg_t *argv) 1011 1055 { 1012 1056 ipc_print_task(argv[0].intval); … … 1020 1064 * return 0 on failure, 1 on success. 1021 1065 */ 1022 int cmd_kill(cmd_arg_t * 1066 int cmd_kill(cmd_arg_t *argv) 1023 1067 { 1024 1068 if (task_kill(argv[0].intval) != EOK) -
kernel/generic/src/console/console.c
@@ 160,160 @@
  klog_parea.pbase = (uintptr_t) faddr;
  klog_parea.frames = SIZE2FRAMES(sizeof(klog));
+ klog_parea.unpriv = false;
  ddi_parea_register(&klog_parea);
kernel/generic/src/ddi/ddi.c
r22027b6e r8b5690f 104 104 { 105 105 ASSERT(TASK); 106 ASSERT((pf % FRAME_SIZE) == 0); 107 ASSERT((vp % PAGE_SIZE) == 0); 108 109 /* 110 * Make sure the caller is authorised to make this syscall. 111 */ 112 cap_t caps = cap_get(TASK); 113 if (!(caps & CAP_MEM_MANAGER)) 114 return EPERM; 106 107 if ((pf % FRAME_SIZE) != 0) 108 return EBADMEM; 109 110 if ((vp % PAGE_SIZE) != 0) 111 return EBADMEM; 112 113 /* 114 * Unprivileged tasks are only allowed to map pareas 115 * which are explicitly marked as such. 116 */ 117 bool priv = 118 ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER); 115 119 116 120 mem_backend_data_t backend_data; … … 123 127 124 128 if (znum == (size_t) -1) { 125 /* Frames not found in any zones 126 * -> assume it is hardware device and allow mapping 129 /* 130 * Frames not found in any zone 131 * -> assume it is a hardware device and allow mapping 132 * for privileged tasks. 127 133 */ 128 134 irq_spinlock_unlock(&zones.lock, true); 135 136 if (!priv) 137 return EPERM; 138 129 139 goto map; 130 140 } 131 141 132 142 if (zones.info[znum].flags & ZONE_FIRMWARE) { 133 /* Frames are part of firmware */ 143 /* 144 * Frames are part of firmware 145 * -> allow mapping for privileged tasks. 146 */ 134 147 irq_spinlock_unlock(&zones.lock, true); 148 149 if (!priv) 150 return EPERM; 151 135 152 goto map; 136 153 } … … 138 155 if (zone_flags_available(zones.info[znum].flags)) { 139 156 /* 140 * Frames are part of physical memory, check if the memory141 * region is enabled for mapping.157 * Frames are part of physical memory, check 158 * if the memory region is enabled for mapping. 142 159 */ 143 160 irq_spinlock_unlock(&zones.lock, true); … … 150 167 if ((!parea) || (parea->frames < pages)) { 151 168 mutex_unlock(&parea_lock); 152 goto err; 169 return ENOENT; 170 } 171 172 if (!priv) { 173 if (!parea->unpriv) { 174 mutex_unlock(&parea_lock); 175 return EPERM; 176 } 153 177 } 154 178 … … 158 182 159 183 irq_spinlock_unlock(&zones.lock, true); 160 161 err:162 184 return ENOENT; 163 185 … … 258 280 } 259 281 260 /** Disable or enable specified interrupts.261 *262 * @param irq the interrupt to be enabled/disabled.263 * @param enable if true enable the interrupt, disable otherwise.264 *265 * @retutn Zero on success, error code otherwise.266 */267 sysarg_t sys_interrupt_enable(int irq, int enable)268 {269 /* FIXME: this needs to be generic code, or better not be in kernel at all. */270 #if 0271 cap_t task_cap = cap_get(TASK);272 if (!(task_cap & CAP_IRQ_REG))273 return EPERM;274 275 if (irq < 0 || irq > 16) {276 return EINVAL;277 }278 279 uint16_t irq_mask = (uint16_t)(1 << irq);280 if (enable) {281 trap_virtual_enable_irqs(irq_mask);282 } else {283 trap_virtual_disable_irqs(irq_mask);284 }285 286 #endif287 return 0;288 }289 290 282 /** @} 291 283 */ -
kernel/generic/src/interrupt/interrupt.c
@@ 45,45 @@
  #include <console/console.h>
  #include <console/cmd.h>
- #include <ipc/event.h>
  #include <synch/mutex.h>
  #include <time/delay.h>

@@ 188,187 @@
  printf("\n");

- /*
-  * Userspace can subscribe for FAULT events to take action
-  * whenever a thread faults. (E.g. take a dump, run a debugger).
-  * The notification is always available, but unless Udebug is enabled,
-  * that's all you get.
-  */
- if (event_is_subscribed(EVENT_FAULT)) {
- 	/* Notify the subscriber that a fault occurred. */
- 	event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
- 	    UPPER32(TASK->taskid), (sysarg_t) THREAD);
-
- #ifdef CONFIG_UDEBUG
- 	/* Wait for a debugging session. */
- 	udebug_thread_fault();
- #endif
- }
-
- task_kill(TASK->taskid);
- thread_exit();
+ task_kill_self(true);
  }
kernel/generic/src/ipc/ipc.c
r22027b6e r8b5690f 295 295 atomic_inc(&phone->active_calls); 296 296 call->data.phone = phone; 297 call->data.task = TASK; 297 298 } 298 299 … … 406 407 call->caller_phone = call->data.phone; 407 408 call->data.phone = newphone; 409 call->data.task = TASK; 408 410 } 409 411 … … 688 690 irq_spinlock_exchange(&tasks_lock, &task->lock); 689 691 690 /* Print opened phones & details */ 691 printf("PHONE:\n"); 692 printf("[phone id] [calls] [state\n"); 692 693 693 694 size_t i; 694 695 for (i = 0; i < IPC_MAX_PHONES; i++) { 695 696 if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { 696 printf("% zu: mutex busy\n", i);697 printf("%-10zu (mutex busy)\n", i); 697 698 continue; 698 699 } 699 700 700 701 if (task->phones[i].state != IPC_PHONE_FREE) { 701 printf("%zu: ", i); 702 printf("%-10zu %7" PRIun " ", i, 703 atomic_get(&task->phones[i].active_calls)); 702 704 703 705 switch (task->phones[i].state) { 704 706 case IPC_PHONE_CONNECTING: 705 printf("connecting 707 printf("connecting"); 706 708 break; 707 709 case IPC_PHONE_CONNECTED: 708 printf("connected to : %p (%" PRIu64 ")",709 task->phones[i].callee ,710 task->phones[i].callee->task-> taskid);710 printf("connected to %" PRIu64 " (%s)", 711 task->phones[i].callee->task->taskid, 712 task->phones[i].callee->task->name); 711 713 break; 712 714 case IPC_PHONE_SLAMMED: 713 printf("slammed by : %p ",715 printf("slammed by %p", 714 716 task->phones[i].callee); 715 717 break; 716 718 case IPC_PHONE_HUNGUP: 717 printf("hung up - was: %p ",719 printf("hung up by %p", 718 720 task->phones[i].callee); 719 721 break; … … 722 724 } 723 725 724 printf("active: %" PRIun "\n", 725 atomic_get(&task->phones[i].active_calls)); 726 printf("\n"); 726 727 } 727 728 … … 731 732 irq_spinlock_lock(&task->answerbox.lock, false); 732 733 734 #ifdef __32_BITS__ 735 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4] [arg5]" 736 " [flags] [sender\n"); 737 #endif 738 739 #ifdef __64_BITS__ 740 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4]" 741 " [arg5] [flags] [sender\n"); 742 #endif 743 733 744 link_t *cur; 734 745 735 /* Print answerbox - calls */ 736 printf("ABOX - CALLS:\n"); 746 printf(" --- incomming calls ---\n"); 737 747 for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls; 738 748 cur = cur->next) { 739 749 call_t *call = list_get_instance(cur, call_t, link); 740 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 741 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 742 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 743 call->sender->taskid, 750 751 #ifdef __32_BITS__ 752 printf("%10p ", call); 753 #endif 754 755 #ifdef __64_BITS__ 756 printf("%18p ", call); 757 #endif 758 759 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 760 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 744 761 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 745 762 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 746 763 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 747 call->flags); 748 } 749 750 /* Print answerbox - dispatched calls */ 751 printf("ABOX - DISPATCHED CALLS:\n"); 764 call->flags, call->sender->taskid, call->sender->name); 765 } 766 767 printf(" --- dispatched calls ---\n"); 752 768 for (cur = task->answerbox.dispatched_calls.next; 753 769 cur != &task->answerbox.dispatched_calls; 754 770 cur = cur->next) { 755 771 call_t *call = list_get_instance(cur, call_t, link); 756 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 757 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 758 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 
759 call->sender->taskid, 772 773 #ifdef __32_BITS__ 774 printf("%10p ", call); 775 #endif 776 777 #ifdef __64_BITS__ 778 printf("%18p ", call); 779 #endif 780 781 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 782 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 760 783 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 761 784 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 762 785 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 763 call->flags); 764 } 765 766 /* Print answerbox - answers */ 767 printf("ABOX - ANSWERS:\n"); 786 call->flags, call->sender->taskid, call->sender->name); 787 } 788 789 printf(" --- incoming answers ---\n"); 768 790 for (cur = task->answerbox.answers.next; 769 791 cur != &task->answerbox.answers; 770 792 cur = cur->next) { 771 793 call_t *call = list_get_instance(cur, call_t, link); 772 printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun 773 " A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", 774 call, IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 794 795 #ifdef __32_BITS__ 796 printf("%10p ", call); 797 #endif 798 799 #ifdef __64_BITS__ 800 printf("%18p ", call); 801 #endif 802 803 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 804 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 805 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 775 806 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 776 807 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 777 call->flags );808 call->flags, call->sender->taskid, call->sender->name); 778 809 } 779 810 -
kernel/generic/src/ipc/irq.c
@@ 42,42 @@
   *
   * The structure of a notification message is as follows:
-  * - IMETHOD: interface and method as registered by the SYS_IPC_REGISTER_IRQ
+  * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
   *   syscall
   * - ARG1: payload modified by a 'top-half' handler
kernel/generic/src/ipc/sysipc.c
@@ 248,248 @@
  /* The connection was accepted */
  phone_connect(phoneid, &answer->sender->answerbox);
+ /* Set 'task hash' as arg4 of response */
+ IPC_SET_ARG4(answer->data, (sysarg_t) TASK);
  /* Set 'phone hash' as arg5 of response */
  IPC_SET_ARG5(answer->data,

@@ 1103,1105 @@
   *
   */
- sysarg_t sys_ipc_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
+ sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
      irq_code_t *ucode)
  {

@@ 1120,1122 @@
   *
   */
- sysarg_t sys_ipc_unregister_irq(inr_t inr, devno_t devno)
+ sysarg_t sys_unregister_irq(inr_t inr, devno_t devno)
  {
  	if (!(cap_get(TASK) & CAP_IRQ_REG))
kernel/generic/src/lib/elf.c
@@ 157,157 @@
  case PT_NULL:
  case PT_PHDR:
+ case PT_NOTE:
  	break;
  case PT_LOAD:

@@ 173,174 @@
  	break;
  case PT_SHLIB:
- case PT_NOTE:
  case PT_LOPROC:
  case PT_HIPROC:
kernel/generic/src/lib/rd.c
@@ 90,90 @@
      FRAME_SIZE);
  rd_parea.frames = SIZE2FRAMES(dsize);
+ rd_parea.unpriv = false;
  ddi_parea_register(&rd_parea);
kernel/generic/src/mm/backend_phys.c
@@ 81,81 @@
  page_mapping_insert(AS, addr, base + (addr - area->base),
      as_area_get_flags(area));
- if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
- 	panic("Cannot insert used space.");
+
+ if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+ 	panic("Cannot insert used space.");

  return AS_PF_OK;
kernel/generic/src/mm/slab.c
@@ 806,806 @@
  }

- /** Go through all caches and reclaim what is possible
-  *
-  * Interrupts must be disabled before calling this function,
-  * otherwise memory allocation from interrupts can deadlock.
-  *
-  */
+ /** Go through all caches and reclaim what is possible */
  size_t slab_reclaim(unsigned int flags)
  {
- 	irq_spinlock_lock(&slab_cache_lock, false);
+ 	irq_spinlock_lock(&slab_cache_lock, true);

  	size_t frames = 0;

@@ 824,819 @@
  }

- 	irq_spinlock_unlock(&slab_cache_lock, false);
+ 	irq_spinlock_unlock(&slab_cache_lock, true);

  	return frames;
kernel/generic/src/proc/scheduler.c
r22027b6e r8b5690f 62 62 #include <print.h> 63 63 #include <debug.h> 64 65 static void before_task_runs(void); 66 static void before_thread_runs(void); 67 static void after_thread_ran(void); 64 #include <stacktrace.h> 65 68 66 static void scheduler_separated_stack(void); 69 67 … … 71 69 72 70 /** Carry out actions before new task runs. */ 73 void before_task_runs(void)71 static void before_task_runs(void) 74 72 { 75 73 before_task_runs_arch(); … … 80 78 * Perform actions that need to be 81 79 * taken before the newly selected 82 * t read is passed control.80 * thread is passed control. 83 81 * 84 82 * THREAD->lock is locked on entry 85 83 * 86 84 */ 87 void before_thread_runs(void)85 static void before_thread_runs(void) 88 86 { 89 87 before_thread_runs_arch(); 88 90 89 #ifdef CONFIG_FPU_LAZY 91 if (THREAD == CPU->fpu_owner)90 if (THREAD == CPU->fpu_owner) 92 91 fpu_enable(); 93 92 else … … 102 101 } 103 102 #endif 103 104 #ifdef CONFIG_UDEBUG 105 if (THREAD->btrace) { 106 istate_t *istate = THREAD->udebug.uspace_state; 107 if (istate != NULL) { 108 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 109 stack_trace_istate(istate); 110 } 111 112 THREAD->btrace = false; 113 } 114 #endif 104 115 } 105 116 … … 113 124 * 114 125 */ 115 void after_thread_ran(void)126 static void after_thread_ran(void) 116 127 { 117 128 after_thread_ran_arch(); … … 391 402 * possible destruction should thread_destroy() be called on this or any 392 403 * other processor while the scheduler is still using them. 393 *394 404 */ 395 405 if (old_task) … … 417 427 * The thread structure is kept allocated until 418 428 * somebody calls thread_detach() on it. 419 *420 429 */ 421 430 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) { 422 431 /* 423 432 * Avoid deadlock. 424 *425 433 */ 426 434 irq_spinlock_unlock(&THREAD->lock, false); … … 443 451 /* 444 452 * Prefer the thread after it's woken up. 445 *446 453 */ 447 454 THREAD->priority = -1; … … 451 458 * waitq_sleep(). Address of wq->lock is kept in 452 459 * THREAD->sleep_queue. 453 *454 460 */ 455 461 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); … … 461 467 /* 462 468 * Entering state is unexpected. 463 *464 469 */ 465 470 panic("tid%" PRIu64 ": unexpected state %s.", … … 480 485 481 486 /* 482 * If both the old and the new task are the same, lots of work is 483 * avoided. 484 * 487 * If both the old and the new task are the same, 488 * lots of work is avoided. 485 489 */ 486 490 if (TASK != THREAD->task) { … … 488 492 489 493 /* 490 * Note that it is possible for two tasks to share one address 491 * space. 492 ( 494 * Note that it is possible for two tasks 495 * to share one address space. 493 496 */ 494 497 if (old_as != new_as) { … … 496 499 * Both tasks and address spaces are different. 497 500 * Replace the old one with the new one. 498 *499 501 */ 500 502 as_switch(old_as, new_as); … … 527 529 * necessary, is to be mapped in before_thread_runs(). This 528 530 * function must be executed before the switch to the new stack. 529 *530 531 */ 531 532 before_thread_runs(); … … 534 535 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 535 536 * thread's stack. 536 *537 537 */ 538 538 the_copy(THE, (the_t *) THREAD->kstack); … … 658 658 /* 659 659 * Ready thread on local CPU 660 *661 660 */ 662 661 -
kernel/generic/src/proc/task.c
r22027b6e r8b5690f 342 342 sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len) 343 343 { 344 int rc;345 344 char namebuf[TASK_NAME_BUFLEN]; 346 345 347 346 /* Cap length of name and copy it from userspace. */ 348 349 347 if (name_len > TASK_NAME_BUFLEN - 1) 350 348 name_len = TASK_NAME_BUFLEN - 1; 351 349 352 rc = copy_from_uspace(namebuf, uspace_name, name_len);350 int rc = copy_from_uspace(namebuf, uspace_name, name_len); 353 351 if (rc != 0) 354 352 return (sysarg_t) rc; 355 353 356 354 namebuf[name_len] = '\0'; 355 356 /* 357 * As the task name is referenced also from the 358 * threads, lock the threads' lock for the course 359 * of the update. 360 */ 361 362 irq_spinlock_lock(&tasks_lock, true); 363 irq_spinlock_lock(&TASK->lock, false); 364 irq_spinlock_lock(&threads_lock, false); 365 366 /* Set task name */ 357 367 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf); 358 368 369 irq_spinlock_unlock(&threads_lock, false); 370 irq_spinlock_unlock(&TASK->lock, false); 371 irq_spinlock_unlock(&tasks_lock, true); 372 359 373 return EOK; 360 374 } … … 370 384 { 371 385 task_id_t taskid; 372 int rc; 373 374 rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid)); 386 int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid)); 375 387 if (rc != 0) 376 388 return (sysarg_t) rc; 377 389 378 390 return (sysarg_t) task_kill(taskid); 379 391 } … … 449 461 static void task_kill_internal(task_t *task) 450 462 { 463 irq_spinlock_lock(&task->lock, false); 464 irq_spinlock_lock(&threads_lock, false); 465 466 /* 467 * Interrupt all threads. 468 */ 469 451 470 link_t *cur; 452 453 /*454 * Interrupt all threads.455 */456 irq_spinlock_lock(&task->lock, false);457 471 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 458 472 thread_t *thread = list_get_instance(cur, thread_t, th_link); … … 471 485 } 472 486 487 irq_spinlock_unlock(&threads_lock, false); 473 488 irq_spinlock_unlock(&task->lock, false); 474 489 } … … 500 515 irq_spinlock_unlock(&tasks_lock, true); 501 516 517 return EOK; 518 } 519 520 /** Kill the currently running task. 521 * 522 * @param notify Send out fault notifications. 523 * 524 * @return Zero on success or an error code from errno.h. 525 * 526 */ 527 void task_kill_self(bool notify) 528 { 529 /* 530 * User space can subscribe for FAULT events to take action 531 * whenever a task faults (to take a dump, run a debugger, etc.). 532 * The notification is always available, but unless udebug is enabled, 533 * that's all you get. 534 */ 535 if (notify) { 536 if (event_is_subscribed(EVENT_FAULT)) { 537 /* Notify the subscriber that a fault occurred. */ 538 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 539 UPPER32(TASK->taskid), (sysarg_t) THREAD); 540 541 #ifdef CONFIG_UDEBUG 542 /* Wait for a debugging session. */ 543 udebug_thread_fault(); 544 #endif 545 } 546 } 547 548 irq_spinlock_lock(&tasks_lock, true); 549 task_kill_internal(TASK); 550 irq_spinlock_unlock(&tasks_lock, true); 551 552 thread_exit(); 553 } 554 555 /** Process syscall to terminate the current task. 556 * 557 * @param notify Send out fault notifications. 558 * 559 */ 560 sysarg_t sys_task_exit(sysarg_t notify) 561 { 562 task_kill_self(notify); 563 564 /* Unreachable */ 502 565 return EOK; 503 566 } -
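The new sys_task_exit() gives user space a direct way to terminate its own task, optionally raising the same EVENT_FAULT notification that kernel-detected faults use. A hedged sketch of a user-space wrapper; the __SYSCALL1 macro name is an assumption, not something introduced by this changeset:

    /* Illustrative wrapper only -- the syscall macro name is assumed. */
    static inline void task_exit(bool notify)
    {
    	/* SYS_TASK_EXIT never returns: the kernel runs task_kill_self(). */
    	__SYSCALL1(SYS_TASK_EXIT, (sysarg_t) notify);
    }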
kernel/generic/src/proc/thread.c
r22027b6e r8b5690f 239 239 * Switch thread to the ready state. 240 240 * 241 * @param t Thread to make ready.241 * @param thread Thread to make ready. 242 242 * 243 243 */ … … 246 246 irq_spinlock_lock(&thread->lock, true); 247 247 248 ASSERT( !(thread->state == Ready));248 ASSERT(thread->state != Ready); 249 249 250 250 int i = (thread->priority < RQ_COUNT - 1) … … 350 350 351 351 #ifdef CONFIG_UDEBUG 352 /* Init debugging stuff */ 352 /* Initialize debugging stuff */ 353 thread->btrace = false; 353 354 udebug_thread_initialize(&thread->udebug); 354 355 #endif … … 535 536 /** Detach thread. 536 537 * 537 * Mark the thread as detached , if the thread is already in the Lingering538 * state, deallocate its resources.538 * Mark the thread as detached. If the thread is already 539 * in the Lingering state, deallocate its resources. 539 540 * 540 541 * @param thread Thread to be detached. … … 590 591 order_suffix(thread->kcycles, &kcycles, &ksuffix); 591 592 593 char *name; 594 if (str_cmp(thread->name, "uinit") == 0) 595 name = thread->task->name; 596 else 597 name = thread->name; 598 592 599 #ifdef __32_BITS__ 593 600 if (*additional) 594 printf("%-8" PRIu64 "%10p %9" PRIu64 "%c %9" PRIu64 "%c ",595 thread->tid, thread-> kstack, ucycles, usuffix,596 kcycles, ksuffix);601 printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ", 602 thread->tid, thread->thread_code, thread->kstack, 603 ucycles, usuffix, kcycles, ksuffix); 597 604 else 598 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",599 thread->tid, thread->name, thread, thread_states[thread->state],600 thread->task, thread->task->context , thread->thread_code);605 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n", 606 thread->tid, name, thread, thread_states[thread->state], 607 thread->task, thread->task->context); 601 608 #endif 602 609 603 610 #ifdef __64_BITS__ 604 611 if (*additional) 605 printf("%-8" PRIu64 " %18p %18p\n"612 printf("%-8" PRIu64 " %18p %18p\n" 606 613 " %9" PRIu64 "%c %9" PRIu64 "%c ", 607 614 thread->tid, thread->thread_code, thread->kstack, 608 615 ucycles, usuffix, kcycles, ksuffix); 609 616 else 610 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",611 thread->tid, thread->name, thread, thread_states[thread->state],617 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n", 618 thread->tid, name, thread, thread_states[thread->state], 612 619 thread->task, thread->task->context); 613 620 #endif … … 647 654 #ifdef __32_BITS__ 648 655 if (additional) 649 printf("[id ] [ stack ] [ucycles ] [kcycles ] [cpu]"650 " [ waitqueue]\n");656 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]" 657 " [cpu] [waitqueue]\n"); 651 658 else 652 659 printf("[id ] [name ] [address ] [state ] [task ]" 653 " [ctx] [code ]\n");660 " [ctx]\n"); 654 661 #endif 655 662 … … 740 747 ASSERT(interrupts_disabled()); 741 748 ASSERT(irq_spinlock_locked(&threads_lock)); 742 749 743 750 thread_iterator_t iterator; 744 751 … … 751 758 } 752 759 760 #ifdef CONFIG_UDEBUG 761 762 void thread_stack_trace(thread_id_t thread_id) 763 { 764 irq_spinlock_lock(&threads_lock, true); 765 766 thread_t *thread = thread_find_by_id(thread_id); 767 if (thread == NULL) { 768 printf("No such thread.\n"); 769 irq_spinlock_unlock(&threads_lock, true); 770 return; 771 } 772 773 irq_spinlock_lock(&thread->lock, false); 774 775 /* 776 * Schedule a stack trace to be printed 777 * just before the thread is scheduled next. 778 * 779 * If the thread is sleeping then try to interrupt 780 * the sleep. 
Any request for printing an uspace stack 781 * trace from within the kernel should be always 782 * considered a last resort debugging means, therefore 783 * forcing the thread's sleep to be interrupted 784 * is probably justifiable. 785 */ 786 787 bool sleeping = false; 788 istate_t *istate = thread->udebug.uspace_state; 789 if (istate != NULL) { 790 printf("Scheduling thread stack trace.\n"); 791 thread->btrace = true; 792 if (thread->state == Sleeping) 793 sleeping = true; 794 } else 795 printf("Thread interrupt state not available.\n"); 796 797 irq_spinlock_unlock(&thread->lock, false); 798 799 if (sleeping) 800 waitq_interrupt_sleep(thread); 801 802 irq_spinlock_unlock(&threads_lock, true); 803 } 804 805 #endif /* CONFIG_UDEBUG */ 753 806 754 807 /** Process syscall to create new thread. … … 793 846 * has already been created. We need to undo its 794 847 * creation now. 795 *796 848 */ 797 849 … … 815 867 * THREAD_B events for threads that already existed 816 868 * and could be detected with THREAD_READ before. 817 *818 869 */ 819 870 udebug_thread_b_event_attach(thread, TASK); -
kernel/generic/src/synch/waitq.c
r22027b6e r8b5690f 127 127 /** Interrupt sleeping thread. 128 128 * 129 * This routine attempts to interrupt a thread from its sleep in a waitqueue. 130 * If the thread is not found sleeping, no action is taken. 129 * This routine attempts to interrupt a thread from its sleep in 130 * a waitqueue. If the thread is not found sleeping, no action 131 * is taken. 132 * 133 * The threads_lock must be already held and interrupts must be 134 * disabled upon calling this function. 131 135 * 132 136 * @param thread Thread to be interrupted. … … 138 142 DEADLOCK_PROBE_INIT(p_wqlock); 139 143 140 irq_spinlock_lock(&threads_lock, true); 141 if (!thread_exists(thread)) 142 goto out; 144 /* 145 * The thread is quaranteed to exist because 146 * threads_lock is held. 147 */ 143 148 144 149 grab_locks: … … 150 155 /* 151 156 * The sleep cannot be interrupted. 152 *153 157 */ 154 158 irq_spinlock_unlock(&thread->lock, false); 155 goto out;159 return; 156 160 } 157 161 158 162 if (!irq_spinlock_trylock(&wq->lock)) { 163 /* Avoid deadlock */ 159 164 irq_spinlock_unlock(&thread->lock, false); 160 165 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 161 /* Avoid deadlock */162 166 goto grab_locks; 163 167 } … … 173 177 irq_spinlock_unlock(&wq->lock, false); 174 178 } 179 175 180 irq_spinlock_unlock(&thread->lock, false); 176 181 177 182 if (do_wakeup) 178 183 thread_ready(thread); 179 180 out:181 irq_spinlock_unlock(&threads_lock, true);182 184 } 183 185 … … 370 372 * If the thread was already interrupted, 371 373 * don't go to sleep at all. 372 *373 374 */ 374 375 if (THREAD->interrupted) { … … 381 382 * Set context that will be restored if the sleep 382 383 * of this thread is ever interrupted. 383 *384 384 */ 385 385 THREAD->sleep_interruptible = true; -
kernel/generic/src/syscall/syscall.c
@@ 86,86 @@
  } else {
  	printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id);
- 	task_kill(TASK->taskid);
- 	thread_exit();
+ 	task_kill_self(true);
  }

@@ 131,130 @@
  (syshandler_t) sys_task_set_name,
  (syshandler_t) sys_task_kill,
+ (syshandler_t) sys_task_exit,
  (syshandler_t) sys_program_spawn_loader,

@@ 156,156 @@
  (syshandler_t) sys_ipc_poke,
  (syshandler_t) sys_ipc_hangup,
- (syshandler_t) sys_ipc_register_irq,
- (syshandler_t) sys_ipc_unregister_irq,
  (syshandler_t) sys_ipc_connect_kbox,

@@ 171,169 @@
  (syshandler_t) sys_physmem_map,
  (syshandler_t) sys_iospace_enable,
- (syshandler_t) sys_interrupt_enable,
+ (syshandler_t) sys_register_irq,
+ (syshandler_t) sys_unregister_irq,

  /* Sysinfo syscalls */
kernel/generic/src/sysinfo/stats.c
r22027b6e r8b5690f 170 170 * Note that it may be infinitely better to let the address space 171 171 * management code compute these statistics as it proceeds instead of 172 * having them calculated hereover and over again here.172 * having them calculated over and over again here. 173 173 */ 174 174 … … 199 199 } 200 200 201 /** Get the resident (used) size of a virtual address space 202 * 203 * @param as Address space. 204 * 205 * @return Size of the resident (used) virtual address space (bytes). 206 * 207 */ 208 static size_t get_task_resmem(as_t *as) 209 { 210 size_t result = 0; 211 212 /* 213 * We are holding some spinlocks here and therefore are not allowed to 214 * block. Only attempt to lock the address space and address space area 215 * mutexes conditionally. If it is not possible to lock either object, 216 * allow the statistics to be inexact by skipping the respective object. 217 * 218 * Note that it may be infinitely better to let the address space 219 * management code compute these statistics as it proceeds instead of 220 * having them calculated over and over again here. 221 */ 222 223 if (SYNCH_FAILED(mutex_trylock(&as->lock))) 224 return result * PAGE_SIZE; 225 226 /* Walk the B+ tree of AS areas */ 227 link_t *cur; 228 for (cur = as->as_area_btree.leaf_head.next; 229 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 230 btree_node_t *node = 231 list_get_instance(cur, btree_node_t, leaf_link); 232 233 unsigned int i; 234 for (i = 0; i < node->keys; i++) { 235 as_area_t *area = node->value[i]; 236 237 if (SYNCH_FAILED(mutex_trylock(&area->lock))) 238 continue; 239 240 /* Walk the B+ tree of resident pages */ 241 link_t *rcur; 242 for (rcur = area->used_space.leaf_head.next; 243 rcur != &area->used_space.leaf_head; rcur = rcur->next) { 244 btree_node_t *rnode = 245 list_get_instance(rcur, btree_node_t, leaf_link); 246 247 unsigned int j; 248 for (j = 0; j < rnode->keys; j++) 249 result += (size_t) rnode->value[i]; 250 } 251 252 mutex_unlock(&area->lock); 253 } 254 } 255 256 mutex_unlock(&as->lock); 257 258 return result * PAGE_SIZE; 259 } 260 201 261 /* Produce task statistics 202 262 * … … 215 275 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name); 216 276 stats_task->virtmem = get_task_virtmem(task->as); 277 stats_task->resmem = get_task_resmem(task->as); 217 278 stats_task->threads = atomic_get(&task->refcount); 218 279 task_get_accounting(task, &(stats_task->ucycles), -
kernel/generic/src/sysinfo/sysinfo.c
@@ 40,40 @@
  #include <arch/asm.h>
  #include <errno.h>
+ #include <macros.h>

  /** Maximal sysinfo path length */

@@ 761,762 @@
   * character must be null).
   *
-  * The user space buffer must be sized exactly according
-  * to the size of the binary data, otherwise the request
-  * fails.
+  * If the user space buffer size does not equal
+  * the actual size of the returned data, the data
+  * is truncated. Whether this is actually a fatal
+  * error or the data can be still interpreted as valid
+  * depends on the nature of the data and has to be
+  * decided by the user space.
+  *
+  * The actual size of data returned is stored to
+  * size_ptr.
   *
   * @param path_ptr Sysinfo path in the user address space.

@@ 770,777 @@
   *                    to store the binary data.
   * @param buffer_size User space buffer size.
+  * @param size_ptr    User space pointer where to store the
+  *                    binary data size.
   *
   * @return Error code (EOK in case of no error).
   *
   */
  sysarg_t sys_sysinfo_get_data(void *path_ptr, size_t path_size,
-     void *buffer_ptr, size_t buffer_size)
+     void *buffer_ptr, size_t buffer_size, size_t *size_ptr)
  {
  	int rc;

  	/* Get the item */
- 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, false);
-
+ 	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size,
+ 	    false);
+
  	/* Only constant or generated binary data is considered */
- 	if ((ret.tag == SYSINFO_VAL_DATA) || (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) {
- 		/* Check destination buffer size */
- 		if (ret.data.size == buffer_size)
- 			rc = copy_to_uspace(buffer_ptr, ret.data.data,
- 			    ret.data.size);
- 		else
- 			rc = ENOMEM;
+ 	if ((ret.tag == SYSINFO_VAL_DATA) ||
+ 	    (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) {
+ 		size_t size = min(ret.data.size, buffer_size);
+ 		rc = copy_to_uspace(buffer_ptr, ret.data.data, size);
+ 		if (rc == EOK)
+ 			rc = copy_to_uspace(size_ptr, &size, sizeof(size));
  	} else
  		rc = EINVAL;
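Under the new contract the caller is expected to query the data size first, read the data, and then compare the size actually returned, since the item may grow or shrink between the two calls. A sketch of the intended user-space sequence; the wrapper names and the "data.item" path are assumptions, only the underlying syscalls appear in this changeset:

    size_t size;
    if (sysinfo_get_data_size("data.item", &size) != EOK)  /* hypothetical wrapper */
    	return;

    void *buf = malloc(size);
    size_t returned;

    /* SYS_SYSINFO_GET_DATA now truncates to buffer_size and reports the
       number of bytes copied via the new fifth argument. */
    if ((sysinfo_get_data("data.item", buf, size, &returned) == EOK) &&
        (returned == size)) {
    	/* The snapshot is complete and can be parsed. */
    }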
kernel/generic/src/time/clock.c
@@ 93,93 @@
  clock_parea.pbase = (uintptr_t) faddr;
  clock_parea.frames = 1;
+ clock_parea.unpriv = true;
  ddi_parea_register(&clock_parea);

@@ 100,101 @@
   *
   */
- sysinfo_set_item_val("clock.cacheable", NULL, (sysarg_t) true);
  sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);