Changeset 1affcdf3 in mainline for kernel/generic
- Timestamp:
- 2011-06-10T19:33:41Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 1878386
- Parents:
- 13ecdac9 (diff), 79a141a (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/generic
- Files:
-
- 1 added
- 51 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/adt/list.h
r13ecdac9 r1affcdf3 41 41 /** Doubly linked list head and link type. */ 42 42 typedef struct link { 43 struct link *prev; 44 struct link *next; 43 struct link *prev; /**< Pointer to the previous item in the list. */ 44 struct link *next; /**< Pointer to the next item in the list. */ 45 45 } link_t; 46 46 … … 48 48 * 49 49 * @param name Name of the new statically allocated list. 50 * 50 51 */ 51 52 #define LIST_INITIALIZE(name) \ 52 link_t name = { .prev = &name, .next = &name } 53 link_t name = { \ 54 .prev = &name, \ 55 .next = &name \ 56 } 57 58 #define list_get_instance(link, type, member) \ 59 ((type *) (((void *)(link)) - ((void *) &(((type *) NULL)->member)))) 60 61 #define list_foreach(list, iterator) \ 62 for (link_t *iterator = (list).next; \ 63 iterator != &(list); iterator = iterator->next) 53 64 54 65 /** Initialize doubly-linked circular list link … … 57 68 * 58 69 * @param link Pointer to link_t structure to be initialized. 70 * 59 71 */ 60 72 NO_TRACE static inline void link_initialize(link_t *link) … … 68 80 * Initialize doubly-linked circular list. 69 81 * 70 * @param head Pointer to link_t structure representing head of the list. 71 */ 72 NO_TRACE static inline void list_initialize(link_t *head) 73 { 74 head->prev = head; 75 head->next = head; 82 * @param list Pointer to link_t structure representing the list. 83 * 84 */ 85 NO_TRACE static inline void list_initialize(link_t *list) 86 { 87 list->prev = list; 88 list->next = list; 76 89 } 77 90 … … 81 94 * 82 95 * @param link Pointer to link_t structure to be added. 83 * @param head Pointer to link_t structure representing head of the list. 84 */ 85 NO_TRACE static inline void list_prepend(link_t *link, link_t *head) 86 { 87 link->next = head->next; 88 link->prev = head; 89 head->next->prev = link; 90 head->next = link; 96 * @param list Pointer to link_t structure representing the list. 
97 * 98 */ 99 NO_TRACE static inline void list_prepend(link_t *link, link_t *list) 100 { 101 link->next = list->next; 102 link->prev = list; 103 list->next->prev = link; 104 list->next = link; 91 105 } 92 106 … … 96 110 * 97 111 * @param link Pointer to link_t structure to be added. 98 * @param head Pointer to link_t structure representing head of the list. 99 */ 100 NO_TRACE static inline void list_append(link_t *link, link_t *head) 101 { 102 link->prev = head->prev; 103 link->next = head; 104 head->prev->next = link; 105 head->prev = link; 112 * @param list Pointer to link_t structure representing the list. 113 * 114 */ 115 NO_TRACE static inline void list_append(link_t *link, link_t *list) 116 { 117 link->prev = list->prev; 118 link->next = list; 119 list->prev->next = link; 120 list->prev = link; 121 } 122 123 /** Insert item before another item in doubly-linked circular list. 124 * 125 */ 126 static inline void list_insert_before(link_t *link, link_t *list) 127 { 128 list_append(link, list); 129 } 130 131 /** Insert item after another item in doubly-linked circular list. 132 * 133 */ 134 static inline void list_insert_after(link_t *link, link_t *list) 135 { 136 list_prepend(list, link); 106 137 } 107 138 … … 110 141 * Remove item from doubly-linked circular list. 111 142 * 112 * @param link Pointer to link_t structure to be removed from the list it is 113 * contained in. 143 * @param link Pointer to link_t structure to be removed from the list 144 * it is contained in. 145 * 114 146 */ 115 147 NO_TRACE static inline void list_remove(link_t *link) … … 124 156 * Query emptiness of doubly-linked circular list. 125 157 * 126 * @param head Pointer to link_t structure representing head of the list. 127 */ 128 NO_TRACE static inline bool list_empty(link_t *head) 129 { 130 return head->next == head ? true : false; 131 } 132 158 * @param list Pointer to link_t structure representing the list. 
159 * 160 */ 161 NO_TRACE static inline int list_empty(link_t *list) 162 { 163 return (list->next == list); 164 } 165 166 /** Get head item of a list. 167 * 168 * @param list Pointer to link_t structure representing the list. 169 * 170 * @return Head item of the list. 171 * @return NULL if the list is empty. 172 * 173 */ 174 static inline link_t *list_head(link_t *list) 175 { 176 return ((list->next == list) ? NULL : list->next); 177 } 133 178 134 179 /** Split or concatenate headless doubly-linked circular list … … 139 184 * concatenates splitted lists and splits concatenated lists. 140 185 * 141 * @param part1 Pointer to link_t structure leading the first (half of the 142 * headless) list. 143 * @param part2 Pointer to link_t structure leading the second (half of the 144 * headless) list. 186 * @param part1 Pointer to link_t structure leading the first 187 * (half of the headless) list. 188 * @param part2 Pointer to link_t structure leading the second 189 * (half of the headless) list. 190 * 145 191 */ 146 192 NO_TRACE static inline void headless_list_split_or_concat(link_t *part1, link_t *part2) 147 193 { 148 link_t *hlp;149 150 194 part1->prev->next = part2; 151 part2->prev->next = part1; 152 hlp = part1->prev; 195 part2->prev->next = part1; 196 197 link_t *hlp = part1->prev; 198 153 199 part1->prev = part2->prev; 154 200 part2->prev = hlp; 155 201 } 156 202 157 158 203 /** Split headless doubly-linked circular list 159 204 * 160 205 * Split headless doubly-linked circular list. 161 206 * 162 * @param part1 Pointer to link_t structure leading the first half of the 163 * headless list. 164 * @param part2 Pointer to link_t structure leading the second half of the 165 * headless list. 207 * @param part1 Pointer to link_t structure leading 208 * the first half of the headless list. 209 * @param part2 Pointer to link_t structure leading 210 * the second half of the headless list. 
211 * 166 212 */ 167 213 NO_TRACE static inline void headless_list_split(link_t *part1, link_t *part2) … … 174 220 * Concatenate two headless doubly-linked circular lists. 175 221 * 176 * @param part1 Pointer to link_t structure leading the first headless list. 177 * @param part2 Pointer to link_t structure leading the second headless list. 222 * @param part1 Pointer to link_t structure leading 223 * the first headless list. 224 * @param part2 Pointer to link_t structure leading 225 * the second headless list. 226 * 178 227 */ 179 228 NO_TRACE static inline void headless_list_concat(link_t *part1, link_t *part2) … … 182 231 } 183 232 184 #define list_get_instance(link, type, member) \ 185 ((type *)(((uint8_t *)(link)) - ((uint8_t *)&(((type *)NULL)->member)))) 186 187 extern bool list_member(const link_t *link, const link_t *head); 188 extern void list_concat(link_t *head1, link_t *head2); 233 /** Get n-th item of a list. 234 * 235 * @param list Pointer to link_t structure representing the list. 236 * @param n Item number (indexed from zero). 237 * 238 * @return n-th item of the list. 239 * @return NULL if no n-th item found. 240 * 241 */ 242 static inline link_t *list_nth(link_t *list, unsigned int n) 243 { 244 unsigned int cnt = 0; 245 246 list_foreach(*list, link) { 247 if (cnt == n) 248 return link; 249 250 cnt++; 251 } 252 253 return NULL; 254 } 255 256 extern int list_member(const link_t *, const link_t *); 257 extern void list_concat(link_t *, link_t *); 258 extern unsigned int list_count(const link_t *); 189 259 190 260 #endif -
kernel/generic/include/arch.h
r13ecdac9 r1affcdf3 41 41 #include <mm/as.h> 42 42 43 #define DEFAULT_CONTEXT 0 43 /* 44 * THE is not an abbreviation, but the English definite article written in 45 * capital letters. It means the current pointer to something, e.g. thread, 46 * processor or address space. Kind reader of this comment shall appreciate 47 * the wit of constructs like THE->thread and similar. 48 */ 49 #define THE ((the_t * )(get_stack_base())) 44 50 45 51 #define CPU THE->cpu … … 47 53 #define TASK THE->task 48 54 #define AS THE->as 49 #define CONTEXT (THE->task ? THE->task->context : DEFAULT_CONTEXT)50 55 #define PREEMPTION_DISABLED THE->preemption_disabled 56 #define MAGIC UINT32_C(0xfacefeed) 51 57 52 #define context_check(ctx1, ctx2) ((ctx1) == (ctx2)) 58 #define container_check(ctn1, ctn2) ((ctn1) == (ctn2)) 59 60 #define DEFAULT_CONTAINER 0 61 #define CONTAINER \ 62 ((THE->task) ? (THE->task->container) : (DEFAULT_CONTAINER)) 53 63 54 64 /** … … 63 73 cpu_t *cpu; /**< Executing cpu. */ 64 74 as_t *as; /**< Current address space. */ 75 uint32_t magic; /**< Magic value */ 65 76 } the_t; 66 67 /*68 * THE is not an abbreviation, but the English definite article written in69 * capital letters. It means the current pointer to something, e.g. thread,70 * processor or address space. Kind reader of this comment shall appreciate71 * the wit of constructs like THE->thread and similar.72 */73 #define THE ((the_t * )(get_stack_base()))74 77 75 78 extern void the_initialize(the_t *); -
kernel/generic/include/config.h
r13ecdac9 r1affcdf3 36 36 #define KERN_CONFIG_H_ 37 37 38 #include <typedefs.h>39 38 #include <arch/mm/page.h> 40 39 41 #define STACK_SIZE PAGE_SIZE 40 #define ONE_FRAME 0 41 #define TWO_FRAMES 1 42 #define FOUR_FRAMES 2 43 44 #define STACK_FRAMES TWO_FRAMES 45 #define STACK_SIZE ((1 << STACK_FRAMES) << PAGE_WIDTH) 42 46 43 47 #define CONFIG_INIT_TASKS 32 44 48 #define CONFIG_TASK_NAME_BUFLEN 32 49 50 #ifndef __ASM__ 51 52 #include <typedefs.h> 45 53 46 54 typedef struct { … … 80 88 extern ballocs_t ballocs; 81 89 90 #endif /* __ASM__ */ 91 82 92 #endif 83 93 -
kernel/generic/include/cpu.h
r13ecdac9 r1affcdf3 41 41 #include <arch/cpu.h> 42 42 #include <arch/context.h> 43 44 #define CPU_STACK_SIZE STACK_SIZE45 43 46 44 /** CPU structure. -
kernel/generic/include/ddi/irq.h
r13ecdac9 r1affcdf3 77 77 */ 78 78 CMD_PIO_WRITE_A_32, 79 79 80 /** Read 1 byte from the memory space. */ 81 CMD_MEM_READ_8, 82 /** Read 2 bytes from the memory space. */ 83 CMD_MEM_READ_16, 84 /** Read 4 bytes from the memory space. */ 85 CMD_MEM_READ_32, 86 87 /** Write 1 byte to the memory space. */ 88 CMD_MEM_WRITE_8, 89 /** Write 2 bytes to the memory space. */ 90 CMD_MEM_WRITE_16, 91 /** Write 4 bytes to the memory space. */ 92 CMD_MEM_WRITE_32, 93 94 /** Write 1 byte from the source argument to the memory space. */ 95 CMD_MEM_WRITE_A_8, 96 /** Write 2 bytes from the source argument to the memory space. */ 97 CMD_MEM_WRITE_A_16, 98 /** Write 4 bytes from the source argument to the memory space. */ 99 CMD_MEM_WRITE_A_32, 100 80 101 /** 81 102 * Perform a bit masking on the source argument … … 203 224 /** Notification configuration structure. */ 204 225 ipc_notif_cfg_t notif_cfg; 226 227 as_t *driver_as; 205 228 } irq_t; 206 229 -
kernel/generic/include/ipc/event.h
r13ecdac9 r1affcdf3 41 41 #include <ipc/ipc.h> 42 42 43 typedef void (*event_callback_t)(void); 44 43 45 /** Event notification structure. */ 44 46 typedef struct { … … 51 53 /** Counter. */ 52 54 size_t counter; 55 56 /** Masked flag. */ 57 bool masked; 58 /** Unmask callback. */ 59 event_callback_t unmask_callback; 53 60 } event_t; 54 61 55 62 extern void event_init(void); 63 extern void event_cleanup_answerbox(answerbox_t *); 64 extern void event_set_unmask_callback(event_type_t, event_callback_t); 65 66 #define event_notify_0(e, m) \ 67 event_notify((e), (m), 0, 0, 0, 0, 0) 68 #define event_notify_1(e, m, a1) \ 69 event_notify((e), (m), (a1), 0, 0, 0, 0) 70 #define event_notify_2(e, m, a1, a2) \ 71 event_notify((e), (m), (a1), (a2), 0, 0, 0) 72 #define event_notify_3(e, m, a1, a2, a3) \ 73 event_notify((e), (m), (a1), (a2), (a3), 0, 0) 74 #define event_notify_4(e, m, a1, a2, a3, a4) \ 75 event_notify((e), (m), (a1), (a2), (a3), (a4), 0) 76 #define event_notify_5(e, m, a1, a2, a3, a4, a5) \ 77 event_notify((e), (m), (a1), (a2), (a3), (a4), (a5)) 78 79 extern int event_notify(event_type_t, bool, sysarg_t, sysarg_t, sysarg_t, 80 sysarg_t, sysarg_t); 81 56 82 extern sysarg_t sys_event_subscribe(sysarg_t, sysarg_t); 57 extern bool event_is_subscribed(event_type_t); 58 extern void event_cleanup_answerbox(answerbox_t *); 59 60 #define event_notify_0(e) \ 61 event_notify((e), 0, 0, 0, 0, 0) 62 #define event_notify_1(e, a1) \ 63 event_notify((e), (a1), 0, 0, 0, 0) 64 #define event_notify_2(e, a1, a2) \ 65 event_notify((e), (a1), (a2), 0, 0, 0) 66 #define event_notify_3(e, a1, a2, a3) \ 67 event_notify((e), (a1), (a2), (a3), 0, 0) 68 #define event_notify_4(e, a1, a2, a3, a4) \ 69 event_notify((e), (a1), (a2), (a3), (a4), 0) 70 #define event_notify_5(e, a1, a2, a3, a4, a5) \ 71 event_notify((e), (a1), (a2), (a3), (a4), (a5)) 72 73 extern void event_notify(event_type_t, sysarg_t, sysarg_t, sysarg_t, 74 sysarg_t, sysarg_t); 83 extern sysarg_t sys_event_unmask(sysarg_t); 75 
84 76 85 #endif -
kernel/generic/include/ipc/event_types.h
r13ecdac9 r1affcdf3 39 39 /** New data available in kernel log */ 40 40 EVENT_KLOG = 0, 41 /** Returning from kernel console to userspace */41 41 /** Returning from kernel console to uspace */ 42 42 EVENT_KCONSOLE, 43 43 /** A task/thread has faulted and will be terminated */
kernel/generic/include/ipc/ipc.h
r13ecdac9 r1affcdf3 100 100 #define IPC_GET_ARG5(data) ((data).args[5]) 101 101 102 /* Well known phone descriptors */103 #define PHONE_NS 0104 105 102 /* Forwarding flags. */ 106 103 #define IPC_FF_NONE 0 … … 117 114 118 115 /* Data transfer flags. */ 119 #define IPC_XF_NONE 116 #define IPC_XF_NONE 0 120 117 121 118 /** Restrict the transfer size if necessary. */ 122 #define IPC_XF_RESTRICT (1 << 0) 123 124 /** Kernel IPC interfaces 125 * 126 */ 127 #define IPC_IF_KERNEL 0 128 129 /** System-specific methods - only through special syscalls 130 * 131 * These methods have special behaviour. These methods also 132 * have the implicit kernel interface 0. 133 * 134 */ 135 136 /** Clone connection. 137 * 138 * The calling task clones one of its phones for the callee. 139 * 140 * - ARG1 - The caller sets ARG1 to the phone of the cloned connection. 141 * - The callee gets the new phone from ARG1. 142 * 143 * - on answer, the callee acknowledges the new connection by sending EOK back 144 * or the kernel closes it 145 * 146 */ 147 #define IPC_M_CONNECTION_CLONE 1 148 149 /** Protocol for CONNECT - ME 150 * 151 * Through this call, the recipient learns about the new cloned connection. 152 * 153 * - ARG5 - the kernel sets ARG5 to contain the hash of the used phone 154 * - on asnwer, the callee acknowledges the new connection by sending EOK back 155 * or the kernel closes it 156 * 157 */ 158 #define IPC_M_CONNECT_ME 2 159 160 /** Protocol for CONNECT - TO - ME 161 * 162 * Calling process asks the callee to create a callback connection, 163 * so that it can start initiating new messages. 164 * 165 * The protocol for negotiating is: 166 * - sys_connect_to_me - sends a message IPC_M_CONNECT_TO_ME 167 * - recipient - upon receipt tries to allocate new phone 168 * - if it fails, responds with ELIMIT 169 * - passes call to userspace. If userspace 170 * responds with error, phone is deallocated and 171 * error is sent back to caller. 
Otherwise 172 * the call is accepted and the response is sent back. 173 * - the hash of the client task is passed to userspace 174 * (on the receiving side) as ARG4 of the call. 175 * - the hash of the allocated phone is passed to userspace 176 * (on the receiving side) as ARG5 of the call. 177 * 178 */ 179 #define IPC_M_CONNECT_TO_ME 3 180 181 /** Protocol for CONNECT - ME - TO 182 * 183 * Calling process asks the callee to create for him a new connection. 184 * E.g. the caller wants a name server to connect him to print server. 185 * 186 * The protocol for negotiating is: 187 * - sys_connect_me_to - send a synchronous message to name server 188 * indicating that it wants to be connected to some 189 * service 190 * - arg1/2/3 are user specified, arg5 contains 191 * address of the phone that should be connected 192 * (TODO: it leaks to userspace) 193 * - recipient - if ipc_answer == 0, then accept connection 194 * - otherwise connection refused 195 * - recepient may forward message. 196 * 197 */ 198 #define IPC_M_CONNECT_ME_TO 4 199 200 /** This message is sent to answerbox when the phone is hung up 201 * 202 */ 203 #define IPC_M_PHONE_HUNGUP 5 204 205 /** Send as_area over IPC. 206 * - ARG1 - source as_area base address 207 * - ARG2 - size of source as_area (filled automatically by kernel) 208 * - ARG3 - flags of the as_area being sent 209 * 210 * on answer, the recipient must set: 211 * - ARG1 - dst as_area base adress 212 * 213 */ 214 #define IPC_M_SHARE_OUT 6 215 216 /** Receive as_area over IPC. 217 * - ARG1 - destination as_area base address 218 * - ARG2 - destination as_area size 219 * - ARG3 - user defined argument 220 * 221 * on answer, the recipient must set: 222 * 223 * - ARG1 - source as_area base address 224 * - ARG2 - flags that will be used for sharing 225 * 226 */ 227 #define IPC_M_SHARE_IN 7 228 229 /** Send data to another address space over IPC. 
230 * - ARG1 - source address space virtual address 231 * - ARG2 - size of data to be copied, may be overriden by the recipient 232 * 233 * on answer, the recipient must set: 234 * 235 * - ARG1 - final destination address space virtual address 236 * - ARG2 - final size of data to be copied 237 * 238 */ 239 #define IPC_M_DATA_WRITE 8 240 241 /** Receive data from another address space over IPC. 242 * - ARG1 - destination virtual address in the source address space 243 * - ARG2 - size of data to be received, may be cropped by the recipient 244 * 245 * on answer, the recipient must set: 246 * 247 * - ARG1 - source virtual address in the destination address space 248 * - ARG2 - final size of data to be copied 249 * 250 */ 251 #define IPC_M_DATA_READ 9 252 253 /** Debug the recipient. 254 * - ARG1 - specifies the debug method (from udebug_method_t) 255 * - other arguments are specific to the debug method 256 * 257 */ 258 #define IPC_M_DEBUG_ALL 10 259 260 /* Well-known methods */ 261 #define IPC_M_LAST_SYSTEM 511 262 #define IPC_M_PING 512 263 264 /* User methods */ 119 #define IPC_XF_RESTRICT (1 << 0) 120 121 /** User-defined IPC methods */ 265 122 #define IPC_FIRST_USER_METHOD 1024 266 123 -
kernel/generic/include/mm/as.h
r13ecdac9 r1affcdf3 84 84 #define USER_ADDRESS_SPACE_END USER_ADDRESS_SPACE_END_ARCH 85 85 86 #define USTACK_ADDRESS USTACK_ADDRESS_ARCH 86 #ifdef USTACK_ADDRESS_ARCH 87 #define USTACK_ADDRESS USTACK_ADDRESS_ARCH 88 #else 89 #define USTACK_ADDRESS (USER_ADDRESS_SPACE_END - (STACK_SIZE - 1)) 90 #endif 87 91 88 92 /** Kernel address space. */ -
kernel/generic/include/mm/frame.h
r13ecdac9 r1affcdf3 44 44 #include <arch/mm/page.h> 45 45 #include <arch/mm/frame.h> 46 47 #define ONE_FRAME 048 #define TWO_FRAMES 149 #define FOUR_FRAMES 250 51 52 #ifdef ARCH_STACK_FRAMES53 #define STACK_FRAMES ARCH_STACK_FRAMES54 #else55 #define STACK_FRAMES ONE_FRAME56 #endif57 46 58 47 /** Maximum number of zones in the system. */ … … 164 153 extern void frame_free_noreserve(uintptr_t); 165 154 extern void frame_reference_add(pfn_t); 155 extern size_t frame_total_free_get(void); 166 156 167 157 extern size_t find_zone(pfn_t, size_t, size_t); -
kernel/generic/include/mm/page.h
r13ecdac9 r1affcdf3 37 37 38 38 #include <typedefs.h> 39 #include <proc/task.h> 39 40 #include <mm/as.h> 40 #include <memstr.h> 41 #include <arch/mm/page.h> 42 43 #define P2SZ(pages) \ 44 ((pages) << PAGE_WIDTH) 41 45 42 46 /** Operations to manipulate page mappings. */ … … 44 48 void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int); 45 49 void (* mapping_remove)(as_t *, uintptr_t); 46 pte_t *(* mapping_find)(as_t *, uintptr_t );50 pte_t *(* mapping_find)(as_t *, uintptr_t, bool); 47 51 } page_mapping_operations_t; 48 52 … … 55 59 extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 56 60 extern void page_mapping_remove(as_t *, uintptr_t); 57 extern pte_t *page_mapping_find(as_t *, uintptr_t );61 extern pte_t *page_mapping_find(as_t *, uintptr_t, bool); 58 62 extern pte_t *page_table_create(unsigned int); 59 63 extern void page_table_destroy(pte_t *); … … 62 66 extern uintptr_t hw_map(uintptr_t, size_t); 63 67 68 extern sysarg_t sys_page_find_mapping(uintptr_t, uintptr_t *); 69 64 70 #endif 65 71 -
kernel/generic/include/mm/reserve.h
r13ecdac9 r1affcdf3 38 38 #include <typedefs.h> 39 39 40 extern void reserve_init(void); 40 41 extern bool reserve_try_alloc(size_t); 41 42 extern void reserve_force_alloc(size_t); -
kernel/generic/include/mm/tlb.h
r13ecdac9 r1affcdf3 86 86 extern void tlb_invalidate_asid(asid_t); 87 87 extern void tlb_invalidate_pages(asid_t, uintptr_t, size_t); 88 88 89 #endif 89 90 -
kernel/generic/include/proc/task.h
r13ecdac9 r1affcdf3 78 78 /** Unique identity of task. */ 79 79 task_id_t taskid; 80 /** Task security cont ext. */81 cont ext_id_t context;80 /** Task security container. */ 81 container_id_t container; 82 82 83 83 /** Number of references (i.e. threads). */ -
kernel/generic/include/proc/thread.h
r13ecdac9 r1affcdf3 49 49 #include <sysinfo/abi.h> 50 50 51 #define THREAD_STACK_SIZE STACK_SIZE52 51 #define THREAD_NAME_BUFLEN 20 53 52 … … 259 258 extern sysarg_t sys_thread_get_id(thread_id_t *); 260 259 extern sysarg_t sys_thread_usleep(uint32_t); 260 extern sysarg_t sys_thread_udelay(uint32_t); 261 261 262 262 #endif -
kernel/generic/include/proc/uarg.h
r13ecdac9 r1affcdf3 40 40 void *uspace_entry; 41 41 void *uspace_stack; 42 42 43 43 void (* uspace_thread_function)(); 44 44 void *uspace_thread_arg; -
kernel/generic/include/synch/waitq.h
r13ecdac9 r1affcdf3 62 62 int missed_wakeups; 63 63 64 /** List of sleeping threads for wich there was no missed_wakeup. */64 64 /** List of sleeping threads for which there was no missed_wakeup. */ 65 65 link_t head; 66 66 } waitq_t;
kernel/generic/include/syscall/syscall.h
r13ecdac9 r1affcdf3 44 44 SYS_THREAD_GET_ID, 45 45 SYS_THREAD_USLEEP, 46 SYS_THREAD_UDELAY, 46 47 47 48 SYS_TASK_GET_ID, … … 61 62 SYS_AS_GET_UNMAPPED_AREA, 62 63 64 SYS_PAGE_FIND_MAPPING, 65 63 66 SYS_IPC_CALL_SYNC_FAST, 64 67 SYS_IPC_CALL_SYNC_SLOW, … … 75 78 76 79 SYS_EVENT_SUBSCRIBE, 80 SYS_EVENT_UNMASK, 77 81 78 82 SYS_CAP_GRANT, -
kernel/generic/include/typedefs.h
r13ecdac9 r1affcdf3 64 64 typedef uint64_t thread_id_t; 65 65 typedef uint64_t task_id_t; 66 typedef uint32_t context_id_t;66 66 typedef uint32_t container_id_t; 67 67 68 68 typedef int32_t inr_t;
kernel/generic/src/adt/list.c
r13ecdac9 r1affcdf3 52 52 * 53 53 */ 54 boollist_member(const link_t *link, const link_t *head)54 int list_member(const link_t *link, const link_t *head) 55 55 { 56 56 bool found = false; -
kernel/generic/src/console/cmd.c
r13ecdac9 r1affcdf3 1107 1107 release_console(); 1108 1108 1109 event_notify_0(EVENT_KCONSOLE );1109 event_notify_0(EVENT_KCONSOLE, false); 1110 1110 indev_pop_character(stdin); 1111 1111 -
kernel/generic/src/console/console.c
r13ecdac9 r1affcdf3 53 53 #include <str.h> 54 54 55 #define KLOG_PAGES 455 #define KLOG_PAGES 8 56 56 #define KLOG_LENGTH (KLOG_PAGES * PAGE_SIZE / sizeof(wchar_t)) 57 #define KLOG_LATENCY 858 57 59 58 /** Kernel log cyclic buffer */ … … 61 60 62 61 /** Kernel log initialized */ 63 static bool klog_inited = false;62 static atomic_t klog_inited = {false}; 64 63 65 64 /** First kernel log characters */ … … 76 75 77 76 /** Kernel log spinlock */ 78 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, " *klog_lock");77 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "klog_lock"); 79 78 80 79 /** Physical memory area used for klog buffer */ … … 166 165 sysinfo_set_item_val("klog.pages", NULL, KLOG_PAGES); 167 166 168 spinlock_lock(&klog_lock); 169 klog_inited = true; 170 spinlock_unlock(&klog_lock); 167 event_set_unmask_callback(EVENT_KLOG, klog_update); 168 atomic_set(&klog_inited, true); 171 169 } 172 170 … … 263 261 void klog_update(void) 264 262 { 263 if (!atomic_get(&klog_inited)) 264 return; 265 265 266 spinlock_lock(&klog_lock); 266 267 267 if ((klog_inited) && (event_is_subscribed(EVENT_KLOG)) && (klog_uspace > 0)) { 268 event_notify_3(EVENT_KLOG, klog_start, klog_len, klog_uspace); 269 klog_uspace = 0; 268 if (klog_uspace > 0) { 269 if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len, 270 klog_uspace) == EOK) 271 klog_uspace = 0; 270 272 } 271 273 … … 275 277 void putchar(const wchar_t ch) 276 278 { 279 bool ordy = ((stdout) && (stdout->op->write)); 280 277 281 spinlock_lock(&klog_lock); 278 282 279 if ((klog_stored > 0) && (stdout) && (stdout->op->write)) { 280 /* Print charaters stored in kernel log */ 281 size_t i; 282 for (i = klog_len - klog_stored; i < klog_len; i++) 283 stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent); 284 klog_stored = 0; 283 /* Print charaters stored in kernel log */ 284 if (ordy) { 285 while (klog_stored > 0) { 286 wchar_t tmp = klog[(klog_start + klog_len - klog_stored) % KLOG_LENGTH]; 287 klog_stored--; 288 289 /* 290 
* We need to give up the spinlock for 291 * the physical operation of writting out 292 * the character. 293 */ 294 spinlock_unlock(&klog_lock); 295 stdout->op->write(stdout, tmp, silent); 296 spinlock_lock(&klog_lock); 297 } 285 298 } 286 299 … … 292 305 klog_start = (klog_start + 1) % KLOG_LENGTH; 293 306 294 if ((stdout) && (stdout->op->write)) 307 if (!ordy) { 308 if (klog_stored < klog_len) 309 klog_stored++; 310 } 311 312 /* The character is stored for uspace */ 313 if (klog_uspace < klog_len) 314 klog_uspace++; 315 316 spinlock_unlock(&klog_lock); 317 318 if (ordy) { 319 /* 320 * Output the character. In this case 321 * it should be no longer buffered. 322 */ 295 323 stdout->op->write(stdout, ch, silent); 296 else {324 } else { 297 325 /* 298 326 * No standard output routine defined yet. … … 304 332 * Note that the early_putc() function might be 305 333 * a no-op on certain hardware configurations. 306 *307 334 */ 308 335 early_putchar(ch); 309 310 if (klog_stored < klog_len) 311 klog_stored++; 312 } 313 314 /* The character is stored for uspace */ 315 if (klog_uspace < klog_len) 316 klog_uspace++; 317 318 /* Check notify uspace to update */ 319 bool update; 320 if ((klog_uspace > KLOG_LATENCY) || (ch == '\n')) 321 update = true; 322 else 323 update = false; 324 325 spinlock_unlock(&klog_lock); 326 327 if (update) 336 } 337 338 /* Force notification on newline */ 339 if (ch == '\n') 328 340 klog_update(); 329 341 } -
kernel/generic/src/ddi/ddi.c
r13ecdac9 r1affcdf3 224 224 task_t *task = task_find_by_id(id); 225 225 226 if ((!task) || (!cont ext_check(CONTEXT, task->context))) {226 if ((!task) || (!container_check(CONTAINER, task->container))) { 227 227 /* 228 228 * There is no task with the specified ID -
kernel/generic/src/debug/panic.c
r13ecdac9 r1affcdf3 95 95 printf("\n"); 96 96 97 printf("THE=%p: ", THE); 98 if (THE != NULL) { 99 printf("pe=%" PRIun " thr=%p task=%p cpu=%p as=%p" 100 " magic=%#" PRIx32 "\n", THE->preemption_disabled, 101 THE->thread, THE->task, THE->cpu, THE->as, THE->magic); 102 } else 103 printf("invalid\n"); 104 97 105 if (istate) { 98 106 istate_decode(istate); -
kernel/generic/src/interrupt/interrupt.c
r13ecdac9 r1affcdf3 177 177 (void *) istate_get_pc(istate)); 178 178 179 istate_decode(istate); 179 180 stack_trace_istate(istate); 180 181 … … 205 206 * stack. 206 207 */ 207 return (istate_t *) ((uint8_t *) thread->kstack + THREAD_STACK_SIZE -208 sizeof(istate_t));208 return (istate_t *) ((uint8_t *) 209 thread->kstack + STACK_SIZE - sizeof(istate_t)); 209 210 } 210 211 -
kernel/generic/src/ipc/event.c
r13ecdac9 r1affcdf3 48 48 static event_t events[EVENT_END]; 49 49 50 /** Initialize kernel events. */ 50 /** Initialize kernel events. 51 * 52 */ 51 53 void event_init(void) 52 54 { 53 unsigned int i; 54 55 for (i = 0; i < EVENT_END; i++) { 55 for (unsigned int i = 0; i < EVENT_END; i++) { 56 56 spinlock_initialize(&events[i].lock, "event.lock"); 57 57 events[i].answerbox = NULL; 58 58 events[i].counter = 0; 59 59 events[i].imethod = 0; 60 events[i].masked = false; 61 events[i].unmask_callback = NULL; 60 62 } 61 63 } 62 64 65 /** Unsubscribe kernel events associated with an answerbox 66 * 67 * @param answerbox Answerbox to be unsubscribed. 68 * 69 */ 70 void event_cleanup_answerbox(answerbox_t *answerbox) 71 { 72 for (unsigned int i = 0; i < EVENT_END; i++) { 73 spinlock_lock(&events[i].lock); 74 75 if (events[i].answerbox == answerbox) { 76 events[i].answerbox = NULL; 77 events[i].counter = 0; 78 events[i].imethod = 0; 79 events[i].masked = false; 80 } 81 82 spinlock_unlock(&events[i].lock); 83 } 84 } 85 86 /** Define a callback function for the event unmask event. 87 * 88 * @param evno Event type. 89 * @param callback Callback function to be called when 90 * the event is unmasked. 91 * 92 */ 93 void event_set_unmask_callback(event_type_t evno, event_callback_t callback) 94 { 95 ASSERT(evno < EVENT_END); 96 97 spinlock_lock(&events[evno].lock); 98 events[evno].unmask_callback = callback; 99 spinlock_unlock(&events[evno].lock); 100 } 101 102 /** Send kernel notification event 103 * 104 * @param evno Event type. 105 * @param mask Mask further notifications after a successful 106 * sending. 107 * @param a1 First argument. 108 * @param a2 Second argument. 109 * @param a3 Third argument. 110 * @param a4 Fourth argument. 111 * @param a5 Fifth argument. 112 * 113 * @return EOK if notification was successfully sent. 114 * @return ENOMEM if the notification IPC message failed to allocate. 
115 * @return EBUSY if the notifications of the given type are 116 * currently masked. 117 * @return ENOENT if the notifications of the given type are 118 * currently not subscribed. 119 * 120 */ 121 int event_notify(event_type_t evno, bool mask, sysarg_t a1, sysarg_t a2, 122 sysarg_t a3, sysarg_t a4, sysarg_t a5) 123 { 124 ASSERT(evno < EVENT_END); 125 126 spinlock_lock(&events[evno].lock); 127 128 int ret; 129 130 if (events[evno].answerbox != NULL) { 131 if (!events[evno].masked) { 132 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 133 134 if (call) { 135 call->flags |= IPC_CALL_NOTIF; 136 call->priv = ++events[evno].counter; 137 138 IPC_SET_IMETHOD(call->data, events[evno].imethod); 139 IPC_SET_ARG1(call->data, a1); 140 IPC_SET_ARG2(call->data, a2); 141 IPC_SET_ARG3(call->data, a3); 142 IPC_SET_ARG4(call->data, a4); 143 IPC_SET_ARG5(call->data, a5); 144 145 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 146 list_append(&call->link, &events[evno].answerbox->irq_notifs); 147 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 148 149 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 150 151 if (mask) 152 events[evno].masked = true; 153 154 ret = EOK; 155 } else 156 ret = ENOMEM; 157 } else 158 ret = EBUSY; 159 } else 160 ret = ENOENT; 161 162 spinlock_unlock(&events[evno].lock); 163 164 return ret; 165 } 166 167 /** Subscribe event notifications 168 * 169 * @param evno Event type. 170 * @param imethod IPC interface and method to be used for 171 * the notifications. 172 * @param answerbox Answerbox to send the notifications to. 173 * 174 * @return EOK if the subscription was successful. 175 * @return EEXISTS if the notifications of the given type are 176 * already subscribed. 
177 * 178 */ 63 179 static int event_subscribe(event_type_t evno, sysarg_t imethod, 64 180 answerbox_t *answerbox) 65 181 { 66 if (evno >= EVENT_END) 67 return ELIMIT; 182 ASSERT(evno < EVENT_END); 68 183 69 184 spinlock_lock(&events[evno].lock); … … 75 190 events[evno].imethod = imethod; 76 191 events[evno].counter = 0; 192 events[evno].masked = false; 77 193 res = EOK; 78 194 } else … … 84 200 } 85 201 202 /** Unmask event notifications 203 * 204 * @param evno Event type to unmask. 205 * 206 */ 207 static void event_unmask(event_type_t evno) 208 { 209 ASSERT(evno < EVENT_END); 210 211 spinlock_lock(&events[evno].lock); 212 events[evno].masked = false; 213 event_callback_t callback = events[evno].unmask_callback; 214 spinlock_unlock(&events[evno].lock); 215 216 /* 217 * Check if there is an unmask callback 218 * function defined for this event. 219 */ 220 if (callback != NULL) 221 callback(); 222 } 223 224 /** Event notification syscall wrapper 225 * 226 * @param evno Event type to subscribe. 227 * @param imethod IPC interface and method to be used for 228 * the notifications. 229 * 230 * @return EOK on success. 231 * @return ELIMIT on unknown event type. 232 * @return EEXISTS if the notifications of the given type are 233 * already subscribed. 
234 * 235 */ 86 236 sysarg_t sys_event_subscribe(sysarg_t evno, sysarg_t imethod) 87 237 { 238 if (evno >= EVENT_END) 239 return ELIMIT; 240 88 241 return (sysarg_t) event_subscribe((event_type_t) evno, (sysarg_t) 89 242 imethod, &TASK->answerbox); 90 243 } 91 244 92 bool event_is_subscribed(event_type_t evno) 93 { 94 bool res; 95 96 ASSERT(evno < EVENT_END); 97 98 spinlock_lock(&events[evno].lock); 99 res = events[evno].answerbox != NULL; 100 spinlock_unlock(&events[evno].lock); 101 102 return res; 103 } 104 105 106 void event_cleanup_answerbox(answerbox_t *answerbox) 107 { 108 unsigned int i; 109 110 for (i = 0; i < EVENT_END; i++) { 111 spinlock_lock(&events[i].lock); 112 if (events[i].answerbox == answerbox) { 113 events[i].answerbox = NULL; 114 events[i].counter = 0; 115 events[i].imethod = 0; 116 } 117 spinlock_unlock(&events[i].lock); 118 } 119 } 120 121 void event_notify(event_type_t evno, sysarg_t a1, sysarg_t a2, sysarg_t a3, 122 sysarg_t a4, sysarg_t a5) 123 { 124 ASSERT(evno < EVENT_END); 125 126 spinlock_lock(&events[evno].lock); 127 if (events[evno].answerbox != NULL) { 128 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 129 if (call) { 130 call->flags |= IPC_CALL_NOTIF; 131 call->priv = ++events[evno].counter; 132 IPC_SET_IMETHOD(call->data, events[evno].imethod); 133 IPC_SET_ARG1(call->data, a1); 134 IPC_SET_ARG2(call->data, a2); 135 IPC_SET_ARG3(call->data, a3); 136 IPC_SET_ARG4(call->data, a4); 137 IPC_SET_ARG5(call->data, a5); 138 139 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 140 list_append(&call->link, &events[evno].answerbox->irq_notifs); 141 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 142 143 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); 144 } 145 } 146 spinlock_unlock(&events[evno].lock); 245 /** Event notification unmask syscall wrapper 246 * 247 * Note that currently no tests are performed whether the calling 248 * task is entitled to unmask the notifications. 
However, thanks 249 * to the fact that notification masking is only a performance 250 * optimization, this has probably no security implications. 251 * 252 * @param evno Event type to unmask. 253 * 254 * @return EOK on success. 255 * @return ELIMIT on unknown event type. 256 * 257 */ 258 sysarg_t sys_event_unmask(sysarg_t evno) 259 { 260 if (evno >= EVENT_END) 261 return ELIMIT; 262 263 event_unmask((event_type_t) evno); 264 return EOK; 147 265 } 148 266 -
kernel/generic/src/ipc/ipc.c
r13ecdac9 r1affcdf3 44 44 #include <synch/synch.h> 45 45 #include <ipc/ipc.h> 46 #include <ipc/ipc_methods.h> 46 47 #include <ipc/kbox.h> 47 48 #include <ipc/event.h> -
kernel/generic/src/ipc/irq.c
r13ecdac9 r1affcdf3 174 174 irq->notif_cfg.code = code; 175 175 irq->notif_cfg.counter = 0; 176 irq->driver_as = AS; 176 177 177 178 /* … … 364 365 return IRQ_DECLINE; 365 366 367 #define CMD_MEM_READ(target) \ 368 do { \ 369 void *va = code->cmds[i].addr; \ 370 if (AS != irq->driver_as) \ 371 as_switch(AS, irq->driver_as); \ 372 memcpy_from_uspace(&target, va, (sizeof(target))); \ 373 if (dstarg) \ 374 scratch[dstarg] = target; \ 375 } while(0) 376 377 #define CMD_MEM_WRITE(val) \ 378 do { \ 379 void *va = code->cmds[i].addr; \ 380 if (AS != irq->driver_as) \ 381 as_switch(AS, irq->driver_as); \ 382 memcpy_to_uspace(va, &val, sizeof(val)); \ 383 } while (0) 384 385 as_t *current_as = AS; 366 386 size_t i; 367 387 for (i = 0; i < code->cmdcount; i++) { … … 422 442 } 423 443 break; 444 case CMD_MEM_READ_8: { 445 uint8_t val; 446 CMD_MEM_READ(val); 447 break; 448 } 449 case CMD_MEM_READ_16: { 450 uint16_t val; 451 CMD_MEM_READ(val); 452 break; 453 } 454 case CMD_MEM_READ_32: { 455 uint32_t val; 456 CMD_MEM_READ(val); 457 break; 458 } 459 case CMD_MEM_WRITE_8: { 460 uint8_t val = code->cmds[i].value; 461 CMD_MEM_WRITE(val); 462 break; 463 } 464 case CMD_MEM_WRITE_16: { 465 uint16_t val = code->cmds[i].value; 466 CMD_MEM_WRITE(val); 467 break; 468 } 469 case CMD_MEM_WRITE_32: { 470 uint32_t val = code->cmds[i].value; 471 CMD_MEM_WRITE(val); 472 break; 473 } 474 case CMD_MEM_WRITE_A_8: 475 if (srcarg) { 476 uint8_t val = scratch[srcarg]; 477 CMD_MEM_WRITE(val); 478 } 479 break; 480 case CMD_MEM_WRITE_A_16: 481 if (srcarg) { 482 uint16_t val = scratch[srcarg]; 483 CMD_MEM_WRITE(val); 484 } 485 break; 486 case CMD_MEM_WRITE_A_32: 487 if (srcarg) { 488 uint32_t val = scratch[srcarg]; 489 CMD_MEM_WRITE(val); 490 } 491 break; 424 492 case CMD_BTEST: 425 493 if ((srcarg) && (dstarg)) { … … 435 503 break; 436 504 case CMD_ACCEPT: 505 if (AS != current_as) 506 as_switch(AS, current_as); 437 507 return IRQ_ACCEPT; 438 508 case CMD_DECLINE: 439 509 default: 510 if (AS != 
current_as) 511 as_switch(AS, current_as); 440 512 return IRQ_DECLINE; 441 513 } 442 514 } 515 if (AS != current_as) 516 as_switch(AS, current_as); 443 517 444 518 return IRQ_DECLINE; -
kernel/generic/src/ipc/kbox.c
r13ecdac9 r1affcdf3 37 37 #include <synch/mutex.h> 38 38 #include <ipc/ipc.h> 39 #include <ipc/ipc_methods.h> 39 40 #include <ipc/ipcrsc.h> 40 41 #include <arch.h> … … 169 170 switch (IPC_GET_IMETHOD(call->data)) { 170 171 171 case IPC_M_DEBUG _ALL:172 case IPC_M_DEBUG: 172 173 /* Handle debug call. */ 173 174 udebug_call_receive(call); -
kernel/generic/src/ipc/sysipc.c
r13ecdac9 r1affcdf3 40 40 #include <debug.h> 41 41 #include <ipc/ipc.h> 42 #include <ipc/ipc_methods.h> 42 43 #include <ipc/sysipc.h> 43 44 #include <ipc/irq.h> … … 460 461 } 461 462 #ifdef CONFIG_UDEBUG 462 case IPC_M_DEBUG _ALL:463 case IPC_M_DEBUG: 463 464 return udebug_request_preprocess(call, phone); 464 465 #endif … … 495 496 /* 496 497 * This must be an affirmative answer to IPC_M_DATA_READ 497 * or IPC_M_DEBUG _ALL/UDEBUG_M_MEM_READ...498 * or IPC_M_DEBUG/UDEBUG_M_MEM_READ... 498 499 * 499 500 */ … … 531 532 532 533 switch (IPC_GET_IMETHOD(call->data)) { 533 case IPC_M_DEBUG _ALL:534 case IPC_M_DEBUG: 534 535 return -1; 535 536 default: -
kernel/generic/src/lib/elf.c
r13ecdac9 r1affcdf3 114 114 } 115 115 116 /* Inspect all section headers and proc cess them. */116 /* Inspect all section headers and process them. */ 117 117 for (i = 0; i < header->e_shnum; i++) { 118 118 elf_section_header_t *sechdr = -
kernel/generic/src/lib/memfnc.c
r13ecdac9 r1affcdf3 56 56 void *memset(void *dst, int val, size_t cnt) 57 57 { 58 size_t i; 59 uint8_t *ptr = (uint8_t *) dst; 58 uint8_t *dp = (uint8_t *) dst; 60 59 61 for (i = 0; i < cnt; i++)62 ptr[i]= val;60 while (cnt-- != 0) 61 *dp++ = val; 63 62 64 63 return dst; … … 83 82 84 83 while (cnt-- != 0) 85 84 *dp++ = *sp++; 86 85 87 86 return dst; -
kernel/generic/src/main/main.c
r13ecdac9 r1affcdf3 71 71 #include <mm/as.h> 72 72 #include <mm/slab.h> 73 #include <mm/reserve.h> 73 74 #include <synch/waitq.h> 74 75 #include <synch/futex.h> … … 117 118 #endif 118 119 119 #define CONFIG_STACK_SIZE ((1 << STACK_FRAMES) * STACK_SIZE)120 121 120 /** Main kernel routine for bootstrap CPU. 122 121 * … … 138 137 config.kernel_size = ALIGN_UP(hardcoded_ktext_size + 139 138 hardcoded_kdata_size, PAGE_SIZE); 140 config.stack_size = CONFIG_STACK_SIZE;139 config.stack_size = STACK_SIZE; 141 140 142 141 /* Initialy the stack is placed just after the kernel */ … … 164 163 165 164 context_save(&ctx); 166 context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,167 THREAD_STACK_SIZE);165 context_set(&ctx, FADDR(main_bsp_separated_stack), 166 config.stack_base, STACK_SIZE); 168 167 context_restore(&ctx); 169 168 /* not reached */ … … 217 216 ddi_init(); 218 217 arch_post_mm_init(); 218 reserve_init(); 219 219 arch_pre_smp_init(); 220 220 smp_init(); … … 321 321 context_save(&CPU->saved_context); 322 322 context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), 323 (uintptr_t) CPU->stack, CPU_STACK_SIZE);323 (uintptr_t) CPU->stack, STACK_SIZE); 324 324 context_restore(&CPU->saved_context); 325 325 /* not reached */ -
kernel/generic/src/main/uinit.c
r13ecdac9 r1affcdf3 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Userspace bootstrap thread. 36 36 * 37 37 * This file contains uinit kernel thread wich is used to start every … … 40 40 * @see SYS_THREAD_CREATE 41 41 */ 42 42 43 43 #include <main/uinit.h> 44 44 #include <typedefs.h> … … 48 48 #include <arch.h> 49 49 #include <udebug/udebug.h> 50 51 50 52 51 /** Thread used to bring up userspace thread. … … 58 57 { 59 58 uspace_arg_t uarg; 60 59 61 60 /* 62 61 * So far, we don't have a use for joining userspace threads so we … … 68 67 */ 69 68 thread_detach(THREAD); 70 69 71 70 #ifdef CONFIG_UDEBUG 72 71 udebug_stoppable_end(); … … 78 77 uarg.uspace_thread_function = NULL; 79 78 uarg.uspace_thread_arg = NULL; 80 79 81 80 free((uspace_arg_t *) arg); 82 81 -
kernel/generic/src/main/version.c
r13ecdac9 r1affcdf3 38 38 39 39 static const char *project = "SPARTAN kernel"; 40 static const char *copyright = "Copyright (c) 2001-201 0HelenOS project";40 static const char *copyright = "Copyright (c) 2001-2011 HelenOS project"; 41 41 static const char *release = STRING(RELEASE); 42 42 static const char *name = STRING(NAME); -
kernel/generic/src/mm/as.c
r13ecdac9 r1affcdf3 302 302 * We don't want any area to have conflicts with NULL page. 303 303 */ 304 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))304 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE)) 305 305 return false; 306 306 … … 329 329 mutex_lock(&area->lock); 330 330 331 if (overlaps(addr, count << PAGE_WIDTH,332 area->base, area->pages << PAGE_WIDTH)) {331 if (overlaps(addr, P2SZ(count), area->base, 332 P2SZ(area->pages))) { 333 333 mutex_unlock(&area->lock); 334 334 return false; … … 346 346 mutex_lock(&area->lock); 347 347 348 if (overlaps(addr, count << PAGE_WIDTH,349 area->base, area->pages << PAGE_WIDTH)) {348 if (overlaps(addr, P2SZ(count), area->base, 349 P2SZ(area->pages))) { 350 350 mutex_unlock(&area->lock); 351 351 return false; … … 366 366 mutex_lock(&area->lock); 367 367 368 if (overlaps(addr, count << PAGE_WIDTH,369 area->base, area->pages << PAGE_WIDTH)) {368 if (overlaps(addr, P2SZ(count), area->base, 369 P2SZ(area->pages))) { 370 370 mutex_unlock(&area->lock); 371 371 return false; … … 380 380 */ 381 381 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 382 return !overlaps(addr, count << PAGE_WIDTH, 383 KERNEL_ADDRESS_SPACE_START, 382 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START, 384 383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 385 384 } … … 474 473 475 474 btree_node_t *leaf; 476 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 475 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, 476 &leaf); 477 477 if (area) { 478 478 /* va is the base address of an address space area */ … … 482 482 483 483 /* 484 * Search the leaf node and the righ most record of its left neighbour484 * Search the leaf node and the rightmost record of its left neighbour 485 485 * to find out whether this is a miss or va belongs to an address 486 486 * space area found there. 
… … 494 494 495 495 mutex_lock(&area->lock); 496 496 497 497 if ((area->base <= va) && 498 (va < area->base + (area->pages << PAGE_WIDTH)))498 (va <= area->base + (P2SZ(area->pages) - 1))) 499 499 return area; 500 500 … … 506 506 * Because of its position in the B+tree, it must have base < va. 507 507 */ 508 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 508 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, 509 leaf); 509 510 if (lnode) { 510 511 area = (as_area_t *) lnode->value[lnode->keys - 1]; … … 512 513 mutex_lock(&area->lock); 513 514 514 if (va < area->base + (area->pages << PAGE_WIDTH))515 if (va <= area->base + (P2SZ(area->pages) - 1)) 515 516 return area; 516 517 … … 577 578 578 579 if (pages < area->pages) { 579 uintptr_t start_free = area->base + (pages << PAGE_WIDTH);580 uintptr_t start_free = area->base + P2SZ(pages); 580 581 581 582 /* … … 590 591 */ 591 592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 592 area->base + (pages << PAGE_WIDTH), area->pages - pages);593 area->base + P2SZ(pages), area->pages - pages); 593 594 594 595 /* … … 613 614 size_t i = 0; 614 615 615 if (overlaps(ptr, size << PAGE_WIDTH, area->base,616 pages << PAGE_WIDTH)) {616 if (overlaps(ptr, P2SZ(size), area->base, 617 P2SZ(pages))) { 617 618 618 if (ptr + (size << PAGE_WIDTH) <= start_free) {619 if (ptr + P2SZ(size) <= start_free) { 619 620 /* 620 621 * The whole interval fits … … 647 648 648 649 for (; i < size; i++) { 649 pte_t *pte = page_mapping_find(as, ptr +650 (i << PAGE_WIDTH));650 pte_t *pte = page_mapping_find(as, 651 ptr + P2SZ(i), false); 651 652 652 653 ASSERT(pte); … … 657 658 (area->backend->frame_free)) { 658 659 area->backend->frame_free(area, 659 ptr + (i << PAGE_WIDTH),660 ptr + P2SZ(i), 660 661 PTE_GET_FRAME(pte)); 661 662 } 662 663 663 page_mapping_remove(as, ptr + 664 (i << PAGE_WIDTH)); 664 page_mapping_remove(as, ptr + P2SZ(i)); 665 665 } 666 666 } … … 671 671 */ 672 672 673 
tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),673 tlb_invalidate_pages(as->asid, area->base + P2SZ(pages), 674 674 area->pages - pages); 675 675 676 676 /* 677 * Invalidate software translation caches (e.g. TSB on sparc64). 678 */ 679 as_invalidate_translation_cache(as, area->base + 680 (pages << PAGE_WIDTH), area->pages - pages); 677 * Invalidate software translation caches 678 * (e.g. TSB on sparc64, PHT on ppc32). 679 */ 680 as_invalidate_translation_cache(as, area->base + P2SZ(pages), 681 area->pages - pages); 681 682 tlb_shootdown_finalize(ipl); 682 683 … … 797 798 798 799 for (size = 0; size < (size_t) node->value[i]; size++) { 799 pte_t *pte = 800 page_mapping_find(as, ptr + (size << PAGE_WIDTH));800 pte_t *pte = page_mapping_find(as, 801 ptr + P2SZ(size), false); 801 802 802 803 ASSERT(pte); … … 807 808 (area->backend->frame_free)) { 808 809 area->backend->frame_free(area, 809 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 810 ptr + P2SZ(size), 811 PTE_GET_FRAME(pte)); 810 812 } 811 813 812 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));814 page_mapping_remove(as, ptr + P2SZ(size)); 813 815 } 814 816 } … … 822 824 823 825 /* 824 * Invalidate potential software translation caches (e.g. TSB on825 * sparc64).826 * Invalidate potential software translation caches 827 * (e.g. TSB on sparc64, PHT on ppc32). 
826 828 */ 827 829 as_invalidate_translation_cache(as, area->base, area->pages); … … 897 899 } 898 900 899 size_t src_size = src_area->pages << PAGE_WIDTH;901 size_t src_size = P2SZ(src_area->pages); 900 902 unsigned int src_flags = src_area->flags; 901 903 mem_backend_t *src_backend = src_area->backend; … … 1094 1096 for (cur = area->used_space.leaf_head.next; 1095 1097 cur != &area->used_space.leaf_head; cur = cur->next) { 1096 btree_node_t *node 1097 = list_get_instance(cur, btree_node_t,leaf_link);1098 btree_node_t *node = list_get_instance(cur, btree_node_t, 1099 leaf_link); 1098 1100 btree_key_t i; 1099 1101 … … 1103 1105 1104 1106 for (size = 0; size < (size_t) node->value[i]; size++) { 1105 pte_t *pte = 1106 p age_mapping_find(as, ptr + (size << PAGE_WIDTH));1107 pte_t *pte = page_mapping_find(as, 1108 ptr + P2SZ(size), false); 1107 1109 1108 1110 ASSERT(pte); … … 1113 1115 1114 1116 /* Remove old mapping */ 1115 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));1117 page_mapping_remove(as, ptr + P2SZ(size)); 1116 1118 } 1117 1119 } … … 1125 1127 1126 1128 /* 1127 * Invalidate potential software translation caches (e.g. TSB on1128 * sparc64).1129 * Invalidate potential software translation caches 1130 * (e.g. TSB on sparc64, PHT on ppc32). 
1129 1131 */ 1130 1132 as_invalidate_translation_cache(as, area->base, area->pages); … … 1159 1161 1160 1162 /* Insert the new mapping */ 1161 page_mapping_insert(as, ptr + (size << PAGE_WIDTH),1163 page_mapping_insert(as, ptr + P2SZ(size), 1162 1164 old_frame[frame_idx++], page_flags); 1163 1165 … … 1240 1242 */ 1241 1243 pte_t *pte; 1242 if ((pte = page_mapping_find(AS, page ))) {1244 if ((pte = page_mapping_find(AS, page, false))) { 1243 1245 if (PTE_PRESENT(pte)) { 1244 1246 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || … … 1481 1483 1482 1484 if (src_area) { 1483 size = src_area->pages << PAGE_WIDTH;1485 size = P2SZ(src_area->pages); 1484 1486 mutex_unlock(&src_area->lock); 1485 1487 } else … … 1536 1538 if (page >= right_pg) { 1537 1539 /* Do nothing. */ 1538 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1539 left_cnt << PAGE_WIDTH)) {1540 } else if (overlaps(page, P2SZ(count), left_pg, 1541 P2SZ(left_cnt))) { 1540 1542 /* The interval intersects with the left interval. */ 1541 1543 return false; 1542 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1543 right_cnt << PAGE_WIDTH)) {1544 } else if (overlaps(page, P2SZ(count), right_pg, 1545 P2SZ(right_cnt))) { 1544 1546 /* The interval intersects with the right interval. 
*/ 1545 1547 return false; 1546 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1547 (page + (count << PAGE_WIDTH) == right_pg)) {1548 } else if ((page == left_pg + P2SZ(left_cnt)) && 1549 (page + P2SZ(count) == right_pg)) { 1548 1550 /* 1549 1551 * The interval can be added by merging the two already … … 1553 1555 btree_remove(&area->used_space, right_pg, leaf); 1554 1556 goto success; 1555 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1557 } else if (page == left_pg + P2SZ(left_cnt)) { 1556 1558 /* 1557 1559 * The interval can be added by simply growing the left … … 1560 1562 node->value[node->keys - 1] += count; 1561 1563 goto success; 1562 } else if (page + (count << PAGE_WIDTH) == right_pg) {1564 } else if (page + P2SZ(count) == right_pg) { 1563 1565 /* 1564 1566 * The interval can be addded by simply moving base of … … 1587 1589 */ 1588 1590 1589 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1590 right_cnt << PAGE_WIDTH)) { 1591 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) { 1591 1592 /* The interval intersects with the right interval. */ 1592 1593 return false; 1593 } else if (page + (count << PAGE_WIDTH) == right_pg) {1594 } else if (page + P2SZ(count) == right_pg) { 1594 1595 /* 1595 1596 * The interval can be added by moving the base of the … … 1626 1627 if (page < left_pg) { 1627 1628 /* Do nothing. */ 1628 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1629 left_cnt << PAGE_WIDTH)) {1629 } else if (overlaps(page, P2SZ(count), left_pg, 1630 P2SZ(left_cnt))) { 1630 1631 /* The interval intersects with the left interval. */ 1631 1632 return false; 1632 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1633 right_cnt << PAGE_WIDTH)) {1633 } else if (overlaps(page, P2SZ(count), right_pg, 1634 P2SZ(right_cnt))) { 1634 1635 /* The interval intersects with the right interval. 
*/ 1635 1636 return false; 1636 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1637 (page + (count << PAGE_WIDTH) == right_pg)) {1637 } else if ((page == left_pg + P2SZ(left_cnt)) && 1638 (page + P2SZ(count) == right_pg)) { 1638 1639 /* 1639 1640 * The interval can be added by merging the two already … … 1643 1644 btree_remove(&area->used_space, right_pg, node); 1644 1645 goto success; 1645 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1646 } else if (page == left_pg + P2SZ(left_cnt)) { 1646 1647 /* 1647 1648 * The interval can be added by simply growing the left … … 1650 1651 leaf->value[leaf->keys - 1] += count; 1651 1652 goto success; 1652 } else if (page + (count << PAGE_WIDTH) == right_pg) {1653 } else if (page + P2SZ(count) == right_pg) { 1653 1654 /* 1654 1655 * The interval can be addded by simply moving base of … … 1677 1678 */ 1678 1679 1679 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1680 left_cnt << PAGE_WIDTH)) { 1680 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) { 1681 1681 /* The interval intersects with the left interval. 
*/ 1682 1682 return false; 1683 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {1683 } else if (left_pg + P2SZ(left_cnt) == page) { 1684 1684 /* 1685 1685 * The interval can be added by growing the left … … 1716 1716 */ 1717 1717 1718 if (overlaps(page, count << PAGE_WIDTH, left_pg,1719 left_cnt << PAGE_WIDTH)) {1718 if (overlaps(page, P2SZ(count), left_pg, 1719 P2SZ(left_cnt))) { 1720 1720 /* 1721 1721 * The interval intersects with the left … … 1723 1723 */ 1724 1724 return false; 1725 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1726 right_cnt << PAGE_WIDTH)) {1725 } else if (overlaps(page, P2SZ(count), right_pg, 1726 P2SZ(right_cnt))) { 1727 1727 /* 1728 1728 * The interval intersects with the right … … 1730 1730 */ 1731 1731 return false; 1732 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1733 (page + (count << PAGE_WIDTH) == right_pg)) {1732 } else if ((page == left_pg + P2SZ(left_cnt)) && 1733 (page + P2SZ(count) == right_pg)) { 1734 1734 /* 1735 1735 * The interval can be added by merging the two … … 1739 1739 btree_remove(&area->used_space, right_pg, leaf); 1740 1740 goto success; 1741 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1741 } else if (page == left_pg + P2SZ(left_cnt)) { 1742 1742 /* 1743 1743 * The interval can be added by simply growing … … 1746 1746 leaf->value[i - 1] += count; 1747 1747 goto success; 1748 } else if (page + (count << PAGE_WIDTH) == right_pg) {1748 } else if (page + P2SZ(count) == right_pg) { 1749 1749 /* 1750 1750 * The interval can be addded by simply moving … … 1812 1812 for (i = 0; i < leaf->keys; i++) { 1813 1813 if (leaf->key[i] == page) { 1814 leaf->key[i] += count << PAGE_WIDTH;1814 leaf->key[i] += P2SZ(count); 1815 1815 leaf->value[i] -= count; 1816 1816 goto success; … … 1822 1822 } 1823 1823 1824 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1824 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, 1825 leaf); 1825 1826 if 
((node) && (page < leaf->key[0])) { 1826 1827 uintptr_t left_pg = node->key[node->keys - 1]; 1827 1828 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1828 1829 1829 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1830 count << PAGE_WIDTH)) { 1831 if (page + (count << PAGE_WIDTH) == 1832 left_pg + (left_cnt << PAGE_WIDTH)) { 1830 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1831 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1833 1832 /* 1834 1833 * The interval is contained in the rightmost … … 1839 1838 node->value[node->keys - 1] -= count; 1840 1839 goto success; 1841 } else if (page + (count << PAGE_WIDTH) < 1842 left_pg + (left_cnt << PAGE_WIDTH)) { 1840 } else if (page + P2SZ(count) < 1841 left_pg + P2SZ(left_cnt)) { 1842 size_t new_cnt; 1843 1843 1844 /* 1844 1845 * The interval is contained in the rightmost … … 1848 1849 * new interval. 1849 1850 */ 1850 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1851 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1851 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1852 (page + P2SZ(count))) >> PAGE_WIDTH; 1852 1853 node->value[node->keys - 1] -= count + new_cnt; 1853 1854 btree_insert(&area->used_space, page + 1854 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1855 P2SZ(count), (void *) new_cnt, leaf); 1855 1856 goto success; 1856 1857 } … … 1865 1866 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1866 1867 1867 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1868 count << PAGE_WIDTH)) { 1869 if (page + (count << PAGE_WIDTH) == 1870 left_pg + (left_cnt << PAGE_WIDTH)) { 1868 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1869 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1871 1870 /* 1872 1871 * The interval is contained in the rightmost … … 1876 1875 leaf->value[leaf->keys - 1] -= count; 1877 1876 goto success; 1878 } else if (page + (count << PAGE_WIDTH) < left_pg + 1879 (left_cnt << PAGE_WIDTH)) { 1877 } else if (page + P2SZ(count) < left_pg + 
1878 P2SZ(left_cnt)) { 1879 size_t new_cnt; 1880 1880 1881 /* 1881 1882 * The interval is contained in the rightmost … … 1885 1886 * interval. 1886 1887 */ 1887 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1888 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1888 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1889 (page + P2SZ(count))) >> PAGE_WIDTH; 1889 1890 leaf->value[leaf->keys - 1] -= count + new_cnt; 1890 1891 btree_insert(&area->used_space, page + 1891 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1892 P2SZ(count), (void *) new_cnt, leaf); 1892 1893 goto success; 1893 1894 } … … 1911 1912 * to (i - 1) and i. 1912 1913 */ 1913 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,1914 count << PAGE_WIDTH)) {1915 if (page + (count << PAGE_WIDTH) ==1916 left_pg + (left_cnt << PAGE_WIDTH)) {1914 if (overlaps(left_pg, P2SZ(left_cnt), page, 1915 P2SZ(count))) { 1916 if (page + P2SZ(count) == 1917 left_pg + P2SZ(left_cnt)) { 1917 1918 /* 1918 1919 * The interval is contained in the … … 1923 1924 leaf->value[i - 1] -= count; 1924 1925 goto success; 1925 } else if (page + (count << PAGE_WIDTH) < 1926 left_pg + (left_cnt << PAGE_WIDTH)) { 1926 } else if (page + P2SZ(count) < 1927 left_pg + P2SZ(left_cnt)) { 1928 size_t new_cnt; 1929 1927 1930 /* 1928 1931 * The interval is contained in the … … 1932 1935 * also inserting a new interval. 
1933 1936 */ 1934 size_t new_cnt = ((left_pg + 1935 (left_cnt << PAGE_WIDTH)) - 1936 (page + (count << PAGE_WIDTH))) >> 1937 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1938 (page + P2SZ(count))) >> 1937 1939 PAGE_WIDTH; 1938 1940 leaf->value[i - 1] -= count + new_cnt; 1939 1941 btree_insert(&area->used_space, page + 1940 (count << PAGE_WIDTH), (void *) new_cnt,1942 P2SZ(count), (void *) new_cnt, 1941 1943 leaf); 1942 1944 goto success; … … 2034 2036 btree_key_t i; 2035 2037 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2038 uintptr_t addr; 2039 2036 2040 as_area_t *area = (as_area_t *) node->value[i]; 2037 2041 2038 2042 mutex_lock(&area->lock); 2039 2043 2040 uintptr_t addr = 2041 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2044 addr = ALIGN_UP(area->base + P2SZ(area->pages), 2042 2045 PAGE_SIZE); 2043 2046 … … 2098 2101 2099 2102 info[area_idx].start_addr = area->base; 2100 info[area_idx].size = FRAMES2SIZE(area->pages);2103 info[area_idx].size = P2SZ(area->pages); 2101 2104 info[area_idx].flags = area->flags; 2102 2105 ++area_idx; … … 2136 2139 " (%p - %p)\n", area, (void *) area->base, 2137 2140 area->pages, (void *) area->base, 2138 (void *) (area->base + FRAMES2SIZE(area->pages)));2141 (void *) (area->base + P2SZ(area->pages))); 2139 2142 mutex_unlock(&area->lock); 2140 2143 } -
kernel/generic/src/mm/backend_anon.c
r13ecdac9 r1affcdf3 50 50 #include <typedefs.h> 51 51 #include <align.h> 52 #include <memstr.h> 52 53 #include <arch.h> 53 54 … … 121 122 page_table_lock(area->as, false); 122 123 pte = page_mapping_find(area->as, 123 base + j * PAGE_SIZE);124 base + P2SZ(j), false); 124 125 ASSERT(pte && PTE_VALID(pte) && 125 126 PTE_PRESENT(pte)); 126 127 btree_insert(&area->sh_info->pagemap, 127 (base + j * PAGE_SIZE) - area->base,128 (base + P2SZ(j)) - area->base, 128 129 (void *) PTE_GET_FRAME(pte), NULL); 129 130 page_table_unlock(area->as, false); -
kernel/generic/src/mm/backend_elf.c
r13ecdac9 r1affcdf3 170 170 if (!(area->flags & AS_AREA_WRITE)) 171 171 if (base >= entry->p_vaddr && 172 base + count * PAGE_SIZE<= start_anon)172 base + P2SZ(count) <= start_anon) 173 173 continue; 174 174 … … 182 182 if (!(area->flags & AS_AREA_WRITE)) 183 183 if (base >= entry->p_vaddr && 184 base + (j + 1) * PAGE_SIZE <= 185 start_anon) 184 base + P2SZ(j + 1) <= start_anon) 186 185 continue; 187 186 188 187 page_table_lock(area->as, false); 189 188 pte = page_mapping_find(area->as, 190 base + j * PAGE_SIZE);189 base + P2SZ(j), false); 191 190 ASSERT(pte && PTE_VALID(pte) && 192 191 PTE_PRESENT(pte)); 193 192 btree_insert(&area->sh_info->pagemap, 194 (base + j * PAGE_SIZE) - area->base,193 (base + P2SZ(j)) - area->base, 195 194 (void *) PTE_GET_FRAME(pte), NULL); 196 195 page_table_unlock(area->as, false); -
kernel/generic/src/mm/frame.c
r13ecdac9 r1affcdf3 182 182 * 183 183 */ 184 #ifdef CONFIG_DEBUG 185 NO_TRACE static size_t total_frames_free(void) 184 NO_TRACE static size_t frame_total_free_get_internal(void) 186 185 { 187 186 size_t total = 0; 188 187 size_t i; 188 189 189 for (i = 0; i < zones.count; i++) 190 190 total += zones.info[i].free_count; … … 192 192 return total; 193 193 } 194 #endif /* CONFIG_DEBUG */ 194 195 NO_TRACE size_t frame_total_free_get(void) 196 { 197 size_t total; 198 199 irq_spinlock_lock(&zones.lock, true); 200 total = frame_total_free_get_internal(); 201 irq_spinlock_unlock(&zones.lock, true); 202 203 return total; 204 } 205 195 206 196 207 /** Find a zone with a given frames. … … 840 851 buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link); 841 852 } 842 843 /* "Unreserve" new frames. */844 reserve_free(count);845 853 } else 846 854 zone->frames = NULL; … … 1051 1059 1052 1060 #ifdef CONFIG_DEBUG 1053 size_t avail = total_frames_free();1061 size_t avail = frame_total_free_get_internal(); 1054 1062 #endif 1055 1063 -
kernel/generic/src/mm/page.c
r13ecdac9 r1affcdf3 60 60 61 61 #include <mm/page.h> 62 #include <genarch/mm/page_ht.h> 63 #include <genarch/mm/page_pt.h> 62 64 #include <arch/mm/page.h> 63 65 #include <arch/mm/asid.h> … … 70 72 #include <debug.h> 71 73 #include <arch.h> 74 #include <syscall/copy.h> 75 #include <errno.h> 72 76 73 77 /** Virtual operations for page subsystem. */ … … 108 112 * using flags. Allocate and setup any missing page tables. 109 113 * 110 * @param as Address space to w ich page belongs.114 * @param as Address space to which page belongs. 111 115 * @param page Virtual address of the page to be mapped. 112 116 * @param frame Physical address of memory frame to which the mapping is … … 135 139 * this call visible. 136 140 * 137 * @param as Address space to w ich page belongs.141 * @param as Address space to which page belongs. 138 142 * @param page Virtual address of the page to be demapped. 139 143 * … … 152 156 } 153 157 154 /** Find mapping for virtual page 155 * 156 * Find mapping for virtual page. 157 * 158 * @param as Address space to wich page belongs. 159 * @param page Virtual page. 158 /** Find mapping for virtual page. 159 * 160 * @param as Address space to which page belongs. 161 * @param page Virtual page. 162 * @param nolock True if the page tables need not be locked. 160 163 * 161 164 * @return NULL if there is no such mapping; requested mapping … … 163 166 * 164 167 */ 165 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page )166 { 167 ASSERT( page_table_locked(as));168 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock) 169 { 170 ASSERT(nolock || page_table_locked(as)); 168 171 169 172 ASSERT(page_mapping_operations); 170 173 ASSERT(page_mapping_operations->mapping_find); 171 174 172 return page_mapping_operations->mapping_find(as, page); 175 return page_mapping_operations->mapping_find(as, page, nolock); 176 } 177 178 /** Syscall wrapper for getting mapping of a virtual page. 
179 * 180 * @retval EOK Everything went find, @p uspace_frame and @p uspace_node 181 * contains correct values. 182 * @retval ENOENT Virtual address has no mapping. 183 */ 184 sysarg_t sys_page_find_mapping(uintptr_t virt_address, 185 uintptr_t *uspace_frame) 186 { 187 mutex_lock(&AS->lock); 188 189 pte_t *pte = page_mapping_find(AS, virt_address, false); 190 if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) { 191 mutex_unlock(&AS->lock); 192 193 return (sysarg_t) ENOENT; 194 } 195 196 uintptr_t phys_address = PTE_GET_FRAME(pte); 197 198 mutex_unlock(&AS->lock); 199 200 int rc = copy_to_uspace(uspace_frame, 201 &phys_address, sizeof(phys_address)); 202 if (rc != EOK) { 203 return (sysarg_t) rc; 204 } 205 206 return EOK; 173 207 } 174 208 -
kernel/generic/src/mm/reserve.c
r13ecdac9 r1affcdf3 45 45 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock"); 46 46 static ssize_t reserve = 0; 47 48 /** Initialize memory reservations tracking. 49 * 50 * This function must be called after frame zones are created and merged 51 * and before any address space area is created. 52 */ 53 void reserve_init(void) 54 { 55 reserve = frame_total_free_get(); 56 } 47 57 48 58 /** Try to reserve memory. -
kernel/generic/src/printf/vprintf.c
r13ecdac9 r1affcdf3 41 41 #include <typedefs.h> 42 42 #include <str.h> 43 44 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");45 43 46 44 static int vprintf_str_write(const char *str, size_t size, void *data) … … 93 91 }; 94 92 95 irq_spinlock_lock(&printf_lock, true); 96 int ret = printf_core(fmt, &ps, ap); 97 irq_spinlock_unlock(&printf_lock, true); 98 99 return ret; 93 return printf_core(fmt, &ps, ap); 100 94 } 101 95 -
kernel/generic/src/proc/program.c
r13ecdac9 r1affcdf3 54 54 #include <proc/program.h> 55 55 56 #ifndef LOADED_PROG_STACK_PAGES_NO57 #define LOADED_PROG_STACK_PAGES_NO 158 #endif59 60 56 /** 61 57 * Points to the binary image used as the program loader. All non-initial … … 90 86 91 87 /* 92 * Create the dataaddress space area.88 * Create the stack address space area. 93 89 */ 94 90 as_area_t *area = as_area_create(as, 95 91 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 96 LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,97 AS_AREA_ATTR_NONE,&anon_backend, NULL);92 STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE, 93 &anon_backend, NULL); 98 94 if (!area) 99 95 return ENOMEM; -
kernel/generic/src/proc/scheduler.c
r13ecdac9 r1affcdf3 376 376 context_save(&CPU->saved_context); 377 377 context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), 378 (uintptr_t) CPU->stack, CPU_STACK_SIZE);378 (uintptr_t) CPU->stack, STACK_SIZE); 379 379 context_restore(&CPU->saved_context); 380 380 -
kernel/generic/src/proc/task.c
r13ecdac9 r1affcdf3 190 190 str_cpy(task->name, TASK_NAME_BUFLEN, name); 191 191 192 task->cont ext = CONTEXT;192 task->container = CONTAINER; 193 193 task->capabilities = 0; 194 194 task->ucycles = 0; … … 211 211 212 212 if ((ipc_phone_0) && 213 (cont ext_check(ipc_phone_0->task->context, task->context)))213 (container_check(ipc_phone_0->task->container, task->container))) 214 214 ipc_phone_connect(&task->phones[0], ipc_phone_0); 215 215 … … 534 534 */ 535 535 if (notify) { 536 if (event_is_subscribed(EVENT_FAULT)) { 537 /* Notify the subscriber that a fault occurred. */ 538 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 539 UPPER32(TASK->taskid), (sysarg_t) THREAD); 540 536 /* Notify the subscriber that a fault occurred. */ 537 if (event_notify_3(EVENT_FAULT, false, LOWER32(TASK->taskid), 538 UPPER32(TASK->taskid), (sysarg_t) THREAD) == EOK) { 541 539 #ifdef CONFIG_UDEBUG 542 540 /* Wait for a debugging session. */ … … 586 584 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p" 587 585 " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid, 588 task->name, task->cont ext, task, task->as,586 task->name, task->container, task, task->as, 589 587 ucycles, usuffix, kcycles, ksuffix); 590 588 #endif … … 597 595 else 598 596 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n", 599 task->taskid, task->name, task->cont ext, task, task->as);597 task->taskid, task->name, task->container, task, task->as); 600 598 #endif 601 599 … … 627 625 printf("[id ] [threads] [calls] [callee\n"); 628 626 else 629 printf("[id ] [name ] [ct x] [address ] [as ]"627 printf("[id ] [name ] [ctn] [address ] [as ]" 630 628 " [ucycles ] [kcycles ]\n"); 631 629 #endif … … 636 634 " [callee\n"); 637 635 else 638 printf("[id ] [name ] [ct x] [address ]"636 printf("[id ] [name ] [ctn] [address ]" 639 637 " [as ]\n"); 640 638 #endif -
kernel/generic/src/proc/the.c
r13ecdac9 r1affcdf3 58 58 the->task = NULL; 59 59 the->as = NULL; 60 the->magic = MAGIC; 60 61 } 61 62 … … 70 71 NO_TRACE void the_copy(the_t *src, the_t *dst) 71 72 { 73 ASSERT(src->magic == MAGIC); 72 74 *dst = *src; 73 75 } -
kernel/generic/src/proc/thread.c
r13ecdac9 r1affcdf3 55 55 #include <time/clock.h> 56 56 #include <time/timeout.h> 57 #include <time/delay.h> 57 58 #include <config.h> 58 59 #include <arch/interrupt.h> … … 67 68 #include <syscall/copy.h> 68 69 #include <errno.h> 69 70 71 #ifndef LOADED_PROG_STACK_PAGES_NO72 #define LOADED_PROG_STACK_PAGES_NO 173 #endif74 75 70 76 71 /** Thread states */ … … 300 295 301 296 /* Not needed, but good for debugging */ 302 memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);297 memsetb(thread->kstack, STACK_SIZE, 0); 303 298 304 299 irq_spinlock_lock(&tidlock, true); … … 308 303 context_save(&thread->saved_context); 309 304 context_set(&thread->saved_context, FADDR(cushion), 310 (uintptr_t) thread->kstack, THREAD_STACK_SIZE);305 (uintptr_t) thread->kstack, STACK_SIZE); 311 306 312 307 the_initialize((the_t *) thread->kstack); … … 605 600 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n", 606 601 thread->tid, name, thread, thread_states[thread->state], 607 thread->task, thread->task->cont ext);602 thread->task, thread->task->container); 608 603 #endif 609 604 … … 617 612 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n", 618 613 thread->tid, name, thread, thread_states[thread->state], 619 thread->task, thread->task->cont ext);614 thread->task, thread->task->container); 620 615 #endif 621 616 … … 658 653 else 659 654 printf("[id ] [name ] [address ] [state ] [task ]" 660 " [ct x]\n");655 " [ctn]\n"); 661 656 #endif 662 657 … … 667 662 } else 668 663 printf("[id ] [name ] [address ] [state ]" 669 " [task ] [ct x]\n");664 " [task ] [ctn]\n"); 670 665 #endif 671 666 … … 918 913 } 919 914 915 sysarg_t sys_thread_udelay(uint32_t usec) 916 { 917 delay(usec); 918 return 0; 919 } 920 920 921 /** @} 921 922 */ -
kernel/generic/src/security/cap.c
r13ecdac9 r1affcdf3 92 92 task_t *task = task_find_by_id(taskid); 93 93 94 if ((!task) || (!cont ext_check(CONTEXT, task->context))) {94 if ((!task) || (!container_check(CONTAINER, task->container))) { 95 95 irq_spinlock_unlock(&tasks_lock, true); 96 96 return (sysarg_t) ENOENT; … … 121 121 122 122 task_t *task = task_find_by_id(taskid); 123 if ((!task) || (!cont ext_check(CONTEXT, task->context))) {123 if ((!task) || (!container_check(CONTAINER, task->container))) { 124 124 irq_spinlock_unlock(&tasks_lock, true); 125 125 return (sysarg_t) ENOENT; -
kernel/generic/src/synch/futex.c
r13ecdac9 r1affcdf3 119 119 */ 120 120 page_table_lock(AS, true); 121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE) );121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 122 122 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 123 123 page_table_unlock(AS, true); … … 155 155 */ 156 156 page_table_lock(AS, true); 157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE) );157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 158 158 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 159 159 page_table_unlock(AS, true); -
kernel/generic/src/synch/spinlock.c
r13ecdac9 r1affcdf3 96 96 * run in a simulator) that caused problems with both 97 97 * printf_lock and the framebuffer lock. 98 *99 98 */ 100 99 if (lock->name[0] == '*') -
kernel/generic/src/syscall/syscall.c
r13ecdac9 r1affcdf3 41 41 #include <proc/program.h> 42 42 #include <mm/as.h> 43 #include <mm/page.h> 43 44 #include <print.h> 44 45 #include <arch.h> … … 118 119 119 120 syshandler_t syscall_table[SYSCALL_END] = { 121 /* System management syscalls. */ 120 122 (syshandler_t) sys_klog, 121 123 (syshandler_t) sys_tls_set, … … 126 128 (syshandler_t) sys_thread_get_id, 127 129 (syshandler_t) sys_thread_usleep, 130 (syshandler_t) sys_thread_udelay, 128 131 129 132 (syshandler_t) sys_task_get_id, … … 145 148 (syshandler_t) sys_as_get_unmapped_area, 146 149 150 /* Page mapping related syscalls. */ 151 (syshandler_t) sys_page_find_mapping, 152 147 153 /* IPC related syscalls. */ 148 154 (syshandler_t) sys_ipc_call_sync_fast, … … 161 167 /* Event notification syscalls. */ 162 168 (syshandler_t) sys_event_subscribe, 169 (syshandler_t) sys_event_unmask, 163 170 164 171 /* Capabilities related syscalls. */ … … 173 180 (syshandler_t) sys_unregister_irq, 174 181 175 /* Sysinfo syscalls */182 /* Sysinfo syscalls. */ 176 183 (syshandler_t) sys_sysinfo_get_tag, 177 184 (syshandler_t) sys_sysinfo_get_value,
Note:
See TracChangeset
for help on using the changeset viewer.