Changeset 00aece0 in mainline for kernel/generic
- Timestamp:
- 2012-02-18T16:47:38Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 4449c6c
- Parents:
- bd5f3b7 (diff), f943dd3 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/generic
- Files:
- 4 added
- 41 edited
- include/adt/list.h (modified) (4 diffs)
- include/align.h (modified) (2 diffs)
- include/config.h (modified) (2 diffs)
- include/ddi/ddi.h (modified) (1 diff)
- include/ddi/irq.h (modified) (1 diff)
- include/ipc/ipc.h (modified) (1 diff)
- include/ipc/irq.h (modified) (1 diff)
- include/ipc/sysipc.h (modified) (1 diff)
- include/lib/ra.h (added)
- include/lib/rd.h (modified) (1 diff)
- include/macros.h (modified) (1 diff)
- include/mm/as.h (modified) (2 diffs)
- include/mm/frame.h (modified) (5 diffs)
- include/mm/km.h (added)
- include/mm/page.h (modified) (2 diffs)
- include/synch/smc.h (modified) (1 diff)
- include/syscall/syscall.h (modified) (1 diff)
- include/sysinfo/sysinfo.h (modified) (2 diffs)
- include/typedefs.h (modified) (2 diffs)
- src/adt/btree.c (modified) (7 diffs)
- src/adt/list.c (modified) (2 diffs)
- src/console/cmd.c (modified) (2 diffs)
- src/cpu/cpu.c (modified) (1 diff)
- src/ddi/ddi.c (modified) (11 diffs)
- src/ipc/irq.c (modified) (9 diffs)
- src/ipc/sysipc.c (modified) (3 diffs)
- src/lib/elf.c (modified) (1 diff)
- src/lib/ra.c (added)
- src/lib/rd.c (modified) (2 diffs)
- src/main/kinit.c (modified) (4 diffs)
- src/main/main.c (modified) (7 diffs)
- src/mm/as.c (modified) (12 diffs)
- src/mm/backend_anon.c (modified) (7 diffs)
- src/mm/backend_elf.c (modified) (12 diffs)
- src/mm/frame.c (modified) (23 diffs)
- src/mm/km.c (added)
- src/mm/page.c (modified) (4 diffs)
- src/mm/reserve.c (modified) (5 diffs)
- src/mm/slab.c (modified) (1 diff)
- src/printf/printf_core.c (modified) (5 diffs)
- src/proc/program.c (modified) (1 diff)
- src/proc/thread.c (modified) (1 diff)
- src/syscall/copy.c (modified) (2 diffs)
- src/syscall/syscall.c (modified) (2 diffs)
- src/sysinfo/sysinfo.c (modified) (1 diff)
kernel/generic/include/adt/list.h
 #define assert_link_not_used(link) \
-	ASSERT((link)->prev == NULL && (link)->next == NULL)
+	ASSERT(((link)->prev == NULL) && ((link)->next == NULL))

 /** Initialize doubly-linked circular list link
…
 NO_TRACE static inline void list_remove(link_t *link)
 {
-	link->next->prev = link->prev;
-	link->prev->next = link->next;
+	if ((link->prev != NULL) && (link->next != NULL)) {
+		link->next->prev = link->prev;
+		link->prev->next = link->next;
+	}
+
 	link_initialize(link);
 }
…
-NO_TRACE static inline int list_empty(list_t *list)
+NO_TRACE static inline int list_empty(const list_t *list)
 {
 	return (list->head.next == &list->head);
…
-static inline link_t *list_first(list_t *list)
+static inline link_t *list_first(const list_t *list)
 {
 	return ((list->head.next == &list->head) ? NULL : list->head.next);
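The hardened list_remove() above is idempotent: link_initialize() resets both pointers to NULL, so removing an already unlinked link degenerates to a no-op instead of dereferencing NULL. A minimal stand-alone sketch of the new behavior (with local stand-ins for the kernel's link_t and helpers, not the real headers):

    #include <stddef.h>

    typedef struct link {
    	struct link *prev;
    	struct link *next;
    } link_t;

    static void link_initialize(link_t *link)
    {
    	link->prev = NULL;
    	link->next = NULL;
    }

    /* Mirrors the changed kernel code: unlink only when actually linked. */
    static void list_remove(link_t *link)
    {
    	if ((link->prev != NULL) && (link->next != NULL)) {
    		link->next->prev = link->prev;
    		link->prev->next = link->next;
    	}
    	link_initialize(link);
    }

    int main(void)
    {
    	link_t a;
    	link_initialize(&a);
    	list_remove(&a);	/* Would have crashed before this change. */
    	list_remove(&a);	/* Safe to repeat. */
    	return 0;
    }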
kernel/generic/include/align.h
  *
  * @param s Address or size to be aligned.
- * @param a Size of alignment, must be power of 2.
+ * @param a Size of alignment, must be a power of 2.
  */
 #define ALIGN_DOWN(s, a)  ((s) & ~((a) - 1))
…
  * @param s Address or size to be aligned.
- * @param a Size of alignment, must be power of 2.
+ * @param a Size of alignment, must be a power of 2.
  */
 #define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((a) - 1))
+
+/** Check alignment.
+ *
+ * @param s Address or size to be checked for alignment.
+ * @param a Size of alignment, must be a power of 2.
+ */
+#define IS_ALIGNED(s, a)  (ALIGN_UP((s), (a)) == (s))
 
 #endif
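The new IS_ALIGNED() follows directly from ALIGN_UP(): an address is aligned exactly when rounding it up changes nothing. A quick check of the arithmetic with a 4 KiB alignment (macros copied from the header):

    #include <assert.h>

    #define ALIGN_DOWN(s, a)  ((s) & ~((a) - 1))
    #define ALIGN_UP(s, a)    (((s) + ((a) - 1)) & ~((a) - 1))
    #define IS_ALIGNED(s, a)  (ALIGN_UP((s), (a)) == (s))

    int main(void)
    {
    	assert(ALIGN_DOWN(0x1234, 0x1000) == 0x1000);
    	assert(ALIGN_UP(0x1234, 0x1000) == 0x2000);
    	assert(IS_ALIGNED(0x2000, 0x1000));
    	assert(!IS_ALIGNED(0x1234, 0x1000));
    	return 0;
    }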
kernel/generic/include/config.h
 typedef struct {
-	uintptr_t addr;
+	uintptr_t paddr;
 	size_t size;
 	char name[CONFIG_TASK_NAME_BUFLEN];
…
 typedef struct {
-	unsigned int cpu_count;      /**< Number of processors detected. */
-	volatile size_t cpu_active;  /**< Number of processors that are up and running. */
+	/** Number of processors detected. */
+	unsigned int cpu_count;
+	/** Number of processors that are up and running. */
+	volatile size_t cpu_active;
 
 	uintptr_t base;
-	size_t kernel_size;          /**< Size of memory in bytes taken by kernel and stack */
+	/** Size of memory in bytes taken by kernel and stack. */
+	size_t kernel_size;
 
-	uintptr_t stack_base;        /**< Base address of initial stack */
-	size_t stack_size;           /**< Size of initial stack */
+	/** Base address of initial stack. */
+	uintptr_t stack_base;
+	/** Size of initial stack. */
+	size_t stack_size;
+
+	bool identity_configured;
+	/** Base address of the kernel identity mapped memory. */
+	uintptr_t identity_base;
+	/** Size of the kernel identity mapped memory. */
+	size_t identity_size;
+
+	bool non_identity_configured;
+
+	/** End of physical memory. */
+	uint64_t physmem_end;
 } config_t;
kernel/generic/include/ddi/ddi.h
 extern void ddi_parea_register(parea_t *);
 
-extern sysarg_t sys_physmem_map(sysarg_t, sysarg_t, sysarg_t, sysarg_t);
+extern sysarg_t sys_physmem_map(uintptr_t, size_t, unsigned int, void *,
+    uintptr_t);
+extern sysarg_t sys_physmem_unmap(uintptr_t);
+
+extern sysarg_t sys_dmamem_map(size_t, unsigned int, unsigned int, void *,
+    void *, uintptr_t);
+extern sysarg_t sys_dmamem_unmap(uintptr_t, size_t, unsigned int);
+
 extern sysarg_t sys_iospace_enable(ddi_ioarg_t *);
+extern sysarg_t sys_iospace_disable(ddi_ioarg_t *);
 
 /*
kernel/generic/include/ddi/irq.h
 	/** Notification configuration structure. */
 	ipc_notif_cfg_t notif_cfg;
-
-	as_t *driver_as;
 } irq_t;
kernel/generic/include/ipc/ipc.h
 #define KERN_IPC_H_
 
-#include <abi/ipc/ipc.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <synch/waitq.h>
+#include <abi/ipc/ipc.h>
+#include <abi/proc/task.h>
 #include <typedefs.h>
 
-#define IPC_MAX_PHONES  32
+#define IPC_MAX_PHONES  64
 
 struct answerbox;
kernel/generic/include/ipc/irq.h
 #define KERN_IPC_IRQ_H_
 
-/** Maximum length of IPC IRQ program */
+/** Maximum number of IPC IRQ programmed I/O ranges. */
+#define IRQ_MAX_RANGE_COUNT  8
+
+/** Maximum length of IPC IRQ program. */
 #define IRQ_MAX_PROG_SIZE  20
kernel/generic/include/ipc/sysipc.h
     unsigned int);
 extern sysarg_t sys_ipc_hangup(sysarg_t);
-extern sysarg_t sys_register_irq(inr_t, devno_t, sysarg_t, irq_code_t *);
-extern sysarg_t sys_unregister_irq(inr_t, devno_t);
+extern sysarg_t sys_irq_register(inr_t, devno_t, sysarg_t, irq_code_t *);
+extern sysarg_t sys_irq_unregister(inr_t, devno_t);
 
 #ifdef __32_BITS__
kernel/generic/include/lib/rd.h
 #include <typedefs.h>
 
-/**
- * RAM disk version
- */
-#define RD_VERSION  1
-
-/**
- * RAM disk magic number
- */
-#define RD_MAGIC_SIZE  4
-#define RD_MAG0  'H'
-#define RD_MAG1  'O'
-#define RD_MAG2  'R'
-#define RD_MAG3  'D'
-
-/**
- * RAM disk data encoding types
- */
-#define RD_DATA_NONE  0
-#define RD_DATA_LSB   1  /* Least significant byte first (little endian) */
-#define RD_DATA_MSB   2  /* Most signigicant byte first (big endian) */
-
-/**
- * RAM disk error return codes
- */
-#define RE_OK           0  /* No error */
-#define RE_INVALID      1  /* Invalid RAM disk image */
-#define RE_UNSUPPORTED  2  /* Non-supported image (e.g. wrong version) */
-
-/** RAM disk header */
-struct rd_header {
-	uint8_t magic[RD_MAGIC_SIZE];
-	uint8_t version;
-	uint8_t data_type;
-	uint32_t header_size;
-	uint64_t data_size;
-} __attribute__ ((packed));
-
-typedef struct rd_header rd_header_t;
-
-extern int init_rd(rd_header_t *addr, size_t size);
+extern void init_rd(void *, size_t);
 
 #endif
kernel/generic/include/macros.h
     uint64_t sz2)
 {
-	uint64_t e1 = s1 + sz1;
-	uint64_t e2 = s2 + sz2;
-
+	uint64_t e1;
+	uint64_t e2;
+
+	/* Handle the two corner cases when either sz1 or sz2 are zero. */
+	if (sz1 == 0)
+		return (s1 == s2) && (sz2 == 0);
+	e1 = s1 + sz1 - 1;
+	if (sz2 == 0)
+		return (s1 <= s2) && (s2 <= e1);
+	e2 = s2 + sz2 - 1;
+
+	/* e1 and e2 are end addresses, the sum is immune to overflow */
 	return ((s1 <= s2) && (e1 >= e2));
 }
 
 #endif /* __ASM__ */
+
+#define ispwr2(x)  (((x) & ((x) - 1)) == 0)
 
 #define isdigit(d)  (((d) >= '0') && ((d) <= '9'))
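The corner cases matter because computing an end address as s + sz wraps around at the top of the address space; once the zero-size cases are handled, s + sz - 1 is a valid inclusive end address and the comparison stays meaningful. A self-contained check of the fixed containment predicate (used as iswithin() by src/ipc/irq.c later in this changeset):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* The fixed predicate: true iff [s2, s2 + sz2) lies within [s1, s1 + sz1). */
    static bool iswithin(uint64_t s1, uint64_t sz1, uint64_t s2, uint64_t sz2)
    {
    	uint64_t e1;
    	uint64_t e2;

    	if (sz1 == 0)
    		return (s1 == s2) && (sz2 == 0);
    	e1 = s1 + sz1 - 1;
    	if (sz2 == 0)
    		return (s1 <= s2) && (s2 <= e1);
    	e2 = s2 + sz2 - 1;

    	return ((s1 <= s2) && (e1 >= e2));
    }

    int main(void)
    {
    	/* A range ending exactly at 2^64: the old e1 = s1 + sz1 wrapped to 0,
    	 * so the old check was always false for such ranges. */
    	assert(iswithin(UINT64_MAX - 0xfff, 0x1000, UINT64_MAX - 0xff, 0x100));
    	assert(!iswithin(0x1000, 0x1000, 0x1800, 0x1000));
    	return 0;
    }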
kernel/generic/include/mm/as.h
 extern int as_page_fault(uintptr_t, pf_access_t, istate_t *);
 
-extern as_area_t *as_area_create(as_t *, unsigned int, size_t, uintptr_t,
-    unsigned int, mem_backend_t *, mem_backend_data_t *);
+extern as_area_t *as_area_create(as_t *, unsigned int, size_t, unsigned int,
+    mem_backend_t *, mem_backend_data_t *, uintptr_t *, uintptr_t);
 extern int as_area_destroy(as_t *, uintptr_t);
 extern int as_area_resize(as_t *, uintptr_t, size_t, unsigned int);
-extern int as_area_share(as_t *, uintptr_t, size_t, as_t *, uintptr_t,
-    unsigned int);
+extern int as_area_share(as_t *, uintptr_t, size_t, as_t *, unsigned int,
+    uintptr_t *, uintptr_t);
 extern int as_area_change_flags(as_t *, unsigned int, uintptr_t);
…
 /* Address space area related syscalls. */
-extern sysarg_t sys_as_area_create(uintptr_t, size_t, unsigned int);
+extern sysarg_t sys_as_area_create(uintptr_t, size_t, unsigned int, uintptr_t);
 extern sysarg_t sys_as_area_resize(uintptr_t, size_t, unsigned int);
 extern sysarg_t sys_as_area_change_flags(uintptr_t, unsigned int);
 extern sysarg_t sys_as_area_destroy(uintptr_t);
-extern sysarg_t sys_as_get_unmapped_area(uintptr_t, size_t);
 
 /* Introspection functions. */
kernel/generic/include/mm/frame.h
 typedef uint8_t frame_flags_t;
 
+#define FRAME_NONE        0x0
 /** Convert the frame address to kernel VA. */
 #define FRAME_KA          0x1
…
 /** Do not reserve / unreserve memory. */
 #define FRAME_NO_RESERVE  0x8
+/** Allocate a frame which can be identity-mapped. */
+#define FRAME_LOWMEM      0x10
+/** Allocate a frame which cannot be identity-mapped. */
+#define FRAME_HIGHMEM     0x20
 
 typedef uint8_t zone_flags_t;
 
+#define ZONE_NONE       0x0
 /** Available zone (free for allocation) */
-#define ZONE_AVAILABLE  0x0
+#define ZONE_AVAILABLE  0x1
 /** Zone is reserved (not available for allocation) */
-#define ZONE_RESERVED   0x8
+#define ZONE_RESERVED   0x2
 /** Zone is used by firmware (not available for allocation) */
-#define ZONE_FIRMWARE   0x10
+#define ZONE_FIRMWARE   0x4
+/** Zone contains memory that can be identity-mapped */
+#define ZONE_LOWMEM     0x8
+/** Zone contains memory that cannot be identity-mapped */
+#define ZONE_HIGHMEM    0x10
 
-/** Currently there is no equivalent zone flags
-    for frame flags */
-#define FRAME_TO_ZONE_FLAGS(frame_flags)  0
+/** Mask of zone bits that must be matched exactly. */
+#define ZONE_EF_MASK  0x7
+
+#define FRAME_TO_ZONE_FLAGS(ff) \
+	((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
+	    (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \
+	    ZONE_LOWMEM /* | ZONE_HIGHMEM */)) | \
+	    ZONE_AVAILABLE)
+
+#define ZONE_FLAGS_MATCH(zf, f) \
+	(((((zf) & ZONE_EF_MASK)) == ((f) & ZONE_EF_MASK)) && \
+	    (((zf) & ~ZONE_EF_MASK) & (f)))
 
 typedef struct {
 	size_t refcount;      /**< Tracking of shared frames */
-	uint8_t buddy_order;  /**< Buddy system block order */
 	link_t buddy_link;    /**< Link to the next free block inside
 	                           one order */
 	void *parent;         /**< If allocated by slab, this points there */
+	uint8_t buddy_order;  /**< Buddy system block order */
 } frame_t;
…
 }
 
-NO_TRACE static inline bool zone_flags_available(zone_flags_t flags)
-{
-	return ((flags & (ZONE_RESERVED | ZONE_FIRMWARE)) == 0);
-}
-
 #define IS_BUDDY_ORDER_OK(index, order) \
     ((~(((sysarg_t) -1) << (order)) & (index)) == 0)
…
 extern void frame_init(void);
+extern bool frame_adjust_zone_bounds(bool, uintptr_t *, size_t *);
 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
 extern void *frame_alloc(uint8_t, frame_flags_t);
…
 extern void frame_mark_unavailable(pfn_t, size_t);
 extern size_t zone_conf_size(size_t);
+extern pfn_t zone_external_conf_alloc(size_t);
 extern bool zone_merge(size_t, size_t);
 extern void zone_merge_all(void);
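FRAME_TO_ZONE_FLAGS() turns an allocation request into zone search criteria, and ZONE_FLAGS_MATCH() requires the exact-match bits (available/reserved/firmware, ZONE_EF_MASK) to agree while at least one placement bit (low/high memory) is shared. A quick sanity check of the bit arithmetic, with the constants copied from the header:

    #include <assert.h>

    #define ZONE_AVAILABLE  0x1
    #define ZONE_RESERVED   0x2
    #define ZONE_FIRMWARE   0x4
    #define ZONE_LOWMEM     0x8
    #define ZONE_HIGHMEM    0x10
    #define ZONE_EF_MASK    0x7

    #define ZONE_FLAGS_MATCH(zf, f) \
    	(((((zf) & ZONE_EF_MASK)) == ((f) & ZONE_EF_MASK)) && \
    	    (((zf) & ~ZONE_EF_MASK) & (f)))

    int main(void)
    {
    	/* A low-memory request matches an available low-memory zone... */
    	assert(ZONE_FLAGS_MATCH(ZONE_AVAILABLE | ZONE_LOWMEM,
    	    ZONE_AVAILABLE | ZONE_LOWMEM));
    	/* ...but not a reserved zone (exact-match bits differ)... */
    	assert(!ZONE_FLAGS_MATCH(ZONE_RESERVED | ZONE_LOWMEM,
    	    ZONE_AVAILABLE | ZONE_LOWMEM));
    	/* ...and not a high-memory zone (no shared placement bit). */
    	assert(!ZONE_FLAGS_MATCH(ZONE_AVAILABLE | ZONE_HIGHMEM,
    	    ZONE_AVAILABLE | ZONE_LOWMEM));
    	return 0;
    }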
kernel/generic/include/mm/page.h
 	void (* mapping_remove)(as_t *, uintptr_t);
 	pte_t *(* mapping_find)(as_t *, uintptr_t, bool);
+	void (* mapping_make_global)(uintptr_t, size_t);
 } page_mapping_operations_t;
…
 extern void page_mapping_remove(as_t *, uintptr_t);
 extern pte_t *page_mapping_find(as_t *, uintptr_t, bool);
+extern void page_mapping_make_global(uintptr_t, size_t);
 extern pte_t *page_table_create(unsigned int);
 extern void page_table_destroy(pte_t *);
-extern void map_structure(uintptr_t, size_t);
 extern uintptr_t hw_map(uintptr_t, size_t);
-
-extern sysarg_t sys_page_find_mapping(uintptr_t, uintptr_t *);
+extern int page_find_mapping(uintptr_t, void **);
+extern sysarg_t sys_page_find_mapping(uintptr_t, void *);
 
 #endif
kernel/generic/include/synch/smc.h
 #define KERN_SMC_H_
 
-extern sysarg_t sys_smc_coherence(uintptr_t va, size_t size);
+extern sysarg_t sys_smc_coherence(uintptr_t, size_t);
 
 #endif
kernel/generic/include/syscall/syscall.h
 extern sysarg_t syscall_handler(sysarg_t, sysarg_t, sysarg_t, sysarg_t,
     sysarg_t, sysarg_t, sysarg_t);
-extern sysarg_t sys_tls_set(sysarg_t);
+extern sysarg_t sys_tls_set(uintptr_t);
 
 #endif
kernel/generic/include/sysinfo/sysinfo.h
 #include <typedefs.h>
 #include <str.h>
+#include <abi/sysinfo.h>
 
 /** Framebuffer info exported flags */
 extern bool fb_exported;
-
-/** Item value type
- *
- */
-typedef enum {
-	SYSINFO_VAL_UNDEFINED = 0,     /**< Undefined value */
-	SYSINFO_VAL_VAL = 1,           /**< Constant numeric value */
-	SYSINFO_VAL_DATA = 2,          /**< Constant binary data */
-	SYSINFO_VAL_FUNCTION_VAL = 3,  /**< Generated numeric value */
-	SYSINFO_VAL_FUNCTION_DATA = 4  /**< Generated binary data */
-} sysinfo_item_val_type_t;
 
 /** Subtree type
…
 extern void sysinfo_dump(sysinfo_item_t *);
 
-extern sysarg_t sys_sysinfo_get_tag(void *, size_t);
+extern sysarg_t sys_sysinfo_get_val_type(void *, size_t);
 extern sysarg_t sys_sysinfo_get_value(void *, size_t, void *);
 extern sysarg_t sys_sysinfo_get_data_size(void *, size_t, void *);
kernel/generic/include/typedefs.h
 #include <arch/common.h>
 #include <arch/types.h>
+#include <abi/bool.h>
 
 #define NULL  ((void *) 0)
…
 typedef void (* function)();
 
-typedef uint8_t bool;
-typedef uint64_t thread_id_t;
-typedef uint64_t task_id_t;
 typedef uint32_t container_id_t;
kernel/generic/src/adt/btree.c
  *
  * The B+tree has the following properties:
- * @li it is a ballanced 3-4-5 tree (i.e. BTREE_M = 5)
+ * @li it is a balanced 3-4-5 tree (i.e. BTREE_M = 5)
  * @li values (i.e. pointers to values) are stored only in leaves
  * @li leaves are linked in a list
  *
- * Be carefull when using these trees. They need to allocate
+ * Be careful when using these trees. They need to allocate
  * and deallocate memory for their index nodes and as such
  * can sleep.
…
  * also makes use of this feature.
  *
- * @param node B-tree node into wich the new key is to be inserted.
+ * @param node B-tree node into which the new key is to be inserted.
  * @param key The key to be inserted.
  * @param value Pointer to value to be inserted.
…
  * This feature is used during insert by right rotation.
  *
- * @param node B-tree node into wich the new key is to be inserted.
+ * @param node B-tree node into which the new key is to be inserted.
  * @param key The key to be inserted.
  * @param value Pointer to value to be inserted.
…
 	if (rnode->keys < BTREE_MAX_KEYS) {
 		/*
-		 * The rotaion can be done. The right sibling has free space.
+		 * The rotation can be done. The right sibling has free space.
 		 */
 		node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree);
…
  * the median will be copied there.
  *
- * @param node B-tree node wich is going to be split.
+ * @param node B-tree node which is going to be split.
  * @param key The key to be inserted.
  * @param value Pointer to the value to be inserted.
…
 	if (node->keys < BTREE_MAX_KEYS) {
 		/*
-		 * Node conatins enough space, the key can be stored immediately.
+		 * Node contains enough space, the key can be stored immediately.
 		 */
 		node_insert_key_and_rsubtree(node, key, value, rsubtree);
…
 	/*
-	 * The key can be immediatelly removed.
+	 * The key can be immediately removed.
 	 *
 	 * Note that the right subtree is removed because when
kernel/generic/src/adt/list.c
 /**
  * @file
- * @brief Functions completing doubly linked circular list implementaion.
+ * @brief Functions completing doubly linked circular list implementation.
  *
  * This file contains some of the functions implementing doubly linked circular lists.
…
  * @param list List to look in.
  *
- * @return true if link is contained in head, false otherwise.
+ * @return true if link is contained in list, false otherwise.
  *
  */
kernel/generic/src/console/cmd.c
 	/* Execute the test */
 	test_quiet = true;
-	const char *ret = test->entry();
+	const char *test_ret = test->entry();
 
 	/* Update and read thread accounting */
…
 	irq_spinlock_unlock(&TASK->lock, true);
 
-	if (ret != NULL) {
-		printf("%s\n", ret);
+	if (test_ret != NULL) {
+		printf("%s\n", test_ret);
 		ret = false;
 		break;
kernel/generic/src/cpu/cpu.c
 	for (i = 0; i < config.cpu_count; i++) {
 		cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-		    FRAME_KA | FRAME_ATOMIC);
+		    FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
 		cpus[i].id = i;
kernel/generic/src/ddi/ddi.c
 #include <mm/frame.h>
 #include <mm/as.h>
+#include <mm/page.h>
 #include <synch/mutex.h>
 #include <syscall/copy.h>
…
 #include <errno.h>
 #include <trace.h>
+#include <bitops.h>
 
 /** This lock protects the parea_btree. */
…
 /** Map piece of physical memory into virtual address space of current task.
  *
- * @param pf Physical address of the starting frame.
- * @param vp Virtual address of the starting page.
+ * @param phys Physical address of the starting frame.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
- *
- * @return 0 on success, EPERM if the caller lacks capabilities to use this
- *         syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there
- *         is no task matching the specified ID or the physical address space
- *         is not enabled for mapping and ENOMEM if there was a problem in
- *         creating address space area.
- *
- */
-NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages,
-    unsigned int flags)
+ * @param virt Virtual address of the starting page.
+ * @param bound Lowest virtual address bound.
+ *
+ * @return EOK on success.
+ * @return EPERM if the caller lacks capabilities to use this syscall.
+ * @return EBADMEM if phys is not page aligned.
+ * @return ENOENT if there is no task matching the specified ID or
+ *         the physical address space is not enabled for mapping.
+ * @return ENOMEM if there was a problem in creating address space area.
+ *
+ */
+NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
+    unsigned int flags, uintptr_t *virt, uintptr_t bound)
 {
 	ASSERT(TASK);
 
-	if ((pf % FRAME_SIZE) != 0)
-		return EBADMEM;
-
-	if ((vp % PAGE_SIZE) != 0)
+	if ((phys % FRAME_SIZE) != 0)
 		return EBADMEM;
…
 	mem_backend_data_t backend_data;
-	backend_data.base = pf;
+	backend_data.base = phys;
 	backend_data.frames = pages;
…
 	btree_node_t *nodep;
 	parea_t *parea = (parea_t *) btree_search(&parea_btree,
-	    (btree_key_t) pf, &nodep);
+	    (btree_key_t) phys, &nodep);
 
 	if ((parea != NULL) && (parea->frames >= pages)) {
…
 	irq_spinlock_lock(&zones.lock, true);
-	size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
+	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);
 
 	if (znum == (size_t) -1) {
…
 map:
-	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
-	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
+	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
+	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
 		/*
 		 * The address space area was not created.
…
 }
 
+NO_TRACE static int physmem_unmap(uintptr_t virt)
+{
+	// TODO: implement unmap
+	return EOK;
+}
+
+/** Wrapper for SYS_PHYSMEM_MAP syscall.
+ *
+ * @param phys Physical base address to map
+ * @param pages Number of pages
+ * @param flags Flags of newly mapped pages
+ * @param virt_ptr Destination virtual address
+ * @param bound Lowest virtual address bound.
+ *
+ * @return 0 on success, otherwise it returns error code found in errno.h
+ *
+ */
+sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
+    void *virt_ptr, uintptr_t bound)
+{
+	uintptr_t virt = (uintptr_t) -1;
+	int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
+	    &virt, bound);
+	if (rc != EOK)
+		return rc;
+
+	rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+	if (rc != EOK) {
+		physmem_unmap((uintptr_t) virt);
+		return rc;
+	}
+
+	return EOK;
+}
+
+sysarg_t sys_physmem_unmap(uintptr_t virt)
+{
+	return physmem_unmap(virt);
+}
+
 /** Enable range of I/O space for task.
…
-NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
-    size_t size)
+NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
 	/*
…
 	/* Lock the task and release the lock protecting tasks_btree. */
 	irq_spinlock_exchange(&tasks_lock, &task->lock);
-
 	int rc = ddi_iospace_enable_arch(task, ioaddr, size);
-
 	irq_spinlock_unlock(&task->lock, true);
 
 	return rc;
-}
-
-/** Wrapper for SYS_PHYSMEM_MAP syscall.
- *
- * @param phys_base Physical base address to map
- * @param virt_base Destination virtual address
- * @param pages Number of pages
- * @param flags Flags of newly mapped pages
- *
- * @return 0 on success, otherwise it returns error code found in errno.h
- *
- */
-sysarg_t sys_physmem_map(sysarg_t phys_base, sysarg_t virt_base,
-    sysarg_t pages, sysarg_t flags)
-{
-	return (sysarg_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
-	    FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
-	    (size_t) pages, (int) flags);
 }
…
 		return (sysarg_t) rc;
 
-	return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
+	return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
 	    (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
+sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
+{
+	// TODO: implement
+	return ENOTSUP;
+}
+
+NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys)
+{
+	ASSERT(TASK);
+
+	// TODO: implement locking of non-anonymous mapping
+	return page_find_mapping(virt, phys);
+}
+
+NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+{
+	ASSERT(TASK);
+
+	size_t pages = SIZE2FRAMES(size);
+	uint8_t order;
+
+	/* We need the 2^order >= pages */
+	if (pages == 1)
+		order = 0;
+	else
+		order = fnzb(pages - 1) + 1;
+
+	*phys = frame_alloc_noreserve(order, 0);
+	if (*phys == NULL)
+		return ENOMEM;
+
+	mem_backend_data_t backend_data;
+	backend_data.base = (uintptr_t) *phys;
+	backend_data.frames = pages;
+
+	if (!as_area_create(TASK->as, map_flags, size,
+	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
+		frame_free_noreserve((uintptr_t) *phys);
+		return ENOMEM;
+	}
+
+	return EOK;
+}
+
+NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
+{
+	// TODO: implement unlocking & unmap
+	return EOK;
+}
+
+NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
+{
+	// TODO: implement unlocking & unmap
+	return EOK;
+}
+
+sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
+    void *phys_ptr, void *virt_ptr, uintptr_t bound)
+{
+	if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
+		/*
+		 * Non-anonymous DMA mapping
+		 */
+
+		void *phys;
+		int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
+		    flags, &phys);
+
+		if (rc != EOK)
+			return rc;
+
+		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+		if (rc != EOK) {
+			dmamem_unmap((uintptr_t) virt_ptr, size);
+			return rc;
+		}
+	} else {
+		/*
+		 * Anonymous DMA mapping
+		 */
+
+		void *phys;
+		uintptr_t virt = (uintptr_t) -1;
+		int rc = dmamem_map_anonymous(size, map_flags, flags,
+		    &phys, &virt, bound);
+		if (rc != EOK)
+			return rc;
+
+		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+		if (rc != EOK) {
+			dmamem_unmap_anonymous((uintptr_t) virt);
+			return rc;
+		}
+
+		rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+		if (rc != EOK) {
+			dmamem_unmap_anonymous((uintptr_t) virt);
+			return rc;
+		}
+	}
+
+	return EOK;
+}
+
+sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
+{
+	if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
+		return dmamem_unmap(virt, size);
+	else
+		return dmamem_unmap_anonymous(virt);
+}
+
 /** @}
  */
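In dmamem_map_anonymous() the buddy allocator needs the smallest order such that 2^order >= pages, and the fnzb(pages - 1) + 1 expression computes exactly that. A stand-alone check, with fnzb() reimplemented here as a portable stand-in for the kernel's bitops (assumed to return the index of the highest set bit):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the kernel's fnzb(): index of the most significant set bit. */
    static uint8_t fnzb(size_t arg)
    {
    	uint8_t n = 0;
    	while (arg >>= 1)
    		n++;
    	return n;
    }

    /* Smallest order with 2^order >= pages, as dmamem_map_anonymous() computes it. */
    static uint8_t pages2order(size_t pages)
    {
    	return (pages == 1) ? 0 : (uint8_t) (fnzb(pages - 1) + 1);
    }

    int main(void)
    {
    	assert(pages2order(1) == 0);	/* 2^0 = 1 frame */
    	assert(pages2order(2) == 1);	/* 2^1 = 2 frames */
    	assert(pages2order(5) == 3);	/* 2^3 = 8 frames */
    	assert(pages2order(8) == 3);	/* exact power of two */
    	return 0;
    }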
kernel/generic/src/ipc/irq.c
  *
  * The structure of a notification message is as follows:
- * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
- *            syscall
+ * - IMETHOD: interface and method as registered by
+ *            the SYS_IRQ_REGISTER syscall
  * - ARG1: payload modified by a 'top-half' handler
  * - ARG2: payload modified by a 'top-half' handler
…
 #include <arch.h>
 #include <mm/slab.h>
+#include <mm/page.h>
+#include <mm/km.h>
 #include <errno.h>
 #include <ddi/irq.h>
…
 #include <console/console.h>
 #include <print.h>
+#include <macros.h>
+
+static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount)
+{
+	size_t i;
+
+	for (i = 0; i < rangecount; i++) {
+#ifdef IO_SPACE_BOUNDARY
+		if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY)
+#endif
+			km_unmap(ranges[i].base, ranges[i].size);
+	}
+}
+
+static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount,
+    irq_cmd_t *cmds, size_t cmdcount)
+{
+	uintptr_t *pbase;
+	size_t i, j;
+
+	/* Copy the physical base addresses aside. */
+	pbase = malloc(rangecount * sizeof(uintptr_t), 0);
+	for (i = 0; i < rangecount; i++)
+		pbase[i] = ranges[i].base;
+
+	/* Map the PIO ranges into the kernel virtual address space. */
+	for (i = 0; i < rangecount; i++) {
+#ifdef IO_SPACE_BOUNDARY
+		if ((void *) ranges[i].base < IO_SPACE_BOUNDARY)
+			continue;
+#endif
+		ranges[i].base = km_map(pbase[i], ranges[i].size,
+		    PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE);
+		if (!ranges[i].base) {
+			ranges_unmap(ranges, i);
+			free(pbase);
+			return ENOMEM;
+		}
+	}
+
+	/* Rewrite the pseudocode addresses from physical to kernel virtual. */
+	for (i = 0; i < cmdcount; i++) {
+		uintptr_t addr;
+		size_t size;
+
+		/* Process only commands that use an address. */
+		switch (cmds[i].cmd) {
+		case CMD_PIO_READ_8:
+		case CMD_PIO_WRITE_8:
+		case CMD_PIO_WRITE_A_8:
+			size = 1;
+			break;
+		case CMD_PIO_READ_16:
+		case CMD_PIO_WRITE_16:
+		case CMD_PIO_WRITE_A_16:
+			size = 2;
+			break;
+		case CMD_PIO_READ_32:
+		case CMD_PIO_WRITE_32:
+		case CMD_PIO_WRITE_A_32:
+			size = 4;
+			break;
+		default:
+			/* Move onto the next command. */
+			continue;
+		}
+
+		addr = (uintptr_t) cmds[i].addr;
+
+		for (j = 0; j < rangecount; j++) {
+			/* Find the matching range. */
+			if (!iswithin(pbase[j], ranges[j].size, addr, size))
+				continue;
+
+			/* Switch the command to a kernel virtual address. */
+			addr -= pbase[j];
+			addr += ranges[j].base;
+
+			cmds[i].addr = (void *) addr;
+			break;
+		}
+
+		if (j == rangecount) {
+			/*
+			 * The address used in this command is outside of all
+			 * defined ranges.
+			 */
+			ranges_unmap(ranges, rangecount);
+			free(pbase);
+			return EINVAL;
+		}
+	}
+
+	free(pbase);
+	return EOK;
+}
 
 /** Free the top-half pseudocode.
…
 {
 	if (code) {
+		ranges_unmap(code->ranges, code->rangecount);
+		free(code->ranges);
 		free(code->cmds);
 		free(code);
…
 static irq_code_t *code_from_uspace(irq_code_t *ucode)
 {
+	irq_pio_range_t *ranges = NULL;
+	irq_cmd_t *cmds = NULL;
+
 	irq_code_t *code = malloc(sizeof(*code), 0);
 	int rc = copy_from_uspace(code, ucode, sizeof(*code));
-	if (rc != 0) {
-		free(code);
-		return NULL;
-	}
-
-	if (code->cmdcount > IRQ_MAX_PROG_SIZE) {
-		free(code);
-		return NULL;
-	}
-
-	irq_cmd_t *ucmds = code->cmds;
-	code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
-	rc = copy_from_uspace(code->cmds, ucmds,
+	if (rc != EOK)
+		goto error;
+
+	if ((code->rangecount > IRQ_MAX_RANGE_COUNT) ||
+	    (code->cmdcount > IRQ_MAX_PROG_SIZE))
+		goto error;
+
+	ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0);
+	rc = copy_from_uspace(ranges, code->ranges,
+	    sizeof(code->ranges[0]) * code->rangecount);
+	if (rc != EOK)
+		goto error;
+
+	cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0);
+	rc = copy_from_uspace(cmds, code->cmds,
 	    sizeof(code->cmds[0]) * code->cmdcount);
-	if (rc != 0) {
-		free(code->cmds);
-		free(code);
-		return NULL;
-	}
-
+	if (rc != EOK)
+		goto error;
+
+	rc = ranges_map_and_apply(ranges, code->rangecount, cmds,
+	    code->cmdcount);
+	if (rc != EOK)
+		goto error;
+
+	code->ranges = ranges;
+	code->cmds = cmds;
+
 	return code;
+
+error:
+	if (cmds)
+		free(cmds);
+	if (ranges)
+		free(ranges);
+	free(code);
+	return NULL;
 }
…
 	irq->notif_cfg.code = code;
 	irq->notif_cfg.counter = 0;
-	irq->driver_as = AS;
 
 	/*
…
 		return IRQ_DECLINE;
 
-#define CMD_MEM_READ(target) \
-do { \
-	void *va = code->cmds[i].addr; \
-	if (AS != irq->driver_as) \
-		as_switch(AS, irq->driver_as); \
-	memcpy_from_uspace(&target, va, (sizeof(target))); \
-	if (dstarg) \
-		scratch[dstarg] = target; \
-} while(0)
-
-#define CMD_MEM_WRITE(val) \
-do { \
-	void *va = code->cmds[i].addr; \
-	if (AS != irq->driver_as) \
-		as_switch(AS, irq->driver_as); \
-	memcpy_to_uspace(va, &val, sizeof(val)); \
-} while (0)
-
-	as_t *current_as = AS;
-	size_t i;
-	for (i = 0; i < code->cmdcount; i++) {
+	for (size_t i = 0; i < code->cmdcount; i++) {
 		uint32_t dstval;
+
 		uintptr_t srcarg = code->cmds[i].srcarg;
 		uintptr_t dstarg = code->cmds[i].dstarg;
…
 			}
 			break;
-		case CMD_MEM_READ_8: {
-			uint8_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_READ_16: {
-			uint16_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_READ_32: {
-			uint32_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_WRITE_8: {
-			uint8_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
-		case CMD_MEM_WRITE_16: {
-			uint16_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
-		case CMD_MEM_WRITE_32: {
-			uint32_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
-		case CMD_MEM_WRITE_A_8:
-			if (srcarg) {
-				uint8_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
-			}
-			break;
-		case CMD_MEM_WRITE_A_16:
-			if (srcarg) {
-				uint16_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
-			}
-			break;
-		case CMD_MEM_WRITE_A_32:
-			if (srcarg) {
-				uint32_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
-			}
-			break;
 		case CMD_BTEST:
 			if ((srcarg) && (dstarg)) {
…
 			break;
 		case CMD_ACCEPT:
-			if (AS != current_as)
-				as_switch(AS, current_as);
 			return IRQ_ACCEPT;
 		case CMD_DECLINE:
 		default:
-			if (AS != current_as)
-				as_switch(AS, current_as);
 			return IRQ_DECLINE;
 		}
 	}
-	if (AS != current_as)
-		as_switch(AS, current_as);
 
 	return IRQ_DECLINE;
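With ranges_map_and_apply() in place, a driver's top-half pseudocode declares the physical PIO ranges it touches, and the kernel rewrites the command addresses to its own mappings. A hypothetical registration blob, assuming the userspace-visible irq_code_t mirrors the ranges/rangecount/cmds/cmdcount fields used above (the UART addresses are illustrative only, not from this changeset):

    /* Hypothetical UART register block declared as a PIO range. */
    static irq_pio_range_t uart_ranges[] = {
    	{ .base = 0x3f8, .size = 8 }
    };

    /* Read a status register into scratch[1], then accept the interrupt. */
    static irq_cmd_t uart_cmds[] = {
    	{ .cmd = CMD_PIO_READ_8, .addr = (void *) 0x3fd, .dstarg = 1 },
    	{ .cmd = CMD_ACCEPT }
    };

    static irq_code_t uart_code = {
    	.rangecount = 1,
    	.ranges = uart_ranges,
    	.cmdcount = 2,
    	.cmds = uart_cmds
    };

code_from_uspace() copies the ranges and commands, maps each range with km_map(), and patches every CMD_PIO_* address from physical to kernel virtual before the handler ever runs, which is what made the CMD_MEM_* commands and the driver address space switching obsolete.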
kernel/generic/src/ipc/sysipc.c
 	irq_spinlock_unlock(&answer->sender->lock, true);
 
+	uintptr_t dst_base = (uintptr_t) -1;
 	int rc = as_area_share(as, IPC_GET_ARG1(*olddata),
-	    IPC_GET_ARG2(*olddata), AS,
-	    IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
+	    IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata),
+	    &dst_base, IPC_GET_ARG1(answer->data));
+
+	if (rc == EOK)
+		rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data),
+		    &dst_base, sizeof(dst_base));
+
 	IPC_SET_RETVAL(answer->data, rc);
 	return rc;
 	}
 } else if (IPC_GET_IMETHOD(*olddata) == IPC_M_SHARE_IN) {
 	if (!IPC_GET_RETVAL(answer->data)) {
 		irq_spinlock_lock(&answer->sender->lock, true);
 		as_t *as = answer->sender->as;
 		irq_spinlock_unlock(&answer->sender->lock, true);
 
+		uintptr_t dst_base = (uintptr_t) -1;
 		int rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
-		    IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
-		    IPC_GET_ARG2(answer->data));
+		    IPC_GET_ARG1(*olddata), as, IPC_GET_ARG2(answer->data),
+		    &dst_base, IPC_GET_ARG3(answer->data));
+		IPC_SET_ARG4(answer->data, dst_base);
 		IPC_SET_RETVAL(answer->data, rc);
 	}
…
-sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
+sysarg_t sys_irq_register(inr_t inr, devno_t devno, sysarg_t imethod,
     irq_code_t *ucode)
 {
…
-sysarg_t sys_unregister_irq(inr_t inr, devno_t devno)
+sysarg_t sys_irq_unregister(inr_t inr, devno_t devno)
 {
 	if (!(cap_get(TASK) & CAP_IRQ_REG))
kernel/generic/src/lib/elf.c
 	size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
 
-	as_area_t *area = as_area_create(as, flags, mem_sz, base,
-	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
+	as_area_t *area = as_area_create(as, flags, mem_sz,
+	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
 	if (!area)
 		return EE_MEMORY;
kernel/generic/src/lib/rd.c
 /**
  * @file
- * @brief	RAM disk support.
+ * @brief RAM disk support.
  *
  * Support for RAM disk images.
…
 #include <lib/rd.h>
-#include <byteorder.h>
 #include <mm/frame.h>
 #include <sysinfo/sysinfo.h>
 #include <ddi/ddi.h>
-#include <align.h>
 
-static parea_t rd_parea;  /**< Physical memory area for rd. */
+/** Physical memory area for RAM disk. */
+static parea_t rd_parea;
 
-/**
- * RAM disk initialization routine. At this point, the RAM disk memory is shared
- * and information about the share is provided as sysinfo values to the
- * userspace tasks.
- */
-int init_rd(rd_header_t *header, size_t size)
+/** RAM disk initialization routine
+ *
+ * The information about the RAM disk is provided as sysinfo
+ * values to the uspace tasks.
+ *
+ */
+void init_rd(void *data, size_t size)
 {
-	/* Identify RAM disk */
-	if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) ||
-	    (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
-		return RE_INVALID;
+	uintptr_t base = (uintptr_t) data;
+	ASSERT((base % FRAME_SIZE) == 0);
 
-	/* Identify version */
-	if (header->version != RD_VERSION)
-		return RE_UNSUPPORTED;
-
-	uint32_t hsize;
-	uint64_t dsize;
-	switch (header->data_type) {
-	case RD_DATA_LSB:
-		hsize = uint32_t_le2host(header->header_size);
-		dsize = uint64_t_le2host(header->data_size);
-		break;
-	case RD_DATA_MSB:
-		hsize = uint32_t_be2host(header->header_size);
-		dsize = uint64_t_be2host(header->data_size);
-		break;
-	default:
-		return RE_UNSUPPORTED;
-	}
-
-	if ((hsize % FRAME_SIZE) || (dsize % FRAME_SIZE))
-		return RE_UNSUPPORTED;
-
-	if (hsize > size)
-		return RE_INVALID;
-
-	if ((uint64_t) hsize + dsize > size)
-		dsize = size - hsize;
-
-	rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize),
-	    FRAME_SIZE);
-	rd_parea.frames = SIZE2FRAMES(dsize);
+	rd_parea.pbase = base;
+	rd_parea.frames = SIZE2FRAMES(size);
 	rd_parea.unpriv = false;
 	rd_parea.mapped = false;
 	ddi_parea_register(&rd_parea);
 
 	sysinfo_set_item_val("rd", NULL, true);
-	sysinfo_set_item_val("rd.header_size", NULL, hsize);
-	sysinfo_set_item_val("rd.size", NULL, dsize);
-	sysinfo_set_item_val("rd.address.physical", NULL,
-	    (sysarg_t) KA2PA((void *) header + hsize));
-
-	return RE_OK;
+	sysinfo_set_item_val("rd.size", NULL, size);
+	sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base);
 }
kernel/generic/src/main/kinit.c
 #include <mm/as.h>
 #include <mm/frame.h>
+#include <mm/km.h>
 #include <print.h>
 #include <memstr.h>
…
 #include <str.h>
 #include <sysinfo/stats.h>
+#include <align.h>
 
 #ifdef CONFIG_SMP
…
 	for (i = 0; i < init.cnt; i++) {
-		if (init.tasks[i].addr % FRAME_SIZE) {
-			printf("init[%zu].addr is not frame aligned\n", i);
+		if (init.tasks[i].paddr % FRAME_SIZE) {
+			printf("init[%zu]: Address is not frame aligned\n", i);
 			programs[i].task = NULL;
 			continue;
…
 		str_cpy(namebuf + INIT_PREFIX_LEN,
 		    TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);
-
-		int rc = program_create_from_image((void *) init.tasks[i].addr,
-		    namebuf, &programs[i]);
-
-		if ((rc == 0) && (programs[i].task != NULL)) {
+
+		/*
+		 * Create virtual memory mappings for init task images.
+		 */
+		uintptr_t page = km_map(init.tasks[i].paddr,
+		    init.tasks[i].size,
+		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
+		ASSERT(page);
+
+		int rc = program_create_from_image((void *) page, namebuf,
+		    &programs[i]);
+
+		if (rc == 0) {
+			if (programs[i].task != NULL) {
+				/*
+				 * Set capabilities to init userspace tasks.
+				 */
+				cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER |
+				    CAP_IO_MANAGER | CAP_IRQ_REG);
+
+				if (!ipc_phone_0)
+					ipc_phone_0 = &programs[i].task->answerbox;
+			}
+
 			/*
-			 * Set capabilities to init userspace tasks.
+			 * If programs[i].task == NULL then it is
+			 * the program loader and it was registered
+			 * successfully.
 			 */
-			cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER |
-			    CAP_IO_MANAGER | CAP_IRQ_REG);
-
-			if (!ipc_phone_0)
-				ipc_phone_0 = &programs[i].task->answerbox;
-		} else if (rc == 0) {
-			/* It was the program loader and was registered */
-		} else {
-			/* RAM disk image */
-			int rd = init_rd((rd_header_t *) init.tasks[i].addr, init.tasks[i].size);
-
-			if (rd != RE_OK)
-				printf("Init binary %zu not used (error %d)\n", i, rd);
-		}
+		} else if (i == init.cnt - 1) {
+			/*
+			 * Assume the last task is the RAM disk.
+			 */
+			init_rd((void *) init.tasks[i].paddr, init.tasks[i].size);
+		} else
+			printf("init[%zu]: Init binary load failed (error %d)\n", i, rc);
 	}
kernel/generic/src/main/main.c
 #include <mm/page.h>
 #include <genarch/mm/page_pt.h>
+#include <mm/km.h>
 #include <mm/tlb.h>
 #include <mm/as.h>
…
 #include <sysinfo/sysinfo.h>
 #include <sysinfo/stats.h>
+#include <lib/ra.h>
 
 /** Global configuration structure. */
-config_t config;
+config_t config = {
+	.identity_configured = false,
+	.non_identity_configured = false,
+	.physmem_end = 0
+};
 
 /** Initial user-space tasks */
…
 	size_t i;
 	for (i = 0; i < init.cnt; i++) {
-		if (PA_OVERLAPS(config.stack_base, config.stack_size,
-		    init.tasks[i].addr, init.tasks[i].size))
-			config.stack_base = ALIGN_UP(init.tasks[i].addr +
-			    init.tasks[i].size, config.stack_size);
+		if (overlaps(KA2PA(config.stack_base), config.stack_size,
+		    init.tasks[i].paddr, init.tasks[i].size)) {
+			/*
+			 * The init task overlaps with the memory behind the
+			 * kernel image so it must be in low memory and we can
+			 * use PA2KA on the init task's physical address.
+			 */
+			config.stack_base = ALIGN_UP(
+			    PA2KA(init.tasks[i].paddr) + init.tasks[i].size,
+			    config.stack_size);
+		}
 	}
…
 	 */
 	arch_pre_mm_init();
+	km_identity_init();
 	frame_init();
-
-	/* Initialize at least 1 memory segment big enough for slab to work. */
 	slab_cache_init();
+	ra_init();
 	sysinfo_init();
 	btree_init();
…
 	page_init();
 	tlb_init();
+	km_non_identity_init();
 	ddi_init();
 	arch_post_mm_init();
…
 		for (i = 0; i < init.cnt; i++)
 			LOG("init[%zu].addr=%p, init[%zu].size=%zu",
-			    i, (void *) init.tasks[i].addr, i, init.tasks[i].size);
+			    i, (void *) init.tasks[i].paddr, i, init.tasks[i].size);
 	} else
 		printf("No init binaries found.\n");
…
 	 * Create the first thread.
 	 */
-	thread_t *kinit_thread
-	    = thread_create(kinit, NULL, kernel, 0, "kinit", true);
+	thread_t *kinit_thread =
+	    thread_create(kinit, NULL, kernel, 0, "kinit", true);
 	if (!kinit_thread)
 		panic("Cannot create kinit thread.");
kernel/generic/src/mm/as.c
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * The address space must be already locked when calling
+ * this function.
+ *
+ * @param as Address space.
+ * @param bound Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Address of the beginning of unmapped address space area.
+ * @return -1 if no suitable address space area was found.
+ *
+ */
+NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
+    size_t size)
+{
+	ASSERT(mutex_locked(&as->lock));
+
+	if (size == 0)
+		return (uintptr_t) -1;
+
+	/*
+	 * Make sure we allocate from page-aligned
+	 * address. Check for possible overflow in
+	 * each step.
+	 */
+
+	size_t pages = SIZE2FRAMES(size);
+
+	/*
+	 * Find the lowest unmapped address aligned on the size
+	 * boundary, not smaller than bound and of the required size.
+	 */
+
+	/* First check the bound address itself */
+	uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
+	if ((addr >= bound) &&
+	    (check_area_conflicts(as, addr, pages, NULL)))
+		return addr;
+
+	/* Eventually check the addresses behind each area */
+	list_foreach(as->as_area_btree.leaf_list, cur) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		for (btree_key_t i = 0; i < node->keys; i++) {
+			as_area_t *area = (as_area_t *) node->value[i];
+
+			mutex_lock(&area->lock);
+
+			addr =
+			    ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+			bool avail =
+			    ((addr >= bound) && (addr >= area->base) &&
+			    (check_area_conflicts(as, addr, pages, area)));
+
+			mutex_unlock(&area->lock);
+
+			if (avail)
+				return addr;
+		}
+	}
+
+	/* No suitable address space area found */
+	return (uintptr_t) -1;
+}
+
 /** Create address space area of common attributes.
  *
…
  * @param flags Flags of the area memory.
  * @param size Size of area.
- * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param base Starting virtual address of the area.
+ *             If set to -1, a suitable mappable area is found.
+ * @param bound Lowest address bound if base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Address space area on success or NULL on failure.
…
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data)
-{
-	if ((base % PAGE_SIZE) != 0)
+    unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
+{
+	if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
 		return NULL;
…
 	mutex_lock(&as->lock);
 
-	if (!check_area_conflicts(as, base, pages, NULL)) {
+	if (*base == (uintptr_t) -1) {
+		*base = as_get_unmapped_area(as, bound, size);
+		if (*base == (uintptr_t) -1) {
+			mutex_unlock(&as->lock);
+			return NULL;
+		}
+	}
+
+	if (!check_area_conflicts(as, *base, pages, NULL)) {
 		mutex_unlock(&as->lock);
 		return NULL;
…
 	area->pages = pages;
 	area->resident = 0;
-	area->base = base;
+	area->base = *base;
 	area->sh_info = NULL;
 	area->backend = backend;
…
 	btree_create(&area->used_space);
-	btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+	btree_insert(&as->as_area_btree, *base, (void *) area,
+	    NULL);
 
 	mutex_unlock(&as->lock);
…
  * @param acc_size Expected size of the source area.
  * @param dst_as Pointer to destination address space.
- * @param dst_base Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
+ * @param dst_base Target base address. If set to -1,
+ *                 a suitable mappable area is found.
+ * @param bound Lowest address bound if dst_base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Zero on success.
…
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
+    uintptr_t bound)
 {
 	mutex_lock(&src_as->lock);
…
 	 * to support sharing in less privileged mode.
 	 */
-	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
-	    dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
+	    src_size, AS_AREA_ATTR_PARTIAL, src_backend,
+	    &src_backend_data, dst_base, bound);
 	if (!dst_area) {
 		/*
…
  */
 
-/** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
-{
-	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
-	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
-		return (sysarg_t) address;
-	else
+sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
+    uintptr_t bound)
+{
+	uintptr_t virt = base;
+	as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+	if (area == NULL)
 		return (sysarg_t) -1;
-}
-
-/** Wrapper for as_area_resize(). */
+
+	return (sysarg_t) virt;
+}
+
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
…
 }
 
-/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
…
 }
 
-/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
 	return (sysarg_t) as_area_destroy(AS, address);
-}
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-	if (size == 0)
-		return 0;
-
-	/*
-	 * Make sure we allocate from page-aligned
-	 * address. Check for possible overflow in
-	 * each step.
-	 */
-
-	size_t pages = SIZE2FRAMES(size);
-	uintptr_t ret = 0;
-
-	/*
-	 * Find the lowest unmapped address aligned on the sz
-	 * boundary, not smaller than base and of the required size.
-	 */
-
-	mutex_lock(&AS->lock);
-
-	/* First check the base address itself */
-	uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-	if ((addr >= base) &&
-	    (check_area_conflicts(AS, addr, pages, NULL)))
-		ret = addr;
-
-	/* Eventually check the addresses behind each area */
-	list_foreach(AS->as_area_btree.leaf_list, cur) {
-		if (ret != 0)
-			break;
-
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-
-		btree_key_t i;
-		for (i = 0; (ret == 0) && (i < node->keys); i++) {
-			uintptr_t addr;
-
-			as_area_t *area = (as_area_t *) node->value[i];
-
-			mutex_lock(&area->lock);
-
-			addr = ALIGN_UP(area->base + P2SZ(area->pages),
-			    PAGE_SIZE);
-
-			if ((addr >= base) && (addr >= area->base) &&
-			    (check_area_conflicts(AS, addr, pages, area)))
-				ret = addr;
-
-			mutex_unlock(&area->lock);
-		}
-	}
-
-	mutex_unlock(&AS->lock);
-
-	return (sysarg_t) ret;
 }
kernel/generic/src/mm/backend_anon.c
 #include <mm/frame.h>
 #include <mm/slab.h>
+#include <mm/km.h>
 #include <synch/mutex.h>
 #include <adt/list.h>
…
 int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
 {
+	uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);
+	uintptr_t kpage;
 	uintptr_t frame;
…
 		mutex_lock(&area->sh_info->lock);
 		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-		    ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
+		    upage - area->base, &leaf);
 		if (!frame) {
 			bool allocate = true;
…
 			 */
 			for (i = 0; i < leaf->keys; i++) {
-				if (leaf->key[i] ==
-				    ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
+				if (leaf->key[i] == upage - area->base) {
 					allocate = false;
 					break;
…
 			if (allocate) {
-				frame = (uintptr_t) frame_alloc_noreserve(
-				    ONE_FRAME, 0);
-				memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+				kpage = km_temporary_page_get(&frame,
+				    FRAME_NO_RESERVE);
+				memsetb((void *) kpage, PAGE_SIZE, 0);
+				km_temporary_page_put(kpage);
 
 				/*
…
 				 */
 				btree_insert(&area->sh_info->pagemap,
-				    ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
-				    (void *) frame, leaf);
+				    upage - area->base, (void *) frame, leaf);
 			}
 		}
…
 		 * the different causes
 		 */
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
+		memsetb((void *) kpage, PAGE_SIZE, 0);
+		km_temporary_page_put(kpage);
 	}
 
 	/*
-	 * Map 'page' to 'frame'.
+	 * Map 'upage' to 'frame'.
 	 * Note that TLB shootdown is not attempted as only new information is
 	 * being inserted into page tables.
 	 */
-	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, upage, 1))
 		panic("Cannot insert used space.");
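The km_temporary_page_get()/km_temporary_page_put() pair replaces the old PA2KA() shortcut, which only worked while every frame had a permanent identity mapping; with high memory, a freshly allocated frame may have no kernel virtual address at all. A condensed restatement of the pattern as used above (a sketch of kernel-internal calls from this changeset, not additional code):

    uintptr_t frame;	/* Receives the physical address of the new frame. */
    uintptr_t kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
    memsetb((void *) kpage, PAGE_SIZE, 0);	/* Touch it via the temporary VA. */
    km_temporary_page_put(kpage);	/* Drop the temporary mapping... */
    /* ...and hand the bare physical frame to the user mapping. */
    page_mapping_insert(AS, upage, frame, as_area_get_flags(area));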
kernel/generic/src/mm/backend_elf.c
 #include <mm/page.h>
 #include <mm/reserve.h>
+#include <mm/km.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
…
 	elf_segment_header_t *entry = area->backend_data.segment;
 	btree_node_t *leaf;
-	uintptr_t base, frame, page, start_anon;
+	uintptr_t base;
+	uintptr_t frame;
+	uintptr_t kpage;
+	uintptr_t upage;
+	uintptr_t start_anon;
 	size_t i;
 	bool dirty = false;
…
 	    (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
 
 	/* Virtual address of faulting page */
-	page = ALIGN_DOWN(addr, PAGE_SIZE);
+	upage = ALIGN_DOWN(addr, PAGE_SIZE);
 
 	/* Virtual address of the end of initialized part of segment */
…
 		mutex_lock(&area->sh_info->lock);
 		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-		    page - area->base, &leaf);
+		    upage - area->base, &leaf);
 		if (!frame) {
 			unsigned int i;
…
 			for (i = 0; i < leaf->keys; i++) {
-				if (leaf->key[i] == page - area->base) {
+				if (leaf->key[i] == upage - area->base) {
 					found = true;
 					break;
…
 		if (frame || found) {
 			frame_reference_add(ADDR2PFN(frame));
-			page_mapping_insert(AS, addr, frame,
+			page_mapping_insert(AS, upage, frame,
 			    as_area_get_flags(area));
-			if (!used_space_insert(area, page, 1))
+			if (!used_space_insert(area, upage, 1))
 				panic("Cannot insert used space.");
 			mutex_unlock(&area->sh_info->lock);
…
 	 * mapping.
 	 */
-	if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+	if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) {
 		/*
 		 * Initialized portion of the segment. The memory is backed
…
 		 */
 		if (entry->p_flags & PF_W) {
-			frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-			memcpy((void *) PA2KA(frame),
-			    (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+			kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
+			memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE),
+			    PAGE_SIZE);
 			if (entry->p_flags & PF_X) {
-				smc_coherence_block((void *) PA2KA(frame),
-				    FRAME_SIZE);
+				smc_coherence_block((void *) kpage, PAGE_SIZE);
 			}
+			km_temporary_page_put(kpage);
 			dirty = true;
 		} else {
-			frame = KA2PA(base + i * FRAME_SIZE);
+			pte_t *pte = page_mapping_find(AS_KERNEL,
+			    base + i * FRAME_SIZE, true);
+
+			ASSERT(pte);
+			ASSERT(PTE_PRESENT(pte));
+
+			frame = PTE_GET_FRAME(pte);
 		}
-	} else if (page >= start_anon) {
+	} else if (upage >= start_anon) {
 		/*
 		 * This is the uninitialized portion of the segment.
…
 		 * and cleared.
 		 */
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
+		memsetb((void *) kpage, PAGE_SIZE, 0);
+		km_temporary_page_put(kpage);
 		dirty = true;
 	} else {
…
 		 * (The segment can be and often is shorter than 1 page).
 		 */
-		if (page < entry->p_vaddr)
-			pad_lo = entry->p_vaddr - page;
+		if (upage < entry->p_vaddr)
+			pad_lo = entry->p_vaddr - upage;
 		else
 			pad_lo = 0;
 
-		if (start_anon < page + PAGE_SIZE)
-			pad_hi = page + PAGE_SIZE - start_anon;
+		if (start_anon < upage + PAGE_SIZE)
+			pad_hi = upage + PAGE_SIZE - start_anon;
 		else
 			pad_hi = 0;
 
-		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-		memcpy((void *) (PA2KA(frame) + pad_lo),
-		    (void *) (base + i * FRAME_SIZE + pad_lo),
-		    FRAME_SIZE - pad_lo - pad_hi);
+		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
+		memcpy((void *) (kpage + pad_lo),
+		    (void *) (base + i * PAGE_SIZE + pad_lo),
+		    PAGE_SIZE - pad_lo - pad_hi);
 		if (entry->p_flags & PF_X) {
-			smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
-			    FRAME_SIZE - pad_lo - pad_hi);
+			smc_coherence_block((void *) (kpage + pad_lo),
+			    PAGE_SIZE - pad_lo - pad_hi);
 		}
-		memsetb((void *) PA2KA(frame), pad_lo, 0);
-		memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-		    0);
+		memsetb((void *) kpage, pad_lo, 0);
+		memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0);
+		km_temporary_page_put(kpage);
 		dirty = true;
 	}
…
 	if (dirty && area->sh_info) {
 		frame_reference_add(ADDR2PFN(frame));
-		btree_insert(&area->sh_info->pagemap, page - area->base,
+		btree_insert(&area->sh_info->pagemap, upage - area->base,
 		    (void *) frame, leaf);
 	}
…
 	mutex_unlock(&area->sh_info->lock);
 
-	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, page, 1))
+	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, upage, 1))
 		panic("Cannot insert used space.");
kernel/generic/src/mm/frame.c
rbd5f3b7 r00aece0 240 240 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 241 241 { 242 return ( zone_flags_available(zone->flags)243 &&buddy_system_can_alloc(zone->buddy_system, order)); 242 return ((zone->flags & ZONE_AVAILABLE) && 243 buddy_system_can_alloc(zone->buddy_system, order)); 244 244 } 245 245 … … 265 265 * Check whether the zone meets the search criteria. 266 266 */ 267 if ( (zones.info[i].flags & flags) == flags) { 267 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) { 268 268 /* 269 269 * Check if the zone has 2^order frames area available. … … 460 460 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 461 461 { 462 ASSERT(zone _flags_available(zone->flags)); 462 ASSERT(zone->flags & ZONE_AVAILABLE); 463 463 464 464 /* Allocate frames from zone buddy system */ … … 490 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 491 491 { 492 ASSERT(zone _flags_available(zone->flags)); 492 ASSERT(zone->flags & ZONE_AVAILABLE); 493 493 494 494 frame_t *frame = &zone->frames[frame_idx]; … … 518 518 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 519 519 { 520 ASSERT(zone _flags_available(zone->flags)); 520 ASSERT(zone->flags & ZONE_AVAILABLE); 521 521 522 522 frame_t *frame = zone_get_frame(zone, frame_idx); … … 549 549 buddy_system_t *buddy) 550 550 { 551 ASSERT(zone _flags_available(zones.info[z1].flags));552 ASSERT(zone _flags_available(zones.info[z2].flags)); 551 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); 552 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE); 553 553 ASSERT(zones.info[z1].flags == zones.info[z2].flags); 554 554 ASSERT(zones.info[z1].base < zones.info[z2].base); … … 645 645 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 646 646 { 647 ASSERT(zone _flags_available(zones.info[znum].flags)); 647 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 648 648 649 649 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); … … 681 681 size_t count) 682 682 { 683 ASSERT(zone _flags_available(zones.info[znum].flags)); 683 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 684 684 ASSERT(frame_idx + count < zones.info[znum].count); 685 685 … … 723 723 * set of flags 724 724 */ 725 if ((z1 >= zones.count) || (z2 >= zones.count) 726 || (z2 - z1 != 1) 727 || (!zone_flags_available(zones.info[z1].flags)) 728 || (!zone_flags_available(zones.info[z2].flags)) 729 || (zones.info[z1].flags != zones.info[z2].flags)) { 725 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) || 726 (zones.info[z1].flags != zones.info[z2].flags)) { 730 727 ret = false; 731 728 goto errout; … … 828 825 zone->buddy_system = buddy; 829 826 830 if ( zone_flags_available(flags)) { 827 if (flags & ZONE_AVAILABLE) { 831 828 /* 832 829 * Compute order for buddy system and initialize … … 865 862 { 866 863 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); 864 } 865 866 /** Allocate external configuration frames from low memory. */ 867 pfn_t zone_external_conf_alloc(size_t count) 868 { 869 size_t size = zone_conf_size(count); 870 size_t order = ispwr2(size) ?
fnzb(size) : (fnzb(size) + 1); 871 872 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, 873 FRAME_LOWMEM | FRAME_ATOMIC)); 867 874 } 868 875 … … 888 895 irq_spinlock_lock(&zones.lock, true); 889 896 890 if ( zone_flags_available(flags)) { /* Create available zone */ 897 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 891 898 /* Theoretically we could have NULL here, practically make sure 892 899 * nobody tries to do that. If some platform requires, remove 893 900 */ 894 901 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 903 904 /* Update the known end of physical memory. */ 905 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 896 906 897 907 /* If confframe is supposed to be inside our zone, then make sure … … 914 924 for (i = 0; i < init.cnt; i++) 915 925 if (overlaps(addr, PFN2ADDR(confcount), 916 KA2PA(init.tasks[i].addr),926 init.tasks[i].paddr, 917 927 init.tasks[i].size)) { 918 928 overlap = true; … … 1142 1152 size_t znum = find_zone(pfn, 1, 0); 1143 1153 1144 1145 1154 ASSERT(znum != (size_t) -1); 1146 1155 … … 1233 1242 1234 1243 /* Tell the architecture to create some memory */ 1235 frame_ arch_init(); 1244 frame_low_arch_init(); 1236 1245 if (config.cpu_active == 1) { 1237 1246 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1242 1251 size_t i; 1243 1252 for (i = 0; i < init.cnt; i++) { 1244 pfn_t pfn = ADDR2PFN( KA2PA(init.tasks[i].addr)); 1253 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1245 1254 frame_mark_unavailable(pfn, 1246 1255 SIZE2FRAMES(init.tasks[i].size)); … … 1256 1265 frame_mark_unavailable(0, 1); 1257 1266 } 1267 frame_high_arch_init(); 1268 } 1269 1270 /** Adjust bounds of physical memory region according to low/high memory split. 1271 * 1272 * @param low[in] If true, the adjustment is performed to make the region 1273 * fit in the low memory. Otherwise the adjustment is 1274 * performed to make the region fit in the high memory. 1275 * @param basep[inout] Pointer to a variable which contains the region's base 1276 * address and which may receive the adjusted base address. 1277 * @param sizep[inout] Pointer to a variable which contains the region's size 1278 * and which may receive the adjusted size. 1279 * @return True if the region still exists even after the 1280 * adjustment, false otherwise. 1281 */ 1282 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1283 { 1284 uintptr_t limit = config.identity_size; 1285 1286 if (low) { 1287 if (*basep > limit) 1288 return false; 1289 if (*basep + *sizep > limit) 1290 *sizep = limit - *basep; 1291 } else { 1292 if (*basep + *sizep <= limit) 1293 return false; 1294 if (*basep <= limit) { 1295 *sizep -= limit - *basep; 1296 *basep = limit; 1297 } 1298 } 1299 return true; 1258 1300 } 1259 1301 … … 1294 1336 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1295 1337 1296 if (zone _flags_available(zones.info[i].flags)) { 1338 if (zones.info[i].flags & ZONE_AVAILABLE) { 1297 1339 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count); 1298 1340 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count); … … 1345 1387 irq_spinlock_unlock(&zones.lock, true); 1346 1388 1347 bool available = zone_flags_available(flags); 1389 bool available = ((flags & ZONE_AVAILABLE) != 0); 1348 1390 1349 1391 printf("%-4zu", i); … … 1357 1399 #endif 1358 1400 1359 printf(" %12zu %c%c%c ", count, 1360 available ? 'A' : ' ', 1361 (flags & ZONE_RESERVED) ? 'R' : ' ', 1362 (flags & ZONE_FIRMWARE) ?
'F' : ' '); 1401 printf(" %12zu %c%c%c%c%c ", count, 1402 available ? 'A' : '-', 1403 (flags & ZONE_RESERVED) ? 'R' : '-', 1404 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1405 (flags & ZONE_LOWMEM) ? 'L' : '-', 1406 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1363 1407 1364 1408 if (available) … … 1402 1446 irq_spinlock_unlock(&zones.lock, true); 1403 1447 1404 bool available = zone_flags_available(flags);1448 bool available = ((flags & ZONE_AVAILABLE) != 0); 1405 1449 1406 1450 uint64_t size; … … 1412 1456 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1413 1457 size, size_suffix); 1414 printf("Zone flags: %c%c%c\n", 1415 available ? 'A' : ' ', 1416 (flags & ZONE_RESERVED) ? 'R' : ' ', 1417 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1458 printf("Zone flags: %c%c%c%c%c\n", 1459 available ? 'A' : '-', 1460 (flags & ZONE_RESERVED) ? 'R' : '-', 1461 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1462 (flags & ZONE_LOWMEM) ? 'L' : '-', 1463 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1418 1464 1419 1465 if (available) { -
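The new frame_adjust_zone_bounds() is easiest to follow on concrete numbers. A worked example, assuming config.identity_size of 16 MiB; the real value is architecture-dependent:

/* A region of 8 MiB starting at 12 MiB straddles the 16 MiB limit. */
uintptr_t base = 12 * 1024 * 1024;
size_t size = 8 * 1024 * 1024;

uintptr_t lo_base = base;
size_t lo_size = size;
if (frame_adjust_zone_bounds(true, &lo_base, &lo_size)) {
	/* Low part kept: lo_base == 12 MiB, lo_size clipped to 4 MiB. */
}

uintptr_t hi_base = base;
size_t hi_size = size;
if (frame_adjust_zone_bounds(false, &hi_base, &hi_size)) {
	/* High part kept: hi_base moved up to 16 MiB, hi_size == 4 MiB. */
}

Calling the function twice like this is what allows a single physical region to be split between the low-memory zones (registered by frame_low_arch_init()) and the high-memory zones (registered by frame_high_arch_init()).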
kernel/generic/src/mm/page.c
rbd5f3b7 r00aece0 53 53 * We assume that the other processors are either not using the mapping yet 54 54 * (i.e. during the bootstrap) or are executing the TLB shootdown code. While 55 * we don't care much about the former case, the processors in the latter case 55 * we don't care much about the former case, the processors in the latter case 56 56 * will do an implicit serialization by virtue of running the TLB shootdown 57 57 * interrupt handler. … … 74 74 #include <syscall/copy.h> 75 75 #include <errno.h> 76 #include <align.h> 76 77 77 78 /** Virtual operations for page subsystem. */ … … 81 82 { 82 83 page_arch_init(); 83 }84 85 /** Map memory structure86 *87 * Identity-map memory structure88 * considering possible crossings89 * of page boundaries.90 *91 * @param addr Address of the structure.92 * @param size Size of the structure.93 *94 */95 void map_structure(uintptr_t addr, size_t size)96 {97 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1)));98 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);99 100 size_t i;101 for (i = 0; i < cnt; i++)102 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE,103 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);104 105 /* Repel prefetched accesses to the old mapping. */106 memory_barrier();107 84 } 108 85 … … 176 153 } 177 154 155 /** Make the mapping shared by all page tables (not address spaces). 156 * 157 * @param base Starting virtual address of the range that is made global. 158 * @param size Size of the address range that is made global. 159 */ 160 void page_mapping_make_global(uintptr_t base, size_t size) 161 { 162 ASSERT(page_mapping_operations); 163 ASSERT(page_mapping_operations->mapping_make_global); 164 165 return page_mapping_operations->mapping_make_global(base, size); 166 } 167 168 int page_find_mapping(uintptr_t virt, void **phys) 169 { 170 mutex_lock(&AS->lock); 171 172 pte_t *pte = page_mapping_find(AS, virt, false); 173 if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) { 174 mutex_unlock(&AS->lock); 175 return ENOENT; 176 } 177 178 *phys = (void *) PTE_GET_FRAME(pte) + 179 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 180 181 mutex_unlock(&AS->lock); 182 183 return EOK; 184 } 185 178 186 /** Syscall wrapper for getting mapping of a virtual page. 179 * 180 * @retval EOK Everything went find, @p uspace_frame and @p uspace_node 181 * contains correct values. 182 * @retval ENOENT Virtual address has no mapping. 183 */ 184 sysarg_t sys_page_find_mapping(uintptr_t virt_address, 185 uintptr_t *uspace_frame) 186 { 187 mutex_lock(&AS->lock); 188 189 pte_t *pte = page_mapping_find(AS, virt_address, false); 190 if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) { 191 mutex_unlock(&AS->lock); 192 193 return (sysarg_t) ENOENT; 194 } 195 196 uintptr_t phys_address = PTE_GET_FRAME(pte); 197 198 mutex_unlock(&AS->lock); 199 200 int rc = copy_to_uspace(uspace_frame, 201 &phys_address, sizeof(phys_address)); 202 if (rc != EOK) { 203 return (sysarg_t) rc; 204 } 205 206 return EOK; 187 * 188 * @return EOK on success. 189 * @return ENOENT if no virtual address mapping found. 190 * 191 */ 192 sysarg_t sys_page_find_mapping(uintptr_t virt, void *phys_ptr) 193 { 194 void *phys; 195 int rc = page_find_mapping(virt, &phys); 196 if (rc != EOK) 197 return rc; 198 199 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 200 return (sysarg_t) rc; 207 201 } 208 202 -
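page_find_mapping() factors the lookup out of the syscall wrapper so that kernel code can resolve a virtual address in the current task's address space directly. A sketch of a call site; the virtual address is a placeholder:

#include <mm/page.h>
#include <errno.h>

void *phys;
int rc = page_find_mapping(0x40001234, &phys);
if (rc == EOK) {
	/* phys holds the frame's physical base plus the sub-page
	 * offset, i.e. it ends in 0x234 for the address above. */
}

Note that the function takes AS->lock itself, so it resolves against the calling task's address space and must not be invoked with that mutex already held.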
kernel/generic/src/mm/reserve.c
rbd5f3b7 r00aece0 42 42 #include <typedefs.h> 43 43 #include <arch/types.h> 44 #include <debug.h> 45 46 static bool reserve_initialized = false; 44 47 45 48 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock"); … … 54 57 { 55 58 reserve = frame_total_free_get(); 59 reserve_initialized = true; 56 60 } 57 61 … … 67 71 { 68 72 bool reserved = false; 73 74 ASSERT(reserve_initialized); 69 75 70 76 irq_spinlock_lock(&reserve_lock, true); … … 111 117 void reserve_force_alloc(size_t size) 112 118 { 119 if (!reserve_initialized) 120 return; 121 113 122 irq_spinlock_lock(&reserve_lock, true); 114 123 reserve -= size; … … 122 131 void reserve_free(size_t size) 123 132 { 133 if (!reserve_initialized) 134 return; 135 124 136 irq_spinlock_lock(&reserve_lock, true); 125 137 reserve += size; -
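The new reserve_initialized flag closes the window between early boot and reserve_init(): until the reserve is seeded from frame_total_free_get(), reserve_force_alloc() and reserve_free() simply return, while reserve_try_alloc() asserts that initialization has already happened. In outline (the call ordering below is illustrative):

/* Before reserve_init(): both calls bail out immediately. */
reserve_force_alloc(1);	/* no-op, reserve not seeded yet */
reserve_free(1);	/* no-op as well */

reserve_init();	/* reserve = frame_total_free_get(); */

/* From now on the counter is really adjusted, and
 * reserve_try_alloc() may be used as well. */
reserve_force_alloc(1);
reserve_free(1);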
kernel/generic/src/mm/slab.c
rbd5f3b7 r00aece0 180 180 unsigned int flags) 181 181 { 182 183 184 182 size_t zone = 0; 185 183 -
kernel/generic/src/printf/printf_core.c
rbd5f3b7 r00aece0 75 75 #define PRINT_NUMBER_BUFFER_SIZE (64 + 5) 76 76 77 /** Get signed or unsigned integer argument */ 78 #define PRINTF_GET_INT_ARGUMENT(type, ap, flags) \ 79 ({ \ 80 unsigned type res; \ 81 \ 82 if ((flags) & __PRINTF_FLAG_SIGNED) { \ 83 signed type arg = va_arg((ap), signed type); \ 84 \ 85 if (arg < 0) { \ 86 res = -arg; \ 87 (flags) |= __PRINTF_FLAG_NEGATIVE; \ 88 } else \ 89 res = arg; \ 90 } else \ 91 res = va_arg((ap), unsigned type); \ 92 \ 93 res; \ 94 }) 95 77 96 /** Enumeration of possible arguments types. 78 97 */ … … 207 226 } 208 227 209 return (int) (counter + 1);228 return (int) (counter); 210 229 } 211 230 … … 245 264 } 246 265 247 return (int) (counter + 1);266 return (int) (counter); 248 267 } 249 268 … … 832 851 size_t size; 833 852 uint64_t number; 853 834 854 switch (qualifier) { 835 855 case PrintfQualifierByte: 836 856 size = sizeof(unsigned char); 837 number = (uint64_t) va_arg(ap, unsigned int);857 number = PRINTF_GET_INT_ARGUMENT(int, ap, flags); 838 858 break; 839 859 case PrintfQualifierShort: 840 860 size = sizeof(unsigned short); 841 number = (uint64_t) va_arg(ap, unsigned int);861 number = PRINTF_GET_INT_ARGUMENT(int, ap, flags); 842 862 break; 843 863 case PrintfQualifierInt: 844 864 size = sizeof(unsigned int); 845 number = (uint64_t) va_arg(ap, unsigned int);865 number = PRINTF_GET_INT_ARGUMENT(int, ap, flags); 846 866 break; 847 867 case PrintfQualifierLong: 848 868 size = sizeof(unsigned long); 849 number = (uint64_t) va_arg(ap, unsigned long);869 number = PRINTF_GET_INT_ARGUMENT(long, ap, flags); 850 870 break; 851 871 case PrintfQualifierLongLong: 852 872 size = sizeof(unsigned long long); 853 number = (uint64_t) va_arg(ap, unsigned long long);873 number = PRINTF_GET_INT_ARGUMENT(long long, ap, flags); 854 874 break; 855 875 case PrintfQualifierPointer: … … 866 886 counter = -counter; 867 887 goto out; 868 }869 870 if (flags & __PRINTF_FLAG_SIGNED) {871 if (number & (0x1 << (size * 8 - 1))) {872 flags |= __PRINTF_FLAG_NEGATIVE;873 874 if (size == sizeof(uint64_t)) {875 number = -((int64_t) number);876 } else {877 number = ~number;878 number &=879 ~(0xFFFFFFFFFFFFFFFFll <<880 (size * 8));881 number++;882 }883 }884 888 } 885 889 -
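PRINTF_GET_INT_ARGUMENT replaces the sign fix-up deleted further down in this file: instead of testing the sign bit of an already widened uint64_t and reconstructing the two's complement by hand, the argument is negated while it still has its own width. Hand-expanding the macro for the int qualifier (it relies on GCC statement expressions, which kernel code already assumes) gives roughly:

unsigned int res;

if (flags & __PRINTF_FLAG_SIGNED) {
	signed int arg = va_arg(ap, signed int);

	if (arg < 0) {
		res = -arg;	/* keep the magnitude... */
		flags |= __PRINTF_FLAG_NEGATIVE;	/* ...and the sign */
	} else
		res = arg;
} else
	res = va_arg(ap, unsigned int);

number = (uint64_t) res;	/* widened with no stray sign extension */

The counter changes in the same diff fix an off-by-one: the counters are already incremented inside the loops, so returning counter + 1 overstated the number of characters processed by one.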
kernel/generic/src/proc/program.c
rbd5f3b7 r00aece0 87 87 * Create the stack address space area. 88 88 */ 89 uintptr_t virt = USTACK_ADDRESS; 89 90 as_area_t *area = as_area_create(as, 90 91 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 91 STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE, 92 &anon_backend, NULL); 92 STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0); 93 93 if (!area) 94 94 return ENOMEM; -
kernel/generic/src/proc/thread.c
rbd5f3b7 r00aece0 173 173 #endif /* CONFIG_FPU */ 174 174 175 /* 176 * Allocate the kernel stack from the low-memory to prevent an infinite 177 * nesting of TLB-misses when accessing the stack from the part of the 178 * TLB-miss handler written in C. 179 * 180 * Note that low-memory is safe to be used for the stack as it will be 181 * covered by the kernel identity mapping, which guarantees not to 182 * nest TLB-misses infinitely (either via some hardware mechanism or 183 * by the construction of the assembly-language part of the TLB-miss 184 * handler). 185 * 186 * This restriction can be lifted once each architecture provides 187 * a similar guarantee, for example by locking the kernel stack 188 * in the TLB whenever it is allocated from the high-memory and the 189 * thread is being scheduled to run. 190 */ 191 kmflags |= FRAME_LOWMEM; 192 kmflags &= ~FRAME_HIGHMEM; 193 175 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 176 195 if (!thread->kstack) { -
kernel/generic/src/syscall/copy.c
rbd5f3b7 r00aece0 56 56 * @param size Size of the data to be copied. 57 57 * 58 * @return 0 on success or error code from @ref errno.h.58 * @return EOK on success or error code from @ref errno.h. 59 59 */ 60 60 int copy_from_uspace(void *dst, const void *uspace_src, size_t size) … … 94 94 95 95 interrupts_restore(ipl); 96 return !rc ? EPERM : 0;96 return !rc ? EPERM : EOK; 97 97 } 98 98 -
kernel/generic/src/syscall/syscall.c
rbd5f3b7 r00aece0 146 146 (syshandler_t) sys_as_area_change_flags, 147 147 (syshandler_t) sys_as_area_destroy, 148 (syshandler_t) sys_as_get_unmapped_area,149 148 150 149 /* Page mapping related syscalls. */ … … 176 175 (syshandler_t) sys_device_assign_devno, 177 176 (syshandler_t) sys_physmem_map, 177 (syshandler_t) sys_physmem_unmap, 178 (syshandler_t) sys_dmamem_map, 179 (syshandler_t) sys_dmamem_unmap, 178 180 (syshandler_t) sys_iospace_enable, 179 (syshandler_t) sys_register_irq, 180 (syshandler_t) sys_unregister_irq, 181 (syshandler_t) sys_iospace_disable, 182 (syshandler_t) sys_irq_register, 183 (syshandler_t) sys_irq_unregister, 181 184 182 185 /* Sysinfo syscalls. */ 183 (syshandler_t) sys_sysinfo_get_ tag,186 (syshandler_t) sys_sysinfo_get_val_type, 184 187 (syshandler_t) sys_sysinfo_get_value, 185 188 (syshandler_t) sys_sysinfo_get_data_size, -
kernel/generic/src/sysinfo/sysinfo.c
rbd5f3b7 r00aece0 661 661 * 662 662 */ 663 sysarg_t sys_sysinfo_get_ tag(void *path_ptr, size_t path_size)663 sysarg_t sys_sysinfo_get_val_type(void *path_ptr, size_t path_size) 664 664 { 665 665 /*
