Changeset c520034 in mainline
- Timestamp: 2011-12-31T18:19:35Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 295f658, 77c2b02, 96cd5b4
- Parents: 852052d (diff), 22f0561 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 26 added, 72 edited
Legend: in the reconstructed diffs below, added lines are prefixed with "+", removed lines with "-", and unmodified context lines with a single space.
kernel/Makefile
@@ -229 +229 @@
     generic/src/syscall/syscall.c \
     generic/src/syscall/copy.c \
+    generic/src/mm/km.c \
     generic/src/mm/reserve.c \
     generic/src/mm/buddy.c \
@@ -245 +246 @@
     generic/src/lib/str.c \
     generic/src/lib/elf.c \
+    generic/src/lib/ra.c \
     generic/src/lib/rd.c \
     generic/src/printf/printf_core.c \
kernel/arch/abs32le/Makefile.inc
@@ -57 +57 @@
     arch/$(KARCH)/src/smp/smp.c \
     arch/$(KARCH)/src/smp/ipi.c \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/abs32le/include/mm/frame.h
@@ -41 +41 @@
 #include <typedefs.h>
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/abs32le/src/mm/frame.c
@@ -50 +50 @@
 
 
-void frame_arch_init(void)
+void frame_low_arch_init(void)
+{
+}
+
+void frame_high_arch_init(void)
 {
 }
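Every architecture in this changeset splits frame_arch_init() into a low/high pair. A minimal sketch of how the generic layer presumably drives the two phases (the name frame_init and the exact call order are an assumption; the generic code is not part of this merge diff):

    /* Hypothetical sketch: low-memory zones come up first so that
     * high-memory zones can allocate their configuration data
     * (zone_external_conf_alloc()) from already-usable low memory. */
    void frame_init(void)
    {
        frame_low_arch_init();   /* zones tagged ZONE_LOWMEM */
        /* ... generic bookkeeping that needs low memory ... */
        frame_high_arch_init();  /* zones tagged ZONE_HIGHMEM */
    }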
kernel/arch/abs32le/src/mm/page.c
@@ -56 +56 @@
 }
 
-
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    return physaddr;
-}
-
 void page_fault(unsigned int n __attribute__((unused)), istate_t *istate)
 {
kernel/arch/amd64/Makefile.inc
@@ -86 +86 @@
     arch/$(KARCH)/src/bios/bios.c \
     arch/$(KARCH)/src/interrupt.c \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/amd64/include/mm/frame.h
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern uintptr_t last_frame;
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/amd64/src/mm/page.c
@@ -46 +46 @@
 #include <panic.h>
 #include <align.h>
+#include <macros.h>
 
 void page_arch_init(void)
 {
-    if (config.cpu_active == 1) {
-        uintptr_t cur;
-        unsigned int identity_flags =
-            PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
+    if (config.cpu_active > 1) {
+        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+        return;
+    }
+
+    uintptr_t cur;
+    unsigned int identity_flags =
+        PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
 
     page_mapping_operations = &pt_mapping_operations;
 
     page_table_lock(AS_KERNEL, true);
 
-    /*
-     * PA2KA(identity) mapping for all frames.
-     */
-    for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
-        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
+    /*
+     * PA2KA(identity) mapping for all low-memory frames.
+     */
+    for (cur = 0; cur < min(config.identity_size, config.physmem_end);
+        cur += FRAME_SIZE)
+        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
 
     page_table_unlock(AS_KERNEL, true);
 
-    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
-    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
-    } else
-        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
+    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 }
@@ -94 +98 @@
 }
 
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-        panic("Unable to map physical memory %p (%zu bytes).",
-            (void *) physaddr, size);
-
-    uintptr_t virtaddr = PA2KA(last_frame);
-    pfn_t i;
-
-    page_table_lock(AS_KERNEL, true);
-
-    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
-        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
-
-    page_table_unlock(AS_KERNEL, true);
-
-    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-
-    return virtaddr;
-}
-
 /** @}
  */
kernel/arch/arm32/Makefile.inc
@@ -53 +53 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/arm32/include/mach/integratorcp/integratorcp.h
@@ -103 +103 @@
 extern void icp_cpu_halt(void);
 extern void icp_irq_exception(unsigned int, istate_t *);
-extern void icp_get_memory_extents(uintptr_t *, uintptr_t *);
+extern void icp_get_memory_extents(uintptr_t *, size_t *);
 extern void icp_frame_init(void);
 extern size_t icp_get_irq_count(void);
kernel/arch/arm32/include/mach/testarm/testarm.h
@@ -71 +71 @@
 extern void gxemul_cpu_halt(void);
 extern void gxemul_irq_exception(unsigned int, istate_t *);
-extern void gxemul_get_memory_extents(uintptr_t *, uintptr_t *);
+extern void gxemul_get_memory_extents(uintptr_t *, size_t *);
 extern void gxemul_frame_init(void);
 extern size_t gxemul_get_irq_count(void);
kernel/arch/arm32/include/machine_func.h
@@ -50 +50 @@
     void (*machine_timer_irq_start)(void);
     void (*machine_cpu_halt)(void);
-    void (*machine_get_memory_extents)(uintptr_t *, uintptr_t *);
+    void (*machine_get_memory_extents)(uintptr_t *, size_t *);
     void (*machine_irq_exception)(unsigned int, istate_t *);
     void (*machine_frame_init)(void);
@@ -81 +81 @@
  * @param size Place to store memory size.
  */
-extern void machine_get_memory_extents(uintptr_t *start, uintptr_t *size);
+extern void machine_get_memory_extents(uintptr_t *start, size_t *size);
 
 /** Interrupt exception handler.
kernel/arch/arm32/include/mm/frame.h
@@ -61 +61 @@
 #endif
 
-extern uintptr_t last_frame;
-
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void boot_page_table_free(void);
 #define physmem_print()
kernel/arch/arm32/include/mm/page.h
@@ -54 +54 @@
 
 /* Number of entries in each level. */
-#define PTL0_ENTRIES_ARCH  (2 << 12)  /* 4096 */
+#define PTL0_ENTRIES_ARCH  (1 << 12)  /* 4096 */
 #define PTL1_ENTRIES_ARCH  0
 #define PTL2_ENTRIES_ARCH  0
 /* coarse page tables used (256 * 4 = 1KB per page) */
-#define PTL3_ENTRIES_ARCH  (2 << 8)  /* 256 */
+#define PTL3_ENTRIES_ARCH  (1 << 8)  /* 256 */
 
 /* Page table sizes for each level. */
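The fix above is a genuine off-by-two: (2 << 12) is 8192, not the 4096 the comment promises, and (2 << 8) is 512, not 256. A compile-time sanity check of the corrected constants (illustrative C, not part of the changeset):

    _Static_assert((1 << 12) == 4096, "PTL0 has 4096 entries");
    _Static_assert((1 << 8) == 256, "each coarse PTL3 table has 256 entries");
    /* old values: (2 << 12) == 8192 and (2 << 8) == 512 -- twice too large */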
kernel/arch/arm32/src/mach/gta02/gta02.c
@@ -65 +65 @@
 static void gta02_timer_irq_start(void);
 static void gta02_cpu_halt(void);
-static void gta02_get_memory_extents(uintptr_t *start, uintptr_t *size);
+static void gta02_get_memory_extents(uintptr_t *start, size_t *size);
 static void gta02_irq_exception(unsigned int exc_no, istate_t *istate);
 static void gta02_frame_init(void);
@@ -123 +123 @@
  * @param size Place to store memory size.
  */
-static void gta02_get_memory_extents(uintptr_t *start, uintptr_t *size)
+static void gta02_get_memory_extents(uintptr_t *start, size_t *size)
 {
     *start = GTA02_MEMORY_START + GTA02_MEMORY_SKIP;
kernel/arch/arm32/src/mach/integratorcp/integratorcp.c
@@ -220 +220 @@
  * @param size Place to store memory size.
  */
-void icp_get_memory_extents(uintptr_t *start, uintptr_t *size)
+void icp_get_memory_extents(uintptr_t *start, size_t *size)
 {
     *start = 0;
kernel/arch/arm32/src/mach/testarm/testarm.c
@@ -202 +202 @@
  * @param size Place to store memory size.
  */
-void gxemul_get_memory_extents(uintptr_t *start, uintptr_t *size)
+void gxemul_get_memory_extents(uintptr_t *start, size_t *size)
 {
     *start = 0;
     *size = *((uintptr_t *) (GXEMUL_MP_ADDRESS + GXEMUL_MP_MEMSIZE_OFFSET));
 }
 
kernel/arch/arm32/src/machine_func.c
@@ -85 +85 @@
  * @param size Place to store memory size.
  */
-void machine_get_memory_extents(uintptr_t *start, uintptr_t *size)
+void machine_get_memory_extents(uintptr_t *start, size_t *size)
 {
     (machine_ops->machine_get_memory_extents)(start, size);
kernel/arch/arm32/src/mm/frame.c
@@ -39 +39 @@
 #include <config.h>
 #include <align.h>
+#include <macros.h>
 
-/** Address of the last frame in the memory. */
-uintptr_t last_frame = 0;
+static void frame_common_arch_init(bool low)
+{
+    uintptr_t base;
+    size_t size;
 
-/** Creates memory zones. */
-void frame_arch_init(void)
+    machine_get_memory_extents(&base, &size);
+    base = ALIGN_UP(base, FRAME_SIZE);
+    size = ALIGN_DOWN(size, FRAME_SIZE);
+
+    if (!frame_adjust_zone_bounds(low, &base, &size))
+        return;
+
+    if (low) {
+        zone_create(ADDR2PFN(base), SIZE2FRAMES(size),
+            BOOT_PAGE_TABLE_START_FRAME +
+            BOOT_PAGE_TABLE_SIZE_IN_FRAMES,
+            ZONE_AVAILABLE | ZONE_LOWMEM);
+    } else {
+        pfn_t conf = zone_external_conf_alloc(SIZE2FRAMES(size));
+
+        zone_create(ADDR2PFN(base), SIZE2FRAMES(size), conf,
+            ZONE_AVAILABLE | ZONE_HIGHMEM);
+    }
+
+}
+
+/** Create low memory zones. */
+void frame_low_arch_init(void)
 {
-    uintptr_t mem_start, mem_size;
-    uintptr_t first_frame;
-    uintptr_t num_frames;
+    frame_common_arch_init(true);
 
-    machine_get_memory_extents(&mem_start, &mem_size);
-    first_frame = ALIGN_UP(mem_start, FRAME_SIZE);
-    last_frame = ALIGN_DOWN(mem_start + mem_size, FRAME_SIZE);
-    num_frames = (last_frame - first_frame) >> FRAME_WIDTH;
-
-    /* All memory as one zone */
-    zone_create(first_frame >> FRAME_WIDTH, num_frames,
-        BOOT_PAGE_TABLE_START_FRAME + BOOT_PAGE_TABLE_SIZE_IN_FRAMES, 0);
-
     /* blacklist boot page table */
     frame_mark_unavailable(BOOT_PAGE_TABLE_START_FRAME,
@@ -64 +77 @@
 
     machine_frame_init();
+}
+
+/** Create high memory zones. */
+void frame_high_arch_init(void)
+{
+    frame_common_arch_init(false);
 }
 
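The new frame_adjust_zone_bounds() helper called above lives in the generic frame code, which this merge diff does not show. A hedged sketch of its presumed behavior, clipping each region to one side of the identity-mapped boundary (the boundary value and the exact HelenOS implementation are assumptions):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the kernel's config.identity_size seen elsewhere
     * in this changeset; 1 GiB is an assumed example value. */
    static struct {
        uintptr_t identity_size;
    } config = { .identity_size = 0x40000000 };

    /* Clip [*base, *base + *size) to low memory (below the identity
     * boundary) or high memory (above it); report whether anything
     * remains. Sketch only -- the real helper may differ. */
    bool frame_adjust_zone_bounds(bool low, uintptr_t *base, size_t *size)
    {
        uintptr_t limit = config.identity_size;

        if (low) {
            if (*base >= limit)
                return false;              /* region entirely high */
            if (*base + *size > limit)
                *size = limit - *base;     /* clip to low memory */
        } else {
            if (*base + *size <= limit)
                return false;              /* region entirely low */
            if (*base < limit) {           /* drop the low part */
                *size -= limit - *base;
                *base = limit;
            }
        }
        return *size > 0;
    }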
kernel/arch/arm32/src/mm/page.c
@@ -37 +37 @@
 #include <genarch/mm/page_pt.h>
 #include <mm/page.h>
+#include <arch/mm/frame.h>
 #include <align.h>
 #include <config.h>
@@ -42 +43 @@
 #include <typedefs.h>
 #include <interrupt.h>
-#include <arch/mm/frame.h>
+#include <macros.h>
 
 /** Initializes page tables.
@@ -57 +58 @@
 
     uintptr_t cur;
+
     /* Kernel identity mapping */
-    for (cur = PHYSMEM_START_ADDR; cur < last_frame; cur += FRAME_SIZE)
+    for (cur = PHYSMEM_START_ADDR;
+        cur < min(config.identity_size, config.physmem_end);
+        cur += FRAME_SIZE)
         page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
 
     /* Create mapping for exception table at high offset */
 #ifdef HIGH_EXCEPTION_VECTORS
+    // XXX: fixme to use proper non-identity page
     void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA);
-    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags);
+    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr),
+        flags);
 #else
 #error "Only high exception vector supported now"
@@ -78 +84 @@
 }
 
-/** Maps device into the kernel space.
- *
- * Maps physical address of device into kernel virtual address space (so it can
- * be accessed only by kernel through virtual address).
- *
- * @param physaddr Physical address where device is connected.
- * @param size Length of area where device is present.
- *
- * @return Virtual address where device will be accessible.
- */
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
-        KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) {
-        panic("Unable to map physical memory %p (%d bytes).",
-            (void *) physaddr, size);
-    }
-
-    uintptr_t virtaddr = PA2KA(last_frame);
-    pfn_t i;
-
-    page_table_lock(AS_KERNEL, true);
-    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
-        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
-            physaddr + PFN2ADDR(i),
-            PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
-    }
-    page_table_unlock(AS_KERNEL, true);
-
-    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-    return virtaddr;
-}
-
 /** @}
  */
kernel/arch/ia32/Makefile.inc
@@ -99 +99 @@
     arch/$(KARCH)/src/userspace.c \
     arch/$(KARCH)/src/cpu/cpu.c \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ia32/include/mm/frame.h
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern uintptr_t last_frame;
-
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/ia32/src/mm/frame.c
@@ -46 +46 @@
 #include <print.h>
 
-#define PHYSMEM_LIMIT32  UINT64_C(0x07c000000)
-#define PHYSMEM_LIMIT64  UINT64_C(0x200000000)
-
 size_t hardcoded_unmapped_ktext_size = 0;
 size_t hardcoded_unmapped_kdata_size = 0;
 
-uintptr_t last_frame = 0;
-
-static void init_e820_memory(pfn_t minconf)
+static void init_e820_memory(pfn_t minconf, bool low)
 {
     unsigned int i;
 
     for (i = 0; i < e820counter; i++) {
-        uint64_t base = e820table[i].base_address;
-        uint64_t size = e820table[i].size;
+        uintptr_t base = (uintptr_t) e820table[i].base_address;
+        size_t size = (size_t) e820table[i].size;
 
-#ifdef __32_BITS__
-        /*
-         * XXX FIXME:
-         *
-         * Ignore zones which start above PHYSMEM_LIMIT32
-         * or clip zones which go beyond PHYSMEM_LIMIT32.
-         *
-         * The PHYSMEM_LIMIT32 (2 GB - 64 MB) is a rather
-         * arbitrary constant which allows to have at
-         * least 64 MB in the kernel address space to
-         * map hardware resources.
-         *
-         * The kernel uses fixed 1:1 identity mapping
-         * of the physical memory with 2:2 GB split.
-         * This is a severe limitation of the current
-         * kernel memory management.
-         *
-         */
-
-        if (base > PHYSMEM_LIMIT32)
+        if (!frame_adjust_zone_bounds(low, &base, &size))
             continue;
-
-        if (base + size > PHYSMEM_LIMIT32)
-            size = PHYSMEM_LIMIT32 - base;
-#endif
-
-#ifdef __64_BITS__
-        /*
-         * XXX FIXME:
-         *
-         * Ignore zones which start above PHYSMEM_LIMIT64
-         * or clip zones which go beyond PHYSMEM_LIMIT64.
-         *
-         * The PHYSMEM_LIMIT64 (8 GB) is the size of the
-         * fixed 1:1 identically mapped physical memory
-         * accessible during the bootstrap process.
-         * This is a severe limitation of the current
-         * kernel memory management.
-         *
-         */
-
-        if (base > PHYSMEM_LIMIT64)
-            continue;
-
-        if (base + size > PHYSMEM_LIMIT64)
-            size = PHYSMEM_LIMIT64 - base;
-#endif
 
         if (e820table[i].type == MEMMAP_MEMORY_AVAILABLE) {
@@ -116 +66 @@
                 FRAME_SIZE);
 
+            size_t count = SIZE2FRAMES(new_size);
             pfn_t pfn = ADDR2PFN(new_base);
-            size_t count = SIZE2FRAMES(new_size);
+            pfn_t conf;
 
-            pfn_t conf;
-            if ((minconf < pfn) || (minconf >= pfn + count))
-                conf = pfn;
-            else
-                conf = minconf;
-
-            zone_create(pfn, count, conf, ZONE_AVAILABLE);
-
-            // XXX this has to be removed
-            if (last_frame < ALIGN_UP(new_base + new_size, FRAME_SIZE))
-                last_frame = ALIGN_UP(new_base + new_size, FRAME_SIZE);
+            if (low) {
+                if ((minconf < pfn) || (minconf >= pfn + count))
+                    conf = pfn;
+                else
+                    conf = minconf;
+                zone_create(pfn, count, conf,
+                    ZONE_AVAILABLE | ZONE_LOWMEM);
+            } else {
+                conf = zone_external_conf_alloc(count);
+                zone_create(pfn, count, conf,
+                    ZONE_AVAILABLE | ZONE_HIGHMEM);
+            }
         } else if ((e820table[i].type == MEMMAP_MEMORY_ACPI) ||
             (e820table[i].type == MEMMAP_MEMORY_NVS)) {
@@ -179 +131 @@
 
 
-void frame_arch_init(void)
+void frame_low_arch_init(void)
 {
     pfn_t minconf;
@@ -192 +144 @@
 #endif
 
-    init_e820_memory(minconf);
+    init_e820_memory(minconf, true);
 
     /* Reserve frame 0 (BIOS data) */
@@ -206 +158 @@
 }
 
+void frame_high_arch_init(void)
+{
+    if (config.cpu_active == 1)
+        init_e820_memory(0, false);
+}
+
 /** @}
  */
kernel/arch/ia32/src/mm/page.c
@@ -49 +49 @@
 #include <print.h>
 #include <interrupt.h>
+#include <macros.h>
 
 void page_arch_init(void)
@@ -55 +56 @@
     int flags;
 
-    if (config.cpu_active == 1) {
-        page_mapping_operations = &pt_mapping_operations;
+    if (config.cpu_active > 1) {
+        /* Fast path for non-boot CPUs */
+        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+        paging_on();
+        return;
+    }
+
+    page_mapping_operations = &pt_mapping_operations;
 
-    /*
-     * PA2KA(identity) mapping for all frames until last_frame.
-     */
-    page_table_lock(AS_KERNEL, true);
-    for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
-        flags = PAGE_CACHEABLE | PAGE_WRITE;
-        if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
-            flags |= PAGE_GLOBAL;
-        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
-    }
-    page_table_unlock(AS_KERNEL, true);
+    /*
+     * PA2KA(identity) mapping for all low-memory frames.
+     */
+    page_table_lock(AS_KERNEL, true);
+    for (cur = 0; cur < min(config.identity_size, config.physmem_end);
+        cur += FRAME_SIZE) {
+        flags = PAGE_CACHEABLE | PAGE_WRITE;
+        if ((PA2KA(cur) >= config.base) &&
+            (PA2KA(cur) < config.base + config.kernel_size))
+            flags |= PAGE_GLOBAL;
+        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
+    }
+    page_table_unlock(AS_KERNEL, true);
 
-    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
-    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
-    } else
-        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
+    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 
     paging_on();
-}
-
-
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-        panic("Unable to map physical memory %p (%zu bytes).",
-            (void *) physaddr, size);
-
-    uintptr_t virtaddr = PA2KA(last_frame);
-    pfn_t i;
-    page_table_lock(AS_KERNEL, true);
-    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
-        uintptr_t addr = PFN2ADDR(i);
-        page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr, PAGE_NOT_CACHEABLE | PAGE_WRITE);
-    }
-    page_table_unlock(AS_KERNEL, true);
-
-    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-
-    return virtaddr;
 }
 
kernel/arch/ia64/Makefile.inc
@@ -52 +52 @@
     arch/$(KARCH)/src/ivt.S \
     arch/$(KARCH)/src/interrupt.c \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ia64/include/arch.h
@@ -36 +36 @@
 #define KERN_ia64_ARCH_H_
 
-#include <arch/drivers/ski.h>
-
 extern void arch_pre_main(void);
 
kernel/arch/ia64/include/asm.h
@@ -39 +39 @@
 #include <typedefs.h>
 #include <arch/register.h>
+#include <arch/legacyio.h>
 #include <trace.h>
 
-#define IA64_IOSPACE_ADDRESS 0xE001000000000000ULL
-
 #define IO_SPACE_BOUNDARY ((void *) (64 * 1024))
 
+/** Map the I/O port address to a legacy I/O address. */
+NO_TRACE static inline uintptr_t p2a(volatile void *p)
+{
+    uintptr_t prt = (uintptr_t) p;
+
+    return legacyio_virt_base + (((prt >> 2) << 12) | (prt & 0xfff));
+}
+
 NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t v)
 {
-    if (port < (ioport8_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        *((ioport8_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;
-    } else {
+    if (port < (ioport8_t *) IO_SPACE_BOUNDARY)
+        *((ioport8_t *) p2a(port)) = v;
+    else
         *port = v;
-    }
 
     asm volatile (
@@ -64 +67 @@
 NO_TRACE static inline void pio_write_16(ioport16_t *port, uint16_t v)
 {
-    if (port < (ioport16_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        *((ioport16_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;
-    } else {
+    if (port < (ioport16_t *) IO_SPACE_BOUNDARY)
+        *((ioport16_t *) p2a(port)) = v;
+    else
         *port = v;
-    }
 
     asm volatile (
@@ -81 +80 @@
 NO_TRACE static inline void pio_write_32(ioport32_t *port, uint32_t v)
 {
-    if (port < (ioport32_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        *((ioport32_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;
-    } else {
+    if (port < (ioport32_t *) IO_SPACE_BOUNDARY)
+        *((ioport32_t *) p2a(port)) = v;
+    else
         *port = v;
-    }
 
     asm volatile (
@@ -105 +100 @@
     );
 
-    if (port < (ioport8_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        v = *((ioport8_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12))));
-    } else {
+    if (port < (ioport8_t *) IO_SPACE_BOUNDARY)
+        v = *((ioport8_t *) p2a(port));
+    else
         v = *port;
-    }
 
     return v;
@@ -126 +117 @@
     );
 
-    if (port < (ioport16_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        v = *((ioport16_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12))));
-    } else {
+    if (port < (ioport16_t *) IO_SPACE_BOUNDARY)
+        v = *((ioport16_t *) p2a(port));
+    else
         v = *port;
-    }
 
     return v;
@@ -147 +134 @@
     );
 
-    if (port < (ioport32_t *) IO_SPACE_BOUNDARY) {
-        uintptr_t prt = (uintptr_t) port;
-
-        v = *((ioport32_t *) (IA64_IOSPACE_ADDRESS +
-            ((prt & 0xfff) | ((prt >> 2) << 12))));
-    } else {
+    if (port < (ioport32_t *) IO_SPACE_BOUNDARY)
+        v = *((ioport32_t *) p2a(port));
+    else
         v = *port;
-    }
 
     return v;
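The p2a() transformation scatters each 4-byte port group into its own 4 KiB page: the port number divided by 4 selects the page, the low 12 bits give the in-page offset. A worked example (assuming legacyio_virt_base == 0, purely illustrative):

    /* port 0x3f8 (the classic COM1 base):
     *   (0x3f8 >> 2) << 12 = 0xfe000
     *    0x3f8 & 0xfff     = 0x3f8
     *   p2a(0x3f8)         = 0xfe000 | 0x3f8 = 0xfe3f8
     * Each port group lands in a distinct page, so per-page protection
     * can gate access to individual legacy I/O ports. */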
kernel/arch/ia64/include/mm/frame.h
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern uintptr_t last_frame;
+extern uintptr_t end_of_identity;
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 #define physmem_print()
 
kernel/arch/ia64/include/mm/page.h
@@ -43 +43 @@
 
 /** Bit width of the TLB-locked portion of kernel address space. */
-#define KERNEL_PAGE_WIDTH  28  /* 256M */
-#define IO_PAGE_WIDTH  26  /* 64M */
-#define FW_PAGE_WIDTH  28  /* 256M */
-
-#define USPACE_IO_PAGE_WIDTH  12  /* 4K */
-
-
-/*
- * Statically mapped IO spaces - offsets to 0xe...00 of virtual addresses
- * because of "minimal virtual bits implemented is 51" it is possible to
- * have values up to 0x0007000000000000
- */
-
-/* Firmware area (bellow 4GB in phys mem) */
-#define FW_OFFSET   0x00000000F0000000
-/* Legacy IO space */
-#define IO_OFFSET   0x0001000000000000
-/* Videoram - now mapped to 0 as VGA text mode vram on 0xb8000 */
-#define VIO_OFFSET  0x0002000000000000
-
+#define KERNEL_PAGE_WIDTH  28  /* 256M */
 
 #define PPN_SHIFT  12
kernel/arch/ia64/src/ia64.c
@@ -45 +45 @@
 #include <arch/drivers/it.h>
 #include <arch/drivers/kbd.h>
+#include <arch/legacyio.h>
 #include <genarch/drivers/ega/ega.h>
 #include <genarch/drivers/i8042/i8042.h>
@@ -51 +52 @@
 #include <genarch/kbrd/kbrd.h>
 #include <genarch/srln/srln.h>
+#include <mm/page.h>
+
+#ifdef MACHINE_ski
+#include <arch/drivers/ski.h>
+#endif
 
 /* NS16550 as a COM 1 */
@@ -58 +64 @@
 
 static uint64_t iosapic_base = 0xfec00000;
+uintptr_t legacyio_virt_base = 0;
 
 /** Performs ia64-specific initialization before main_bsp() is called. */
@@ -80 +87 @@
 static void iosapic_init(void)
 {
-    uint64_t IOSAPIC = PA2KA((sysarg_t)(iosapic_base)) | FW_OFFSET;
+    uintptr_t IOSAPIC = hw_map(iosapic_base, PAGE_SIZE);
     int i;
 
@@ -107 +114 @@
 {
     if (config.cpu_active == 1) {
+        /* Map the page with legacy I/O. */
+        legacyio_virt_base = hw_map(LEGACYIO_PHYS_BASE, LEGACYIO_SIZE);
+
         iosapic_init();
         irq_init(INR_COUNT, INR_COUNT);
@@ -113 +123 @@
 }
 
-void arch_post_cpu_init(void)
-{
+void arch_post_cpu_init(void){
 }
 
@@ -202 +211 @@
     sysinfo_set_item_val("ia64_iospace", NULL, true);
     sysinfo_set_item_val("ia64_iospace.address", NULL, true);
-    sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, IO_OFFSET);
+    sysinfo_set_item_val("ia64_iospace.address.virtual", NULL, LEGACYIO_USER_BASE);
 
kernel/arch/ia64/src/mm/frame.c
@@ -51 +51 @@
 #define MINCONF 1
 
-uintptr_t last_frame = 0;
+uintptr_t end_of_identity = -1ULL;
 
-void frame_arch_init(void)
+static void frame_common_arch_init(bool low)
 {
-    if (config.cpu_active == 1) {
-        unsigned int i;
-        for (i = 0; i < bootinfo->memmap_items; i++) {
-            if (bootinfo->memmap[i].type == MEMMAP_FREE_MEM) {
-                uint64_t base = bootinfo->memmap[i].base;
-                uint64_t size = bootinfo->memmap[i].size;
-                uint64_t abase = ALIGN_UP(base, FRAME_SIZE);
+    unsigned int i;
 
-                if (size > FRAME_SIZE)
-                    size -= abase - base;
+    for (i = 0; i < bootinfo->memmap_items; i++) {
+        if (bootinfo->memmap[i].type != MEMMAP_FREE_MEM)
+            continue;
 
-                if (size > MIN_ZONE_SIZE) {
-                    zone_create(abase >> FRAME_WIDTH,
-                        size >> FRAME_WIDTH,
-                        max(MINCONF, abase >> FRAME_WIDTH),
-                        0);
-                }
-                if (abase + size > last_frame)
-                    last_frame = abase + size;
+        uintptr_t base = bootinfo->memmap[i].base;
+        size_t size = bootinfo->memmap[i].size;
+        uintptr_t abase = ALIGN_UP(base, FRAME_SIZE);
+
+        if (size > FRAME_SIZE)
+            size -= abase - base;
+
+        if (!frame_adjust_zone_bounds(low, &abase, &size))
+            continue;
+
+        if (size > MIN_ZONE_SIZE) {
+            pfn_t pfn = ADDR2PFN(abase);
+            size_t count = SIZE2FRAMES(size);
+
+            if (low) {
+                zone_create(pfn, count, max(MINCONF, pfn),
+                    ZONE_AVAILABLE | ZONE_LOWMEM);
+            } else {
+                pfn_t conf;
+
+                conf = zone_external_conf_alloc(count);
+                zone_create(pfn, count, conf,
+                    ZONE_AVAILABLE | ZONE_HIGHMEM);
             }
         }
-
-        /*
-         * Blacklist ROM regions.
-         */
-        frame_mark_unavailable(ADDR2PFN(ROM_BASE),
-            SIZE2FRAMES(ROM_SIZE));
+    }
+}
 
-        frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
-            SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
-    }
+void frame_low_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
+
+    frame_common_arch_init(true);
+
+    /*
+     * Blacklist ROM regions.
+     */
+    frame_mark_unavailable(ADDR2PFN(ROM_BASE),
+        SIZE2FRAMES(ROM_SIZE));
+
+    frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
+        SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
+
+    /* PA2KA will work only on low-memory. */
+    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
+}
+
+void frame_high_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
+
+    frame_common_arch_init(false);
 }
 
kernel/arch/ia64/src/mm/page.c
@@ -255 +255 @@
 }
 
-uintptr_t hw_map(uintptr_t physaddr, size_t size __attribute__ ((unused)))
-{
-    /* THIS is a dirty hack. */
-    return (uintptr_t)((uint64_t)(PA2KA(physaddr)) + VIO_OFFSET);
-}
-
 /** @}
  */
kernel/arch/ia64/src/mm/tlb.c
@@ -52 +52 @@
 #include <arch.h>
 #include <interrupt.h>
-
-#define IO_FRAME_BASE 0xFFFFC000000
+#include <arch/legacyio.h>
 
 /** Invalidate all TLB entries. */
@@ -467 +466 @@
 }
 
+static bool is_kernel_fault(uintptr_t va)
+{
+    region_register_t rr;
+
+    rr.word = rr_read(VA2VRN(va));
+    rid_t rid = rr.map.rid;
+    return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
+}
+
 /** Instruction TLB fault handler for faults with VHPT turned off.
  *
@@ -480 +488 @@
     va = istate->cr_ifa; /* faulting address */
 
-    page_table_lock(AS, true);
+    ASSERT(!is_kernel_fault(va));
+
     t = page_mapping_find(AS, va, true);
     if (t) {
@@ -488 +497 @@
          */
         itc_pte_copy(t);
-        page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to address space page fault handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             fault_if_from_uspace(istate, "Page fault at %p.",
@@ -522 +529 @@
 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
 {
-    if ((va >= IO_OFFSET) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
+    if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {
         if (TASK) {
-            uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
-                USPACE_IO_PAGE_WIDTH;
+            uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
+                LEGACYIO_SINGLE_PAGE_WIDTH;
 
             if (is_io_page_accessible(io_page)) {
                 uint64_t page, frame;
 
-                page = IO_OFFSET +
-                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
-                frame = IO_FRAME_BASE +
-                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
+                page = LEGACYIO_USER_BASE +
+                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
+                frame = LEGACYIO_PHYS_BASE +
+                    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
 
                 tlb_entry_t entry;
@@ -547 +554 @@
                 entry.ar = AR_READ | AR_WRITE;
                 entry.ppn = frame >> PPN_SHIFT;
-                entry.ps = USPACE_IO_PAGE_WIDTH;
+                entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;
 
                 dtc_mapping_insert(page, TASK->as->asid, entry);
@@ -570 +577 @@
 {
     if (istate->cr_isr.sp) {
-        /* Speculative load. Deffer the exception
-           until a more clever approach can be used.
-
-           Currently if we try to find the mapping
-           for the speculative load while in the kernel,
-           we might introduce a livelock because of
-           the possibly invalid values of the address.  */
+        /*
+         * Speculative load. Deffer the exception until a more clever
+         * approach can be used. Currently if we try to find the
+         * mapping for the speculative load while in the kernel, we
+         * might introduce a livelock because of the possibly invalid
+         * values of the address.
+         */
         istate->cr_ipsr.ed = true;
         return;
@@ -582 +589 @@
 
     uintptr_t va = istate->cr_ifa; /* faulting address */
-
-    region_register_t rr;
-    rr.word = rr_read(VA2VRN(va));
-    rid_t rid = rr.map.rid;
-    if (RID2ASID(rid) == ASID_KERNEL) {
-        if (VA2VRN(va) == VRN_KERNEL) {
+    as_t *as = AS;
+
+    if (is_kernel_fault(va)) {
+        if (va < end_of_identity) {
             /*
-             * Provide KA2PA(identity) mapping for faulting piece of
-             * kernel address space.
+             * Create kernel identity mapping for low memory.
              */
             dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
             return;
+        } else {
+            as = AS_KERNEL;
         }
     }
 
 
-    page_table_lock(AS, true);
-    pte_t *entry = page_mapping_find(AS, va, true);
+    pte_t *entry = page_mapping_find(as, va, true);
     if (entry) {
         /*
@@ -606 +611 @@
          */
         dtc_pte_copy(entry);
-        page_table_unlock(AS, true);
     } else {
-        page_table_unlock(AS, true);
         if (try_memmap_io_insertion(va, istate))
             return;
@@ -647 +650 @@
     uintptr_t va;
     pte_t *t;
+    as_t *as = AS;
 
     va = istate->cr_ifa; /* faulting address */
 
-    page_table_lock(AS, true);
-    t = page_mapping_find(AS, va, true);
+    if (is_kernel_fault(va))
+        as = AS_KERNEL;
+
+    t = page_mapping_find(as, va, true);
     ASSERT((t) && (t->p));
     if ((t) && (t->p) && (t->w)) {
@@ -667 +673 @@
         }
     }
-    page_table_unlock(AS, true);
 }
@@ -682 +687 @@
 
     va = istate->cr_ifa; /* faulting address */
-
-    page_table_lock(AS, true);
+
+    ASSERT(!is_kernel_fault(va));
+
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -700 +706 @@
         }
     }
-    page_table_unlock(AS, true);
 }
@@ -713 +718 @@
     uintptr_t va;
     pte_t *t;
+    as_t *as = AS;
 
     va = istate->cr_ifa; /* faulting address */
 
-    page_table_lock(AS, true);
-    t = page_mapping_find(AS, va, true);
+    if (is_kernel_fault(va))
+        as = AS_KERNEL;
+
+    t = page_mapping_find(as, va, true);
     ASSERT((t) && (t->p));
     if ((t) && (t->p)) {
@@ -733 +741 @@
         }
     }
-    page_table_unlock(AS, true);
 }
@@ -748 +755 @@
 
     va = istate->cr_ifa; /* faulting address */
+
+    ASSERT(!is_kernel_fault(va));
 
     /*
      * Assume a write to a read-only page.
      */
-    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -761 +769 @@
         panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
     }
-    page_table_unlock(AS, true);
 }
@@ -777 +784 @@
     va = istate->cr_ifa; /* faulting address */
 
-    page_table_lock(AS, true);
+    ASSERT(!is_kernel_fault(va));
+
     t = page_mapping_find(AS, va, true);
     ASSERT(t);
@@ -790 +798 @@
         else
             dtc_pte_copy(t);
-        page_table_unlock(AS, true);
     } else {
-        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
             fault_if_from_uspace(istate, "Page fault at %p.",
kernel/arch/ia64/src/start.S
@@ -38 +38 @@
 #define KERNEL_TRANSLATION_I 0x0010000000000661
 #define KERNEL_TRANSLATION_D 0x0010000000000661
-#define KERNEL_TRANSLATION_VIO 0x0010000000000671
-#define KERNEL_TRANSLATION_IO 0x00100FFFFC000671
-#define KERNEL_TRANSLATION_FW 0x00100000F0000671
 
 .section K_TEXT_START, "ax"
@@ -88 +85 @@
     itr.d dtr[r0] = r10
 
-    movl r7 = 1
-    movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET
-    mov cr.ifa = r8
-    movl r10 = (KERNEL_TRANSLATION_VIO)
-    itr.d dtr[r7] = r10
-
-    mov r11 = cr.itir
-    movl r10 = ~0xfc
-    and r10 = r10, r11
-    movl r11 = (IO_PAGE_WIDTH << PS_SHIFT)
-    or r10 = r10, r11
-    mov cr.itir = r10
-
-    movl r7 = 2
-    movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET
-    mov cr.ifa = r8
-    movl r10 = (KERNEL_TRANSLATION_IO)
-    itr.d dtr[r7] = r10
-
-    # Setup mapping for firmware area (also SAPIC)
-
-    mov r11 = cr.itir
-    movl r10 = ~0xfc
-    and r10 = r10, r11
-    movl r11 = (FW_PAGE_WIDTH << PS_SHIFT)
-    or r10 = r10, r11
-    mov cr.itir = r10
-
-    movl r7 = 3
-    movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET
-    mov cr.ifa = r8
-    movl r10 = (KERNEL_TRANSLATION_FW)
-    itr.d dtr[r7] = r10
-
-    # Initialize DSR
+    # Initialize DCR
 
     movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK)
kernel/arch/mips32/Makefile.inc
@@ -63 +63 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/frame.c \
     arch/$(KARCH)/src/mm/page.c \
kernel/arch/mips32/include/mm/as.h
@@ -39 +39 @@
 
 #define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
-#define KERNEL_ADDRESS_SPACE_END_ARCH    UINT32_C(0x9fffffff)
+#define KERNEL_ADDRESS_SPACE_END_ARCH    UINT32_C(0xffffffff)
 #define USER_ADDRESS_SPACE_START_ARCH    UINT32_C(0x00000000)
 #define USER_ADDRESS_SPACE_END_ARCH      UINT32_C(0x7fffffff)
kernel/arch/mips32/include/mm/frame.h
@@ -41 +41 @@
 #ifndef __ASM__
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/mips32/src/mm/frame.c
@@ -131 +131 @@
 }
 
-static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
-{
-    if (end_frame > start_frame) {
-        /* Convert 1M frames to 16K frames */
-        pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
-        pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
-
+static void frame_add_region(pfn_t start_frame, pfn_t end_frame, bool low)
+{
+    if (end_frame <= start_frame)
+        return;
+
+    uintptr_t base = start_frame << ZERO_PAGE_WIDTH;
+    size_t size = (end_frame - start_frame) << ZERO_PAGE_WIDTH;
+
+    if (!frame_adjust_zone_bounds(low, &base, &size))
+        return;
+
+    pfn_t first = ADDR2PFN(base);
+    size_t count = SIZE2FRAMES(size);
+    pfn_t conf_frame;
+
+    if (low) {
         /* Interrupt vector frame is blacklisted */
-        pfn_t conf_frame;
         if (first == 0)
             conf_frame = 1;
         else
             conf_frame = first;
-
-        zone_create(first, count, conf_frame, 0);
-
-        if (phys_regions_count < MAX_REGIONS) {
-            phys_regions[phys_regions_count].start = first;
-            phys_regions[phys_regions_count].count = count;
-            phys_regions_count++;
-        }
+        zone_create(first, count, conf_frame,
+            ZONE_AVAILABLE | ZONE_LOWMEM);
+    } else {
+        conf_frame = zone_external_conf_alloc(count);
+        zone_create(first, count, conf_frame,
+            ZONE_AVAILABLE | ZONE_HIGHMEM);
+    }
+
+
+    if (phys_regions_count < MAX_REGIONS) {
+        phys_regions[phys_regions_count].start = first;
+        phys_regions[phys_regions_count].count = count;
+        phys_regions_count++;
     }
 }
@@ -165 +178 @@
  *
  */
-void frame_arch_init(void)
+void frame_low_arch_init(void)
 {
     ipl_t ipl = interrupts_disable();
@@ -224 +237 @@
 
         if (!avail) {
-            frame_add_region(start_frame, frame);
+            frame_add_region(start_frame, frame, true);
             start_frame = frame + 1;
             avail = true;
@@ -230 +243 @@
     }
 
-    frame_add_region(start_frame, frame);
+    frame_add_region(start_frame, frame, true);
 
     /* Blacklist interrupt vector frame */
@@ -246 +259 @@
 }
 
+void frame_high_arch_init(void)
+{
+}
 
 void physmem_print(void)
kernel/arch/mips32/src/mm/page.c
@@ -41 +41 @@
 {
     page_mapping_operations = &pt_mapping_operations;
-}
-
-/** Map device into kernel space
- * - on mips, all devices are already mapped into kernel space,
- *   translate the physical address to uncached area
- */
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    return physaddr + 0xa0000000;
+    as_switch(NULL, AS_KERNEL);
 }
 
kernel/arch/mips32/src/mm/tlb.c
@@ -95 +95 @@
 
     badvaddr = cp0_badvaddr_read();
-
-    mutex_lock(&AS->lock);
     asid = AS->asid;
-    mutex_unlock(&AS->lock);
 
     pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
kernel/arch/mips64/Makefile.inc
@@ -55 +55 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
+    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/frame.c \
     arch/$(KARCH)/src/mm/page.c \
kernel/arch/mips64/include/mm/frame.h
@@ -41 +41 @@
 #ifndef __ASM__
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/mips64/src/mm/frame.c
@@ -123 +123 @@
 }
 
-static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
-{
-    if (end_frame > start_frame) {
-        /* Convert 1M frames to 16K frames */
-        pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
-        pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
-
+static void frame_add_region(pfn_t start_frame, pfn_t end_frame, bool low)
+{
+    if (end_frame <= start_frame)
+        return;
+
+    uintptr_t base = start_frame << ZERO_PAGE_WIDTH;
+    size_t size = (end_frame - start_frame) << ZERO_PAGE_WIDTH;
+
+    if (!frame_adjust_zone_bounds(low, &base, &size))
+        return;
+
+    pfn_t first = ADDR2PFN(base);
+    size_t count = SIZE2FRAMES(size);
+    pfn_t conf_frame;
+
+    if (low) {
         /* Interrupt vector frame is blacklisted */
-        pfn_t conf_frame;
         if (first == 0)
             conf_frame = 1;
         else
             conf_frame = first;
+        zone_create(first, count, conf_frame,
+            ZONE_AVAILABLE | ZONE_LOWMEM);
+    } else {
+        conf_frame = zone_external_conf_alloc(count);
+        zone_create(first, count, conf_frame,
+            ZONE_AVAILABLE | ZONE_HIGHMEM);
+    }
 
-        zone_create(first, count, conf_frame, 0);
 
-        if (phys_regions_count < MAX_REGIONS) {
-            phys_regions[phys_regions_count].start = first;
-            phys_regions[phys_regions_count].count = count;
-            phys_regions_count++;
-        }
+    if (phys_regions_count < MAX_REGIONS) {
+        phys_regions[phys_regions_count].start = first;
+        phys_regions[phys_regions_count].count = count;
+        phys_regions_count++;
     }
 }
@@ -156 +169 @@
  *
  */
-void frame_arch_init(void)
+void frame_low_arch_init(void)
 {
     ipl_t ipl = interrupts_disable();
@@ -207 +220 @@
 
         if (!avail) {
-            frame_add_region(start_frame, frame);
+            frame_add_region(start_frame, frame, true);
             start_frame = frame + 1;
             avail = true;
@@ -213 +226 @@
     }
 
-    frame_add_region(start_frame, frame);
+    frame_add_region(start_frame, frame, true);
 
     /* Blacklist interrupt vector frame */
@@ -229 +242 @@
 }
 
+void frame_high_arch_init(void)
+{
+}
+
 void physmem_print(void)
 {
kernel/arch/mips64/src/mm/page.c
@@ -43 +43 @@
 }
 
-/** Map device into kernel space
- * - on mips, all devices are already mapped into kernel space,
- *   translate the physical address to uncached area
- */
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    return physaddr + 0xffffffffa0000000;
-}
-
 /** @}
  */
kernel/arch/ppc32/Makefile.inc
@@ -52 +52 @@
     arch/$(KARCH)/src/proc/scheduler.c \
     arch/$(KARCH)/src/ddi/ddi.c \
+    arch/$(KARCH)/src/mm/km.c \
    arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ppc32/include/mm/frame.h
@@ -44 +44 @@
 #include <trace.h>
 
-extern uintptr_t last_frame;
-
 NO_TRACE static inline uint32_t physmem_top(void)
 {
@@ -58 +56 @@
 }
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/ppc32/src/mm/frame.c
@@ -40 +40 @@
 #include <print.h>
 
-uintptr_t last_frame = 0;
 memmap_t memmap;
 
@@ -54 +53 @@
 }
 
-void frame_arch_init(void)
+static void frame_common_arch_init(bool low)
 {
     pfn_t minconf = 2;
@@ -61 +60 @@
     for (i = 0; i < memmap.cnt; i++) {
         /* To be safe, make the available zone possibly smaller */
-        uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
+        uintptr_t base = ALIGN_UP((uintptr_t) memmap.zones[i].start,
             FRAME_SIZE);
-        size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
-            (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
+        size_t size = ALIGN_DOWN(memmap.zones[i].size -
+            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
 
-        pfn_t pfn = ADDR2PFN(new_start);
-        size_t count = SIZE2FRAMES(new_size);
-
+        if (!frame_adjust_zone_bounds(low, &base, &size))
+            return;
+
+        pfn_t pfn = ADDR2PFN(base);
+        size_t count = SIZE2FRAMES(size);
         pfn_t conf;
-        if ((minconf < pfn) || (minconf >= pfn + count))
-            conf = pfn;
-        else
-            conf = minconf;
-
-        zone_create(pfn, count, conf, 0);
-
-        if (last_frame < ALIGN_UP(new_start + new_size, FRAME_SIZE))
-            last_frame = ALIGN_UP(new_start + new_size, FRAME_SIZE);
+
+        if (low) {
+            if ((minconf < pfn) || (minconf >= pfn + count))
+                conf = pfn;
+            else
+                conf = minconf;
+            zone_create(pfn, count, conf,
+                ZONE_AVAILABLE | ZONE_LOWMEM);
+        } else {
+            conf = zone_external_conf_alloc(count);
+            zone_create(pfn, count, conf,
+                ZONE_AVAILABLE | ZONE_HIGHMEM);
+        }
     }
+
+}
+
+void frame_low_arch_init(void)
+{
+    frame_common_arch_init(true);
 
     /* First is exception vector, second is 'implementation specific',
@@ -92 +103 @@
 }
 
+void frame_high_arch_init(void)
+{
+    frame_common_arch_init(false);
+}
+
 /** @}
  */
kernel/arch/ppc32/src/mm/page.c
@@ -46 +46 @@
 }
 
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
-        KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
-        panic("Unable to map physical memory %p (%zu bytes).",
-            (void *) physaddr, size);
-
-    uintptr_t virtaddr = PA2KA(last_frame);
-    pfn_t i;
-    page_table_lock(AS_KERNEL, true);
-    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
-        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
-            physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
-    page_table_unlock(AS_KERNEL, true);
-
-    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
-
-    return virtaddr;
-}
-
 /** @}
  */
kernel/arch/sparc64/Makefile.inc
@@ -68 +68 @@
     arch/$(KARCH)/src/fpu_context.c \
     arch/$(KARCH)/src/dummy.s \
+    arch/$(KARCH)/src/mm/$(USARCH)/km.c \
     arch/$(KARCH)/src/mm/$(USARCH)/as.c \
     arch/$(KARCH)/src/mm/$(USARCH)/frame.c \
kernel/arch/sparc64/include/mm/sun4u/frame.h
@@ -72 +72 @@
 typedef union frame_address frame_address_t;
 
-extern uintptr_t last_frame;
 extern uintptr_t end_of_identity;
 
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 #define physmem_print()
 
kernel/arch/sparc64/include/mm/sun4v/frame.h
@@ -46 +46 @@
 #include <typedefs.h>
 
-extern uintptr_t last_frame;
-extern void frame_arch_init(void);
+extern void frame_low_arch_init(void);
+extern void frame_high_arch_init(void);
 #define physmem_print()
 
kernel/arch/sparc64/src/mm/page.c
@@ -51 +51 @@
 }
 
-/** Map memory-mapped device into virtual memory.
- *
- * We are currently using identity mapping for mapping device registers.
- *
- * @param physaddr Physical address of the page where the device is
- *                 located.
- * @param size     Size of the device's registers.
- *
- * @return Virtual address of the page where the device is mapped.
- *
- */
-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-    return PA2KA(physaddr);
-}
-
 /** @}
  */
kernel/arch/sparc64/src/mm/sun4u/frame.c
@@ -41 +41 @@
 #include <macros.h>
 
-uintptr_t last_frame = (uintptr_t) NULL;
-
 /** Create memory zones according to information stored in memmap.
  *
  * Walk the memory map and create frame zones according to it.
  */
-void frame_arch_init(void)
+static void frame_common_arch_init(bool low)
 {
-    if (config.cpu_active == 1) {
-        unsigned int i;
+    unsigned int i;
+
+    for (i = 0; i < memmap.cnt; i++) {
+        uintptr_t base;
+        size_t size;
+
+        /*
+         * The memmap is created by HelenOS boot loader.
+         * It already contains no holes.
+         */
+
+        /* To be safe, make the available zone possibly smaller */
+        base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
+        size = ALIGN_DOWN(memmap.zones[i].size -
+            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
 
-        for (i = 0; i < memmap.cnt; i++) {
-            /* To be safe, make the available zone possibly smaller */
-            uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
-                FRAME_SIZE);
-            size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
-                (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
-
-            /*
-             * The memmap is created by HelenOS boot loader.
-             * It already contains no holes.
-             */
-
-            pfn_t confdata = ADDR2PFN(new_start);
-
+        if (!frame_adjust_zone_bounds(low, &base, &size))
+            continue;
+
+        pfn_t confdata;
+        pfn_t pfn = ADDR2PFN(base);
+        size_t count = SIZE2FRAMES(size);
+
+        if (low) {
+            confdata = pfn;
             if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
                 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
 
-            zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size),
-                confdata, 0);
-
-            last_frame = max(last_frame, new_start + new_size);
+            zone_create(pfn, count, confdata,
+                ZONE_AVAILABLE | ZONE_LOWMEM);
+        } else {
+            confdata = zone_external_conf_alloc(count);
+            zone_create(pfn, count, confdata,
+                ZONE_AVAILABLE | ZONE_HIGHMEM);
         }
+    }
+}
 
-        /*
-         * On sparc64, physical memory can start on a non-zero address.
-         * The generic frame_init() only marks PFN 0 as not free, so we
-         * must mark the physically first frame not free explicitly
-         * here, no matter what is its address.
-         */
-        frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
-    }
+void frame_low_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
 
-    end_of_identity = PA2KA(last_frame);
+    frame_common_arch_init(true);
+
+    /*
+     * On sparc64, physical memory can start on a non-zero address.
+     * The generic frame_init() only marks PFN 0 as not free, so we
+     * must mark the physically first frame not free explicitly
+     * here, no matter what is its address.
+     */
+    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
+
+    /* PA2KA will work only on low-memory. */
+    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
+}
+
+void frame_high_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
+
+    frame_common_arch_init(false);
 }
 
kernel/arch/sparc64/src/mm/sun4u/tlb.c
@@ -206 +206 @@
     pte_t *t;
 
-    page_table_lock(AS, true);
     t = page_mapping_find(AS, page_16k, true);
     if (t && PTE_EXECUTABLE(t)) {
@@ -218 +217 @@
         itsb_pte_copy(t, index);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
@@ -224 +222 @@
          * handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
             AS_PF_FAULT) {
@@ -250 +247 @@
     size_t index;
     pte_t *t;
+    as_t *as = AS;
 
     page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
@@ -261 +259 @@
                 "Dereferencing NULL pointer.");
         } else if (page_8k >= end_of_identity) {
-            /*
-             * The kernel is accessing the I/O space.
-             * We still do identity mapping for I/O,
-             * but without caching.
-             */
-            dtlb_insert_mapping(page_8k, KA2PA(page_8k),
-                PAGESIZE_8K, false, false);
-            return;
+            /* Kernel non-identity. */
+            as = AS_KERNEL;
+        } else {
+            do_fast_data_access_mmu_miss_fault(istate, tag,
+                "Unexpected kernel page fault.");
         }
-        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
-            "kernel page fault.");
-    }
-
-    page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k, true);
+    }
+
+    t = page_mapping_find(as, page_16k, true);
     if (t) {
         /*
@@ -286 +278 @@
         dtsb_pte_copy(t, index, true);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
         * Forward the page fault to the address space page fault
-         * handler.
-         */
-        page_table_unlock(AS, true);
+         * handler.
+         */
         if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
             AS_PF_FAULT) {
@@ -314 +304 @@
     size_t index;
     pte_t *t;
+    as_t *as = AS;
 
     page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
     index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
-    page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k, true);
+    if (tag.context == ASID_KERNEL)
+        as = AS_KERNEL;
+
+    t = page_mapping_find(as, page_16k, true);
     if (t && PTE_WRITABLE(t)) {
@@ -334 +327 @@
         dtsb_pte_copy(t, index, false);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
@@ -340 +332 @@
          * handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
             AS_PF_FAULT) {
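The common pattern across these TLB-miss handlers: pick the address space by fault origin, then consult the mappings without taking the page-table lock (the third argument of page_mapping_find). A condensed paraphrase of the dispatch logic above (not compilable on its own; the "nolock" reading of the flag is an inference):

    as_t *as = AS;
    if (tag.context == ASID_KERNEL)     /* or: page_8k >= end_of_identity */
        as = AS_KERNEL;                 /* kernel non-identity mappings */

    pte_t *t = page_mapping_find(as, page_16k, true /* presumably nolock */);
    if (t && PTE_WRITABLE(t)) {
        /* update the PTE and copy it into the DTLB/DTSB */
    } else if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
        /* fall back to the generic fault path */
    }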
kernel/arch/sparc64/src/mm/sun4v/frame.c
@@ -45 +45 @@
  * Walk the memory map and create frame zones according to it.
  */
-void frame_arch_init(void)
+static void frame_common_arch_init(bool low)
 {
-    if (config.cpu_active == 1) {
-        unsigned int i;
+    unsigned int i;
 
-        for (i = 0; i < memmap.cnt; i++) {
-            /* To be safe, make the available zone possibly smaller */
-            uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
-                FRAME_SIZE);
-            size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
-                (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
-
-            /*
-             * The memmap is created by HelenOS boot loader.
-             * It already contains no holes.
-             */
-
-            pfn_t confdata = ADDR2PFN(new_start);
-
+    for (i = 0; i < memmap.cnt; i++) {
+        uintptr_t base;
+        size_t size;
+
+        /*
+         * The memmap is created by HelenOS boot loader.
+         * It already contains no holes.
+         */
+
+        /* To be safe, make the available zone possibly smaller */
+        base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
+        size = ALIGN_DOWN(memmap.zones[i].size -
+            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
+
+        if (!frame_adjust_zone_bounds(low, &base, &size))
+            continue;
+
+        pfn_t confdata;
+        pfn_t pfn = ADDR2PFN(base);
+        size_t count = SIZE2FRAMES(size);
+
+        if (low) {
+            confdata = pfn;
             if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
                 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
 
-            zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size),
-                confdata, 0);
+            zone_create(pfn, count, confdata,
+                ZONE_AVAILABLE | ZONE_LOWMEM);
+        } else {
+            confdata = zone_external_conf_alloc(count);
+            zone_create(pfn, count, confdata,
+                ZONE_AVAILABLE | ZONE_HIGHMEM);
         }
+    }
+}
 
-        /*
-         * On sparc64, physical memory can start on a non-zero address.
-         * The generic frame_init() only marks PFN 0 as not free, so we
-         * must mark the physically first frame not free explicitly
-         * here, no matter what is its address.
-         */
-        frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
-    }
+void frame_low_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
+
+    frame_common_arch_init(true);
+
+    /*
+     * On sparc64, physical memory can start on a non-zero address.
+     * The generic frame_init() only marks PFN 0 as not free, so we
+     * must mark the physically first frame not free explicitly
+     * here, no matter what is its address.
+     */
+    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
+}
+
+void frame_high_arch_init(void)
+{
+    if (config.cpu_active > 1)
+        return;
+
+    frame_common_arch_init(false);
 }
 
kernel/arch/sparc64/src/mm/sun4v/tlb.c
@@ -218 +218 @@
     pte_t *t;
 
-    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
 
@@ -231 +230 @@
         itsb_pte_copy(t);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
@@ -237 +235 @@
          * handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             do_fast_instruction_access_mmu_miss_fault(istate,
@@ -274 +271 @@
     }
 
-    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     if (t) {
@@ -286 +282 @@
         dtsb_pte_copy(t, true);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
@@ -292 +287 @@
          * handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
             do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
@@ -316 +310 @@
     uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
 
-    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     if (t && PTE_WRITABLE(t)) {
@@ -331 +324 @@
         dtsb_pte_copy(t, false);
 #endif
-        page_table_unlock(AS, true);
     } else {
         /*
@@ -337 +329 @@
          * handler.
          */
-        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
             do_fast_data_access_protection_fault(istate, page_and_ctx,
kernel/genarch/include/mm/page_ht.h
@@ -43 +43 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/slab.h>
 #include <synch/mutex.h>
 #include <adt/hash_table.h>
@@ -64 +65 @@
 extern page_mapping_operations_t ht_mapping_operations;
 
+extern slab_cache_t *pte_cache;
 extern mutex_t page_ht_lock;
 extern hash_table_t page_ht;
kernel/genarch/src/mm/as_ht.c
@@ -41 +41 @@
 #include <mm/as.h>
 #include <mm/frame.h>
+#include <mm/slab.h>
 #include <typedefs.h>
 #include <memstr.h>
@@ -77 +78 @@
     hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
     mutex_initialize(&page_ht_lock, MUTEX_PASSIVE);
+    pte_cache = slab_cache_create("pte_cache", sizeof(pte_t), 0, NULL, NULL,
+        SLAB_CACHE_MAGDEFERRED);
 }
 
kernel/genarch/src/mm/as_pt.c
@@ -73 +73 @@
 pte_t *ptl0_create(unsigned int flags)
 {
-    pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA);
+    pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE,
+        FRAME_LOWMEM | FRAME_KA);
     size_t table_size = FRAME_SIZE << PTL0_SIZE;
 
@@ -89 +90 @@
         (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
 
-    uintptr_t src =
-        (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
-    uintptr_t dst =
-        (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+    uintptr_t src = (uintptr_t)
+        &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+    uintptr_t dst = (uintptr_t)
+        &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
 
     memsetb(dst_ptl0, table_size, 0);
kernel/genarch/src/mm/page_ht.c
r852052d rc520034 59 59 static void ht_mapping_remove(as_t *, uintptr_t); 60 60 static pte_t *ht_mapping_find(as_t *, uintptr_t, bool); 61 static void ht_mapping_make_global(uintptr_t, size_t); 62 63 slab_cache_t *pte_cache = NULL; 61 64 62 65 /** … … 86 89 .mapping_insert = ht_mapping_insert, 87 90 .mapping_remove = ht_mapping_remove, 88 .mapping_find = ht_mapping_find 91 .mapping_find = ht_mapping_find, 92 .mapping_make_global = ht_mapping_make_global 89 93 }; 90 94 … … 163 167 pte_t *pte = hash_table_get_instance(item, pte_t, link); 164 168 165 free(pte);169 slab_free(pte_cache, pte); 166 170 } 167 171 … … 188 192 189 193 if (!hash_table_find(&page_ht, key)) { 190 pte_t *pte = (pte_t *) malloc(sizeof(pte_t),FRAME_ATOMIC);194 pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC); 191 195 ASSERT(pte != NULL); 192 196 … … 260 264 } 261 265 266 void ht_mapping_make_global(uintptr_t base, size_t size) 267 { 268 /* nothing to do */ 269 } 270 262 271 /** @} 263 272 */ -
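PTEs for the hash-table-based page tables now come from a dedicated slab cache (pte_cache, created in as_ht.c above) rather than from the generic malloc(), and the allocations are flagged FRAME_LOWMEM so the entries stay in identity-mapped memory. The create-once/allocate-many pattern can be mimicked with a toy free-list cache; the following is only an illustration of the pattern, not the kernel's slab allocator (error checking elided):

#include <stdio.h>
#include <stdlib.h>

typedef struct pte { struct pte *next; unsigned long frame; } pte_t;

typedef struct { pte_t *freelist; size_t obj_size; } cache_t;

static cache_t *cache_create(size_t obj_size)
{
    cache_t *c = malloc(sizeof(*c));
    c->freelist = NULL;
    c->obj_size = obj_size;
    return c;
}

static pte_t *cache_alloc(cache_t *c)
{
    if (c->freelist) {            /* reuse a previously freed object */
        pte_t *p = c->freelist;
        c->freelist = p->next;
        return p;
    }
    return malloc(c->obj_size);   /* grow the cache */
}

static void cache_free(cache_t *c, pte_t *p)
{
    p->next = c->freelist;        /* keep the object around for reuse */
    c->freelist = p;
}

int main(void)
{
    cache_t *pte_cache = cache_create(sizeof(pte_t));
    pte_t *a = cache_alloc(pte_cache);
    cache_free(pte_cache, a);
    pte_t *b = cache_alloc(pte_cache);
    printf("object reused: %s\n", (a == b) ? "yes" : "no");
    return 0;
}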
kernel/genarch/src/mm/page_pt.c
r852052d rc520034 39 39 #include <mm/page.h> 40 40 #include <mm/frame.h> 41 #include <mm/km.h> 41 42 #include <mm/as.h> 42 43 #include <arch/mm/page.h> … … 45 46 #include <arch/asm.h> 46 47 #include <memstr.h> 48 #include <align.h> 49 #include <macros.h> 47 50 48 51 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 49 52 static void pt_mapping_remove(as_t *, uintptr_t); 50 53 static pte_t *pt_mapping_find(as_t *, uintptr_t, bool); 54 static void pt_mapping_make_global(uintptr_t, size_t); 51 55 52 56 page_mapping_operations_t pt_mapping_operations = { 53 57 .mapping_insert = pt_mapping_insert, 54 58 .mapping_remove = pt_mapping_remove, 55 .mapping_find = pt_mapping_find 59 .mapping_find = pt_mapping_find, 60 .mapping_make_global = pt_mapping_make_global 56 61 }; 57 62 … … 75 80 76 81 if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) { 77 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, FRAME_KA); 82 pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, 83 FRAME_LOWMEM | FRAME_KA); 78 84 memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0); 79 85 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt)); 80 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 86 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), 87 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 88 PAGE_WRITE); 81 89 } 82 90 … … 84 92 85 93 if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) { 86 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, FRAME_KA); 94 pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, 95 FRAME_LOWMEM | FRAME_KA); 87 96 memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0); 88 97 SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt)); 89 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 98 SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), 99 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 100 PAGE_WRITE); 90 101 } 91 102 … … 93 104 94 105 if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) { 95 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, FRAME_KA); 106 pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, 107 FRAME_LOWMEM | FRAME_KA); 96 108 memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0); 97 109 SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt)); 98 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); 110 SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), 111 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 112 PAGE_WRITE); 99 113 } 100 114 … … 123 137 /* 124 138 * First, remove the mapping, if it exists. 125 *126 139 */ 127 140 … … 140 153 pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); 141 154 142 /* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */ 155 /* 156 * Destroy the mapping. 157 * Setting to PAGE_NOT_PRESENT is not sufficient. 158 */ 143 159 memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0); 144 160 145 161 /* 146 * Second, free all empty tables along the way from PTL3 down to PTL0 .147 * 162 * Second, free all empty tables along the way from PTL3 down to PTL0 163 * except those needed for sharing the kernel non-identity mappings. 148 164 */ 149 165 … … 162 178 /* 163 179 * PTL3 is empty. 164 * Release the frame and remove PTL3 pointer from preceding table. 165 * 166 */ 167 frame_free(KA2PA((uintptr_t) ptl3)); 180 * Release the frame and remove PTL3 pointer from the parent 181 * table. 
182 */ 168 183 #if (PTL2_ENTRIES != 0) 169 184 memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); … … 171 186 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 172 187 #else 188 if (km_is_non_identity(page)) 189 return; 190 173 191 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 174 192 #endif 193 frame_free(KA2PA((uintptr_t) ptl3)); 175 194 } else { 176 195 /* … … 195 214 /* 196 215 * PTL2 is empty. 197 * Release the frame and remove PTL2 pointer from preceding table. 198 * 199 */ 200 frame_free(KA2PA((uintptr_t) ptl2)); 216 * Release the frame and remove PTL2 pointer from the parent 217 * table. 218 */ 201 219 #if (PTL1_ENTRIES != 0) 202 220 memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); 203 221 #else 222 if (km_is_non_identity(page)) 223 return; 224 204 225 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 205 226 #endif 227 frame_free(KA2PA((uintptr_t) ptl2)); 206 228 } else { 207 229 /* … … 227 249 /* 228 250 * PTL1 is empty. 229 * Release the frame and remove PTL1 pointer from preceding table. 230 * 231 */ 251 * Release the frame and remove PTL1 pointer from the parent 252 * table. 253 */ 254 if (km_is_non_identity(page)) 255 return; 256 257 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); 232 258 frame_free(KA2PA((uintptr_t) ptl1)); 233 memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);234 259 } 235 260 #endif /* PTL1_ENTRIES != 0 */ … … 267 292 } 268 293 294 /** Make the mappings in the given range global accross all address spaces. 295 * 296 * All PTL0 entries in the given range will be mapped to a next level page 297 * table. The next level page table will be allocated and cleared. 298 * 299 * pt_mapping_remove() will never deallocate these page tables even when there 300 * are no PTEs in them. 301 * 302 * @param as Address space. 303 * @param base Base address corresponding to the first PTL0 entry that will be 304 * altered by this function. 305 * @param size Size in bytes defining the range of PTL0 entries that will be 306 * altered by this function. 307 */ 308 void pt_mapping_make_global(uintptr_t base, size_t size) 309 { 310 uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table); 311 uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1; 312 size_t order; 313 uintptr_t addr; 314 315 #if (PTL1_ENTRIES != 0) 316 order = PTL1_SIZE; 317 #elif (PTL2_ENTRIES != 0) 318 order = PTL2_SIZE; 319 #else 320 order = PTL3_SIZE; 321 #endif 322 323 ASSERT(ispwr2(ptl0step)); 324 325 for (addr = ALIGN_DOWN(base, ptl0step); addr < base + size; 326 addr += ptl0step) { 327 uintptr_t l1; 328 329 l1 = (uintptr_t) frame_alloc(order, FRAME_KA | FRAME_LOWMEM); 330 memsetb((void *) l1, FRAME_SIZE << order, 0); 331 SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1)); 332 SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr), 333 PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 334 PAGE_WRITE); 335 } 336 } 337 269 338 /** @} 270 339 */ -
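pt_mapping_make_global() pre-creates the next-level page tables for every PTL0 slot covering the kernel's non-identity window, so all address spaces share them and pt_mapping_remove() (now guarded by km_is_non_identity()) never tears them down. The PTL0 step computation is the subtle part; this small program verifies the arithmetic under the assumption of 512 PTL0 entries (the real count is architecture-specific):

#include <stdio.h>
#include <stdint.h>

#define PTL0_ENTRIES 512   /* assumed; architecture-specific in reality */

int main(void)
{
    /* size of the address range covered by a single PTL0 entry;
       written this way to avoid overflowing uintptr_t */
    uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;

    /* must be a power of two for ALIGN_DOWN() to be applicable to it */
    int pwr2 = ((ptl0step & (ptl0step - 1)) == 0);

    printf("ptl0step = %#lx, power of two: %s\n",
        (unsigned long) ptl0step, pwr2 ? "yes" : "no");
    return 0;
}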
kernel/generic/include/align.h
r852052d rc520034 42 42 * 43 43 * @param s Address or size to be aligned. 44 * @param a Size of alignment, must be power of 2.44 * @param a Size of alignment, must be a power of 2. 45 45 */ 46 46 #define ALIGN_DOWN(s, a) ((s) & ~((a) - 1)) … … 50 50 * 51 51 * @param s Address or size to be aligned. 52 * @param a Size of alignment, must be power of 2.52 * @param a Size of alignment, must be a power of 2. 53 53 */ 54 54 #define ALIGN_UP(s, a) (((s) + ((a) - 1)) & ~((a) - 1)) 55 56 /** Check alignment. 57 * 58 * @param s Address or size to be checked for alignment. 59 * @param a Size of alignment, must be a power of 2. 60 */ 61 #define IS_ALIGNED(s, a) (ALIGN_UP((s), (a)) == (s)) 55 62 56 63 #endif -
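align.h gains IS_ALIGNED(), expressed as a round-trip through ALIGN_UP(). For power-of-two alignments this is equivalent to testing the low bits directly, which the following standalone check confirms:

#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN(s, a)  ((s) & ~((a) - 1))
#define ALIGN_UP(s, a)    (((s) + ((a) - 1)) & ~((a) - 1))
#define IS_ALIGNED(s, a)  (ALIGN_UP((s), (a)) == (s))

int main(void)
{
    assert(IS_ALIGNED(0x1000u, 0x1000u));
    assert(!IS_ALIGNED(0x1004u, 0x1000u));
    /* equivalent low-bits test, valid for power-of-two alignments */
    assert(IS_ALIGNED(0x2a000u, 0x1000u) == ((0x2a000u & 0xfffu) == 0));
    printf("alignment macros behave as expected\n");
    return 0;
}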
kernel/generic/include/config.h
r852052d rc520034 74 74 75 75 typedef struct { 76 unsigned int cpu_count; /**< Number of processors detected. */ 77 volatile size_t cpu_active; /**< Number of processors that are up and running. */ 76 /** Number of processors detected. */ 77 unsigned int cpu_count; 78 /** Number of processors that are up and running. */ 79 volatile size_t cpu_active; 78 80 79 81 uintptr_t base; 80 size_t kernel_size; /**< Size of memory in bytes taken by kernel and stack */ 82 /** Size of memory in bytes taken by kernel and stack. */ 83 size_t kernel_size; 81 84 82 uintptr_t stack_base; /**< Base adddress of initial stack */ 83 size_t stack_size; /**< Size of initial stack */ 85 /** Base adddress of initial stack. */ 86 uintptr_t stack_base; 87 /** Size of initial stack. */ 88 size_t stack_size; 89 90 bool identity_configured; 91 /** Base address of the kernel identity mapped memory. */ 92 uintptr_t identity_base; 93 /** Size of the kernel identity mapped memory. */ 94 size_t identity_size; 95 96 bool non_identity_configured; 97 98 /** End of physical memory. */ 99 uint64_t physmem_end; 84 100 } config_t; 85 101 -
kernel/generic/include/macros.h
r852052d rc520034 77 77 #endif /* __ASM__ */ 78 78 79 #define ispwr2(x) (((x) & ((x) - 1)) == 0) 80 79 81 #define isdigit(d) (((d) >= '0') && ((d) <= '9')) 80 82 #define islower(c) (((c) >= 'a') && ((c) <= 'z')) -
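The new ispwr2() macro relies on the classic x & (x - 1) trick: clearing the lowest set bit yields zero exactly when at most one bit is set. Note that ispwr2(0) is also true under this definition, so callers such as the reworked hw_map() only apply it to non-zero sizes. A short demonstration:

#include <stdio.h>

#define ispwr2(x)  (((x) & ((x) - 1)) == 0)

int main(void)
{
    unsigned values[] = { 0, 1, 2, 3, 4, 96, 128 };
    for (unsigned i = 0; i < sizeof(values) / sizeof(values[0]); i++)
        printf("ispwr2(%u) = %d\n", values[i], ispwr2(values[i]));
    return 0;
}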
kernel/generic/include/mm/frame.h
r852052d rc520034 50 50 typedef uint8_t frame_flags_t; 51 51 52 #define FRAME_NONE 0x0 52 53 /** Convert the frame address to kernel VA. */ 53 54 #define FRAME_KA 0x1 … … 58 59 /** Do not reserve / unreserve memory. */ 59 60 #define FRAME_NO_RESERVE 0x8 61 /** Allocate a frame which can be identity-mapped. */ 62 #define FRAME_LOWMEM 0x10 63 /** Allocate a frame which cannot be identity-mapped. */ 64 #define FRAME_HIGHMEM 0x20 60 65 61 66 typedef uint8_t zone_flags_t; 62 67 68 #define ZONE_NONE 0x0 63 69 /** Available zone (free for allocation) */ 64 #define ZONE_AVAILABLE 0x 070 #define ZONE_AVAILABLE 0x1 65 71 /** Zone is reserved (not available for allocation) */ 66 #define ZONE_RESERVED 0x 872 #define ZONE_RESERVED 0x2 67 73 /** Zone is used by firmware (not available for allocation) */ 68 #define ZONE_FIRMWARE 0x10 74 #define ZONE_FIRMWARE 0x4 75 /** Zone contains memory that can be identity-mapped */ 76 #define ZONE_LOWMEM 0x8 77 /** Zone contains memory that cannot be identity-mapped */ 78 #define ZONE_HIGHMEM 0x10 69 79 70 /** Currently there is no equivalent zone flags 71 for frame flags */ 72 #define FRAME_TO_ZONE_FLAGS(frame_flags) 0 80 /** Mask of zone bits that must be matched exactly. */ 81 #define ZONE_EF_MASK 0x7 82 83 #define FRAME_TO_ZONE_FLAGS(ff) \ 84 ((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \ 85 (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : ZONE_NONE)) | \ 86 (ZONE_AVAILABLE | ZONE_LOWMEM /* | ZONE_HIGHMEM */)) 87 88 #define ZONE_FLAGS_MATCH(zf, f) \ 89 (((((zf) & ZONE_EF_MASK)) == ((f) & ZONE_EF_MASK)) && \ 90 (((zf) & ~ZONE_EF_MASK) & (f))) 73 91 74 92 typedef struct { 75 93 size_t refcount; /**< Tracking of shared frames */ 76 uint8_t buddy_order; /**< Buddy system block order */77 94 link_t buddy_link; /**< Link to the next free block inside 78 95 one order */ 79 96 void *parent; /**< If allocated by slab, this points there */ 97 uint8_t buddy_order; /**< Buddy system block order */ 80 98 } frame_t; 81 99 … … 129 147 } 130 148 131 NO_TRACE static inline bool zone_flags_available(zone_flags_t flags)132 {133 return ((flags & (ZONE_RESERVED | ZONE_FIRMWARE)) == 0);134 }135 136 149 #define IS_BUDDY_ORDER_OK(index, order) \ 137 150 ((~(((sysarg_t) -1) << (order)) & (index)) == 0) … … 146 159 147 160 extern void frame_init(void); 161 extern bool frame_adjust_zone_bounds(bool, uintptr_t *, size_t *); 148 162 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *); 149 163 extern void *frame_alloc(uint8_t, frame_flags_t); … … 161 175 extern void frame_mark_unavailable(pfn_t, size_t); 162 176 extern size_t zone_conf_size(size_t); 177 extern pfn_t zone_external_conf_alloc(size_t); 163 178 extern bool zone_merge(size_t, size_t); 164 179 extern void zone_merge_all(void); -
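ZONE_AVAILABLE stops being the implicit value 0x0 and becomes a real bit, and zone lookup switches from plain flag subsumption to ZONE_FLAGS_MATCH(): the low three bits (available/reserved/firmware, ZONE_EF_MASK) must match exactly, while at least one of the placement bits (low/high memory) must be common to both sides. FRAME_TO_ZONE_FLAGS() builds the request, defaulting unspecified placement to low memory. The predicate replayed outside the kernel, using the constants from the header:

#include <stdio.h>

#define ZONE_AVAILABLE  0x1
#define ZONE_RESERVED   0x2
#define ZONE_FIRMWARE   0x4
#define ZONE_LOWMEM     0x8
#define ZONE_HIGHMEM    0x10
#define ZONE_EF_MASK    0x7

#define ZONE_FLAGS_MATCH(zf, f) \
    ((((zf) & ZONE_EF_MASK) == ((f) & ZONE_EF_MASK)) && \
        ((((zf) & ~ZONE_EF_MASK) & (f)) != 0))

int main(void)
{
    unsigned lowzone = ZONE_AVAILABLE | ZONE_LOWMEM;
    unsigned highzone = ZONE_AVAILABLE | ZONE_HIGHMEM;

    /* request: an available zone whose frames can be identity-mapped */
    unsigned req = ZONE_AVAILABLE | ZONE_LOWMEM;

    printf("low zone matches:  %d\n", ZONE_FLAGS_MATCH(lowzone, req) ? 1 : 0);
    printf("high zone matches: %d\n", ZONE_FLAGS_MATCH(highzone, req) ? 1 : 0);
    return 0;
}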
kernel/generic/include/mm/page.h
r852052d rc520034 49 49 void (* mapping_remove)(as_t *, uintptr_t); 50 50 pte_t *(* mapping_find)(as_t *, uintptr_t, bool); 51 void (* mapping_make_global)(uintptr_t, size_t); 51 52 } page_mapping_operations_t; 52 53 … … 60 61 extern void page_mapping_remove(as_t *, uintptr_t); 61 62 extern pte_t *page_mapping_find(as_t *, uintptr_t, bool); 63 extern void page_mapping_make_global(uintptr_t, size_t); 62 64 extern pte_t *page_table_create(unsigned int); 63 65 extern void page_table_destroy(pte_t *); -
kernel/generic/src/cpu/cpu.c
r852052d rc520034 74 74 for (i = 0; i < config.cpu_count; i++) { 75 75 cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, 76 FRAME_ KA | FRAME_ATOMIC);76 FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC); 77 77 cpus[i].id = i; 78 78 -
kernel/generic/src/main/main.c
r852052d rc520034 68 68 #include <mm/page.h> 69 69 #include <genarch/mm/page_pt.h> 70 #include <mm/km.h> 70 71 #include <mm/tlb.h> 71 72 #include <mm/as.h> … … 86 87 #include <sysinfo/sysinfo.h> 87 88 #include <sysinfo/stats.h> 89 #include <lib/ra.h> 88 90 89 91 /** Global configuration structure. */ 90 config_t config; 92 config_t config = { 93 .identity_configured = false, 94 .non_identity_configured = false, 95 .physmem_end = 0 96 }; 91 97 92 98 /** Initial user-space tasks */ … … 205 211 */ 206 212 arch_pre_mm_init(); 213 km_identity_init(); 207 214 frame_init(); 208 209 /* Initialize at least 1 memory segment big enough for slab to work. */210 215 slab_cache_init(); 216 ra_init(); 211 217 sysinfo_init(); 212 218 btree_init(); … … 214 220 page_init(); 215 221 tlb_init(); 222 km_non_identity_init(); 216 223 ddi_init(); 217 224 arch_post_mm_init(); -
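Boot-time initialization is re-ordered around the two kernel-memory phases: km_identity_init() must precede frame_init() so the identity split is settled before any zones are created, and km_non_identity_init() runs only after paging and the TLB are up. Condensed from the diff above, with editorial comments that are not in the source:

arch_pre_mm_init();
km_identity_init();     /* decide the low/high (identity) split */
frame_init();           /* create zones, low memory first, then high */
slab_cache_init();
ra_init();              /* resource allocator used for kernel memory */
sysinfo_init();
btree_init();
as_init();
page_init();
tlb_init();
km_non_identity_init(); /* carve out the kernel non-identity window */
ddi_init();
arch_post_mm_init();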
kernel/generic/src/mm/frame.c
r852052d rc520034 240 240 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 241 241 { 242 return ( zone_flags_available(zone->flags)243 &&buddy_system_can_alloc(zone->buddy_system, order));242 return ((zone->flags & ZONE_AVAILABLE) && 243 buddy_system_can_alloc(zone->buddy_system, order)); 244 244 } 245 245 … … 265 265 * Check whether the zone meets the search criteria. 266 266 */ 267 if ( (zones.info[i].flags & flags) == flags) {267 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) { 268 268 /* 269 269 * Check if the zone has 2^order frames area available. … … 460 460 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 461 461 { 462 ASSERT(zone _flags_available(zone->flags));462 ASSERT(zone->flags & ZONE_AVAILABLE); 463 463 464 464 /* Allocate frames from zone buddy system */ … … 490 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 491 491 { 492 ASSERT(zone _flags_available(zone->flags));492 ASSERT(zone->flags & ZONE_AVAILABLE); 493 493 494 494 frame_t *frame = &zone->frames[frame_idx]; … … 518 518 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 519 519 { 520 ASSERT(zone _flags_available(zone->flags));520 ASSERT(zone->flags & ZONE_AVAILABLE); 521 521 522 522 frame_t *frame = zone_get_frame(zone, frame_idx); … … 549 549 buddy_system_t *buddy) 550 550 { 551 ASSERT(zone _flags_available(zones.info[z1].flags));552 ASSERT(zone _flags_available(zones.info[z2].flags));551 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); 552 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE); 553 553 ASSERT(zones.info[z1].flags == zones.info[z2].flags); 554 554 ASSERT(zones.info[z1].base < zones.info[z2].base); … … 645 645 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 646 646 { 647 ASSERT(zone _flags_available(zones.info[znum].flags));647 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 648 648 649 649 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); … … 681 681 size_t count) 682 682 { 683 ASSERT(zone _flags_available(zones.info[znum].flags));683 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 684 684 ASSERT(frame_idx + count < zones.info[znum].count); 685 685 … … 723 723 * set of flags 724 724 */ 725 if ((z1 >= zones.count) || (z2 >= zones.count) 726 || (z2 - z1 != 1) 727 || (!zone_flags_available(zones.info[z1].flags)) 728 || (!zone_flags_available(zones.info[z2].flags)) 729 || (zones.info[z1].flags != zones.info[z2].flags)) { 725 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) || 726 (zones.info[z1].flags != zones.info[z2].flags)) { 730 727 ret = false; 731 728 goto errout; … … 828 825 zone->buddy_system = buddy; 829 826 830 if ( zone_flags_available(flags)) {827 if (flags & ZONE_AVAILABLE) { 831 828 /* 832 829 * Compute order for buddy system and initialize … … 865 862 { 866 863 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); 864 } 865 866 /** Allocate external configuration frames from low memory. */ 867 pfn_t zone_external_conf_alloc(size_t count) 868 { 869 size_t size = zone_conf_size(count); 870 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1); 871 872 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, FRAME_LOWMEM)); 867 873 } 868 874 … … 888 894 irq_spinlock_lock(&zones.lock, true); 889 895 890 if ( zone_flags_available(flags)) { /* Create available zone */896 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 891 897 /* Theoretically we could have NULL here, practically make sure 892 898 * nobody tries to do that. 
If some platform requires, remove … … 894 900 */ 895 901 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 902 903 /* Update the known end of physical memory. */ 904 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 896 905 897 906 /* If confframe is supposed to be inside our zone, then make sure … … 1232 1241 1233 1242 /* Tell the architecture to create some memory */ 1234 frame_ arch_init();1243 frame_low_arch_init(); 1235 1244 if (config.cpu_active == 1) { 1236 1245 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1255 1264 frame_mark_unavailable(0, 1); 1256 1265 } 1266 frame_high_arch_init(); 1267 } 1268 1269 /** Adjust bounds of physical memory region according to low/high memory split. 1270 * 1271 * @param low[in] If true, the adujstment is performed to make the region 1272 * fit in the low memory. Otherwise the adjustment is 1273 * performed to make the region fit in the high memory. 1274 * @param basep[inout] Pointer to a variable which contains the region's base 1275 * address and which may receive the adjusted base address. 1276 * @param sizep[inout] Pointer to a variable which contains the region's size 1277 * and which may receive the adjusted size. 1278 * @retun True if the region still exists even after the 1279 * adjustment, false otherwise. 1280 */ 1281 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1282 { 1283 uintptr_t limit = config.identity_size; 1284 1285 if (low) { 1286 if (*basep > limit) 1287 return false; 1288 if (*basep + *sizep > limit) 1289 *sizep = limit - *basep; 1290 } else { 1291 if (*basep + *sizep <= limit) 1292 return false; 1293 if (*basep <= limit) { 1294 *sizep -= limit - *basep; 1295 *basep = limit; 1296 } 1297 } 1298 return true; 1257 1299 } 1258 1300 … … 1293 1335 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1294 1336 1295 if (zone _flags_available(zones.info[i].flags)) {1337 if (zones.info[i].flags & ZONE_AVAILABLE) { 1296 1338 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count); 1297 1339 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count); … … 1344 1386 irq_spinlock_unlock(&zones.lock, true); 1345 1387 1346 bool available = zone_flags_available(flags);1388 bool available = ((flags & ZONE_AVAILABLE) != 0); 1347 1389 1348 1390 printf("%-4zu", i); … … 1356 1398 #endif 1357 1399 1358 printf(" %12zu %c%c%c ", count, 1359 available ? 'A' : ' ', 1360 (flags & ZONE_RESERVED) ? 'R' : ' ', 1361 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1400 printf(" %12zu %c%c%c%c%c ", count, 1401 available ? 'A' : '-', 1402 (flags & ZONE_RESERVED) ? 'R' : '-', 1403 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1404 (flags & ZONE_LOWMEM) ? 'L' : '-', 1405 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1362 1406 1363 1407 if (available) … … 1401 1445 irq_spinlock_unlock(&zones.lock, true); 1402 1446 1403 bool available = zone_flags_available(flags);1447 bool available = ((flags & ZONE_AVAILABLE) != 0); 1404 1448 1405 1449 uint64_t size; … … 1411 1455 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1412 1456 size, size_suffix); 1413 printf("Zone flags: %c%c%c\n", 1414 available ? 'A' : ' ', 1415 (flags & ZONE_RESERVED) ? 'R' : ' ', 1416 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1457 printf("Zone flags: %c%c%c%c%c\n", 1458 available ? 'A' : '-', 1459 (flags & ZONE_RESERVED) ? 'R' : '-', 1460 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1461 (flags & ZONE_LOWMEM) ? 'L' : '-', 1462 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1417 1463 1418 1464 if (available) { -
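frame_adjust_zone_bounds() is the pivot of the low/high split: each architecture's common zone walker is run twice and every physical region is clipped against config.identity_size. Because the function is small and pure, it can be exercised directly; this sketch inlines it verbatim from the diff under a fake 512 MiB identity limit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t identity_size = 512UL * 1024 * 1024;  /* fake split */

static bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
{
    uintptr_t limit = identity_size;

    if (low) {
        if (*basep > limit)
            return false;
        if (*basep + *sizep > limit)
            *sizep = limit - *basep;
    } else {
        if (*basep + *sizep <= limit)
            return false;
        if (*basep <= limit) {
            *sizep -= limit - *basep;
            *basep = limit;
        }
    }
    return true;
}

int main(void)
{
    /* a region straddling the split is visible from both sides */
    uintptr_t base = 0x1f000000;
    size_t size = 0x02000000;

    uintptr_t lb = base; size_t ls = size;
    if (frame_adjust_zone_bounds(true, &lb, &ls))
        printf("low part:  base=%#lx size=%#zx\n", (unsigned long) lb, ls);

    uintptr_t hb = base; size_t hs = size;
    if (frame_adjust_zone_bounds(false, &hb, &hs))
        printf("high part: base=%#lx size=%#zx\n", (unsigned long) hb, hs);
    return 0;
}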
kernel/generic/src/mm/page.c
r852052d rc520034 65 65 #include <arch/mm/asid.h> 66 66 #include <mm/as.h> 67 #include <mm/km.h> 67 68 #include <mm/frame.h> 68 69 #include <arch/barrier.h> … … 75 76 #include <errno.h> 76 77 #include <align.h> 78 #include <macros.h> 79 #include <bitops.h> 77 80 78 81 /** Virtual operations for page subsystem. */ … … 177 180 } 178 181 182 /** Make the mapping shared by all page tables (not address spaces). 183 * 184 * @param base Starting virtual address of the range that is made global. 185 * @param size Size of the address range that is made global. 186 */ 187 void page_mapping_make_global(uintptr_t base, size_t size) 188 { 189 ASSERT(page_mapping_operations); 190 ASSERT(page_mapping_operations->mapping_make_global); 191 192 return page_mapping_operations->mapping_make_global(base, size); 193 } 194 195 uintptr_t hw_map(uintptr_t physaddr, size_t size) 196 { 197 uintptr_t virtaddr; 198 size_t asize; 199 size_t align; 200 pfn_t i; 201 202 asize = ALIGN_UP(size, PAGE_SIZE); 203 align = ispwr2(size) ? size : (1U << (fnzb(size) + 1)); 204 virtaddr = km_page_alloc(asize, align); 205 206 page_table_lock(AS_KERNEL, true); 207 for (i = 0; i < ADDR2PFN(asize); i++) { 208 uintptr_t addr = PFN2ADDR(i); 209 page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr, 210 PAGE_NOT_CACHEABLE | PAGE_WRITE); 211 } 212 page_table_unlock(AS_KERNEL, true); 213 214 return virtaddr; 215 } 216 179 217 int page_find_mapping(uintptr_t virt, void **phys) 180 218 { -
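hw_map(), formerly duplicated per architecture and based on bumping last_frame, is now generic: it reserves a window with km_page_alloc(), aligned to the next power of two at or above the mapping size, and inserts uncached writable mappings page by page. The alignment computation depends on fnzb() (zero-based index of the most significant set bit); a compilable equivalent with fnzb() re-implemented naively here:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((a) - 1))
#define ispwr2(x)       (((x) & ((x) - 1)) == 0)

/* zero-based index of the most significant set bit (assumes x != 0) */
static unsigned fnzb(uintptr_t x)
{
    unsigned n = 0;
    while (x >>= 1)
        n++;
    return n;
}

int main(void)
{
    size_t sizes[] = { 0x1000, 0x3000, 0x10000 };
    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        size_t size = sizes[i];
        size_t asize = ALIGN_UP(size, PAGE_SIZE);
        size_t align = ispwr2(size) ? size : (1UL << (fnzb(size) + 1));
        printf("size=%#zx -> asize=%#zx align=%#zx\n", size, asize, align);
    }
    return 0;
}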
kernel/generic/src/mm/reserve.c
r852052d rc520034 42 42 #include <typedefs.h> 43 43 #include <arch/types.h> 44 #include <debug.h> 45 46 static bool reserve_initialized = false; 44 47 45 48 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock"); … … 54 57 { 55 58 reserve = frame_total_free_get(); 59 reserve_initialized = true; 56 60 } 57 61 … … 67 71 { 68 72 bool reserved = false; 73 74 ASSERT(reserve_initialized); 69 75 70 76 irq_spinlock_lock(&reserve_lock, true); … … 111 117 void reserve_force_alloc(size_t size) 112 118 { 119 if (!reserve_initialized) 120 return; 121 113 122 irq_spinlock_lock(&reserve_lock, true); 114 123 reserve -= size; … … 122 131 void reserve_free(size_t size) 123 132 { 133 if (!reserve_initialized) 134 return; 135 124 136 irq_spinlock_lock(&reserve_lock, true); 125 137 reserve += size; -
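reserve.c now tracks whether reserve_init() has run: before that point reserve_force_alloc() and reserve_free() silently do nothing, while reserve_try_alloc() asserts, since the accounted path must never execute that early in boot. The guard pattern in miniature, with simplified hypothetical signatures:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool reserve_initialized = false;
static long reserve;

static void reserve_init(long free_frames)
{
    reserve = free_frames;
    reserve_initialized = true;
}

static void reserve_force_alloc(long size)
{
    if (!reserve_initialized)
        return;                   /* early boot: no accounting yet */
    reserve -= size;
}

static bool reserve_try_alloc(long size)
{
    assert(reserve_initialized);  /* must not run before reserve_init() */
    if (reserve >= size) {
        reserve -= size;
        return true;
    }
    return false;
}

int main(void)
{
    reserve_force_alloc(4);       /* ignored: not initialized yet */
    reserve_init(16);
    printf("try_alloc(8): %d, remaining %ld\n",
        reserve_try_alloc(8) ? 1 : 0, reserve);
    return 0;
}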
kernel/generic/src/proc/thread.c
r852052d rc520034 173 173 #endif /* CONFIG_FPU */ 174 174 175 /* 176 * Allocate the kernel stack from the low-memory to prevent an infinite 177 * nesting of TLB-misses when accessing the stack from the part of the 178 * TLB-miss handler written in C. 179 * 180 * Note that low-memory is safe to be used for the stack as it will be 181 * covered by the kernel identity mapping, which guarantees not to 182 * nest TLB-misses infinitely (either via some hardware mechanism or 183 * by the construciton of the assembly-language part of the TLB-miss 184 * handler). 185 * 186 * This restriction can be lifted once each architecture provides 187 * a similar guarantee, for example by locking the kernel stack 188 * in the TLB whenever it is allocated from the high-memory and the 189 * thread is being scheduled to run. 190 */ 191 kmflags |= FRAME_LOWMEM; 192 kmflags &= ~FRAME_HIGHMEM; 193 175 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 176 195 if (!thread->kstack) {
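The long comment added to thread.c explains the constraint: a TLB miss on the kernel stack taken while already handling a TLB miss would nest forever unless the stack lies in identity-mapped low memory. The flag adjustment itself is two bit operations; a trivial check that FRAME_HIGHMEM is stripped even when a caller requests it:

#include <stdio.h>

#define FRAME_LOWMEM   0x10
#define FRAME_HIGHMEM  0x20

int main(void)
{
    unsigned kmflags = FRAME_HIGHMEM;   /* hypothetical caller request */

    kmflags |= FRAME_LOWMEM;            /* stack must be identity-mapped */
    kmflags &= ~FRAME_HIGHMEM;

    printf("kmflags = %#x (lowmem=%d, highmem=%d)\n", kmflags,
        !!(kmflags & FRAME_LOWMEM), !!(kmflags & FRAME_HIGHMEM));
    return 0;
}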