- Timestamp: 2013-03-10T14:56:21Z (12 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 05bab88
- Parents: ea906c29 (diff), 2277e03 (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
  Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 28 added, 4 deleted, 67 edited, 2 moved
kernel/arch/amd64/Makefile.inc
ea906c29 → 850235d
 
 FPU_NO_CFLAGS = -mno-sse -mno-sse2
-
-#
-# FIXME:
-#
-# The -fno-optimize-sibling-calls should be removed as soon as a bug
-# in GCC concerning the "large" memory model and tail call optimization
-# is fixed.
-#
-# When GCC generates a code for tail call, instead of generating ..
-#
-#     jmp *fnc
-#
-# it generates an assembly code with an illegal immediate prefix:
-#
-#     jmp *$fnc
-#
-# See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=48385 for reference.
-#
-CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer
 GCC_CFLAGS += $(CMN1)
 ICC_CFLAGS += $(CMN1)
…
     arch/$(KARCH)/src/proc/thread.c \
     arch/$(KARCH)/src/userspace.c \
-    arch/$(KARCH)/src/syscall.c \
-    arch/$(KARCH)/src/debugger.c
+    arch/$(KARCH)/src/syscall.c
 
 ifeq ($(CONFIG_SMP),y)
kernel/arch/amd64/include/arch/istate.h
ea906c29 → 850235d
 } istate_t;
 
+#define RPL_USER 3
+
 /** Return true if exception happened while in userspace */
 NO_TRACE static inline int istate_from_uspace(istate_t *istate)
 {
-    return !(istate->rip & UINT64_C(0x8000000000000000));
+    return (istate->cs & RPL_USER) == RPL_USER;
 }
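The rewrite above replaces an address-based heuristic (a canonical kernel address has its top bit set) with a privilege-level check: the low two bits of the saved CS selector hold the Requested Privilege Level, which is 3 for user mode. A minimal host-side sketch of the same test; the selector values are hypothetical examples, not HelenOS's actual GDT layout:

    #include <assert.h>
    #include <stdint.h>

    #define RPL_USER 3

    /* The lowest two bits of a segment selector hold the RPL. */
    static int selector_is_user(uint16_t cs)
    {
        return (cs & RPL_USER) == RPL_USER;
    }

    int main(void)
    {
        assert(selector_is_user(0x33));  /* hypothetical user code selector, RPL 3 */
        assert(!selector_is_user(0x08)); /* hypothetical kernel code selector, RPL 0 */
        return 0;
    }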
kernel/arch/amd64/src/amd64.c
ea906c29 → 850235d
 #include <arch/bios/bios.h>
 #include <arch/boot/boot.h>
-#include <arch/debugger.h>
 #include <arch/drivers/i8254.h>
 #include <arch/drivers/i8259.h>
…
 #endif
 
-    /* Enable debugger */
-    debugger_init();
     /* Merge all memory zones to 1 big zone */
     zone_merge_all();
kernel/arch/amd64/src/fpu_context.c
ea906c29 → 850235d
 {
     /* TODO: Zero all SSE, MMX etc. registers */
+    /* Default value of SCR register is 0x1f80,
+     * it masks all FPU exceptions*/
     asm volatile (
         "fninit\n"
kernel/arch/amd64/src/mm/page.c
ea906c29 → 850235d
         access = PF_ACCESS_READ;
 
-    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
-        panic_memtrap(istate, access, page, NULL);
-    }
+    as_page_fault(page, access, istate);
 }
 
kernel/arch/arm32/Makefile.inc
ea906c29 → 850235d
 ATSIGN = %
 
-GCC_CFLAGS += -march=armv4 -fno-omit-frame-pointer -mapcs-frame
+GCC_CFLAGS += -fno-omit-frame-pointer -mapcs-frame -march=$(subst _,-,$(PROCESSOR_ARCH)) -mno-unaligned-access
+
+ifeq ($(CONFIG_FPU),y)
+# This is necessary to allow vmsr insn and fpexc manipulation
+# Use vfp32 to allow context save/restore of d16-d31 regs.
+AFLAGS += -mfloat-abi=hard -mfpu=vfp3
+endif
 
 BITS = 32
…
     arch/$(KARCH)/src/ras.c
 
+ifeq ($(CONFIG_FPU),y)
+    ARCH_SOURCES += arch/$(KARCH)/src/fpu_context.c
+    ARCH_SOURCES += arch/$(KARCH)/src/fpu.s
+endif
+
 ifeq ($(MACHINE),gta02)
     ARCH_SOURCES += arch/$(KARCH)/src/mach/gta02/gta02.c
-endif
-
-ifeq ($(MACHINE),testarm)
-    ARCH_SOURCES += arch/$(KARCH)/src/mach/testarm/testarm.c
 endif
 
…
 endif
 
+ifeq ($(MACHINE),beagleboardxm)
+    ARCH_SOURCES += arch/$(KARCH)/src/mach/beagleboardxm/beagleboardxm.c
+endif
+
+ifeq ($(MACHINE),beaglebone)
+    ARCH_SOURCES += arch/$(KARCH)/src/mach/beaglebone/beaglebone.c
+endif
+
 ifeq ($(CONFIG_PL050),y)
     ARCH_SOURCES += genarch/src/drivers/pl050/pl050.c
kernel/arch/arm32/_link.ld.in
ea906c29 → 850235d
 #ifdef MACHINE_gta02
 #define KERNEL_LOAD_ADDRESS 0xb0a08000
+#elif defined MACHINE_beagleboardxm
+#define KERNEL_LOAD_ADDRESS 0x80a00000
+#elif defined MACHINE_beaglebone
+#define KERNEL_LOAD_ADDRESS 0x80a00000
 #else
 #define KERNEL_LOAD_ADDRESS 0x80a00000
kernel/arch/arm32/include/arch/asm.h
ea906c29 → 850235d
 #include <trace.h>
 
-/** No such instruction on ARM to sleep CPU. */
+/** CPU specific way to sleep cpu.
+ *
+ * ARMv7 introduced wait for event and wait for interrupt (wfe/wfi).
+ * ARM920T has custom coprocessor action to do the same. See ARM920T Technical
+ * Reference Manual ch 4.9 p. 4-23 (103 in the PDF)
+ * ARM926EJ-S uses the same coprocessor instruction as ARM920T. See ARM926EJ-S
+ * chapter 2.3.8 p.2-22 (52 in the PDF)
+ *
+ * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture
+ * reference manual for armv4/5 CP15 implementation is mandatory only for
+ * armv6+.
+ */
 NO_TRACE static inline void cpu_sleep(void)
 {
+#ifdef PROCESSOR_ARCH_armv7_a
+    asm volatile ( "wfe" );
+#elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t)
+    asm volatile ( "mcr p15, 0, R0, c7, c0, 4" );
+#endif
 }
kernel/arch/arm32/include/arch/barrier.h
ea906c29 → 850235d
 #define KERN_arm32_BARRIER_H_
 
-/*
- * TODO: implement true ARM memory barriers for macros below.
- */
+#ifdef KERNEL
+#include <arch/cp15.h>
+#else
+#include <libarch/cp15.h>
+#endif
+
 #define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
 #define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
 
+#if defined PROCESSOR_ARCH_armv7_a
+/* ARMv7 uses instructions for memory barriers see ARM Architecture reference
+ * manual for details:
+ * DMB: ch. A8.8.43 page A8-376
+ * DSB: ch. A8.8.44 page A8-378
+ * See ch. A3.8.3 page A3-148 for details about memory barrier implementation
+ * and functionality on armv7 architecture.
+ */
+#define memory_barrier() asm volatile ("dmb" ::: "memory")
+#define read_barrier() asm volatile ("dsb" ::: "memory")
+#define write_barrier() asm volatile ("dsb st" ::: "memory")
+#define inst_barrier() asm volatile ("isb" ::: "memory")
+#elif defined PROCESSOR_ARCH_armv6 | defined KERNEL
+/*
+ * ARMv6 introduced user access of the following commands:
+ * - Prefetch flush
+ * - Data synchronization barrier
+ * - Data memory barrier
+ * - Clean and prefetch range operations.
+ * ARM Architecture Reference Manual version I ch. B.3.2.1 p. B3-4
+ */
+/* ARMv6- use system control coprocessor (CP15) for memory barrier instructions.
+ * Although at least mcr p15, 0, r0, c7, c10, 4 is mentioned in earlier archs,
+ * CP15 implementation is mandatory only for armv6+.
+ */
+#define memory_barrier() CP15DMB_write(0)
+#define read_barrier() CP15DSB_write(0)
+#define write_barrier() read_barrier()
+#define inst_barrier() CP15ISB_write(0)
+#else
+/* Older manuals mention syscalls as a way to implement cache coherency and
+ * barriers. See for example ARM Architecture Reference Manual Version D
+ * chapter 2.7.4 Prefetching and self-modifying code (p. A2-28)
+ */
+// TODO implement on per PROCESSOR basis or via syscalls
 #define memory_barrier() asm volatile ("" ::: "memory")
 #define read_barrier() asm volatile ("" ::: "memory")
 #define write_barrier() asm volatile ("" ::: "memory")
+#define inst_barrier() asm volatile ("" ::: "memory")
+#endif
 
+/*
+ * There are multiple ways ICache can be implemented on ARM machines. Namely
+ * PIPT, VIPT, and ASID and VMID tagged VIVT (see ARM Architecture Reference
+ * Manual B3.11.2 (p. 1383). However, CortexA8 Manual states: "For maximum
+ * compatibility across processors, ARM recommends that operating systems target
+ * the ARMv7 base architecture that uses ASID-tagged VIVT instruction caches,
+ * and do not assume the presence of the IVIPT extension. Software that relies
+ * on the IVIPT extension might fail in an unpredictable way on an ARMv7
+ * implementation that does not include the IVIPT extension." (7.2.6 p. 245).
+ * Only PIPT invalidates cache for all VA aliases if one block is invalidated.
+ *
+ * @note: Supporting ASID and VMID tagged VIVT may need to add ICache
+ * maintenance to other places than just smc.
+ */
+
+#if defined PROCESSOR_ARCH_armv7_a | defined PROCESSOR_ARCH_armv6 | defined KERNEL
+/* Available on all supported arms,
+ * invalidates entire ICache so the written value does not matter. */
+//TODO might be PL1 only on armv5-
+#define smc_coherence(a) \
+do { \
+    DCCMVAU_write((uint32_t)(a));  /* Flush changed memory */ \
+    write_barrier();               /* Wait for completion */ \
+    ICIALLU_write(0);              /* Flush ICache */ \
+    inst_barrier();                /* Wait for Inst refetch */ \
+} while (0)
+/* @note: Cache type register is not available in uspace. We would need
+ * to export the cache line value, or use syscall for uspace smc_coherence */
+#define smc_coherence_block(a, l) \
+do { \
+    for (uintptr_t addr = (uintptr_t)a; addr < (uintptr_t)a + l; addr += 4) \
+        smc_coherence(addr); \
+} while (0)
+#else
 #define smc_coherence(a)
 #define smc_coherence_block(a, l)
+#endif
 
 
 #endif
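The smc_coherence() sequence above is what makes self-modifying code safe on armv6/armv7: clean the changed data cache line to the point of unification, wait, invalidate the instruction cache, wait for the refetch. This changeset itself uses it when installing exception vectors (see exception.c below). A minimal usage sketch, assuming the macros from this header; patch_insn() and its arguments are illustrative:

    #include <stdint.h>

    /* Sketch: store one new instruction and make it visible to
     * instruction fetch, using the macros defined in this header. */
    static void patch_insn(uint32_t *addr, uint32_t opcode)
    {
        *addr = opcode;       /* store the new instruction word */
        smc_coherence(addr);  /* DCCMVAU + DSB + ICIALLU + ISB on armv6/v7 */
    }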
kernel/arch/arm32/include/arch/cache.h
ea906c29 → 850235d
 /*
- * Copyright (c) 2011 Martin Sucha
+ * Copyright (c) 2013 Jan Vesely
  * All rights reserved.
  *
…
  */
 
-/** @addtogroup fs
+/** @addtogroup arm32
  * @{
  */
+/** @file
+ * @brief Security Extensions Routines
+ */
 
-#ifndef EXT2FS_EXT2FS_H_
-#define EXT2FS_EXT2FS_H_
+#ifndef KERN_arm32_CACHE_H_
+#define KERN_arm32_CACHE_H_
 
-#include <libext2.h>
-#include <libfs.h>
-#include <sys/types.h>
+unsigned dcache_levels(void);
 
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-extern vfs_out_ops_t ext2fs_ops;
-extern libfs_ops_t ext2fs_libfs_ops;
-
-extern int ext2fs_global_init(void);
-extern int ext2fs_global_fini(void);
+void dcache_flush(void);
+void dcache_flush_invalidate(void);
+void cpu_dcache_flush(void);
+void cpu_dcache_flush_invalidate(void);
+void icache_invalidate(void);
 
 #endif
+/** @}
+ */
 
-/**
- * @}
- */
kernel/arch/arm32/include/arch/cpu.h
ea906c29 → 850235d
 #include <arch/asm.h>
 
+enum {
+    ARM_MAX_CACHE_LEVELS = 7,
+};
+
-/** Struct representing ARM CPU identifiaction. */
+/** Struct representing ARM CPU identification. */
 typedef struct {
-    /** Implementator (vendor) number. */
+    /** Implementor (vendor) number. */
     uint32_t imp_num;
 
…
     /** Revision number. */
     uint32_t rev_num;
+
+    struct {
+        unsigned ways;
+        unsigned sets;
+        unsigned line_size;
+        unsigned way_shift;
+        unsigned set_shift;
+    } dcache[ARM_MAX_CACHE_LEVELS];
+    unsigned dcache_levels;
 } cpu_arch_t;
 
kernel/arch/arm32/include/arch/cycle.h
ea906c29 → 850235d
 
 #include <trace.h>
+#include <arch/cp15.h>
 
 /** Return count of CPU cycles.
…
 NO_TRACE static inline uint64_t get_cycle(void)
 {
+#ifdef PROCESSOR_ARCH_armv7_a
+    if ((ID_PFR1_read() & ID_PFR1_GEN_TIMER_EXT_MASK) ==
+        ID_PFR1_GEN_TIMER_EXT) {
+        uint32_t low = 0, high = 0;
+        asm volatile( "MRRC p15, 0, %[low], %[high], c14": [low]"=r"(low), [high]"=r"(high));
+        return ((uint64_t)high << 32) | low;
+    } else {
+        return (uint64_t)PMCCNTR_read() * 64;
+    }
+#endif
     return 0;
 }
kernel/arch/arm32/include/arch/fpu_context.h
ea906c29 → 850235d
  */
 /** @file
- * @brief FPU context (not implemented).
- *
- * GXemul doesn't support FPU on its ARM CPU.
+ * @brief FPU context.
  */
 
…
 #include <typedefs.h>
 
-#define FPU_CONTEXT_ALIGN 0
+#define FPU_CONTEXT_ALIGN 8
 
+/* ARM Architecture reference manual, p B-1529.
+ */
 typedef struct {
+    uint32_t fpexc;
+    uint32_t fpscr;
+    uint32_t s[64];
 } fpu_context_t;
+
+void fpu_setup(void);
+
+bool handle_if_fpu_exception(void);
 
 #endif
kernel/arch/arm32/include/arch/mach/beagleboardxm/beagleboardxm.h
ea906c29 → 850235d
 /*
- * Copyright (c) 2006 Ondrej Palkovsky
+ * Copyright (c) 2012 Jan Vesely
  * All rights reserved.
  *
…
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-
-/** @addtogroup amd64debug
+/** @addtogroup arm32beagleboardxm beagleboardxm
+ * @brief BeagleBoard-xM platform.
+ * @ingroup arm32
  * @{
  */
 /** @file
+ * @brief BeagleBoard platform driver.
  */
 
-#ifndef KERN_amd64_DEBUGGER_H_
-#define KERN_amd64_DEBUGGER_H_
+#ifndef KERN_arm32_beagleboardxm_H_
+#define KERN_arm32_beagleboardxm_H_
 
-#include <typedefs.h>
+#include <arch/machine_func.h>
 
-#define BKPOINTS_MAX 4
-
-/* Flags that are passed to breakpoint_add function */
-#define BKPOINT_INSTR 0x1U
-#define BKPOINT_WRITE 0x2U
-#define BKPOINT_READ_WRITE 0x4U
-
-#define BKPOINT_CHECK_ZERO 0x8U
-
-
-extern void debugger_init(void);
-extern int breakpoint_add(const void *, const unsigned int, int);
-extern void breakpoint_del(int);
+extern struct arm_machine_ops bbxm_machine_ops;
 
 #endif
…
 /** @}
  */
+
kernel/arch/arm32/include/arch/machine_func.h
ea906c29 → 850235d
 extern size_t machine_get_irq_count(void);
 
+extern const char * machine_get_platform_name(void);
+
 #endif
 
kernel/arch/arm32/include/arch/mm/frame.h
ea906c29 → 850235d
 
 #ifdef MACHINE_gta02
+
+#define PHYSMEM_START_ADDR 0x30008000
 #define BOOT_PAGE_TABLE_ADDRESS 0x30010000
+
+#elif defined MACHINE_beagleboardxm
+
+#define PHYSMEM_START_ADDR 0x80000000
+#define BOOT_PAGE_TABLE_ADDRESS 0x80008000
+
+#elif defined MACHINE_beaglebone
+
+#define PHYSMEM_START_ADDR 0x80000000
+#define BOOT_PAGE_TABLE_ADDRESS 0x80008000
+
 #else
+
+#define PHYSMEM_START_ADDR 0x00000000
 #define BOOT_PAGE_TABLE_ADDRESS 0x00008000
+
 #endif
 
 #define BOOT_PAGE_TABLE_START_FRAME (BOOT_PAGE_TABLE_ADDRESS >> FRAME_WIDTH)
 #define BOOT_PAGE_TABLE_SIZE_IN_FRAMES (BOOT_PAGE_TABLE_SIZE >> FRAME_WIDTH)
-
-#ifdef MACHINE_gta02
-#define PHYSMEM_START_ADDR 0x30008000
-#else
-#define PHYSMEM_START_ADDR 0x00000000
-#endif
 
 extern void frame_low_arch_init(void);
kernel/arch/arm32/include/arch/mm/page.h
ea906c29 → 850235d
 #define PAGE_SIZE FRAME_SIZE
 
+#if (defined MACHINE_beagleboardxm) || (defined MACHINE_beaglebone)
+#ifndef __ASM__
+#   define KA2PA(x) ((uintptr_t) (x))
+#   define PA2KA(x) ((uintptr_t) (x))
+#else
+#   define KA2PA(x) (x)
+#   define PA2KA(x) (x)
+#endif
+#else
 #ifndef __ASM__
 #   define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
…
 #   define PA2KA(x) ((x) + 0x80000000)
 #endif
+#endif
 
 /* Number of entries in each level. */
-#define PTL0_ENTRIES_ARCH (1 << 12)/* 4096 */
-#define PTL1_ENTRIES_ARCH 0
-#define PTL2_ENTRIES_ARCH 0
+#define PTL0_ENTRIES_ARCH (1 << 12) /* 4096 */
+#define PTL1_ENTRIES_ARCH 0
+#define PTL2_ENTRIES_ARCH 0
 /* coarse page tables used (256 * 4 = 1KB per page) */
-#define PTL3_ENTRIES_ARCH (1 << 8)/* 256 */
+#define PTL3_ENTRIES_ARCH (1 << 8) /* 256 */
 
 /* Page table sizes for each level. */
-#define PTL0_SIZE_ARCH FOUR_FRAMES
-#define PTL1_SIZE_ARCH 0
-#define PTL2_SIZE_ARCH 0
-#define PTL3_SIZE_ARCH ONE_FRAME
+#define PTL0_SIZE_ARCH FOUR_FRAMES
+#define PTL1_SIZE_ARCH 0
+#define PTL2_SIZE_ARCH 0
+#define PTL3_SIZE_ARCH ONE_FRAME
 
 /* Macros calculating indices into page tables for each level. */
-#define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 20) & 0xfff)
-#define PTL1_INDEX_ARCH(vaddr) 0
-#define PTL2_INDEX_ARCH(vaddr) 0
-#define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x0ff)
+#define PTL0_INDEX_ARCH(vaddr) (((vaddr) >> 20) & 0xfff)
+#define PTL1_INDEX_ARCH(vaddr) 0
+#define PTL2_INDEX_ARCH(vaddr) 0
+#define PTL3_INDEX_ARCH(vaddr) (((vaddr) >> 12) & 0x0ff)
 
 /* Get PTE address accessors for each level. */
 #define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
     ((pte_t *) ((((pte_t *)(ptl0))[(i)].l0).coarse_table_addr << 10))
 #define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
     (ptl1)
 #define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
     (ptl2)
 #define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
     ((uintptr_t) ((((pte_t *)(ptl3))[(i)].l1).frame_base_addr << 12))
 
 /* Set PTE address accessors for each level. */
 #define SET_PTL0_ADDRESS_ARCH(ptl0) \
     (set_ptl0_addr((pte_t *) (ptl0)))
 #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
     (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
 #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
 #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
 #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
     (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
 
 /* Get PTE flags accessors for each level. */
 #define GET_PTL1_FLAGS_ARCH(ptl0, i) \
     get_pt_level0_flags((pte_t *) (ptl0), (size_t) (i))
 #define GET_PTL2_FLAGS_ARCH(ptl1, i) \
     PAGE_PRESENT
 #define GET_PTL3_FLAGS_ARCH(ptl2, i) \
     PAGE_PRESENT
 #define GET_FRAME_FLAGS_ARCH(ptl3, i) \
     get_pt_level1_flags((pte_t *) (ptl3), (size_t) (i))
 
 /* Set PTE flags accessors for each level. */
 #define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
     set_pt_level0_flags((pte_t *) (ptl0), (size_t) (i), (x))
 #define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
 #define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
…
     set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))
 
-/* Macros for querying the last-level PTE entries. */
-#define PTE_VALID_ARCH(pte) \
-    (*((uint32_t *) (pte)) != 0)
-#define PTE_PRESENT_ARCH(pte) \
-    (((pte_t *) (pte))->l0.descriptor_type != 0)
-#define PTE_GET_FRAME_ARCH(pte) \
-    (((pte_t *) (pte))->l1.frame_base_addr << FRAME_WIDTH)
-#define PTE_WRITABLE_ARCH(pte) \
-    (((pte_t *) (pte))->l1.access_permission_0 == PTE_AP_USER_RW_KERNEL_RW)
-#define PTE_EXECUTABLE_ARCH(pte) \
-    1
-
-#ifndef __ASM__
-
-/** Level 0 page table entry. */
-typedef struct {
-    /* 0b01 for coarse tables, see below for details */
-    unsigned descriptor_type : 2;
-    unsigned impl_specific : 3;
-    unsigned domain : 4;
-    unsigned should_be_zero : 1;
-
-    /* Pointer to the coarse 2nd level page table (holding entries for small
-     * (4KB) or large (64KB) pages. ARM also supports fine 2nd level page
-     * tables that may hold even tiny pages (1KB) but they are bigger (4KB
-     * per table in comparison with 1KB per the coarse table)
-     */
-    unsigned coarse_table_addr : 22;
-} ATTRIBUTE_PACKED pte_level0_t;
-
-/** Level 1 page table entry (small (4KB) pages used). */
-typedef struct {
-    /* 0b10 for small pages */
-    unsigned descriptor_type : 2;
-    unsigned bufferable : 1;
-    unsigned cacheable : 1;
-
-    /* access permissions for each of 4 subparts of a page
-     * (for each 1KB when small pages used */
-    unsigned access_permission_0 : 2;
-    unsigned access_permission_1 : 2;
-    unsigned access_permission_2 : 2;
-    unsigned access_permission_3 : 2;
-    unsigned frame_base_addr : 20;
-} ATTRIBUTE_PACKED pte_level1_t;
-
-typedef union {
-    pte_level0_t l0;
-    pte_level1_t l1;
-} pte_t;
-
-/* Level 1 page tables access permissions */
-
-/** User mode: no access, privileged mode: no access. */
-#define PTE_AP_USER_NO_KERNEL_NO 0
-
-/** User mode: no access, privileged mode: read/write. */
-#define PTE_AP_USER_NO_KERNEL_RW 1
-
-/** User mode: read only, privileged mode: read/write. */
-#define PTE_AP_USER_RO_KERNEL_RW 2
-
-/** User mode: read/write, privileged mode: read/write. */
-#define PTE_AP_USER_RW_KERNEL_RW 3
-
-
-/* pte_level0_t and pte_level1_t descriptor_type flags */
-
-/** pte_level0_t and pte_level1_t "not present" flag (used in descriptor_type). */
-#define PTE_DESCRIPTOR_NOT_PRESENT 0
-
-/** pte_level0_t coarse page table flag (used in descriptor_type). */
-#define PTE_DESCRIPTOR_COARSE_TABLE 1
-
-/** pte_level1_t small page table flag (used in descriptor type). */
-#define PTE_DESCRIPTOR_SMALL_PAGE 2
-
-
-/** Sets the address of level 0 page table.
- *
- * @param pt Pointer to the page table to set.
- *
- */
-NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
-{
-    asm volatile (
-        "mcr p15, 0, %[pt], c2, c0, 0\n"
-        :: [pt] "r" (pt)
-    );
-}
-
-
-/** Returns level 0 page table entry flags.
- *
- * @param pt Level 0 page table.
- * @param i  Index of the entry to return.
- *
- */
-NO_TRACE static inline int get_pt_level0_flags(pte_t *pt, size_t i)
-{
-    pte_level0_t *p = &pt[i].l0;
-    int np = (p->descriptor_type == PTE_DESCRIPTOR_NOT_PRESENT);
-
-    return (np << PAGE_PRESENT_SHIFT) | (1 << PAGE_USER_SHIFT) |
-        (1 << PAGE_READ_SHIFT) | (1 << PAGE_WRITE_SHIFT) |
-        (1 << PAGE_EXEC_SHIFT) | (1 << PAGE_CACHEABLE_SHIFT);
-}
-
-/** Returns level 1 page table entry flags.
- *
- * @param pt Level 1 page table.
- * @param i  Index of the entry to return.
- *
- */
-NO_TRACE static inline int get_pt_level1_flags(pte_t *pt, size_t i)
-{
-    pte_level1_t *p = &pt[i].l1;
-
-    int dt = p->descriptor_type;
-    int ap = p->access_permission_0;
-
-    return ((dt == PTE_DESCRIPTOR_NOT_PRESENT) << PAGE_PRESENT_SHIFT) |
-        ((ap == PTE_AP_USER_RO_KERNEL_RW) << PAGE_READ_SHIFT) |
-        ((ap == PTE_AP_USER_RW_KERNEL_RW) << PAGE_READ_SHIFT) |
-        ((ap == PTE_AP_USER_RW_KERNEL_RW) << PAGE_WRITE_SHIFT) |
-        ((ap != PTE_AP_USER_NO_KERNEL_RW) << PAGE_USER_SHIFT) |
-        ((ap == PTE_AP_USER_NO_KERNEL_RW) << PAGE_READ_SHIFT) |
-        ((ap == PTE_AP_USER_NO_KERNEL_RW) << PAGE_WRITE_SHIFT) |
-        (1 << PAGE_EXEC_SHIFT) |
-        (p->bufferable << PAGE_CACHEABLE);
-}
-
-/** Sets flags of level 0 page table entry.
- *
- * @param pt    level 0 page table
- * @param i     index of the entry to be changed
- * @param flags new flags
- *
- */
-NO_TRACE static inline void set_pt_level0_flags(pte_t *pt, size_t i, int flags)
-{
-    pte_level0_t *p = &pt[i].l0;
-
-    if (flags & PAGE_NOT_PRESENT) {
-        p->descriptor_type = PTE_DESCRIPTOR_NOT_PRESENT;
-        /*
-         * Ensures that the entry will be recognized as valid when
-         * PTE_VALID_ARCH applied.
-         */
-        p->should_be_zero = 1;
-    } else {
-        p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
-        p->should_be_zero = 0;
-    }
-}
-
-NO_TRACE static inline void set_pt_level0_present(pte_t *pt, size_t i)
-{
-    pte_level0_t *p = &pt[i].l0;
-
-    p->should_be_zero = 0;
-    write_barrier();
-    p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
-}
-
-/** Sets flags of level 1 page table entry.
- *
- * We use same access rights for the whole page. When page
- * is not preset we store 1 in acess_rigts_3 so that at least
- * one bit is 1 (to mark correct page entry, see #PAGE_VALID_ARCH).
- *
- * @param pt    Level 1 page table.
- * @param i     Index of the entry to be changed.
- * @param flags New flags.
- *
- */
-NO_TRACE static inline void set_pt_level1_flags(pte_t *pt, size_t i, int flags)
-{
-    pte_level1_t *p = &pt[i].l1;
-
-    if (flags & PAGE_NOT_PRESENT)
-        p->descriptor_type = PTE_DESCRIPTOR_NOT_PRESENT;
-    else
-        p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
-
-    p->cacheable = p->bufferable = (flags & PAGE_CACHEABLE) != 0;
-
-    /* default access permission */
-    p->access_permission_0 = p->access_permission_1 =
-        p->access_permission_2 = p->access_permission_3 =
-        PTE_AP_USER_NO_KERNEL_RW;
-
-    if (flags & PAGE_USER) {
-        if (flags & PAGE_READ) {
-            p->access_permission_0 = p->access_permission_1 =
-                p->access_permission_2 = p->access_permission_3 =
-                PTE_AP_USER_RO_KERNEL_RW;
-        }
-        if (flags & PAGE_WRITE) {
-            p->access_permission_0 = p->access_permission_1 =
-                p->access_permission_2 = p->access_permission_3 =
-                PTE_AP_USER_RW_KERNEL_RW;
-        }
-    }
-}
-
-NO_TRACE static inline void set_pt_level1_present(pte_t *pt, size_t i)
-{
-    pte_level1_t *p = &pt[i].l1;
-
-    p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
-}
-
-extern void page_arch_init(void);
-
-#endif /* __ASM__ */
+#if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
+#include "page_armv6.h"
+#elif defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
+#include "page_armv4.h"
+#else
+#error "Unsupported architecture"
+#endif
 
 #endif
kernel/arch/arm32/include/arch/mm/page_fault.h
ea906c29 → 850235d
 
 
-/** Decribes CP15 "fault status register" (FSR). */
-typedef struct {
-    unsigned status : 3;
-    unsigned domain : 4;
-    unsigned zero : 1;
-    unsigned should_be_zero : 24;
-} ATTRIBUTE_PACKED fault_status_t;
-
-
-/** Help union used for casting integer value into #fault_status_t. */
+/** Decribes CP15 "fault status register" (FSR).
+ *
+ * "VMSAv6 added a fifth fault status bit (bit[10]) to both the IFSR and DFSR.
+ * It is IMPLEMENTATION DEFINED how this bit is encoded in earlier versions of
+ * the architecture. A write flag (bit[11] of the DFSR) has also been
+ * introduced."
+ * ARM Architecture Reference Manual version i ch. B4.6 (PDF p. 719)
+ *
+ * See ARM Architecture Reference Manual ch. B4.9.6 (pdf p.743). for FSR info
+ */
 typedef union {
-    fault_status_t fs;
-    uint32_t dummy;
-} fault_status_union_t;
+    struct {
+        unsigned status : 4;
+        unsigned domain : 4;
+        unsigned zero : 1;
+        unsigned lpae : 1;  /**< Needs LPAE support implemented */
+        unsigned fs : 1;    /**< armv6+ mandated, earlier IPLM. DEFINED */
+        unsigned wr : 1;    /**< armv6+ only */
+        unsigned ext : 1;   /**< external abort */
+        unsigned cm : 1;    /**< Cache maintenance, needs LPAE support */
+        unsigned should_be_zero : 18;
+    } data;
+    struct {
+        unsigned status : 4;
+        unsigned sbz0 : 6;
+        unsigned fs : 1;
+        unsigned should_be_zero : 21;
+    } inst;
+    uint32_t raw;
+} fault_status_t;
 
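The widened union makes decoding a raw fault status value a matter of reading named bitfields; page_fault.c below initializes it with `{ .raw = DFSR_read() }`. A host-side sketch of decoding a sample value (the 0x805 is made up for illustration; the bitfield layout assumes an ABI that allocates fields from bit 0 upward, as the ARM EABI does):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        struct {
            unsigned status : 4;
            unsigned domain : 4;
            unsigned zero : 1;
            unsigned lpae : 1;
            unsigned fs : 1;
            unsigned wr : 1;
            unsigned ext : 1;
            unsigned cm : 1;
            unsigned should_be_zero : 18;
        } data;
        uint32_t raw;
    } fault_status_t;

    int main(void)
    {
        /* Hypothetical DFSR: status 0b0101 (translation fault L1 in the
         * encoding used by page_fault.c) with the write bit (bit 11) set. */
        fault_status_t fsr = { .raw = 0x805 };
        printf("status=%u domain=%u write=%u\n",
            fsr.data.status, fsr.data.domain, fsr.data.wr);
        return 0;
    }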
kernel/arch/arm32/include/arch/regutils.h
ea906c29 → 850235d
 #define STATUS_REG_MODE_MASK 0x1f
 
-#define CP15_R1_HIGH_VECTORS_BIT (1 << 13)
-
 /* ARM Processor Operation Modes */
-#define USER_MODE 0x10
-#define FIQ_MODE 0x11
-#define IRQ_MODE 0x12
-#define SUPERVISOR_MODE 0x13
-#define ABORT_MODE 0x17
-#define UNDEFINED_MODE 0x1b
-#define SYSTEM_MODE 0x1f
-
+enum {
+    USER_MODE = 0x10,
+    FIQ_MODE = 0x11,
+    IRQ_MODE = 0x12,
+    SUPERVISOR_MODE = 0x13,
+    MONITOR_MODE = 0x16,
+    ABORT_MODE = 0x17,
+    HYPERVISOR_MODE = 0x1a,
+    UNDEFINED_MODE = 0x1b,
+    SYSTEM_MODE = 0x1f,
+    MODE_MASK = 0x1f,
+};
 /* [CS]PRS manipulation macros */
 #define GEN_STATUS_READ(nm, reg) \
kernel/arch/arm32/src/arm32.c
ea906c29 → 850235d
 #include <str.h>
 #include <arch/ras.h>
+#include <sysinfo/sysinfo.h>
 
 /** Performs arm32-specific initialization before main_bsp() is called. */
…
 {
     machine_input_init();
+    const char *platform = machine_get_platform_name();
+
+    sysinfo_set_item_data("platform", NULL, (void *) platform,
+        str_size(platform));
 }
 
…
     uint8_t *stck;
 
-    stck = &THREAD->kstack[STACK_SIZE - SP_DELTA];
+    stck = &THREAD->kstack[STACK_SIZE];
     supervisor_sp = (uintptr_t) stck;
 }
kernel/arch/arm32/src/cpu/cpu.c
ea906c29 → 850235d
  */
 
+#include <arch/cache.h>
 #include <arch/cpu.h>
+#include <arch/cp15.h>
 #include <cpu.h>
 #include <arch.h>
 #include <print.h>
 
-/** Number of indexes left out in the #imp_data array */
-#define IMP_DATA_START_OFFSET 0x40
-
-/** Implementators (vendor) names */
-static const char *imp_data[] = {
-    "?",                             /* IMP_DATA_START_OFFSET */
-    "ARM Ltd",                       /* 0x41 */
-    "",                              /* 0x42 */
-    "",                              /* 0x43 */
-    "Digital Equipment Corporation", /* 0x44 */
-    "", "", "", "", "", "", "", "", "", "",  /* 0x45 - 0x4e */
-    "", "", "", "", "", "", "", "", "", "",  /* 0x4f - 0x58 */
-    "", "", "", "", "", "", "", "", "", "",  /* 0x59 - 0x62 */
-    "", "", "", "", "", "",                  /* 0x63 - 0x68 */
-    "Intel Corporation"              /* 0x69 */
-};
-
-/** Length of the #imp_data array */
-static unsigned int imp_data_length = sizeof(imp_data) / sizeof(char *);
+static inline unsigned log2(unsigned val)
+{
+    unsigned log = 0;
+    --val;
+    while (val) {
+        ++log;
+        val >>= 1;
+    }
+    return log;
+}
+
+static unsigned dcache_ways(unsigned level);
+static unsigned dcache_sets(unsigned level);
+static unsigned dcache_linesize_log(unsigned level);
+
+
+/** Implementers (vendor) names */
+static const char * implementer(unsigned id)
+{
+    switch (id)
+    {
+    case 0x41: return "ARM Limited";
+    case 0x44: return "Digital Equipment Corporation";
+    case 0x4d: return "Motorola, Freescale Semiconductor Inc.";
+    case 0x51: return "Qualcomm Inc.";
+    case 0x56: return "Marvell Semiconductor Inc.";
+    case 0x69: return "Intel Corporation";
+    }
+    return "Unknown implementer";
+}
 
 /** Architecture names */
-static const char *arch_data[] = {
-    "?",    /* 0x0 */
-    "4",    /* 0x1 */
-    "4T",   /* 0x2 */
-    "5",    /* 0x3 */
-    "5T",   /* 0x4 */
-    "5TE",  /* 0x5 */
-    "5TEJ", /* 0x6 */
-    "6"     /* 0x7 */
-};
-
-/** Length of the #arch_data array */
-static unsigned int arch_data_length = sizeof(arch_data) / sizeof(char *);
+static const char * architecture_string(cpu_arch_t *arch)
+{
+    static const char *arch_data[] = {
+        "ARM",      /* 0x0 */
+        "ARMv4",    /* 0x1 */
+        "ARMv4T",   /* 0x2 */
+        "ARMv5",    /* 0x3 */
+        "ARMv5T",   /* 0x4 */
+        "ARMv5TE",  /* 0x5 */
+        "ARMv5TEJ", /* 0x6 */
+        "ARMv6"     /* 0x7 */
+    };
+    if (arch->arch_num < (sizeof(arch_data) / sizeof(arch_data[0])))
+        return arch_data[arch->arch_num];
+    else
+        return arch_data[0];
+}
 
 
 /** Retrieves processor identification from CP15 register 0.
  *
  * @param cpu Structure for storing CPU identification.
+ * See page B4-1630 of ARM Architecture Reference Manual.
  */
 static void arch_cpu_identify(cpu_arch_t *cpu)
 {
-    uint32_t ident;
-    asm volatile (
-        "mrc p15, 0, %[ident], c0, c0, 0\n"
-        : [ident] "=r" (ident)
-    );
-
-    cpu->imp_num = ident >> 24;
-    cpu->variant_num = (ident << 8) >> 28;
-    cpu->arch_num = (ident << 12) >> 28;
-    cpu->prim_part_num = (ident << 16) >> 20;
-    cpu->rev_num = (ident << 28) >> 28;
-}
-
-/** Does nothing on ARM. */
-void cpu_arch_init(void)
-{
+    const uint32_t ident = MIDR_read();
+
+    cpu->imp_num = (ident >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
+    cpu->variant_num = (ident >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
+    cpu->arch_num = (ident >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK;
+    cpu->prim_part_num = (ident >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK;
+    cpu->rev_num = (ident >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
+
+    // TODO CPUs with arch_num == 0xf use CPUID scheme for identification
+    cpu->dcache_levels = dcache_levels();
+
+    for (unsigned i = 0; i < cpu->dcache_levels; ++i) {
+        cpu->dcache[i].ways = dcache_ways(i);
+        cpu->dcache[i].sets = dcache_sets(i);
+        cpu->dcache[i].way_shift = 31 - log2(cpu->dcache[i].ways);
+        cpu->dcache[i].set_shift = dcache_linesize_log(i);
+        cpu->dcache[i].line_size = 1 << dcache_linesize_log(i);
+        printf("Found DCache L%u: %u-way, %u sets, %u byte lines "
+            "(shifts: w%u, s%u)\n", i + 1, cpu->dcache[i].ways,
+            cpu->dcache[i].sets, cpu->dcache[i].line_size,
+            cpu->dcache[i].way_shift, cpu->dcache[i].set_shift);
+    }
+}
+
+/** Enables unaligned access and caching for armv6+ */
+void cpu_arch_init(void)
+{
+    uint32_t control_reg = SCTLR_read();
+
+    /* Turn off tex remap, RAZ/WI prior to armv7 */
+    control_reg &= ~SCTLR_TEX_REMAP_EN_FLAG;
+    /* Turn off accessed flag, RAZ/WI prior to armv7 */
+    control_reg &= ~(SCTLR_ACCESS_FLAG_EN_FLAG | SCTLR_HW_ACCESS_FLAG_EN_FLAG);
+
+    /* Unaligned access is supported on armv6+ */
+#if defined(PROCESSOR_ARCH_armv7_a) | defined(PROCESSOR_ARCH_armv6)
+    /* Enable unaligned access, RAZ/WI prior to armv6
+     * switchable on armv6, RAO/WI writes on armv7,
+     * see ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition
+     * L.3.1 (p. 2456) */
+    control_reg |= SCTLR_UNALIGNED_EN_FLAG;
+    /* Disable alignment checks, this turns unaligned access to undefined,
+     * unless U bit is set. */
+    control_reg &= ~SCTLR_ALIGN_CHECK_EN_FLAG;
+    /* Enable caching, On arm prior to armv7 there is only one level
+     * of caches. Data cache is coherent.
+     * "This means that the behavior of accesses from the same observer to
+     * different VAs, that are translated to the same PA
+     * with the same memory attributes, is fully coherent."
+     * ARM Architecture Reference Manual ARMv7-A and ARMv7-R Edition
+     * B3.11.1 (p. 1383)
+     * We are safe to turn this on. For arm v6 see ch L.6.2 (p. 2469)
+     * L2 Cache for armv7 is enabled by default (i.e. controlled by
+     * this flag).
+     */
+    control_reg |= SCTLR_CACHE_EN_FLAG;
+#endif
+#ifdef PROCESSOR_ARCH_armv7_a
+    /* ICache coherency is elaborate on in barrier.h.
+     * VIPT and PIPT caches need maintenance only on code modify,
+     * so it should be safe for general use.
+     * Enable branch predictors too as they follow the same rules
+     * as ICache and they can be flushed together
+     */
+    if ((CTR_read() & CTR_L1I_POLICY_MASK) != CTR_L1I_POLICY_AIVIVT) {
+        control_reg |=
+            SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
+    }
+#endif
+    SCTLR_write(control_reg);
+
+#ifdef CONFIG_FPU
+    fpu_setup();
+#endif
+
+#ifdef PROCESSOR_ARCH_armv7_a
+    if ((ID_PFR1_read() & ID_PFR1_GEN_TIMER_EXT_MASK) !=
+        ID_PFR1_GEN_TIMER_EXT) {
+        PMCR_write(PMCR_read() | PMCR_E_FLAG | PMCR_D_FLAG);
+        PMCNTENSET_write(PMCNTENSET_CYCLE_COUNTER_EN_FLAG);
+    }
+#endif
 }
 
 /** Retrieves processor identification and stores it to #CPU.arch */
 void cpu_identify(void)
 {
     arch_cpu_identify(&CPU->arch);
…
 void cpu_print_report(cpu_t *m)
 {
-    const char *vendor = imp_data[0];
-    const char *architecture = arch_data[0];
-    cpu_arch_t * cpu_arch = &m->arch;
-
-    if ((cpu_arch->imp_num) > 0 &&
-        (cpu_arch->imp_num < (imp_data_length + IMP_DATA_START_OFFSET))) {
-        vendor = imp_data[cpu_arch->imp_num - IMP_DATA_START_OFFSET];
-    }
-
-    if ((cpu_arch->arch_num) > 0 &&
-        (cpu_arch->arch_num < arch_data_length)) {
-        architecture = arch_data[cpu_arch->arch_num];
-    }
-
-    printf("cpu%d: vendor=%s, architecture=ARM%s, part number=%x, "
+    printf("cpu%d: vendor=%s, architecture=%s, part number=%x, "
         "variant=%x, revision=%x\n",
-        m->id, vendor, architecture, cpu_arch->prim_part_num,
-        cpu_arch->variant_num, cpu_arch->rev_num);
+        m->id, implementer(m->arch.imp_num),
+        architecture_string(&m->arch), m->arch.prim_part_num,
+        m->arch.variant_num, m->arch.rev_num);
+}
+
+/** See chapter B4.1.19 of ARM Architecture Reference Manual */
+static unsigned dcache_linesize_log(unsigned level)
+{
+#ifdef PROCESSOR_ARCH_armv7_a
+    CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+    const unsigned ls_log = 2 +
+        ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);
+    return ls_log + 2; //return log2(bytes)
+#endif
+    return 0;
+
+}
+
+/** See chapter B4.1.19 of ARM Architecture Reference Manual */
+static unsigned dcache_ways(unsigned level)
+{
+#ifdef PROCESSOR_ARCH_armv7_a
+    CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+    const unsigned ways = 1 +
+        ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
+    return ways;
+#endif
+    return 0;
+}
+
+/** See chapter B4.1.19 of ARM Architecture Reference Manual */
+static unsigned dcache_sets(unsigned level)
+{
+#ifdef PROCESSOR_ARCH_armv7_a
+    CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+    const unsigned sets = 1 +
+        ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
+    return sets;
+#endif
+    return 0;
+}
+
+unsigned dcache_levels(void)
+{
+    unsigned levels = 0;
+#ifdef PROCESSOR_ARCH_armv7_a
+    const uint32_t val = CLIDR_read();
+    for (unsigned i = 1; i <= 7; ++i) {
+        const unsigned ctype = CLIDR_CACHE(i, val);
+        switch (ctype) {
+        case CLIDR_DCACHE_ONLY:
+        case CLIDR_SEP_CACHE:
+        case CLIDR_UNI_CACHE:
+            ++levels;
+        default:
+            (void)0;
+        }
+    }
+#endif
+    return levels;
+}
+
+static void dcache_clean_manual(unsigned level, bool invalidate,
+    unsigned ways, unsigned sets, unsigned way_shift, unsigned set_shift)
+{
+    for (unsigned i = 0; i < ways; ++i) {
+        for (unsigned j = 0; j < sets; ++j) {
+            const uint32_t val =
+                ((level & 0x7) << 1) |
+                (j << set_shift) | (i << way_shift);
+            if (invalidate)
+                DCCISW_write(val);
+            else
+                DCCSW_write(val);
+        }
+    }
+}
+
+void dcache_flush(void)
+{
+    /* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
+    const unsigned levels = dcache_levels();
+    for (unsigned i = 0; i < levels; ++i) {
+        const unsigned ways = dcache_ways(i);
+        const unsigned sets = dcache_sets(i);
+        const unsigned way_shift = 31 - log2(ways);
+        const unsigned set_shift = dcache_linesize_log(i);
+        dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
+    }
+}
+
+void dcache_flush_invalidate(void)
+{
+    /* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
+    const unsigned levels = dcache_levels();
+    for (unsigned i = 0; i < levels; ++i) {
+        const unsigned ways = dcache_ways(i);
+        const unsigned sets = dcache_sets(i);
+        const unsigned way_shift = 31 - log2(ways);
+        const unsigned set_shift = dcache_linesize_log(i);
+        dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
+    }
+}
+
+
+void cpu_dcache_flush(void)
+{
+    for (unsigned i = 0; i < CPU->arch.dcache_levels; ++i)
+        dcache_clean_manual(i, false,
+            CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
+            CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
+}
+
+void cpu_dcache_flush_invalidate(void)
+{
+    const unsigned levels = dcache_levels();
+    for (unsigned i = 0; i < levels; ++i)
+        dcache_clean_manual(i, true,
+            CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
+            CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
+}
+
+void icache_invalidate(void)
+{
+    ICIALLU_write(0);
 }
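The cache geometry code above derives way_shift and set_shift from a small ceiling-log2 helper. A quick host-side check of that helper's behavior, renamed log2_ceil here to avoid colliding with the math.h log2:

    #include <assert.h>

    /* Same logic as the log2 helper in cpu.c: ceiling of log2. */
    static unsigned log2_ceil(unsigned val)
    {
        unsigned log = 0;
        --val;
        while (val) {
            ++log;
            val >>= 1;
        }
        return log;
    }

    int main(void)
    {
        assert(log2_ceil(1) == 0);
        assert(log2_ceil(2) == 1);
        assert(log2_ceil(4) == 2);
        assert(log2_ceil(5) == 3); /* rounds up for non-powers of two */
        return 0;
    }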
kernel/arch/arm32/src/dummy.S
ea906c29 → 850235d
 .global asm_delay_loop
 
-.global fpu_context_restore
-.global fpu_context_save
-.global fpu_enable
-.global fpu_init
-
 .global sys_tls_set
 .global dummy
…
     mov pc, lr
 
-fpu_context_restore:
-    mov pc, lr
-
-fpu_context_save:
-    mov pc, lr
-
-fpu_enable:
-    mov pc, lr
-
-fpu_init:
-    mov pc, lr
-
 # not used on ARM
 sys_tls_set:
kernel/arch/arm32/src/exception.c
ea906c29 → 850235d
 #include <interrupt.h>
 #include <arch/mm/page_fault.h>
+#include <arch/cp15.h>
 #include <arch/barrier.h>
 #include <print.h>
…
     /* make it LDR instruction and store at exception vector */
     *vector = handler_address_ptr | LDR_OPCODE;
-    smc_coherence(*vector);
+    smc_coherence(vector);
 
     /* store handler's address */
…
 
 #ifdef HIGH_EXCEPTION_VECTORS
-/** Activates use of high exception vectors addresses. */
+/** Activates use of high exception vectors addresses.
+ *
+ * "High vectors were introduced into some implementations of ARMv4 and are
+ * required in ARMv6 implementations. High vectors allow the exception vector
+ * locations to be moved from their normal address range 0x00000000-0x0000001C
+ * at the bottom of the 32-bit address space, to an alternative address range
+ * 0xFFFF0000-0xFFFF001C near the top of the address space. These alternative
+ * locations are known as the high vectors.
+ *
+ * Prior to ARMv6, it is IMPLEMENTATION DEFINED whether the high vectors are
+ * supported. When they are, a hardware configuration input selects whether
+ * the normal vectors or the high vectors are to be used from
+ * reset." ARM Architecture Reference Manual A2.6.11 (p. 64 in the PDF).
+ *
+ * ARM920T (gta02) TRM A2.3.5 (PDF p. 36) and ARM926EJ-S (icp) 2.3.2 (PDF p. 42)
+ * say that armv4 an armv5 chips that we support implement this.
+ */
 static void high_vectors(void)
 {
-    uint32_t control_reg;
-
-    asm volatile (
-        "mrc p15, 0, %[control_reg], c1, c0"
-        : [control_reg] "=r" (control_reg)
-    );
+    uint32_t control_reg = SCTLR_read();
 
     /* switch on the high vectors bit */
-    control_reg |= CP15_R1_HIGH_VECTORS_BIT;
-
-    asm volatile (
-        "mcr p15, 0, %[control_reg], c1, c0"
-        :: [control_reg] "r" (control_reg)
-    );
+    control_reg |= SCTLR_HIGH_VECTORS_EN_FLAG;
+
+    SCTLR_write(control_reg);
 }
 #endif
…
 {
     machine_irq_exception(exc_no, istate);
+}
+
+/** Undefined instruction exception handler.
+ *
+ * Calls scheduler_fpu_lazy_request
+ */
+static void undef_insn_exception(unsigned int exc_no, istate_t *istate)
+{
+#ifdef CONFIG_FPU
+    if (handle_if_fpu_exception()) {
+        /*
+         * Retry the failing instruction,
+         * ARM Architecture Reference Manual says on p.B1-1169
+         * that offset for undef instruction exception is 4
+         */
+        istate->pc -= 4;
+        return;
+    }
+#endif
+    fault_if_from_uspace(istate, "Undefined instruction.");
+    panic_badtrap(istate, exc_no, "Undefined instruction.");
 }
…
 void exception_init(void)
 {
+    // TODO check for availability of high vectors for <= armv5
 #ifdef HIGH_EXCEPTION_VECTORS
     high_vectors();
…
     install_exception_handlers();
 
+    exc_register(EXC_UNDEF_INSTR, "undefined instruction", true,
+        (iroutine_t) undef_insn_exception);
     exc_register(EXC_IRQ, "interrupt", true,
         (iroutine_t) irq_exception);
kernel/arch/arm32/src/mach/gta02/gta02.c
ea906c29 → 850235d
 
-/** @addtogroup arm32gxemul
+/** @addtogroup arm32gta02
  * @{
  */
kernel/arch/arm32/src/machine_func.c
ea906c29 → 850235d
 #include <arch/mach/gta02/gta02.h>
 #include <arch/mach/integratorcp/integratorcp.h>
-#include <arch/mach/testarm/testarm.h>
+#include <arch/mach/beagleboardxm/beagleboardxm.h>
+#include <arch/mach/beaglebone/beaglebone.h>
 
 /** Pointer to machine_ops structure being used. */
…
 #if defined(MACHINE_gta02)
     machine_ops = &gta02_machine_ops;
-#elif defined(MACHINE_testarm)
-    machine_ops = &gxemul_machine_ops;
 #elif defined(MACHINE_integratorcp)
     machine_ops = &icp_machine_ops;
+#elif defined(MACHINE_beagleboardxm)
+    machine_ops = &bbxm_machine_ops;
+#elif defined(MACHINE_beaglebone)
+    machine_ops = &bbone_machine_ops;
 #else
 #error Machine type not defined.
…
 }
 
+const char * machine_get_platform_name(void)
+{
+    if (machine_ops->machine_get_platform_name)
+        return machine_ops->machine_get_platform_name();
+    return NULL;
+}
 /** @}
  */
kernel/arch/arm32/src/mm/page.c
ea906c29 → 850235d
 void page_arch_init(void)
 {
-    int flags = PAGE_CACHEABLE;
+    int flags = PAGE_CACHEABLE | PAGE_EXEC;
     page_mapping_operations = &pt_mapping_operations;
 
     page_table_lock(AS_KERNEL, true);
 
-    uintptr_t cur;
-
     /* Kernel identity mapping */
-    for (cur = PHYSMEM_START_ADDR;
-        cur < min(config.identity_size, config.physmem_end);
+    //FIXME: We need to consider the possibility that
+    //identity_base > identity_size and physmem_end.
+    //This might lead to overflow if identity_size is too big.
+    for (uintptr_t cur = PHYSMEM_START_ADDR;
+        cur < min(KA2PA(config.identity_base) +
+        config.identity_size, config.physmem_end);
         cur += FRAME_SIZE)
         page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
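The FIXME above notes that the new bound `KA2PA(config.identity_base) + config.identity_size` can wrap around if identity_size is too big. One overflow-safe shape for such a loop is sketched below; identity_map() and map_frame() are illustrative stand-ins, not the committed code:

    #include <stdint.h>

    #define FRAME_SIZE 4096

    /* map_frame() stands in for page_mapping_insert(); hypothetical. */
    extern void map_frame(uintptr_t pa);

    static void identity_map(uintptr_t start, uintptr_t end)
    {
        for (uintptr_t cur = start; cur < end; cur += FRAME_SIZE) {
            map_frame(cur);
            if (cur > UINTPTR_MAX - FRAME_SIZE)
                break; /* next increment would wrap to 0 and loop forever */
        }
    }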
kernel/arch/arm32/src/mm/page_fault.c
ea906c29 → 850235d
  */
 #include <panic.h>
+#include <arch/cp15.h>
 #include <arch/exception.h>
 #include <arch/mm/page_fault.h>
…
 #include <print.h>
 
-/** Returns value stored in fault status register.
- *
- * @return Value stored in CP15 fault status register (FSR).
- */
-static inline fault_status_t read_fault_status_register(void)
-{
-    fault_status_union_t fsu;
-
-    /* fault status is stored in CP15 register 5 */
-    asm volatile (
-        "mrc p15, 0, %[dummy], c5, c0, 0"
-        : [dummy] "=r" (fsu.dummy)
-    );
-
-    return fsu.fs;
-}
-
-/** Returns FAR (fault address register) content.
- *
- * @return FAR (fault address register) content (address that caused a page
- *         fault)
- */
-static inline uintptr_t read_fault_address_register(void)
-{
-    uintptr_t ret;
-
-    /* fault adress is stored in CP15 register 6 */
-    asm volatile (
-        "mrc p15, 0, %[ret], c6, c0, 0"
-        : [ret] "=r" (ret)
-    );
-
-    return ret;
-}
-
+/**
+ * FSR encoding ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition.
+ *
+ * B3.13.3 page B3-1406 (PDF page 1406)
+ */
+typedef enum {
+    DFSR_SOURCE_ALIGN = 0x0001,
+    DFSR_SOURCE_CACHE_MAINTENANCE = 0x0004,
+    DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1 = 0x000c,
+    DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2 = 0x000e,
+    DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1 = 0x040c,
+    DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2 = 0x040e,
+    DFSR_SOURCE_TRANSLATION_L1 = 0x0005,
+    DFSR_SOURCE_TRANSLATION_L2 = 0x0007,
+    DFSR_SOURCE_ACCESS_FLAG_L1 = 0x0003, /**< @note: This used to be alignment enc. */
+    DFSR_SOURCE_ACCESS_FLAG_L2 = 0x0006,
+    DFSR_SOURCE_DOMAIN_L1 = 0x0009,
+    DFSR_SOURCE_DOMAIN_L2 = 0x000b,
+    DFSR_SOURCE_PERMISSION_L1 = 0x000d,
+    DFSR_SOURCE_PERMISSION_L2 = 0x000f,
+    DFSR_SOURCE_DEBUG = 0x0002,
+    DFSR_SOURCE_SYNC_EXTERNAL = 0x0008,
+    DFSR_SOURCE_TLB_CONFLICT = 0x0400,
+    DFSR_SOURCE_LOCKDOWN = 0x0404, /**< @note: Implementation defined */
+    DFSR_SOURCE_COPROCESSOR = 0x040a, /**< @note Implementation defined */
+    DFSR_SOURCE_SYNC_PARITY = 0x0409,
+    DFSR_SOURCE_ASYNC_EXTERNAL = 0x0406,
+    DFSR_SOURCE_ASYNC_PARITY = 0x0408,
+    DFSR_SOURCE_MASK = 0x0000040f,
+} dfsr_source_t;
+
+static inline const char * dfsr_source_to_str(dfsr_source_t source)
+{
+    switch (source) {
+    case DFSR_SOURCE_TRANSLATION_L1:
+        return "Translation fault L1";
+    case DFSR_SOURCE_TRANSLATION_L2:
+        return "Translation fault L2";
+    case DFSR_SOURCE_PERMISSION_L1:
+        return "Permission fault L1";
+    case DFSR_SOURCE_PERMISSION_L2:
+        return "Permission fault L2";
+    case DFSR_SOURCE_ALIGN:
+        return "Alignment fault";
+    case DFSR_SOURCE_CACHE_MAINTENANCE:
+        return "Instruction cache maintenance fault";
+    case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1:
+        return "Synchronous external abort on translation table walk level 1";
+    case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2:
+        return "Synchronous external abort on translation table walk level 2";
+    case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1:
+        return "Synchronous parity error on translation table walk level 1";
+    case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2:
+        return "Synchronous parity error on translation table walk level 2";
+    case DFSR_SOURCE_ACCESS_FLAG_L1:
+        return "Access flag fault L1";
+    case DFSR_SOURCE_ACCESS_FLAG_L2:
+        return "Access flag fault L2";
+    case DFSR_SOURCE_DOMAIN_L1:
+        return "Domain fault L1";
+    case DFSR_SOURCE_DOMAIN_L2:
+        return "Domain flault L2";
+    case DFSR_SOURCE_DEBUG:
+        return "Debug event";
+    case DFSR_SOURCE_SYNC_EXTERNAL:
+        return "Synchronous external abort";
+    case DFSR_SOURCE_TLB_CONFLICT:
+        return "TLB conflict abort";
+    case DFSR_SOURCE_LOCKDOWN:
+        return "Lockdown (Implementation defined)";
+    case DFSR_SOURCE_COPROCESSOR:
+        return "Coprocessor abort (Implementation defined)";
+    case DFSR_SOURCE_SYNC_PARITY:
+        return "Synchronous parity error on memory access";
+    case DFSR_SOURCE_ASYNC_EXTERNAL:
+        return "Asynchronous external abort";
+    case DFSR_SOURCE_ASYNC_PARITY:
+        return "Asynchronous parity error on memory access";
+    case DFSR_SOURCE_MASK:
+        break;
+    }
+    return "Unknown data abort";
+}
+
+#if defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
 /** Decides whether read or write into memory is requested.
…
         panic("page_fault - instruction does not access memory "
             "(instr_code: %#0" PRIx32 ", badvaddr:%p).",
-            instr_union.pc, (void *) badvaddr);
+            *(uint32_t*)instr_union.instr, (void *) badvaddr);
         return PF_ACCESS_EXEC;
 }
…
         inst, (void *) badvaddr);
 }
+#endif
 
 /** Handles "data abort" exception (load or store at invalid address).
…
 void data_abort(unsigned int exc_no, istate_t *istate)
 {
-    fault_status_t fsr __attribute__ ((unused)) =
-        read_fault_status_register();
-    uintptr_t badvaddr = read_fault_address_register();
-
-    pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
-
-    int ret = as_page_fault(badvaddr, access, istate);
-
-    if (ret == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault: %#x.", badvaddr);
-        panic_memtrap(istate, access, badvaddr, NULL);
-    }
+    const uintptr_t badvaddr = DFAR_read();
+    const fault_status_t fsr = { .raw = DFSR_read() };
+    const dfsr_source_t source = fsr.raw & DFSR_SOURCE_MASK;
+
+    switch (source) {
+    case DFSR_SOURCE_TRANSLATION_L1:
+    case DFSR_SOURCE_TRANSLATION_L2:
+    case DFSR_SOURCE_PERMISSION_L1:
+    case DFSR_SOURCE_PERMISSION_L2:
+        /* Page fault is handled further down */
+        break;
+    case DFSR_SOURCE_ALIGN:
+    case DFSR_SOURCE_CACHE_MAINTENANCE:
+    case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1:
+    case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2:
+    case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1:
+    case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2:
+    case DFSR_SOURCE_ACCESS_FLAG_L1:
+    case DFSR_SOURCE_ACCESS_FLAG_L2:
+    case DFSR_SOURCE_DOMAIN_L1:
+    case DFSR_SOURCE_DOMAIN_L2:
+    case DFSR_SOURCE_DEBUG:
+    case DFSR_SOURCE_SYNC_EXTERNAL:
+    case DFSR_SOURCE_TLB_CONFLICT:
+    case DFSR_SOURCE_LOCKDOWN:
+    case DFSR_SOURCE_COPROCESSOR:
+    case DFSR_SOURCE_SYNC_PARITY:
+    case DFSR_SOURCE_ASYNC_EXTERNAL:
+    case DFSR_SOURCE_ASYNC_PARITY:
+    case DFSR_SOURCE_MASK:
+        /* Weird abort stuff */
+        fault_if_from_uspace(istate, "Unhandled abort %s at address: "
+            "%#x.", dfsr_source_to_str(source), badvaddr);
+        panic("Unhandled abort %s at address: %#x.",
+            dfsr_source_to_str(source), badvaddr);
+    }
+
+#if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
+    const pf_access_t access =
+        fsr.data.wr ? PF_ACCESS_WRITE : PF_ACCESS_READ;
+#elif defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
+    const pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
+#else
+#error "Unsupported architecture"
+#endif
+    as_page_fault(badvaddr, access, istate);
 }
…
 void prefetch_abort(unsigned int exc_no, istate_t *istate)
 {
-    int ret = as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
-
-    if (ret == AS_PF_FAULT) {
-        fault_if_from_uspace(istate,
-            "Page fault - prefetch_abort: %#x.", istate->pc);
-        panic_memtrap(istate, PF_ACCESS_EXEC, istate->pc, NULL);
-    }
+    as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
 }
kernel/arch/arm32/src/ras.c
ea906c29 → 850235d
 void ras_check(unsigned int n, istate_t *istate)
 {
-    uintptr_t rewrite_pc = istate->pc;
+    bool restart_needed = false;
+    uintptr_t restart_pc = 0;
 
     if (istate_from_uspace(istate)) {
…
         if ((ras_page[RAS_START] < istate->pc) &&
             (ras_page[RAS_END] > istate->pc)) {
-                rewrite_pc = ras_page[RAS_START];
+                restart_needed = true;
+                restart_pc = ras_page[RAS_START];
             }
             ras_page[RAS_START] = 0;
             ras_page[RAS_END] = 0xffffffff;
         }
     }
 
     exc_dispatch(n, istate);
-
-    istate->pc = rewrite_pc;
+    if (restart_needed)
+        istate->pc = restart_pc;
 }
 
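The rewrite above only touches istate->pc when the interrupted userspace pc actually fell inside the registered restartable-atomic-sequence window; the old unconditional `istate->pc = rewrite_pc` would also have discarded any pc adjustment made inside exc_dispatch() (such as the `istate->pc -= 4` retry in the new undefined-instruction handler). A sketch of the restart decision in isolation, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Restart only when pc fell strictly inside the RAS window,
     * mirroring the (RAS_START < pc && pc < RAS_END) test above. */
    static bool ras_restart_pc(uintptr_t pc, uintptr_t ras_start,
        uintptr_t ras_end, uintptr_t *restart_pc)
    {
        if (ras_start < pc && pc < ras_end) {
            *restart_pc = ras_start;
            return true;
        }
        return false;
    }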
kernel/arch/ia32/Makefile.inc
ea906c29 → 850235d
     arch/$(KARCH)/src/boot/memmap.c \
     arch/$(KARCH)/src/fpu_context.c \
-    arch/$(KARCH)/src/debugger.c \
     arch/$(KARCH)/src/syscall.c
kernel/arch/ia32/include/arch/istate.h
ea906c29 → 850235d
 } istate_t;
 
+#define RPL_USER 3
+
 /** Return true if exception happened while in userspace */
 NO_TRACE static inline int istate_from_uspace(istate_t *istate)
 {
-    return !(istate->eip & UINT32_C(0x80000000));
+    return (istate->cs & RPL_USER) == RPL_USER;
 }
kernel/arch/ia32/src/fpu_context.c
ea906c29 → 850235d
 #include <arch.h>
 #include <cpu.h>
+
+
+/** x87 FPU scr values (P3+ MMX2) */
+enum {
+    X87_FLUSH_ZERO_FLAG = (1 << 15),
+    X87_ROUND_CONTROL_MASK = (0x3 << 13),
+    x87_ROUND_TO_NEAREST_EVEN = (0x0 << 13),
+    X87_ROUND_DOWN_TO_NEG_INF = (0x1 << 13),
+    X87_ROUND_UP_TO_POS_INF = (0x2 << 13),
+    X87_ROUND_TO_ZERO = (0x3 << 13),
+    X87_PRECISION_MASK = (1 << 12),
+    X87_UNDERFLOW_MASK = (1 << 11),
+    X87_OVERFLOW_MASK = (1 << 10),
+    X87_ZERO_DIV_MASK = (1 << 9),
+    X87_DENORMAL_OP_MASK = (1 << 8),
+    X87_INVALID_OP_MASK = (1 << 7),
+    X87_DENOM_ZERO_FLAG = (1 << 6),
+    X87_PRECISION_EXC_FLAG = (1 << 5),
+    X87_UNDERFLOW_EXC_FLAG = (1 << 4),
+    X87_OVERFLOW_EXC_FLAG = (1 << 3),
+    X87_ZERO_DIV_EXC_FLAG = (1 << 2),
+    X87_DENORMAL_EXC_FLAG = (1 << 1),
+    X87_INVALID_OP_EXC_FLAG = (1 << 0),
+
+    X87_ALL_MASK = X87_PRECISION_MASK | X87_UNDERFLOW_MASK | X87_OVERFLOW_MASK | X87_ZERO_DIV_MASK | X87_DENORMAL_OP_MASK | X87_INVALID_OP_MASK,
+};
+
 
 typedef void (*fpu_context_function)(fpu_context_t *fctx);
…
 }
 
+/** Initialize x87 FPU. Mask all exceptions. */
 void fpu_init()
 {
…
         "ldmxcsr %[help0]\n"
         : [help0] "+m" (help0), [help1] "+r" (help1)
-        : [magic] "i" (0x1f80)
+        : [magic] "i" (X87_ALL_MASK)
     );
 }
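The named MXCSR bits replace the bare 0x1f80 magic in fpu_init(); the six exception-mask bits do add up to the old constant, which a two-line host check confirms:

    #include <assert.h>

    enum {
        X87_PRECISION_MASK   = (1 << 12),
        X87_UNDERFLOW_MASK   = (1 << 11),
        X87_OVERFLOW_MASK    = (1 << 10),
        X87_ZERO_DIV_MASK    = (1 << 9),
        X87_DENORMAL_OP_MASK = (1 << 8),
        X87_INVALID_OP_MASK  = (1 << 7),
        X87_ALL_MASK = X87_PRECISION_MASK | X87_UNDERFLOW_MASK |
            X87_OVERFLOW_MASK | X87_ZERO_DIV_MASK |
            X87_DENORMAL_OP_MASK | X87_INVALID_OP_MASK,
    };

    int main(void)
    {
        /* The named exception masks add up to the old magic number. */
        assert(X87_ALL_MASK == 0x1f80);
        return 0;
    }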
kernel/arch/ia32/src/ia32.c
ea906c29 → 850235d
 #include <arch/bios/bios.h>
 #include <arch/boot/boot.h>
-#include <arch/debugger.h>
 #include <arch/drivers/i8254.h>
 #include <arch/drivers/i8259.h>
…
 #endif
 
-    /* Enable debugger */
-    debugger_init();
     /* Merge all memory zones to 1 big zone */
     zone_merge_all();
kernel/arch/ia64/include/arch/istate.h
ea906c29 → 850235d
     uint64_t in5;
     uint64_t in6;
+
+    uint64_t alignment;
 } istate_t;
 
kernel/arch/ia64/include/arch/mm/as.h
ea906c29 → 850235d
 #define USER_ADDRESS_SPACE_END_ARCH UINT64_C(0xdfffffffffffffff)
 
-#define USTACK_ADDRESS_ARCH UINT64_C(0x0000000ff0000000)
-
 typedef struct {
 } as_arch_t;
kernel/arch/ia64/src/ivt.S
ea906c29 → 850235d
 #define FRS_TO_SAVE 30
 #define STACK_ITEMS (21 + FRS_TO_SAVE * 2)
-#define STACK_FRAME_SIZE ALIGN_UP((STACK_ITEMS *STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)
+#define STACK_FRAME_SIZE ALIGN_UP((STACK_ITEMS * STACK_ITEM_SIZE) + STACK_SCRATCH_AREA_SIZE, STACK_ALIGNMENT)
 
 #if (STACK_ITEMS % 2 == 0)
kernel/arch/ia64/src/mm/tlb.c
rea906c29 r850235d 113 113 va = page; 114 114 115 rr.word = rr_read(VA2VRN( va));116 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN( va))))) {115 rr.word = rr_read(VA2VRN(page)); 116 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) { 117 117 /* 118 118 * The selected region register does not contain required RID. … … 122 122 123 123 rr0 = rr; 124 rr0.map.rid = ASID2RID(asid, VA2VRN( va));125 rr_write(VA2VRN( va), rr0.word);124 rr0.map.rid = ASID2RID(asid, VA2VRN(page)); 125 rr_write(VA2VRN(page), rr0.word); 126 126 srlz_d(); 127 127 srlz_i(); … … 139 139 case 1: /* cnt 4 - 15 */ 140 140 ps = PAGE_WIDTH + 2; 141 va &= ~((1 << ps) - 1);141 va &= ~((1UL << ps) - 1); 142 142 break; 143 143 case 2: /* cnt 16 - 63 */ 144 144 ps = PAGE_WIDTH + 4; 145 va &= ~((1 << ps) - 1);145 va &= ~((1UL << ps) - 1); 146 146 break; 147 147 case 3: /* cnt 64 - 255 */ 148 148 ps = PAGE_WIDTH + 6; 149 va &= ~((1 << ps) - 1);149 va &= ~((1UL << ps) - 1); 150 150 break; 151 151 case 4: /* cnt 256 - 1023 */ 152 152 ps = PAGE_WIDTH + 8; 153 va &= ~((1 << ps) - 1);153 va &= ~((1UL << ps) - 1); 154 154 break; 155 155 case 5: /* cnt 1024 - 4095 */ 156 156 ps = PAGE_WIDTH + 10; 157 va &= ~((1 << ps) - 1);157 va &= ~((1UL << ps) - 1); 158 158 break; 159 159 case 6: /* cnt 4096 - 16383 */ 160 160 ps = PAGE_WIDTH + 12; 161 va &= ~((1 << ps) - 1);161 va &= ~((1UL << ps) - 1); 162 162 break; 163 163 case 7: /* cnt 16384 - 65535 */ 164 164 case 8: /* cnt 65536 - (256K - 1) */ 165 165 ps = PAGE_WIDTH + 14; 166 va &= ~((1 << ps) - 1);166 va &= ~((1UL << ps) - 1); 167 167 break; 168 168 default: 169 169 ps = PAGE_WIDTH + 18; 170 va &= ~((1 << ps) - 1);171 break; 172 } 173 174 for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps))170 va &= ~((1UL << ps) - 1); 171 break; 172 } 173 174 for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps)) 175 175 asm volatile ( 176 176 "ptc.l %[va], %[ps] ;;" … … 183 183 184 184 if (restore_rr) { 185 rr_write(VA2VRN( va), rr.word);185 rr_write(VA2VRN(page), rr.word); 186 186 srlz_d(); 187 187 srlz_i(); … … 501 501 * Forward the page fault to address space page fault handler. 502 502 */ 503 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 504 fault_if_from_uspace(istate, "Page fault at %p.", 505 (void *) va); 506 panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL); 507 } 503 as_page_fault(va, PF_ACCESS_EXEC, istate); 508 504 } 509 505 } … … 619 615 * handler. 
620 616 */ 621 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 622 fault_if_from_uspace(istate, "Page fault at %p.", 623 (void *) va); 624 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL); 625 } 617 as_page_fault(va, PF_ACCESS_READ, istate); 626 618 } 627 619 } … … 667 659 dtc_pte_copy(t); 668 660 } else { 669 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 670 fault_if_from_uspace(istate, "Page fault at %p.", 671 (void *) va); 672 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL); 673 } 661 as_page_fault(va, PF_ACCESS_WRITE, istate); 674 662 } 675 663 } … … 700 688 itc_pte_copy(t); 701 689 } else { 702 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 703 fault_if_from_uspace(istate, "Page fault at %p.", 704 (void *) va); 705 panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL); 706 } 690 as_page_fault(va, PF_ACCESS_EXEC, istate); 707 691 } 708 692 } … … 764 748 ASSERT((t) && (t->p)); 765 749 ASSERT(!t->w); 766 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 767 fault_if_from_uspace(istate, "Page fault at %p.", 768 (void *) va); 769 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL); 770 } 750 as_page_fault(va, PF_ACCESS_WRITE, istate); 771 751 } 772 752 … … 799 779 dtc_pte_copy(t); 800 780 } else { 801 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 802 fault_if_from_uspace(istate, "Page fault at %p.", 803 (void *) va); 804 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL); 805 } 781 as_page_fault(va, PF_ACCESS_READ, istate); 806 782 } 807 783 } -
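Two separate fixes are folded into this hunk: the region register is selected and restored using the original page value rather than the progressively masked va, and the alignment masks are built with 1UL so the shift happens in 64 bits. With the default purge size the shift amount reaches PAGE_WIDTH + 18, which for 14-bit pages is 32, so the old 1 << ps shifted a 32-bit int by its full width: undefined behavior that can corrupt the mask. A standalone illustration of the corrected masking, assuming an LP64 target:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned ps = 32;  /* e.g. 14-bit pages + 18 in the default case */
        uint64_t va = UINT64_C(0x123456789abc);

        /* (1 << ps) would shift a 32-bit int by >= 32 bits: undefined.
         * 1UL is 64-bit on LP64 targets such as ia64, so this is safe. */
        uint64_t aligned = va & ~((1UL << ps) - 1);

        printf("%#llx -> %#llx\n", (unsigned long long) va,
            (unsigned long long) aligned);
        return 0;
    }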
kernel/arch/ia64/src/proc/scheduler.c
rea906c29 r850235d 90 90 : 91 91 : "r" (&THREAD->kstack[STACK_SIZE / 2]), 92 "r" (&THREAD->kstack[STACK_SIZE / 2 - SP_DELTA])92 "r" (&THREAD->kstack[STACK_SIZE / 2]) 93 93 ); 94 94 } -
kernel/arch/mips32/include/arch/mm/tlb.h
rea906c29 r850235d 112 112 #ifdef __BE__ 113 113 unsigned p : 1; 114 unsigned : 2 7;115 unsigned index : 4;114 unsigned : 25; 115 unsigned index : 6; 116 116 #else 117 unsigned index : 4;118 unsigned : 2 7;117 unsigned index : 6; 118 unsigned : 25; 119 119 unsigned p : 1; 120 120 #endif -
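Widening index from 4 to 6 bits matters because the CP0 Index register must be able to address every JTLB entry: 4 bits select at most 16 entries, while R4000-class cores have 48, so probe results for entries 16 and up were truncated. A sketch of the corrected little-endian layout:

    #include <stdint.h>

    /* Little-endian view of the CP0 Index register: 6 index bits cover
     * up to 64 TLB entries, 'p' flags a failed tlbp probe. */
    typedef union {
        struct {
            unsigned index : 6;
            unsigned : 25;
            unsigned p : 1;
        } fields;
        uint32_t value;
    } tlb_index_t;

    /* 48 entries need 6 bits; 4 bits only covered 16. */
    typedef char index_width_ok[(48 <= (1 << 6)) ? 1 : -1];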
kernel/arch/mips32/src/mips32.c
rea906c29 r850235d 228 228 { 229 229 supervisor_sp = 230 (uintptr_t) &THREAD->kstack[STACK_SIZE - SP_DELTA];230 (uintptr_t) &THREAD->kstack[STACK_SIZE]; 231 231 } 232 232 -
kernel/arch/mips32/src/mm/tlb.c
rea906c29 r850235d 48 48 #include <symtab.h> 49 49 50 static void tlb_refill_fail(istate_t *); 51 static void tlb_invalid_fail(istate_t *); 52 static void tlb_modified_fail(istate_t *); 53 54 static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *, int *); 50 static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *); 55 51 56 52 /** Initialize TLB. … … 92 88 uintptr_t badvaddr; 93 89 pte_t *pte; 94 int pfrc;95 90 96 91 badvaddr = cp0_badvaddr_read(); 97 92 asid = AS->asid; 98 93 99 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc); 100 if (!pte) { 101 switch (pfrc) { 102 case AS_PF_FAULT: 103 goto fail; 104 break; 105 case AS_PF_DEFER: 106 /* 107 * The page fault came during copy_from_uspace() 108 * or copy_to_uspace(). 109 */ 110 return; 111 default: 112 panic("Unexpected pfrc (%d).", pfrc); 94 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate); 95 if (pte) { 96 /* 97 * Record access to PTE. 98 */ 99 pte->a = 1; 100 101 tlb_prepare_entry_hi(&hi, asid, badvaddr); 102 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, 103 pte->cacheable, pte->pfn); 104 105 /* 106 * New entry is to be inserted into TLB 107 */ 108 cp0_entry_hi_write(hi.value); 109 if ((badvaddr / PAGE_SIZE) % 2 == 0) { 110 cp0_entry_lo0_write(lo.value); 111 cp0_entry_lo1_write(0); 112 } else { 113 cp0_entry_lo0_write(0); 114 cp0_entry_lo1_write(lo.value); 113 115 } 114 } 115 116 /* 117 * Record access to PTE. 118 */ 119 pte->a = 1; 120 121 tlb_prepare_entry_hi(&hi, asid, badvaddr); 122 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, 123 pte->pfn); 124 125 /* 126 * New entry is to be inserted into TLB 127 */ 128 cp0_entry_hi_write(hi.value); 129 if ((badvaddr / PAGE_SIZE) % 2 == 0) { 130 cp0_entry_lo0_write(lo.value); 131 cp0_entry_lo1_write(0); 132 } 133 else { 134 cp0_entry_lo0_write(0); 135 cp0_entry_lo1_write(lo.value); 136 } 137 cp0_pagemask_write(TLB_PAGE_MASK_16K); 138 tlbwr(); 139 140 return; 141 142 fail: 143 tlb_refill_fail(istate); 116 cp0_pagemask_write(TLB_PAGE_MASK_16K); 117 tlbwr(); 118 } 144 119 } 145 120 … … 155 130 entry_hi_t hi; 156 131 pte_t *pte; 157 int pfrc;158 132 159 133 badvaddr = cp0_badvaddr_read(); … … 168 142 index.value = cp0_index_read(); 169 143 170 /* 171 * Fail if the entry is not in TLB. 172 */ 173 if (index.p) { 174 printf("TLB entry not found.\n"); 175 goto fail; 176 } 177 178 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc); 179 if (!pte) { 180 switch (pfrc) { 181 case AS_PF_FAULT: 182 goto fail; 183 break; 184 case AS_PF_DEFER: 185 /* 186 * The page fault came during copy_from_uspace() 187 * or copy_to_uspace(). 188 */ 189 return; 190 default: 191 panic("Unexpected pfrc (%d).", pfrc); 192 } 193 } 194 195 /* 196 * Read the faulting TLB entry. 197 */ 198 tlbr(); 199 200 /* 201 * Record access to PTE. 202 */ 203 pte->a = 1; 204 205 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, 206 pte->pfn); 207 208 /* 209 * The entry is to be updated in TLB. 210 */ 211 if ((badvaddr / PAGE_SIZE) % 2 == 0) 212 cp0_entry_lo0_write(lo.value); 213 else 214 cp0_entry_lo1_write(lo.value); 215 cp0_pagemask_write(TLB_PAGE_MASK_16K); 216 tlbwi(); 217 218 return; 219 220 fail: 221 tlb_invalid_fail(istate); 144 ASSERT(!index.p); 145 146 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate); 147 if (pte) { 148 /* 149 * Read the faulting TLB entry. 150 */ 151 tlbr(); 152 153 /* 154 * Record access to PTE. 
155 */ 156 pte->a = 1; 157 158 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, 159 pte->cacheable, pte->pfn); 160 161 /* 162 * The entry is to be updated in TLB. 163 */ 164 if ((badvaddr / PAGE_SIZE) % 2 == 0) 165 cp0_entry_lo0_write(lo.value); 166 else 167 cp0_entry_lo1_write(lo.value); 168 cp0_pagemask_write(TLB_PAGE_MASK_16K); 169 tlbwi(); 170 } 222 171 } 223 172 … … 233 182 entry_hi_t hi; 234 183 pte_t *pte; 235 int pfrc;236 184 237 185 badvaddr = cp0_badvaddr_read(); … … 249 197 * Fail if the entry is not in TLB. 250 198 */ 251 if (index.p) { 252 printf("TLB entry not found.\n"); 253 goto fail; 254 } 255 256 pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc); 257 if (!pte) { 258 switch (pfrc) { 259 case AS_PF_FAULT: 260 goto fail; 261 break; 262 case AS_PF_DEFER: 263 /* 264 * The page fault came during copy_from_uspace() 265 * or copy_to_uspace(). 266 */ 267 return; 268 default: 269 panic("Unexpected pfrc (%d).", pfrc); 270 } 271 } 272 273 /* 274 * Read the faulting TLB entry. 275 */ 276 tlbr(); 277 278 /* 279 * Record access and write to PTE. 280 */ 281 pte->a = 1; 282 pte->d = 1; 283 284 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, 285 pte->pfn); 286 287 /* 288 * The entry is to be updated in TLB. 289 */ 290 if ((badvaddr / PAGE_SIZE) % 2 == 0) 291 cp0_entry_lo0_write(lo.value); 292 else 293 cp0_entry_lo1_write(lo.value); 294 cp0_pagemask_write(TLB_PAGE_MASK_16K); 295 tlbwi(); 296 297 return; 298 299 fail: 300 tlb_modified_fail(istate); 301 } 302 303 void tlb_refill_fail(istate_t *istate) 304 { 305 uintptr_t va = cp0_badvaddr_read(); 306 307 fault_if_from_uspace(istate, "TLB Refill Exception on %p.", 308 (void *) va); 309 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception."); 310 } 311 312 313 void tlb_invalid_fail(istate_t *istate) 314 { 315 uintptr_t va = cp0_badvaddr_read(); 316 317 fault_if_from_uspace(istate, "TLB Invalid Exception on %p.", 318 (void *) va); 319 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception."); 320 } 321 322 void tlb_modified_fail(istate_t *istate) 323 { 324 uintptr_t va = cp0_badvaddr_read(); 325 326 fault_if_from_uspace(istate, "TLB Modified Exception on %p.", 327 (void *) va); 328 panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception."); 199 ASSERT(!index.p); 200 201 pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate); 202 if (pte) { 203 /* 204 * Read the faulting TLB entry. 205 */ 206 tlbr(); 207 208 /* 209 * Record access and write to PTE. 210 */ 211 pte->a = 1; 212 pte->d = 1; 213 214 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, 215 pte->cacheable, pte->pfn); 216 217 /* 218 * The entry is to be updated in TLB. 219 */ 220 if ((badvaddr / PAGE_SIZE) % 2 == 0) 221 cp0_entry_lo0_write(lo.value); 222 else 223 cp0_entry_lo1_write(lo.value); 224 cp0_pagemask_write(TLB_PAGE_MASK_16K); 225 tlbwi(); 226 } 329 227 } 330 228 … … 334 232 * @param access Access mode that caused the fault. 335 233 * @param istate Pointer to interrupted state. 336 * @param pfrc Pointer to variable where as_page_fault() return code337 * will be stored.338 234 * 339 235 * @return PTE on success, NULL otherwise. 340 236 */ 341 pte_t * 342 find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate, 343 int *pfrc) 237 pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate) 344 238 { 345 239 entry_hi_t hi; … … 348 242 hi.value = cp0_entry_hi_read(); 349 243 350 /* 351 * Handler cannot succeed if the ASIDs don't match. 
352 */ 353 if (hi.asid != AS->asid) { 354 printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid); 355 return NULL; 356 } 244 ASSERT(hi.asid == AS->asid); 357 245 358 246 /* … … 366 254 */ 367 255 return pte; 368 } else { 369 int rc; 370 371 /* 372 * Mapping not found in page tables. 373 * Resort to higher-level page fault handler. 374 */ 375 switch (rc = as_page_fault(badvaddr, access, istate)) { 376 case AS_PF_OK: 377 /* 378 * The higher-level page fault handler succeeded, 379 * The mapping ought to be in place. 380 */ 381 pte = page_mapping_find(AS, badvaddr, true); 382 ASSERT(pte && pte->p); 383 ASSERT(pte->w || access != PF_ACCESS_WRITE); 384 return pte; 385 case AS_PF_DEFER: 386 *pfrc = AS_PF_DEFER; 387 return NULL; 388 case AS_PF_FAULT: 389 *pfrc = AS_PF_FAULT; 390 return NULL; 391 default: 392 panic("Unexpected rc (%d).", rc); 393 } 394 395 } 256 } 257 258 /* 259 * Mapping not found in page tables. 260 * Resort to higher-level page fault handler. 261 */ 262 if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) { 263 pte = page_mapping_find(AS, badvaddr, true); 264 ASSERT(pte && pte->p); 265 ASSERT(pte->w || access != PF_ACCESS_WRITE); 266 return pte; 267 } 268 269 return NULL; 396 270 } 397 271 -
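The net effect of this refactoring is that the pfrc out-parameter, the switch on its value and the local fail: labels all disappear: as_page_fault() now reports userspace faults and panics on kernel faults itself, so find_mapping_and_check() returns NULL both for deferred copy_*_uspace() faults and for hard faults, and each handler only asks whether a PTE became available. The same simplification repeats in the mips64, ppc32 and sparc64 hunks below. A condensed sketch of the resulting control flow, with stubs standing in for the kernel types:

    /* Stubs standing in for kernel interfaces used by this sketch. */
    typedef struct pte pte_t;
    typedef struct istate istate_t;
    #define PF_ACCESS_READ 0
    extern pte_t *find_mapping_and_check(unsigned long, int, istate_t *);
    extern void insert_into_tlb(pte_t *, unsigned long);

    void tlb_refill_sketch(unsigned long badvaddr, istate_t *istate)
    {
        pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
        if (!pte) {
            /* Deferred copy_*_uspace() fault, or as_page_fault()
             * already killed the task / panicked: nothing to do. */
            return;
        }
        insert_into_tlb(pte, badvaddr);  /* EntryHi/EntryLo + tlbwr */
    }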
kernel/arch/mips64/src/mips64.c
rea906c29 r850235d 205 205 { 206 206 supervisor_sp = 207 (uintptr_t) &THREAD->kstack[STACK_SIZE - SP_DELTA];207 (uintptr_t) &THREAD->kstack[STACK_SIZE]; 208 208 } 209 209 -
kernel/arch/mips64/src/mm/tlb.c
rea906c29 r850235d 79 79 * @param access Access mode that caused the fault. 80 80 * @param istate Pointer to interrupted state. 81 * @param pfrc Pointer to variable where as_page_fault()82 * return code will be stored.83 81 * 84 82 * @return PTE on success, NULL otherwise. … … 86 84 */ 87 85 static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, 88 istate_t *istate , int *pfrc)86 istate_t *istate) 89 87 { 90 88 entry_hi_t hi; 91 89 hi.value = cp0_entry_hi_read(); 92 90 93 /* 94 * Handler cannot succeed if the ASIDs don't match. 95 */ 96 if (hi.asid != AS->asid) { 97 printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid); 98 return NULL; 99 } 91 ASSERT(hi.asid == AS->asid); 100 92 101 93 /* … … 109 101 */ 110 102 return pte; 111 } else { 112 int rc; 113 114 /* 115 * Mapping not found in page tables. 116 * Resort to higher-level page fault handler. 117 */ 118 switch (rc = as_page_fault(badvaddr, access, istate)) { 119 case AS_PF_OK: 120 /* 121 * The higher-level page fault handler succeeded, 122 * The mapping ought to be in place. 123 */ 124 pte = page_mapping_find(AS, badvaddr, true); 125 ASSERT(pte); 126 ASSERT(pte->p); 127 ASSERT((pte->w) || (access != PF_ACCESS_WRITE)); 128 return pte; 129 case AS_PF_DEFER: 130 *pfrc = AS_PF_DEFER; 131 return NULL; 132 case AS_PF_FAULT: 133 *pfrc = AS_PF_FAULT; 134 return NULL; 135 default: 136 panic("Unexpected return code (%d).", rc); 137 } 138 } 103 } 104 105 /* 106 * Mapping not found in page tables. 107 * Resort to higher-level page fault handler. 108 */ 109 if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) { 110 /* 111 * The higher-level page fault handler succeeded, 112 * The mapping ought to be in place. 113 */ 114 pte = page_mapping_find(AS, badvaddr, true); 115 ASSERT(pte); 116 ASSERT(pte->p); 117 ASSERT((pte->w) || (access != PF_ACCESS_WRITE)); 118 return pte; 119 } 120 121 return NULL; 139 122 } 140 123 … … 156 139 } 157 140 158 static void tlb_refill_fail(istate_t *istate)159 {160 uintptr_t va = cp0_badvaddr_read();161 162 fault_if_from_uspace(istate, "TLB Refill Exception on %p.",163 (void *) va);164 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");165 }166 167 static void tlb_invalid_fail(istate_t *istate)168 {169 uintptr_t va = cp0_badvaddr_read();170 171 fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",172 (void *) va);173 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");174 }175 176 static void tlb_modified_fail(istate_t *istate)177 {178 uintptr_t va = cp0_badvaddr_read();179 180 fault_if_from_uspace(istate, "TLB Modified Exception on %p.",181 (void *) va);182 panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");183 }184 185 141 /** Process TLB Refill Exception. 186 142 * … … 196 152 mutex_unlock(&AS->lock); 197 153 198 int pfrc; 199 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, 200 istate, &pfrc); 201 if (!pte) { 202 switch (pfrc) { 203 case AS_PF_FAULT: 204 goto fail; 205 break; 206 case AS_PF_DEFER: 207 /* 208 * The page fault came during copy_from_uspace() 209 * or copy_to_uspace(). 210 */ 211 return; 212 default: 213 panic("Unexpected pfrc (%d).", pfrc); 154 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate); 155 if (pte) { 156 /* 157 * Record access to PTE. 
158 */ 159 pte->a = 1; 160 161 entry_lo_t lo; 162 entry_hi_t hi; 163 164 tlb_prepare_entry_hi(&hi, asid, badvaddr); 165 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c, 166 pte->frame); 167 168 /* 169 * New entry is to be inserted into TLB 170 */ 171 cp0_entry_hi_write(hi.value); 172 173 if ((badvaddr / PAGE_SIZE) % 2 == 0) { 174 cp0_entry_lo0_write(lo.value); 175 cp0_entry_lo1_write(0); 176 } else { 177 cp0_entry_lo0_write(0); 178 cp0_entry_lo1_write(lo.value); 214 179 } 215 } 216 217 /* 218 * Record access to PTE. 219 */ 220 pte->a = 1; 221 222 entry_lo_t lo; 223 entry_hi_t hi; 224 225 tlb_prepare_entry_hi(&hi, asid, badvaddr); 226 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c, 227 pte->frame); 228 229 /* 230 * New entry is to be inserted into TLB 231 */ 232 cp0_entry_hi_write(hi.value); 233 234 if ((badvaddr / PAGE_SIZE) % 2 == 0) { 235 cp0_entry_lo0_write(lo.value); 236 cp0_entry_lo1_write(0); 237 } else { 238 cp0_entry_lo0_write(0); 239 cp0_entry_lo1_write(lo.value); 240 } 241 242 cp0_pagemask_write(TLB_PAGE_MASK_16K); 243 tlbwr(); 244 245 return; 246 247 fail: 248 tlb_refill_fail(istate); 180 181 cp0_pagemask_write(TLB_PAGE_MASK_16K); 182 tlbwr(); 183 } 249 184 } 250 185 … … 271 206 index.value = cp0_index_read(); 272 207 273 /* 274 * Fail if the entry is not in TLB. 275 */ 276 if (index.p) { 277 printf("TLB entry not found.\n"); 278 goto fail; 279 } 280 281 int pfrc; 282 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, 283 istate, &pfrc); 284 if (!pte) { 285 switch (pfrc) { 286 case AS_PF_FAULT: 287 goto fail; 288 break; 289 case AS_PF_DEFER: 290 /* 291 * The page fault came during copy_from_uspace() 292 * or copy_to_uspace(). 293 */ 294 return; 295 default: 296 panic("Unexpected pfrc (%d).", pfrc); 297 } 298 } 299 300 /* 301 * Read the faulting TLB entry. 302 */ 303 tlbr(); 304 305 /* 306 * Record access to PTE. 307 */ 308 pte->a = 1; 309 310 entry_lo_t lo; 311 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c, 312 pte->frame); 313 314 /* 315 * The entry is to be updated in TLB. 316 */ 317 if ((badvaddr / PAGE_SIZE) % 2 == 0) 318 cp0_entry_lo0_write(lo.value); 319 else 320 cp0_entry_lo1_write(lo.value); 321 322 cp0_pagemask_write(TLB_PAGE_MASK_16K); 323 tlbwi(); 324 325 return; 326 327 fail: 328 tlb_invalid_fail(istate); 208 ASSERT(!index.p); 209 210 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate); 211 if (pte) { 212 /* 213 * Read the faulting TLB entry. 214 */ 215 tlbr(); 216 217 /* 218 * Record access to PTE. 219 */ 220 pte->a = 1; 221 222 entry_lo_t lo; 223 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c, 224 pte->frame); 225 226 /* 227 * The entry is to be updated in TLB. 228 */ 229 if ((badvaddr / PAGE_SIZE) % 2 == 0) 230 cp0_entry_lo0_write(lo.value); 231 else 232 cp0_entry_lo1_write(lo.value); 233 234 cp0_pagemask_write(TLB_PAGE_MASK_16K); 235 tlbwi(); 236 } 237 329 238 } 330 239 … … 351 260 index.value = cp0_index_read(); 352 261 353 /* 354 * Fail if the entry is not in TLB. 355 */ 356 if (index.p) { 357 printf("TLB entry not found.\n"); 358 goto fail; 359 } 360 361 int pfrc; 362 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, 363 istate, &pfrc); 364 if (!pte) { 365 switch (pfrc) { 366 case AS_PF_FAULT: 367 goto fail; 368 break; 369 case AS_PF_DEFER: 370 /* 371 * The page fault came during copy_from_uspace() 372 * or copy_to_uspace(). 373 */ 374 return; 375 default: 376 panic("Unexpected pfrc (%d).", pfrc); 377 } 378 } 379 380 /* 381 * Read the faulting TLB entry. 
382 */ 383 tlbr(); 384 385 /* 386 * Record access and write to PTE. 387 */ 388 pte->a = 1; 389 pte->d = 1; 390 391 entry_lo_t lo; 392 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c, 393 pte->frame); 394 395 /* 396 * The entry is to be updated in TLB. 397 */ 398 if ((badvaddr / PAGE_SIZE) % 2 == 0) 399 cp0_entry_lo0_write(lo.value); 400 else 401 cp0_entry_lo1_write(lo.value); 402 403 cp0_pagemask_write(TLB_PAGE_MASK_16K); 404 tlbwi(); 405 406 return; 407 408 fail: 409 tlb_modified_fail(istate); 262 ASSERT(!index.p); 263 264 pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate); 265 if (pte) { 266 /* 267 * Read the faulting TLB entry. 268 */ 269 tlbr(); 270 271 /* 272 * Record access and write to PTE. 273 */ 274 pte->a = 1; 275 pte->d = 1; 276 277 entry_lo_t lo; 278 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c, 279 pte->frame); 280 281 /* 282 * The entry is to be updated in TLB. 283 */ 284 if ((badvaddr / PAGE_SIZE) % 2 == 0) 285 cp0_entry_lo0_write(lo.value); 286 else 287 cp0_entry_lo1_write(lo.value); 288 289 cp0_pagemask_write(TLB_PAGE_MASK_16K); 290 tlbwi(); 291 } 410 292 } 411 293 -
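Both the mips32 and mips64 handlers pick between EntryLo0 and EntryLo1 with (badvaddr / PAGE_SIZE) % 2, because each MIPS TLB entry maps an even/odd pair of adjacent virtual pages: the even page goes into EntryLo0 and the odd one into EntryLo1. A tiny demonstration of the selection arithmetic, assuming the 16 KiB page size implied by TLB_PAGE_MASK_16K:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  (16 * 1024)   /* 16 KiB, matching TLB_PAGE_MASK_16K */

    int main(void)
    {
        uint64_t addrs[] = { 0x0, 0x4000, 0x8000, 0xc000 };

        for (unsigned i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
            /* Even pages go to EntryLo0, odd pages to EntryLo1. */
            int lo = (addrs[i] / PAGE_SIZE) % 2;
            printf("%#llx -> EntryLo%d\n",
                (unsigned long long) addrs[i], lo);
        }
        return 0;
    }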
kernel/arch/ppc32/src/mm/pht.c
rea906c29 r850235d 49 49 * @param access Access mode that caused the fault. 50 50 * @param istate Pointer to interrupted state. 51 * @param pfrc Pointer to variable where as_page_fault() return code52 * will be stored.53 51 * 54 52 * @return PTE on success, NULL otherwise. … … 56 54 */ 57 55 static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access, 58 istate_t *istate , int *pfrc)56 istate_t *istate) 59 57 { 60 58 /* … … 68 66 */ 69 67 return pte; 70 } else { 68 } 69 /* 70 * Mapping not found in page tables. 71 * Resort to higher-level page fault handler. 72 */ 73 if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) { 71 74 /* 72 * Mapping not found in page tables.73 * Resort to higher-level page fault handler.75 * The higher-level page fault handler succeeded, 76 * The mapping ought to be in place. 74 77 */ 75 int rc = as_page_fault(badvaddr, access, istate); 76 switch (rc) { 77 case AS_PF_OK: 78 /* 79 * The higher-level page fault handler succeeded, 80 * The mapping ought to be in place. 81 */ 82 pte = page_mapping_find(as, badvaddr, true); 83 ASSERT((pte) && (pte->present)); 84 *pfrc = 0; 85 return pte; 86 case AS_PF_DEFER: 87 *pfrc = rc; 88 return NULL; 89 case AS_PF_FAULT: 90 *pfrc = rc; 91 return NULL; 92 default: 93 panic("Unexpected rc (%d).", rc); 94 } 95 } 96 } 97 98 static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate) 99 { 100 fault_if_from_uspace(istate, "PHT Refill Exception on %p.", 101 (void *) badvaddr); 102 panic_memtrap(istate, PF_ACCESS_UNKNOWN, badvaddr, 103 "PHT Refill Exception."); 78 pte = page_mapping_find(as, badvaddr, true); 79 ASSERT((pte) && (pte->present)); 80 return pte; 81 } 82 83 return NULL; 104 84 } 105 85 … … 202 182 badvaddr = istate->pc; 203 183 204 int pfrc;205 184 pte_t *pte = find_mapping_and_check(AS, badvaddr, 206 PF_ACCESS_READ /* FIXME */, istate, &pfrc); 207 208 if (!pte) { 209 switch (pfrc) { 210 case AS_PF_FAULT: 211 pht_refill_fail(badvaddr, istate); 212 return; 213 case AS_PF_DEFER: 214 /* 215 * The page fault came during copy_from_uspace() 216 * or copy_to_uspace(). 217 */ 218 return; 219 default: 220 panic("Unexpected pfrc (%d).", pfrc); 221 } 222 } 223 224 /* Record access to PTE */ 225 pte->accessed = 1; 226 pht_insert(badvaddr, pte); 185 PF_ACCESS_READ /* FIXME */, istate); 186 187 if (pte) { 188 /* Record access to PTE */ 189 pte->accessed = 1; 190 pht_insert(badvaddr, pte); 191 } 227 192 } 228 193 -
kernel/arch/ppc32/src/ppc32.c
rea906c29 r850235d 103 103 } 104 104 105 #ifdef CONFIG_FB 105 106 static bool display_register(ofw_tree_node_t *node, void *arg) 106 107 { … … 169 170 return true; 170 171 } 172 #endif 171 173 172 174 void arch_post_mm_init(void) -
kernel/arch/ppc32/src/proc/scheduler.c
rea906c29 r850235d 55 55 asm volatile ( 56 56 "mtsprg0 %[ksp]\n" 57 :: [ksp] "r" (KA2PA(&THREAD->kstack[STACK_SIZE - SP_DELTA]))57 :: [ksp] "r" (KA2PA(&THREAD->kstack[STACK_SIZE])) 58 58 ); 59 59 } -
kernel/arch/sparc64/src/mm/sun4u/tlb.c
rea906c29 r850235d 58 58 static void dtlb_pte_copy(pte_t *, size_t, bool); 59 59 static void itlb_pte_copy(pte_t *, size_t); 60 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,61 const char *);62 static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,63 const char *);64 static void do_fast_data_access_protection_fault(istate_t *,65 tlb_tag_access_reg_t, const char *);66 60 67 61 const char *context_encoding[] = { … … 222 216 * handler. 223 217 */ 224 if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) == 225 AS_PF_FAULT) { 226 do_fast_instruction_access_mmu_miss_fault(istate, 227 istate->tpc, __func__); 228 } 218 as_page_fault(page_16k, PF_ACCESS_EXEC, istate); 229 219 } 230 220 } … … 256 246 if (!tag.vpn) { 257 247 /* NULL access in kernel */ 258 do_fast_data_access_mmu_miss_fault(istate, tag, 259 "Dereferencing NULL pointer."); 248 panic("NULL pointer dereference."); 260 249 } else if (page_8k >= end_of_identity) { 261 250 /* Kernel non-identity. */ 262 251 as = AS_KERNEL; 263 252 } else { 264 do_fast_data_access_mmu_miss_fault(istate, tag, 265 "Unexpected kernel page fault."); 253 panic("Unexpected kernel page fault."); 266 254 } 267 255 } … … 283 271 * handler. 284 272 */ 285 if (as_page_fault(page_16k, PF_ACCESS_READ, istate) == 286 AS_PF_FAULT) { 287 do_fast_data_access_mmu_miss_fault(istate, tag, 288 __func__); 289 } 273 as_page_fault(page_16k, PF_ACCESS_READ, istate); 290 274 } 291 275 } … … 332 316 * handler. 333 317 */ 334 if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) == 335 AS_PF_FAULT) { 336 do_fast_data_access_protection_fault(istate, tag, 337 __func__); 338 } 318 as_page_fault(page_16k, PF_ACCESS_WRITE, istate); 339 319 } 340 320 } … … 428 408 429 409 #endif 430 431 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,432 uintptr_t va, const char *str)433 {434 fault_if_from_uspace(istate, "%s, address=%p.", str, (void *) va);435 panic_memtrap(istate, PF_ACCESS_EXEC, va, str);436 }437 438 void do_fast_data_access_mmu_miss_fault(istate_t *istate,439 tlb_tag_access_reg_t tag, const char *str)440 {441 uintptr_t va;442 443 va = tag.vpn << MMU_PAGE_WIDTH;444 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,445 (void *) va, tag.context);446 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, str);447 }448 449 void do_fast_data_access_protection_fault(istate_t *istate,450 tlb_tag_access_reg_t tag, const char *str)451 {452 uintptr_t va;453 454 va = tag.vpn << MMU_PAGE_WIDTH;455 fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,456 (void *) va, tag.context);457 panic_memtrap(istate, PF_ACCESS_WRITE, va, str);458 }459 410 460 411 void describe_dmmu_fault(void) -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
rea906c29 r850235d 62 62 static void itlb_pte_copy(pte_t *); 63 63 static void dtlb_pte_copy(pte_t *, bool); 64 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,65 const char *);66 static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,67 const char *);68 static void do_fast_data_access_protection_fault(istate_t *,69 uint64_t, const char *);70 64 71 65 /* … … 235 229 * handler. 236 230 */ 237 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 238 do_fast_instruction_access_mmu_miss_fault(istate, 239 istate->tpc, __func__); 240 } 231 as_page_fault(va, PF_ACCESS_EXEC, istate); 241 232 } 242 233 } … … 264 255 if (va == 0) { 265 256 /* NULL access in kernel */ 266 do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, 267 __func__); 257 panic("NULL pointer dereference."); 268 258 } 269 do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected " 270 "kernel page fault."); 259 panic("Unexpected kernel page fault."); 271 260 } 272 261 … … 287 276 * handler. 288 277 */ 289 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 290 do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, 291 __func__); 292 } 278 as_page_fault(va, PF_ACCESS_READ, istate); 293 279 } 294 280 } … … 329 315 * handler. 330 316 */ 331 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 332 do_fast_data_access_protection_fault(istate, page_and_ctx, 333 __func__); 334 } 317 as_page_fault(va, PF_ACCESS_WRITE, istate); 335 318 } 336 319 } … … 346 329 } 347 330 348 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, uintptr_t va,349 const char *str)350 {351 fault_if_from_uspace(istate, "%s, address=%p.", str,352 (void *) va);353 panic_memtrap(istate, PF_ACCESS_EXEC, va, str);354 }355 356 void do_fast_data_access_mmu_miss_fault(istate_t *istate,357 uint64_t page_and_ctx, const char *str)358 {359 fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,360 (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));361 panic_memtrap(istate, PF_ACCESS_UNKNOWN, DMISS_ADDRESS(page_and_ctx),362 str);363 }364 365 void do_fast_data_access_protection_fault(istate_t *istate,366 uint64_t page_and_ctx, const char *str)367 {368 fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,369 (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));370 panic_memtrap(istate, PF_ACCESS_WRITE, DMISS_ADDRESS(page_and_ctx),371 str);372 }373 374 331 /** 375 332 * Describes the exact condition which caused the last DMMU fault. -
kernel/arch/sparc64/src/proc/sun4u/scheduler.c
rea906c29 r850235d 52 52 { 53 53 if (THREAD->uspace) { 54 uint64_t sp; 55 54 56 /* 55 57 * Write kernel stack address to %g6 of the alternate and … … 63 65 * before it explicitly uses %g7. 64 66 */ 65 uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - 66 (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); 67 sp = (uintptr_t) THREAD->kstack + STACK_SIZE - STACK_BIAS; 67 68 write_to_ig_g6(sp); 68 69 write_to_ag_g6(sp); … … 76 77 if (THREAD->uspace) { 77 78 /* sample the state of the userspace window buffer */ 78 THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7(); 79 THREAD->arch.uspace_window_buffer = 80 (uint8_t *) read_from_ag_g7(); 79 81 } 80 82 } -
kernel/arch/sparc64/src/proc/sun4v/scheduler.c
rea906c29 r850235d 55 55 { 56 56 if (THREAD->uspace) { 57 uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - 58 (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); 57 uint64_t sp; 58 59 sp = (uintptr_t) THREAD->kstack + STACK_SIZE - STACK_BIAS; 59 60 asi_u64_write(ASI_SCRATCHPAD, SCRATCHPAD_KSTACK, sp); 60 61 asi_u64_write(ASI_SCRATCHPAD, SCRATCHPAD_WBUF, -
kernel/genarch/Makefile.inc
rea906c29 r850235d 106 106 endif 107 107 108 ifeq ($(CONFIG_AM335X_UART),y) 109 GENARCH_SOURCES += \ 110 genarch/src/drivers/am335x/uart.c 111 endif 112 113 ifeq ($(CONFIG_AM335X_TIMERS),y) 114 GENARCH_SOURCES += \ 115 genarch/src/drivers/am335x/timer.c 116 endif 117 118 ifeq ($(CONFIG_AMDM37X_UART),y) 119 GENARCH_SOURCES += \ 120 genarch/src/drivers/amdm37x/uart.c 121 endif 122 108 123 ifeq ($(CONFIG_VIA_CUDA),y) 109 124 GENARCH_SOURCES += \ -
kernel/genarch/src/mm/page_ht.c
rea906c29 r850235d 209 209 pte->frame = ALIGN_DOWN(frame, FRAME_SIZE); 210 210 211 /* 212 * Make sure that a concurrent ht_mapping_find() will see the 213 * new entry only after it is fully initialized. 214 */ 211 215 write_barrier(); 212 216 -
kernel/genarch/src/mm/page_pt.c
rea906c29 r850235d 89 89 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 90 90 PAGE_WRITE); 91 /* 92 * Make sure that a concurrent hardware page table walk or 93 * pt_mapping_find() will see the new PTL1 only after it is 94 * fully initialized. 95 */ 91 96 write_barrier(); 92 97 SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page)); … … 103 108 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 104 109 PAGE_WRITE); 110 /* 111 * Make the new PTL2 visible only after it is fully initialized. 112 */ 105 113 write_barrier(); 106 114 SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page)); … … 117 125 PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | 118 126 PAGE_WRITE); 127 /* 128 * Make the new PTL3 visible only after it is fully initialized. 129 */ 119 130 write_barrier(); 120 131 SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page)); … … 125 136 SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame); 126 137 SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT); 138 /* 139 * Make the new mapping visible only after it is fully initialized. 140 */ 127 141 write_barrier(); 128 142 SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page)); … … 296 310 297 311 #if (PTL1_ENTRIES != 0) 312 /* 313 * Always read ptl2 only after we are sure it is present. 314 */ 298 315 read_barrier(); 299 316 #endif … … 304 321 305 322 #if (PTL2_ENTRIES != 0) 323 /* 324 * Always read ptl3 only after we are sure it is present. 325 */ 306 326 read_barrier(); 307 327 #endif -
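The comments added here and in page_ht.c document one publication idiom: fully initialize the new page-table level or PTE, execute a write barrier, and only then set the present bit that makes it reachable, while lookups pair this with a read barrier before descending. A portable C11 sketch of the same idiom, using release/acquire atomics where the kernel uses write_barrier()/read_barrier():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct mapping {
        uintptr_t frame;
        unsigned flags;
    };

    static struct mapping slot;
    static atomic_bool present;  /* the "present bit" of the sketch */

    void publish(uintptr_t frame, unsigned flags)
    {
        slot.frame = frame;  /* fully initialize first ... */
        slot.flags = flags;
        /* ... then publish; release acts as the write barrier. */
        atomic_store_explicit(&present, true, memory_order_release);
    }

    bool lookup(struct mapping *out)
    {
        /* Acquire acts as the read barrier: seeing present == true
         * guarantees seeing the fully initialized slot. */
        if (!atomic_load_explicit(&present, memory_order_acquire))
            return false;
        *out = slot;
        return true;
    }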
kernel/generic/include/config.h
rea906c29 r850235d 44 44 #define STACK_FRAMES TWO_FRAMES 45 45 #define STACK_SIZE ((1 << STACK_FRAMES) << PAGE_WIDTH) 46 47 #define STACK_SIZE_USER (1 * 1024 * 1024) 46 48 47 49 #define CONFIG_INIT_TASKS 32 -
kernel/generic/include/console/console.h
rea906c29 r850235d 67 67 extern wchar_t getc(indev_t *indev); 68 68 extern size_t gets(indev_t *indev, char *buf, size_t buflen); 69 extern sysarg_t sys_klog(int fd, const void *buf, size_t size);69 extern sysarg_t sys_klog(int cmd, const void *buf, size_t size); 70 70 71 71 extern void grab_console(void); -
kernel/generic/include/macros.h
rea906c29 r850235d 52 52 uint64_t sz2) 53 53 { 54 uint64_t e1 = s1 + sz1; 55 uint64_t e2 = s2 + sz2; 56 57 return ((s1 < e2) && (s2 < e1)); 54 uint64_t e1 = s1 + sz1 - 1; 55 uint64_t e2 = s2 + sz2 - 1; 56 57 /* both sizes are non-zero */ 58 if (sz1 && sz2) 59 return ((s1 <= e2) && (s2 <= e1)); 60 61 /* one size is non-zero */ 62 if (sz2) 63 return ((s1 >= s2) && (s1 <= e2)); 64 if (sz1) 65 return ((s2 >= s1) && (s2 <= e1)); 66 67 /* both are zero */ 68 return (s1 == s2); 58 69 } 59 70 … … 119 130 | ((((uint64_t) (up)) & UINT32_C(0xffffffff)) << 32)) 120 131 132 /* Test for sum overflow. */ 133 #define overflows(a, b) \ 134 ((a) + (b) < (a)) 135 136 /* Test for sum overflow into positive numbers. */ 137 #define overflows_into_positive(a, b) \ 138 (overflows((a), (b)) && ((a) + (b) > 0)) 139 121 140 /** Pseudorandom generator 122 141 * -
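The rewritten overlaps() closes an overflow bug: the old exclusive end points s + sz wrap to zero whenever a region runs up to the very top of the address space, making the < comparisons fail and the regions appear disjoint. Comparing inclusive last bytes s + sz - 1 avoids the wrap, at the cost of spelling out the zero-size cases separately; the new overflows() and overflows_into_positive() macros make the wrap test itself reusable elsewhere in this changeset. A quick self-check of the edge cases (condensed to the non-zero-size branch of the new macro):

    #include <assert.h>
    #include <stdint.h>

    #define overlaps(s1, sz1, s2, sz2) \
        (((s1) <= ((s2) + (sz2) - 1)) && ((s2) <= ((s1) + (sz1) - 1)))
    #define overflows(a, b)  ((a) + (b) < (a))

    int main(void)
    {
        uint64_t top = UINT64_C(0xffffffffffff0000);

        /* Region ending exactly at 2^64: the old exclusive end wrapped
         * to 0, so "s2 < e1" was false and the overlap went unnoticed. */
        assert(overlaps(top, 0x10000, top + 0x8000, 0x100));
        /* Adjacent regions still do not overlap. */
        assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));
        /* Wrap detection. */
        assert(overflows(UINT64_C(0xffffffffffffffff), 1));
        assert(!overflows(UINT64_C(1), 1));
        return 0;
    }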
kernel/generic/include/mm/as.h
rea906c29 r850235d 61 61 #define USER_ADDRESS_SPACE_END USER_ADDRESS_SPACE_END_ARCH 62 62 63 #ifdef USTACK_ADDRESS_ARCH64 #define USTACK_ADDRESS USTACK_ADDRESS_ARCH65 #else66 #define USTACK_ADDRESS (USER_ADDRESS_SPACE_END - (STACK_SIZE - 1))67 #endif68 69 63 /** Kernel address space. */ 70 64 #define FLAG_AS_KERNEL (1 << 0) … … 74 68 #define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */ 75 69 70 /** The page fault was resolved by as_page_fault(). */ 71 #define AS_PF_OK 0 72 73 /** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */ 74 #define AS_PF_DEFER 1 75 76 76 /** The page fault was not resolved by as_page_fault(). */ 77 #define AS_PF_FAULT 0 78 79 /** The page fault was resolved by as_page_fault(). */ 80 #define AS_PF_OK 1 81 82 /** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */ 83 #define AS_PF_DEFER 2 77 #define AS_PF_FAULT 2 78 79 /** The page fault was not resolved by as_page_fault(). Non-verbose version. */ 80 #define AS_PF_SILENT 3 84 81 85 82 /** Address space structure. … … 224 221 void (* destroy)(as_area_t *); 225 222 223 bool (* is_resizable)(as_area_t *); 224 bool (* is_shareable)(as_area_t *); 225 226 226 int (* page_fault)(as_area_t *, uintptr_t, pf_access_t); 227 227 void (* frame_free)(as_area_t *, uintptr_t, uintptr_t); -
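Replacing the hardcoded area->backend == &phys_backend test (and the NULL-method check on the sharing path) with is_resizable()/is_shareable() callbacks moves the policy into each backend: a backend can now veto resizing or sharing without the generic as.c code knowing which backend it is. A stub sketch of a backend opting out of resizing, mirroring the phys backend further down in this changeset:

    #include <stdbool.h>

    typedef struct as_area as_area_t;

    /* Subset of mem_backend_t relevant to the sketch. */
    typedef struct {
        bool (*is_resizable)(as_area_t *);
        bool (*is_shareable)(as_area_t *);
    } backend_sketch_t;

    /* A device-memory style backend refuses resizing but allows
     * sharing, like the phys backend later in this changeset. */
    static bool dev_is_resizable(as_area_t *a) { (void) a; return false; }
    static bool dev_is_shareable(as_area_t *a) { (void) a; return true; }

    static const backend_sketch_t dev_backend = {
        .is_resizable = dev_is_resizable,
        .is_shareable = dev_is_shareable,
    };

    /* Generic code asks the backend instead of comparing pointers. */
    static inline bool can_resize(as_area_t *a)
    {
        return dev_backend.is_resizable(a);
    }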
kernel/generic/src/console/cmd.c
rea906c29 r850235d 56 56 #include <cpu.h> 57 57 #include <mm/tlb.h> 58 #include <mm/km.h> 58 59 #include <arch/mm/tlb.h> 59 60 #include <mm/frame.h> … … 81 82 .func = cmd_help, 82 83 .argc = 0 84 }; 85 86 /* Data and methods for pio_read_8 command */ 87 static int cmd_pio_read_8(cmd_arg_t *argv); 88 static cmd_arg_t pio_read_8_argv[] = { { .type = ARG_TYPE_INT } }; 89 static cmd_info_t pio_read_8_info = { 90 .name = "pio_read_8", 91 .description = "pio_read_8 <address> Read 1 byte from memory (or port).", 92 .func = cmd_pio_read_8, 93 .argc = 1, 94 .argv = pio_read_8_argv 95 }; 96 97 /* Data and methods for pio_read_16 command */ 98 static int cmd_pio_read_16(cmd_arg_t *argv); 99 static cmd_arg_t pio_read_16_argv[] = { { .type = ARG_TYPE_INT } }; 100 static cmd_info_t pio_read_16_info = { 101 .name = "pio_read_16", 102 .description = "pio_read_16 <address> Read 2 bytes from memory (or port).", 103 .func = cmd_pio_read_16, 104 .argc = 1, 105 .argv = pio_read_16_argv 106 }; 107 108 /* Data and methods for pio_read_32 command */ 109 static int cmd_pio_read_32(cmd_arg_t *argv); 110 static cmd_arg_t pio_read_32_argv[] = { { .type = ARG_TYPE_INT } }; 111 static cmd_info_t pio_read_32_info = { 112 .name = "pio_read_32", 113 .description = "pio_read_32 <address> Read 4 bytes from memory (or port).", 114 .func = cmd_pio_read_32, 115 .argc = 1, 116 .argv = pio_read_32_argv 117 }; 118 119 /* Data and methods for pio_write_8 command */ 120 static int cmd_pio_write_8(cmd_arg_t *argv); 121 static cmd_arg_t pio_write_8_argv[] = { 122 { .type = ARG_TYPE_INT }, 123 { .type = ARG_TYPE_INT } 124 }; 125 static cmd_info_t pio_write_8_info = { 126 .name = "pio_write_8", 127 .description = "pio_write_8 <address> <value> Write 1 byte to memory (or port).", 128 .func = cmd_pio_write_8, 129 .argc = 2, 130 .argv = pio_write_8_argv 131 }; 132 133 /* Data and methods for pio_write_16 command */ 134 static int cmd_pio_write_16(cmd_arg_t *argv); 135 static cmd_arg_t pio_write_16_argv[] = { 136 { .type = ARG_TYPE_INT }, 137 { .type = ARG_TYPE_INT } 138 }; 139 static cmd_info_t pio_write_16_info = { 140 .name = "pio_write_16", 141 .description = "pio_write_16 <address> <value> Write 2 bytes to memory (or port).", 142 .func = cmd_pio_write_16, 143 .argc = 2, 144 .argv = pio_write_16_argv 145 }; 146 147 /* Data and methods for pio_write_32 command */ 148 static int cmd_pio_write_32(cmd_arg_t *argv); 149 static cmd_arg_t pio_write_32_argv[] = { 150 { .type = ARG_TYPE_INT }, 151 { .type = ARG_TYPE_INT } 152 }; 153 static cmd_info_t pio_write_32_info = { 154 .name = "pio_write_32", 155 .description = "pio_write_32 <address> <value> Write 4 bytes to memory (or port).", 156 .func = cmd_pio_write_32, 157 .argc = 2, 158 .argv = pio_write_32_argv 83 159 }; 84 160 … … 531 607 &btrace_info, 532 608 #endif 609 &pio_read_8_info, 610 &pio_read_16_info, 611 &pio_read_32_info, 612 &pio_write_8_info, 613 &pio_write_16_info, 614 &pio_write_32_info, 533 615 NULL 534 616 }; … … 601 683 spinlock_unlock(&cmd_lock); 602 684 685 return 1; 686 } 687 688 /** Read 1 byte from phys memory or io port. 689 * 690 * @param argv Argument vector. 691 * 692 * @return 0 on failure, 1 on success. 
693 */ 694 static int cmd_pio_read_8(cmd_arg_t *argv) 695 { 696 uint8_t *ptr = NULL; 697 698 #ifdef IO_SPACE_BOUNDARY 699 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 700 ptr = (void *) argv[0].intval; 701 else 702 #endif 703 ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t), 704 PAGE_NOT_CACHEABLE); 705 706 const uint8_t val = pio_read_8(ptr); 707 printf("read %" PRIxn ": %" PRIx8 "\n", argv[0].intval, val); 708 709 #ifdef IO_SPACE_BOUNDARY 710 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 711 return 1; 712 #endif 713 714 km_unmap((uintptr_t) ptr, sizeof(uint8_t)); 715 return 1; 716 } 717 718 /** Read 2 bytes from phys memory or io port. 719 * 720 * @param argv Argument vector. 721 * 722 * @return 0 on failure, 1 on success. 723 */ 724 static int cmd_pio_read_16(cmd_arg_t *argv) 725 { 726 uint16_t *ptr = NULL; 727 728 #ifdef IO_SPACE_BOUNDARY 729 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 730 ptr = (void *) argv[0].intval; 731 else 732 #endif 733 ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t), 734 PAGE_NOT_CACHEABLE); 735 736 const uint16_t val = pio_read_16(ptr); 737 printf("read %" PRIxn ": %" PRIx16 "\n", argv[0].intval, val); 738 739 #ifdef IO_SPACE_BOUNDARY 740 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 741 return 1; 742 #endif 743 744 km_unmap((uintptr_t) ptr, sizeof(uint16_t)); 745 return 1; 746 } 747 748 /** Read 4 bytes from phys memory or io port. 749 * 750 * @param argv Argument vector. 751 * 752 * @return 0 on failure, 1 on success. 753 */ 754 static int cmd_pio_read_32(cmd_arg_t *argv) 755 { 756 uint32_t *ptr = NULL; 757 758 #ifdef IO_SPACE_BOUNDARY 759 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 760 ptr = (void *) argv[0].intval; 761 else 762 #endif 763 ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t), 764 PAGE_NOT_CACHEABLE); 765 766 const uint32_t val = pio_read_32(ptr); 767 printf("read %" PRIxn ": %" PRIx32 "\n", argv[0].intval, val); 768 769 #ifdef IO_SPACE_BOUNDARY 770 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 771 return 1; 772 #endif 773 774 km_unmap((uintptr_t) ptr, sizeof(uint32_t)); 775 return 1; 776 } 777 778 /** Write 1 byte to phys memory or io port. 779 * 780 * @param argv Argument vector. 781 * 782 * @return 0 on failure, 1 on success. 783 */ 784 static int cmd_pio_write_8(cmd_arg_t *argv) 785 { 786 uint8_t *ptr = NULL; 787 788 #ifdef IO_SPACE_BOUNDARY 789 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 790 ptr = (void *) argv[0].intval; 791 else 792 #endif 793 ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t), 794 PAGE_NOT_CACHEABLE); 795 796 printf("write %" PRIxn ": %" PRIx8 "\n", argv[0].intval, 797 (uint8_t) argv[1].intval); 798 pio_write_8(ptr, (uint8_t) argv[1].intval); 799 800 #ifdef IO_SPACE_BOUNDARY 801 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 802 return 1; 803 #endif 804 805 km_unmap((uintptr_t) ptr, sizeof(uint8_t)); 806 return 1; 807 } 808 809 /** Write 2 bytes to phys memory or io port. 810 * 811 * @param argv Argument vector. 812 * 813 * @return 0 on failure, 1 on success. 
814 */ 815 static int cmd_pio_write_16(cmd_arg_t *argv) 816 { 817 uint16_t *ptr = NULL; 818 819 #ifdef IO_SPACE_BOUNDARY 820 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 821 ptr = (void *) argv[0].intval; 822 else 823 #endif 824 ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t), 825 PAGE_NOT_CACHEABLE); 826 827 printf("write %" PRIxn ": %" PRIx16 "\n", argv[0].intval, 828 (uint16_t) argv[1].intval); 829 pio_write_16(ptr, (uint16_t) argv[1].intval); 830 831 #ifdef IO_SPACE_BOUNDARY 832 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 833 return 1; 834 #endif 835 836 km_unmap((uintptr_t) ptr, sizeof(uint16_t)); 837 return 1; 838 } 839 840 /** Write 4 bytes to phys memory or io port. 841 * 842 * @param argv Argument vector. 843 * 844 * @return 0 on failure, 1 on success. 845 */ 846 static int cmd_pio_write_32(cmd_arg_t *argv) 847 { 848 uint32_t *ptr = NULL; 849 850 #ifdef IO_SPACE_BOUNDARY 851 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 852 ptr = (void *) argv[0].intval; 853 else 854 #endif 855 ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t), 856 PAGE_NOT_CACHEABLE); 857 858 printf("write %" PRIxn ": %" PRIx32 "\n", argv[0].intval, 859 (uint32_t) argv[1].intval); 860 pio_write_32(ptr, (uint32_t) argv[1].intval); 861 862 #ifdef IO_SPACE_BOUNDARY 863 if ((void *) argv->intval < IO_SPACE_BOUNDARY) 864 return 1; 865 #endif 866 867 km_unmap((uintptr_t) ptr, sizeof(uint32_t)); 603 868 return 1; 604 869 } -
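The six new commands allow ad-hoc peeking and poking of device registers from the kernel console: addresses below IO_SPACE_BOUNDARY (where the architecture defines one) are treated as port I/O, anything else is temporarily mapped uncached via km_map() and unmapped again afterwards. A hypothetical kconsole session; the addresses and values below are made up for illustration:

    kconsole> pio_read_32 0x48318000
    read 48318000: 4b0
    kconsole> pio_write_8 0x3f8 0x41
    write 3f8: 41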
kernel/generic/src/console/console.c
rea906c29 r850235d 52 52 #include <errno.h> 53 53 #include <str.h> 54 #include <abi/klog.h> 54 55 55 56 #define KLOG_PAGES 8 … … 335 336 * 336 337 */ 337 sysarg_t sys_klog(int fd, const void *buf, size_t size)338 sysarg_t sys_klog(int cmd, const void *buf, size_t size) 338 339 { 339 340 char *data; 340 341 int rc; 341 342 343 switch (cmd) { 344 case KLOG_UPDATE: 345 klog_update(NULL); 346 return EOK; 347 case KLOG_WRITE: 348 case KLOG_COMMAND: 349 break; 350 default: 351 return ENOTSUP; 352 } 353 342 354 if (size > PAGE_SIZE) 343 355 return (sysarg_t) ELIMIT; … … 355 367 data[size] = 0; 356 368 357 printf("%s", data); 369 switch (cmd) { 370 case KLOG_WRITE: 371 printf("%s", data); 372 break; 373 case KLOG_COMMAND: 374 if (!stdin) 375 break; 376 for (unsigned int i = 0; i < size; i++) 377 indev_push_character(stdin, data[i]); 378 indev_push_character(stdin, '\n'); 379 break; 380 } 381 358 382 free(data); 359 } else 360 klog_update(NULL); 361 383 } 384 362 385 return size; 363 386 } -
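sys_klog() now dispatches on an explicit command rather than overloading a file-descriptor argument: KLOG_UPDATE just runs a klog_update() pass, KLOG_WRITE copies the buffer in and prints it, and KLOG_COMMAND feeds the buffer plus a trailing newline into stdin as if it had been typed on the kernel console. The shape of the dispatch, condensed (the enum values are placeholders; the real ones come from abi/klog.h, which is not shown in this hunk):

    /* Placeholder values; see abi/klog.h for the real definitions. */
    enum klog_cmd { KLOG_WRITE, KLOG_UPDATE, KLOG_COMMAND };

    long sys_klog_sketch(enum klog_cmd cmd, const char *data, unsigned size)
    {
        switch (cmd) {
        case KLOG_UPDATE:
            /* Flush buffered kernel messages to the uspace reader. */
            return 0;
        case KLOG_WRITE:
            /* Copy in and printf() the buffer. */
            return size;
        case KLOG_COMMAND:
            /* Push each character to stdin, then '\n'. */
            return size;
        default:
            return -1;  /* ENOTSUP in the kernel proper */
        }
    }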
kernel/generic/src/console/kconsole.c
rea906c29 r850235d 524 524 /* It's a number - convert it */ 525 525 uint64_t value; 526 int rc = str_uint64_t(text, NULL, 0, true, &value); 526 char *end; 527 int rc = str_uint64_t(text, &end, 0, false, &value); 528 if (end != text + len) 529 rc = EINVAL; 527 530 switch (rc) { 528 531 case EINVAL: 529 printf("Invalid number .\n");532 printf("Invalid number '%s'.\n", text); 530 533 return false; 531 534 case EOVERFLOW: 532 printf("Integer overflow .\n");535 printf("Integer overflow in '%s'.\n", text); 533 536 return false; 534 537 case EOK: … … 538 541 break; 539 542 default: 540 printf("Unknown error .\n");543 printf("Unknown error parsing '%s'.\n", text); 541 544 return false; 542 545 } -
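The parsing fix swaps strict NUL-termination for an explicit end-pointer check: str_uint64_t() now parses leniently and the caller verifies that parsing consumed exactly len characters, so a token like 123abc inside a longer command line is rejected, and the error messages now echo the offending text. The same pattern with the standard library, for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns 0 on success. Rejects "123abc" the same way the fixed
     * kconsole code does: the end pointer must land at text + len. */
    static int parse_u64(const char *text, size_t len, unsigned long long *out)
    {
        char *end;
        errno = 0;
        *out = strtoull(text, &end, 0);
        if (errno == ERANGE)
            return -1;  /* overflow */
        if (len == 0 || end != text + len)
            return -1;  /* empty or trailing junk */
        return 0;
    }

    int main(void)
    {
        unsigned long long v;
        printf("%d\n", parse_u64("0x10", 4, &v));   /* 0  */
        printf("%d\n", parse_u64("123abc", 6, &v)); /* -1 */
        return 0;
    }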
kernel/generic/src/interrupt/interrupt.c
rea906c29 r850235d 166 166 } 167 167 168 static NO_TRACE void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args) 168 static NO_TRACE 169 void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args) 169 170 { 170 171 printf("Task %s (%" PRIu64 ") killed due to an exception at " -
kernel/generic/src/main/kinit.c
rea906c29 r850235d 172 172 #endif /* CONFIG_KCONSOLE */ 173 173 174 /* 175 * Store the default stack size in sysinfo so that uspace can create 176 * stack with this default size. 177 */ 178 sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER); 179 174 180 interrupts_enable(); 175 181 … … 244 250 CAP_IO_MANAGER | CAP_IRQ_REG); 245 251 246 if (!ipc_phone_0) 252 if (!ipc_phone_0) { 247 253 ipc_phone_0 = &programs[i].task->answerbox; 254 /* 255 * Hold the first task so that the 256 * ipc_phone_0 remains a valid pointer 257 * even if the first task exits for 258 * whatever reason. 259 */ 260 task_hold(programs[i].task); 261 } 248 262 } 249 263 -
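Publishing STACK_SIZE_USER as default.stack_size lets userspace size new stacks to the kernel's default instead of hardcoding a constant. A hedged sketch of the consuming side; the sysinfo_get_value() signature used here is an assumption about the uspace API, not part of this hunk:

    #include <stddef.h>

    /* Assumed uspace interface; declared here only for the sketch. */
    extern int sysinfo_get_value(const char *path, unsigned long *value);

    size_t default_stack_size(void)
    {
        unsigned long size;
        if (sysinfo_get_value("default.stack_size", &size) == 0)
            return (size_t) size;
        return 1024 * 1024;  /* matches STACK_SIZE_USER's 1 MiB */
    }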
kernel/generic/src/mm/as.c
rea906c29 r850235d 79 79 #include <syscall/copy.h> 80 80 #include <arch/interrupt.h> 81 #include <interrupt.h> 81 82 82 83 /** … … 285 286 /** Check area conflicts with other areas. 286 287 * 287 * @param as Address space. 288 * @param addr Starting virtual address of the area being tested. 289 * @param count Number of pages in the area being tested. 290 * @param avoid Do not touch this area. 288 * @param as Address space. 289 * @param addr Starting virtual address of the area being tested. 290 * @param count Number of pages in the area being tested. 291 * @param guarded True if the area being tested is protected by guard pages. 292 * @param avoid Do not touch this area. 291 293 * 292 294 * @return True if there is no conflict, false otherwise. … … 294 296 */ 295 297 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 296 size_t count, as_area_t *avoid)298 size_t count, bool guarded, as_area_t *avoid) 297 299 { 298 300 ASSERT((addr % PAGE_SIZE) == 0); 299 301 ASSERT(mutex_locked(&as->lock)); 302 303 /* 304 * If the addition of the supposed area address and size overflows, 305 * report conflict. 306 */ 307 if (overflows_into_positive(addr, P2SZ(count))) 308 return false; 300 309 301 310 /* … … 304 313 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE)) 305 314 return false; 306 315 307 316 /* 308 317 * The leaf node is found in O(log n), where n is proportional to … … 328 337 if (area != avoid) { 329 338 mutex_lock(&area->lock); 330 339 340 /* 341 * If at least one of the two areas are protected 342 * by the AS_AREA_GUARD flag then we must be sure 343 * that they are separated by at least one unmapped 344 * page. 345 */ 346 int const gp = (guarded || 347 (area->flags & AS_AREA_GUARD)) ? 1 : 0; 348 349 /* 350 * The area comes from the left neighbour node, which 351 * means that there already are some areas in the leaf 352 * node, which in turn means that adding gp is safe and 353 * will not cause an integer overflow. 354 */ 331 355 if (overlaps(addr, P2SZ(count), area->base, 356 P2SZ(area->pages + gp))) { 357 mutex_unlock(&area->lock); 358 return false; 359 } 360 361 mutex_unlock(&area->lock); 362 } 363 } 364 365 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 366 if (node) { 367 area = (as_area_t *) node->value[0]; 368 369 if (area != avoid) { 370 int gp; 371 372 mutex_lock(&area->lock); 373 374 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0; 375 if (gp && overflows(addr, P2SZ(count))) { 376 /* 377 * Guard page not needed if the supposed area 378 * is adjacent to the end of the address space. 379 * We already know that the following test is 380 * going to fail... 381 */ 382 gp--; 383 } 384 385 if (overlaps(addr, P2SZ(count + gp), area->base, 332 386 P2SZ(area->pages))) { 333 387 mutex_unlock(&area->lock); … … 339 393 } 340 394 341 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);342 if (node) {343 area = (as_area_t *) node->value[0];344 345 if (area != avoid) {346 mutex_lock(&area->lock);347 348 if (overlaps(addr, P2SZ(count), area->base,349 P2SZ(area->pages))) {350 mutex_unlock(&area->lock);351 return false;352 }353 354 mutex_unlock(&area->lock);355 }356 }357 358 395 /* Second, check the leaf node. 
*/ 359 396 btree_key_t i; 360 397 for (i = 0; i < leaf->keys; i++) { 361 398 area = (as_area_t *) leaf->value[i]; 399 int agp; 400 int gp; 362 401 363 402 if (area == avoid) … … 365 404 366 405 mutex_lock(&area->lock); 367 368 if (overlaps(addr, P2SZ(count), area->base, 369 P2SZ(area->pages))) { 406 407 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0; 408 agp = gp; 409 410 /* 411 * Sanitize the two possible unsigned integer overflows. 412 */ 413 if (gp && overflows(addr, P2SZ(count))) 414 gp--; 415 if (agp && overflows(area->base, P2SZ(area->pages))) 416 agp--; 417 418 if (overlaps(addr, P2SZ(count + gp), area->base, 419 P2SZ(area->pages + agp))) { 370 420 mutex_unlock(&area->lock); 371 421 return false; … … 377 427 /* 378 428 * So far, the area does not conflict with other areas. 379 * Check if it doesn't conflict with kerneladdress space.429 * Check if it is contained in the user address space. 380 430 */ 381 431 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 382 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START, 383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 432 return iswithin(USER_ADDRESS_SPACE_START, 433 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1, 434 addr, P2SZ(count)); 384 435 } 385 436 … … 392 443 * this function. 393 444 * 394 * @param as Address space. 395 * @param bound Lowest address bound. 396 * @param size Requested size of the allocation. 445 * @param as Address space. 446 * @param bound Lowest address bound. 447 * @param size Requested size of the allocation. 448 * @param guarded True if the allocation must be protected by guard pages. 397 449 * 398 450 * @return Address of the beginning of unmapped address space area. … … 401 453 */ 402 454 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound, 403 size_t size )455 size_t size, bool guarded) 404 456 { 405 457 ASSERT(mutex_locked(&as->lock)); … … 423 475 /* First check the bound address itself */ 424 476 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE); 425 if ((addr >= bound) && 426 (check_area_conflicts(as, addr, pages, NULL))) 427 return addr; 477 if (addr >= bound) { 478 if (guarded) { 479 /* Leave an unmapped page between the lower 480 * bound and the area's start address. 481 */ 482 addr += P2SZ(1); 483 } 484 485 if (check_area_conflicts(as, addr, pages, guarded, NULL)) 486 return addr; 487 } 428 488 429 489 /* Eventually check the addresses behind each area */ … … 439 499 addr = 440 500 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE); 501 502 if (guarded || area->flags & AS_AREA_GUARD) { 503 /* We must leave an unmapped page 504 * between the two areas. 
505 */ 506 addr += P2SZ(1); 507 } 508 441 509 bool avail = 442 510 ((addr >= bound) && (addr >= area->base) && 443 (check_area_conflicts(as, addr, pages, area)));511 (check_area_conflicts(as, addr, pages, guarded, area))); 444 512 445 513 mutex_unlock(&area->lock); … … 481 549 if (size == 0) 482 550 return NULL; 483 551 484 552 size_t pages = SIZE2FRAMES(size); 485 553 … … 487 555 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 488 556 return NULL; 557 558 bool const guarded = flags & AS_AREA_GUARD; 489 559 490 560 mutex_lock(&as->lock); 491 561 492 562 if (*base == (uintptr_t) -1) { 493 *base = as_get_unmapped_area(as, bound, size );563 *base = as_get_unmapped_area(as, bound, size, guarded); 494 564 if (*base == (uintptr_t) -1) { 495 565 mutex_unlock(&as->lock); … … 497 567 } 498 568 } 499 500 if (!check_area_conflicts(as, *base, pages, NULL)) { 569 570 if (overflows_into_positive(*base, size)) 571 return NULL; 572 573 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) { 501 574 mutex_unlock(&as->lock); 502 575 return NULL; … … 625 698 return ENOENT; 626 699 } 627 628 if (area->backend == &phys_backend) { 629 /* 630 * Remapping of address space areas associated 631 * with memory mapped devices is not supported. 700 701 if (!area->backend->is_resizable(area)) { 702 /* 703 * The backend does not support resizing for this area. 632 704 */ 633 705 mutex_unlock(&area->lock); … … 776 848 /* 777 849 * Growing the area. 850 */ 851 852 if (overflows_into_positive(address, P2SZ(pages))) 853 return EINVAL; 854 855 /* 778 856 * Check for overlaps with other address space areas. 779 857 */ 780 if (!check_area_conflicts(as, address, pages, area)) { 858 bool const guarded = area->flags & AS_AREA_GUARD; 859 if (!check_area_conflicts(as, address, pages, guarded, area)) { 781 860 mutex_unlock(&area->lock); 782 861 mutex_unlock(&as->lock); … … 979 1058 } 980 1059 981 if ((!src_area->backend) || (!src_area->backend->share)) { 982 /* 983 * There is no backend or the backend does not 984 * know how to share the area. 1060 if (!src_area->backend->is_shareable(src_area)) { 1061 /* 1062 * The backend does not permit sharing of this area. 985 1063 */ 986 1064 mutex_unlock(&src_area->lock); … … 1285 1363 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 1286 1364 { 1365 int rc = AS_PF_FAULT; 1366 1287 1367 if (!THREAD) 1288 return AS_PF_FAULT;1368 goto page_fault; 1289 1369 1290 1370 if (!AS) 1291 return AS_PF_FAULT;1371 goto page_fault; 1292 1372 1293 1373 mutex_lock(&AS->lock); … … 1345 1425 * Resort to the backend page fault handler. 
1346 1426 */ 1347 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { 1427 rc = area->backend->page_fault(area, page, access); 1428 if (rc != AS_PF_OK) { 1348 1429 page_table_unlock(AS, false); 1349 1430 mutex_unlock(&area->lock); … … 1366 1447 istate_set_retaddr(istate, 1367 1448 (uintptr_t) &memcpy_to_uspace_failover_address); 1449 } else if (rc == AS_PF_SILENT) { 1450 printf("Killing task %" PRIu64 " due to a " 1451 "failed late reservation request.\n", TASK->taskid); 1452 task_kill_self(true); 1368 1453 } else { 1369 return AS_PF_FAULT; 1454 fault_if_from_uspace(istate, "Page fault: %p.", (void *) page); 1455 panic_memtrap(istate, access, page, NULL); 1370 1456 } 1371 1457 … … 2054 2140 { 2055 2141 uintptr_t virt = base; 2056 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,2142 as_area_t *area = as_area_create(AS, flags, size, 2057 2143 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound); 2058 2144 if (area == NULL) -
kernel/generic/src/mm/backend_anon.c
rea906c29 r850235d 59 59 static void anon_destroy(as_area_t *); 60 60 61 static bool anon_is_resizable(as_area_t *); 62 static bool anon_is_shareable(as_area_t *); 63 61 64 static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t); 62 65 static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t); … … 68 71 .destroy = anon_destroy, 69 72 73 .is_resizable = anon_is_resizable, 74 .is_shareable = anon_is_shareable, 75 70 76 .page_fault = anon_page_fault, 71 77 .frame_free = anon_frame_free, … … 74 80 bool anon_create(as_area_t *area) 75 81 { 82 if (area->flags & AS_AREA_LATE_RESERVE) 83 return true; 84 76 85 return reserve_try_alloc(area->pages); 77 86 } … … 79 88 bool anon_resize(as_area_t *area, size_t new_pages) 80 89 { 90 if (area->flags & AS_AREA_LATE_RESERVE) 91 return true; 92 81 93 if (new_pages > area->pages) 82 94 return reserve_try_alloc(new_pages - area->pages); … … 100 112 ASSERT(mutex_locked(&area->as->lock)); 101 113 ASSERT(mutex_locked(&area->lock)); 114 ASSERT(!(area->flags & AS_AREA_LATE_RESERVE)); 102 115 103 116 /* … … 139 152 void anon_destroy(as_area_t *area) 140 153 { 154 if (area->flags & AS_AREA_LATE_RESERVE) 155 return; 156 141 157 reserve_free(area->pages); 142 158 } 143 159 160 bool anon_is_resizable(as_area_t *area) 161 { 162 return true; 163 } 164 165 bool anon_is_shareable(as_area_t *area) 166 { 167 return !(area->flags & AS_AREA_LATE_RESERVE); 168 } 144 169 145 170 /** Service a page fault in the anonymous memory address space area. … … 225 250 * the different causes 226 251 */ 252 253 if (area->flags & AS_AREA_LATE_RESERVE) { 254 /* 255 * Reserve the memory for this page now. 256 */ 257 if (!reserve_try_alloc(1)) 258 return AS_PF_SILENT; 259 } 260 227 261 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 228 262 memsetb((void *) kpage, PAGE_SIZE, 0); … … 255 289 ASSERT(mutex_locked(&area->lock)); 256 290 257 frame_free_noreserve(frame); 291 if (area->flags & AS_AREA_LATE_RESERVE) { 292 /* 293 * In case of the late reserve areas, physical memory will not 294 * be unreserved when the area is destroyed so we need to use 295 * the normal unreserving frame_free(). 296 */ 297 frame_free(frame); 298 } else { 299 /* 300 * The reserve will be given back when the area is destroyed or 301 * resized, so use the frame_free_noreserve() which does not 302 * manipulate the reserve or it would be given back twice. 303 */ 304 frame_free_noreserve(frame); 305 } 258 306 } 259 307 -
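AS_AREA_LATE_RESERVE changes the anonymous backend's accounting: creation, resizing and destruction no longer touch the memory reserve up front, each page fault instead reserves a single frame on demand, and a failed late reservation returns the new AS_PF_SILENT code so the generic handler kills the task rather than panicking. Note the matching asymmetry in anon_frame_free(): late-reserve frames go back through the reserving frame_free(), everything else through frame_free_noreserve(). A condensed sketch of the fault-side logic, with stubs standing in for kernel interfaces and a placeholder flag bit:

    #include <stdbool.h>

    /* Stubs for the kernel's reservation and paging layers. */
    extern bool reserve_try_alloc(unsigned frames);
    extern void map_zeroed_frame(unsigned long page);

    /* Values as introduced in mm/as.h by this changeset. */
    enum { AS_PF_OK = 0, AS_PF_DEFER = 1, AS_PF_FAULT = 2, AS_PF_SILENT = 3 };

    #define AS_AREA_LATE_RESERVE  (1 << 6)  /* placeholder bit, not from the diff */

    int anon_fault_sketch(unsigned area_flags, unsigned long page)
    {
        if (area_flags & AS_AREA_LATE_RESERVE) {
            /* Reserve now, one frame at a time; failure is reported
             * silently and the faulting task is killed upstream. */
            if (!reserve_try_alloc(1))
                return AS_PF_SILENT;
        }
        map_zeroed_frame(page);
        return AS_PF_OK;
    }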
kernel/generic/src/mm/backend_elf.c
rea906c29 r850235d 58 58 static void elf_destroy(as_area_t *); 59 59 60 static bool elf_is_resizable(as_area_t *); 61 static bool elf_is_shareable(as_area_t *); 62 60 63 static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t); 61 64 static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t); … … 66 69 .share = elf_share, 67 70 .destroy = elf_destroy, 71 72 .is_resizable = elf_is_resizable, 73 .is_shareable = elf_is_shareable, 68 74 69 75 .page_fault = elf_page_fault, … … 213 219 } 214 220 221 bool elf_is_resizable(as_area_t *area) 222 { 223 return true; 224 } 225 226 bool elf_is_shareable(as_area_t *area) 227 { 228 return true; 229 } 230 231 215 232 /** Service a page fault in the ELF backend address space area. 216 233 * -
kernel/generic/src/mm/backend_phys.c
rea906c29 r850235d 52 52 static void phys_destroy(as_area_t *); 53 53 54 static bool phys_is_resizable(as_area_t *); 55 static bool phys_is_shareable(as_area_t *); 56 57 54 58 static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t); 55 59 … … 59 63 .share = phys_share, 60 64 .destroy = phys_destroy, 65 66 .is_resizable = phys_is_resizable, 67 .is_shareable = phys_is_shareable, 61 68 62 69 .page_fault = phys_page_fault, … … 87 94 /* Nothing to do. */ 88 95 } 96 97 bool phys_is_resizable(as_area_t *area) 98 { 99 return false; 100 } 101 102 bool phys_is_shareable(as_area_t *area) 103 { 104 return true; 105 } 106 89 107 90 108 /** Service a page fault in the address space area backed by physical memory. -
kernel/generic/src/mm/km.c
rea906c29 r850235d 233 233 * @param[inout] framep Pointer to a variable which will receive the physical 234 234 * address of the allocated frame. 235 * @param[in] flags Frame allocation flags. FRAME_NONE or FRAME_NO_RESERVE. 235 * @param[in] flags Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE 236 * and FRAME_ATOMIC bits are allowed. 236 237 * @return Virtual address of the allocated frame. 237 238 */ … … 243 244 ASSERT(THREAD); 244 245 ASSERT(framep); 245 ASSERT(!(flags & ~ FRAME_NO_RESERVE));246 ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC))); 246 247 247 248 /* … … 255 256 ASSERT(page); // FIXME 256 257 } else { 257 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 258 FRAME_LOWMEM); 258 frame = (uintptr_t) frame_alloc(ONE_FRAME, 259 FRAME_LOWMEM | flags); 260 if (!frame) 261 return (uintptr_t) NULL; 259 262 page = PA2KA(frame); 260 263 } -
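km_temporary_page_get() now tolerates FRAME_ATOMIC in addition to FRAME_NO_RESERVE and passes the flags through to frame_alloc(), which means the allocation may fail and the function can return NULL instead of blocking. A sketch of the calling pattern this enables; the flag bit and prototype below are simplified stand-ins, not the kernel's own:

    /* Simplified stand-ins; the real flags live in mm/frame.h. */
    #define FRAME_ATOMIC  (1 << 0)  /* placeholder bit */
    extern unsigned long km_temporary_page_get(unsigned long *framep, int flags);

    int grab_page_sketch(unsigned long *page, unsigned long *frame)
    {
        *page = km_temporary_page_get(frame, FRAME_ATOMIC);
        if (!*page)
            return -1;  /* atomic allocation may fail: caller must cope */
        return 0;
    }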
kernel/generic/src/proc/program.c
rea906c29 r850235d 79 79 * Create the stack address space area. 80 80 */ 81 uintptr_t virt = USTACK_ADDRESS; 81 uintptr_t virt = (uintptr_t) -1; 82 uintptr_t bound = USER_ADDRESS_SPACE_END - (STACK_SIZE_USER - 1); 83 84 /* Adjust bound to create space for the desired guard page. */ 85 bound -= PAGE_SIZE; 86 82 87 as_area_t *area = as_area_create(as, 83 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 84 STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0); 88 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD | 89 AS_AREA_LATE_RESERVE, STACK_SIZE_USER, AS_AREA_ATTR_NONE, 90 &anon_backend, NULL, &virt, bound); 85 91 if (!area) { 86 92 task_destroy(prg->task); … … 93 99 kernel_uarg->uspace_entry = (void *) entry_addr; 94 100 kernel_uarg->uspace_stack = (void *) virt; 95 kernel_uarg->uspace_stack_size = STACK_SIZE ;101 kernel_uarg->uspace_stack_size = STACK_SIZE_USER; 96 102 kernel_uarg->uspace_thread_function = NULL; 97 103 kernel_uarg->uspace_thread_arg = NULL; -
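The user stack is no longer pinned at a fixed USTACK_ADDRESS: passing virt = -1 asks as_area_create() to pick a free range, bounded from below so that both the 1 MiB STACK_SIZE_USER and the guard page a guarded area demands still fit under the top of the user address space, and AS_AREA_LATE_RESERVE defers reserving that megabyte until pages are actually touched. The bound arithmetic, spelled out with illustrative constants:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096                      /* demo assumption */
    #define STACK_SIZE_USER  (1 * 1024 * 1024)
    #define USER_AS_END      UINT64_C(0x7fffffffffff)  /* illustrative top */

    int main(void)
    {
        /* Lowest base from which the stack still fits below the top... */
        uint64_t bound = USER_AS_END - (STACK_SIZE_USER - 1);
        /* ...lowered by one page so the unmapped guard page that
         * as_get_unmapped_area() inserts below a guarded area fits. */
        bound -= PAGE_SIZE;

        uint64_t stack_base = bound + PAGE_SIZE;  /* after the guard page */
        printf("guard page at %#llx, stack %#llx..%#llx\n",
            (unsigned long long) bound,
            (unsigned long long) stack_base,
            (unsigned long long) (stack_base + STACK_SIZE_USER - 1));
        return 0;
    }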
kernel/generic/src/proc/scheduler.c
rea906c29 r850235d 92 92 else 93 93 fpu_disable(); 94 #el se94 #elif defined CONFIG_FPU 95 95 fpu_enable(); 96 96 if (THREAD->fpu_context_exists) … … 327 327 THREAD->kcycles += get_cycle() - THREAD->last_cycle; 328 328 329 #if ndef CONFIG_FPU_LAZY329 #if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY) 330 330 fpu_context_save(THREAD->saved_fpu_context); 331 331 #endif -
kernel/generic/src/proc/task.c
rea906c29 r850235d 125 125 { 126 126 size_t tasks_left; 127 128 if (ipc_phone_0) { 129 task_t *task_0 = ipc_phone_0->task; 130 ipc_phone_0 = NULL; 131 /* 132 * The first task is held by kinit(), we need to release it or 133 * it will never finish cleanup. 134 */ 135 task_release(task_0); 136 } 127 137 128 138 /* Repeat until there are any tasks except TASK */ … … 196 206 task->ucycles = 0; 197 207 task->kcycles = 0; 198 208 199 209 task->ipc_info.call_sent = 0; 200 210 task->ipc_info.call_received = 0;
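Holding the first task closes a use-after-free window: ipc_phone_0 points into the first task's answerbox, so if that task exited for any reason the pointer went stale. kinit() now takes a reference when it publishes ipc_phone_0, and task_done() drops it, after clearing the pointer, once system shutdown begins. The pairing in miniature, with stub types standing in for task_t and its reference counting:

    #include <stddef.h>

    /* Stub reference counting standing in for task_hold()/task_release(). */
    typedef struct task {
        int refcount;
        /* ... the answerbox lives here in the real task_t ... */
    } task_t;

    static task_t *ipc_phone_0_task;  /* owner of the published answerbox */

    static void task_hold(task_t *t)    { t->refcount++; }
    static void task_release(task_t *t) { t->refcount--; /* free at zero */ }

    void publish_phone_0(task_t *first)
    {
        ipc_phone_0_task = first;
        task_hold(first);  /* keep the answerbox memory alive */
    }

    void shutdown_phone_0(void)
    {
        task_t *t = ipc_phone_0_task;
        if (t) {
            ipc_phone_0_task = NULL;  /* clear before releasing */
            task_release(t);          /* task may now finish cleanup */
        }
    }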