- Timestamp: 2013-01-24T22:07:06Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 03362fbd, 3acd1bb, d59c046
- Parents: 6218d4b (diff), 24bead17 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/arch/arm32
- Files: 1 added, 16 edited
  - Makefile.inc (modified) (1 diff)
  - include/asm.h (modified) (1 diff)
  - include/barrier.h (modified) (3 diffs)
  - include/cache.h (added)
  - include/cp15.h (modified) (4 diffs)
  - include/cpu.h (modified) (2 diffs)
  - include/cycle.h (modified) (2 diffs)
  - include/mm/frame.h (modified) (2 diffs)
  - include/mm/page.h (modified) (1 diff)
  - include/mm/page_fault.h (modified) (1 diff)
  - include/regutils.h (modified) (1 diff)
  - include/security_ext.h (modified) (1 diff)
  - src/cpu/cpu.c (modified) (5 diffs)
  - src/exception.c (modified) (3 diffs)
  - src/fpu_context.c (modified) (3 diffs)
  - src/mach/beagleboardxm/beagleboardxm.c (modified) (5 diffs)
  - src/mm/page_fault.c (modified) (4 diffs)
Legend for the diffs below:
- lines prefixed with + were added
- lines prefixed with - were removed
- unprefixed lines are unchanged context; a lone … marks collapsed unchanged lines
kernel/arch/arm32/Makefile.inc
r6218d4b → r005b765

ATSIGN = %

- GCC_CFLAGS += -fno-omit-frame-pointer -mapcs-frame -march=$(subst _,-,$(PROCESSOR)) -mno-unaligned-access
-
- ifeq ($(MACHINE),beagleboardxm)
-     GCC_CFLAGS += -mcpu=cortex-a8
- endif
+ GCC_CFLAGS += -fno-omit-frame-pointer -mapcs-frame -march=$(subst _,-,$(PROCESSOR_ARCH)) -mno-unaligned-access

ifeq ($(CONFIG_FPU),y)
kernel/arch/arm32/include/asm.h
r6218d4b → r005b765

#include <trace.h>

- /** No such instruction on old ARM to sleep CPU.
+ /** CPU specific way to sleep cpu.
 *
 * ARMv7 introduced wait for event and wait for interrupt (wfe/wfi).
 * ARM920T has custom coprocessor action to do the same. See ARM920T Technical
 * Reference Manual ch 4.9 p. 4-23 (103 in the PDF)
+  * ARM926EJ-S uses the same coprocessor instruction as ARM920T. See ARM926EJ-S
+  * chapter 2.3.8 p.2-22 (52 in the PDF)
+  *
+  * @note Although mcr p15, 0, R0, c7, c0, 4 is defined in ARM Architecture
+  * reference manual for armv4/5 CP15 implementation is mandatory only for
+  * armv6+.
 */
NO_TRACE static inline void cpu_sleep(void)
{
- #ifdef PROCESSOR_armv7_a
-     asm volatile ( "wfe" ::);
- #elif defined(MACHINE_gta02)
-     asm volatile ( "mcr p15, 0,R0,c7,c0,4" ::);
+ #ifdef PROCESSOR_ARCH_armv7_a
+     asm volatile ( "wfe" );
+ #elif defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_arm926ej_s) | defined(PROCESSOR_arm920t)
+     asm volatile ( "mcr p15, 0, R0, c7, c0, 4" );
#endif
}
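The new cpu_sleep() picks the sleep primitive at build time: wfe on ARMv7-A, the CP15 "wait for interrupt" operation (c7, c0, 4) on ARMv6/ARM926EJ-S/ARM920T, and nothing elsewhere. For illustration, a minimal standalone sketch of the same compile-time dispatch for an ARM target, keyed off the compiler-provided __ARM_ARCH macro instead of the kernel's PROCESSOR_ARCH_* defines (the helper name sleep_hint is hypothetical, not part of the changeset):

/* Sketch only: selects a sleep hint the way cpu_sleep() above does. */
static inline void sleep_hint(void)
{
#if defined(__ARM_ARCH) && __ARM_ARCH >= 7
    asm volatile ("wfe");
#elif defined(__ARM_ARCH) && __ARM_ARCH == 6
    /* CP15 c7, c0, 4: wait for interrupt (also used on ARM920T/ARM926EJ-S) */
    asm volatile ("mcr p15, 0, %0, c7, c0, 4" :: "r" (0));
#else
    /* No architectural sleep hint available on older cores. */
#endif
}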
kernel/arch/arm32/include/barrier.h
r6218d4b → r005b765

#define KERN_arm32_BARRIER_H_

- /*
-  * TODO: implement true ARM memory barriers for macros below.
-  * ARMv6 introduced user access of the following commands:
-  * • Prefetch flush
-  * • Data synchronization barrier
-  * • Data memory barrier
-  * • Clean and prefetch range operations.
-  * ARM Architecture Reference Manual version I ch. B.3.2.1 p. B3-4
-  */
+ #ifdef KERNEL
+ #include <arch/cp15.h>
+ #else
+ #include <libarch/cp15.h>
+ #endif
+
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
…
#define read_barrier() asm volatile ("dsb" ::: "memory")
#define write_barrier() asm volatile ("dsb st" ::: "memory")
+ #define inst_barrier() asm volatile ("isb" ::: "memory")
+ #elif defined PROCESSOR_ARCH_armv6 | defined KERNEL
+ /*
+  * ARMv6 introduced user access of the following commands:
+  * - Prefetch flush
+  * - Data synchronization barrier
+  * - Data memory barrier
+  * - Clean and prefetch range operations.
+  * ARM Architecture Reference Manual version I ch. B.3.2.1 p. B3-4
+  */
+ /* ARMv6- use system control coprocessor (CP15) for memory barrier instructions.
+  * Although at least mcr p15, 0, r0, c7, c10, 4 is mentioned in earlier archs,
+  * CP15 implementation is mandatory only for armv6+.
+  */
+ #define memory_barrier() CP15DMB_write(0)
+ #define read_barrier() CP15DSB_write(0)
+ #define write_barrier() read_barrier()
+ #define inst_barrier() CP15ISB_write(0)
#else
+ /* Older manuals mention syscalls as a way to implement cache coherency and
+  * barriers. See for example ARM Architecture Reference Manual Version D
+  * chapter 2.7.4 Prefetching and self-modifying code (p. A2-28)
+  */
+ // TODO implement on per PROCESSOR basis or via syscalls
#define memory_barrier() asm volatile ("" ::: "memory")
#define read_barrier() asm volatile ("" ::: "memory")
#define write_barrier() asm volatile ("" ::: "memory")
+ #define inst_barrier() asm volatile ("" ::: "memory")
#endif
+
/*
 * There are multiple ways ICache can be implemented on ARM machines. Namely
…
 */

- #ifdef PROCESSOR_ARCH_armv7_a
- #define smc_coherence(a) asm volatile ( "isb" ::: "memory")
- #define smc_coherence_block(a, l) smc_coherence(a)
- #else
+ #if defined PROCESSOR_ARCH_armv7_a | defined PROCESSOR_ARCH_armv6 | defined KERNEL
/* Available on all supported arms,
 * invalidates entire ICache so the written value does not matter. */
- //TODO might be PL1 only on armv5 -
- #define smc_coherence(a) asm volatile ( "mcr p15, 0, r0, c7, c5, 0")
- #define smc_coherence_block(a, l) smc_coherence(a)
+ //TODO might be PL1 only on armv5-
+ #define smc_coherence(a) \
+ do { \
+     DCCMVAU_write((uint32_t)(a));  /* Flush changed memory */\
+     write_barrier();               /* Wait for completion */\
+     ICIALLU_write(0);              /* Flush ICache */\
+     inst_barrier();                /* Wait for Inst refetch */\
+ } while (0)
+ /* @note: Cache type register is not available in uspace. We would need
+  * to export the cache line value, or use syscall for uspace smc_coherence */
+ #define smc_coherence_block(a, l) \
+ do { \
+     for (uintptr_t addr = (uintptr_t)a; addr < (uintptr_t)a + l; addr += 4)\
+         smc_coherence(addr); \
+ } while (0)
+ #else
+ #define smc_coherence(a)
+ #define smc_coherence_block(a, l)
#endif
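On ARMv6+ and kernel builds, smc_coherence(a) now performs the full self-modifying-code sequence: clean the data-cache line holding the new instruction (DCCMVAU), wait for completion (DSB), invalidate the instruction cache (ICIALLU), and resynchronize the fetch stream (ISB). A usage sketch, assuming kernel context with this barrier.h included; patch_instruction() and its arguments are hypothetical and not part of the changeset:

#include <stdint.h>

/* Sketch: publish one patched instruction word to the instruction stream. */
static void patch_instruction(uint32_t *patch_site, uint32_t new_insn)
{
    *patch_site = new_insn;     /* data-side write of the new opcode */
    smc_coherence(patch_site);  /* DCCMVAU + DSB + ICIALLU + ISB, as above */
    /* It is now safe to branch to patch_site. */
}

The exception.c hunk later in this changeset uses exactly this pattern when it rewrites an exception vector (*vector = ...; smc_coherence(vector)).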
kernel/arch/arm32/include/cp15.h
r6218d4b → r005b765

/* Identification registers */
+ enum {
+     MIDR_IMPLEMENTER_MASK = 0xff,
+     MIDR_IMPLEMENTER_SHIFT = 24,
+     MIDR_VARIANT_MASK = 0xf,
+     MIDR_VARIANT_SHIFT = 20,
+     MIDR_ARCHITECTURE_MASK = 0xf,
+     MIDR_ARCHITECTURE_SHIFT = 16,
+     MIDR_PART_NUMBER_MASK = 0xfff,
+     MIDR_PART_NUMBER_SHIFT = 4,
+     MIDR_REVISION_MASK = 0xf,
+     MIDR_REVISION_SHIFT = 0,
+ };
CONTROL_REG_GEN_READ(MIDR, c0, 0, c0, 0);
+
+ enum {
+     CTR_FORMAT_MASK = 0xe0000000,
+     CTR_FORMAT_ARMv7 = 0x80000000,
+     CTR_FORMAT_ARMv6 = 0x00000000,
+     /* ARMv7 format */
+     CTR_CWG_MASK = 0xf,
+     CTR_CWG_SHIFT = 24,
+     CTR_ERG_MASK = 0xf,
+     CTR_ERG_SHIFT = 20,
+     CTR_D_MIN_LINE_MASK = 0xf,
+     CTR_D_MIN_LINE_SHIFT = 16,
+     CTR_I_MIN_LINE_MASK = 0xf,
+     CTR_I_MIN_LINE_SHIFT = 0,
+     CTR_L1I_POLICY_MASK = 0x0000c000,
+     CTR_L1I_POLICY_AIVIVT = 0x00004000,
+     CTR_L1I_POLICY_VIPT = 0x00008000,
+     CTR_L1I_POLICY_PIPT = 0x0000c000,
+     /* ARMv6 format */
+     CTR_CTYPE_MASK = 0x1e000000,
+     CTR_CTYPE_WT = 0x00000000,
+     CTR_CTYPE_WB_NL = 0x04000000,
+     CTR_CTYPE_WB_D = 0x0a000000,
+     CTR_CTYPE_WB_A = 0x0c000000, /**< ARMv5- only */
+     CTR_CTYPE_WB_B = 0x0e000000, /**< ARMv5- only */
+     CTR_CTYPE_WB_C = 0x1c000000,
+     CTR_SEP_FLAG = 1 << 24,
+     CTR_DCACHE_P_FLAG = 1 << 23,
+     CTR_DCACHE_SIZE_MASK = 0xf,
+     CTR_DCACHE_SIZE_SHIFT = 18,
+     CTR_DCACHE_ASSOC_MASK = 0x7,
+     CTR_DCACHE_ASSOC_SHIFT = 15,
+     CTR_DCACHE_M_FLAG = 1 << 14,
+     CTR_DCACHE_LEN_MASK = 0x3,
+     CTR_DCACHE_LEN_SHIFT = 0,
+     CTR_ICACHE_P_FLAG = 1 << 11,
+     CTR_ICACHE_SIZE_MASK = 0xf,
+     CTR_ICACHE_SIZE_SHIFT = 6,
+     CTR_ICACHE_ASSOC_MASK = 0x7,
+     CTR_ICACHE_ASSOC_SHIFT = 3,
+     CTR_ICACHE_M_FLAG = 1 << 2,
+     CTR_ICACHE_LEN_MASK = 0x3,
+     CTR_ICACHE_LEN_SHIFT = 0,
+ };
CONTROL_REG_GEN_READ(CTR, c0, 0, c0, 1);
CONTROL_REG_GEN_READ(TCMR, c0, 0, c0, 2);
…
CONTROL_REG_GEN_READ(ID_ISAR5, c0, 0, c2, 5);

+ enum {
+     CCSIDR_WT_FLAG = 1 << 31,
+     CCSIDR_WB_FLAG = 1 << 30,
+     CCSIDR_RA_FLAG = 1 << 29,
+     CCSIDR_WA_FLAG = 1 << 28,
+     CCSIDR_NUMSETS_MASK = 0x7fff,
+     CCSIDR_NUMSETS_SHIFT = 13,
+     CCSIDR_ASSOC_MASK = 0x3ff,
+     CCSIDR_ASSOC_SHIFT = 3,
+     CCSIDR_LINESIZE_MASK = 0x7,
+     CCSIDR_LINESIZE_SHIFT = 0,
+ };
CONTROL_REG_GEN_READ(CCSIDR, c0, 1, c0, 0);
+
+ enum {
+     CLIDR_LOUU_MASK = 0x7,
+     CLIDR_LOUU_SHIFT = 27,
+     CLIDR_LOC_MASK = 0x7,
+     CLIDR_LOC_SHIFT = 24,
+     CLIDR_LOUIS_MASK = 0x7,
+     CLIDR_LOUIS_SHIFT = 21,
+     CLIDR_NOCACHE = 0x0,
+     CLIDR_ICACHE_ONLY = 0x1,
+     CLIDR_DCACHE_ONLY = 0x2,
+     CLIDR_SEP_CACHE = 0x3,
+     CLIDR_UNI_CACHE = 0x4,
+     CLIDR_CACHE_MASK = 0x7,
+ #define CLIDR_CACHE(level, val) ((val >> (level - 1) * 3) & CLIDR_CACHE_MASK)
+ };
CONTROL_REG_GEN_READ(CLIDR, c0, 1, c0, 1);
CONTROL_REG_GEN_READ(AIDR, c0, 1, c0, 7); /* Implementation defined or MIDR */

+ enum {
+     CCSELR_LEVEL_MASK = 0x7,
+     CCSELR_LEVEL_SHIFT = 1,
+     CCSELR_INSTRUCTION_FLAG = 1 << 0,
+ };
CONTROL_REG_GEN_READ(CSSELR, c0, 2, c0, 0);
CONTROL_REG_GEN_WRITE(CSSELR, c0, 2, c0, 0);
…

/* System control registers */
+ /* COntrol register bit values see ch. B4.1.130 of ARM Architecture Reference
+  * Manual ARMv7-A and ARMv7-R edition, page 1687 */
+ enum {
+     SCTLR_MMU_EN_FLAG = 1 << 0,
+     SCTLR_ALIGN_CHECK_EN_FLAG = 1 << 1, /* Allow alignemnt check */
+     SCTLR_CACHE_EN_FLAG = 1 << 2,
+     SCTLR_CP15_BARRIER_EN_FLAG = 1 << 5,
+     SCTLR_B_EN_FLAG = 1 << 7, /* ARMv6-, big endian switch */
+     SCTLR_SWAP_EN_FLAG = 1 << 10,
+     SCTLR_BRANCH_PREDICT_EN_FLAG = 1 << 11,
+     SCTLR_INST_CACHE_EN_FLAG = 1 << 12,
+     SCTLR_HIGH_VECTORS_EN_FLAG = 1 << 13,
+     SCTLR_ROUND_ROBIN_EN_FLAG = 1 << 14,
+     SCTLR_HW_ACCESS_FLAG_EN_FLAG = 1 << 17,
+     SCTLR_WRITE_XN_EN_FLAG = 1 << 19, /* Only if virt. supported */
+     SCTLR_USPCE_WRITE_XN_EN_FLAG = 1 << 20, /* Only if virt. supported */
+     SCTLR_FAST_IRQ_EN_FLAG = 1 << 21, /* Disable impl. specific feat*/
+     SCTLR_UNALIGNED_EN_FLAG = 1 << 22, /* Must be 1 on armv7 */
+     SCTLR_IRQ_VECTORS_EN_FLAG = 1 << 24,
+     SCTLR_BIG_ENDIAN_EXC_FLAG = 1 << 25,
+     SCTLR_NMFI_EN_FLAG = 1 << 27,
+     SCTLR_TEX_REMAP_EN_FLAG = 1 << 28,
+     SCTLR_ACCESS_FLAG_EN_FLAG = 1 << 29,
+     SCTLR_THUMB_EXC_EN_FLAG = 1 << 30,
+ };
CONTROL_REG_GEN_READ(SCTLR, c1, 0, c0, 0);
CONTROL_REG_GEN_WRITE(SCTLR, c1, 0, c0, 0);
…
CONTROL_REG_GEN_WRITE(TLBIALLNSNHS, c8, 4, c7, 4);

- /* c9 are reserved */
+ /* c9 are performance monitoring resgisters */
+ enum {
+     PMCR_IMP_MASK = 0xff,
+     PMCR_IMP_SHIFT = 24,
+     PMCR_IDCODE_MASK = 0xff,
+     PMCR_IDCODE_SHIFT = 16,
+     PMCR_EVENT_NUM_MASK = 0x1f,
+     PMCR_EVENT_NUM_SHIFT = 11,
+     PMCR_DP_FLAG = 1 << 5,
+     PMCR_X_FLAG = 1 << 4,
+     PMCR_D_FLAG = 1 << 3,
+     PMCR_C_FLAG = 1 << 2,
+     PMCR_P_FLAG = 1 << 1,
+     PMCR_E_FLAG = 1 << 0,
+ };
+ CONTROL_REG_GEN_READ(PMCR, c9, 0, c12, 0);
+ CONTROL_REG_GEN_WRITE(PMCR, c9, 0, c12, 0);
+ enum {
+     PMCNTENSET_CYCLE_COUNTER_EN_FLAG = 1 << 31,
+ #define PMCNTENSET_COUNTER_EN_FLAG(c) (1 << c)
+ };
+ CONTROL_REG_GEN_READ(PMCNTENSET, c9, 0, c12, 1);
+ CONTROL_REG_GEN_WRITE(PMCNTENSET, c9, 0, c12, 1);
+ CONTROL_REG_GEN_READ(PMCCNTR, c9, 0, c13, 0);
+ CONTROL_REG_GEN_WRITE(PMCCNTR, c9, 0, c13, 0);
+

/*c10 has tons of reserved too */
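The new MIDR_* constants split the Main ID Register into implementer, variant, architecture, part number, and revision fields, which src/cpu/cpu.c below now uses instead of hand-written shifts. For illustration, a standalone host-runnable decoding sketch; the sample value is a hypothetical Cortex-A8-style ID, not something read from CP15 here:

#include <stdint.h>
#include <stdio.h>

enum {
    MIDR_IMPLEMENTER_MASK = 0xff,  MIDR_IMPLEMENTER_SHIFT = 24,
    MIDR_VARIANT_MASK = 0xf,       MIDR_VARIANT_SHIFT = 20,
    MIDR_ARCHITECTURE_MASK = 0xf,  MIDR_ARCHITECTURE_SHIFT = 16,
    MIDR_PART_NUMBER_MASK = 0xfff, MIDR_PART_NUMBER_SHIFT = 4,
    MIDR_REVISION_MASK = 0xf,      MIDR_REVISION_SHIFT = 0,
};

int main(void)
{
    const uint32_t midr = 0x413fc082;  /* sample: implementer 0x41 ('A'), part 0xc08 */
    printf("implementer 0x%02x variant %u arch 0x%x part 0x%03x rev %u\n",
        (unsigned)((midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK),
        (unsigned)((midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK),
        (unsigned)((midr >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK),
        (unsigned)((midr >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK),
        (unsigned)((midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK));
    return 0;
}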
kernel/arch/arm32/include/cpu.h
r6218d4b → r005b765

#include <arch/asm.h>

+ enum {
+     ARM_MAX_CACHE_LEVELS = 7,
+ };

/** Struct representing ARM CPU identification. */
…
    /** Revision number. */
    uint32_t rev_num;
+
+     struct {
+         unsigned ways;
+         unsigned sets;
+         unsigned line_size;
+         unsigned way_shift;
+         unsigned set_shift;
+     } dcache[ARM_MAX_CACHE_LEVELS];
+     unsigned dcache_levels;
} cpu_arch_t;
kernel/arch/arm32/include/cycle.h
r6218d4b → r005b765

#include <trace.h>
+ #include <arch/cp15.h>

/** Return count of CPU cycles.
…
NO_TRACE static inline uint64_t get_cycle(void)
{
+ #ifdef PROCESSOR_ARCH_armv7_a
+     if ((ID_PFR1_read() & ID_PFR1_GEN_TIMER_EXT_MASK) ==
+         ID_PFR1_GEN_TIMER_EXT) {
+         uint32_t low = 0, high = 0;
+         asm volatile( "MRRC p15, 0, %[low], %[high], c14": [low]"=r"(low), [high]"=r"(high));
+         return ((uint64_t)high << 32) | low;
+     } else {
+         return (uint64_t)PMCCNTR_read() * 64;
+     }
+ #endif
    return 0;
}
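get_cycle() now has two ARMv7-A paths: if the Generic Timer extension is present it reads the 64-bit count as two 32-bit halves via MRRC, otherwise it falls back to the PMU cycle counter, scaled by 64 because cpu_arch_init() (in src/cpu/cpu.c below) enables the PMCR.D divider. A host-runnable sketch of just the arithmetic, with the raw register values passed in as plain parameters rather than read from CP15:

#include <stdint.h>

/* Sketch: combine the two MRRC halves into one 64-bit count. */
static uint64_t combine_halves(uint32_t low, uint32_t high)
{
    return ((uint64_t)high << 32) | low;
}

/* Sketch: undo the divide-by-64 applied when PMCR.D is set. */
static uint64_t cycles_from_pmccntr(uint32_t pmccntr)
{
    return (uint64_t)pmccntr * 64;
}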
kernel/arch/arm32/include/mm/frame.h
r6218d4b → r005b765

#ifdef MACHINE_gta02
+
+ #define PHYSMEM_START_ADDR 0x30008000
#define BOOT_PAGE_TABLE_ADDRESS 0x30010000
+
#elif defined MACHINE_beagleboardxm
+
+ #define PHYSMEM_START_ADDR 0x80000000
#define BOOT_PAGE_TABLE_ADDRESS 0x80008000
+
#else
+
+ #define PHYSMEM_START_ADDR 0x00000000
#define BOOT_PAGE_TABLE_ADDRESS 0x00008000
+
#endif

…
#define BOOT_PAGE_TABLE_SIZE_IN_FRAMES (BOOT_PAGE_TABLE_SIZE >> FRAME_WIDTH)

- #ifdef MACHINE_gta02
- #define PHYSMEM_START_ADDR 0x30008000
- #elif defined MACHINE_beagleboardxm
- #define PHYSMEM_START_ADDR 0x80000000
- #else
- #define PHYSMEM_START_ADDR 0x00000000
- #endif

extern void frame_low_arch_init(void);
kernel/arch/arm32/include/mm/page.h
r6218d4b → r005b765

    set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))

- #if defined(PROCESSOR_armv6) | defined(PROCESSOR_armv7_a)
+ #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
#include "page_armv6.h"
- #elif defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
+ #elif defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
#include "page_armv4.h"
#else
kernel/arch/arm32/include/mm/page_fault.h
r6218d4b → r005b765

/** Decribes CP15 "fault status register" (FSR).
 *
-  * See ARM Architecture Reference Manual ch. B4.9.6 (pdf p.743).
+  * "VMSAv6 added a fifth fault status bit (bit[10]) to both the IFSR and DFSR.
+  * It is IMPLEMENTATION DEFINED how this bit is encoded in earlier versions of
+  * the architecture. A write flag (bit[11] of the DFSR) has also been
+  * introduced."
+  * ARM Architecture Reference Manual version i ch. B4.6 (PDF p. 719)
+  *
+  * See ARM Architecture Reference Manual ch. B4.9.6 (pdf p.743). for FSR info
 */
typedef union {
kernel/arch/arm32/include/regutils.h
r6218d4b → r005b765

#define STATUS_REG_IRQ_DISABLED_BIT (1 << 7)
#define STATUS_REG_MODE_MASK 0x1f
-
- /* COntrol register bit values see ch. B4.1.130 of ARM Architecture Reference
-  * Manual ARMv7-A and ARMv7-R edition, page 1687 */
- #define CP15_R1_MMU_EN (1 << 0)
- #define CP15_R1_ALIGN_CHECK_EN (1 << 1) /* Allow alignemnt check */
- #define CP15_R1_CACHE_EN (1 << 2)
- #define CP15_R1_CP15_BARRIER_EN (1 << 5)
- #define CP15_R1_B_EN (1 << 7) /* ARMv6- only big endian switch */
- #define CP15_R1_SWAP_EN (1 << 10)
- #define CP15_R1_BRANCH_PREDICT_EN (1 << 11)
- #define CP15_R1_INST_CACHE_EN (1 << 12)
- #define CP15_R1_HIGH_VECTORS_EN (1 << 13)
- #define CP15_R1_ROUND_ROBIN_EN (1 << 14)
- #define CP15_R1_HW_ACCESS_FLAG_EN (1 << 17)
- #define CP15_R1_WRITE_XN_EN (1 << 19) /* Only if virt. supported */
- #define CP15_R1_USPCE_WRITE_XN_EN (1 << 20) /* Only if virt. supported */
- #define CP15_R1_FAST_IRQ_EN (1 << 21) /* Disbale impl.specific features */
- #define CP15_R1_UNALIGNED_EN (1 << 22) /* Must be 1 on armv7 */
- #define CP15_R1_IRQ_VECTORS_EN (1 << 24)
- #define CP15_R1_BIG_ENDIAN_EXC (1 << 25)
- #define CP15_R1_NMFI_EN (1 << 27)
- #define CP15_R1_TEX_REMAP_EN (1 << 28)
- #define CP15_R1_ACCESS_FLAG_EN (1 << 29)
- #define CP15_R1_THUMB_EXC_EN (1 << 30)

/* ARM Processor Operation Modes */
kernel/arch/arm32/include/security_ext.h
r6218d4b → r005b765

static inline bool sec_ext_is_implemented()
{
- #ifdef PROCESSOR_armv7_a
+ #ifdef PROCESSOR_ARCH_armv7_a
    const uint32_t idpfr = ID_PFR1_read() & ID_PFR1_SEC_EXT_MASK;
    return idpfr == ID_PFR1_SEC_EXT || idpfr == ID_PFR1_SEC_EXT_RFR;
kernel/arch/arm32/src/cpu/cpu.c
r6218d4b → r005b765

 */

+ #include <arch/cache.h>
#include <arch/cpu.h>
+ #include <arch/cp15.h>
#include <cpu.h>
#include <arch.h>
#include <print.h>
+
+ static inline unsigned log2(unsigned val)
+ {
+     unsigned log = 0;
+     --val;
+     while (val) {
+         ++log;
+         val >>= 1;
+     }
+     return log;
+ }
+
+ static unsigned dcache_ways(unsigned level);
+ static unsigned dcache_sets(unsigned level);
+ static unsigned dcache_linesize_log(unsigned level);
+

/** Implementers (vendor) names */
…
static void arch_cpu_identify(cpu_arch_t *cpu)
{
-     uint32_t ident;
-     asm volatile (
-         "mrc p15, 0, %[ident], c0, c0, 0\n"
-         : [ident] "=r" (ident)
-     );
-
-     cpu->imp_num = ident >> 24;
-     cpu->variant_num = (ident << 8) >> 28;
-     cpu->arch_num = (ident << 12) >> 28;
-     cpu->prim_part_num = (ident << 16) >> 20;
-     cpu->rev_num = (ident << 28) >> 28;
+     const uint32_t ident = MIDR_read();
+
+     cpu->imp_num = (ident >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
+     cpu->variant_num = (ident >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
+     cpu->arch_num = (ident >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK;
+     cpu->prim_part_num = (ident >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK;
+     cpu->rev_num = (ident >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
+
    // TODO CPUs with arch_num == 0xf use CPUID scheme for identification
+     cpu->dcache_levels = dcache_levels();
+
+     for (unsigned i = 0; i < cpu->dcache_levels; ++i) {
+         cpu->dcache[i].ways = dcache_ways(i);
+         cpu->dcache[i].sets = dcache_sets(i);
+         cpu->dcache[i].way_shift = 31 - log2(cpu->dcache[i].ways);
+         cpu->dcache[i].set_shift = dcache_linesize_log(i);
+         cpu->dcache[i].line_size = 1 << dcache_linesize_log(i);
+         printf("Found DCache L%u: %u-way, %u sets, %u byte lines "
+             "(shifts: w%u, s%u)\n", i + 1, cpu->dcache[i].ways,
+             cpu->dcache[i].sets, cpu->dcache[i].line_size,
+             cpu->dcache[i].way_shift, cpu->dcache[i].set_shift);
+     }
}

…
void cpu_arch_init(void)
{
- #if defined(PROCESSOR_armv7_a) | defined(PROCESSOR_armv6)
-     uint32_t control_reg = 0;
-     asm volatile (
-         "mrc p15, 0, %[control_reg], c1, c0"
-         : [control_reg] "=r" (control_reg)
-     );
+     uint32_t control_reg = SCTLR_read();

-     /* Turn off tex remap, RAZ ignores writes prior to armv7 */
-     control_reg &= ~CP15_R1_TEX_REMAP_EN;
-     /* Turn off accessed flag, RAZ ignores writes prior to armv7 */
-     control_reg &= ~(CP15_R1_ACCESS_FLAG_EN | CP15_R1_HW_ACCESS_FLAG_EN);
-     /* Enable unaligned access, RAZ ignores writes prior to armv6
-      * switchable on armv6, RAO ignores writes on armv7,
+     /* Turn off tex remap, RAZ/WI prior to armv7 */
+     control_reg &= ~SCTLR_TEX_REMAP_EN_FLAG;
+     /* Turn off accessed flag, RAZ/WI prior to armv7 */
+     control_reg &= ~(SCTLR_ACCESS_FLAG_EN_FLAG | SCTLR_HW_ACCESS_FLAG_EN_FLAG);
+
+     /* Unaligned access is supported on armv6+ */
+ #if defined(PROCESSOR_ARCH_armv7_a) | defined(PROCESSOR_ARCH_armv6)
+     /* Enable unaligned access, RAZ/WI prior to armv6
+      * switchable on armv6, RAO/WI writes on armv7,
     * see ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition
     * L.3.1 (p. 2456) */
-     control_reg |= CP15_R1_UNALIGNED_EN;
+     control_reg |= SCTLR_UNALIGNED_EN_FLAG;
    /* Disable alignment checks, this turns unaligned access to undefined,
     * unless U bit is set. */
-     control_reg &= ~CP15_R1_ALIGN_CHECK_EN;
+     control_reg &= ~SCTLR_ALIGN_CHECK_EN_FLAG;
    /* Enable caching, On arm prior to armv7 there is only one level
     * of caches. Data cache is coherent.
…
     * ARM Architecture Reference Manual ARMv7-A and ARMv7-R Edition
     * B3.11.1 (p. 1383)
     * ICache coherency is elaborate on in barrier.h.
-      * We are safe to turn these on.
+      * We are safe to turn this on. For arm v6 see ch L.6.2 (p. 2469)
+      * L2 Cache for armv7 is enabled by default (i.e. controlled by
+      * this flag).
     */
-     control_reg |= CP15_R1_CACHE_EN | CP15_R1_INST_CACHE_EN;
-
-     asm volatile (
-         "mcr p15, 0, %[control_reg], c1, c0"
-         :: [control_reg] "r" (control_reg)
-     );
- #endif
+     control_reg |= SCTLR_CACHE_EN_FLAG;
+ #endif
+ #ifdef PROCESSOR_ARCH_armv7_a
+     /* ICache coherency is elaborate on in barrier.h.
+      * VIPT and PIPT caches need maintenance only on code modify,
+      * so it should be safe for general use.
+      * Enable branch predictors too as they follow the same rules
+      * as ICache and they can be flushed together
+      */
+     if ((CTR_read() & CTR_L1I_POLICY_MASK) != CTR_L1I_POLICY_AIVIVT) {
+         control_reg |=
+             SCTLR_INST_CACHE_EN_FLAG | SCTLR_BRANCH_PREDICT_EN_FLAG;
+     }
+ #endif
+     SCTLR_write(control_reg);
+
#ifdef CONFIG_FPU
    fpu_setup();
+ #endif
+
+ #ifdef PROCESSOR_ARCH_armv7_a
+     if ((ID_PFR1_read() & ID_PFR1_GEN_TIMER_EXT_MASK) !=
+         ID_PFR1_GEN_TIMER_EXT) {
+         PMCR_write(PMCR_read() | PMCR_E_FLAG | PMCR_D_FLAG);
+         PMCNTENSET_write(PMCNTENSET_CYCLE_COUNTER_EN_FLAG);
+     }
#endif
}
…
}

+ /** See chapter B4.1.19 of ARM Architecture Reference Manual */
+ static unsigned dcache_linesize_log(unsigned level)
+ {
+ #ifdef PROCESSOR_ARCH_armv7_a
+     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+     const unsigned ls_log = 2 +
+         ((CCSIDR_read() >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);
+     return ls_log + 2; //return log2(bytes)
+ #endif
+     return 0;
+ }
+
+ /** See chapter B4.1.19 of ARM Architecture Reference Manual */
+ static unsigned dcache_ways(unsigned level)
+ {
+ #ifdef PROCESSOR_ARCH_armv7_a
+     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+     const unsigned ways = 1 +
+         ((CCSIDR_read() >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
+     return ways;
+ #endif
+     return 0;
+ }
+
+ /** See chapter B4.1.19 of ARM Architecture Reference Manual */
+ static unsigned dcache_sets(unsigned level)
+ {
+ #ifdef PROCESSOR_ARCH_armv7_a
+     CSSELR_write((level & CCSELR_LEVEL_MASK) << CCSELR_LEVEL_SHIFT);
+     const unsigned sets = 1 +
+         ((CCSIDR_read() >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
+     return sets;
+ #endif
+     return 0;
+ }
+
+ unsigned dcache_levels(void)
+ {
+     unsigned levels = 0;
+ #ifdef PROCESSOR_ARCH_armv7_a
+     const uint32_t val = CLIDR_read();
+     for (unsigned i = 1; i <= 7; ++i) {
+         const unsigned ctype = CLIDR_CACHE(i, val);
+         switch (ctype) {
+         case CLIDR_DCACHE_ONLY:
+         case CLIDR_SEP_CACHE:
+         case CLIDR_UNI_CACHE:
+             ++levels;
+         default:
+             (void)0;
+         }
+     }
+ #endif
+     return levels;
+ }
+
+ static void dcache_clean_manual(unsigned level, bool invalidate,
+     unsigned ways, unsigned sets, unsigned way_shift, unsigned set_shift)
+ {
+     for (unsigned i = 0; i < ways; ++i) {
+         for (unsigned j = 0; j < sets; ++j) {
+             const uint32_t val =
+                 ((level & 0x7) << 1) |
+                 (j << set_shift) | (i << way_shift);
+             if (invalidate)
+                 DCCISW_write(val);
+             else
+                 DCCSW_write(val);
+         }
+     }
+ }
+
+ void dcache_flush(void)
+ {
+     /* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
+     const unsigned levels = dcache_levels();
+     for (unsigned i = 0; i < levels; ++i) {
+         const unsigned ways = dcache_ways(i);
+         const unsigned sets = dcache_sets(i);
+         const unsigned way_shift = 31 - log2(ways);
+         const unsigned set_shift = dcache_linesize_log(i);
+         dcache_clean_manual(i, false, ways, sets, way_shift, set_shift);
+     }
+ }
+
+ void dcache_flush_invalidate(void)
+ {
+     /* See ARM Architecture Reference Manual ch. B4.2.1 p. B4-1724 */
+     const unsigned levels = dcache_levels();
+     for (unsigned i = 0; i < levels; ++i) {
+         const unsigned ways = dcache_ways(i);
+         const unsigned sets = dcache_sets(i);
+         const unsigned way_shift = 31 - log2(ways);
+         const unsigned set_shift = dcache_linesize_log(i);
+         dcache_clean_manual(i, true, ways, sets, way_shift, set_shift);
+     }
+ }
+
+ void cpu_dcache_flush(void)
+ {
+     for (unsigned i = 0; i < CPU->arch.dcache_levels; ++i)
+         dcache_clean_manual(i, false,
+             CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
+             CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
+ }
+
+ void cpu_dcache_flush_invalidate(void)
+ {
+     const unsigned levels = dcache_levels();
+     for (unsigned i = 0; i < levels; ++i)
+         dcache_clean_manual(i, true,
+             CPU->arch.dcache[i].ways, CPU->arch.dcache[i].sets,
+             CPU->arch.dcache[i].way_shift, CPU->arch.dcache[i].set_shift);
+ }
+
+ void icache_invalidate(void)
+ {
+     ICIALLU_write(0);
+ }
+
/** @}
 */
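dcache_ways(), dcache_sets() and dcache_linesize_log() above derive the data-cache geometry from CCSIDR after selecting a level in CSSELR; dcache_clean_manual() then packs level, set and way into the operand written to DCCSW/DCCISW. A standalone sketch of the CCSIDR arithmetic (host-runnable; the sample value encodes a hypothetical 32 KiB, 4-way cache with 64-byte lines, no CP15 access involved):

#include <stdint.h>
#include <stdio.h>

enum {
    CCSIDR_NUMSETS_MASK = 0x7fff, CCSIDR_NUMSETS_SHIFT = 13,
    CCSIDR_ASSOC_MASK = 0x3ff,    CCSIDR_ASSOC_SHIFT = 3,
    CCSIDR_LINESIZE_MASK = 0x7,   CCSIDR_LINESIZE_SHIFT = 0,
};

int main(void)
{
    /* NumSets = sets - 1, Assoc = ways - 1, LineSize = log2(words) - 2 */
    const uint32_t ccsidr = (127u << CCSIDR_NUMSETS_SHIFT) |
        (3u << CCSIDR_ASSOC_SHIFT) | (2u << CCSIDR_LINESIZE_SHIFT);

    const unsigned ways = 1 + ((ccsidr >> CCSIDR_ASSOC_SHIFT) & CCSIDR_ASSOC_MASK);
    const unsigned sets = 1 + ((ccsidr >> CCSIDR_NUMSETS_SHIFT) & CCSIDR_NUMSETS_MASK);
    const unsigned line_log = 4 + ((ccsidr >> CCSIDR_LINESIZE_SHIFT) & CCSIDR_LINESIZE_MASK);

    /* 4 ways * 128 sets * 64-byte lines = 32 KiB */
    printf("%u-way, %u sets, %u-byte lines, %u KiB total\n",
        ways, sets, 1u << line_log, (ways * sets << line_log) / 1024);
    return 0;
}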
kernel/arch/arm32/src/exception.c
r6218d4b → r005b765

#include <interrupt.h>
#include <arch/mm/page_fault.h>
+ #include <arch/cp15.h>
#include <arch/barrier.h>
#include <print.h>
…
    /* make it LDR instruction and store at exception vector */
    *vector = handler_address_ptr | LDR_OPCODE;
-     smc_coherence(*vector);
+     smc_coherence(vector);

    /* store handler's address */
…
static void high_vectors(void)
{
-     uint32_t control_reg = 0;
-     asm volatile (
-         "mrc p15, 0, %[control_reg], c1, c0"
-         : [control_reg] "=r" (control_reg)
-     );
+     uint32_t control_reg = SCTLR_read();

    /* switch on the high vectors bit */
-     control_reg |= CP15_R1_HIGH_VECTORS_EN;
-
-     asm volatile (
-         "mcr p15, 0, %[control_reg], c1, c0"
-         :: [control_reg] "r" (control_reg)
-     );
+     control_reg |= SCTLR_HIGH_VECTORS_EN_FLAG;
+
+     SCTLR_write(control_reg);
}
#endif
kernel/arch/arm32/src/fpu_context.c
r6218d4b → r005b765

 * rely on user decision to use CONFIG_FPU.
 */
- #ifdef PROCESSOR_armv7_a
+ #ifdef PROCESSOR_ARC_armv7_a
    const uint32_t cpacr = CPACR_read();
    /* FPU needs access to coprocessor 10 and 11.
…
 * rely on user decision to use CONFIG_FPU.
 */
- #ifndef PROCESSOR_armv7_a
-     return;
- #endif
-
+ #ifdef PROCESSOR_ARCH_armv7_a
    /* Allow coprocessor access */
    uint32_t cpacr = CPACR_read();
…
    cpacr |= (CPACR_CP_FULL_ACCESS(10) | CPACR_CP_FULL_ACCESS(11));
    CPACR_write(cpacr);
-
-     smc_coherence(0);
+ #endif
}
kernel/arch/arm32/src/mach/beagleboardxm/beagleboardxm.c
r6218d4b → r005b765

#include <genarch/drivers/amdm37x_uart/amdm37x_uart.h>
#include <genarch/drivers/amdm37x_gpt/amdm37x_gpt.h>
- #include <genarch/drivers/amdm37x_dispc/amdm37x_dispc.h>
#include <genarch/fb/fb.h>
#include <genarch/srln/srln.h>
…

static struct beagleboard {
-     amdm37x_dispc_regs_t *dispc;
    amdm37x_irc_regs_t *irc_addr;
    amdm37x_uart_t uart;
…
}

- static void bbxm_setup_fb(unsigned width, unsigned height, unsigned bpp)
- {
-     const unsigned pixel_bytes = (bpp / 8);
-     const size_t size = ALIGN_UP(width * height * pixel_bytes, FRAME_SIZE);
-     const unsigned frames = size / FRAME_SIZE;
-     unsigned order = 0;
-     unsigned frame = 1;
-     while (frame < frames) {
-         frame *= 2;
-         ++order;
-     }
-     /* prefer highmem as we don't care about virtual mapping. */
-     void *buffer = frame_alloc(order, FRAME_LOWMEM);
-     if (!buffer) {
-         printf("Failed to allocate framebuffer.\n");
-         return;
-     }
-
-     amdm37x_dispc_setup_fb(beagleboard.dispc, width, height, bpp,
-         (uintptr_t) buffer);
-
-     fb_properties_t prop = {
-         .addr = (uintptr_t)buffer,
-         .offset = 0,
-         .x = width,
-         .y = height,
-         .scan = width * pixel_bytes,
-         .visual = VISUAL_RGB_5_6_5_LE
-     };
-     switch (bpp)
-     {
-     case 8:
-         prop.visual = VISUAL_INDIRECT_8; break;
-     case 16:
-         prop.visual = VISUAL_RGB_5_6_5_LE; break;
-     case 24:
-         prop.visual = VISUAL_BGR_8_8_8; break;
-     case 32:
-         prop.visual = VISUAL_RGB_8_8_8_0; break;
-     default:
-         printf("Invalid framebuffer bit depth: bailing out.\n");
-         return;
-     }
-     outdev_t *fb_dev = fb_init(&prop);
-     if (fb_dev)
-         stdout_wire(fb_dev);
-
- }
-
static void bb_timer_irq_handler(irq_t *irq)
{
…
    ASSERT(beagleboard.irc_addr);
    amdm37x_irc_init(beagleboard.irc_addr);
-
-     /* Map display controller */
-     beagleboard.dispc = (void*) km_map(AMDM37x_DISPC_BASE_ADDRESS,
-         AMDM37x_DISPC_SIZE, PAGE_NOT_CACHEABLE);
-     ASSERT(beagleboard.dispc);

    /* Initialize timer. Use timer1, because it is in WKUP power domain
…
static void bbxm_output_init(void)
{
- #ifdef CONFIG_FB
-     bbxm_setup_fb(CONFIG_BFB_WIDTH, CONFIG_BFB_HEIGHT, CONFIG_BFB_BPP);
- #else
-     (void)bbxm_setup_fb;
- #endif
    /* UART3 is wired to external RS232 connector */
    const bool ok = amdm37x_uart_init(&beagleboard.uart,
kernel/arch/arm32/src/mm/page_fault.c
r6218d4b → r005b765

 */
#include <panic.h>
+ #include <arch/cp15.h>
#include <arch/exception.h>
#include <arch/mm/page_fault.h>
…
}

- /** Returns value stored in comnbined/data fault status register.
-  *
-  * @return Value stored in CP15 fault status register (FSR).
-  *
-  * "VMSAv6 added a fifth fault status bit (bit[10]) to both the IFSR and DFSR.
-  * It is IMPLEMENTATION DEFINED how this bit is encoded in earlier versions of
-  * the architecture. A write flag (bit[11] of the DFSR) has also been
-  * introduced."
-  * ARM Architecture Reference Manual version i ch. B4.6 (PDF p. 719)
-  *
-  * See ch. B4.9.6 for location of data/instruction FSR.
-  *
-  */
- static inline fault_status_t read_data_fault_status_register(void)
- {
-     fault_status_t fsu;
-
-     /* Combined/Data fault status is stored in CP15 register 5, c0. */
-     asm volatile (
-         "mrc p15, 0, %[dummy], c5, c0, 0"
-         : [dummy] "=r" (fsu.raw)
-     );
-
-     return fsu;
- }
-
- /** Returns DFAR (fault address register) content.
-  *
-  * This register is equivalent to FAR on pre armv6 machines.
-  *
-  * @return DFAR (fault address register) content (address that caused a page
-  * fault)
-  */
- static inline uintptr_t read_data_fault_address_register(void)
- {
-     uintptr_t ret;
-
-     /* fault adress is stored in CP15 register 6 */
-     asm volatile (
-         "mrc p15, 0, %[ret], c6, c0, 0"
-         : [ret] "=r" (ret)
-     );
-
-     return ret;
- }
-
- #if defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
+ #if defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
/** Decides whether read or write into memory is requested.
 *
…
void data_abort(unsigned int exc_no, istate_t *istate)
{
-     const uintptr_t badvaddr = read_data_fault_address_register();
-     const fault_status_t fsr = read_data_fault_status_register();
+     const uintptr_t badvaddr = DFAR_read();
+     const fault_status_t fsr = { .raw = DFSR_read() };
    const dfsr_source_t source = fsr.raw & DFSR_SOURCE_MASK;

…
    }

- #if defined(PROCESSOR_armv6) | defined(PROCESSOR_armv7_a)
+ #if defined(PROCESSOR_ARCH_armv6) | defined(PROCESSOR_ARCH_armv7_a)
    const pf_access_t access =
        fsr.data.wr ? PF_ACCESS_WRITE : PF_ACCESS_READ;
- #elif defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
+ #elif defined(PROCESSOR_ARCH_armv4) | defined(PROCESSOR_ARCH_armv5)
    const pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
#else
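data_abort() now reads the fault address and status directly through the DFAR_read()/DFSR_read() accessors from cp15.h and, on ARMv6+, uses the DFSR write flag to classify the access. A standalone sketch of the bit layout described by the ARM ARM passage quoted in page_fault.h (bits [3:0] plus bit [10] form the fault status, bit [11] is the write flag); the raw value is a plain parameter here, not a register read:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: extended 5-bit fault status, FS[4:0] = {bit 10, bits 3:0}. */
static unsigned dfsr_status(uint32_t dfsr)
{
    return (dfsr & 0xf) | (((dfsr >> 10) & 1) << 4);
}

/* Sketch: ARMv6+ write/not-read flag, mirroring the fsr.data.wr test above. */
static bool dfsr_is_write(uint32_t dfsr)
{
    return (dfsr >> 11) & 1;
}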