Changes in boot/arch/arm32/src/mm.c [9d58539:f7fba727] in mainline
- File:
-
- 1 edited
-
boot/arch/arm32/src/mm.c (modified) (5 diffs)
Legend:
- Unmodified
- Added
- Removed
-
boot/arch/arm32/src/mm.c
r9d58539 rf7fba727 37 37 #include <arch/asm.h> 38 38 #include <arch/mm.h> 39 #include <arch/cp15.h> 40 41 #ifdef PROCESSOR_ARCH_armv7_a 42 static unsigned log2(unsigned val) 43 { 44 unsigned log = 0; 45 while (val >> log++); 46 return log - 2; 47 } 48 49 static void dcache_invalidate_level(unsigned level) 50 { 51 CSSELR_write(level << 1); 52 const uint32_t ccsidr = CCSIDR_read(); 53 const unsigned sets = CCSIDR_SETS(ccsidr); 54 const unsigned ways = CCSIDR_WAYS(ccsidr); 55 const unsigned line_log = CCSIDR_LINESIZE_LOG(ccsidr); 56 const unsigned set_shift = line_log; 57 const unsigned way_shift = 32 - log2(ways); 58 59 for (unsigned k = 0; k < ways; ++k) 60 for (unsigned j = 0; j < sets; ++j) { 61 const uint32_t val = (level << 1) | 62 (j << set_shift) | (k << way_shift); 63 DCISW_write(val); 64 } 65 } 66 67 /** invalidate all dcaches -- armv7 */ 68 static void cache_invalidate(void) 69 { 70 const uint32_t cinfo = CLIDR_read(); 71 for (unsigned i = 0; i < 7; ++i) { 72 switch (CLIDR_CACHE(i, cinfo)) 73 { 74 case CLIDR_DCACHE_ONLY: 75 case CLIDR_SEP_CACHE: 76 case CLIDR_UNI_CACHE: 77 dcache_invalidate_level(i); 78 } 79 } 80 asm volatile ( "dsb\n" ); 81 ICIALLU_write(0); 82 asm volatile ( "isb\n" ); 83 } 84 #endif 85 86 /** Disable the MMU */ 87 static void disable_paging(void) 88 { 89 asm volatile ( 90 "mrc p15, 0, r0, c1, c0, 0\n" 91 "bic r0, r0, #1\n" 92 "mcr p15, 0, r0, c1, c0, 0\n" 93 ::: "r0" 94 ); 95 } 96 97 /** Check if caching can be enabled for a given memory section. 98 * 99 * Memory areas used for I/O are excluded from caching. 100 * At the moment caching is enabled only on GTA02. 101 * 102 * @param section The section number. 103 * 104 * @return 1 if the given section can be mapped as cacheable, 0 otherwise. 
105 */ 106 static inline int section_cacheable(pfn_t section) 107 { 108 const unsigned long address = section << PTE_SECTION_SHIFT; 109 #ifdef MACHINE_gta02 110 if (address < GTA02_IOMEM_START || address >= GTA02_IOMEM_END) 111 return 1; 112 #elif defined MACHINE_beagleboardxm 113 if (address >= BBXM_RAM_START && address < BBXM_RAM_END) 114 return 1; 115 #elif defined MACHINE_beaglebone 116 if (address >= AM335x_RAM_START && address < AM335x_RAM_END) 117 return 1; 118 #endif 119 return address * 0; 120 } 39 121 40 122 /** Initialize "section" page table entry. … … 54 136 { 55 137 pte->descriptor_type = PTE_DESCRIPTOR_SECTION; 56 pte->bufferable = 0; 57 pte->cacheable = 0; 58 pte->impl_specific = 0; 138 pte->xn = 0; 59 139 pte->domain = 0; 60 140 pte->should_be_zero_1 = 0; 61 pte->access_permission = PTE_AP_USER_NO_KERNEL_RW; 141 pte->access_permission_0 = PTE_AP_USER_NO_KERNEL_RW; 142 #ifdef PROCESSOR_ARCH_armv7_a 143 /* 144 * Keep this setting in sync with memory type attributes in: 145 * init_boot_pt (boot/arch/arm32/src/mm.c) 146 * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h) 147 * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h) 148 */ 149 pte->tex = section_cacheable(frame) ? 5 : 0; 150 pte->cacheable = section_cacheable(frame) ? 0 : 0; 151 pte->bufferable = section_cacheable(frame) ? 1 : 0; 152 #else 153 pte->bufferable = 1; 154 pte->cacheable = section_cacheable(frame); 155 pte->tex = 0; 156 #endif 157 pte->access_permission_1 = 0; 158 pte->shareable = 0; 159 pte->non_global = 0; 62 160 pte->should_be_zero_2 = 0; 161 pte->non_secure = 0; 63 162 pte->section_base_addr = frame; 64 163 } … … 67 166 static void init_boot_pt(void) 68 167 { 69 pfn_t split_page = 0x800; 70 71 /* Create 1:1 virtual-physical mapping (in lower 2 GB). */ 72 pfn_t page; 73 for (page = 0; page < split_page; page++) 168 /* 169 * Create 1:1 virtual-physical mapping. 
 170 * Physical memory on BBxM and BBone starts at 2GB 171 * boundary, icp has a memory mirror at 2GB. 172 * (ARM Integrator Core Module User guide ch. 6.3, p. 6-7) 173 * gta02 somehow works (probably due to limited address size), 174 * s3c2442b manual ch. 5, p.5-1: 175 * "Address space: 128Mbytes per bank (total 1GB/8 banks)" 176 */ 177 for (pfn_t page = 0; page < PTL0_ENTRIES; ++page) 74 178 init_ptl0_section(&boot_pt[page], page); 75 179 76 180 /* 77 * Create 1:1 virtual-physical mapping in kernel space 78 * (upper 2 GB), physical addresses start from 0. 79 */ 80 for (page = split_page; page < PTL0_ENTRIES; page++) 81 init_ptl0_section(&boot_pt[page], page - split_page); 82 83 asm volatile ( 84 "mcr p15, 0, %[pt], c2, c0, 0\n" 85 :: [pt] "r" (boot_pt) 86 ); 181 * Tell MMU page might be cached. Keep this setting in sync 182 * with memory type attributes in: 183 * init_ptl0_section (boot/arch/arm32/src/mm.c) 184 * set_pt_level1_flags (kernel/arch/arm32/include/arch/mm/page_armv6.h) 185 * set_ptl0_addr (kernel/arch/arm32/include/arch/mm/page.h) 186 */ 187 uint32_t val = (uint32_t)boot_pt & TTBR_ADDR_MASK; 188 val |= TTBR_RGN_WBWA_CACHE | TTBR_C_FLAG; 189 TTBR0_write(val); 87 190 } 88 191 … … 95 198 /* Behave as a client of domains */ 96 199 "ldr r0, =0x55555555\n" 97 "mcr p15, 0, r0, c3, c0, 0\n" 98 200 "mcr p15, 0, r0, c3, c0, 0\n" 201 99 202 /* Current settings */ 100 203 "mrc p15, 0, r0, c1, c0, 0\n" 101 204 102 /* Mask to enable paging */ 103 "ldr r1, =0x00000001\n" 205 /* Enable ICache, DCache, BPredictors and MMU, 206 * we disable caches before jumping to kernel 207 * so this is safe for all archs. 208 * Enable VMSAv6; the bit (23) is only writable on ARMv6. 209 * (and QEMU) 210 */ 211 #ifdef PROCESSOR_ARCH_armv6 212 "ldr r1, =0x00801805\n" 213 #else 214 "ldr r1, =0x00001805\n" 215 #endif 216 104 217 "orr r0, r0, r1\n" 218 219 /* Invalidate the TLB content before turning on the MMU. 
 220 * ARMv7-A Reference manual, B3.10.3 221 */ 222 "mcr p15, 0, r0, c8, c7, 0\n" 105 223 106 /* Store settings */224 /* Store settings, enable the MMU */ 107 225 "mcr p15, 0, r0, c1, c0, 0\n" 108 226 ::: "r0", "r1" … … 112 230 /** Start the MMU - initialize page table and enable paging. */ 113 231 void mmu_start() { 232 disable_paging(); 233 #ifdef PROCESSOR_ARCH_armv7_a 234 /* Make sure we run code from memory when caches are enabled, 235 * and make sure we read memory data too. This part is ARMv7 specific as 236 * ARMv7 no longer invalidates caches on restart. 237 * See chapter B2.2.2 of ARM Architecture Reference Manual p. B2-1263*/ 238 cache_invalidate(); 239 #endif 114 240 init_boot_pt(); 115 241 enable_paging();
Note:
See TracChangeset
for help on using the changeset viewer.
