Changeset 25eec4e in mainline for boot/arch/arm32/src/mm.c
- Timestamp:
- 2013-04-19T18:38:18Z (12 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 6d717a4
- Parents:
- a1e2df13 (diff), 289cb7dd (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 file edited
Legend:
- Unmodified
- Added
- Removed
-
boot/arch/arm32/src/mm.c
ra1e2df13 r25eec4e 38 38 #include <arch/mm.h> 39 39 40 /** Disable the MMU */ 41 static void disable_paging(void) 42 { 43 asm volatile ( 44 "mrc p15, 0, r0, c1, c0, 0\n" 45 "bic r0, r0, #1\n" 46 "mcr p15, 0, r0, c1, c0, 0\n" 47 ::: "r0" 48 ); 49 } 50 51 /** Check if caching can be enabled for a given memory section. 52 * 53 * Memory areas used for I/O are excluded from caching. 54 * At the moment caching is enabled only on GTA02. 55 * 56 * @param section The section number. 57 * 58 * @return 1 if the given section can be mapped as cacheable, 0 otherwise. 59 */ 60 static inline int section_cacheable(pfn_t section) 61 { 62 #ifdef MACHINE_gta02 63 unsigned long address = section << PTE_SECTION_SHIFT; 64 65 if (address >= GTA02_IOMEM_START && address < GTA02_IOMEM_END) 66 return 0; 67 else 68 return 1; 69 #elif defined MACHINE_beagleboardxm 70 const unsigned long address = section << PTE_SECTION_SHIFT; 71 if (address >= BBXM_RAM_START && address < BBXM_RAM_END) 72 return 1; 73 #elif defined MACHINE_beaglebone 74 const unsigned long address = section << PTE_SECTION_SHIFT; 75 if (address >= AM335x_RAM_START && address < AM335x_RAM_END) 76 return 1; 77 #endif 78 return 0; 79 } 80 40 81 /** Initialize "section" page table entry. 
41 82 * … … 54 95 { 55 96 pte->descriptor_type = PTE_DESCRIPTOR_SECTION; 56 pte->bufferable = 0;57 pte->cacheable = 0;58 pte-> impl_specific= 0;97 pte->bufferable = 1; 98 pte->cacheable = section_cacheable(frame); 99 pte->xn = 0; 59 100 pte->domain = 0; 60 101 pte->should_be_zero_1 = 0; 61 pte->access_permission = PTE_AP_USER_NO_KERNEL_RW; 102 pte->access_permission_0 = PTE_AP_USER_NO_KERNEL_RW; 103 pte->tex = 0; 104 pte->access_permission_1 = 0; 105 pte->shareable = 0; 106 pte->non_global = 0; 62 107 pte->should_be_zero_2 = 0; 108 pte->non_secure = 0; 63 109 pte->section_base_addr = frame; 64 110 } … … 67 113 static void init_boot_pt(void) 68 114 { 69 pfn_t split_page = 0x800; 70 115 const pfn_t split_page = PTL0_ENTRIES; 71 116 /* Create 1:1 virtual-physical mapping (in lower 2 GB). */ 72 117 pfn_t page; 73 118 for (page = 0; page < split_page; page++) 74 119 init_ptl0_section(&boot_pt[page], page); 75 76 /*77 * Create 1:1 virtual-physical mapping in kernel space78 * (upper 2 GB), physical addresses start from 0.79 */80 for (page = split_page; page < PTL0_ENTRIES; page++)81 init_ptl0_section(&boot_pt[page], page - split_page);82 120 83 121 asm volatile ( … … 95 133 /* Behave as a client of domains */ 96 134 "ldr r0, =0x55555555\n" 97 "mcr p15, 0, r0, c3, c0, 0\n" 98 135 "mcr p15, 0, r0, c3, c0, 0\n" 136 99 137 /* Current settings */ 100 138 "mrc p15, 0, r0, c1, c0, 0\n" 101 139 102 /* Mask to enable paging */ 103 "ldr r1, =0x00000001\n" 140 /* Enable ICache, DCache, BPredictors and MMU, 141 * we disable caches before jumping to kernel 142 * so this is safe for all archs. 143 */ 144 "ldr r1, =0x00001805\n" 145 104 146 "orr r0, r0, r1\n" 147 148 /* Invalidate the TLB content before turning on the MMU. 
149 * ARMv7-A Reference manual, B3.10.3 150 */ 151 "mcr p15, 0, r0, c8, c7, 0\n" 105 152 106 /* Store settings */153 /* Store settings, enable the MMU */ 107 154 "mcr p15, 0, r0, c1, c0, 0\n" 108 155 ::: "r0", "r1" … … 112 159 /** Start the MMU - initialize page table and enable paging. */ 113 160 void mmu_start() { 161 disable_paging(); 114 162 init_boot_pt(); 115 163 enable_paging();
Note:
See TracChangeset
for help on using the changeset viewer.