Changeset 70a1c59 in mainline for kernel/arch/sparc64/include/trap/sun4v/mmu.h
- Timestamp: 2009-11-15T19:12:47Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 5f678b1c
- Parents: 69b68d1f
- File: 1 edited
Legend (markers used in the diff below):
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/arch/sparc64/include/trap/sun4v/mmu.h
--- kernel/arch/sparc64/include/trap/sun4v/mmu.h (r69b68d1f)
+++ kernel/arch/sparc64/include/trap/sun4v/mmu.h (r70a1c59)

 /*
  * Copyright (c) 2006 Jakub Jermar
+ * Copyright (c) 2008 Pavel Rimsky
  * All rights reserved.
  *
…
 #include <arch/stack.h>
 #include <arch/regdef.h>
+#include <arch/arch.h>
+#include <arch/sun4v/arch.h>
+#include <arch/sun4v/hypercall.h>
+#include <arch/mm/sun4v/mmu.h>
 #include <arch/mm/tlb.h>
 #include <arch/mm/mmu.h>
…
 #define TT_FAST_DATA_ACCESS_MMU_MISS    0x68
 #define TT_FAST_DATA_ACCESS_PROTECTION  0x6c
+#define TT_CPU_MONDO                    0x7c

 #define FAST_MMU_HANDLER_SIZE           128
…
 #ifdef __ASM__

+/* MMU fault status area data fault offset */
+#define FSA_DFA_OFFSET          0x48
+
+/* MMU fault status area data context */
+#define FSA_DFC_OFFSET          0x50
+
+/* offset of the target address within the TTE Data entry */
+#define TTE_DATA_TADDR_OFFSET   13
+
 .macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-	/*
-	 * First, try to refill TLB from TSB.
-	 */
-#ifdef CONFIG_TSB
-	ldxa [%g0] ASI_IMMU, %g1			! read TSB Tag Target Register
-	ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2	! read TSB 8K Pointer
-	ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
-	cmp %g1, %g4					! is this the entry we are looking for?
-	bne,pn %xcc, 0f
-	nop
-	stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG		! copy mapping from ITSB to ITLB
-	retry
-#endif
-
-0:
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 	PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss
 .endm

+/*
+ * Handler of the Fast Data Access MMU Miss trap. If the trap occurred in the kernel
+ * (context 0), an identity mapping (with displacement) is installed. Otherwise
+ * a higher level service routine is called.
+ */
 .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
-	//MH
-	save %sp, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
-	set 0x8000, %o0
-	set 0x0, %o1
-	setx 0x80000000804087c3, %g1, %o2
-	set 0x3, %o3
-	ta 0x83
-	restore %g0, 0, %g0
-	retry
-#if 0
-	/*
-	 * First, try to refill TLB from TSB.
-	 */
+	mov SCRATCHPAD_MMU_FSA, %g1
+	ldxa [%g1] ASI_SCRATCHPAD, %g1			! g1 <= RA of MMU fault status area

-#ifdef CONFIG_TSB
-	ldxa [%g0] ASI_DMMU, %g1			! read TSB Tag Target Register
-	srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2	! is this a kernel miss?
-	brz,pn %g2, 0f
-	ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3	! read TSB 8K Pointer
-	ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4		! 16-byte atomic load into %g4 and %g5
-	cmp %g1, %g4					! is this the entry we are looking for?
-	bne,pn %xcc, 0f
+	/* read faulting context */
+	add %g1, FSA_DFC_OFFSET, %g2			! g2 <= RA of data fault context
+	ldxa [%g2] ASI_REAL, %g3			! read the fault context
+
+	/* read the faulting address */
+	add %g1, FSA_DFA_OFFSET, %g2			! g2 <= RA of data fault address
+	ldxa [%g2] ASI_REAL, %g1			! read the fault address
+	srlx %g1, TTE_DATA_TADDR_OFFSET, %g1		! truncate it to page boundary
+	sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
+
+	/* service by higher-level routine when context != 0 */
+	brnz %g3, 0f
 	nop
-	stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG		! copy mapping from DTSB to DTLB
-	retry
-#endif
+	/* exclude page number 0 from installing the identity mapping */
+	brz %g1, 0f
+	nop

 	/*
-	 * Second, test if it is the portion of the kernel address space
-	 * which is faulting. If that is the case, immediately create
-	 * identity mapping for that page in DTLB. VPN 0 is excluded from
-	 * this treatment.
-	 *
-	 * Note that branch-delay slots are used in order to save space.
+	 * Installing the identity does not fit into 32 instructions, call
+	 * a separate routine. The routine performs RETRY, hence the call never
+	 * returns.
 	 */
-0:
-	//MH
-//	sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
-	wr %g0, ASI_DMMU, %asi
-	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1		! read the faulting Context and VPN
-	set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
-	andcc %g1, %g2, %g3				! get Context
-	bnz %xcc, 0f					! Context is non-zero
-	andncc %g1, %g2, %g3				! get page address into %g3
-	bz %xcc, 0f					! page address is zero
-	//MH
-//	ldx [%g7 + %lo(end_of_identity)], %g4
-	cmp %g3, %g4
-	bgeu %xcc, 0f
+	ba install_identity_mapping
+	nop

-	ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
-	add %g3, %g2, %g2
-	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG		! identity map the kernel page
-	retry
+0:

 	/*
-	 * Third, catch and handle special cases when the trap is caused by
-	 * the userspace register window spill or fill handler. In case
-	 * one of these two traps caused this trap, we just lower the trap
-	 * level and service the DTLB miss. In the end, we restart
-	 * the offending SAVE or RESTORE.
+	 * One of the scenarios in which this trap can occur is when the
+	 * register window spill/fill handler accesses a memory which is not
+	 * mapped. In such a case, this handler will be called from TL = 1.
+	 * We handle the situation by pretending that the MMU miss occurred
+	 * on TL = 0. Once the MMU miss trap is services, the instruction which
+	 * caused the spill/fill trap is restarted, the spill/fill trap occurs,
+	 * but this time its handler accesse memory which IS mapped.
 	 */
 0:
…

 	/*
-	 * Switch from the MM globals.
+	 * Save the faulting virtual page and faulting context to the %g2
+	 * register. The most significant 51 bits of the %g2 register will
+	 * contain the virtual address which caused the fault truncated to the
+	 * page boundary. The least significant 13 bits of the %g2 register
+	 * will contain the number of the context in which the fault occurred.
+	 * The value of the %g2 register will be passed as a parameter to the
+	 * higher level service routine.
 	 */
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
+	or %g1, %g3, %g2

-	/*
-	 * Read the Tag Access register for the higher-level handler.
-	 * This is necessary to survive nested DTLB misses.
-	 */
-	ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
-
-	/*
-	 * g2 will be passed as an argument to fast_data_access_mmu_miss().
-	 */
 	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
-#endif
 .endm

+/*
+ * Handler of the Fast Data MMU Protection trap. Finds the trapping address
+ * and context and calls higher level service routine.
+ */
 .macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
 	/*
 	 * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
 	 */
+	.if (\tl > 0)
+		wrpr %g0, 1, %tl
+	.endif

-	.if (\tl > 0)
-		wrpr %g0, 1, %tl
-	.endif
+	mov SCRATCHPAD_MMU_FSA, %g1
+	ldxa [%g1] ASI_SCRATCHPAD, %g1			! g1 <= RA of MMU fault status area

-	/*
-	 * Switch from the MM globals.
-	 */
-	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
+	/* read faulting context */
+	add %g1, FSA_DFC_OFFSET, %g2			! g2 <= RA of data fault context
+	ldxa [%g2] ASI_REAL, %g3			! read the fault context

-	/*
-	 * Read the Tag Access register for the higher-level handler.
-	 * This is necessary to survive nested DTLB misses.
-	 */
-	mov VA_DMMU_TAG_ACCESS, %g2
-	ldxa [%g2] ASI_DMMU, %g2
+	/* read the faulting address */
+	add %g1, FSA_DFA_OFFSET, %g2			! g2 <= RA of data fault address
+	ldxa [%g2] ASI_REAL, %g1			! read the fault address
+	srlx %g1, TTE_DATA_TADDR_OFFSET, %g1		! truncate it to page boundary
+	sllx %g1, TTE_DATA_TADDR_OFFSET, %g1

-	/*
-	 * g2 will be passed as an argument to fast_data_access_mmu_miss().
-	 */
+	/* the same as for FAST_DATA_ACCESS_MMU_MISS_HANDLER */
+	or %g1, %g3, %g2
+
 	PREEMPTIBLE_HANDLER fast_data_access_protection
 .endm
-
 #endif /* __ASM__ */
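The new handlers pack the fault information into a single 64-bit value in %g2: the upper 51 bits hold the faulting virtual address truncated to the page boundary (the srlx/sllx pair clears the low TTE_DATA_TADDR_OFFSET bits), and the lower 13 bits hold the context number, combined with "or %g1, %g3, %g2". A minimal C sketch of how a higher-level service routine could unpack such an argument is shown below; the helper name mmu_arg_unpack and the example values are illustrative only and are not taken from this changeset, and nothing is assumed about the actual fast_data_access_mmu_miss() signature.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the TTE_DATA_TADDR_OFFSET constant introduced by this changeset. */
#define TTE_DATA_TADDR_OFFSET  13

/* Mask selecting the page-aligned part of the packed argument. */
#define PAGE_MASK  (~(((uint64_t) 1 << TTE_DATA_TADDR_OFFSET) - 1))

/*
 * Hypothetical helper (not part of the changeset): split the value built
 * by "or %g1, %g3, %g2" back into the faulting page and the context.
 */
static void mmu_arg_unpack(uint64_t arg, uint64_t *page, uint64_t *context)
{
	*page = arg & PAGE_MASK;        /* upper 51 bits: faulting VA, page-aligned */
	*context = arg & ~PAGE_MASK;    /* lower 13 bits: context number */
}

int main(void)
{
	uint64_t page, context;

	/* Example: fault at VA 0x4000a000 in context 3. */
	mmu_arg_unpack(0x4000a000 | 0x3, &page, &context);
	printf("page = 0x%llx, context = %llu\n",
	    (unsigned long long) page, (unsigned long long) context);
	return 0;
}

Because the handler clears the low 13 bits of the fault address before the or, the context field cannot be disturbed by the page address, so the two components can always be separated with a single mask as above.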