Changeset 8c2214e in mainline
- Timestamp: 2010-02-20T20:29:27Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f516bc2
- Parents: e0cb57b
- Location: kernel/arch/sparc64
- Files: 2 added, 15 edited
Legend:
- Unmodified lines show both the old and the new line number.
- Added lines show only the new line number; removed lines show only the old one.
kernel/arch/sparc64/include/mm/as.h
re0cb57b r8c2214e 36 36 #define KERN_sparc64_AS_H_ 37 37 38 #include <arch/mm/tte.h> 39 40 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 1 41 42 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 43 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 44 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 45 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 46 47 #define USTACK_ADDRESS_ARCH (0xffffffffffffffffULL - (PAGE_SIZE - 1)) 48 49 #ifdef CONFIG_TSB 50 51 /** TSB Tag Target register. */ 52 typedef union tsb_tag_target { 53 uint64_t value; 54 struct { 55 unsigned invalid : 1; /**< Invalidated by software. */ 56 unsigned : 2; 57 unsigned context : 13; /**< Software ASID. */ 58 unsigned : 6; 59 uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */ 60 } __attribute__ ((packed)); 61 } tsb_tag_target_t; 62 63 /** TSB entry. */ 64 typedef struct tsb_entry { 65 tsb_tag_target_t tag; 66 tte_data_t data; 67 } __attribute__ ((packed)) tsb_entry_t; 68 69 typedef struct { 70 tsb_entry_t *itsb; 71 tsb_entry_t *dtsb; 72 } as_arch_t; 73 74 #else 75 76 typedef struct { 77 } as_arch_t; 78 79 #endif /* CONFIG_TSB */ 80 81 #include <genarch/mm/as_ht.h> 82 83 #ifdef CONFIG_TSB 84 #include <arch/mm/tsb.h> 85 #define as_invalidate_translation_cache(as, page, cnt) \ 86 tsb_invalidate((as), (page), (cnt)) 87 #else 88 #define as_invalidate_translation_cache(as, page, cnt) 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/as.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/as.h> 89 42 #endif 90 91 extern void as_arch_init(void);92 43 93 44 #endif -
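With this change the generic as.h stops defining the address-space constants and TSB structures itself and becomes a pure dispatch header; the removed definitions presumably move into the processor-specific variants (the two newly added files are most likely the sun4u counterparts). The same dispatch pattern is applied to tlb.h and tsb.h below. A minimal sketch of the resulting header shape:

    /* kernel/arch/sparc64/include/mm/as.h after the split (sketch) */
    #ifndef KERN_sparc64_AS_H_
    #define KERN_sparc64_AS_H_

    #if defined (SUN4U)
    #include <arch/mm/sun4u/as.h>    /* sun4u-specific as_arch_t and TSB structures */
    #elif defined (SUN4V)
    #include <arch/mm/sun4v/as.h>    /* sun4v-specific as_arch_t and TSB description */
    #endif

    #endif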
kernel/arch/sparc64/include/mm/sun4u/tlb.h
re0cb57b r8c2214e
  684   684
  685   685   extern void dump_sfsr_and_sfar(void);
        686   extern void describe_dmmu_fault(void);
  686   687
  687   688   #endif /* !def __ASM__ */
-
kernel/arch/sparc64/include/mm/sun4v/frame.h
re0cb57b r8c2214e 33 33 */ 34 34 35 #ifndef KERN_sparc64_ SUN4V_FRAME_H_36 #define KERN_sparc64_ SUN4V_FRAME_H_35 #ifndef KERN_sparc64_sun4v_FRAME_H_ 36 #define KERN_sparc64_sun4v_FRAME_H_ 37 37 38 /*39 * Page size supported by the MMU.40 * For 8K there is the nasty illegal virtual aliasing problem.41 * Therefore, the kernel uses 8K only internally on the TLB and TSB levels.42 */43 38 #define MMU_FRAME_WIDTH 13 /* 8K */ 44 39 #define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) … … 52 47 #include <arch/types.h> 53 48 54 union frame_address { 55 uintptr_t address; 56 struct { 57 #if defined (US) 58 unsigned : 23; 59 uint64_t pfn : 28; /**< Physical Frame Number. */ 60 #elif defined (US3) 61 unsigned : 21; 62 uint64_t pfn : 30; /**< Physical Frame Number. */ 63 #endif 64 unsigned offset : 13; /**< Offset. */ 65 } __attribute__ ((packed)); 66 }; 67 68 typedef union frame_address frame_address_t; 69 49 extern uintptr_t last_frame; 70 50 extern void frame_arch_init(void); 71 51 #define physmem_print() -
kernel/arch/sparc64/include/mm/sun4v/mmu.h
re0cb57b r8c2214e 45 45 #define ASI_SECONDARY_CONTEXT_REG 0x21 /**< secondary context register ASI. */ 46 46 47 48 49 50 51 52 53 54 55 56 57 /* I-MMU ASIs. */58 #define ASI_IMMU 0x5059 #define ASI_IMMU_TSB_8KB_PTR_REG 0x5160 #define ASI_IMMU_TSB_64KB_PTR_REG 0x5261 #define ASI_ITLB_DATA_IN_REG 0x5462 #define ASI_ITLB_DATA_ACCESS_REG 0x5563 #define ASI_ITLB_TAG_READ_REG 0x5664 #define ASI_IMMU_DEMAP 0x5765 66 /* Virtual Addresses within ASI_IMMU. */67 #define VA_IMMU_TSB_TAG_TARGET 0x0 /**< IMMU TSB tag target register. */68 #define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */69 #define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */70 #define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */71 #if defined (US3)72 #define VA_IMMU_PRIMARY_EXTENSION 0x48 /**< IMMU TSB primary extension register */73 #define VA_IMMU_NUCLEUS_EXTENSION 0x58 /**< IMMU TSB nucleus extension register */74 #endif75 76 77 /* D-MMU ASIs. */78 #define ASI_DMMU 0x5879 #define ASI_DMMU_TSB_8KB_PTR_REG 0x5980 #define ASI_DMMU_TSB_64KB_PTR_REG 0x5a81 #define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b82 #define ASI_DTLB_DATA_IN_REG 0x5c83 #define ASI_DTLB_DATA_ACCESS_REG 0x5d84 #define ASI_DTLB_TAG_READ_REG 0x5e85 #define ASI_DMMU_DEMAP 0x5f86 87 /* Virtual Addresses within ASI_DMMU. */88 #define VA_DMMU_TSB_TAG_TARGET 0x0 /**< DMMU TSB tag target register. */89 #define VA_PRIMARY_CONTEXT_REG 0x8 /**< DMMU primary context register. */90 #define VA_SECONDARY_CONTEXT_REG 0x10 /**< DMMU secondary context register. */91 #define VA_DMMU_SFSR 0x18 /**< DMMU sync fault status register. */92 #define VA_DMMU_SFAR 0x20 /**< DMMU sync fault address register. */93 #define VA_DMMU_TSB_BASE 0x28 /**< DMMU TSB base register. */94 #define VA_DMMU_TAG_ACCESS 0x30 /**< DMMU TLB tag access register. */95 #define VA_DMMU_VA_WATCHPOINT_REG 0x38 /**< DMMU VA data watchpoint register. */96 #define VA_DMMU_PA_WATCHPOINT_REG 0x40 /**< DMMU PA data watchpoint register. */97 #if defined (US3)98 #define VA_DMMU_PRIMARY_EXTENSION 0x48 /**< DMMU TSB primary extension register */99 #define VA_DMMU_SECONDARY_EXTENSION 0x50 /**< DMMU TSB secondary extension register */100 #define VA_DMMU_NUCLEUS_EXTENSION 0x58 /**< DMMU TSB nucleus extension register */101 #endif102 103 #ifndef __ASM__104 105 #include <arch/asm.h>106 #include <arch/barrier.h>107 #include <arch/types.h>108 109 #if defined(US)110 /** LSU Control Register. */111 typedef union {112 uint64_t value;113 struct {114 unsigned : 23;115 unsigned pm : 8;116 unsigned vm : 8;117 unsigned pr : 1;118 unsigned pw : 1;119 unsigned vr : 1;120 unsigned vw : 1;121 unsigned : 1;122 unsigned fm : 16;123 unsigned dm : 1; /**< D-MMU enable. */124 unsigned im : 1; /**< I-MMU enable. */125 unsigned dc : 1; /**< D-Cache enable. */126 unsigned ic : 1; /**< I-Cache enable. */127 128 } __attribute__ ((packed));129 } lsu_cr_reg_t;130 #endif /* US */131 132 #endif /* !def __ASM__ */133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 47 #endif 150 48 -
kernel/arch/sparc64/include/mm/sun4v/tsb.h
re0cb57b r8c2214e
  71    71    struct pte;
  72    72
  73          extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
        73    extern void tsb_invalidate(struct as *as, uintptr_t page, uint64_t pages);
  74    74    extern void itsb_pte_copy(struct pte *t);
  75    75    extern void dtsb_pte_copy(struct pte *t, bool ro);
-
kernel/arch/sparc64/include/mm/tlb.h
re0cb57b r8c2214e 36 36 #define KERN_sparc64_TLB_H_ 37 37 38 38 39 #if defined (SUN4U) 39 40 #include <arch/mm/sun4u/tlb.h> -
kernel/arch/sparc64/include/mm/tsb.h
re0cb57b r8c2214e 36 36 #define KERN_sparc64_TSB_H_ 37 37 38 /* 39 * ITSB abd DTSB will claim 64K of memory, which 40 * is a nice number considered that it is one of 41 * the page sizes supported by hardware, which, 42 * again, is nice because TSBs need to be locked 43 * in TLBs - only one TLB entry will do. 44 */ 45 #define TSB_SIZE 2 /* when changing this, change 46 * as.c as well */ 47 #define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 48 #define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 49 50 #define TSB_TAG_TARGET_CONTEXT_SHIFT 48 51 52 #ifndef __ASM__ 53 54 #include <arch/mm/tte.h> 55 #include <arch/mm/mmu.h> 56 #include <arch/types.h> 57 58 /** TSB Base register. */ 59 typedef union tsb_base_reg { 60 uint64_t value; 61 struct { 62 uint64_t base : 51; /**< TSB base address, bits 63:13. */ 63 unsigned split : 1; /**< Split vs. common TSB for 8K and 64K 64 * pages. HelenOS uses only 8K pages 65 * for user mappings, so we always set 66 * this to 0. 67 */ 68 unsigned : 9; 69 unsigned size : 3; /**< TSB size. Number of entries is 70 * 512 * 2^size. */ 71 } __attribute__ ((packed)); 72 } tsb_base_reg_t; 73 74 /** Read ITSB Base register. 75 * 76 * @return Content of the ITSB Base register. 77 */ 78 static inline uint64_t itsb_base_read(void) 79 { 80 return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE); 81 } 82 83 /** Read DTSB Base register. 84 * 85 * @return Content of the DTSB Base register. 86 */ 87 static inline uint64_t dtsb_base_read(void) 88 { 89 return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE); 90 } 91 92 /** Write ITSB Base register. 93 * 94 * @param v New content of the ITSB Base register. 95 */ 96 static inline void itsb_base_write(uint64_t v) 97 { 98 asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v); 99 } 100 101 /** Write DTSB Base register. 102 * 103 * @param v New content of the DTSB Base register. 104 */ 105 static inline void dtsb_base_write(uint64_t v) 106 { 107 asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v); 108 } 109 110 #if defined (US3) 111 112 /** Write DTSB Primary Extension register. 113 * 114 * @param v New content of the DTSB Primary Extension register. 115 */ 116 static inline void dtsb_primary_extension_write(uint64_t v) 117 { 118 asi_u64_write(ASI_DMMU, VA_DMMU_PRIMARY_EXTENSION, v); 119 } 120 121 /** Write DTSB Secondary Extension register. 122 * 123 * @param v New content of the DTSB Secondary Extension register. 124 */ 125 static inline void dtsb_secondary_extension_write(uint64_t v) 126 { 127 asi_u64_write(ASI_DMMU, VA_DMMU_SECONDARY_EXTENSION, v); 128 } 129 130 /** Write DTSB Nucleus Extension register. 131 * 132 * @param v New content of the DTSB Nucleus Extension register. 133 */ 134 static inline void dtsb_nucleus_extension_write(uint64_t v) 135 { 136 asi_u64_write(ASI_DMMU, VA_DMMU_NUCLEUS_EXTENSION, v); 137 } 138 139 /** Write ITSB Primary Extension register. 140 * 141 * @param v New content of the ITSB Primary Extension register. 142 */ 143 static inline void itsb_primary_extension_write(uint64_t v) 144 { 145 asi_u64_write(ASI_IMMU, VA_IMMU_PRIMARY_EXTENSION, v); 146 } 147 148 /** Write ITSB Nucleus Extension register. 149 * 150 * @param v New content of the ITSB Nucleus Extension register. 151 */ 152 static inline void itsb_nucleus_extension_write(uint64_t v) 153 { 154 asi_u64_write(ASI_IMMU, VA_IMMU_NUCLEUS_EXTENSION, v); 155 } 156 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/tsb.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/tsb.h> 157 42 #endif 158 159 /* Forward declarations. 
*/160 struct as;161 struct pte;162 163 extern void tsb_invalidate(struct as *as, uintptr_t page, size_t pages);164 extern void itsb_pte_copy(struct pte *t, size_t index);165 extern void dtsb_pte_copy(struct pte *t, size_t index, bool ro);166 167 #endif /* !def __ASM__ */168 43 169 44 #endif -
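The sizing comment removed from the generic tsb.h is worth keeping in mind: with TSB_SIZE = 2, each TSB holds 512 * 2^2 = 2048 entries of 16 bytes (one tag word plus one data word), so the ITSB and DTSB together occupy exactly 64K, one of the page sizes supported by the hardware, and can therefore be pinned by a single locked TLB entry. A small self-contained check of that arithmetic:

    #include <stdio.h>

    /* Sizing taken from the removed tsb.h comment: entry count is
     * 512 * 2^TSB_SIZE; an entry is a tag + data pair of uint64_t. */
    #define TSB_SIZE         2
    #define TSB_ENTRY_COUNT  (512 * (1 << TSB_SIZE))
    #define TSB_ENTRY_BYTES  16

    int main(void)
    {
        int total_kib = 2 * TSB_ENTRY_COUNT * TSB_ENTRY_BYTES / 1024;
        printf("entries per TSB: %d\n", TSB_ENTRY_COUNT);   /* 2048 */
        printf("ITSB + DTSB:     %d KiB\n", total_kib);     /* 64 KiB */
        return 0;
    }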
kernel/arch/sparc64/include/trap/exception.h
re0cb57b r8c2214e
  38    38
  39    39    #define TT_INSTRUCTION_ACCESS_EXCEPTION  0x08
        40    #define TT_INSTRUCTION_ACCESS_MMU_MISS   0x09
  40    41    #define TT_INSTRUCTION_ACCESS_ERROR      0x0a
        42    #define TT_IAE_UNAUTH_ACCESS             0x0b
        43    #define TT_IAE_NFO_PAGE                  0x0c
  41    44    #define TT_ILLEGAL_INSTRUCTION           0x10
  42    45    #define TT_PRIVILEGED_OPCODE             0x11
  43    46    #define TT_UNIMPLEMENTED_LDD             0x12
  44    47    #define TT_UNIMPLEMENTED_STD             0x13
        48    #define TT_DAE_INVALID_ASI               0x14
        49    #define TT_DAE_PRIVILEGE_VIOLATION       0x15
        50    #define TT_DAE_NC_PAGE                   0x16
        51    #define TT_DAE_NFO_PAGE                  0x17
  45    52    #define TT_FP_DISABLED                   0x20
  46    53    #define TT_FP_EXCEPTION_IEEE_754         0x21
  …     …
  49    56    #define TT_DIVISION_BY_ZERO              0x28
  50    57    #define TT_DATA_ACCESS_EXCEPTION         0x30
        58    #define TT_DATA_ACCESS_MMU_MISS          0x31
  51    59    #define TT_DATA_ACCESS_ERROR             0x32
  52    60    #define TT_MEM_ADDRESS_NOT_ALIGNED       0x34
-
kernel/arch/sparc64/include/trap/sun4v/mmu.h
re0cb57b r8c2214e 36 36 */ 37 37 38 #ifndef KERN_sparc64_ SUN4V_MMU_TRAP_H_39 #define KERN_sparc64_ SUN4V_MMU_TRAP_H_38 #ifndef KERN_sparc64_sun4v_MMU_TRAP_H_ 39 #define KERN_sparc64_sun4v_MMU_TRAP_H_ 40 40 41 41 #include <arch/stack.h> … … 121 121 * but this time its handler accesse memory which IS mapped. 122 122 */ 123 0: 124 .if (\tl > 0) 125 wrpr %g0, 1, %tl 126 .endif 123 .if (\tl > 0) 124 wrpr %g0, 1, %tl 125 .endif 127 126 128 127 /* -
kernel/arch/sparc64/include/trap/trap_table.h
re0cb57b r8c2214e
  101   101   .macro PREEMPTIBLE_HANDLER f
  102   102       sethi %hi(\f), %g1
  103             ba %xcc, preemptible_handler
        103       b preemptible_handler
  104   104       or %g1, %lo(\f), %g1
  105   105   .endm
-
kernel/arch/sparc64/src/cpu/sun4v/cpu.c
re0cb57b r8c2214e
  1     1     /*
  2     2      * Copyright (c) 2005 Jakub Jermar
        3      * Copyright (c) 2009 Pavel Rimsky
  3     4      * All rights reserved.
  4     5      *
-
kernel/arch/sparc64/src/mm/sun4u/tlb.c
re0cb57b r8c2214e 476 476 } 477 477 478 void d ump_sfsr_and_sfar(void)478 void describe_dmmu_fault(void) 479 479 { 480 480 tlb_sfsr_reg_t sfsr; … … 499 499 } 500 500 501 void dump_sfsr_and_sfar(void) 502 { 503 tlb_sfsr_reg_t sfsr; 504 uintptr_t sfar; 505 506 sfsr.value = dtlb_sfsr_read(); 507 sfar = dtlb_sfar_read(); 508 509 #if defined (US) 510 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " 511 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, 512 sfsr.ow, sfsr.fv); 513 #elif defined (US3) 514 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, " 515 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft, 516 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv); 517 #endif 518 519 printf("DTLB SFAR: address=%p\n", sfar); 520 521 dtlb_sfsr_write(0); 522 } 523 501 524 #if defined (US) 502 525 /** Invalidate all unlocked ITLB and DTLB entries. */ -
kernel/arch/sparc64/src/mm/sun4v/as.c
re0cb57b r8c2214e 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/mm/as.h> 37 #include <arch/mm/pagesize.h> 36 38 #include <arch/mm/tlb.h> 37 39 #include <genarch/mm/page_ht.h> … … 39 41 #include <debug.h> 40 42 #include <config.h> 43 #include <arch/sun4v/hypercall.h> 41 44 42 45 #ifdef CONFIG_TSB … … 86 89 { 87 90 #ifdef CONFIG_TSB 88 count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;91 size_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH; 89 92 frame_free((uintptr_t) as->arch.tsb_description.tsb_base); 90 93 return cnt; … … 112 115 { 113 116 mmu_secondary_context_write(as->asid); 117 #ifdef CONFIG_TSB 118 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 119 120 ASSERT(as->arch.tsb_description.tsb_base); 121 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 122 123 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 124 /* 125 * TSBs were allocated from memory not covered 126 * by the locked 4M kernel DTLB entry. We need 127 * to map both TSBs explicitly. 128 */ 129 mmu_demap_page(tsb, 0, MMU_FLAG_DTLB); 130 dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true); 131 } 132 133 __hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&(as->arch.tsb_description))); 134 135 #endif 114 136 } 115 137 … … 134 156 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 135 157 136 ASSERT(as->arch. itsb && as->arch.dtsb);158 ASSERT(as->arch.tsb_description.tsb_base); 137 159 138 uintptr_t tsb = (uintptr_t) as->arch.itsb;160 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 139 161 140 162 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { … … 144 166 * to demap the entry installed by as_install_arch(). 145 167 */ 146 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);168 __hypercall_fast3(MMU_UNMAP_PERM_ADDR, tsb, 0, MMU_FLAG_DTLB); 147 169 } 148 170 #endif -
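On sun4v, as_install_arch() now hands the per-address-space TSB description to the hypervisor (MMU_TSB_CTXNON0 hypercall) instead of programming TSB base registers, and as_deinstall_arch() unmaps the TSB again via MMU_UNMAP_PERM_ADDR. Before registering it, the code checks whether the TSB lies outside the locked 4M kernel DTLB entry and, if so, maps it explicitly. A self-contained model of that reachability check; the addresses and constants below are illustrative, only the shape of the test follows the changeset:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGE_SIZE      (1 << 13)   /* 8K MMU pages */
    #define KERNEL_PAGE_WIDTH  22          /* 4M locked kernel mapping (assumed) */

    /* Standard half-open interval overlap test, used here in place of the
     * kernel's own overlaps() helper. */
    static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
    {
        return s1 < s2 + sz2 && s2 < s1 + sz1;
    }

    int main(void)
    {
        uintptr_t kernel_base = 0x400000;   /* illustrative, 4M-aligned */
        uintptr_t tsb = 0x1000000;          /* illustrative TSB address */

        if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, kernel_base, 1 << KERNEL_PAGE_WIDTH))
            printf("TSB outside the locked kernel mapping: map it explicitly\n");
        return 0;
    }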
kernel/arch/sparc64/src/mm/sun4v/tsb.c
re0cb57b r8c2214e 1 1 /* 2 2 * Copyright (c) 2006 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/mm/tsb.h> 37 #include <arch/mm/pagesize.h> 36 38 #include <arch/mm/tlb.h> 37 39 #include <arch/mm/page.h> … … 49 51 * portions of both TSBs are invalidated at a time. 50 52 * 51 * @param as 52 * @param page First page to invalidate in TSB.53 * @param pages Number of pages to invalidate. Value of ( size_t) -1 means the54 * whole TSB.53 * @param as Address space. 54 * @param page First page to invalidate in TSB. 55 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the 56 * whole TSB. 55 57 */ 56 58 void tsb_invalidate(as_t *as, uintptr_t page, size_t pages) 57 59 { 58 size_t i0; 59 size_t i; 60 size_t i0, i; 60 61 size_t cnt; 61 62 62 ASSERT(as->arch. itsb && as->arch.dtsb);63 ASSERT(as->arch.tsb_description.tsb_base); 63 64 64 65 i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 65 ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);66 ASSERT(i0 < TSB_ENTRY_COUNT); 66 67 67 if (pages == (size_t) - 1 || (pages * 2) > ITSB_ENTRY_COUNT)68 cnt = ITSB_ENTRY_COUNT;68 if (pages == (size_t) - 1 || (pages) > TSB_ENTRY_COUNT) 69 cnt = TSB_ENTRY_COUNT; 69 70 else 70 cnt = pages * 2;71 cnt = pages; 71 72 72 73 for (i = 0; i < cnt; i++) { 73 as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid = 74 true; 75 as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid = 76 true; 74 ((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[ 75 (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false; 77 76 } 78 77 } … … 81 80 * 82 81 * @param t Software PTE. 83 * @param index Zero if lower 8K-subpage, one if higher 8K subpage.84 82 */ 85 void itsb_pte_copy(pte_t *t , size_t index)83 void itsb_pte_copy(pte_t *t) 86 84 { 87 #if 088 85 as_t *as; 89 86 tsb_entry_t *tsb; 90 87 size_t entry; 91 88 92 ASSERT(index <= 1);93 94 89 as = t->as; 95 entry = ( (t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;96 ASSERT(entry < ITSB_ENTRY_COUNT);97 tsb = & as->arch.itsb[entry];90 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 91 ASSERT(entry < TSB_ENTRY_COUNT); 92 tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry]; 98 93 99 94 /* … … 103 98 */ 104 99 105 tsb->tag.invalid = true; /* invalidate the entry 106 * (tag target has this 107 * set to 0) */ 100 tsb->data.v = false; 108 101 109 102 write_barrier(); 110 103 111 tsb->tag.context = as->asid;112 /* the shift is bigger than PAGE_WIDTH, do not bother with index */113 104 tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 105 114 106 tsb->data.value = 0; 107 tsb->data.nfo = false; 108 tsb->data.ra = t->frame >> MMU_FRAME_WIDTH; 109 tsb->data.ie = false; 110 tsb->data.e = false; 111 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 112 tsb->data.cv = false; 113 tsb->data.p = t->k; /* p as privileged, k as kernel */ 114 tsb->data.x = true; 115 tsb->data.w = false; 115 116 tsb->data.size = PAGESIZE_8K; 116 tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;117 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */118 tsb->data.p = t->k; /* p as privileged, k as kernel */119 tsb->data.v = t->p; /* v as valid, p as present */120 117 121 118 write_barrier(); 122 119 123 tsb->tag.invalid = false; /* mark the entry as valid */ 124 #endif 120 tsb->data.v = t->p; /* v as valid, p as present */ 125 121 } 126 122 … … 128 124 * 129 125 * @param t Software PTE. 
130 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.131 126 * @param ro If true, the mapping is copied read-only. 132 127 */ 133 void dtsb_pte_copy(pte_t *t, size_t index,bool ro)128 void dtsb_pte_copy(pte_t *t, bool ro) 134 129 { 135 #if 0136 130 as_t *as; 137 131 tsb_entry_t *tsb; 138 132 size_t entry; 139 140 ASSERT(index <= 1);141 133 142 134 as = t->as; 143 entry = ( (t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;144 ASSERT(entry < DTSB_ENTRY_COUNT);145 tsb = & as->arch.dtsb[entry];135 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 136 ASSERT(entry < TSB_ENTRY_COUNT); 137 tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry]; 146 138 147 139 /* … … 151 143 */ 152 144 153 tsb->tag.invalid = true; /* invalidate the entry 154 * (tag target has this 155 * set to 0) */ 145 tsb->data.v = false; 156 146 157 147 write_barrier(); 158 148 159 tsb->tag.context = as->asid;160 /* the shift is bigger than PAGE_WIDTH, do not bother with index */161 149 tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 150 162 151 tsb->data.value = 0; 163 tsb->data.size = PAGESIZE_8K; 164 tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; 165 tsb->data.cp = t->c; 152 tsb->data.nfo = false; 153 tsb->data.ra = t->frame >> MMU_FRAME_WIDTH; 154 tsb->data.ie = false; 155 tsb->data.e = false; 156 tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 166 157 #ifdef CONFIG_VIRT_IDX_DCACHE 167 158 tsb->data.cv = t->c; 168 159 #endif /* CONFIG_VIRT_IDX_DCACHE */ 169 tsb->data.p = t->k; /* p as privileged */ 160 tsb->data.p = t->k; /* p as privileged, k as kernel */ 161 tsb->data.x = true; 170 162 tsb->data.w = ro ? false : t->w; 171 tsb->data. v = t->p;163 tsb->data.size = PAGESIZE_8K; 172 164 173 165 write_barrier(); 174 166 175 tsb->tag.invalid = false; /* mark the entry as valid */ 176 #endif 167 tsb->data.v = t->p; /* v as valid, p as present */ 177 168 } 178 169 179 170 /** @} 180 171 */ 181 -
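The rewritten itsb_pte_copy()/dtsb_pte_copy() keep the same update discipline as before, but the validity flag now lives in the data word (data.v) rather than in a tag.invalid bit: clear the valid bit, write barrier, fill the tag and data fields, write barrier, and only then set the valid bit, so the MMU never observes a half-written entry. A simplified, self-contained model of that sequence (plain fields instead of the real bitfields; TSB_ENTRY_COUNT and the index mask are assumed values):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGE_WIDTH   13                      /* 8K MMU pages */
    #define TSB_ENTRY_COUNT  2048                    /* assumed power of two */
    #define TSB_INDEX_MASK   (TSB_ENTRY_COUNT - 1)

    typedef struct {
        uint64_t tag;     /* VA tag */
        uint64_t data;    /* RA, permissions, ... */
        bool     valid;   /* stands in for data.v */
    } tsb_entry_t;

    static tsb_entry_t tsb[TSB_ENTRY_COUNT];

    static void tsb_entry_update(uintptr_t page, uint64_t tag, uint64_t data)
    {
        size_t entry = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
        tsb_entry_t *e = &tsb[entry];

        e->valid = false;   /* 1. invalidate first; write_barrier() in the kernel */
        e->tag = tag;       /* 2. fill the entry while it is invalid */
        e->data = data;
        e->valid = true;    /* 3. write_barrier(), then publish; the kernel sets
                             *    this from the software PTE's present bit */
    }

    int main(void)
    {
        size_t idx = (0x4000 >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
        tsb_entry_update(0x4000, 0x1, 0x2);
        printf("entry %zu valid: %d\n", idx, tsb[idx].valid);
        return 0;
    }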
kernel/arch/sparc64/src/trap/sun4v/trap_table.S
re0cb57b r8c2214e 48 48 #include <arch/stack.h> 49 49 #include <arch/sun4v/regdef.h> 50 #include <arch/sun4v/arch.h> 51 #include <arch/sun4v/cpu.h> 50 52 51 53 #define TABLE_SIZE TRAP_TABLE_SIZE … … 60 62 61 63 /* TT = 0x08, TL = 0, instruction_access_exception */ 64 /* TT = 0x08, TL = 0, IAE_privilege_violation on UltraSPARC T2 */ 62 65 .org trap_table + TT_INSTRUCTION_ACCESS_EXCEPTION*ENTRY_SIZE 63 66 .global instruction_access_exception_tl0 64 67 instruction_access_exception_tl0: 65 /*wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate 66 PREEMPTIBLE_HANDLER instruction_access_exception*/ 68 PREEMPTIBLE_HANDLER instruction_access_exception 69 70 /* TT = 0x09, TL = 0, instruction_access_mmu_miss */ 71 .org trap_table + TT_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE 72 .global instruction_access_mmu_miss_handler_tl0 73 ba fast_instruction_access_mmu_miss_handler_tl0 74 nop 67 75 68 76 /* TT = 0x0a, TL = 0, instruction_access_error */ … … 72 80 PREEMPTIBLE_HANDLER instruction_access_error 73 81 82 /* TT = 0x0b, TL = 0, IAE_unauth_access */ 83 .org trap_table + TT_IAE_UNAUTH_ACCESS*ENTRY_SIZE 84 .global iae_unauth_access_tl0 85 iae_unauth_access_tl0: 86 PREEMPTIBLE_HANDLER instruction_access_exception 87 88 /* TT = 0x0c, TL = 0, IAE_nfo_page */ 89 .org trap_table + TT_IAE_NFO_PAGE*ENTRY_SIZE 90 .global iae_nfo_page_tl0 91 iae_nfo_page_tl0: 92 PREEMPTIBLE_HANDLER instruction_access_exception 93 74 94 /* TT = 0x10, TL = 0, illegal_instruction */ 75 95 .org trap_table + TT_ILLEGAL_INSTRUCTION*ENTRY_SIZE … … 96 116 PREEMPTIBLE_HANDLER unimplemented_STD 97 117 118 /* TT = 0x14, TL = 0, DAE_invalid_asi */ 119 .org trap_table + TT_DAE_INVALID_ASI*ENTRY_SIZE 120 .global dae_invalid_asi_tl0 121 dae_invalid_asi_tl0: 122 PREEMPTIBLE_HANDLER data_access_exception 123 124 /* TT = 0x15, TL = 0, DAE_privilege_violation */ 125 .org trap_table + TT_DAE_PRIVILEGE_VIOLATION*ENTRY_SIZE 126 .global dae_privilege_violation_tl0 127 dae_privilege_violation_tl0: 128 PREEMPTIBLE_HANDLER data_access_exception 129 130 /* TT = 0x16, TL = 0, DAE_nc_page */ 131 .org trap_table + TT_DAE_NC_PAGE*ENTRY_SIZE 132 .global dae_nc_page_tl0 133 dae_nc_page_tl0: 134 PREEMPTIBLE_HANDLER data_access_exception 135 136 /* TT = 0x17, TL = 0, DAE_nfo_page */ 137 .org trap_table + TT_DAE_NFO_PAGE*ENTRY_SIZE 138 .global dae_nfo_page_tl0 139 dae_nfo_page_tl0: 140 PREEMPTIBLE_HANDLER data_access_exception 141 98 142 /* TT = 0x20, TL = 0, fb_disabled handler */ 99 143 .org trap_table + TT_FP_DISABLED*ENTRY_SIZE … … 133 177 134 178 /* TT = 0x30, TL = 0, data_access_exception */ 179 /* TT = 0x30, TL = 0, DAE_side_effect_page for UltraPSARC T2 */ 135 180 .org trap_table + TT_DATA_ACCESS_EXCEPTION*ENTRY_SIZE 136 181 .global data_access_exception_tl0 137 182 data_access_exception_tl0: 138 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate139 183 PREEMPTIBLE_HANDLER data_access_exception 184 185 /* TT = 0x31, TL = 0, data_access_mmu_miss */ 186 .org trap_table + TT_DATA_ACCESS_MMU_MISS*ENTRY_SIZE 187 .global data_access_mmu_miss_tl0 188 data_access_mmu_miss_tl0: 189 ba fast_data_access_mmu_miss_handler_tl0 190 nop 140 191 141 192 /* TT = 0x32, TL = 0, data_access_error */ … … 271 322 INTERRUPT_LEVEL_N_HANDLER 15 272 323 273 /* TT = 0x60, TL = 0, interrupt_vector_trap handler */274 .org trap_table + TT_INTERRUPT_VECTOR_TRAP*ENTRY_SIZE275 .global interrupt_vector_trap_handler_tl0276 interrupt_vector_trap_handler_tl0:277 INTERRUPT_VECTOR_TRAP_HANDLER278 279 324 /* TT = 0x64, TL = 0, fast_instruction_access_MMU_miss */ 280 325 .org trap_table + 
TT_FAST_INSTRUCTION_ACCESS_MMU_MISS*ENTRY_SIZE … … 294 339 fast_data_access_protection_handler_tl0: 295 340 FAST_DATA_ACCESS_PROTECTION_HANDLER 0 341 342 /* TT = 0x7c, TL = 0, cpu_mondo */ 343 .org trap_table + TT_CPU_MONDO*ENTRY_SIZE 344 .global cpu_mondo_handler_tl0 345 cpu_mondo_handler_tl0: 346 /* PREEMPTIBLE_HANDLER cpu_mondo */ 296 347 297 348 /* TT = 0x80, TL = 0, spill_0_normal handler */ … … 352 403 353 404 /* TT = 0x08, TL > 0, instruction_access_exception */ 405 /* TT = 0x08, TL > 0, IAE_privilege_violation on UltraSPARC T2 */ 354 406 .org trap_table + (TT_INSTRUCTION_ACCESS_EXCEPTION+512)*ENTRY_SIZE 355 407 .global instruction_access_exception_tl1 356 408 instruction_access_exception_tl1: 357 409 wrpr %g0, 1, %tl 358 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate359 410 PREEMPTIBLE_HANDLER instruction_access_exception 411 412 /* TT = 0x09, TL > 0, instruction_access_mmu_miss */ 413 .org trap_table + (TT_INSTRUCTION_ACCESS_MMU_MISS+512)*ENTRY_SIZE 414 .global instruction_access_mmu_miss_handler_tl1 415 wrpr %g0, 1, %tl 416 ba fast_instruction_access_mmu_miss_handler_tl0 417 nop 360 418 361 419 /* TT = 0x0a, TL > 0, instruction_access_error */ … … 366 424 PREEMPTIBLE_HANDLER instruction_access_error 367 425 426 /* TT = 0x0b, TL > 0, IAE_unauth_access */ 427 .org trap_table + (TT_IAE_UNAUTH_ACCESS+512)*ENTRY_SIZE 428 .global iae_unauth_access_tl1 429 iae_unauth_access_tl1: 430 wrpr %g0, 1, %tl 431 PREEMPTIBLE_HANDLER instruction_access_exception 432 433 /* TT = 0x0c, TL > 0, IAE_nfo_page */ 434 .org trap_table + (TT_IAE_NFO_PAGE+512)*ENTRY_SIZE 435 .global iae_nfo_page_tl1 436 iae_nfo_page_tl1: 437 wrpr %g0, 1, %tl 438 PREEMPTIBLE_HANDLER instruction_access_exception 439 368 440 /* TT = 0x10, TL > 0, illegal_instruction */ 369 441 .org trap_table + (TT_ILLEGAL_INSTRUCTION+512)*ENTRY_SIZE … … 372 444 wrpr %g0, 1, %tl 373 445 PREEMPTIBLE_HANDLER illegal_instruction 446 447 /* TT = 0x14, TL > 0, DAE_invalid_asi */ 448 .org trap_table + (TT_DAE_INVALID_ASI+512)*ENTRY_SIZE 449 .global dae_invalid_asi_tl1 450 dae_invalid_asi_tl1: 451 wrpr %g0, 1, %tl 452 PREEMPTIBLE_HANDLER data_access_exception 453 454 /* TT = 0x15, TL > 0, DAE_privilege_violation */ 455 .org trap_table + (TT_DAE_PRIVILEGE_VIOLATION+512)*ENTRY_SIZE 456 .global dae_privilege_violation_tl1 457 dae_privilege_violation_tl1: 458 wrpr %g0, 1, %tl 459 PREEMPTIBLE_HANDLER data_access_exception 460 461 /* TT = 0x16, TL > 0, DAE_nc_page */ 462 .org trap_table + (TT_DAE_NC_PAGE+512)*ENTRY_SIZE 463 .global dae_nc_page_tl1 464 dae_nc_page_tl1: 465 wrpr %g0, 1, %tl 466 PREEMPTIBLE_HANDLER data_access_exception 467 468 /* TT = 0x17, TL > 0, DAE_nfo_page */ 469 .org trap_table + (TT_DAE_NFO_PAGE+512)*ENTRY_SIZE 470 .global dae_nfo_page_tl1 471 dae_nfo_page_tl1: 472 wrpr %g0, 1, %tl 473 PREEMPTIBLE_HANDLER data_access_exception 374 474 375 475 /* TT = 0x24, TL > 0, clean_window handler */ … … 390 490 .global data_access_exception_tl1 391 491 data_access_exception_tl1: 392 wrpr %g0, 1, %tl492 /*wrpr %g0, 1, %tl 393 493 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate 394 PREEMPTIBLE_HANDLER data_access_exception 494 PREEMPTIBLE_HANDLER data_access_exception*/ 495 496 /* TT = 0x31, TL > 0, data_access_mmu_miss */ 497 .org trap_table + (TT_DATA_ACCESS_MMU_MISS+512)*ENTRY_SIZE 498 .global data_access_mmu_miss_tl1 499 data_access_mmu_miss_tl1: 500 ba fast_data_access_mmu_miss_handler_tl1 501 nop 502 395 503 396 504 /* TT = 0x32, TL > 0, data_access_error */ … … 419 527 fast_data_access_protection_handler_tl1: 420 528 
FAST_DATA_ACCESS_PROTECTION_HANDLER 1 529 530 /* TT = 0x7c, TL > 0, cpu_mondo */ 531 .org trap_table + (TT_CPU_MONDO+512)*ENTRY_SIZE 532 .global cpu_mondo_handler_tl1 533 cpu_mondo_handler_tl1: 534 wrpr %g0, %tl 535 /* PREEMPTIBLE_HANDLER cpu_mondo */ 421 536 422 537 /* TT = 0x80, TL > 0, spill_0_normal handler */ … … 660 775 .endm 661 776 662 663 #if 0664 777 /* 665 778 * Preemptible trap handler for handling traps from kernel. … … 677 790 nop ! it will be easy to find 678 791 679 /* prevent unnecessary CLEANWIN exceptions */680 wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate681 1:682 /*683 * Prevent SAVE instruction from causing a spill exception. If the684 * CANSAVE register is zero, explicitly spill register window685 * at CWP + 2.686 */687 688 rdpr %cansave, %g3689 brnz %g3, 2f690 nop691 INLINE_SPILL %g3, %g4692 693 2:694 /* ask for new register window */695 save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp696 697 /* copy higher level routine's address and its argument */698 mov %g1, %l0699 mov %g2, %o0700 701 /*702 * Save TSTATE, TPC and TNPC aside.703 */704 rdpr %tstate, %g1705 rdpr %tpc, %g2706 rdpr %tnpc, %g3707 708 stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]709 stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]710 stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]711 712 /*713 * Save the Y register.714 * This register is deprecated according to SPARC V9 specification715 * and is only present for backward compatibility with previous716 * versions of the SPARC architecture.717 * Surprisingly, gcc makes use of this register without a notice.718 */719 rd %y, %g4720 stx %g4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y]721 722 /* switch to TL = 0, explicitly enable FPU */723 wrpr %g0, 0, %tl724 wrpr %g0, 0, %gl725 wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT, %pstate726 727 /* g1 -> l1, ..., g7 -> l7 */728 SAVE_GLOBALS729 730 /* call higher-level service routine, pass istate as its 2nd parameter */731 call %l0732 add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1733 734 /* l1 -> g1, ..., l7 -> g7 */735 RESTORE_GLOBALS736 737 /* we must prserve the PEF bit */738 rdpr %pstate, %l1739 740 /* TL := 1, GL := 1 */741 wrpr %g0, PSTATE_PRIV_BIT, %pstate742 wrpr %g0, 1, %tl743 wrpr %g0, 1, %gl744 745 /* Read TSTATE, TPC and TNPC from saved copy. */746 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1747 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2748 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3749 750 /* Copy PSTATE.PEF to the in-register copy of TSTATE. */751 and %l1, PSTATE_PEF_BIT, %l1752 sllx %l1, TSTATE_PSTATE_SHIFT, %l1753 sethi %hi(TSTATE_PEF_BIT), %g4 ! reset the PEF bit to 0 ...754 andn %g1, %g4, %g1755 or %g1, %l1, %g1 ! ... "or" it with saved PEF756 757 /* Restore TSTATE, TPC and TNPC from saved copies. */758 wrpr %g1, 0, %tstate759 wrpr %g2, 0, %tpc760 wrpr %g3, 0, %tnpc761 762 /* Restore Y. */763 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_Y], %g4764 wr %g4, %y765 766 /* If TSTATE.CWP + 1 == CWP, then we do not have to fix CWP. */767 and %g1, TSTATE_CWP_MASK, %l0768 inc %l0769 and %l0, NWINDOWS - 1, %l0 ! %l0 mod NWINDOWS770 rdpr %cwp, %l1771 cmp %l0, %l1772 bz 4f ! 
CWP is ok773 nop774 775 3:776 /*777 * Fix CWP.778 * In order to recapitulate, the input registers in the current779 * window are the output registers of the window to which we want780 * to restore. Because the fill trap fills only input and local781 * registers of a window, we need to preserve those output782 * registers manually.783 */784 mov %sp, %g2785 stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]786 stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]787 stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]788 stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]789 stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]790 stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]791 stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]792 stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]793 wrpr %l0, 0, %cwp794 mov %g2, %sp795 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0796 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1797 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2798 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3799 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4800 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5801 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6802 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7803 804 4:805 /*806 * Prevent RESTORE instruction from causing a fill exception. If the807 * CANRESTORE register is zero, explicitly fill register window808 * at CWP - 1.809 */810 rdpr %canrestore, %g1811 brnz %g1, 5f812 nop813 INLINE_FILL %g3, %g4814 815 5:816 restore817 818 retry819 .endm820 821 #endif822 823 /*824 * Preemptible trap handler for handling traps from kernel.825 */826 .macro PREEMPTIBLE_HANDLER_KERNEL827 828 /*829 * ASSERT(%tl == 1)830 */831 rdpr %tl, %g3832 cmp %g3, 1833 be 1f834 nop835 0: ba 0b ! this is for debugging, if we ever get here836 nop ! it will be easy to find837 838 792 1: 839 793 /* prevent unnecessary CLEANWIN exceptions */ … … 872 826 retry 873 827 .endm 874 875 876 828 877 829 /* … … 1092 1044 and %g1, NWINDOWS - 1, %g1 1093 1045 wrpr %g1, 0, %cwp ! CWP-- 1094 1046 1095 1047 .if \is_syscall 1096 1048 done … … 1100 1052 1101 1053 .endm 1102 1103 1104 1054 1105 1055 /* Preemptible trap handler for TL=1. … … 1132 1082 trap_instruction_handler: 1133 1083 PREEMPTIBLE_HANDLER_TEMPLATE 1 1134