Changeset 0b9ac3c in mainline for kernel/arch
- Timestamp:
- 2010-02-23T19:03:28Z (16 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- c62d2e1
- Parents:
- 1ccafee (diff), 5e50394 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/arch
- Files:
-
- 54 added
- 78 edited
- 12 moved
-
abs32le/Makefile.inc (modified) (2 diffs)
-
abs32le/include/asm.h (modified) (1 diff)
-
abs32le/include/atomic.h (modified) (5 diffs)
-
abs32le/include/barrier.h (modified) (3 diffs)
-
abs32le/include/context.h (modified) (1 diff)
-
abs32le/include/context_offset.h (modified) (1 diff)
-
abs32le/include/interrupt.h (modified) (1 diff)
-
abs32le/include/memstr.h (modified) (1 diff)
-
abs32le/include/mm/frame.h (modified) (2 diffs)
-
abs32le/include/mm/page.h (modified) (4 diffs)
-
abs32le/include/types.h (modified) (1 diff)
-
abs32le/src/abs32le.c (modified) (4 diffs)
-
abs32le/src/ddi/ddi.c (added)
-
abs32le/src/debug/stacktrace.c (modified) (4 diffs)
-
abs32le/src/smp/ipi.c (added)
-
abs32le/src/smp/smp.c (added)
-
amd64/include/asm.h (modified) (1 diff)
-
amd64/include/atomic.h (modified) (7 diffs)
-
amd64/include/interrupt.h (modified) (1 diff)
-
amd64/include/memstr.h (modified) (2 diffs)
-
amd64/include/types.h (modified) (1 diff)
-
amd64/src/amd64.c (modified) (2 diffs)
-
amd64/src/debugger.c (modified) (2 diffs)
-
arm32/include/asm.h (modified) (1 diff)
-
arm32/include/atomic.h (modified) (8 diffs)
-
arm32/include/memstr.h (modified) (2 diffs)
-
arm32/include/types.h (modified) (3 diffs)
-
arm32/src/arm32.c (modified) (3 diffs)
-
ia32/include/asm.h (modified) (1 diff)
-
ia32/include/atomic.h (modified) (7 diffs)
-
ia32/include/interrupt.h (modified) (1 diff)
-
ia32/include/memstr.h (modified) (2 diffs)
-
ia32/include/types.h (modified) (1 diff)
-
ia32/src/ia32.c (modified) (2 diffs)
-
ia32/src/smp/ipi.c (modified) (1 diff)
-
ia64/include/asm.h (modified) (1 diff)
-
ia64/include/atomic.h (modified) (8 diffs)
-
ia64/include/context.h (modified) (1 diff)
-
ia64/include/interrupt.h (modified) (1 diff)
-
ia64/include/memstr.h (modified) (2 diffs)
-
ia64/include/types.h (modified) (2 diffs)
-
ia64/src/ia64.c (modified) (3 diffs)
-
mips32/include/asm.h (modified) (1 diff)
-
mips32/include/atomic.h (modified) (6 diffs)
-
mips32/include/context.h (modified) (2 diffs)
-
mips32/include/memstr.h (modified) (2 diffs)
-
mips32/include/types.h (modified) (2 diffs)
-
mips32/src/mips32.c (modified) (2 diffs)
-
mips32/src/smp/dorder.c (modified) (2 diffs)
-
ppc32/include/asm.h (modified) (2 diffs)
-
ppc32/include/atomic.h (modified) (7 diffs)
-
ppc32/include/context.h (modified) (3 diffs)
-
ppc32/include/memstr.h (modified) (2 diffs)
-
ppc32/include/types.h (modified) (2 diffs)
-
ppc32/src/ppc32.c (modified) (7 diffs)
-
sparc64/Makefile.inc (modified) (4 diffs)
-
sparc64/include/arch.h (modified) (2 diffs)
-
sparc64/include/asm.h (modified) (1 diff)
-
sparc64/include/atomic.h (modified) (4 diffs)
-
sparc64/include/context.h (modified) (1 diff)
-
sparc64/include/cpu.h (modified) (1 diff)
-
sparc64/include/drivers/niagara.h (added)
-
sparc64/include/drivers/tick.h (modified) (1 diff)
-
sparc64/include/memstr.h (modified) (1 diff)
-
sparc64/include/mm/as.h (modified) (1 diff)
-
sparc64/include/mm/frame.h (modified) (1 diff)
-
sparc64/include/mm/mmu.h (modified) (1 diff)
-
sparc64/include/mm/pagesize.h (added)
-
sparc64/include/mm/sun4u/as.h (added)
-
sparc64/include/mm/sun4u/frame.h (added)
-
sparc64/include/mm/sun4u/mmu.h (added)
-
sparc64/include/mm/sun4u/tlb.h (added)
-
sparc64/include/mm/sun4u/tsb.h (added)
-
sparc64/include/mm/sun4u/tte.h (added)
-
sparc64/include/mm/sun4v/as.h (added)
-
sparc64/include/mm/sun4v/frame.h (added)
-
sparc64/include/mm/sun4v/mmu.h (added)
-
sparc64/include/mm/sun4v/page.h (added)
-
sparc64/include/mm/sun4v/tlb.h (added)
-
sparc64/include/mm/sun4v/tsb.h (added)
-
sparc64/include/mm/sun4v/tte.h (added)
-
sparc64/include/mm/tlb.h (modified) (1 diff)
-
sparc64/include/mm/tsb.h (modified) (1 diff)
-
sparc64/include/mm/tte.h (modified) (1 diff)
-
sparc64/include/smp/sun4v/smp.h (added)
-
sparc64/include/sun4u/arch.h (added)
-
sparc64/include/sun4u/asm.h (added)
-
sparc64/include/sun4u/cpu.h (added)
-
sparc64/include/sun4v/arch.h (added)
-
sparc64/include/sun4v/asm.h (added)
-
sparc64/include/sun4v/cpu.h (added)
-
sparc64/include/sun4v/hypercall.h (added)
-
sparc64/include/sun4v/ipi.h (added)
-
sparc64/include/sun4v/md.h (added)
-
sparc64/include/sun4v/regdef.h (added)
-
sparc64/include/sun4v/register.h (added)
-
sparc64/include/trap/exception.h (modified) (2 diffs)
-
sparc64/include/trap/interrupt.h (modified) (4 diffs)
-
sparc64/include/trap/mmu.h (modified) (1 diff)
-
sparc64/include/trap/regwin.h (modified) (2 diffs)
-
sparc64/include/trap/sun4u/interrupt.h (added)
-
sparc64/include/trap/sun4u/mmu.h (added)
-
sparc64/include/trap/sun4u/regwin.h (added)
-
sparc64/include/trap/sun4v/interrupt.h (added)
-
sparc64/include/trap/sun4v/mmu.h (added)
-
sparc64/include/trap/sun4v/regwin.h (added)
-
sparc64/include/trap/trap_table.h (modified) (1 diff)
-
sparc64/include/types.h (modified) (2 diffs)
-
sparc64/src/asm.S (modified) (2 diffs)
-
sparc64/src/cpu/sun4u/cpu.c (moved) (moved from kernel/arch/sparc64/src/cpu/cpu.c )
-
sparc64/src/cpu/sun4v/cpu.c (added)
-
sparc64/src/drivers/kbd.c (modified) (1 diff)
-
sparc64/src/drivers/niagara.c (added)
-
sparc64/src/drivers/tick.c (modified) (2 diffs)
-
sparc64/src/mm/page.c (modified) (1 diff)
-
sparc64/src/mm/sun4u/as.c (moved) (moved from kernel/arch/sparc64/src/mm/as.c )
-
sparc64/src/mm/sun4u/frame.c (moved) (moved from kernel/arch/sparc64/src/mm/frame.c )
-
sparc64/src/mm/sun4u/tlb.c (moved) (moved from kernel/arch/sparc64/src/mm/tlb.c ) (4 diffs)
-
sparc64/src/mm/sun4u/tsb.c (moved) (moved from kernel/arch/sparc64/src/mm/tsb.c )
-
sparc64/src/mm/sun4v/as.c (added)
-
sparc64/src/mm/sun4v/frame.c (added)
-
sparc64/src/mm/sun4v/tlb.c (added)
-
sparc64/src/mm/sun4v/tsb.c (added)
-
sparc64/src/proc/sun4u/scheduler.c (moved) (moved from kernel/arch/sparc64/src/proc/scheduler.c )
-
sparc64/src/proc/sun4v/scheduler.c (added)
-
sparc64/src/smp/sun4u/ipi.c (moved) (moved from kernel/arch/sparc64/src/smp/ipi.c ) (2 diffs)
-
sparc64/src/smp/sun4u/smp.c (moved) (moved from kernel/arch/sparc64/src/smp/smp.c ) (1 diff)
-
sparc64/src/smp/sun4v/ipi.c (added)
-
sparc64/src/smp/sun4v/smp.c (added)
-
sparc64/src/sun4u/asm.S (added)
-
sparc64/src/sun4u/sparc64.c (moved) (moved from kernel/arch/sparc64/src/sparc64.c ) (2 diffs)
-
sparc64/src/sun4u/start.S (moved) (moved from kernel/arch/sparc64/src/start.S )
-
sparc64/src/sun4v/asm.S (added)
-
sparc64/src/sun4v/md.c (added)
-
sparc64/src/sun4v/sparc64.c (added)
-
sparc64/src/sun4v/start.S (added)
-
sparc64/src/trap/exception.c (modified) (1 diff)
-
sparc64/src/trap/interrupt.c (modified) (3 diffs)
-
sparc64/src/trap/sun4u/interrupt.c (added)
-
sparc64/src/trap/sun4u/mmu.S (moved) (moved from kernel/arch/sparc64/src/trap/mmu.S )
-
sparc64/src/trap/sun4u/trap_table.S (moved) (moved from kernel/arch/sparc64/src/trap/trap_table.S )
-
sparc64/src/trap/sun4v/interrupt.c (added)
-
sparc64/src/trap/sun4v/mmu.S (added)
-
sparc64/src/trap/sun4v/trap_table.S (added)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/abs32le/Makefile.inc
r1ccafee r0b9ac3c 30 30 # 31 31 32 BFD = binary 33 34 ifeq ($(COMPILER),gcc_cross) 35 TOOLCHAIN_DIR = $(CROSS_PREFIX)/$(CROSS_TARGET) 36 37 ifeq ($(CROSS_TARGET),arm32) 38 TARGET = arm-linux-gnu 39 ATSIGN = % 40 endif 41 42 ifeq ($(CROSS_TARGET),ia32) 43 TARGET = i686-pc-linux-gnu 44 endif 45 46 ifeq ($(CROSS_TARGET),mips32) 47 TARGET = mipsel-linux-gnu 48 GCC_CFLAGS += -mno-abicalls 49 endif 50 endif 51 52 ifeq ($(COMPILER),clang) 53 CLANG_ARCH = i386 54 endif 55 32 56 BITS = 32 33 57 ENDIANESS = LE … … 41 65 arch/$(KARCH)/src/userspace.c \ 42 66 arch/$(KARCH)/src/cpu/cpu.c \ 67 arch/$(KARCH)/src/ddi/ddi.c \ 68 arch/$(KARCH)/src/smp/smp.c \ 69 arch/$(KARCH)/src/smp/ipi.c \ 43 70 arch/$(KARCH)/src/mm/as.c \ 44 71 arch/$(KARCH)/src/mm/frame.c \ -
kernel/arch/abs32le/include/asm.h
r1ccafee r0b9ac3c 40 40 #include <config.h> 41 41 42 extern void interrupt_handlers(void); 42 static inline void asm_delay_loop(uint32_t usec) 43 { 44 } 43 45 44 extern void enable_l_apic_in_msr(void); 45 46 47 extern void asm_delay_loop(uint32_t); 48 extern void asm_fake_loop(uint32_t); 49 50 51 static inline void cpu_halt(void) 46 static inline __attribute__((noreturn)) void cpu_halt(void) 52 47 { 53 48 /* On real hardware this should stop processing further -
kernel/arch/abs32le/include/atomic.h
r1ccafee r0b9ac3c 54 54 } 55 55 56 static inline longatomic_postinc(atomic_t *val)56 static inline atomic_count_t atomic_postinc(atomic_t *val) 57 57 { 58 58 /* On real hardware both the storing of the previous … … 60 60 atomic action. */ 61 61 62 longprev = val->count;62 atomic_count_t prev = val->count; 63 63 64 64 val->count++; … … 66 66 } 67 67 68 static inline longatomic_postdec(atomic_t *val)68 static inline atomic_count_t atomic_postdec(atomic_t *val) 69 69 { 70 70 /* On real hardware both the storing of the previous … … 72 72 atomic action. */ 73 73 74 longprev = val->count;74 atomic_count_t prev = val->count; 75 75 76 76 val->count--; … … 81 81 #define atomic_predec(val) (atomic_postdec(val) - 1) 82 82 83 static inline uint32_t test_and_set(atomic_t *val) { 84 uint32_t v; 85 86 asm volatile ( 87 "movl $1, %[v]\n" 88 "xchgl %[v], %[count]\n" 89 : [v] "=r" (v), [count] "+m" (val->count) 90 ); 91 92 return v; 83 static inline atomic_count_t test_and_set(atomic_t *val) 84 { 85 atomic_count_t prev = val->count; 86 val->count = 1; 87 return prev; 93 88 } 94 89 95 /** ia32 specific fast spinlock */96 90 static inline void atomic_lock_arch(atomic_t *val) 97 91 { 98 uint32_t tmp; 99 100 preemption_disable(); 101 asm volatile ( 102 "0:\n" 103 "pause\n" /* Pentium 4's HT love this instruction */ 104 "mov %[count], %[tmp]\n" 105 "testl %[tmp], %[tmp]\n" 106 "jnz 0b\n" /* lightweight looping on locked spinlock */ 107 108 "incl %[tmp]\n" /* now use the atomic operation */ 109 "xchgl %[count], %[tmp]\n" 110 "testl %[tmp], %[tmp]\n" 111 "jnz 0b\n" 112 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 113 ); 114 /* 115 * Prevent critical section code from bleeding out this way up. 116 */ 117 CS_ENTER_BARRIER(); 92 do { 93 while (val->count); 94 } while (test_and_set(val)); 118 95 } 119 96 -
kernel/arch/abs32le/include/barrier.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia3229 /** @addtogroup abs32le 30 30 * @{ 31 31 */ … … 33 33 */ 34 34 35 #ifndef KERN_ia32_BARRIER_H_ 36 #define KERN_ia32_BARRIER_H_ 37 38 /* 39 * NOTE: 40 * No barriers for critical section (i.e. spinlock) on IA-32 are needed: 41 * - spinlock_lock() and spinlock_trylock() use serializing XCHG instruction 42 * - writes cannot pass reads on IA-32 => spinlock_unlock() needs no barriers 43 */ 35 #ifndef KERN_abs32le_BARRIER_H_ 36 #define KERN_abs32le_BARRIER_H_ 44 37 45 38 /* … … 47 40 */ 48 41 49 #define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")50 #define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")42 #define CS_ENTER_BARRIER() 43 #define CS_LEAVE_BARRIER() 51 44 52 static inline void cpuid_serialization(void) 53 { 54 asm volatile ( 55 "xorl %%eax, %%eax\n" 56 "cpuid\n" 57 ::: "eax", "ebx", "ecx", "edx", "memory" 58 ); 59 } 45 #define memory_barrier() 46 #define read_barrier() 47 #define write_barrier() 60 48 61 #if defined(CONFIG_FENCES_P4) 62 #define memory_barrier() asm volatile ("mfence\n" ::: "memory") 63 #define read_barrier() asm volatile ("lfence\n" ::: "memory") 64 #ifdef CONFIG_WEAK_MEMORY 65 #define write_barrier() asm volatile ("sfence\n" ::: "memory") 66 #else 67 #define write_barrier() asm volatile ("" ::: "memory"); 68 #endif 69 #elif defined(CONFIG_FENCES_P3) 70 #define memory_barrier() cpuid_serialization() 71 #define read_barrier() cpuid_serialization() 72 #ifdef CONFIG_WEAK_MEMORY 73 #define write_barrier() asm volatile ("sfence\n" ::: "memory") 74 #else 75 #define write_barrier() asm volatile ("" ::: "memory"); 76 #endif 77 #else 78 #define memory_barrier() cpuid_serialization() 79 #define read_barrier() cpuid_serialization() 80 #ifdef CONFIG_WEAK_MEMORY 81 #define write_barrier() cpuid_serialization() 82 #else 83 #define write_barrier() asm volatile ("" ::: "memory"); 84 #endif 85 #endif 86 87 /* 88 * On ia32, the hardware takes care about instruction and data cache coherence, 89 * 
even on SMP systems. We issue a write barrier to be sure that writes 90 * queueing in the store buffer drain to the memory (even though it would be 91 * sufficient for them to drain to the D-cache). 92 */ 93 #define smc_coherence(a) write_barrier() 94 #define smc_coherence_block(a, l) write_barrier() 49 #define smc_coherence(addr) 50 #define smc_coherence_block(addr, size) 95 51 96 52 #endif -
kernel/arch/abs32le/include/context.h
r1ccafee r0b9ac3c 36 36 #define KERN_abs32le_CONTEXT_H_ 37 37 38 #ifdef KERNEL39 #include <arch/types.h>40 41 38 #define STACK_ITEM_SIZE 4 42 39 #define SP_DELTA 0 43 40 44 #define context_set(c, _pc, stack, size) \ 45 do { \ 46 (c)->pc = (uintptr_t) (_pc); \ 47 } while (0) 48 49 #endif /* KERNEL */ 41 #define context_set(ctx, pc, stack, size) \ 42 context_set_generic(ctx, pc, stack, size) 50 43 51 44 /* -
kernel/arch/abs32le/include/context_offset.h
r1ccafee r0b9ac3c 37 37 38 38 #define OFFSET_PC 0x00 39 40 #ifdef KERNEL 41 #define OFFSET_IPL 0x04 42 #else 43 #define OFFSET_TLS 0x04 44 #endif 39 #define OFFSET_IPL 0x04 45 40 46 41 #endif -
kernel/arch/abs32le/include/interrupt.h
r1ccafee r0b9ac3c 40 40 #define IVT_ITEMS 0 41 41 #define IVT_FIRST 0 42 43 #define VECTOR_TLB_SHOOTDOWN_IPI 0 42 44 43 45 /* -
kernel/arch/abs32le/include/memstr.h
r1ccafee r0b9ac3c 36 36 #define KERN_abs32le_MEMSTR_H_ 37 37 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 42 43 extern int memcmp(const void *, const void *, size_t); 38 #define memcpy(dst, src, cnt) _memcpy((dst), (src), (cnt)) 39 #define memsetb(dst, cnt, val) _memsetb((dst), (cnt), (val)) 40 #define memsetw(dst, cnt, val) _memsetw((dst), (cnt), (val)) 44 41 45 42 #endif -
kernel/arch/abs32le/include/mm/frame.h
r1ccafee r0b9ac3c 40 40 41 41 #ifdef KERNEL 42 #ifndef __ASM__43 42 44 43 #include <arch/types.h> … … 47 46 extern void physmem_print(void); 48 47 49 #endif /* __ASM__ */50 48 #endif /* KERNEL */ 51 49 -
kernel/arch/abs32le/include/mm/page.h
r1ccafee r0b9ac3c 43 43 #ifdef KERNEL 44 44 45 #ifndef __ASM__ 46 #define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) 47 #define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) 48 #else 49 #define KA2PA(x) ((x) - 0x80000000) 50 #define PA2KA(x) ((x) + 0x80000000) 51 #endif 45 #define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) 46 #define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) 52 47 53 48 /* … … 122 117 #define PTE_EXECUTABLE_ARCH(p) 1 123 118 124 #ifndef __ASM__125 126 119 #include <mm/mm.h> 127 120 #include <arch/interrupt.h> … … 129 122 #include <typedefs.h> 130 123 131 /* Page fault error codes. */132 133 /** When bit on this position is 0, the page fault was caused by a not-present134 * page.135 */136 #define PFERR_CODE_P (1 << 0)137 138 /** When bit on this position is 1, the page fault was caused by a write. */139 #define PFERR_CODE_RW (1 << 1)140 141 /** When bit on this position is 1, the page fault was caused in user mode. */142 #define PFERR_CODE_US (1 << 2)143 144 /** When bit on this position is 1, a reserved bit was set in page directory. */145 #define PFERR_CODE_RSVD (1 << 3)146 147 124 /** Page Table Entry. */ 148 125 typedef struct { 149 unsigned present : 1; 150 unsigned writeable : 1; 151 unsigned uaccessible : 1; 152 unsigned page_write_through : 1; 153 unsigned page_cache_disable : 1; 154 unsigned accessed : 1; 155 unsigned dirty : 1; 156 unsigned pat : 1; 157 unsigned global : 1; 158 unsigned soft_valid : 1; /**< Valid content even if the present bit is not set. */ 159 unsigned avl : 2; 160 unsigned frame_address : 20; 161 } __attribute__ ((packed)) pte_t; 126 unsigned int present : 1; 127 unsigned int writeable : 1; 128 unsigned int uaccessible : 1; 129 unsigned int page_write_through : 1; 130 unsigned int page_cache_disable : 1; 131 unsigned int accessed : 1; 132 unsigned int dirty : 1; 133 unsigned int pat : 1; 134 unsigned int global : 1; 135 136 /** Valid content even if the present bit is not set. 
*/ 137 unsigned int soft_valid : 1; 138 unsigned int avl : 2; 139 unsigned int frame_address : 20; 140 } __attribute__((packed)) pte_t; 162 141 163 142 static inline unsigned int get_pt_flags(pte_t *pt, size_t i) … … 192 171 193 172 extern void page_arch_init(void); 194 extern void page_fault(int n, istate_t *istate); 195 196 #endif /* __ASM__ */ 173 extern void page_fault(int, istate_t *); 197 174 198 175 #endif /* KERNEL */ -
kernel/arch/abs32le/include/types.h
r1ccafee r0b9ac3c 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/abs32le/src/abs32le.c
r1ccafee r0b9ac3c 35 35 #include <arch.h> 36 36 #include <arch/types.h> 37 #include <arch/context.h>38 37 #include <arch/interrupt.h> 39 38 #include <arch/asm.h> 40 39 40 #include <func.h> 41 41 #include <config.h> 42 #include <errno.h> 43 #include <context.h> 44 #include <fpu_context.h> 42 45 #include <interrupt.h> 46 #include <syscall/copy.h> 43 47 #include <ddi/irq.h> 44 48 #include <proc/thread.h> … … 47 51 #include <sysinfo/sysinfo.h> 48 52 #include <memstr.h> 53 54 char memcpy_from_uspace_failover_address; 55 char memcpy_to_uspace_failover_address; 49 56 50 57 void arch_pre_mm_init(void) … … 81 88 unative_t sys_tls_set(unative_t addr) 82 89 { 83 return 0;90 return EOK; 84 91 } 85 92 … … 102 109 } 103 110 111 void irq_initialize_arch(irq_t *irq) 112 { 113 (void) irq; 114 } 115 116 void panic_printf(char *fmt, ...) 117 { 118 va_list args; 119 120 va_start(args, fmt); 121 vprintf(fmt, args); 122 va_end(args); 123 124 halt(); 125 } 126 127 int context_save_arch(context_t *ctx) 128 { 129 return 1; 130 } 131 132 void context_restore_arch(context_t *ctx) 133 { 134 while (true); 135 } 136 137 void fpu_init(void) 138 { 139 } 140 141 void fpu_context_save(fpu_context_t *ctx) 142 { 143 } 144 145 void fpu_context_restore(fpu_context_t *ctx) 146 { 147 } 148 149 int memcpy_from_uspace(void *dst, const void *uspace_src, size_t size) 150 { 151 return EOK; 152 } 153 154 int memcpy_to_uspace(void *uspace_dst, const void *src, size_t size) 155 { 156 return EOK; 157 } 158 104 159 /** @} 105 160 */ -
kernel/arch/abs32le/src/debug/stacktrace.c
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia3229 /** @addtogroup abs32le 30 30 * @{ 31 31 */ … … 38 38 #include <typedefs.h> 39 39 40 #define FRAME_OFFSET_FP_PREV 041 #define FRAME_OFFSET_RA 142 43 40 bool kernel_frame_pointer_validate(uintptr_t fp) 44 41 { 45 return fp != 0;42 return true; 46 43 } 47 44 48 45 bool kernel_frame_pointer_prev(uintptr_t fp, uintptr_t *prev) 49 46 { 50 uint32_t *stack = (void *) fp;51 *prev = stack[FRAME_OFFSET_FP_PREV];52 47 return true; 53 48 } … … 55 50 bool kernel_return_address_get(uintptr_t fp, uintptr_t *ra) 56 51 { 57 uint32_t *stack = (void *) fp;58 *ra = stack[FRAME_OFFSET_RA];59 52 return true; 60 53 } … … 62 55 bool uspace_frame_pointer_validate(uintptr_t fp) 63 56 { 64 return fp != 0;57 return true; 65 58 } 66 59 67 60 bool uspace_frame_pointer_prev(uintptr_t fp, uintptr_t *prev) 68 61 { 69 return !copy_from_uspace((void *) prev, 70 (uint32_t *) fp + FRAME_OFFSET_FP_PREV, sizeof(*prev)); 62 return true; 71 63 } 72 64 73 65 bool uspace_return_address_get(uintptr_t fp, uintptr_t *ra) 74 66 { 75 return !copy_from_uspace((void *) ra, (uint32_t *) fp + FRAME_OFFSET_RA, 76 sizeof(*ra)); 67 return true; 68 } 69 70 uintptr_t frame_pointer_get(void) 71 { 72 return 0; 73 } 74 75 uintptr_t program_counter_get(void) 76 { 77 return 0; 77 78 } 78 79 -
kernel/arch/amd64/include/asm.h
r1ccafee r0b9ac3c 68 68 } 69 69 70 static inline void cpu_halt(void)71 { 72 asm volatile (73 "0:\n"74 "hlt\n"75 " jmp 0b\n"76 );70 static inline void __attribute__((noreturn)) cpu_halt(void) 71 { 72 while (true) { 73 asm volatile ( 74 "hlt\n" 75 ); 76 } 77 77 } 78 78 -
kernel/arch/amd64/include/atomic.h
r1ccafee r0b9ac3c 40 40 #include <preemption.h> 41 41 42 static inline void atomic_inc(atomic_t *val) { 42 static inline void atomic_inc(atomic_t *val) 43 { 43 44 #ifdef CONFIG_SMP 44 45 asm volatile ( … … 54 55 } 55 56 56 static inline void atomic_dec(atomic_t *val) { 57 static inline void atomic_dec(atomic_t *val) 58 { 57 59 #ifdef CONFIG_SMP 58 60 asm volatile ( … … 68 70 } 69 71 70 static inline long atomic_postinc(atomic_t *val)72 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 73 { 72 longr = 1;74 atomic_count_t r = 1; 73 75 74 76 asm volatile ( 75 77 "lock xaddq %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 78 : [count] "+m" (val->count), 79 [r] "+r" (r) 77 80 ); 78 81 … … 80 83 } 81 84 82 static inline long atomic_postdec(atomic_t *val)85 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 86 { 84 longr = -1;87 atomic_count_t r = -1; 85 88 86 89 asm volatile ( 87 90 "lock xaddq %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r" (r) 91 : [count] "+m" (val->count), 92 [r] "+r" (r) 89 93 ); 90 94 … … 95 99 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 100 97 static inline uint64_t test_and_set(atomic_t *val) { 98 uint64_t v; 101 static inline atomic_count_t test_and_set(atomic_t *val) 102 { 103 atomic_count_t v = 1; 99 104 100 105 asm volatile ( 101 "movq $1, %[v]\n"102 106 "xchgq %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 107 : [v] "+r" (v), 108 [count] "+m" (val->count) 104 109 ); 105 110 … … 107 112 } 108 113 109 110 114 /** amd64 specific fast spinlock */ 111 115 static inline void atomic_lock_arch(atomic_t *val) 112 116 { 113 uint64_t tmp;117 atomic_count_t tmp; 114 118 115 119 preemption_disable(); … … 125 129 "testq %[tmp], %[tmp]\n" 126 130 "jnz 0b\n" 127 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 131 : [count] "+m" (val->count), 132 [tmp] "=&r" (tmp) 128 133 ); 134 129 135 /* 130 136 * Prevent critical section code from bleeding out this way up. -
kernel/arch/amd64/include/interrupt.h
r1ccafee r0b9ac3c 54 54 #define IRQ_PIC_SPUR 7 55 55 #define IRQ_MOUSE 12 56 #define IRQ_DP8390 9 56 57 57 58 /* this one must have four least significant bits set to ones */ -
kernel/arch/amd64/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup amd64 29 /** @addtogroup amd64 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/amd64/include/types.h
r1ccafee r0b9ac3c 55 55 typedef uint64_t unative_t; 56 56 typedef int64_t native_t; 57 typedef uint64_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/amd64/src/amd64.c
r1ccafee r0b9ac3c 228 228 (uintptr_t) I8042_BASE); 229 229 #endif 230 231 #ifdef CONFIG_NETIF_DP8390 232 trap_virtual_enable_irqs(1 << IRQ_DP8390); 233 sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390); 234 #endif 230 235 } 231 236 … … 278 283 } 279 284 285 void irq_initialize_arch(irq_t *irq) 286 { 287 (void) irq; 288 } 289 280 290 /** @} 281 291 */ -
kernel/arch/amd64/src/debugger.c
r1ccafee r0b9ac3c 201 201 202 202 /* Send IPI */ 203 #ifdef CONFIG_SMP204 203 // ipi_broadcast(VECTOR_DEBUG_IPI); 205 #endif206 204 207 205 return curidx; … … 262 260 spinlock_unlock(&bkpoint_lock); 263 261 interrupts_restore(ipl); 264 #ifdef CONFIG_SMP 265 // ipi_broadcast(VECTOR_DEBUG_IPI); 266 #endif 262 // ipi_broadcast(VECTOR_DEBUG_IPI); 267 263 } 268 264 -
kernel/arch/arm32/include/asm.h
r1ccafee r0b9ac3c 96 96 } 97 97 98 extern void cpu_halt(void) ;98 extern void cpu_halt(void) __attribute__((noreturn)); 99 99 extern void asm_delay_loop(uint32_t t); 100 100 extern void userspace_asm(uintptr_t ustack, uintptr_t uspace_uarg, -
kernel/arch/arm32/include/atomic.h
r1ccafee r0b9ac3c 47 47 * 48 48 */ 49 static inline long atomic_add(atomic_t *val, int i)49 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 50 50 { 51 long ret;52 53 51 /* 54 52 * This implementation is for UP pre-ARMv6 systems where we do not have … … 57 55 ipl_t ipl = interrupts_disable(); 58 56 val->count += i; 59 ret = val->count;57 atomic_count_t ret = val->count; 60 58 interrupts_restore(ipl); 61 59 … … 66 64 * 67 65 * @param val Variable to be incremented. 66 * 68 67 */ 69 68 static inline void atomic_inc(atomic_t *val) … … 75 74 * 76 75 * @param val Variable to be decremented. 76 * 77 77 */ 78 78 static inline void atomic_dec(atomic_t *val) { … … 84 84 * @param val Variable to be incremented. 85 85 * @return Value after incrementation. 86 * 86 87 */ 87 static inline longatomic_preinc(atomic_t *val)88 static inline atomic_count_t atomic_preinc(atomic_t *val) 88 89 { 89 90 return atomic_add(val, 1); … … 94 95 * @param val Variable to be decremented. 95 96 * @return Value after decrementation. 97 * 96 98 */ 97 static inline longatomic_predec(atomic_t *val)99 static inline atomic_count_t atomic_predec(atomic_t *val) 98 100 { 99 101 return atomic_add(val, -1); … … 104 106 * @param val Variable to be incremented. 105 107 * @return Value before incrementation. 108 * 106 109 */ 107 static inline longatomic_postinc(atomic_t *val)110 static inline atomic_count_t atomic_postinc(atomic_t *val) 108 111 { 109 112 return atomic_add(val, 1) - 1; … … 114 117 * @param val Variable to be decremented. 115 118 * @return Value before decrementation. 119 * 116 120 */ 117 static inline longatomic_postdec(atomic_t *val)121 static inline atomic_count_t atomic_postdec(atomic_t *val) 118 122 { 119 123 return atomic_add(val, -1) + 1; -
kernel/arch/arm32/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup arm32 29 /** @addtogroup arm32 30 30 * @{ 31 31 */ … … 39 39 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 40 40 41 extern void memsetw(void *dst, size_t cnt, uint16_t x); 42 extern void memsetb(void *dst, size_t cnt, uint8_t x); 43 44 extern int memcmp(const void *a, const void *b, size_t cnt); 41 extern void memsetw(void *, size_t, uint16_t); 42 extern void memsetb(void *, size_t, uint8_t); 45 43 46 44 #endif -
kernel/arch/arm32/include/types.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup arm32 29 /** @addtogroup arm32 30 30 * @{ 31 31 */ … … 38 38 39 39 #ifndef DOXYGEN 40 # define ATTRIBUTE_PACKED __attribute__((packed))40 #define ATTRIBUTE_PACKED __attribute__((packed)) 41 41 #else 42 #define ATTRIBUTE_PACKED42 #define ATTRIBUTE_PACKED 43 43 #endif 44 44 … … 62 62 typedef uint32_t unative_t; 63 63 typedef int32_t native_t; 64 typedef uint32_t atomic_count_t; 64 65 65 66 typedef struct { -
kernel/arch/arm32/src/arm32.c
r1ccafee r0b9ac3c 155 155 void cpu_halt(void) 156 156 { 157 machine_cpu_halt(); 157 while (true) 158 machine_cpu_halt(); 158 159 } 159 160 … … 162 163 { 163 164 /* not implemented */ 164 while ( 1);165 while (true); 165 166 } 166 167 … … 179 180 } 180 181 182 void irq_initialize_arch(irq_t *irq) 183 { 184 (void) irq; 185 } 186 181 187 /** @} 182 188 */ -
kernel/arch/ia32/include/asm.h
r1ccafee r0b9ac3c 60 60 * 61 61 */ 62 static inline void cpu_halt(void)63 { 64 asm volatile (65 "0:\n"66 "hlt\n"67 " jmp 0b\n"68 );62 static inline __attribute__((noreturn)) void cpu_halt(void) 63 { 64 while (true) { 65 asm volatile ( 66 "hlt\n" 67 ); 68 } 69 69 } 70 70 -
kernel/arch/ia32/include/atomic.h
r1ccafee r0b9ac3c 40 40 #include <preemption.h> 41 41 42 static inline void atomic_inc(atomic_t *val) { 42 static inline void atomic_inc(atomic_t *val) 43 { 43 44 #ifdef CONFIG_SMP 44 45 asm volatile ( … … 54 55 } 55 56 56 static inline void atomic_dec(atomic_t *val) { 57 static inline void atomic_dec(atomic_t *val) 58 { 57 59 #ifdef CONFIG_SMP 58 60 asm volatile ( … … 68 70 } 69 71 70 static inline long atomic_postinc(atomic_t *val)72 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 73 { 72 longr = 1;74 atomic_count_t r = 1; 73 75 74 76 asm volatile ( 75 77 "lock xaddl %[r], %[count]\n" 76 : [count] "+m" (val->count), [r] "+r" (r) 78 : [count] "+m" (val->count), 79 [r] "+r" (r) 77 80 ); 78 81 … … 80 83 } 81 84 82 static inline long atomic_postdec(atomic_t *val)85 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 86 { 84 longr = -1;87 atomic_count_t r = -1; 85 88 86 89 asm volatile ( 87 90 "lock xaddl %[r], %[count]\n" 88 : [count] "+m" (val->count), [r] "+r"(r) 91 : [count] "+m" (val->count), 92 [r] "+r" (r) 89 93 ); 90 94 … … 95 99 #define atomic_predec(val) (atomic_postdec(val) - 1) 96 100 97 static inline uint32_t test_and_set(atomic_t *val) { 98 uint32_t v; 101 static inline atomic_count_t test_and_set(atomic_t *val) 102 { 103 atomic_count_t v = 1; 99 104 100 105 asm volatile ( 101 "movl $1, %[v]\n"102 106 "xchgl %[v], %[count]\n" 103 : [v] "=r" (v), [count] "+m" (val->count) 107 : [v] "+r" (v), 108 [count] "+m" (val->count) 104 109 ); 105 110 … … 110 115 static inline void atomic_lock_arch(atomic_t *val) 111 116 { 112 uint32_t tmp;117 atomic_count_t tmp; 113 118 114 119 preemption_disable(); … … 124 129 "testl %[tmp], %[tmp]\n" 125 130 "jnz 0b\n" 126 : [count] "+m" (val->count), [tmp] "=&r" (tmp) 131 : [count] "+m" (val->count), 132 [tmp] "=&r" (tmp) 127 133 ); 134 128 135 /* 129 136 * Prevent critical section code from bleeding out this way up. -
kernel/arch/ia32/include/interrupt.h
r1ccafee r0b9ac3c 54 54 #define IRQ_PIC_SPUR 7 55 55 #define IRQ_MOUSE 12 56 #define IRQ_DP8390 9 56 57 57 58 /* this one must have four least significant bits set to ones */ -
kernel/arch/ia32/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia32 29 /** @addtogroup ia32 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/ia32/include/types.h
r1ccafee r0b9ac3c 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/ia32/src/ia32.c
r1ccafee r0b9ac3c 186 186 (uintptr_t) I8042_BASE); 187 187 #endif 188 189 #ifdef CONFIG_NETIF_DP8390 190 trap_virtual_enable_irqs(1 << IRQ_DP8390); 191 sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390); 192 #endif 188 193 } 189 194 … … 234 239 } 235 240 241 void irq_initialize_arch(irq_t *irq) 242 { 243 (void) irq; 244 } 245 236 246 /** @} 237 247 */ -
kernel/arch/ia32/src/smp/ipi.c
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia32 29 /** @addtogroup ia32 30 30 * @{ 31 31 */ -
kernel/arch/ia64/include/asm.h
r1ccafee r0b9ac3c 428 428 } 429 429 430 extern void cpu_halt(void) ;430 extern void cpu_halt(void) __attribute__((noreturn)); 431 431 extern void cpu_sleep(void); 432 432 extern void asm_delay_loop(uint32_t t); -
kernel/arch/ia64/include/atomic.h
r1ccafee r0b9ac3c 36 36 #define KERN_ia64_ATOMIC_H_ 37 37 38 static inline uint64_t test_and_set(atomic_t *val)38 static inline atomic_count_t test_and_set(atomic_t *val) 39 39 { 40 uint64_t v;41 40 atomic_count_t v; 41 42 42 asm volatile ( 43 43 "movl %[v] = 0x1;;\n" … … 53 53 { 54 54 do { 55 while (val->count) 56 ; 55 while (val->count); 57 56 } while (test_and_set(val)); 58 57 } … … 60 59 static inline void atomic_inc(atomic_t *val) 61 60 { 62 longv;61 atomic_count_t v; 63 62 64 63 asm volatile ( … … 71 70 static inline void atomic_dec(atomic_t *val) 72 71 { 73 longv;72 atomic_count_t v; 74 73 75 74 asm volatile ( … … 80 79 } 81 80 82 static inline longatomic_preinc(atomic_t *val)81 static inline atomic_count_t atomic_preinc(atomic_t *val) 83 82 { 84 longv;83 atomic_count_t v; 85 84 86 85 asm volatile ( … … 93 92 } 94 93 95 static inline longatomic_predec(atomic_t *val)94 static inline atomic_count_t atomic_predec(atomic_t *val) 96 95 { 97 longv;96 atomic_count_t v; 98 97 99 98 asm volatile ( … … 106 105 } 107 106 108 static inline longatomic_postinc(atomic_t *val)107 static inline atomic_count_t atomic_postinc(atomic_t *val) 109 108 { 110 longv;109 atomic_count_t v; 111 110 112 111 asm volatile ( … … 119 118 } 120 119 121 static inline longatomic_postdec(atomic_t *val)120 static inline atomic_count_t atomic_postdec(atomic_t *val) 122 121 { 123 longv;122 atomic_count_t v; 124 123 125 124 asm volatile ( -
kernel/arch/ia64/include/context.h
r1ccafee r0b9ac3c 48 48 */ 49 49 #define SP_DELTA (0 + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)) 50 51 #ifdef context_set52 #undef context_set53 #endif54 50 55 51 /* RSE stack starts at the bottom of memory stack. */ -
kernel/arch/ia64/include/interrupt.h
r1ccafee r0b9ac3c 61 61 #define IRQ_KBD (0x01 + LEGACY_INTERRUPT_BASE) 62 62 #define IRQ_MOUSE (0x0c + LEGACY_INTERRUPT_BASE) 63 #define IRQ_DP8390 (0x09 + LEGACY_INTERRUPT_BASE) 63 64 64 65 /** General Exception codes. */ -
kernel/arch/ia64/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/ia64/include/types.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ia64 29 /** @addtogroup ia64 30 30 * @{ 31 31 */ … … 63 63 typedef uint64_t unative_t; 64 64 typedef int64_t native_t; 65 typedef uint64_t atomic_count_t; 65 66 66 67 typedef struct { -
kernel/arch/ia64/src/ia64.c
r1ccafee r0b9ac3c 44 44 #include <arch/stack.h> 45 45 #include <arch/mm/page.h> 46 #include <interrupt.h> 46 47 #include <mm/as.h> 47 48 #include <config.h> … … 211 212 (uintptr_t) I8042_BASE); 212 213 #endif 213 214 215 #ifdef CONFIG_NETIF_DP8390 216 sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390); 217 #endif 218 214 219 sysinfo_set_item_val("ia64_iospace", NULL, true); 215 220 sysinfo_set_item_val("ia64_iospace.address", NULL, true); … … 280 285 } 281 286 287 void irq_initialize_arch(irq_t *irq) 288 { 289 (void) irq; 290 } 291 282 292 /** @} 283 293 */ -
kernel/arch/mips32/include/asm.h
r1ccafee r0b9ac3c 66 66 } 67 67 68 extern void cpu_halt(void) ;68 extern void cpu_halt(void) __attribute__((noreturn)); 69 69 extern void asm_delay_loop(uint32_t t); 70 70 extern void userspace_asm(uintptr_t ustack, uintptr_t uspace_uarg, -
kernel/arch/mips32/include/atomic.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 51 51 * 52 52 * @return Value after addition. 53 * 53 54 */ 54 static inline long atomic_add(atomic_t *val, int i)55 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 55 56 { 56 long tmp, v; 57 atomic_count_t tmp; 58 atomic_count_t v; 57 59 58 60 asm volatile ( … … 64 66 " beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ 65 67 " nop\n" 66 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 67 : "r" (i), "i" (0) 68 : "=&r" (tmp), 69 "+m" (val->count), 70 "=&r" (v) 71 : "r" (i), 72 "i" (0) 68 73 ); 69 74 … … 71 76 } 72 77 73 static inline uint32_t test_and_set(atomic_t *val) { 74 uint32_t tmp, v; 78 static inline atomic_count_t test_and_set(atomic_t *val) 79 { 80 atomic_count_t tmp; 81 atomic_count_t v; 75 82 76 83 asm volatile ( … … 82 89 " beqz %0, 1b\n" 83 90 "2:\n" 84 : "=&r" (tmp), "+m" (val->count), "=&r" (v) 91 : "=&r" (tmp), 92 "+m" (val->count), 93 "=&r" (v) 85 94 : "i" (1) 86 95 ); … … 89 98 } 90 99 91 static inline void atomic_lock_arch(atomic_t *val) { 100 static inline void atomic_lock_arch(atomic_t *val) 101 { 92 102 do { 93 while (val->count) 94 ; 103 while (val->count); 95 104 } while (test_and_set(val)); 96 105 } -
kernel/arch/mips32/include/context.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 42 42 * Put one item onto the stack to support get_stack_base() and align it up. 43 43 */ 44 #define SP_DELTA (0 + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)) 45 44 #define SP_DELTA (0 + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)) 46 45 47 46 #ifndef __ASM__ 48 47 49 48 #include <arch/types.h> 49 50 #define context_set(ctx, pc, stack, size) \ 51 context_set_generic(ctx, pc, stack, size) 50 52 51 53 /* -
kernel/arch/mips32/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/mips32/include/types.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup mips32 29 /** @addtogroup mips32 30 30 * @{ 31 31 */ … … 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/mips32/src/mips32.c
r1ccafee r0b9ac3c 46 46 #include <sysinfo/sysinfo.h> 47 47 #include <arch/interrupt.h> 48 #include <interrupt.h> 48 49 #include <console/chardev.h> 49 50 #include <arch/barrier.h> … … 257 258 } 258 259 260 void irq_initialize_arch(irq_t *irq) 261 { 262 (void) irq; 263 } 264 259 265 /** @} 260 266 */ -
kernel/arch/mips32/src/smp/dorder.c
r1ccafee r0b9ac3c 33 33 */ 34 34 35 #include <arch/smp/dorder.h> 35 #include <smp/ipi.h> 36 37 #ifdef CONFIG_SMP 36 38 37 39 #define MSIM_DORDER_ADDRESS 0xB0000004 … … 39 41 void ipi_broadcast_arch(int ipi) 40 42 { 41 #ifdef CONFIG_SMP42 43 *((volatile unsigned int *) MSIM_DORDER_ADDRESS) = 0x7FFFFFFF; 44 } 45 43 46 #endif 44 }45 47 46 48 /** @} -
kernel/arch/ppc32/include/asm.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 146 146 } 147 147 148 void cpu_halt(void); 149 void asm_delay_loop(uint32_t t); 150 148 extern void cpu_halt(void) __attribute__((noreturn)); 149 extern void asm_delay_loop(uint32_t t); 151 150 extern void userspace_asm(uintptr_t uspace_uarg, uintptr_t stack, uintptr_t entry); 152 151 153 152 static inline void pio_write_8(ioport8_t *port, uint8_t v) 154 153 { 155 *port = v; 154 *port = v; 156 155 } 157 156 158 157 static inline void pio_write_16(ioport16_t *port, uint16_t v) 159 158 { 160 *port = v; 159 *port = v; 161 160 } 162 161 163 162 static inline void pio_write_32(ioport32_t *port, uint32_t v) 164 163 { 165 *port = v; 164 *port = v; 166 165 } 167 166 168 167 static inline uint8_t pio_read_8(ioport8_t *port) 169 168 { 170 return *port; 169 return *port; 171 170 } 172 171 173 172 static inline uint16_t pio_read_16(ioport16_t *port) 174 173 { 175 return *port; 174 return *port; 176 175 } 177 176 178 177 static inline uint32_t pio_read_32(ioport32_t *port) 179 178 { 180 return *port; 179 return *port; 181 180 } 182 181 -
kernel/arch/ppc32/include/atomic.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 38 38 static inline void atomic_inc(atomic_t *val) 39 39 { 40 longtmp;41 40 atomic_count_t tmp; 41 42 42 asm volatile ( 43 43 "1:\n" … … 46 46 "stwcx. %0, 0, %2\n" 47 47 "bne- 1b" 48 : "=&r" (tmp), "=m" (val->count) 49 : "r" (&val->count), "m" (val->count) 48 : "=&r" (tmp), 49 "=m" (val->count) 50 : "r" (&val->count), 51 "m" (val->count) 50 52 : "cc" 51 53 ); … … 54 56 static inline void atomic_dec(atomic_t *val) 55 57 { 56 longtmp;57 58 atomic_count_t tmp; 59 58 60 asm volatile ( 59 61 "1:\n" 60 62 "lwarx %0, 0, %2\n" 61 63 "addic %0, %0, -1\n" 62 "stwcx. %0, 0, %2\n"64 "stwcx. %0, 0, %2\n" 63 65 "bne- 1b" 64 : "=&r" (tmp), "=m" (val->count) 65 : "r" (&val->count), "m" (val->count) 66 : "=&r" (tmp), 67 "=m" (val->count) 68 : "r" (&val->count), 69 "m" (val->count) 66 70 : "cc" 67 71 ); 68 72 } 69 73 70 static inline longatomic_postinc(atomic_t *val)74 static inline atomic_count_t atomic_postinc(atomic_t *val) 71 75 { 72 76 atomic_inc(val); … … 74 78 } 75 79 76 static inline longatomic_postdec(atomic_t *val)80 static inline atomic_count_t atomic_postdec(atomic_t *val) 77 81 { 78 82 atomic_dec(val); … … 80 84 } 81 85 82 static inline longatomic_preinc(atomic_t *val)86 static inline atomic_count_t atomic_preinc(atomic_t *val) 83 87 { 84 88 atomic_inc(val); … … 86 90 } 87 91 88 static inline longatomic_predec(atomic_t *val)92 static inline atomic_count_t atomic_predec(atomic_t *val) 89 93 { 90 94 atomic_dec(val); -
kernel/arch/ppc32/include/context.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 38 38 #include <arch/types.h> 39 39 40 #define SP_DELTA 16 40 #define SP_DELTA 16 41 42 #define context_set(ctx, pc, stack, size) \ 43 context_set_generic(ctx, pc, stack, size) 41 44 42 45 typedef struct { … … 68 71 69 72 ipl_t ipl; 70 } __attribute__ ((packed)) context_t;73 } __attribute__((packed)) context_t; 71 74 72 75 #endif -
kernel/arch/ppc32/include/memstr.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/ppc32/include/types.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 55 55 typedef uint32_t unative_t; 56 56 typedef int32_t native_t; 57 typedef uint32_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/ppc32/src/ppc32.c
r1ccafee r0b9ac3c 39 39 #include <genarch/kbrd/kbrd.h> 40 40 #include <arch/interrupt.h> 41 #include <interrupt.h> 41 42 #include <genarch/fb/fb.h> 42 43 #include <genarch/fb/visuals.h> … … 47 48 #include <proc/uarg.h> 48 49 #include <console/console.h> 50 #include <sysinfo/sysinfo.h> 49 51 #include <ddi/irq.h> 50 52 #include <arch/drivers/pic.h> … … 58 60 59 61 bootinfo_t bootinfo; 62 63 static cir_t pic_cir; 64 static void *pic_cir_arg; 60 65 61 66 /** Performs ppc32-specific initialization before main_bsp() is called. */ … … 186 191 if (assigned_address) { 187 192 /* Initialize PIC */ 188 cir_t cir; 189 void *cir_arg; 190 pic_init(assigned_address[0].addr, PAGE_SIZE, &cir, &cir_arg); 191 193 pic_init(assigned_address[0].addr, PAGE_SIZE, &pic_cir, 194 &pic_cir_arg); 195 192 196 #ifdef CONFIG_MAC_KBD 193 197 uintptr_t pa = assigned_address[0].addr + 0x16000; … … 201 205 /* Initialize I/O controller */ 202 206 cuda_instance_t *cuda_instance = 203 cuda_init(cuda, IRQ_CUDA, cir,cir_arg);207 cuda_init(cuda, IRQ_CUDA, pic_cir, pic_cir_arg); 204 208 if (cuda_instance) { 205 209 kbrd_instance_t *kbrd_instance = kbrd_init(); … … 211 215 } 212 216 } 217 218 /* 219 * This is the necessary evil until the userspace driver is entirely 220 * self-sufficient. 221 */ 222 sysinfo_set_item_val("cuda", NULL, true); 223 sysinfo_set_item_val("cuda.inr", NULL, IRQ_CUDA); 224 sysinfo_set_item_val("cuda.address.physical", NULL, pa); 225 sysinfo_set_item_val("cuda.address.kernel", NULL, 226 (uintptr_t) cuda); 213 227 #endif 214 228 } … … 216 230 /* Consider only a single device for now */ 217 231 return false; 232 } 233 234 void irq_initialize_arch(irq_t *irq) 235 { 236 irq->cir = pic_cir; 237 irq->cir_arg = pic_cir_arg; 238 irq->preack = true; 218 239 } 219 240 -
kernel/arch/sparc64/Makefile.inc
r1ccafee r0b9ac3c 46 46 ifeq ($(PROCESSOR),us) 47 47 DEFS += -DUS 48 DEFS += -DSUN4U 49 USARCH = sun4u 48 50 endif 49 51 50 52 ifeq ($(PROCESSOR),us3) 51 53 DEFS += -DUS3 54 DEFS += -DSUN4U 55 USARCH = sun4u 56 endif 57 58 ifeq ($(PROCESSOR),sun4v) 59 DEFS += -DSUN4V 60 USARCH = sun4v 61 #MH 62 DEFS += -DUS 52 63 endif 53 64 54 65 ARCH_SOURCES = \ 55 arch/$(KARCH)/src/cpu/ cpu.c \66 arch/$(KARCH)/src/cpu/$(USARCH)/cpu.c \ 56 67 arch/$(KARCH)/src/debug/stacktrace.c \ 57 68 arch/$(KARCH)/src/debug/stacktrace_asm.S \ 58 69 arch/$(KARCH)/src/asm.S \ 70 arch/$(KARCH)/src/$(USARCH)/asm.S \ 59 71 arch/$(KARCH)/src/panic.S \ 60 72 arch/$(KARCH)/src/console.c \ … … 62 74 arch/$(KARCH)/src/fpu_context.c \ 63 75 arch/$(KARCH)/src/dummy.s \ 64 arch/$(KARCH)/src/mm/as.c \ 65 arch/$(KARCH)/src/mm/cache.S \ 66 arch/$(KARCH)/src/mm/frame.c \ 76 arch/$(KARCH)/src/mm/$(USARCH)/as.c \ 77 arch/$(KARCH)/src/mm/$(USARCH)/frame.c \ 67 78 arch/$(KARCH)/src/mm/page.c \ 68 arch/$(KARCH)/src/mm/ tlb.c \69 arch/$(KARCH)/src/ sparc64.c \70 arch/$(KARCH)/src/ start.S \71 arch/$(KARCH)/src/proc/ scheduler.c \79 arch/$(KARCH)/src/mm/$(USARCH)/tlb.c \ 80 arch/$(KARCH)/src/$(USARCH)/sparc64.c \ 81 arch/$(KARCH)/src/$(USARCH)/start.S \ 82 arch/$(KARCH)/src/proc/$(USARCH)/scheduler.c \ 72 83 arch/$(KARCH)/src/proc/thread.c \ 73 arch/$(KARCH)/src/trap/ mmu.S \74 arch/$(KARCH)/src/trap/ trap_table.S \84 arch/$(KARCH)/src/trap/$(USARCH)/mmu.S \ 85 arch/$(KARCH)/src/trap/$(USARCH)/trap_table.S \ 75 86 arch/$(KARCH)/src/trap/trap.c \ 76 87 arch/$(KARCH)/src/trap/exception.c \ … … 81 92 arch/$(KARCH)/src/drivers/sgcn.c \ 82 93 arch/$(KARCH)/src/drivers/pci.c \ 83 arch/$(KARCH)/src/drivers/fhc.c 94 arch/$(KARCH)/src/drivers/fhc.c \ 95 arch/$(KARCH)/src/trap/$(USARCH)/interrupt.c 96 97 ifeq ($(USARCH),sun4u) 98 ARCH_SOURCES += \ 99 arch/$(KARCH)/src/mm/cache.S 100 endif 101 102 ifeq ($(USARCH),sun4v) 103 ARCH_SOURCES += \ 104 arch/$(KARCH)/src/drivers/niagara.c \ 105 arch/$(KARCH)/src/sun4v/md.c 106 endif 84 
107 85 108 ifeq ($(CONFIG_FB),y) … … 90 113 ifeq ($(CONFIG_SMP),y) 91 114 ARCH_SOURCES += \ 92 arch/$(KARCH)/src/smp/ ipi.c \93 arch/$(KARCH)/src/smp/ smp.c115 arch/$(KARCH)/src/smp/$(USARCH)/smp.c \ 116 arch/$(KARCH)/src/smp/$(USARCH)/ipi.c 94 117 endif 95 118 96 119 ifeq ($(CONFIG_TSB),y) 97 120 ARCH_SOURCES += \ 98 arch/$(KARCH)/src/mm/ tsb.c121 arch/$(KARCH)/src/mm/$(USARCH)/tsb.c 99 122 endif -
kernel/arch/sparc64/include/arch.h
r1ccafee r0b9ac3c 38 38 #define KERN_sparc64_ARCH_H_ 39 39 40 #if defined (SUN4U) 41 #include <arch/sun4u/arch.h> 42 #elif defined (SUN4V) 43 #include <arch/sun4v/arch.h> 44 #endif 45 40 46 #define ASI_AIUP 0x10 /** Access to primary context with user privileges. */ 41 47 #define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */ 42 #define ASI_NUCLEUS_QUAD_LDD 0x24 /** ASI for 16-byte atomic loads. */43 #define ASI_DCACHE_TAG 0x47 /** ASI D-Cache Tag. */44 #define ASI_ICBUS_CONFIG 0x4a /** ASI of the UPA_CONFIG/FIREPLANE_CONFIG register. */45 48 46 49 #define NWINDOWS 8 /** Number of register window sets. */ … … 52 55 #endif /* __ASM__ */ 53 56 57 54 58 #endif 55 59 -
kernel/arch/sparc64/include/asm.h
r1ccafee r0b9ac3c 430 430 } 431 431 432 extern void cpu_halt(void) ;432 extern void cpu_halt(void) __attribute__((noreturn)); 433 433 extern void cpu_sleep(void); 434 434 extern void asm_delay_loop(const uint32_t usec); -
kernel/arch/sparc64/include/atomic.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup sparc64 29 /** @addtogroup sparc64 30 30 * @{ 31 31 */ … … 45 45 * 46 46 * @param val Atomic variable. 47 * @param i Signed value to be added.47 * @param i Signed value to be added. 48 48 * 49 49 * @return Value of the atomic variable as it existed before addition. 50 * 50 51 */ 51 static inline long atomic_add(atomic_t *val, int i)52 static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i) 52 53 { 53 uint64_t a, b; 54 54 atomic_count_t a; 55 atomic_count_t b; 56 55 57 do { 56 volatile uintptr_t x = (uint64_t) &val->count;57 58 a = *(( uint64_t *) x);58 volatile uintptr_t ptr = (uintptr_t) &val->count; 59 60 a = *((atomic_count_t *) ptr); 59 61 b = a + i; 60 asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), 61 "+r" (b) : "r" (a)); 62 63 asm volatile ( 64 "casx %0, %2, %1\n" 65 : "+m" (*((atomic_count_t *) ptr)), 66 "+r" (b) 67 : "r" (a) 68 ); 62 69 } while (a != b); 63 70 64 71 return a; 65 72 } 66 73 67 static inline longatomic_preinc(atomic_t *val)74 static inline atomic_count_t atomic_preinc(atomic_t *val) 68 75 { 69 76 return atomic_add(val, 1) + 1; 70 77 } 71 78 72 static inline longatomic_postinc(atomic_t *val)79 static inline atomic_count_t atomic_postinc(atomic_t *val) 73 80 { 74 81 return atomic_add(val, 1); 75 82 } 76 83 77 static inline longatomic_predec(atomic_t *val)84 static inline atomic_count_t atomic_predec(atomic_t *val) 78 85 { 79 86 return atomic_add(val, -1) - 1; 80 87 } 81 88 82 static inline longatomic_postdec(atomic_t *val)89 static inline atomic_count_t atomic_postdec(atomic_t *val) 83 90 { 84 91 return atomic_add(val, -1); … … 95 102 } 96 103 97 static inline longtest_and_set(atomic_t *val)104 static inline atomic_count_t test_and_set(atomic_t *val) 98 105 { 99 uint64_t v = 1; 100 volatile uintptr_t x = (uint64_t) &val->count; 101 102 asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), 103 "+r" (v) : "r" (0)); 104 106 atomic_count_t v = 1; 107 volatile 
uintptr_t ptr = (uintptr_t) &val->count; 108 109 asm volatile ( 110 "casx %0, %2, %1\n" 111 : "+m" (*((atomic_count_t *) ptr)), 112 "+r" (v) 113 : "r" (0) 114 ); 115 105 116 return v; 106 117 } … … 108 119 static inline void atomic_lock_arch(atomic_t *val) 109 120 { 110 uint64_t tmp1 = 1;111 uint64_t tmp2 = 0;112 113 volatile uintptr_t x = (uint64_t) &val->count;114 121 atomic_count_t tmp1 = 1; 122 atomic_count_t tmp2 = 0; 123 124 volatile uintptr_t ptr = (uintptr_t) &val->count; 125 115 126 preemption_disable(); 116 127 117 128 asm volatile ( 118 "0:\n" 119 "casx %0, %3, %1\n" 120 "brz %1, 2f\n" 121 "nop\n" 122 "1:\n" 123 "ldx %0, %2\n" 124 "brz %2, 0b\n" 125 "nop\n" 126 "ba %%xcc, 1b\n" 127 "nop\n" 128 "2:\n" 129 : "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0) 129 "0:\n" 130 "casx %0, %3, %1\n" 131 "brz %1, 2f\n" 132 "nop\n" 133 "1:\n" 134 "ldx %0, %2\n" 135 "brz %2, 0b\n" 136 "nop\n" 137 "ba %%xcc, 1b\n" 138 "nop\n" 139 "2:\n" 140 : "+m" (*((atomic_count_t *) ptr)), 141 "+r" (tmp1), 142 "+r" (tmp2) 143 : "r" (0) 130 144 ); 131 145 -
kernel/arch/sparc64/include/context.h
r1ccafee r0b9ac3c 42 42 #define SP_DELTA (STACK_WINDOW_SAVE_AREA_SIZE + STACK_ARG_SAVE_AREA_SIZE) 43 43 44 #ifdef context_set45 #undef context_set46 #endif47 48 44 #define context_set(c, _pc, stack, size) \ 49 45 (c)->pc = ((uintptr_t) _pc) - 8; \ -
kernel/arch/sparc64/include/cpu.h
r1ccafee r0b9ac3c 64 64 #endif 65 65 66 typedef struct {67 uint32_t mid; /**< Processor ID as read from68 UPA_CONFIG/FIREPLANE_CONFIG. */69 ver_reg_t ver;70 uint32_t clock_frequency; /**< Processor frequency in Hz. */71 uint64_t next_tick_cmpr; /**< Next clock interrupt should be72 generated when the TICK register73 matches this value. */74 } cpu_arch_t;75 66 67 #if defined (SUN4U) 68 #include <arch/sun4u/cpu.h> 69 #elif defined (SUN4V) 70 #include <arch/sun4v/cpu.h> 71 #endif 76 72 77 /**78 * Reads the module ID (agent ID/CPUID) of the current CPU.79 */80 static inline uint32_t read_mid(void)81 {82 uint64_t icbus_config = asi_u64_read(ASI_ICBUS_CONFIG, 0);83 icbus_config = icbus_config >> ICBUS_CONFIG_MID_SHIFT;84 #if defined (US)85 return icbus_config & 0x1f;86 #elif defined (US3)87 if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIII_I)88 return icbus_config & 0x1f;89 else90 return icbus_config & 0x3ff;91 #endif92 }93 73 94 74 #endif -
kernel/arch/sparc64/include/drivers/tick.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_TICK_H_ 37 37 38 #include <arch/asm.h> 38 39 #include <arch/interrupt.h> 40 41 /* mask of the "counter" field of the Tick register */ 42 #define TICK_COUNTER_MASK (~(1l << 63)) 39 43 40 44 extern void tick_init(void); 41 45 extern void tick_interrupt(int n, istate_t *istate); 46 47 /** 48 * Reads the Tick register counter. 49 */ 50 static inline uint64_t tick_counter_read(void) 51 { 52 return TICK_COUNTER_MASK & tick_read(); 53 } 42 54 43 55 #endif -
kernel/arch/sparc64/include/memstr.h
r1ccafee r0b9ac3c 38 38 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 39 39 40 extern void memsetw(void *dst, size_t cnt, uint16_t x); 41 extern void memsetb(void *dst, size_t cnt, uint8_t x); 42 43 extern int memcmp(const void *a, const void *b, size_t cnt); 40 extern void memsetw(void *, size_t, uint16_t); 41 extern void memsetb(void *, size_t, uint8_t); 44 42 45 43 #endif -
kernel/arch/sparc64/include/mm/as.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_AS_H_ 37 37 38 #include <arch/mm/tte.h> 39 40 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 1 41 42 #define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 43 #define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 44 #define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x0000000000000000 45 #define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffffffffffff 46 47 #define USTACK_ADDRESS_ARCH (0xffffffffffffffffULL - (PAGE_SIZE - 1)) 48 49 #ifdef CONFIG_TSB 50 51 /** TSB Tag Target register. */ 52 typedef union tsb_tag_target { 53 uint64_t value; 54 struct { 55 unsigned invalid : 1; /**< Invalidated by software. */ 56 unsigned : 2; 57 unsigned context : 13; /**< Software ASID. */ 58 unsigned : 6; 59 uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */ 60 } __attribute__ ((packed)); 61 } tsb_tag_target_t; 62 63 /** TSB entry. */ 64 typedef struct tsb_entry { 65 tsb_tag_target_t tag; 66 tte_data_t data; 67 } __attribute__ ((packed)) tsb_entry_t; 68 69 typedef struct { 70 tsb_entry_t *itsb; 71 tsb_entry_t *dtsb; 72 } as_arch_t; 73 74 #else 75 76 typedef struct { 77 } as_arch_t; 78 79 #endif /* CONFIG_TSB */ 80 81 #include <genarch/mm/as_ht.h> 82 83 #ifdef CONFIG_TSB 84 #include <arch/mm/tsb.h> 85 #define as_invalidate_translation_cache(as, page, cnt) \ 86 tsb_invalidate((as), (page), (cnt)) 87 #else 88 #define as_invalidate_translation_cache(as, page, cnt) 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/as.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/as.h> 89 42 #endif 90 91 extern void as_arch_init(void);92 43 93 44 #endif -
kernel/arch/sparc64/include/mm/frame.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_FRAME_H_ 37 37 38 /* 39 * Page size supported by the MMU. 40 * For 8K there is the nasty illegal virtual aliasing problem. 41 * Therefore, the kernel uses 8K only internally on the TLB and TSB levels. 42 */ 43 #define MMU_FRAME_WIDTH 13 /* 8K */ 44 #define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) 45 46 /* 47 * Page size exported to the generic memory management subsystems. 48 * This page size is not directly supported by the MMU, but we can emulate 49 * each 16K page with a pair of adjacent 8K pages. 50 */ 51 #define FRAME_WIDTH 14 /* 16K */ 52 #define FRAME_SIZE (1 << FRAME_WIDTH) 53 54 #ifdef KERNEL 55 #ifndef __ASM__ 56 57 #include <arch/types.h> 58 59 union frame_address { 60 uintptr_t address; 61 struct { 62 #if defined (US) 63 unsigned : 23; 64 uint64_t pfn : 28; /**< Physical Frame Number. */ 65 #elif defined (US3) 66 unsigned : 21; 67 uint64_t pfn : 30; /**< Physical Frame Number. */ 68 #endif 69 unsigned offset : 13; /**< Offset. */ 70 } __attribute__ ((packed)); 71 }; 72 73 typedef union frame_address frame_address_t; 74 75 extern uintptr_t last_frame; 76 extern uintptr_t end_of_identity; 77 78 extern void frame_arch_init(void); 79 #define physmem_print() 80 81 #endif 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/frame.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/frame.h> 82 42 #endif 83 43 -
kernel/arch/sparc64/include/mm/mmu.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_MMU_H_ 37 37 38 #if defined(US) 39 /* LSU Control Register ASI. */ 40 #define ASI_LSU_CONTROL_REG 0x45 /**< Load/Store Unit Control Register. */ 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/mmu.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/mmu.h> 41 42 #endif 42 43 43 /* I-MMU ASIs. */44 #define ASI_IMMU 0x5045 #define ASI_IMMU_TSB_8KB_PTR_REG 0x5146 #define ASI_IMMU_TSB_64KB_PTR_REG 0x5247 #define ASI_ITLB_DATA_IN_REG 0x5448 #define ASI_ITLB_DATA_ACCESS_REG 0x5549 #define ASI_ITLB_TAG_READ_REG 0x5650 #define ASI_IMMU_DEMAP 0x5751 52 /* Virtual Addresses within ASI_IMMU. */53 #define VA_IMMU_TSB_TAG_TARGET 0x0 /**< IMMU TSB tag target register. */54 #define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */55 #define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */56 #define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */57 #if defined (US3)58 #define VA_IMMU_PRIMARY_EXTENSION 0x48 /**< IMMU TSB primary extension register */59 #define VA_IMMU_NUCLEUS_EXTENSION 0x58 /**< IMMU TSB nucleus extension register */60 #endif61 62 63 /* D-MMU ASIs. */64 #define ASI_DMMU 0x5865 #define ASI_DMMU_TSB_8KB_PTR_REG 0x5966 #define ASI_DMMU_TSB_64KB_PTR_REG 0x5a67 #define ASI_DMMU_TSB_DIRECT_PTR_REG 0x5b68 #define ASI_DTLB_DATA_IN_REG 0x5c69 #define ASI_DTLB_DATA_ACCESS_REG 0x5d70 #define ASI_DTLB_TAG_READ_REG 0x5e71 #define ASI_DMMU_DEMAP 0x5f72 73 /* Virtual Addresses within ASI_DMMU. */74 #define VA_DMMU_TSB_TAG_TARGET 0x0 /**< DMMU TSB tag target register. */75 #define VA_PRIMARY_CONTEXT_REG 0x8 /**< DMMU primary context register. */76 #define VA_SECONDARY_CONTEXT_REG 0x10 /**< DMMU secondary context register. */77 #define VA_DMMU_SFSR 0x18 /**< DMMU sync fault status register. */78 #define VA_DMMU_SFAR 0x20 /**< DMMU sync fault address register. */79 #define VA_DMMU_TSB_BASE 0x28 /**< DMMU TSB base register. */80 #define VA_DMMU_TAG_ACCESS 0x30 /**< DMMU TLB tag access register. 
*/81 #define VA_DMMU_VA_WATCHPOINT_REG 0x38 /**< DMMU VA data watchpoint register. */82 #define VA_DMMU_PA_WATCHPOINT_REG 0x40 /**< DMMU PA data watchpoint register. */83 #if defined (US3)84 #define VA_DMMU_PRIMARY_EXTENSION 0x48 /**< DMMU TSB primary extension register */85 #define VA_DMMU_SECONDARY_EXTENSION 0x50 /**< DMMU TSB secondary extension register */86 #define VA_DMMU_NUCLEUS_EXTENSION 0x58 /**< DMMU TSB nucleus extension register */87 #endif88 89 #ifndef __ASM__90 91 #include <arch/asm.h>92 #include <arch/barrier.h>93 #include <arch/types.h>94 95 #if defined(US)96 /** LSU Control Register. */97 typedef union {98 uint64_t value;99 struct {100 unsigned : 23;101 unsigned pm : 8;102 unsigned vm : 8;103 unsigned pr : 1;104 unsigned pw : 1;105 unsigned vr : 1;106 unsigned vw : 1;107 unsigned : 1;108 unsigned fm : 16;109 unsigned dm : 1; /**< D-MMU enable. */110 unsigned im : 1; /**< I-MMU enable. */111 unsigned dc : 1; /**< D-Cache enable. */112 unsigned ic : 1; /**< I-Cache enable. */113 114 } __attribute__ ((packed));115 } lsu_cr_reg_t;116 #endif /* US */117 118 #endif /* !def __ASM__ */119 44 120 45 #endif -
kernel/arch/sparc64/include/mm/tlb.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_TLB_H_ 37 37 38 #if defined (US) 39 #define ITLB_ENTRY_COUNT 64 40 #define DTLB_ENTRY_COUNT 64 41 #define DTLB_MAX_LOCKED_ENTRIES DTLB_ENTRY_COUNT 38 39 #if defined (SUN4U) 40 #include <arch/mm/sun4u/tlb.h> 41 #elif defined (SUN4V) 42 #include <arch/mm/sun4v/tlb.h> 42 43 #endif 43 44 /** TLB_DSMALL is the only of the three DMMUs that can hold locked entries. */45 #if defined (US3)46 #define DTLB_MAX_LOCKED_ENTRIES 1647 #endif48 49 #define MEM_CONTEXT_KERNEL 050 #define MEM_CONTEXT_TEMP 151 52 /** Page sizes. */53 #define PAGESIZE_8K 054 #define PAGESIZE_64K 155 #define PAGESIZE_512K 256 #define PAGESIZE_4M 357 58 /** Bit width of the TLB-locked portion of kernel address space. */59 #define KERNEL_PAGE_WIDTH 22 /* 4M */60 61 /* TLB Demap Operation types. */62 #define TLB_DEMAP_PAGE 063 #define TLB_DEMAP_CONTEXT 164 #if defined (US3)65 #define TLB_DEMAP_ALL 266 #endif67 68 #define TLB_DEMAP_TYPE_SHIFT 669 70 /* TLB Demap Operation Context register encodings. */71 #define TLB_DEMAP_PRIMARY 072 #define TLB_DEMAP_SECONDARY 173 #define TLB_DEMAP_NUCLEUS 274 75 /* There are more TLBs in one MMU in US3, their codes are defined here. 
*/76 #if defined (US3)77 /* D-MMU: one small (16-entry) TLB and two big (512-entry) TLBs */78 #define TLB_DSMALL 079 #define TLB_DBIG_0 280 #define TLB_DBIG_1 381 82 /* I-MMU: one small (16-entry) TLB and one big TLB */83 #define TLB_ISMALL 084 #define TLB_IBIG 285 #endif86 87 #define TLB_DEMAP_CONTEXT_SHIFT 488 89 /* TLB Tag Access shifts */90 #define TLB_TAG_ACCESS_CONTEXT_SHIFT 091 #define TLB_TAG_ACCESS_CONTEXT_MASK ((1 << 13) - 1)92 #define TLB_TAG_ACCESS_VPN_SHIFT 1393 94 #ifndef __ASM__95 96 #include <arch/mm/tte.h>97 #include <arch/mm/mmu.h>98 #include <arch/mm/page.h>99 #include <arch/asm.h>100 #include <arch/barrier.h>101 #include <arch/types.h>102 #include <arch/register.h>103 #include <arch/cpu.h>104 105 union tlb_context_reg {106 uint64_t v;107 struct {108 unsigned long : 51;109 unsigned context : 13; /**< Context/ASID. */110 } __attribute__ ((packed));111 };112 typedef union tlb_context_reg tlb_context_reg_t;113 114 /** I-/D-TLB Data In/Access Register type. */115 typedef tte_data_t tlb_data_t;116 117 /** I-/D-TLB Data Access Address in Alternate Space. */118 119 #if defined (US)120 121 union tlb_data_access_addr {122 uint64_t value;123 struct {124 uint64_t : 55;125 unsigned tlb_entry : 6;126 unsigned : 3;127 } __attribute__ ((packed));128 };129 typedef union tlb_data_access_addr dtlb_data_access_addr_t;130 typedef union tlb_data_access_addr dtlb_tag_read_addr_t;131 typedef union tlb_data_access_addr itlb_data_access_addr_t;132 typedef union tlb_data_access_addr itlb_tag_read_addr_t;133 134 #elif defined (US3)135 136 /*137 * In US3, I-MMU and D-MMU have different formats of the data138 * access register virtual address. In the corresponding139 * structures the member variable for the entry number is140 * called "local_tlb_entry" - it contrasts with the "tlb_entry"141 * for the US data access register VA structure. 
The rationale142 * behind this is to prevent careless mistakes in the code143 * caused by setting only the entry number and not the TLB144 * number in the US3 code (when taking the code from US).145 */146 147 union dtlb_data_access_addr {148 uint64_t value;149 struct {150 uint64_t : 45;151 unsigned : 1;152 unsigned tlb_number : 2;153 unsigned : 4;154 unsigned local_tlb_entry : 9;155 unsigned : 3;156 } __attribute__ ((packed));157 };158 typedef union dtlb_data_access_addr dtlb_data_access_addr_t;159 typedef union dtlb_data_access_addr dtlb_tag_read_addr_t;160 161 union itlb_data_access_addr {162 uint64_t value;163 struct {164 uint64_t : 45;165 unsigned : 1;166 unsigned tlb_number : 2;167 unsigned : 6;168 unsigned local_tlb_entry : 7;169 unsigned : 3;170 } __attribute__ ((packed));171 };172 typedef union itlb_data_access_addr itlb_data_access_addr_t;173 typedef union itlb_data_access_addr itlb_tag_read_addr_t;174 175 #endif176 177 /** I-/D-TLB Tag Read Register. */178 union tlb_tag_read_reg {179 uint64_t value;180 struct {181 uint64_t vpn : 51; /**< Virtual Address bits 63:13. */182 unsigned context : 13; /**< Context identifier. */183 } __attribute__ ((packed));184 };185 typedef union tlb_tag_read_reg tlb_tag_read_reg_t;186 typedef union tlb_tag_read_reg tlb_tag_access_reg_t;187 188 189 /** TLB Demap Operation Address. */190 union tlb_demap_addr {191 uint64_t value;192 struct {193 uint64_t vpn: 51; /**< Virtual Address bits 63:13. */194 #if defined (US)195 unsigned : 6; /**< Ignored. */196 unsigned type : 1; /**< The type of demap operation. */197 #elif defined (US3)198 unsigned : 5; /**< Ignored. */199 unsigned type: 2; /**< The type of demap operation. */200 #endif201 unsigned context : 2; /**< Context register selection. */202 unsigned : 4; /**< Zero. */203 } __attribute__ ((packed));204 };205 typedef union tlb_demap_addr tlb_demap_addr_t;206 207 /** TLB Synchronous Fault Status Register. 
*/208 union tlb_sfsr_reg {209 uint64_t value;210 struct {211 #if defined (US)212 unsigned long : 40; /**< Implementation dependent. */213 unsigned asi : 8; /**< ASI. */214 unsigned : 2;215 unsigned ft : 7; /**< Fault type. */216 #elif defined (US3)217 unsigned long : 39; /**< Implementation dependent. */218 unsigned nf : 1; /**< Non-faulting load. */219 unsigned asi : 8; /**< ASI. */220 unsigned tm : 1; /**< I-TLB miss. */221 unsigned : 3; /**< Reserved. */222 unsigned ft : 5; /**< Fault type. */223 #endif224 unsigned e : 1; /**< Side-effect bit. */225 unsigned ct : 2; /**< Context Register selection. */226 unsigned pr : 1; /**< Privilege bit. */227 unsigned w : 1; /**< Write bit. */228 unsigned ow : 1; /**< Overwrite bit. */229 unsigned fv : 1; /**< Fault Valid bit. */230 } __attribute__ ((packed));231 };232 typedef union tlb_sfsr_reg tlb_sfsr_reg_t;233 234 #if defined (US3)235 236 /*237 * Functions for determining the number of entries in TLBs. They either return238 * a constant value or a value based on the CPU autodetection.239 */240 241 /**242 * Determine the number of entries in the DMMU's small TLB.243 */244 static inline uint16_t tlb_dsmall_size(void)245 {246 return 16;247 }248 249 /**250 * Determine the number of entries in each DMMU's big TLB.251 */252 static inline uint16_t tlb_dbig_size(void)253 {254 return 512;255 }256 257 /**258 * Determine the number of entries in the IMMU's small TLB.259 */260 static inline uint16_t tlb_ismall_size(void)261 {262 return 16;263 }264 265 /**266 * Determine the number of entries in the IMMU's big TLB.267 */268 static inline uint16_t tlb_ibig_size(void)269 {270 if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIV_PLUS)271 return 512;272 else273 return 128;274 }275 276 #endif277 278 /** Read MMU Primary Context Register.279 *280 * @return Current value of Primary Context Register.281 */282 static inline uint64_t mmu_primary_context_read(void)283 {284 return asi_u64_read(ASI_DMMU, VA_PRIMARY_CONTEXT_REG);285 }286 287 /** 
Write MMU Primary Context Register.288 *289 * @param v New value of Primary Context Register.290 */291 static inline void mmu_primary_context_write(uint64_t v)292 {293 asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);294 flush_pipeline();295 }296 297 /** Read MMU Secondary Context Register.298 *299 * @return Current value of Secondary Context Register.300 */301 static inline uint64_t mmu_secondary_context_read(void)302 {303 return asi_u64_read(ASI_DMMU, VA_SECONDARY_CONTEXT_REG);304 }305 306 /** Write MMU Primary Context Register.307 *308 * @param v New value of Primary Context Register.309 */310 static inline void mmu_secondary_context_write(uint64_t v)311 {312 asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);313 flush_pipeline();314 }315 316 #if defined (US)317 318 /** Read IMMU TLB Data Access Register.319 *320 * @param entry TLB Entry index.321 *322 * @return Current value of specified IMMU TLB Data Access323 * Register.324 */325 static inline uint64_t itlb_data_access_read(size_t entry)326 {327 itlb_data_access_addr_t reg;328 329 reg.value = 0;330 reg.tlb_entry = entry;331 return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);332 }333 334 /** Write IMMU TLB Data Access Register.335 *336 * @param entry TLB Entry index.337 * @param value Value to be written.338 */339 static inline void itlb_data_access_write(size_t entry, uint64_t value)340 {341 itlb_data_access_addr_t reg;342 343 reg.value = 0;344 reg.tlb_entry = entry;345 asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);346 flush_pipeline();347 }348 349 /** Read DMMU TLB Data Access Register.350 *351 * @param entry TLB Entry index.352 *353 * @return Current value of specified DMMU TLB Data Access354 * Register.355 */356 static inline uint64_t dtlb_data_access_read(size_t entry)357 {358 dtlb_data_access_addr_t reg;359 360 reg.value = 0;361 reg.tlb_entry = entry;362 return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);363 }364 365 /** Write DMMU TLB Data Access Register.366 *367 * @param 
entry TLB Entry index.368 * @param value Value to be written.369 */370 static inline void dtlb_data_access_write(size_t entry, uint64_t value)371 {372 dtlb_data_access_addr_t reg;373 374 reg.value = 0;375 reg.tlb_entry = entry;376 asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);377 membar();378 }379 380 /** Read IMMU TLB Tag Read Register.381 *382 * @param entry TLB Entry index.383 *384 * @return Current value of specified IMMU TLB Tag Read Register.385 */386 static inline uint64_t itlb_tag_read_read(size_t entry)387 {388 itlb_tag_read_addr_t tag;389 390 tag.value = 0;391 tag.tlb_entry = entry;392 return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);393 }394 395 /** Read DMMU TLB Tag Read Register.396 *397 * @param entry TLB Entry index.398 *399 * @return Current value of specified DMMU TLB Tag Read Register.400 */401 static inline uint64_t dtlb_tag_read_read(size_t entry)402 {403 dtlb_tag_read_addr_t tag;404 405 tag.value = 0;406 tag.tlb_entry = entry;407 return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);408 }409 410 #elif defined (US3)411 412 413 /** Read IMMU TLB Data Access Register.414 *415 * @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)416 * @param entry TLB Entry index.417 *418 * @return Current value of specified IMMU TLB Data Access419 * Register.420 */421 static inline uint64_t itlb_data_access_read(int tlb, size_t entry)422 {423 itlb_data_access_addr_t reg;424 425 reg.value = 0;426 reg.tlb_number = tlb;427 reg.local_tlb_entry = entry;428 return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);429 }430 431 /** Write IMMU TLB Data Access Register.432 * @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)433 * @param entry TLB Entry index.434 * @param value Value to be written.435 */436 static inline void itlb_data_access_write(int tlb, size_t entry,437 uint64_t value)438 {439 itlb_data_access_addr_t reg;440 441 reg.value = 0;442 reg.tlb_number = tlb;443 reg.local_tlb_entry = entry;444 asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, 
reg.value, value);445 flush_pipeline();446 }447 448 /** Read DMMU TLB Data Access Register.449 *450 * @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG, TLB_DBIG)451 * @param entry TLB Entry index.452 *453 * @return Current value of specified DMMU TLB Data Access454 * Register.455 */456 static inline uint64_t dtlb_data_access_read(int tlb, size_t entry)457 {458 dtlb_data_access_addr_t reg;459 460 reg.value = 0;461 reg.tlb_number = tlb;462 reg.local_tlb_entry = entry;463 return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);464 }465 466 /** Write DMMU TLB Data Access Register.467 *468 * @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)469 * @param entry TLB Entry index.470 * @param value Value to be written.471 */472 static inline void dtlb_data_access_write(int tlb, size_t entry,473 uint64_t value)474 {475 dtlb_data_access_addr_t reg;476 477 reg.value = 0;478 reg.tlb_number = tlb;479 reg.local_tlb_entry = entry;480 asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);481 membar();482 }483 484 /** Read IMMU TLB Tag Read Register.485 *486 * @param tlb TLB number (one of TLB_ISMALL or TLB_IBIG)487 * @param entry TLB Entry index.488 *489 * @return Current value of specified IMMU TLB Tag Read Register.490 */491 static inline uint64_t itlb_tag_read_read(int tlb, size_t entry)492 {493 itlb_tag_read_addr_t tag;494 495 tag.value = 0;496 tag.tlb_number = tlb;497 tag.local_tlb_entry = entry;498 return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);499 }500 501 /** Read DMMU TLB Tag Read Register.502 *503 * @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)504 * @param entry TLB Entry index.505 *506 * @return Current value of specified DMMU TLB Tag Read Register.507 */508 static inline uint64_t dtlb_tag_read_read(int tlb, size_t entry)509 {510 dtlb_tag_read_addr_t tag;511 512 tag.value = 0;513 tag.tlb_number = tlb;514 tag.local_tlb_entry = entry;515 return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);516 }517 518 #endif519 520 521 /** 
Write IMMU TLB Tag Access Register.522 *523 * @param v Value to be written.524 */525 static inline void itlb_tag_access_write(uint64_t v)526 {527 asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);528 flush_pipeline();529 }530 531 /** Read IMMU TLB Tag Access Register.532 *533 * @return Current value of IMMU TLB Tag Access Register.534 */535 static inline uint64_t itlb_tag_access_read(void)536 {537 return asi_u64_read(ASI_IMMU, VA_IMMU_TAG_ACCESS);538 }539 540 /** Write DMMU TLB Tag Access Register.541 *542 * @param v Value to be written.543 */544 static inline void dtlb_tag_access_write(uint64_t v)545 {546 asi_u64_write(ASI_DMMU, VA_DMMU_TAG_ACCESS, v);547 membar();548 }549 550 /** Read DMMU TLB Tag Access Register.551 *552 * @return Current value of DMMU TLB Tag Access Register.553 */554 static inline uint64_t dtlb_tag_access_read(void)555 {556 return asi_u64_read(ASI_DMMU, VA_DMMU_TAG_ACCESS);557 }558 559 560 /** Write IMMU TLB Data in Register.561 *562 * @param v Value to be written.563 */564 static inline void itlb_data_in_write(uint64_t v)565 {566 asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);567 flush_pipeline();568 }569 570 /** Write DMMU TLB Data in Register.571 *572 * @param v Value to be written.573 */574 static inline void dtlb_data_in_write(uint64_t v)575 {576 asi_u64_write(ASI_DTLB_DATA_IN_REG, 0, v);577 membar();578 }579 580 /** Read ITLB Synchronous Fault Status Register.581 *582 * @return Current content of I-SFSR register.583 */584 static inline uint64_t itlb_sfsr_read(void)585 {586 return asi_u64_read(ASI_IMMU, VA_IMMU_SFSR);587 }588 589 /** Write ITLB Synchronous Fault Status Register.590 *591 * @param v New value of I-SFSR register.592 */593 static inline void itlb_sfsr_write(uint64_t v)594 {595 asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);596 flush_pipeline();597 }598 599 /** Read DTLB Synchronous Fault Status Register.600 *601 * @return Current content of D-SFSR register.602 */603 static inline uint64_t dtlb_sfsr_read(void)604 {605 return 
asi_u64_read(ASI_DMMU, VA_DMMU_SFSR);606 }607 608 /** Write DTLB Synchronous Fault Status Register.609 *610 * @param v New value of D-SFSR register.611 */612 static inline void dtlb_sfsr_write(uint64_t v)613 {614 asi_u64_write(ASI_DMMU, VA_DMMU_SFSR, v);615 membar();616 }617 618 /** Read DTLB Synchronous Fault Address Register.619 *620 * @return Current content of D-SFAR register.621 */622 static inline uint64_t dtlb_sfar_read(void)623 {624 return asi_u64_read(ASI_DMMU, VA_DMMU_SFAR);625 }626 627 /** Perform IMMU TLB Demap Operation.628 *629 * @param type Selects between context and page demap (and entire MMU630 * demap on US3).631 * @param context_encoding Specifies which Context register has Context ID for632 * demap.633 * @param page Address which is on the page to be demapped.634 */635 static inline void itlb_demap(int type, int context_encoding, uintptr_t page)636 {637 tlb_demap_addr_t da;638 page_address_t pg;639 640 da.value = 0;641 pg.address = page;642 643 da.type = type;644 da.context = context_encoding;645 da.vpn = pg.vpn;646 647 /* da.value is the address within the ASI */648 asi_u64_write(ASI_IMMU_DEMAP, da.value, 0);649 650 flush_pipeline();651 }652 653 /** Perform DMMU TLB Demap Operation.654 *655 * @param type Selects between context and page demap (and entire MMU656 * demap on US3).657 * @param context_encoding Specifies which Context register has Context ID for658 * demap.659 * @param page Address which is on the page to be demapped.660 */661 static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)662 {663 tlb_demap_addr_t da;664 page_address_t pg;665 666 da.value = 0;667 pg.address = page;668 669 da.type = type;670 da.context = context_encoding;671 da.vpn = pg.vpn;672 673 /* da.value is the address within the ASI */674 asi_u64_write(ASI_DMMU_DEMAP, da.value, 0);675 676 membar();677 }678 679 extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);680 extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t, 
istate_t *);681 extern void fast_data_access_protection(tlb_tag_access_reg_t , istate_t *);682 683 extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);684 685 extern void dump_sfsr_and_sfar(void);686 687 #endif /* !def __ASM__ */688 44 689 45 #endif -
kernel/arch/sparc64/include/mm/tsb.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_TSB_H_ 37 37 38 /* 39 * ITSB abd DTSB will claim 64K of memory, which 40 * is a nice number considered that it is one of 41 * the page sizes supported by hardware, which, 42 * again, is nice because TSBs need to be locked 43 * in TLBs - only one TLB entry will do. 44 */ 45 #define TSB_SIZE 2 /* when changing this, change 46 * as.c as well */ 47 #define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 48 #define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 49 50 #define TSB_TAG_TARGET_CONTEXT_SHIFT 48 51 52 #ifndef __ASM__ 53 54 #include <arch/mm/tte.h> 55 #include <arch/mm/mmu.h> 56 #include <arch/types.h> 57 58 /** TSB Base register. */ 59 typedef union tsb_base_reg { 60 uint64_t value; 61 struct { 62 uint64_t base : 51; /**< TSB base address, bits 63:13. */ 63 unsigned split : 1; /**< Split vs. common TSB for 8K and 64K 64 * pages. HelenOS uses only 8K pages 65 * for user mappings, so we always set 66 * this to 0. 67 */ 68 unsigned : 9; 69 unsigned size : 3; /**< TSB size. Number of entries is 70 * 512 * 2^size. */ 71 } __attribute__ ((packed)); 72 } tsb_base_reg_t; 73 74 /** Read ITSB Base register. 75 * 76 * @return Content of the ITSB Base register. 77 */ 78 static inline uint64_t itsb_base_read(void) 79 { 80 return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE); 81 } 82 83 /** Read DTSB Base register. 84 * 85 * @return Content of the DTSB Base register. 86 */ 87 static inline uint64_t dtsb_base_read(void) 88 { 89 return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE); 90 } 91 92 /** Write ITSB Base register. 93 * 94 * @param v New content of the ITSB Base register. 95 */ 96 static inline void itsb_base_write(uint64_t v) 97 { 98 asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v); 99 } 100 101 /** Write DTSB Base register. 102 * 103 * @param v New content of the DTSB Base register. 
104 */ 105 static inline void dtsb_base_write(uint64_t v) 106 { 107 asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v); 108 } 109 110 #if defined (US3) 111 112 /** Write DTSB Primary Extension register. 113 * 114 * @param v New content of the DTSB Primary Extension register. 115 */ 116 static inline void dtsb_primary_extension_write(uint64_t v) 117 { 118 asi_u64_write(ASI_DMMU, VA_DMMU_PRIMARY_EXTENSION, v); 119 } 120 121 /** Write DTSB Secondary Extension register. 122 * 123 * @param v New content of the DTSB Secondary Extension register. 124 */ 125 static inline void dtsb_secondary_extension_write(uint64_t v) 126 { 127 asi_u64_write(ASI_DMMU, VA_DMMU_SECONDARY_EXTENSION, v); 128 } 129 130 /** Write DTSB Nucleus Extension register. 131 * 132 * @param v New content of the DTSB Nucleus Extension register. 133 */ 134 static inline void dtsb_nucleus_extension_write(uint64_t v) 135 { 136 asi_u64_write(ASI_DMMU, VA_DMMU_NUCLEUS_EXTENSION, v); 137 } 138 139 /** Write ITSB Primary Extension register. 140 * 141 * @param v New content of the ITSB Primary Extension register. 142 */ 143 static inline void itsb_primary_extension_write(uint64_t v) 144 { 145 asi_u64_write(ASI_IMMU, VA_IMMU_PRIMARY_EXTENSION, v); 146 } 147 148 /** Write ITSB Nucleus Extension register. 149 * 150 * @param v New content of the ITSB Nucleus Extension register. 151 */ 152 static inline void itsb_nucleus_extension_write(uint64_t v) 153 { 154 asi_u64_write(ASI_IMMU, VA_IMMU_NUCLEUS_EXTENSION, v); 155 } 156 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/tsb.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/tsb.h> 157 42 #endif 158 159 /* Forward declarations. */160 struct as;161 struct pte;162 163 extern void tsb_invalidate(struct as *as, uintptr_t page, size_t pages);164 extern void itsb_pte_copy(struct pte *t, size_t index);165 extern void dtsb_pte_copy(struct pte *t, size_t index, bool ro);166 167 #endif /* !def __ASM__ */168 43 169 44 #endif -
kernel/arch/sparc64/include/mm/tte.h
r1ccafee r0b9ac3c 36 36 #define KERN_sparc64_TTE_H_ 37 37 38 #define TTE_G (1 << 0) 39 #define TTE_W (1 << 1) 40 #define TTE_P (1 << 2) 41 #define TTE_E (1 << 3) 42 #define TTE_CV (1 << 4) 43 #define TTE_CP (1 << 5) 44 #define TTE_L (1 << 6) 45 46 #define TTE_V_SHIFT 63 47 #define TTE_SIZE_SHIFT 61 48 49 #ifndef __ASM__ 50 51 #include <arch/types.h> 52 53 /* TTE tag's VA_tag field contains bits <63:VA_TAG_PAGE_SHIFT> of the VA */ 54 #define VA_TAG_PAGE_SHIFT 22 55 56 /** Translation Table Entry - Tag. */ 57 union tte_tag { 58 uint64_t value; 59 struct { 60 unsigned g : 1; /**< Global. */ 61 unsigned : 2; /**< Reserved. */ 62 unsigned context : 13; /**< Context identifier. */ 63 unsigned : 6; /**< Reserved. */ 64 uint64_t va_tag : 42; /**< Virtual Address Tag, bits 63:22. */ 65 } __attribute__ ((packed)); 66 }; 67 68 typedef union tte_tag tte_tag_t; 69 70 /** Translation Table Entry - Data. */ 71 union tte_data { 72 uint64_t value; 73 struct { 74 unsigned v : 1; /**< Valid. */ 75 unsigned size : 2; /**< Page size of this entry. */ 76 unsigned nfo : 1; /**< No-Fault-Only. */ 77 unsigned ie : 1; /**< Invert Endianness. */ 78 unsigned soft2 : 9; /**< Software defined field. */ 79 #if defined (US) 80 unsigned diag : 9; /**< Diagnostic data. */ 81 unsigned pfn : 28; /**< Physical Address bits, bits 40:13. */ 82 #elif defined (US3) 83 unsigned : 7; /**< Reserved. */ 84 unsigned pfn : 30; /**< Physical Address bits, bits 42:13 */ 38 #if defined (SUN4U) 39 #include <arch/mm/sun4u/tte.h> 40 #elif defined (SUN4V) 41 #include <arch/mm/sun4v/tte.h> 85 42 #endif 86 unsigned soft : 6; /**< Software defined field. */87 unsigned l : 1; /**< Lock. */88 unsigned cp : 1; /**< Cacheable in physically indexed cache. */89 unsigned cv : 1; /**< Cacheable in virtually indexed cache. */90 unsigned e : 1; /**< Side-effect. */91 unsigned p : 1; /**< Privileged. */92 unsigned w : 1; /**< Writable. */93 unsigned g : 1; /**< Global. 
*/94 } __attribute__ ((packed));95 };96 97 typedef union tte_data tte_data_t;98 99 #endif /* !def __ASM__ */100 43 101 44 #endif -
kernel/arch/sparc64/include/trap/exception.h
r1ccafee r0b9ac3c 38 38 39 39 #define TT_INSTRUCTION_ACCESS_EXCEPTION 0x08 40 #define TT_INSTRUCTION_ACCESS_MMU_MISS 0x09 40 41 #define TT_INSTRUCTION_ACCESS_ERROR 0x0a 42 #define TT_IAE_UNAUTH_ACCESS 0x0b 43 #define TT_IAE_NFO_PAGE 0x0c 41 44 #define TT_ILLEGAL_INSTRUCTION 0x10 42 45 #define TT_PRIVILEGED_OPCODE 0x11 43 46 #define TT_UNIMPLEMENTED_LDD 0x12 44 47 #define TT_UNIMPLEMENTED_STD 0x13 48 #define TT_DAE_INVALID_ASI 0x14 49 #define TT_DAE_PRIVILEGE_VIOLATION 0x15 50 #define TT_DAE_NC_PAGE 0x16 51 #define TT_DAE_NFO_PAGE 0x17 45 52 #define TT_FP_DISABLED 0x20 46 53 #define TT_FP_EXCEPTION_IEEE_754 0x21 … … 49 56 #define TT_DIVISION_BY_ZERO 0x28 50 57 #define TT_DATA_ACCESS_EXCEPTION 0x30 58 #define TT_DATA_ACCESS_MMU_MISS 0x31 51 59 #define TT_DATA_ACCESS_ERROR 0x32 52 60 #define TT_MEM_ADDRESS_NOT_ALIGNED 0x34 -
kernel/arch/sparc64/include/trap/interrupt.h
r1ccafee r0b9ac3c 32 32 /** 33 33 * @file 34 * @brief This file contains interrupt vector trap handler. 34 * @brief This file contains level N interrupt and inter-processor interrupt 35 * trap handler. 35 36 */ 36 37 #ifndef KERN_sparc64_TRAP_INTERRUPT_H_ 38 #define KERN_sparc64_TRAP_INTERRUPT_H_ 39 40 #include <arch/trap/trap_table.h> 41 #include <arch/stack.h> 42 43 /* IMAP register bits */ 44 #define IGN_MASK 0x7c0 45 #define INO_MASK 0x1f 46 #define IMAP_V_MASK (1ULL << 31) 47 48 #define IGN_SHIFT 6 49 50 51 /* Interrupt ASI registers. */ 52 #define ASI_INTR_W 0x77 53 #define ASI_INTR_DISPATCH_STATUS 0x48 54 #define ASI_INTR_R 0x7f 55 #define ASI_INTR_RECEIVE 0x49 56 57 /* VA's used with ASI_INTR_W register. */ 58 #if defined (US) 59 #define ASI_UDB_INTR_W_DATA_0 0x40 60 #define ASI_UDB_INTR_W_DATA_1 0x50 61 #define ASI_UDB_INTR_W_DATA_2 0x60 62 #elif defined (US3) 63 #define VA_INTR_W_DATA_0 0x40 64 #define VA_INTR_W_DATA_1 0x48 65 #define VA_INTR_W_DATA_2 0x50 66 #define VA_INTR_W_DATA_3 0x58 67 #define VA_INTR_W_DATA_4 0x60 68 #define VA_INTR_W_DATA_5 0x68 69 #define VA_INTR_W_DATA_6 0x80 70 #define VA_INTR_W_DATA_7 0x88 71 #endif 72 #define VA_INTR_W_DISPATCH 0x70 73 74 /* VA's used with ASI_INTR_R register. */ 75 #if defined(US) 76 #define ASI_UDB_INTR_R_DATA_0 0x40 77 #define ASI_UDB_INTR_R_DATA_1 0x50 78 #define ASI_UDB_INTR_R_DATA_2 0x60 79 #elif defined (US3) 80 #define VA_INTR_R_DATA_0 0x40 81 #define VA_INTR_R_DATA_1 0x48 82 #define VA_INTR_R_DATA_2 0x50 83 #define VA_INTR_R_DATA_3 0x58 84 #define VA_INTR_R_DATA_4 0x60 85 #define VA_INTR_R_DATA_5 0x68 86 #define VA_INTR_R_DATA_6 0x80 87 #define VA_INTR_R_DATA_7 0x88 88 #endif 89 90 /* Shifts in the Interrupt Vector Dispatch virtual address. */ 91 #define INTR_VEC_DISPATCH_MID_SHIFT 14 92 93 /* Bits in the Interrupt Dispatch Status register. 
*/ 94 #define INTR_DISPATCH_STATUS_NACK 0x2 95 #define INTR_DISPATCH_STATUS_BUSY 0x1 37 #ifndef KERN_sparc64_INTERRUPT_TRAP_H_ 38 #define KERN_sparc64_INTERRUPT_TRAP_H_ 96 39 97 40 #define TT_INTERRUPT_LEVEL_1 0x41 … … 111 54 #define TT_INTERRUPT_LEVEL_15 0x4f 112 55 113 #define TT_INTERRUPT_VECTOR_TRAP 0x6056 #define INTERRUPT_LEVEL_N_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 114 57 115 #define INTERRUPT_LEVEL_N_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 116 #define INTERRUPT_VECTOR_TRAP_HANDLER_SIZE TRAP_TABLE_ENTRY_SIZE 58 /* IMAP register bits */ 59 #define IGN_MASK 0x7c0 60 #define INO_MASK 0x1f 61 #define IMAP_V_MASK (1ULL << 31) 62 63 #define IGN_SHIFT 6 64 117 65 118 66 #ifdef __ASM__ … … 121 69 PREEMPTIBLE_HANDLER exc_dispatch 122 70 .endm 123 124 .macro INTERRUPT_VECTOR_TRAP_HANDLER 125 PREEMPTIBLE_HANDLER interrupt 126 .endm 127 #endif /* __ASM__ */ 71 #endif 128 72 129 73 #ifndef __ASM__ … … 134 78 #endif /* !def __ASM__ */ 135 79 80 81 #if defined (SUN4U) 82 #include <arch/trap/sun4u/interrupt.h> 83 #elif defined (SUN4V) 84 #include <arch/trap/sun4v/interrupt.h> 85 #endif 86 136 87 #endif 137 88 -
kernel/arch/sparc64/include/trap/mmu.h
r1ccafee r0b9ac3c 38 38 #define KERN_sparc64_MMU_TRAP_H_ 39 39 40 #include <arch/stack.h> 41 #include <arch/regdef.h> 42 #include <arch/mm/tlb.h> 43 #include <arch/mm/mmu.h> 44 #include <arch/mm/tte.h> 45 #include <arch/trap/regwin.h> 46 47 #ifdef CONFIG_TSB 48 #include <arch/mm/tsb.h> 40 #if defined (SUN4U) 41 #include <arch/trap/sun4u/mmu.h> 42 #elif defined (SUN4V) 43 #include <arch/trap/sun4v/mmu.h> 49 44 #endif 50 51 #define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS 0x6452 #define TT_FAST_DATA_ACCESS_MMU_MISS 0x6853 #define TT_FAST_DATA_ACCESS_PROTECTION 0x6c54 55 #define FAST_MMU_HANDLER_SIZE 12856 57 #ifdef __ASM__58 59 .macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER60 /*61 * First, try to refill TLB from TSB.62 */63 #ifdef CONFIG_TSB64 ldxa [%g0] ASI_IMMU, %g1 ! read TSB Tag Target Register65 ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2 ! read TSB 8K Pointer66 ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g567 cmp %g1, %g4 ! is this the entry we are looking for?68 bne,pn %xcc, 0f69 nop70 stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG ! copy mapping from ITSB to ITLB71 retry72 #endif73 74 0:75 wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate76 PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss77 .endm78 79 .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl80 /*81 * First, try to refill TLB from TSB.82 */83 84 #ifdef CONFIG_TSB85 ldxa [%g0] ASI_DMMU, %g1 ! read TSB Tag Target Register86 srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2 ! is this a kernel miss?87 brz,pn %g2, 0f88 ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3 ! read TSB 8K Pointer89 ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g590 cmp %g1, %g4 ! is this the entry we are looking for?91 bne,pn %xcc, 0f92 nop93 stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG ! copy mapping from DTSB to DTLB94 retry95 #endif96 97 /*98 * Second, test if it is the portion of the kernel address space99 * which is faulting. If that is the case, immediately create100 * identity mapping for that page in DTLB. 
VPN 0 is excluded from101 * this treatment.102 *103 * Note that branch-delay slots are used in order to save space.104 */105 0:106 sethi %hi(fast_data_access_mmu_miss_data_hi), %g7107 wr %g0, ASI_DMMU, %asi108 ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1 ! read the faulting Context and VPN109 set TLB_TAG_ACCESS_CONTEXT_MASK, %g2110 andcc %g1, %g2, %g3 ! get Context111 bnz %xcc, 0f ! Context is non-zero112 andncc %g1, %g2, %g3 ! get page address into %g3113 bz %xcc, 0f ! page address is zero114 ldx [%g7 + %lo(end_of_identity)], %g4115 cmp %g3, %g4116 bgeu %xcc, 0f117 118 ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2119 add %g3, %g2, %g2120 stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG ! identity map the kernel page121 retry122 123 /*124 * Third, catch and handle special cases when the trap is caused by125 * the userspace register window spill or fill handler. In case126 * one of these two traps caused this trap, we just lower the trap127 * level and service the DTLB miss. In the end, we restart128 * the offending SAVE or RESTORE.129 */130 0:131 .if (\tl > 0)132 wrpr %g0, 1, %tl133 .endif134 135 /*136 * Switch from the MM globals.137 */138 wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate139 140 /*141 * Read the Tag Access register for the higher-level handler.142 * This is necessary to survive nested DTLB misses.143 */144 ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2145 146 /*147 * g2 will be passed as an argument to fast_data_access_mmu_miss().148 */149 PREEMPTIBLE_HANDLER fast_data_access_mmu_miss150 .endm151 152 .macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl153 /*154 * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.155 */156 157 .if (\tl > 0)158 wrpr %g0, 1, %tl159 .endif160 161 /*162 * Switch from the MM globals.163 */164 wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate165 166 /*167 * Read the Tag Access register for the higher-level handler.168 * This is necessary to survive nested DTLB misses.169 */170 mov VA_DMMU_TAG_ACCESS, %g2171 ldxa [%g2] ASI_DMMU, %g2172 173 
/*174 * g2 will be passed as an argument to fast_data_access_mmu_miss().175 */176 PREEMPTIBLE_HANDLER fast_data_access_protection177 .endm178 179 #endif /* __ASM__ */180 45 181 46 #endif -
kernel/arch/sparc64/include/trap/regwin.h
r1ccafee r0b9ac3c 131 131 132 132 /* 133 * Macro used to spill userspace window to userspace window buffer.134 * It can be either triggered from preemptible_handler doing SAVE135 * at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0136 * at (TL=0).137 */138 .macro SPILL_TO_USPACE_WINDOW_BUFFER139 stx %l0, [%g7 + L0_OFFSET]140 stx %l1, [%g7 + L1_OFFSET]141 stx %l2, [%g7 + L2_OFFSET]142 stx %l3, [%g7 + L3_OFFSET]143 stx %l4, [%g7 + L4_OFFSET]144 stx %l5, [%g7 + L5_OFFSET]145 stx %l6, [%g7 + L6_OFFSET]146 stx %l7, [%g7 + L7_OFFSET]147 stx %i0, [%g7 + I0_OFFSET]148 stx %i1, [%g7 + I1_OFFSET]149 stx %i2, [%g7 + I2_OFFSET]150 stx %i3, [%g7 + I3_OFFSET]151 stx %i4, [%g7 + I4_OFFSET]152 stx %i5, [%g7 + I5_OFFSET]153 stx %i6, [%g7 + I6_OFFSET]154 stx %i7, [%g7 + I7_OFFSET]155 add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7156 saved157 retry158 .endm159 160 161 /*162 133 * Macro used by the nucleus and the primary context 0 during normal fills. 163 134 */ … … 232 203 #endif /* __ASM__ */ 233 204 205 #if defined (SUN4U) 206 #include <arch/trap/sun4u/regwin.h> 207 #elif defined (SUN4V) 208 #include <arch/trap/sun4v/regwin.h> 234 209 #endif 235 210 211 #endif 212 236 213 /** @} 237 214 */ -
kernel/arch/sparc64/include/trap/trap_table.h
r1ccafee r0b9ac3c 101 101 .macro PREEMPTIBLE_HANDLER f 102 102 sethi %hi(\f), %g1 103 b a %xcc,preemptible_handler103 b preemptible_handler 104 104 or %g1, %lo(\f), %g1 105 105 .endm -
kernel/arch/sparc64/include/types.h
r1ccafee r0b9ac3c 27 27 */ 28 28 29 /** @addtogroup sparc64 29 /** @addtogroup sparc64 30 30 * @{ 31 31 */ … … 55 55 typedef uint64_t unative_t; 56 56 typedef int64_t native_t; 57 typedef uint64_t atomic_count_t; 57 58 58 59 typedef struct { -
kernel/arch/sparc64/src/asm.S
r1ccafee r0b9ac3c 29 29 #include <arch/arch.h> 30 30 #include <arch/stack.h> 31 #include <arch/regdef.h>32 #include <arch/mm/mmu.h>33 31 34 32 .text … … 234 232 nop 235 233 236 237 .macro WRITE_ALTERNATE_REGISTER reg, bit238 rdpr %pstate, %g1 ! save PSTATE.PEF239 wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate240 mov %o0, \reg241 wrpr %g0, PSTATE_PRIV_BIT, %pstate242 retl243 wrpr %g1, 0, %pstate ! restore PSTATE.PEF244 .endm245 246 .macro READ_ALTERNATE_REGISTER reg, bit247 rdpr %pstate, %g1 ! save PSTATE.PEF248 wrpr %g0, (\bit | PSTATE_PRIV_BIT), %pstate249 mov \reg, %o0250 wrpr %g0, PSTATE_PRIV_BIT, %pstate251 retl252 wrpr %g1, 0, %pstate ! restore PSTATE.PEF253 .endm254 255 .global write_to_ag_g6256 write_to_ag_g6:257 WRITE_ALTERNATE_REGISTER %g6, PSTATE_AG_BIT258 259 .global write_to_ag_g7260 write_to_ag_g7:261 WRITE_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT262 263 .global write_to_ig_g6264 write_to_ig_g6:265 WRITE_ALTERNATE_REGISTER %g6, PSTATE_IG_BIT266 267 .global read_from_ag_g7268 read_from_ag_g7:269 READ_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT270 271 272 /** Switch to userspace.273 *274 * %o0 Userspace entry address.275 * %o1 Userspace stack pointer address.276 * %o2 Userspace address of uarg structure.277 */278 .global switch_to_userspace279 switch_to_userspace:280 save %o1, -(STACK_WINDOW_SAVE_AREA_SIZE + STACK_ARG_SAVE_AREA_SIZE), %sp281 flushw282 wrpr %g0, 0, %cleanwin ! avoid information leak283 284 mov %i2, %o0 ! uarg285 xor %o1, %o1, %o1 ! %o1 is defined to hold pcb_ptr286 ! set it to 0287 288 clr %i2289 clr %i3290 clr %i4291 clr %i5292 clr %i6293 294 wrpr %g0, 1, %tl ! 
enforce mapping via nucleus295 296 rdpr %cwp, %g1297 wrpr %g1, TSTATE_IE_BIT, %tstate298 wrpr %i0, 0, %tnpc299 300 /*301 * Set primary context according to secondary context.302 * Secondary context has been already installed by303 * higher-level functions.304 */305 wr %g0, ASI_DMMU, %asi306 ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1307 stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi308 flush %i7309 310 /*311 * Spills and fills will be handled by the userspace handlers.312 */313 wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate314 315 done ! jump to userspace316 -
kernel/arch/sparc64/src/drivers/kbd.c
r1ccafee r0b9ac3c 39 39 #include <console/console.h> 40 40 #include <ddi/irq.h> 41 #include <mm/page.h> 41 42 #include <arch/mm/page.h> 42 43 #include <arch/types.h> -
kernel/arch/sparc64/src/drivers/tick.c
r1ccafee r0b9ac3c 54 54 interrupt_register(14, "tick_int", tick_interrupt); 55 55 compare.int_dis = false; 56 compare.tick_cmpr = CPU->arch.clock_frequency / HZ; 56 compare.tick_cmpr = tick_counter_read() + 57 CPU->arch.clock_frequency / HZ; 57 58 CPU->arch.next_tick_cmpr = compare.tick_cmpr; 58 59 tick_compare_write(compare.value); 59 tick_write(0);60 60 61 #if defined (US3) 61 #if defined (US3) || defined (SUN4V) 62 62 /* disable STICK interrupts and clear any pending ones */ 63 63 tick_compare_reg_t stick_compare; … … 111 111 * overflow only in 146 years. 112 112 */ 113 drift = tick_ read() - CPU->arch.next_tick_cmpr;113 drift = tick_counter_read() - CPU->arch.next_tick_cmpr; 114 114 while (drift > CPU->arch.clock_frequency / HZ) { 115 115 drift -= CPU->arch.clock_frequency / HZ; 116 116 CPU->missed_clock_ticks++; 117 117 } 118 CPU->arch.next_tick_cmpr = tick_ read() +118 CPU->arch.next_tick_cmpr = tick_counter_read() + 119 119 (CPU->arch.clock_frequency / HZ) - drift; 120 120 tick_compare_write(CPU->arch.next_tick_cmpr); -
kernel/arch/sparc64/src/mm/page.c
r1ccafee r0b9ac3c 33 33 */ 34 34 35 #include <mm/page.h> 35 36 #include <arch/mm/page.h> 36 37 #include <arch/mm/tlb.h> -
kernel/arch/sparc64/src/mm/sun4u/tlb.c
r1ccafee r0b9ac3c 37 37 #include <mm/as.h> 38 38 #include <mm/asid.h> 39 #include <genarch/mm/page_ht.h>40 39 #include <arch/mm/frame.h> 41 40 #include <arch/mm/page.h> … … 51 50 #include <panic.h> 52 51 #include <arch/asm.h> 52 #include <genarch/mm/page_ht.h> 53 53 54 54 #ifdef CONFIG_TSB … … 476 476 } 477 477 478 void d ump_sfsr_and_sfar(void)478 void describe_dmmu_fault(void) 479 479 { 480 480 tlb_sfsr_reg_t sfsr; … … 499 499 } 500 500 501 void dump_sfsr_and_sfar(void) 502 { 503 tlb_sfsr_reg_t sfsr; 504 uintptr_t sfar; 505 506 sfsr.value = dtlb_sfsr_read(); 507 sfar = dtlb_sfar_read(); 508 509 #if defined (US) 510 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " 511 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, 512 sfsr.ow, sfsr.fv); 513 #elif defined (US3) 514 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, " 515 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft, 516 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv); 517 #endif 518 519 printf("DTLB SFAR: address=%p\n", sfar); 520 521 dtlb_sfsr_write(0); 522 } 523 501 524 #if defined (US) 502 525 /** Invalidate all unlocked ITLB and DTLB entries. */ -
kernel/arch/sparc64/src/smp/sun4u/ipi.c
r1ccafee r0b9ac3c 99 99 status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0); 100 100 if (status & INTR_DISPATCH_STATUS_BUSY) 101 panic("Interrupt Dispatch Status busy bit set .");101 panic("Interrupt Dispatch Status busy bit set\n"); 102 102 103 103 ASSERT(!(pstate_read() & PSTATE_IE_BIT)); … … 152 152 break; 153 153 default: 154 panic("Unknown IPI (%d). ", ipi);154 panic("Unknown IPI (%d).\n", ipi); 155 155 break; 156 156 } -
kernel/arch/sparc64/src/smp/sun4u/smp.c
r1ccafee r0b9ac3c 62 62 { 63 63 ofw_tree_node_t *node; 64 size_t cnt = 0;64 unsigned int cnt = 0; 65 65 66 66 if (is_us() || is_us_iii()) { -
kernel/arch/sparc64/src/sun4u/sparc64.c
r1ccafee r0b9ac3c 44 44 #include <arch/mm/page.h> 45 45 #include <arch/stack.h> 46 #include <interrupt.h> 46 47 #include <genarch/ofw/ofw_tree.h> 47 48 #include <userspace.h> … … 166 167 } 167 168 169 void irq_initialize_arch(irq_t *irq) 170 { 171 (void) irq; 172 } 173 168 174 /** @} 169 175 */ -
kernel/arch/sparc64/src/trap/exception.c
r1ccafee r0b9ac3c 162 162 fault_if_from_uspace(istate, "%s.", __func__); 163 163 dump_istate(istate); 164 d ump_sfsr_and_sfar();164 describe_dmmu_fault(); 165 165 panic("%s.", __func__); 166 166 } -
kernel/arch/sparc64/src/trap/interrupt.c
r1ccafee r0b9ac3c 1 1 /* 2 2 * Copyright (c) 2005 Jakub Jermar 3 * Copyright (c) 2009 Pavel Rimsky 3 4 * All rights reserved. 4 5 * … … 34 35 35 36 #include <arch/interrupt.h> 37 #include <arch/trap/interrupt.h> 36 38 #include <arch/sparc64.h> 37 #include <arch/trap/interrupt.h>38 39 #include <interrupt.h> 39 40 #include <ddi/irq.h> … … 60 61 exc_register(n - 1, name, f); 61 62 } 62 63 /** Process hardware interrupt.64 *65 * @param n Ignored.66 * @param istate Ignored.67 */68 void interrupt(int n, istate_t *istate)69 {70 uint64_t status;71 uint64_t intrcv;72 uint64_t data0;73 status = asi_u64_read(ASI_INTR_DISPATCH_STATUS, 0);74 if (status & (!INTR_DISPATCH_STATUS_BUSY))75 panic("Interrupt Dispatch Status busy bit not set.");76 77 intrcv = asi_u64_read(ASI_INTR_RECEIVE, 0);78 #if defined (US)79 data0 = asi_u64_read(ASI_INTR_R, ASI_UDB_INTR_R_DATA_0);80 #elif defined (US3)81 data0 = asi_u64_read(ASI_INTR_R, VA_INTR_R_DATA_0);82 #endif83 84 irq_t *irq = irq_dispatch_and_lock(data0);85 if (irq) {86 /*87 * The IRQ handler was found.88 */89 irq->handler(irq);90 /*91 * See if there is a clear-interrupt-routine and call it.92 */93 if (irq->cir) {94 irq->cir(irq->cir_arg, irq->inr);95 }96 spinlock_unlock(&irq->lock);97 } else if (data0 > config.base) {98 /*99 * This is a cross-call.100 * data0 contains address of the kernel function.101 * We call the function only after we verify102 * it is one of the supported ones.103 */104 #ifdef CONFIG_SMP105 if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {106 tlb_shootdown_ipi_recv();107 }108 #endif109 } else {110 /*111 * Spurious interrupt.112 */113 #ifdef CONFIG_DEBUG114 printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64115 ", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);116 #endif117 }118 119 membar();120 asi_u64_write(ASI_INTR_RECEIVE, 0, 0);121 }122 123 63 /** @} 124 64 */
Note:
See TracChangeset
for help on using the changeset viewer.
