Changeset c2efbb4 in mainline for kernel/arch
- Timestamp: 2010-02-20T20:54:53Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 721d4e85, 95c4776
- Parents: f516bc2 (diff), b03a666 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/arch
- Files: 1 added, 42 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/arch/abs32le/Makefile.inc
--- rf516bc2
+++ rc2efbb4
@@ -30 +30 @@
 #
 
+BFD = binary
+
+ifeq ($(COMPILER),gcc_cross)
+	TOOLCHAIN_DIR = $(CROSS_PREFIX)/$(CROSS_TARGET)
+
+	ifeq ($(CROSS_TARGET),arm32)
+		TARGET = arm-linux-gnu
+		ATSIGN = %
+	endif
+
+	ifeq ($(CROSS_TARGET),ia32)
+		TARGET = i686-pc-linux-gnu
+	endif
+
+	ifeq ($(CROSS_TARGET),mips32)
+		TARGET = mipsel-linux-gnu
+		GCC_CFLAGS += -mno-abicalls
+	endif
+endif
+
 BITS = 32
 ENDIANESS = LE
@@ -43 +63 @@
 	arch/$(KARCH)/src/ddi/ddi.c \
 	arch/$(KARCH)/src/smp/smp.c \
+	arch/$(KARCH)/src/smp/ipi.c \
 	arch/$(KARCH)/src/mm/as.c \
 	arch/$(KARCH)/src/mm/frame.c \
kernel/arch/abs32le/include/asm.h
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@
 #include <config.h>
 
-extern void interrupt_handlers(void);
-
-extern void enable_l_apic_in_msr(void);
-
-
-extern void asm_delay_loop(uint32_t);
-extern void asm_fake_loop(uint32_t);
-
+static inline void asm_delay_loop(uint32_t usec)
+{
+}
 
 static inline __attribute__((noreturn)) void cpu_halt(void)
kernel/arch/abs32le/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -54 +54 @@
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
 	/* On real hardware both the storing of the previous
@@ -60 +60 @@
 	   atomic action. */
 	
-	long prev = val->count;
+	atomic_count_t prev = val->count;
 	
 	val->count++;
@@ -66 +66 @@
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
 	/* On real hardware both the storing of the previous
@@ -72 +72 @@
 	   atomic action. */
 	
-	long prev = val->count;
+	atomic_count_t prev = val->count;
 	
 	val->count--;
@@ -81 +81 @@
 #define atomic_predec(val) (atomic_postdec(val) - 1)
 
-static inline uint32_t test_and_set(atomic_t *val) {
-	uint32_t v;
-	
-	asm volatile (
-		"movl $1, %[v]\n"
-		"xchgl %[v], %[count]\n"
-		: [v] "=r" (v), [count] "+m" (val->count)
-	);
-	
-	return v;
+static inline atomic_count_t test_and_set(atomic_t *val)
+{
+	atomic_count_t prev = val->count;
+	val->count = 1;
+	return prev;
 }
 
-/** ia32 specific fast spinlock */
 static inline void atomic_lock_arch(atomic_t *val)
 {
-	uint32_t tmp;
-	
-	preemption_disable();
-	asm volatile (
-		"0:\n"
-		"pause\n" /* Pentium 4's HT love this instruction */
-		"mov %[count], %[tmp]\n"
-		"testl %[tmp], %[tmp]\n"
-		"jnz 0b\n" /* lightweight looping on locked spinlock */
-		
-		"incl %[tmp]\n" /* now use the atomic operation */
-		"xchgl %[count], %[tmp]\n"
-		"testl %[tmp], %[tmp]\n"
-		"jnz 0b\n"
-		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
-	);
-	/*
-	 * Prevent critical section code from bleeding out this way up.
-	 */
-	CS_ENTER_BARRIER();
+	do {
+		while (val->count);
+	} while (test_and_set(val));
 }
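The abs32le port is an abstract reference architecture, so the rewrite above replaces the copied ia32 inline assembly with plain C that only models atomicity. A minimal standalone sketch of the resulting lock protocol (the demo names are hypothetical, and on real hardware the read-modify-write in test_and_set would have to be a single atomic instruction such as ia32's xchgl):

```c
#include <stdint.h>

typedef uint32_t atomic_count_t;

typedef struct {
	volatile atomic_count_t count;  /* assumed layout of atomic_t */
} atomic_demo_t;

/* Model of the new abs32le test_and_set(): store 1 and hand back the
 * previous value (0 means the lock was free and is now ours). */
static inline atomic_count_t test_and_set_demo(atomic_demo_t *val)
{
	atomic_count_t prev = val->count;
	val->count = 1;
	return prev;
}

/* Model of the new atomic_lock_arch(): spin-read until the lock looks
 * free, then race for it with test_and_set; retry if another CPU won. */
static inline void lock_demo(atomic_demo_t *val)
{
	do {
		while (val->count);
	} while (test_and_set_demo(val));
}
```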
kernel/arch/abs32le/include/barrier.h
--- rf516bc2
+++ rc2efbb4
@@ -27 +27 @@
  */
 
-/** @addtogroup ia32
+/** @addtogroup abs32le
  * @{
  */
@@ -33 +33 @@
  */
 
-#ifndef KERN_ia32_BARRIER_H_
-#define KERN_ia32_BARRIER_H_
-
-/*
- * NOTE:
- * No barriers for critical section (i.e. spinlock) on IA-32 are needed:
- * - spinlock_lock() and spinlock_trylock() use serializing XCHG instruction
- * - writes cannot pass reads on IA-32 => spinlock_unlock() needs no barriers
- */
+#ifndef KERN_abs32le_BARRIER_H_
+#define KERN_abs32le_BARRIER_H_
 
 /*
@@ -47 +40 @@
  */
 
-#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
-#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
+#define CS_ENTER_BARRIER()
+#define CS_LEAVE_BARRIER()
 
-static inline void cpuid_serialization(void)
-{
-	asm volatile (
-		"xorl %%eax, %%eax\n"
-		"cpuid\n"
-		::: "eax", "ebx", "ecx", "edx", "memory"
-	);
-}
+#define memory_barrier()
+#define read_barrier()
+#define write_barrier()
 
-#if defined(CONFIG_FENCES_P4)
-	#define memory_barrier() asm volatile ("mfence\n" ::: "memory")
-	#define read_barrier() asm volatile ("lfence\n" ::: "memory")
-	#ifdef CONFIG_WEAK_MEMORY
-		#define write_barrier() asm volatile ("sfence\n" ::: "memory")
-	#else
-		#define write_barrier() asm volatile ("" ::: "memory");
-	#endif
-#elif defined(CONFIG_FENCES_P3)
-	#define memory_barrier() cpuid_serialization()
-	#define read_barrier() cpuid_serialization()
-	#ifdef CONFIG_WEAK_MEMORY
-		#define write_barrier() asm volatile ("sfence\n" ::: "memory")
-	#else
-		#define write_barrier() asm volatile ("" ::: "memory");
-	#endif
-#else
-	#define memory_barrier() cpuid_serialization()
-	#define read_barrier() cpuid_serialization()
-	#ifdef CONFIG_WEAK_MEMORY
-		#define write_barrier() cpuid_serialization()
-	#else
-		#define write_barrier() asm volatile ("" ::: "memory");
-	#endif
-#endif
-
-/*
- * On ia32, the hardware takes care about instruction and data cache coherence,
- * even on SMP systems. We issue a write barrier to be sure that writes
- * queueing in the store buffer drain to the memory (even though it would be
- * sufficient for them to drain to the D-cache).
- */
-#define smc_coherence(a) write_barrier()
-#define smc_coherence_block(a, l) write_barrier()
+#define smc_coherence(addr)
+#define smc_coherence_block(addr, size)
 
 #endif
kernel/arch/abs32le/include/context.h
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@ (whitespace-only change)
 
 #define context_set(ctx, pc, stack, size) \
-	context_set_generic(ctx, pc, stack, size)
+	context_set_generic(ctx, pc, stack, size)
 
 /*
kernel/arch/abs32le/include/context_offset.h
--- rf516bc2
+++ rc2efbb4
@@ -37 +37 @@
 
 #define OFFSET_PC 0x00
-
-#ifdef KERNEL
-#define OFFSET_IPL 0x04
-#else
-#define OFFSET_TLS 0x04
-#endif
+#define OFFSET_IPL 0x04
 
 #endif
kernel/arch/abs32le/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -36 +36 @@
 #define KERN_abs32le_MEMSTR_H_
 
-#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
-
-extern void memsetw(void *, size_t, uint16_t);
-extern void memsetb(void *, size_t, uint8_t);
-
-extern int memcmp(const void *, const void *, size_t);
+#define memcpy(dst, src, cnt) _memcpy((dst), (src), (cnt))
+#define memsetb(dst, cnt, val) _memsetb((dst), (cnt), (val))
+#define memsetw(dst, cnt, val) _memsetw((dst), (cnt), (val))
 
 #endif
kernel/arch/abs32le/include/mm/frame.h
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@
 
 #ifdef KERNEL
-#ifndef __ASM__
 
 #include <arch/types.h>
@@ -47 +46 @@
 extern void physmem_print(void);
 
-#endif /* __ASM__ */
 #endif /* KERNEL */
kernel/arch/abs32le/include/mm/page.h
--- rf516bc2
+++ rc2efbb4
@@ -43 +43 @@
 #ifdef KERNEL
 
-#ifndef __ASM__
-#define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
-#define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
-#else
-#define KA2PA(x) ((x) - 0x80000000)
-#define PA2KA(x) ((x) + 0x80000000)
-#endif
+#define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
+#define PA2KA(x) (((uintptr_t) (x)) + 0x80000000)
 
 /*
@@ -122 +117 @@
 #define PTE_EXECUTABLE_ARCH(p) 1
 
-#ifndef __ASM__
-
 #include <mm/mm.h>
 #include <arch/interrupt.h>
@@ -129 +122 @@
 #include <typedefs.h>
 
-/* Page fault error codes. */
-
-/** When bit on this position is 0, the page fault was caused by a not-present
- * page.
- */
-#define PFERR_CODE_P (1 << 0)
-
-/** When bit on this position is 1, the page fault was caused by a write. */
-#define PFERR_CODE_RW (1 << 1)
-
-/** When bit on this position is 1, the page fault was caused in user mode. */
-#define PFERR_CODE_US (1 << 2)
-
-/** When bit on this position is 1, a reserved bit was set in page directory. */
-#define PFERR_CODE_RSVD (1 << 3)
-
 /** Page Table Entry. */
 typedef struct {
-	unsigned present : 1;
-	unsigned writeable : 1;
-	unsigned uaccessible : 1;
-	unsigned page_write_through : 1;
-	unsigned page_cache_disable : 1;
-	unsigned accessed : 1;
-	unsigned dirty : 1;
-	unsigned pat : 1;
-	unsigned global : 1;
-	unsigned soft_valid : 1; /**< Valid content even if the present bit is not set. */
-	unsigned avl : 2;
-	unsigned frame_address : 20;
-} __attribute__ ((packed)) pte_t;
+	unsigned int present : 1;
+	unsigned int writeable : 1;
+	unsigned int uaccessible : 1;
+	unsigned int page_write_through : 1;
+	unsigned int page_cache_disable : 1;
+	unsigned int accessed : 1;
+	unsigned int dirty : 1;
+	unsigned int pat : 1;
+	unsigned int global : 1;
+	
+	/** Valid content even if the present bit is not set. */
+	unsigned int soft_valid : 1;
+	unsigned int avl : 2;
+	unsigned int frame_address : 20;
+} __attribute__((packed)) pte_t;
 
 static inline unsigned int get_pt_flags(pte_t *pt, size_t i)
@@ -192 +171 @@
 
 extern void page_arch_init(void);
-extern void page_fault(int n, istate_t *istate);
-
-#endif /* __ASM__ */
+extern void page_fault(int, istate_t *);
 
 #endif /* KERNEL */
kernel/arch/abs32le/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -55 +55 @@
 typedef uint32_t unative_t;
 typedef int32_t native_t;
+typedef uint32_t atomic_count_t;
 
 typedef struct {
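This typedef is the thread running through the whole merge: each port now names the natural width of its atomic counter (uint32_t on the 32-bit targets, uint64_t on amd64, ia64 and sparc64), and the atomic_* primitives return it instead of a bare long. A sketch of how the pieces are assumed to fit together — the shared atomic_t definition itself is outside this diff, so its shape here is an assumption:

```c
#include <stdint.h>

/* Added by this changeset, one per port: */
typedef uint32_t atomic_count_t;       /* abs32le, ia32, arm32, mips32, ppc32 */
/* typedef uint64_t atomic_count_t; */ /* amd64, ia64, sparc64 */

/* Assumed shape of the generic atomic_t built on top of it: */
typedef struct {
	volatile atomic_count_t count;
} atomic_t;

/* The primitives then agree on one return type across all ports: */
extern atomic_count_t atomic_postinc(atomic_t *val);
extern atomic_count_t atomic_postdec(atomic_t *val);
```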
kernel/arch/abs32le/src/abs32le.c
--- rf516bc2
+++ rc2efbb4
@@ -35 +35 @@
 #include <arch.h>
 #include <arch/types.h>
-#include <arch/context.h>
 #include <arch/interrupt.h>
 #include <arch/asm.h>
@@ -41 +40 @@
 #include <func.h>
 #include <config.h>
+#include <errno.h>
 #include <context.h>
+#include <fpu_context.h>
 #include <interrupt.h>
+#include <syscall/copy.h>
 #include <ddi/irq.h>
 #include <proc/thread.h>
@@ -49 +51 @@
 #include <sysinfo/sysinfo.h>
 #include <memstr.h>
+
+char memcpy_from_uspace_failover_address;
+char memcpy_to_uspace_failover_address;
 
 void arch_pre_mm_init(void)
@@ -83 +88 @@
 unative_t sys_tls_set(unative_t addr)
 {
-	return 0;
+	return EOK;
 }
 
@@ -109 +114 @@
 }
 
-void memsetb(void *dst, size_t cnt, uint8_t val)
-{
-	_memsetb(dst, cnt, val);
-}
-
-void memsetw(void *dst, size_t cnt, uint16_t val)
-{
-	_memsetw(dst, cnt, val);
-}
-
 void panic_printf(char *fmt, ...)
 {
@@ -140 +135 @@
 }
 
+void fpu_init(void)
+{
+}
+
+void fpu_context_save(fpu_context_t *ctx)
+{
+}
+
+void fpu_context_restore(fpu_context_t *ctx)
+{
+}
+
+int memcpy_from_uspace(void *dst, const void *uspace_src, size_t size)
+{
+	return EOK;
+}
+
+int memcpy_to_uspace(void *uspace_dst, const void *src, size_t size)
+{
+	return EOK;
+}
+
 /** @}
  */
kernel/arch/abs32le/src/debug/stacktrace.c
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@
 bool kernel_frame_pointer_validate(uintptr_t fp)
 {
-	return true;;
+	return true;
 }
 
kernel/arch/amd64/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@
 #include <preemption.h>
 
-static inline void atomic_inc(atomic_t *val) {
+static inline void atomic_inc(atomic_t *val)
+{
 #ifdef CONFIG_SMP
 	asm volatile (
@@ -54 +55 @@
 }
 
-static inline void atomic_dec(atomic_t *val) {
+static inline void atomic_dec(atomic_t *val)
+{
 #ifdef CONFIG_SMP
 	asm volatile (
@@ -68 +70 @@
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
-	long r = 1;
+	atomic_count_t r = 1;
 	
 	asm volatile (
 		"lock xaddq %[r], %[count]\n"
-		: [count] "+m" (val->count), [r] "+r" (r)
+		: [count] "+m" (val->count),
+		  [r] "+r" (r)
 	);
 	
@@ -80 +83 @@
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
-	long r = -1;
+	atomic_count_t r = -1;
 	
 	asm volatile (
 		"lock xaddq %[r], %[count]\n"
-		: [count] "+m" (val->count), [r] "+r" (r)
+		: [count] "+m" (val->count),
+		  [r] "+r" (r)
 	);
 	
@@ -95 +99 @@
 #define atomic_predec(val) (atomic_postdec(val) - 1)
 
-static inline uint64_t test_and_set(atomic_t *val) {
-	uint64_t v;
+static inline atomic_count_t test_and_set(atomic_t *val)
+{
+	atomic_count_t v;
 	
 	asm volatile (
 		"movq $1, %[v]\n"
 		"xchgq %[v], %[count]\n"
-		: [v] "=r" (v), [count] "+m" (val->count)
+		: [v] "=r" (v),
+		  [count] "+m" (val->count)
 	);
 	
@@ -107 +113 @@
 }
 
-
 /** amd64 specific fast spinlock */
 static inline void atomic_lock_arch(atomic_t *val)
 {
-	uint64_t tmp;
+	atomic_count_t tmp;
 	
 	preemption_disable();
@@ -125 +130 @@
 		"testq %[tmp], %[tmp]\n"
 		"jnz 0b\n"
-		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
+		: [count] "+m" (val->count),
+		  [tmp] "=&r" (tmp)
 	);
+	
 	/*
 	 * Prevent critical section code from bleeding out this way up.
kernel/arch/amd64/include/interrupt.h
--- rf516bc2
+++ rc2efbb4
@@ -54 +54 @@
 #define IRQ_PIC_SPUR 7
 #define IRQ_MOUSE 12
+#define IRQ_DP8390 9
 
 /* this one must have four least significant bits set to ones */
kernel/arch/amd64/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup amd64 
+/** @addtogroup amd64
  * @{
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/amd64/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -55 +55 @@
 typedef uint64_t unative_t;
 typedef int64_t native_t;
+typedef uint64_t atomic_count_t;
 
 typedef struct {
kernel/arch/amd64/src/amd64.c
--- rf516bc2
+++ rc2efbb4
@@ -228 +228 @@
 	    (uintptr_t) I8042_BASE);
 #endif
+	
+#ifdef CONFIG_NETIF_DP8390
+	trap_virtual_enable_irqs(1 << IRQ_DP8390);
+	sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+#endif
 }
 
kernel/arch/amd64/src/debugger.c
--- rf516bc2
+++ rc2efbb4
@@ -201 +201 @@
 
 	/* Send IPI */
-#ifdef CONFIG_SMP
 //	ipi_broadcast(VECTOR_DEBUG_IPI);
-#endif
 	
 	return curidx;
@@ -262 +260 @@
 	spinlock_unlock(&bkpoint_lock);
 	interrupts_restore(ipl);
-#ifdef CONFIG_SMP
-//	ipi_broadcast(VECTOR_DEBUG_IPI);
-#endif
+//	ipi_broadcast(VECTOR_DEBUG_IPI);
 	
kernel/arch/arm32/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -47 +47 @@
  *
  */
-static inline long atomic_add(atomic_t *val, int i)
+static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
 {
-	long ret;
-	
 	/*
 	 * This implementation is for UP pre-ARMv6 systems where we do not have
@@ -57 +55 @@
 	ipl_t ipl = interrupts_disable();
 	val->count += i;
-	ret = val->count;
+	atomic_count_t ret = val->count;
 	interrupts_restore(ipl);
 
@@ -66 +64 @@
  *
  * @param val Variable to be incremented.
+ *
  */
 static inline void atomic_inc(atomic_t *val)
@@ -75 +74 @@
  *
  * @param val Variable to be decremented.
+ *
  */
 static inline void atomic_dec(atomic_t *val) {
@@ -84 +84 @@
  * @param val Variable to be incremented.
  * @return Value after incrementation.
+ *
  */
-static inline long atomic_preinc(atomic_t *val)
+static inline atomic_count_t atomic_preinc(atomic_t *val)
 {
 	return atomic_add(val, 1);
@@ -94 +95 @@
  * @param val Variable to be decremented.
  * @return Value after decrementation.
+ *
  */
-static inline long atomic_predec(atomic_t *val)
+static inline atomic_count_t atomic_predec(atomic_t *val)
 {
 	return atomic_add(val, -1);
@@ -104 +106 @@
  * @param val Variable to be incremented.
  * @return Value before incrementation.
+ *
  */
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
 	return atomic_add(val, 1) - 1;
@@ -114 +117 @@
  * @param val Variable to be decremented.
  * @return Value before decrementation.
+ *
  */
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
 	return atomic_add(val, -1) + 1;
kernel/arch/arm32/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup arm32 
+/** @addtogroup arm32
  * @{
@@ -39 +39 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/arm32/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup arm32 
+/** @addtogroup arm32
  * @{
  */
@@ -38 +38 @@ (indentation change)
 
 #ifndef DOXYGEN
-# define ATTRIBUTE_PACKED __attribute__((packed))
+#define ATTRIBUTE_PACKED __attribute__((packed))
 #else
-# define ATTRIBUTE_PACKED
+#define ATTRIBUTE_PACKED
 #endif
@@ -62 +62 @@
 typedef uint32_t unative_t;
 typedef int32_t native_t;
+typedef uint32_t atomic_count_t;
 
 typedef struct {
kernel/arch/ia32/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -40 +40 @@
 #include <preemption.h>
 
-static inline void atomic_inc(atomic_t *val) {
+static inline void atomic_inc(atomic_t *val)
+{
 #ifdef CONFIG_SMP
 	asm volatile (
@@ -54 +55 @@
 }
 
-static inline void atomic_dec(atomic_t *val) {
+static inline void atomic_dec(atomic_t *val)
+{
 #ifdef CONFIG_SMP
 	asm volatile (
@@ -68 +70 @@
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
-	long r = 1;
+	atomic_count_t r = 1;
 	
 	asm volatile (
 		"lock xaddl %[r], %[count]\n"
-		: [count] "+m" (val->count), [r] "+r" (r)
+		: [count] "+m" (val->count),
+		  [r] "+r" (r)
 	);
 	
@@ -80 +83 @@
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
-	long r = -1;
+	atomic_count_t r = -1;
 	
 	asm volatile (
 		"lock xaddl %[r], %[count]\n"
-		: [count] "+m" (val->count), [r] "+r"(r)
+		: [count] "+m" (val->count),
+		  [r] "+r" (r)
 	);
 	
@@ -95 +99 @@
 #define atomic_predec(val) (atomic_postdec(val) - 1)
 
-static inline uint32_t test_and_set(atomic_t *val) {
-	uint32_t v;
+static inline atomic_count_t test_and_set(atomic_t *val)
+{
+	atomic_count_t v;
 	
 	asm volatile (
 		"movl $1, %[v]\n"
 		"xchgl %[v], %[count]\n"
-		: [v] "=r" (v), [count] "+m" (val->count)
+		: [v] "=r" (v),
+		  [count] "+m" (val->count)
 	);
 	
@@ -110 +116 @@
 static inline void atomic_lock_arch(atomic_t *val)
 {
-	uint32_t tmp;
+	atomic_count_t tmp;
 	
 	preemption_disable();
@@ -124 +130 @@
 		"testl %[tmp], %[tmp]\n"
 		"jnz 0b\n"
-		: [count] "+m" (val->count), [tmp] "=&r" (tmp)
+		: [count] "+m" (val->count),
+		  [tmp] "=&r" (tmp)
 	);
+	
 	/*
 	 * Prevent critical section code from bleeding out this way up.
kernel/arch/ia32/include/interrupt.h
--- rf516bc2
+++ rc2efbb4
@@ -54 +54 @@
 #define IRQ_PIC_SPUR 7
 #define IRQ_MOUSE 12
+#define IRQ_DP8390 9
 
 /* this one must have four least significant bits set to ones */
kernel/arch/ia32/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/ia32/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -55 +55 @@
 typedef uint32_t unative_t;
 typedef int32_t native_t;
+typedef uint32_t atomic_count_t;
 
 typedef struct {
kernel/arch/ia32/src/ia32.c
--- rf516bc2
+++ rc2efbb4
@@ -186 +186 @@
 	    (uintptr_t) I8042_BASE);
 #endif
+	
+#ifdef CONFIG_NETIF_DP8390
+	trap_virtual_enable_irqs(1 << IRQ_DP8390);
+	sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+#endif
 }
 
kernel/arch/ia32/src/smp/ipi.c
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ia32 
+/** @addtogroup ia32
  * @{
  */
kernel/arch/ia64/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -36 +36 @@
 #define KERN_ia64_ATOMIC_H_
 
-static inline uint64_t test_and_set(atomic_t *val)
+static inline atomic_count_t test_and_set(atomic_t *val)
 {
-	uint64_t v;
-	
+	atomic_count_t v;
+	
 	asm volatile (
 		"movl %[v] = 0x1;;\n"
@@ -53 +53 @@
 {
 	do {
-		while (val->count)
-			;
+		while (val->count);
 	} while (test_and_set(val));
 }
@@ -60 +59 @@
 static inline void atomic_inc(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -71 +70 @@
 static inline void atomic_dec(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -80 +79 @@
 }
 
-static inline long atomic_preinc(atomic_t *val)
+static inline atomic_count_t atomic_preinc(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -93 +92 @@
 }
 
-static inline long atomic_predec(atomic_t *val)
+static inline atomic_count_t atomic_predec(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -106 +105 @@
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -119 +118 @@
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
-	long v;
+	atomic_count_t v;
 	
 	asm volatile (
kernel/arch/ia64/include/interrupt.h
--- rf516bc2
+++ rc2efbb4
@@ -61 +61 @@
 #define IRQ_KBD (0x01 + LEGACY_INTERRUPT_BASE)
 #define IRQ_MOUSE (0x0c + LEGACY_INTERRUPT_BASE)
+#define IRQ_DP8390 (0x09 + LEGACY_INTERRUPT_BASE)
 
 /** General Exception codes. */
kernel/arch/ia64/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ia64 
+/** @addtogroup ia64
  * @{
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/ia64/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ia64 
+/** @addtogroup ia64
  * @{
  */
@@ -63 +63 @@
 typedef uint64_t unative_t;
 typedef int64_t native_t;
+typedef uint64_t atomic_count_t;
 
 typedef struct {
kernel/arch/ia64/src/ia64.c
--- rf516bc2
+++ rc2efbb4
@@ -212 +212 @@
 	    (uintptr_t) I8042_BASE);
 #endif
 
+#ifdef CONFIG_NETIF_DP8390
+	sysinfo_set_item_val("netif.dp8390.inr", NULL, IRQ_DP8390);
+#endif
+
 	sysinfo_set_item_val("ia64_iospace", NULL, true);
 	sysinfo_set_item_val("ia64_iospace.address", NULL, true);
kernel/arch/mips32/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup mips32 
+/** @addtogroup mips32
  * @{
  */
@@ -51 +51 @@
  *
  * @return Value after addition.
+ *
  */
-static inline long atomic_add(atomic_t *val, int i)
+static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
 {
-	long tmp, v;
+	atomic_count_t tmp;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -64 +66 @@
 		"	beq %0, %4, 1b\n"	/* if the atomic operation failed, try again */
 		"	nop\n"
-		: "=&r" (tmp), "+m" (val->count), "=&r" (v)
-		: "r" (i), "i" (0)
+		: "=&r" (tmp),
+		  "+m" (val->count),
+		  "=&r" (v)
+		: "r" (i),
+		  "i" (0)
 	);
 	
@@ -71 +76 @@
 }
 
-static inline uint32_t test_and_set(atomic_t *val) {
-	uint32_t tmp, v;
+static inline atomic_count_t test_and_set(atomic_t *val)
+{
+	atomic_count_t tmp;
+	atomic_count_t v;
 	
 	asm volatile (
@@ -82 +89 @@
 		"	beqz %0, 1b\n"
 		"2:\n"
-		: "=&r" (tmp), "+m" (val->count), "=&r" (v)
+		: "=&r" (tmp),
+		  "+m" (val->count),
+		  "=&r" (v)
 		: "i" (1)
 	);
@@ -89 +98 @@
 }
 
-static inline void atomic_lock_arch(atomic_t *val) {
+static inline void atomic_lock_arch(atomic_t *val)
+{
 	do {
-		while (val->count)
-			;
+		while (val->count);
 	} while (test_and_set(val));
 }
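The mips32 atomic_add() above is built on the ll/sc pair: ll tags the location, the later sc store succeeds only if no other CPU wrote it in between, and the beq retries on failure. For readers without MIPS assembly, a rough standalone equivalent of that retry loop using GCC's __sync_val_compare_and_swap builtin — an illustration only, not the kernel's implementation:

```c
#include <stdint.h>

typedef uint32_t atomic_count_t;

/* Model of the ll/sc retry loop: keep re-reading the counter and
 * attempting a compare-and-swap until no other CPU has modified it in
 * between (the same guarantee the hardware gives the ll/sc pair). */
static inline atomic_count_t fetch_and_add(volatile atomic_count_t *count,
    atomic_count_t i)
{
	atomic_count_t old;
	do {
		old = *count;
	} while (__sync_val_compare_and_swap(count, old, old + i) != old);
	return old + i;  /* mirrors atomic_add(): value after addition */
}
```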
kernel/arch/mips32/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup mips32 
+/** @addtogroup mips32
  * @{
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/mips32/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup mips32 
+/** @addtogroup mips32
  * @{
  */
@@ -55 +55 @@
 typedef uint32_t unative_t;
 typedef int32_t native_t;
+typedef uint32_t atomic_count_t;
 
 typedef struct {
kernel/arch/mips32/src/smp/dorder.c
--- rf516bc2
+++ rc2efbb4
@@ -33 +33 @@
  */
 
-#include <arch/smp/dorder.h>
+#include <smp/ipi.h>
+
+#ifdef CONFIG_SMP
 
 #define MSIM_DORDER_ADDRESS 0xB0000004
@@ -39 +41 @@
 void ipi_broadcast_arch(int ipi)
 {
-#ifdef CONFIG_SMP
 	*((volatile unsigned int *) MSIM_DORDER_ADDRESS) = 0x7FFFFFFF;
-#endif
 }
+
+#endif
 
 /** @}
kernel/arch/ppc32/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ppc32 
+/** @addtogroup ppc32
  * @{
  */
@@ -38 +38 @@
 static inline void atomic_inc(atomic_t *val)
 {
-	long tmp;
-
+	atomic_count_t tmp;
+	
 	asm volatile (
 		"1:\n"
@@ -46 +46 @@
 		"stwcx. %0, 0, %2\n"
 		"bne- 1b"
-		: "=&r" (tmp), "=m" (val->count)
-		: "r" (&val->count), "m" (val->count)
+		: "=&r" (tmp),
+		  "=m" (val->count)
+		: "r" (&val->count),
+		  "m" (val->count)
 		: "cc"
 	);
@@ -54 +56 @@
 static inline void atomic_dec(atomic_t *val)
 {
-	long tmp;
-
+	atomic_count_t tmp;
+	
 	asm volatile (
 		"1:\n"
 		"lwarx %0, 0, %2\n"
 		"addic %0, %0, -1\n"
-		"stwcx.	%0, 0, %2\n"
+		"stwcx. %0, 0, %2\n"
 		"bne- 1b"
-		: "=&r" (tmp), "=m" (val->count)
-		: "r" (&val->count), "m" (val->count)
+		: "=&r" (tmp),
+		  "=m" (val->count)
+		: "r" (&val->count),
+		  "m" (val->count)
 		: "cc"
 	);
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
 	atomic_inc(val);
@@ -74 +78 @@
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
 	atomic_dec(val);
@@ -80 +84 @@
 }
 
-static inline long atomic_preinc(atomic_t *val)
+static inline atomic_count_t atomic_preinc(atomic_t *val)
 {
 	atomic_inc(val);
@@ -86 +90 @@
 }
 
-static inline long atomic_predec(atomic_t *val)
+static inline atomic_count_t atomic_predec(atomic_t *val)
 {
 	atomic_dec(val);
kernel/arch/ppc32/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ppc32 
+/** @addtogroup ppc32
  * @{
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/ppc32/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup ppc32 
+/** @addtogroup ppc32
  * @{
  */
@@ -55 +55 @@
 typedef uint32_t unative_t;
 typedef int32_t native_t;
+typedef uint32_t atomic_count_t;
 
 typedef struct {
kernel/arch/sparc64/include/atomic.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup sparc64 
+/** @addtogroup sparc64
  * @{
  */
@@ -45 +45 @@
  *
  * @param val Atomic variable.
- * @param i   Signed value to be added.
+ * @param i Signed value to be added.
  *
  * @return Value of the atomic variable as it existed before addition.
+ *
  */
-static inline long atomic_add(atomic_t *val, int i)
+static inline atomic_count_t atomic_add(atomic_t *val, atomic_count_t i)
 {
-	uint64_t a, b;
-	
+	atomic_count_t a;
+	atomic_count_t b;
+	
 	do {
-		volatile uintptr_t x = (uint64_t) &val->count;
-		
-		a = *((uint64_t *) x);
+		volatile uintptr_t ptr = (uintptr_t) &val->count;
+		
+		a = *((atomic_count_t *) ptr);
 		b = a + i;
-		asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)),
-		    "+r" (b) : "r" (a));
+		
+		asm volatile (
+			"casx %0, %2, %1\n"
+			: "+m" (*((atomic_count_t *) ptr)),
+			  "+r" (b)
+			: "r" (a)
+		);
 	} while (a != b);
 	
 	return a;
 }
 
-static inline long atomic_preinc(atomic_t *val)
+static inline atomic_count_t atomic_preinc(atomic_t *val)
 {
 	return atomic_add(val, 1) + 1;
 }
 
-static inline long atomic_postinc(atomic_t *val)
+static inline atomic_count_t atomic_postinc(atomic_t *val)
 {
 	return atomic_add(val, 1);
 }
 
-static inline long atomic_predec(atomic_t *val)
+static inline atomic_count_t atomic_predec(atomic_t *val)
 {
 	return atomic_add(val, -1) - 1;
 }
 
-static inline long atomic_postdec(atomic_t *val)
+static inline atomic_count_t atomic_postdec(atomic_t *val)
 {
 	return atomic_add(val, -1);
@@ -95 +102 @@
 }
 
-static inline long test_and_set(atomic_t *val)
+static inline atomic_count_t test_and_set(atomic_t *val)
 {
-	uint64_t v = 1;
-	volatile uintptr_t x = (uint64_t) &val->count;
-	
-	asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)),
-	    "+r" (v) : "r" (0));
-	
+	atomic_count_t v = 1;
+	volatile uintptr_t ptr = (uintptr_t) &val->count;
+	
+	asm volatile (
+		"casx %0, %2, %1\n"
+		: "+m" (*((atomic_count_t *) ptr)),
+		  "+r" (v)
+		: "r" (0)
+	);
+	
 	return v;
 }
 
 static inline void atomic_lock_arch(atomic_t *val)
 {
-	uint64_t tmp1 = 1;
-	uint64_t tmp2 = 0;
-	
-	volatile uintptr_t x = (uint64_t) &val->count;
+	atomic_count_t tmp1 = 1;
+	atomic_count_t tmp2 = 0;
+	
+	volatile uintptr_t ptr = (uintptr_t) &val->count;
 	
 	preemption_disable();
 	
 	asm volatile (
-	"0:\n"
-	"casx %0, %3, %1\n"
-	"brz %1, 2f\n"
-	"nop\n"
-	"1:\n"
-	"ldx %0, %2\n"
-	"brz %2, 0b\n"
-	"nop\n"
-	"ba %%xcc, 1b\n"
-	"nop\n"
-	"2:\n"
-	: "+m" (*((uint64_t *) x)), "+r" (tmp1), "+r" (tmp2) : "r" (0)
+		"0:\n"
+		"casx %0, %3, %1\n"
+		"brz %1, 2f\n"
+		"nop\n"
+		"1:\n"
+		"ldx %0, %2\n"
+		"brz %2, 0b\n"
+		"nop\n"
+		"ba %%xcc, 1b\n"
+		"nop\n"
+		"2:\n"
+		: "+m" (*((atomic_count_t *) ptr)),
+		  "+r" (tmp1),
+		  "+r" (tmp2)
+		: "r" (0)
 	);
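sparc64 gets its atomicity from casx, a 64-bit compare-and-swap, rather than a load-linked pair: atomic_add() must re-read and retry because another CPU may change the counter between the plain load and the casx, while test_and_set() needs no loop, since comparing against 0 and conditionally swapping in 1 either acquires the lock or observes it held. A standalone model of that single-shot acquire using the GCC builtin — illustrative, not the kernel's code:

```c
#include <stdint.h>

typedef uint64_t atomic_count_t;

/* Equivalent of the casx-based test_and_set() above: atomically replace
 * 0 with 1 and return what was there before (0 means we took the lock,
 * 1 means somebody else already holds it). */
static inline atomic_count_t test_and_set_model(volatile atomic_count_t *count)
{
	return __sync_val_compare_and_swap(count, 0, 1);
}
```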
kernel/arch/sparc64/include/memstr.h
--- rf516bc2
+++ rc2efbb4
@@ -38 +38 @@
 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
-extern void memsetw(void *dst, size_t cnt, uint16_t x);
-extern void memsetb(void *dst, size_t cnt, uint8_t x);
-
-extern int memcmp(const void *a, const void *b, size_t cnt);
+extern void memsetw(void *, size_t, uint16_t);
+extern void memsetb(void *, size_t, uint8_t);
 
 #endif
kernel/arch/sparc64/include/types.h
--- rf516bc2
+++ rc2efbb4
@@ -29 +29 @@ (trailing whitespace removed)
-/** @addtogroup sparc64 
+/** @addtogroup sparc64
  * @{
  */
@@ -55 +55 @@
 typedef uint64_t unative_t;
 typedef int64_t native_t;
+typedef uint64_t atomic_count_t;
 
 typedef struct {