- Timestamp:
- 2005-06-06T20:01:57Z (21 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- b0bf501
- Parents:
- d47f0e1
- Location:
- arch
- Files:
-
- 2 edited
- 3 moved
-
ia32/Makefile.inc (modified) (1 diff)
-
ia32/include/atomic.h (moved) (moved from arch/ia32/include/smp/atomic.h ) (1 diff)
-
ia32/src/atomic.S (moved) (moved from arch/ia32/src/smp/atomic.S ) (2 diffs)
-
mips/_link.ld (modified) (1 diff)
-
mips/include/atomic.h (moved) (moved from arch/mips/include/smp/atomic.h ) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
arch/ia32/Makefile.inc
rd47f0e1 re3f41b6 31 31 arch/smp/mps.c \ 32 32 arch/smp/smp.c \ 33 arch/ smp/atomic.S \33 arch/atomic.S \ 34 34 arch/smp/ipi.c \ 35 35 arch/ia32.c \ -
arch/ia32/include/atomic.h
rd47f0e1 re3f41b6 32 32 #include <arch/types.h> 33 33 34 extern void atomic_inc( int *val);35 extern void atomic_dec( int *val);34 extern void atomic_inc(volatile int *val); 35 extern void atomic_dec(volatile int *val); 36 36 37 37 extern int test_and_set(int *val); -
arch/ia32/src/atomic.S
rd47f0e1 re3f41b6 33 33 pushl %ebx 34 34 movl 8(%esp),%ebx 35 #ifdef __SMP__ 35 36 lock incl (%ebx) 37 #else 38 incl (%ebx) 39 #endif 36 40 popl %ebx 37 41 ret … … 41 45 pushl %ebx 42 46 movl 8(%esp),%ebx 47 #ifdef __SMP__ 43 48 lock decl (%ebx) 49 #else 50 decl (%ebx) 51 #endif 44 52 popl %ebx 45 53 ret -
arch/mips/_link.ld
rd47f0e1 re3f41b6 8 8 9 9 OUTPUT_FORMAT(binary) 10 ENTRY(kernel_image_start) 10 ENTRY(kernel_image_start) 11 11 12 12 SECTIONS { -
arch/mips/include/atomic.h
rd47f0e1 re3f41b6 30 30 #define __MIPS_ATOMIC_H__ 31 31 32 #define atomic_inc(x) ((*x)++) 33 #define atomic_dec(x) ((*x)--) 32 #define atomic_inc(x) (a_add(x,1)) 33 #define atomic_dec(x) (a_sub(x,1)) 34 35 /* 36 * Atomic addition 37 * 38 * This case is harder, and we have to use the special LL and SC operations 39 * to achieve atomicity. The instructions are similar to LW (load) and SW 40 * (store), except that the LL (load-linked) instruction loads the address 41 * of the variable to a special register and if another process writes to 42 * the same location, the SC (store-conditional) instruction fails. 43 */ 44 static inline int a_add( volatile int *val, int i) 45 { 46 int tmp, tmp2; 47 48 asm volatile ( 49 " .set push\n" 50 " .set noreorder\n" 51 " nop\n" 52 "1:\n" 53 " ll %0, %1\n" 54 " addu %0, %0, %2\n" 55 " move %3, %0\n" 56 " sc %0, %1\n" 57 " beq %0, 0x0, 1b\n" 58 " move %0, %3\n" 59 " .set pop\n" 60 : "=&r" (tmp), "=o" (*val) 61 : "r" (i), "r" (tmp2) 62 ); 63 return tmp; 64 } 65 66 67 /* 68 * Atomic subtraction 69 * 70 * Implemented in the same manner as a_add, except we subtract the value. 71 */ 72 static inline int a_sub( volatile int *val, int i) 73 74 { 75 int tmp, tmp2; 76 77 asm volatile ( 78 " .set push\n" 79 " .set noreorder\n" 80 " nop\n" 81 "1:\n" 82 " ll %0, %1\n" 83 " subu %0, %0, %2\n" 84 " move %3, %0\n" 85 " sc %0, %1\n" 86 " beq %0, 0x0, 1b\n" 87 " move %0, %3\n" 88 " .set pop\n" 89 : "=&r" (tmp), "=o" (*val) 90 : "r" (i), "r" (tmp2) 91 ); 92 return tmp; 93 } 94 34 95 35 96 #endif
Note:
See TracChangeset
for help on using the changeset viewer.
