Changes in kernel/arch/sparc64/include/barrier.h [723060a:7a0359b] in mainline
Files: 1 edited
Legend: '+' marks added lines, '-' marks removed lines; unmarked lines are unmodified.
kernel/arch/sparc64/include/barrier.h
--- kernel/arch/sparc64/include/barrier.h    (r723060a)
+++ kernel/arch/sparc64/include/barrier.h    (r7a0359b)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup sparc64 
+/** @addtogroup sparc64
  * @{
  */
@@ -36,32 +36,62 @@
 #define KERN_sparc64_BARRIER_H_
 
+#include <trace.h>
+
+#ifdef KERNEL
+
+#include <typedefs.h>
+
+#else
+
+#include <stdint.h>
+
+#endif
+
 /*
  * Our critical section barriers are prepared for the weakest RMO memory model.
  */
-#define CS_ENTER_BARRIER() \
-    asm volatile ( \
-        "membar #LoadLoad | #LoadStore\n" \
-        ::: "memory" \
-    )
-#define CS_LEAVE_BARRIER() \
-    asm volatile ( \
-        "membar #StoreStore\n" \
-        "membar #LoadStore\n" \
-        ::: "memory" \
+#define CS_ENTER_BARRIER() \
+    asm volatile ( \
+        "membar #LoadLoad | #LoadStore\n" \
+        ::: "memory" \
     )
 
-#define memory_barrier()\
-    asm volatile ( "membar #LoadLoad | #StoreStore\n" ::: "memory")
-#define read_barrier()\
-    asm volatile ("membar #LoadLoad\n" ::: "memory")
-#define write_barrier()\
-    asm volatile ("membar #StoreStore\n" ::: "memory")
+#define CS_LEAVE_BARRIER() \
+    asm volatile ( \
+        "membar #StoreStore\n" \
+        "membar #LoadStore\n" \
+        ::: "memory" \
+    )
 
-#define flush(a) \
-    asm volatile ("flush %0\n" :: "r" ((a)) : "memory")
+#define memory_barrier() \
+    asm volatile ( \
+        "membar #LoadLoad | #StoreStore\n" \
+        ::: "memory" \
+    )
+
+#define read_barrier() \
+    asm volatile ( \
+        "membar #LoadLoad\n" \
+        ::: "memory" \
+    )
+
+#define write_barrier() \
+    asm volatile ( \
+        "membar #StoreStore\n" \
+        ::: "memory" \
+    )
+
+#define flush(a) \
+    asm volatile ( \
+        "flush %[reg]\n" \
+        :: [reg] "r" ((a)) \
+        : "memory" \
+    )
 
 /** Flush Instruction pipeline. */
-static inline void flush_pipeline(void)
+NO_TRACE static inline void flush_pipeline(void)
 {
+    uint64_t pc;
+
     /*
      * The FLUSH instruction takes address parameter.
@@ -70,49 +100,58 @@
      * The entire kernel text is mapped by a locked ITLB and
      * DTLB entries. Therefore, when this function is called,
-     * the %o7 register will always be in the range mapped by
+     * the %pc register will always be in the range mapped by
      * DTLB.
+     *
      */
-
-    asm volatile ("flush %o7\n");
+
+    asm volatile (
+        "rd %%pc, %[pc]\n"
+        "flush %[pc]\n"
+        : [pc] "=&r" (pc)
+    );
 }
 
 /** Memory Barrier instruction. */
-static inline void membar(void)
+NO_TRACE static inline void membar(void)
 {
-    asm volatile ("membar #Sync\n");
+    asm volatile (
+        "membar #Sync\n"
+    );
 }
 
 #if defined (US)
 
-#define smc_coherence(a) \
-{ \
-    write_barrier(); \
-    flush((a)); \
-}
+#define FLUSH_INVAL_MIN 4
 
-#define FLUSH_INVAL_MIN 4
-#define smc_coherence_block(a, l) \
-{ \
-    unsigned long i; \
-    write_barrier(); \
-    for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
-        flush((void *)(a) + i); \
-}
+#define smc_coherence(a) \
+    do { \
+        write_barrier(); \
+        flush((a)); \
+    } while (0)
+
+#define smc_coherence_block(a, l) \
+    do { \
+        unsigned long i; \
+        write_barrier(); \
+        \
+        for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
+            flush((void *)(a) + i); \
+    } while (0)
 
 #elif defined (US3)
 
-#define smc_coherence(a) \
-{ \
-    write_barrier(); \
-    flush_pipeline(); \
-}
+#define smc_coherence(a) \
+    do { \
+        write_barrier(); \
+        flush_pipeline(); \
+    } while (0)
 
-#define smc_coherence_block(a, l) \
-{ \
-    write_barrier(); \
-    flush_pipeline(); \
-}
+#define smc_coherence_block(a, l) \
+    do { \
+        write_barrier(); \
+        flush_pipeline(); \
+    } while (0)
 
-#endif
+#endif /* defined(US3) */
 
 #endif
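A few notes on the substance of this change. The new <trace.h> include supplies the NO_TRACE qualifier now attached to flush_pipeline() and membar(), presumably to keep these hot-path primitives out of the function-tracing instrumentation, while the KERNEL conditional selects where uint64_t comes from for the new local in flush_pipeline(). That function also stops flushing %o7 (the return-address register) and instead reads the program counter explicitly with rd %%pc and flushes that address. Separately, flush(a) moves from the positional asm operand %0 to GCC's named-operand syntax: %[reg] in the template, declared as [reg] "r" ((a)) in the operand list. Both forms emit identical code; the named form merely stays correct if operands are later added or reordered. A minimal host-runnable sketch of that syntax alone (the instruction template is deliberately empty, since the real flush instruction assembles only on SPARC):

#include <stdint.h>
#include <stdio.h>

/* Demonstrates GCC named asm operands, the %[name] syntax used by the
 * new flush() macro. The template is empty so the sketch builds on any
 * GCC/Clang target; only the operand binding is of interest. */
static inline uint64_t pass_through(uint64_t v)
{
    uint64_t out;

    asm volatile (
        ""                     /* no instructions emitted */
        : [res] "=r" (out)     /* output, referenced as %[res] */
        : [val] "0" (v)        /* input, tied to operand 0's register */
    );

    return out;
}

int main(void)
{
    printf("%llu\n", (unsigned long long) pass_through(42));
    return 0;
}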
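The smc_coherence macros also switch from bare-brace blocks to the do { ... } while (0) idiom. The difference shows up when a macro is used like a function call inside an if/else: with bare braces, the caller's trailing semicolon closes the if statement and orphans the else branch. A compilable illustration, with write_barrier() and flush() stubbed out so it builds on any host:

#include <stdio.h>

/* Host stubs; the real macros emit SPARC membar/flush instructions. */
#define write_barrier()  ((void) 0)
#define flush(a)         ((void) (a))

/* Old style: a bare compound statement. */
#define smc_coherence_old(a) \
    { \
        write_barrier(); \
        flush((a)); \
    }

/* New style, as adopted by this changeset: a single statement that
 * still consumes the caller's trailing semicolon. */
#define smc_coherence_new(a) \
    do { \
        write_barrier(); \
        flush((a)); \
    } while (0)

int main(void)
{
    int dirty = 1;
    int word = 0;

    /* Substituting smc_coherence_old here fails to compile: the `;`
     * after the macro ends the if statement and leaves the else
     * dangling. */
    if (dirty)
        smc_coherence_new(&word);
    else
        puts("nothing to flush");

    return 0;
}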
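For context on what these macros are for: smc_coherence() and smc_coherence_block() are the self-modifying-code hooks. After the kernel stores into its own text, the stores must be ordered (write_barrier()) and any stale copies already fetched must be discarded, either by FLUSHing the modified addresses on UltraSPARC I/II (US) or, on UltraSPARC III (US3), by flushing the whole pipeline as this file does instead. A sketch of the intended call pattern; patch_instruction() is a hypothetical helper, not a HelenOS function, and assumes barrier.h is already included:

#include <stdint.h>

/* Hypothetical illustration of the smc_coherence() call pattern when
 * patching a single instruction word in kernel text. */
static inline void patch_instruction(uint32_t *insn, uint32_t new_insn)
{
    /* Store the new encoding into the instruction stream. */
    *insn = new_insn;

    /* Order the store and discard any stale copy the processor may
     * already have fetched, so the next execution sees new_insn. */
    smc_coherence(insn);
}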
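Finally, the critical-section barriers (only re-indented by this change) encode acquire/release ordering for the relaxed RMO memory model named in the comment: CS_ENTER_BARRIER() keeps the section's loads and stores from moving above the lock acquisition, and CS_LEAVE_BARRIER() keeps them from moving below the release. A sketch of where they sit relative to a lock; the spin lock itself is illustrative, not the HelenOS implementation, and the snippet is SPARC-only since the macros expand to membar:

/* Illustrative spin lock showing barrier placement; assumes barrier.h
 * is included. Not HelenOS locking code. */
static volatile int lock_var = 0;

static inline void example_lock(void)
{
    /* Spin until the flag was previously clear. */
    while (__atomic_test_and_set((void *) &lock_var, __ATOMIC_RELAXED))
        ;

    /* membar #LoadLoad | #LoadStore: nothing from the critical
     * section may be reordered before the acquisition above. */
    CS_ENTER_BARRIER();
}

static inline void example_unlock(void)
{
    /* membar #StoreStore, then #LoadStore: every access inside the
     * section completes before the releasing store below. */
    CS_LEAVE_BARRIER();

    lock_var = 0;
}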