source: mainline/kernel/arch/ia32/include/atomic.h@ cb10bc9

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since cb10bc9 was 340ba25c, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

Updated copyright notice.

  • Property mode set to 100644
File size: 6.3 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * Copyright (c) 2012 Adam Hraska
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup ia32
31 * @{
32 */
33/** @file
34 */
35
36#ifndef KERN_ia32_ATOMIC_H_
37#define KERN_ia32_ATOMIC_H_
38
39#include <typedefs.h>
40#include <arch/barrier.h>
41#include <preemption.h>
42#include <trace.h>
43
/** Atomically increment an atomic variable by one.
 *
 * On SMP configurations the `lock` prefix asserts the bus/cache lock so
 * the read-modify-write is atomic across CPUs. On uniprocessor builds a
 * plain `incl` suffices: a single instruction cannot be split by an
 * interrupt, and there is no other CPU to race with.
 *
 * @param val Atomic variable to increment.
 */
NO_TRACE static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
58
/** Atomically decrement an atomic variable by one.
 *
 * Mirror image of atomic_inc(): `lock decl` on SMP for cross-CPU
 * atomicity, plain `decl` on uniprocessor builds where the single
 * instruction is already indivisible with respect to interrupts.
 *
 * @param val Atomic variable to decrement.
 */
NO_TRACE static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
73
/** Atomically increment an atomic variable and return its old value.
 *
 * Implemented with `lock xaddl`: the instruction adds the register
 * operand (here 1) to memory and stores the *previous* memory value
 * back into the register, so `r` ends up holding the pre-increment
 * count.
 *
 * NOTE: the `lock` prefix is used unconditionally here (unlike
 * atomic_inc()), so this is atomic even without CONFIG_SMP.
 *
 * @param val Atomic variable to increment.
 *
 * @return Value of the counter before the increment.
 */
NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	atomic_count_t r = 1;

	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}
86
/** Atomically decrement an atomic variable and return its old value.
 *
 * Decrement is expressed as `lock xaddl` with an addend of -1; xadd
 * leaves the previous memory value in the register operand, so `r`
 * returns the pre-decrement count.
 *
 * @param val Atomic variable to decrement.
 *
 * @return Value of the counter before the decrement.
 */
NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	atomic_count_t r = -1;

	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}
99
/** Atomically increment an atomic variable and return its new value.
 *
 * Built on atomic_postinc(); the argument is evaluated exactly once.
 */
#define atomic_preinc(val)  (atomic_postinc((val)) + 1)

/** Atomically decrement an atomic variable and return its new value.
 *
 * Built on atomic_postdec(); the argument is evaluated exactly once.
 */
#define atomic_predec(val)  (atomic_postdec((val)) - 1)
102
/** Atomically set an atomic variable to 1 and return its old value.
 *
 * Uses `xchgl`, which on x86 carries implicit lock semantics when one
 * operand is memory — no explicit `lock` prefix is needed. The returned
 * value tells the caller whether the flag was already set.
 *
 * @param val Atomic variable acting as the test-and-set flag.
 *
 * @return Previous value of the flag (zero iff this call acquired it).
 */
NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v = 1;

	asm volatile (
		"xchgl %[v], %[count]\n"
		: [v] "+r" (v),
		  [count] "+m" (val->count)
	);

	return v;
}
115
116
/** ia32 specific fast spinlock.
 *
 * Spins with plain (read-only) loads until the lock word reads zero,
 * then attempts to claim it by exchanging a non-zero value in with
 * `xchgl` (implicitly locked on x86). If the exchanged-out value is
 * non-zero, another CPU won the race and we go back to spinning.
 * Preemption is disabled for the duration of lock ownership.
 *
 * @param val Atomic variable serving as the lock word (0 = unlocked).
 */
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
#ifndef PROCESSOR_i486
		"pause\n" /* Pentium 4's HT love this instruction */
#endif
		/* Read-only spin: cheap, avoids bus traffic while locked. */
		"mov %[count], %[tmp]\n"
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n" /* lightweight looping on locked spinlock */

		/* tmp was 0 here, so this makes it 1 before the swap. */
		"incl %[tmp]\n" /* now use the atomic operation */
		"xchgl %[count], %[tmp]\n"
		/* tmp now holds the old lock word; non-zero means we lost. */
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count),
		  [tmp] "=&r" (tmp)
	);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
145
146
/** Size-dispatched compare-and-swap built on cmpxchg.
 *
 * Selects the byte/word/long form of `cmpxchg` based on the size of the
 * pointee type. The expected value is placed in eax (a hard requirement
 * of cmpxchg) and the previous memory value comes back in eax, i.e. in
 * old_val, regardless of whether the swap succeeded.
 *
 * NOTE(review): pointee sizes other than 1, 2 and 4 silently fall
 * through the switch and leave old_val unmodified — callers must only
 * use this with 1-, 2- or 4-byte types.
 *
 * @param pptr    Pointer to the value to be compared and swapped.
 * @param exp_val Expected current value.
 * @param new_val Value stored if *(pptr) equals exp_val.
 * @param old_val Lvalue that receives the previous value of *(pptr).
 * @param prefix  Instruction prefix string literal: "lock" or "".
 */
#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
({ \
	switch (sizeof(typeof(*(pptr)))) { \
	case 1: \
		asm volatile ( \
			prefix " cmpxchgb %[newval], %[ptr]\n" \
			: /* Output operands. */ \
			/* Old/current value is returned in eax. */ \
			[oldval] "=a" (old_val), \
			/* (*ptr) will be read and written to, hence "+" */ \
			[ptr] "+m" (*(pptr)) \
			: /* Input operands. */ \
			/* Expected value must be in eax. */ \
			[expval] "a" (exp_val), \
			/* The new value may be in any register. */ \
			[newval] "r" (new_val) \
			: "memory" \
		); \
		break; \
	case 2: \
		asm volatile ( \
			prefix " cmpxchgw %[newval], %[ptr]\n" \
			: /* Output operands. */ \
			/* Old/current value is returned in eax. */ \
			[oldval] "=a" (old_val), \
			/* (*ptr) will be read and written to, hence "+" */ \
			[ptr] "+m" (*(pptr)) \
			: /* Input operands. */ \
			/* Expected value must be in eax. */ \
			[expval] "a" (exp_val), \
			/* The new value may be in any register. */ \
			[newval] "r" (new_val) \
			: "memory" \
		); \
		break; \
	case 4: \
		asm volatile ( \
			prefix " cmpxchgl %[newval], %[ptr]\n" \
			: /* Output operands. */ \
			/* Old/current value is returned in eax. */ \
			[oldval] "=a" (old_val), \
			/* (*ptr) will be read and written to, hence "+" */ \
			[ptr] "+m" (*(pptr)) \
			: /* Input operands. */ \
			/* Expected value must be in eax. */ \
			[expval] "a" (exp_val), \
			/* The new value may be in any register. */ \
			[newval] "r" (new_val) \
			: "memory" \
		); \
		break; \
	} \
})
200
201
#ifndef local_atomic_cas

/** CPU-local compare-and-swap.
 *
 * Compare-and-swap without the `lock` prefix: atomic only with respect
 * to the local CPU (interrupts), not other processors. Arguments are
 * evaluated exactly once.
 *
 * @param pptr    Pointer to the value to be compared and swapped.
 * @param exp_val Expected current value.
 * @param new_val Value stored if *(pptr) equals exp_val.
 *
 * @return Previous value of *(pptr).
 */
#define local_atomic_cas(pptr, exp_val, new_val) \
({ \
	/* Use proper types and avoid name clashes */ \
	typeof(*(pptr)) _old_val_cas; \
	typeof(*(pptr)) _exp_val_cas = (exp_val); \
	typeof(*(pptr)) _new_val_cas = (new_val); \
	_atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
	\
	_old_val_cas; \
})

#else
/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
#error Architecture specific cpu local atomics already defined! Check your includes.
#endif
219
220
#ifndef local_atomic_exchange
/*
 * Issuing a xchg instruction always implies lock prefix semantics.
 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
 * in a loop.
 */

/** CPU-local atomic exchange.
 *
 * Stores new_val into *(pptr) and returns the previous value, atomic
 * only with respect to the local CPU. Implemented as a cmpxchg retry
 * loop (see comment above) rather than xchg. new_val is evaluated
 * exactly once; *(pptr) is re-read on each retry.
 *
 * @param pptr    Pointer to the value to be exchanged.
 * @param new_val Value to store.
 *
 * @return Previous value of *(pptr).
 */
#define local_atomic_exchange(pptr, new_val) \
({ \
	/* Use proper types and avoid name clashes */ \
	typeof(*(pptr)) _exp_val_x; \
	typeof(*(pptr)) _old_val_x; \
	typeof(*(pptr)) _new_val_x = (new_val); \
	\
	do { \
		_exp_val_x = *(pptr); \
		_old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
	} while (_old_val_x != _exp_val_x); \
	\
	_old_val_x; \
})

#else
/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
#error Architecture specific cpu local atomics already defined! Check your includes.
#endif
246
247
248#endif
249
250/** @}
251 */
Note: See TracBrowser for help on using the repository browser.