source: mainline/kernel/arch/ia32/include/atomic.h@ b17518e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b17518e was b17518e, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

Renamed atomic_swap_* to atomic_set_return_* and added a local cpu native_t variant.

  • Property mode set to 100644
File size: 6.3 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup ia32
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_ia32_ATOMIC_H_
36#define KERN_ia32_ATOMIC_H_
37
38#include <typedefs.h>
39#include <arch/barrier.h>
40#include <preemption.h>
41#include <trace.h>
42
/** Atomically increment val->count by one.
 *
 * On SMP builds the lock prefix makes the increment atomic with
 * respect to other cpus; on uniprocessor builds a plain incl is
 * emitted, since a single instruction cannot be split by an interrupt.
 */
NO_TRACE static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
57
/** Atomically decrement val->count by one.
 *
 * Mirror image of atomic_inc(): lock decl on SMP, plain decl on
 * uniprocessor builds where instruction-level atomicity suffices.
 */
NO_TRACE static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}
72
/** Atomically increment val->count and return its pre-increment value.
 *
 * lock xadd exchanges the register operand (initially 1) with the old
 * memory value while storing the sum back to memory, so r ends up
 * holding the original count.
 */
NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	atomic_count_t r = 1;

	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}
85
/** Atomically decrement val->count and return its pre-decrement value.
 *
 * Same lock xadd idiom as atomic_postinc(), but adding -1; r receives
 * the original count.
 */
NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	atomic_count_t r = -1;

	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}
98
/* Pre-increment/pre-decrement expressed via the post- variants:
 * adjust the returned old value by the same delta the memory saw. */
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
101
/** Atomically set val->count to 1 and return its previous value.
 *
 * A zero return means the caller won the (spin)lock. No explicit lock
 * prefix is needed: xchg with a memory operand always implies lock
 * semantics (see the note above _atomic_swap_local_impl below).
 */
NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v = 1;

	asm volatile (
		"xchgl %[v], %[count]\n"
		: [v] "+r" (v),
		  [count] "+m" (val->count)
	);

	return v;
}
114
115
/** ia32 specific fast spinlock.
 *
 * Spins reading val->count with plain loads until it appears free
 * (zero), then attempts to take it with an implicitly locked xchgl;
 * if another cpu grabbed it first, falls back to spinning. Preemption
 * is disabled for the duration of the critical section.
 */
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
#ifndef PROCESSOR_i486
		"pause\n" /* Pentium 4's HT love this instruction */
#endif
		"mov %[count], %[tmp]\n"
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n" /* lightweight looping on locked spinlock */

		"incl %[tmp]\n" /* now use the atomic operation */
		"xchgl %[count], %[tmp]\n"
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count),
		  [tmp] "=&r" (tmp)
	);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
144
145
/* Emit a cmpxchgl-based compare-and-swap on *pptr.
 *
 * prefix is either "lock\n" (smp-safe) or "" (atomic only wrt the
 * local cpu's interrupts). old_val is an lvalue that receives the
 * value observed at *pptr; the swap succeeded iff old_val == exp_val.
 */
#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
	asm volatile ( \
		prefix " cmpxchgl %[newval], %[ptr]\n" \
		: /* Output operands. */ \
		/* Old/current value is returned in eax. */ \
		[oldval] "=a" (old_val), \
		/* (*ptr) will be read and written to, hence "+" */ \
		[ptr] "+m" (*pptr) \
		: /* Input operands. */ \
		/* Expected value must be in eax. */ \
		[expval] "a" (exp_val), \
		/* The new value may be in any register. */ \
		[newval] "r" (new_val) \
		: "memory" \
	)
161
162/** Atomically compares and swaps the pointer at pptr. */
163NO_TRACE static inline void * atomic_cas_ptr(void **pptr,
164 void *exp_val, void *new_val)
165{
166 void *old_val;
167 _atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
168 return old_val;
169}
170
171/** Compare-and-swap of a pointer that is atomic wrt to local cpu's interrupts.
172 *
173 * This function is NOT smp safe and is not atomic with respect to other cpus.
174 */
175NO_TRACE static inline void * atomic_cas_ptr_local(void **pptr,
176 void *exp_val, void *new_val)
177{
178 void *old_val;
179 _atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
180 return old_val;
181}
182
183
/* Unconditionally exchange *pptr with new_val and yield the old value.
 * xchgl with a memory operand implies lock semantics, so this is
 * smp-safe without an explicit prefix (see the note below). */
#define _atomic_swap_impl(pptr, new_val) \
({ \
	typeof(*(pptr)) new_in_old_out = new_val; \
	asm volatile ( \
		"xchgl %[val], %[p_ptr]\n" \
		: [val] "+r" (new_in_old_out), \
		  [p_ptr] "+m" (*pptr) \
	); \
	\
	new_in_old_out; \
})
195
/*
 * Issuing a xchg instruction always implies lock prefix semantics.
 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
 * in a loop.
 *
 * The loop retries until the unlocked CAS observes the expected value,
 * i.e. until no local interrupt changed *pptr between the read and the
 * cmpxchg. Yields the value that was replaced. NOT smp safe.
 */
#define _atomic_swap_local_impl(pptr, new_val) \
({ \
	typeof(*(pptr)) exp_val; \
	typeof(*(pptr)) old_val; \
	\
	do { \
		exp_val = *pptr; \
		_atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
	} while (old_val != exp_val); \
	\
	old_val; \
})
213
214
215/** Atomicaly sets *ptr to val and returns the previous value. */
216NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
217{
218 return _atomic_swap_impl(pptr, val);
219}
220
221/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
222 *
223 * This function is only atomic wrt to local interrupts and it is
224 * NOT atomic wrt to other cpus.
225 */
226NO_TRACE static inline void * atomic_set_return_ptr_local(
227 void **pptr, void *new_val)
228{
229 return _atomic_swap_local_impl(pptr, new_val);
230}
231
232/** Atomicaly sets *ptr to val and returns the previous value. */
233NO_TRACE static inline native_t atomic_set_return_native_t(
234 native_t *p, native_t val)
235{
236 return _atomic_swap_impl(p, val);
237}
238
239/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
240 *
241 * This function is only atomic wrt to local interrupts and it is
242 * NOT atomic wrt to other cpus.
243 */
244NO_TRACE static inline native_t atomic_set_return_native_t_local(
245 native_t *p, native_t new_val)
246{
247 return _atomic_swap_local_impl(p, new_val);
248}
249
250
/*
 * Keep the implementation helper macros private to this header.
 * BUGFIX: the CAS macro is named _atomic_cas_impl; the previous
 * "#undef _atomic_cas_ptr_impl" misspelled it and silently left
 * the macro defined for every includer.
 */
#undef _atomic_cas_impl
#undef _atomic_swap_impl
#undef _atomic_swap_local_impl
254
255#endif
256
257/** @}
258 */
Note: See TracBrowser for help on using the repository browser.