source: mainline/kernel/arch/amd64/include/atomic.h@cb10bc9

Last change on this file since cb10bc9 was 340ba25c, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago:

Updated copyright notice.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * Copyright (c) 2012 Adam Hraska
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <typedefs.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <trace.h>

[7a0359b]44NO_TRACE static inline void atomic_inc(atomic_t *val)
[228666c]45{
[5753fbb]46#ifdef CONFIG_SMP
[f24d300]47 asm volatile (
48 "lock incq %[count]\n"
49 : [count] "+m" (val->count)
50 );
[5753fbb]51#else
[f24d300]52 asm volatile (
53 "incq %[count]\n"
54 : [count] "+m" (val->count)
55 );
[5753fbb]56#endif /* CONFIG_SMP */
57}
58
[7a0359b]59NO_TRACE static inline void atomic_dec(atomic_t *val)
[228666c]60{
[5753fbb]61#ifdef CONFIG_SMP
[f24d300]62 asm volatile (
63 "lock decq %[count]\n"
64 : [count] "+m" (val->count)
65 );
[5753fbb]66#else
[f24d300]67 asm volatile (
68 "decq %[count]\n"
69 : [count] "+m" (val->count)
70 );
[5753fbb]71#endif /* CONFIG_SMP */
72}
73
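/*
 * Usage sketch (illustrative only; `nready` is a hypothetical counter,
 * not part of this header):
 *
 *     static atomic_t nready;
 *
 *     atomic_inc(&nready);    increments nready.count by one
 *     atomic_dec(&nready);    decrements it again
 *
 * Neither operation returns the resulting value; when the result is
 * needed, use the xaddq-based primitives below.
 */
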
[7a0359b]74NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
[5753fbb]75{
[228666c]76 atomic_count_t r = 1;
[f24d300]77
[e7b7be3f]78 asm volatile (
[f24d300]79 "lock xaddq %[r], %[count]\n"
[228666c]80 : [count] "+m" (val->count),
81 [r] "+r" (r)
[5753fbb]82 );
[f24d300]83
[5753fbb]84 return r;
85}
86
[7a0359b]87NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
[5753fbb]88{
[228666c]89 atomic_count_t r = -1;
[5753fbb]90
[e7b7be3f]91 asm volatile (
[f24d300]92 "lock xaddq %[r], %[count]\n"
[228666c]93 : [count] "+m" (val->count),
94 [r] "+r" (r)
[5753fbb]95 );
96
97 return r;
98}
99
[f24d300]100#define atomic_preinc(val) (atomic_postinc(val) + 1)
101#define atomic_predec(val) (atomic_postdec(val) - 1)
[5753fbb]102
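/*
 * Semantics sketch (illustrative): assuming val->count == 5,
 * atomic_postinc(val) returns 5 and leaves count == 6, whereas
 * atomic_preinc(val) returns 6 and leaves count == 6, since the macros
 * above merely adjust the value returned by the xaddq-based primitives.
 * A hypothetical reference-counting use (obj and obj_destroy are
 * illustrative names, not part of this header):
 *
 *     if (atomic_predec(&obj->refcount) == 0)
 *         obj_destroy(obj);
 */
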
[7a0359b]103NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
[228666c]104{
[ba371e1]105 atomic_count_t v = 1;
[5753fbb]106
[e7b7be3f]107 asm volatile (
[f24d300]108 "xchgq %[v], %[count]\n"
[ba371e1]109 : [v] "+r" (v),
[228666c]110 [count] "+m" (val->count)
[5753fbb]111 );
112
113 return v;
114}
115
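/*
 * test_and_set() unconditionally stores 1 into the counter and returns
 * its previous value. A minimal (unfair) busy-wait lock could be built
 * on top of it like this (a sketch only; real kernel spinlocks also
 * need barriers and preemption control, as atomic_lock_arch() below
 * demonstrates):
 *
 *     while (test_and_set(&lock) != 0)
 *         ;    spin until the previous value was 0 (unlocked)
 */
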
/** amd64-specific fast spinlock */
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
    atomic_count_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"
        "	pause\n"
        "	mov %[count], %[tmp]\n"
        "	testq %[tmp], %[tmp]\n"
        "	jnz 0b\n"  /* lightweight looping on locked spinlock */

        "	incq %[tmp]\n"  /* now use the atomic operation */
        "	xchgq %[count], %[tmp]\n"
        "	testq %[tmp], %[tmp]\n"
        "	jnz 0b\n"
        : [count] "+m" (val->count),
          [tmp] "=&r" (tmp)
    );

    /*
     * Prevent the critical section code from bleeding out above the
     * lock acquisition.
     */
    CS_ENTER_BARRIER();
}

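/*
 * The matching unlock operation is not architecture specific and is
 * therefore not defined here. Conceptually it issues a leave barrier,
 * stores 0 to val->count and re-enables preemption (a sketch of the
 * intent, not of the actual generic spinlock code).
 */
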
#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
({ \
    switch (sizeof(typeof(*(pptr)))) { \
    case 1: \
        asm volatile ( \
            prefix " cmpxchgb %[newval], %[ptr]\n" \
            : /* Output operands. */ \
            /* The old/current value is returned in al. */ \
            [oldval] "=a" (old_val), \
            /* (*ptr) is both read and written to, hence "+". */ \
            [ptr] "+m" (*pptr) \
            : /* Input operands. */ \
            /* The expected value must be in al. */ \
            [expval] "a" (exp_val), \
            /* The new value may be in any register. */ \
            [newval] "r" (new_val) \
            : "memory" \
        ); \
        break; \
    case 2: \
        asm volatile ( \
            prefix " cmpxchgw %[newval], %[ptr]\n" \
            : /* Output operands. */ \
            /* The old/current value is returned in ax. */ \
            [oldval] "=a" (old_val), \
            /* (*ptr) is both read and written to, hence "+". */ \
            [ptr] "+m" (*pptr) \
            : /* Input operands. */ \
            /* The expected value must be in ax. */ \
            [expval] "a" (exp_val), \
            /* The new value may be in any register. */ \
            [newval] "r" (new_val) \
            : "memory" \
        ); \
        break; \
    case 4: \
        asm volatile ( \
            prefix " cmpxchgl %[newval], %[ptr]\n" \
            : /* Output operands. */ \
            /* The old/current value is returned in eax. */ \
            [oldval] "=a" (old_val), \
            /* (*ptr) is both read and written to, hence "+". */ \
            [ptr] "+m" (*pptr) \
            : /* Input operands. */ \
            /* The expected value must be in eax. */ \
            [expval] "a" (exp_val), \
            /* The new value may be in any register. */ \
            [newval] "r" (new_val) \
            : "memory" \
        ); \
        break; \
    case 8: \
        asm volatile ( \
            prefix " cmpxchgq %[newval], %[ptr]\n" \
            : /* Output operands. */ \
            /* The old/current value is returned in rax. */ \
            [oldval] "=a" (old_val), \
            /* (*ptr) is both read and written to, hence "+". */ \
            [ptr] "+m" (*pptr) \
            : /* Input operands. */ \
            /* The expected value must be in rax. */ \
            [expval] "a" (exp_val), \
            /* The new value may be in any register. */ \
            [newval] "r" (new_val) \
            : "memory" \
        ); \
        break; \
    } \
})

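/*
 * _atomic_cas_impl() is parameterized by an instruction prefix: an
 * empty prefix (as in local_atomic_cas() below) yields a CAS that is
 * atomic only with respect to code running on the same CPU, while
 * "lock" would make it atomic across CPUs. A hypothetical SMP-safe
 * wrapper (my_atomic_cas is an illustrative name, not part of this
 * header) could look like:
 *
 *     #define my_atomic_cas(pptr, exp_val, new_val) \
 *     ({ \
 *         typeof(*(pptr)) _old_val; \
 *         _atomic_cas_impl(pptr, (exp_val), (new_val), _old_val, "lock"); \
 *         _old_val; \
 *     })
 */
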
#ifndef local_atomic_cas

#define local_atomic_cas(pptr, exp_val, new_val) \
({ \
    /* Use proper types and avoid name clashes. */ \
    typeof(*(pptr)) _old_val_cas; \
    typeof(*(pptr)) _exp_val_cas = (exp_val); \
    typeof(*(pptr)) _new_val_cas = (new_val); \
    _atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
    \
    _old_val_cas; \
})

#else
/* Check that arch/atomic.h does not accidentally include atomic.h. */
#error Architecture-specific CPU-local atomics already defined! Check your includes.
#endif

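/*
 * Usage sketch for local_atomic_cas() (illustrative; `cnt` is a
 * hypothetical pointer to a CPU-local variable). The CAS succeeded iff
 * the returned old value equals the expected value that was passed in:
 *
 *     typeof(*cnt) exp;
 *     do {
 *         exp = *cnt;
 *     } while (local_atomic_cas(cnt, exp, exp + 1) != exp);
 */
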
#ifndef local_atomic_exchange
/*
 * Issuing an xchg instruction always implies lock prefix semantics.
 * It is therefore cheaper to use a cmpxchg without a lock prefix
 * in a loop instead.
 */
#define local_atomic_exchange(pptr, new_val) \
({ \
    /* Use proper types and avoid name clashes. */ \
    typeof(*(pptr)) _exp_val_x; \
    typeof(*(pptr)) _old_val_x; \
    typeof(*(pptr)) _new_val_x = (new_val); \
    \
    do { \
        _exp_val_x = *(pptr); \
        _old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
    } while (_old_val_x != _exp_val_x); \
    \
    _old_val_x; \
})

#else
/* Check that arch/atomic.h does not accidentally include atomic.h. */
#error Architecture-specific CPU-local atomics already defined! Check your includes.
#endif

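/*
 * Usage sketch for local_atomic_exchange() (illustrative; `pending` is
 * a hypothetical CPU-local flag). The previous contents of the variable
 * are returned, and the new value is stored atomically with respect to
 * other code on the same CPU:
 *
 *     bool was_pending = local_atomic_exchange(&pending, false);
 *     if (was_pending) {
 *         ...handle the previously pending work...
 *     }
 */
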
#endif

/** @}
 */