source: mainline/kernel/arch/amd64/include/atomic.h@ 228666c

Last change on this file since 228666c was 228666c, checked in by Martin Decky <martin@…>, 15 years ago

introduce atomic_count_t as the explicit type of the internal value in atomic_t (this is probably better than the chaotic mix of int/long)
atomic_count_t is defined as unsigned; for signed semantics, cast it to atomic_signed_t
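For orientation, a minimal sketch of the types this change implies on amd64 (the real definitions live in the arch headers pulled in below; the exact underlying integer types here are an assumption, inferred from the 64-bit incq/xaddq instructions used in this file):

    typedef uint64_t atomic_count_t;    /* unsigned, per this change (64-bit width assumed) */
    typedef int64_t  atomic_signed_t;   /* cast to this for signed semantics */

    typedef struct {
        volatile atomic_count_t count;  /* internal value accessed by the operations below */
    } atomic_t;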

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

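/** Atomic increment.
 *
 * On SMP the lock prefix makes the incq read-modify-write atomic across
 * CPUs. On a uniprocessor build a plain incq suffices, since a single
 * instruction cannot be preempted halfway through.
 */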
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
    asm volatile (
        "lock incq %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "incq %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

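/** Atomic decrement.
 *
 * Mirror image of atomic_inc(): lock-prefixed decq on SMP, plain decq
 * on a uniprocessor build.
 */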
static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
    asm volatile (
        "lock decq %[count]\n"
        : [count] "+m" (val->count)
    );
#else
    asm volatile (
        "decq %[count]\n"
        : [count] "+m" (val->count)
    );
#endif /* CONFIG_SMP */
}

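/** Atomic post-increment (fetch-and-add).
 *
 * xaddq atomically adds r to val->count and stores the old value of
 * val->count into r, so the caller receives the value from before the
 * increment. The lock prefix makes the exchange-and-add atomic across
 * CPUs.
 */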
static inline atomic_count_t atomic_postinc(atomic_t *val)
{
    atomic_count_t r = 1;

    asm volatile (
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count),
          [r] "+r" (r)
    );

    return r;
}

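/** Atomic post-decrement.
 *
 * Same xaddq trick with an addend of -1. Assigning -1 to the unsigned
 * atomic_count_t is well-defined in C: it wraps to the all-ones value,
 * and adding it performs a two's-complement decrement.
 */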
static inline atomic_count_t atomic_postdec(atomic_t *val)
{
    atomic_count_t r = -1;

    asm volatile (
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count),
          [r] "+r" (r)
    );

    return r;
}

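/*
 * Pre-increment/pre-decrement are derived from the post- variants by
 * adjusting the returned (old) value; the memory update itself is the
 * same single atomic xaddq.
 */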
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

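/** Atomically set the value to 1 and return the previous value.
 *
 * No lock prefix is needed: xchg with a memory operand is implicitly
 * locked on x86. When used for a spinlock, a return value of 0 means
 * the caller acquired it.
 */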
static inline atomic_count_t test_and_set(atomic_t *val)
{
    atomic_count_t v;

    asm volatile (
        "movq $1, %[v]\n"
        "xchgq %[v], %[count]\n"
        : [v] "=r" (v),
          [count] "+m" (val->count)
    );

    return v;
}

/** amd64 specific fast spinlock
 *
 * Test-and-test-and-set: spin with plain loads until the lock looks
 * free, then attempt the actual acquisition with an atomic xchgq.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
    atomic_count_t tmp;

    preemption_disable();
    asm volatile (
        "0:\n"
        "pause\n"
        "mov %[count], %[tmp]\n"
        "testq %[tmp], %[tmp]\n"
        "jnz 0b\n"  /* lightweight looping on locked spinlock */

        "incq %[tmp]\n"  /* now use the atomic operation */
        "xchgq %[count], %[tmp]\n"
        "testq %[tmp], %[tmp]\n"
        "jnz 0b\n"
        : [count] "+m" (val->count),
          [tmp] "=&r" (tmp)
    );

    /*
     * Prevent the critical section code from leaking out upwards,
     * above the lock acquisition.
     */
    CS_ENTER_BARRIER();
}

#endif

/** @}
 */
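For context, a hypothetical usage sketch of these primitives; ref_grab(), ref_release(), and holder_count are invented names for illustration, not part of this header:

    /* Hypothetical reference counting built on the operations above. */
    static atomic_t holder_count;

    static inline void ref_grab(void)
    {
        atomic_inc(&holder_count);
    }

    static inline atomic_count_t ref_release(void)
    {
        /* atomic_predec() yields the new value; 0 means the last
         * reference is gone. */
        return atomic_predec(&holder_count);
    }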