source: mainline/kernel/arch/amd64/include/atomic.h @ b03a666

Last change on this file since b03a666 was 228666c, checked in by Martin Decky <martin@…>, 15 years ago:

Introduce atomic_count_t as the explicit type of the internal value in atomic_t (this is probably better than the chaotic mix of int/long). atomic_count_t is defined as unsigned; for signed semantics, cast it to atomic_signed_t.
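
In concrete terms, the change amounts to something like the following sketch (illustrative only: the real definitions live in arch/types.h, the 64-bit widths are an assumption for amd64, and atomic_get_signed() is a hypothetical helper, not HelenOS API):

	#include <stdint.h>

	/* Sketch of the types the commit message describes. */
	typedef uint64_t atomic_count_t;   /* unsigned internal value */
	typedef int64_t atomic_signed_t;   /* cast to this for signed semantics */

	typedef struct {
		volatile atomic_count_t count;
	} atomic_t;

	/* Hypothetical helper: read the counter with signed semantics. */
	static inline atomic_signed_t atomic_get_signed(atomic_t *val)
	{
		return (atomic_signed_t) val->count;
	}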

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

/** Atomic increment.
 *
 * On SMP configurations the lock prefix makes the increment atomic with
 * respect to other processors; on uniprocessor builds a plain incq is
 * sufficient, since a single instruction cannot be interleaved.
 */
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

/** Atomic decrement; see atomic_inc() for the SMP/UP distinction. */
static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

/** Atomically increment the counter and return its previous value.
 *
 * lock xaddq exchanges the register with memory and stores the sum back,
 * so r receives the value the counter had before the increment.
 */
static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	atomic_count_t r = 1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}

/** Atomically decrement the counter and return its previous value.
 *
 * Adding -1 (which wraps around in the unsigned atomic_count_t) via
 * lock xaddq decrements the counter.
 */
static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	atomic_count_t r = -1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}

/* The pre-increment/pre-decrement variants adjust the returned old value. */
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
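
/*
 * Usage sketch (illustrative, not part of this header), assuming the
 * generic atomic_set() initializer from the arch-independent atomic.h
 * and a hypothetical destroy() routine: dropping a reference and
 * destroying the object when the last one goes away.
 *
 *     atomic_t refcount;
 *     atomic_set(&refcount, 1);
 *     ...
 *     if (atomic_predec(&refcount) == 0)
 *         destroy(object);
 */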

/** Atomically write 1 to the counter and return its previous value.
 *
 * xchgq with a memory operand is implicitly locked, so no explicit
 * lock prefix is needed.
 */
static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v;

	asm volatile (
		"movq $1, %[v]\n"
		"xchgq %[v], %[count]\n"
		: [v] "=r" (v),
		  [count] "+m" (val->count)
	);

	return v;
}
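
/*
 * Usage sketch (illustrative, not part of this header): test_and_set()
 * returns the previous value of the counter, so a return of zero means
 * the lock was free and the caller has just acquired it. Assuming the
 * generic atomic_set() initializer:
 *
 *     atomic_t lock;
 *     atomic_set(&lock, 0);
 *     while (test_and_set(&lock) != 0)
 *         ;   spin: the previous value was nonzero, the lock is held
 */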

/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
		"pause\n"            /* hint that this is a spin-wait loop */
		"mov %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"           /* lightweight looping on locked spinlock */

		"incq %[tmp]\n"      /* now use the atomic operation */
		"xchgq %[count], %[tmp]\n"
		"testq %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count),
		  [tmp] "=&r" (tmp)
	);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
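
/*
 * The release side is not architecture-specific and does not live in
 * this file; a sketch of the counterpart, under the assumption that the
 * generic unlock mirrors the barrier and preemption calls above:
 *
 *     CS_LEAVE_BARRIER();    keep critical-section accesses above the store
 *     val->count = 0;        a plain aligned store releases the lock on amd64
 *     preemption_enable();
 */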

#endif

/** @}
 */