source: mainline/kernel/arch/ia32/include/atomic.h@ 228666c

Last change on this file since 228666c was 228666c, checked in by Martin Decky <martin@…>, 15 years ago

introduce atomic_count_t as the explicit type of the internal value in atomic_t (this is probably better than the chaotic mix of int/long)
atomic_count_t is defined as unsigned; for signed semantics it can be cast to atomic_signed_t

  • Property mode set to 100644
File size: 3.4 KB
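A minimal sketch of what the new types mean for a caller (hypothetical code, not part of this changeset), using atomic_postdec() from the header below:

	atomic_t sem;
	...
	/* The raw return value is unsigned (atomic_count_t); cast it to
	 * atomic_signed_t when a negative result is meaningful. */
	if ((atomic_signed_t) atomic_postdec(&sem) <= 0) {
		/* the previous value was already zero or negative */
	}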
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32
 * @{
 */
/** @file
 */

#ifndef KERN_ia32_ATOMIC_H_
#define KERN_ia32_ATOMIC_H_

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>

/** Atomically increment the counter.
 *
 * On SMP configurations the LOCK prefix makes the read-modify-write atomic
 * with respect to other processors; a uniprocessor build only needs a plain
 * INCL, since a single instruction cannot be interrupted halfway.
 */
static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

/** Atomically decrement the counter. */
static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decl %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decl %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

/** Atomically increment the counter and return its previous value. */
static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	atomic_count_t r = 1;
	
	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);
	
	return r;
}

/** Atomically decrement the counter and return its previous value. */
static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	atomic_count_t r = -1;
	
	asm volatile (
		"lock xaddl %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);
	
	return r;
}

/* The pre-increment/pre-decrement variants are derived from the post ones. */
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

/** Atomically write 1 to the counter and return its previous value.
 *
 * XCHGL with a memory operand is implicitly locked, so no LOCK prefix
 * is needed.
 */
static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v;
	
	asm volatile (
		"movl $1, %[v]\n"
		"xchgl %[v], %[count]\n"
		: [v] "=r" (v),
		  [count] "+m" (val->count)
	);
	
	return v;
}

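/*
 * Illustrative sketch only (not part of the original header): because
 * test_and_set() returns the previous contents of the counter, a caller
 * can treat a zero return value as having just acquired ownership:
 *
 *     if (test_and_set(&lock) == 0) {
 *             ... lock was free and now belongs to us ...
 *     }
 */
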
/** ia32 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp;
	
	preemption_disable();
	asm volatile (
		"0:\n"
		"pause\n"         /* Pentium 4's HT loves this instruction */
		"mov %[count], %[tmp]\n"
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n"        /* lightweight looping on locked spinlock */
		
		"incl %[tmp]\n"   /* now use the atomic operation */
		"xchgl %[count], %[tmp]\n"
		"testl %[tmp], %[tmp]\n"
		"jnz 0b\n"
		: [count] "+m" (val->count),
		  [tmp] "=&r" (tmp)
	);
	
	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}

#endif

/** @}
 */