source: mainline/arch/amd64/include/atomic.h@e1be3b6

Last change on this file since e1be3b6 was e1be3b6, checked in by Jakub Jermar <jakub@…>, 19 years ago

Small textual changes.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>

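/** Atomically increment the counter.
 *
 * On SMP configurations the INC instruction carries the LOCK prefix
 * so that the read-modify-write cycle is atomic across CPUs.
 */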
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock incq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

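/** Atomically decrement the counter. */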
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "+m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}

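/** Atomically increment the counter and return its original value.
 *
 * LOCK XADD adds the register operand to memory and leaves the
 * previous memory value in the register.
 */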
static inline long atomic_postinc(atomic_t *val)
{
	long r;

	__asm__ volatile (
		"movq $1, %0\n"
		"lock xaddq %0, %1\n"
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

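/** Atomically decrement the counter and return its original value. */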
static inline long atomic_postdec(atomic_t *val)
{
	long r;

	__asm__ volatile (
		"movq $-1, %0\n"
		"lock xaddq %0, %1\n"
		: "=r" (r), "+m" (val->count)
	);

	return r;
}

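/* Pre-increment and pre-decrement, derived from the post- variants. */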
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

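/** Atomically set the counter to one and return its previous value.
 *
 * XCHG with a memory operand is implicitly locked, so no explicit
 * LOCK prefix is needed.
 */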
static inline __u64 test_and_set(atomic_t *val) {
	__u64 v;

	__asm__ volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"
		: "=r" (v), "+m" (val->count)
	);

	return v;
}

/** amd64 specific fast spinlock.
 *
 * Spins with plain loads (and PAUSE on hyperthreaded CPUs) until the
 * lock appears free, then claims it with an atomic XCHG. Preemption is
 * disabled before the lock is taken.
 */
static inline void atomic_lock_arch(atomic_t *val)
{
	__u64 tmp;

	preemption_disable();
	__asm__ volatile (
		"0:;"
#ifdef CONFIG_HT
		"pause;"
#endif
		"mov %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"	/* Lightweight looping on locked spinlock */

		"incq %1;"	/* now use the atomic operation */
		"xchgq %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"
		: "+m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}

#endif /* __amd64_ATOMIC_H__ */
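
For orientation, a minimal usage sketch of these primitives follows. It is not part of the header; the <arch/atomic.h> include path, the zero-initialized statics, and the example() wrapper are assumptions made only for illustration.

#include <arch/atomic.h>	/* assumed include path for this header */

static atomic_t refcount;	/* number of active users, assumed zero-initialized */
static atomic_t lock;		/* 0 = unlocked, 1 = locked */

static void example(void)
{
	/* atomic_preinc() returns the value after the increment. */
	long users = atomic_preinc(&refcount);

	/* Busy-wait acquire: test_and_set() returns the old value,
	 * so zero means the lock was free and is now ours. */
	while (test_and_set(&lock) != 0)
		;

	/* ... critical section ... */

	lock.count = 0;		/* plain store releases the lock in this sketch */
	atomic_dec(&refcount);
	(void) users;
}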