source: mainline/kernel/arch/amd64/include/atomic.h@11675207

Last change on this file was 11675207, checked in by jermar <jermar@…>, 17 years ago

Move everything to kernel/.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef __amd64_ATOMIC_H__
#define __amd64_ATOMIC_H__

#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>

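/** Atomically increment the counter.
 *
 * On SMP configurations the lock prefix makes the read-modify-write of
 * val->count atomic with respect to other processors; uniprocessor builds
 * get by with a plain incq.
 */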
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
#else
	__asm__ volatile ("incq %0\n" : "=m" (val->count));
#endif /* CONFIG_SMP */
}

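/** Atomically decrement the counter.
 *
 * Mirror image of atomic_inc(); the lock prefix is again needed only on SMP.
 */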
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
	__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
#else
	__asm__ volatile ("decq %0\n" : "=m" (val->count));
#endif /* CONFIG_SMP */
}

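/** Atomically increment the counter and return its previous value.
 *
 * lock xaddq adds the register operand to val->count and leaves the old
 * value of val->count in that register, so r ends up holding the value
 * the counter had before the increment.
 */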
static inline long atomic_postinc(atomic_t *val)
{
	long r = 1;

	__asm__ volatile (
		"lock xaddq %1, %0\n"
		: "=m" (val->count), "+r" (r)
	);

	return r;
}

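/** Atomically decrement the counter and return its previous value.
 *
 * Same xaddq trick as atomic_postinc(), just with an addend of -1.
 */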
static inline long atomic_postdec(atomic_t *val)
{
	long r = -1;

	__asm__ volatile (
		"lock xaddq %1, %0\n"
		: "=m" (val->count), "+r" (r)
	);

	return r;
}

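/* Pre-increment/pre-decrement: adjust the xaddq result so that the new
 * value of the counter is returned instead of the old one. */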
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)

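/** Atomically write 1 into the counter and return its previous value.
 *
 * xchgq with a memory operand is implicitly locked on x86-64, so no explicit
 * lock prefix is needed; a return value of 0 means the caller has just
 * acquired the flag.
 */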
static inline uint64_t test_and_set(atomic_t *val) {
	uint64_t v;

	__asm__ volatile (
		"movq $1, %0\n"
		"xchgq %0, %1\n"
		: "=r" (v), "=m" (val->count)
	);

	return v;
}


/** amd64 specific fast spinlock */
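/*
 * Spin with plain reads until the lock word looks free, then try to grab it
 * with an atomic xchgq; if another CPU wins the race, go back to spinning.
 * When built with CONFIG_HT, pause is issued in the wait loop to be friendly
 * to the sibling hyper-thread.
 */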
static inline void atomic_lock_arch(atomic_t *val)
{
	uint64_t tmp;

	preemption_disable();
	__asm__ volatile (
		"0:;"
#ifdef CONFIG_HT
		"pause;"
#endif
		"mov %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"	/* Lightweight looping on locked spinlock */

		"incq %1;"	/* now use the atomic operation */
		"xchgq %0, %1;"
		"testq %1, %1;"
		"jnz 0b;"
		: "=m" (val->count), "=r" (tmp)
	);
	/*
	 * Prevent the critical section code from bleeding out above this point.
	 */
	CS_ENTER_BARRIER();
}
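
/*
 * The matching unlock is not defined here; the generic spinlock code is
 * expected to release the lock by storing zero back to val->count and
 * re-enabling preemption.
 */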

#endif

/** @}
 */
