source: mainline/kernel/arch/amd64/include/atomic.h @ ddcc8a0

Last change on this file since ddcc8a0 was 7a0359b, checked in by Martin Decky <martin@…>, 15 years ago

improve kernel function tracing

  • add support for more generic kernel sources
  • replace __attribute__((no_instrument_function)) with the NO_TRACE macro (shorter, and easier to keep compatible with different compilers in the future; see the sketch below)
  • to be on the safe side, do not instrument most of the inline and static functions (plus some specific non-static functions)

collateral code cleanup (no change in functionality)
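The NO_TRACE macro referred to above is provided by <trace.h>. For orientation only, a wrapper of this kind could be sketched as follows; the __GNUC__ guard is an illustrative assumption, not the actual HelenOS definition:

  /* Hypothetical sketch; the real definition lives in <trace.h>. */
  #ifdef __GNUC__
  #define NO_TRACE  __attribute__((no_instrument_function))
  #else
  #define NO_TRACE
  #endif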

  • Property mode set to 100644
File size: 3.4 KB
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ATOMIC_H_
#define KERN_amd64_ATOMIC_H_

#include <typedefs.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <trace.h>

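/** Atomic increment.
 *
 * On SMP configurations the INC instruction carries the LOCK prefix so the
 * read-modify-write is atomic with respect to other processors; on UP the
 * plain instruction suffices.
 *
 * @param val Atomic variable to increment.
 */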
NO_TRACE static inline void atomic_inc(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock incq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"incq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

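/** Atomic decrement.
 *
 * The LOCK prefix is again used only on SMP configurations.
 *
 * @param val Atomic variable to decrement.
 */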
NO_TRACE static inline void atomic_dec(atomic_t *val)
{
#ifdef CONFIG_SMP
	asm volatile (
		"lock decq %[count]\n"
		: [count] "+m" (val->count)
	);
#else
	asm volatile (
		"decq %[count]\n"
		: [count] "+m" (val->count)
	);
#endif /* CONFIG_SMP */
}

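/** Atomic post-increment.
 *
 * LOCK XADD adds one to the counter and fetches its previous value in a
 * single atomic step.
 *
 * @param val Atomic variable to increment.
 *
 * @return Value of the variable before the increment.
 */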
NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	atomic_count_t r = 1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}

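/** Atomic post-decrement.
 *
 * @param val Atomic variable to decrement.
 *
 * @return Value of the variable before the decrement.
 */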
NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	atomic_count_t r = -1;

	asm volatile (
		"lock xaddq %[r], %[count]\n"
		: [count] "+m" (val->count),
		  [r] "+r" (r)
	);

	return r;
}

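/*
 * The pre-increment/pre-decrement variants are derived from the post
 * variants: XADD returns the old value, so adjusting the result by one
 * yields the new value.
 */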
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)

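/** Atomically set the variable to 1 and return its previous value.
 *
 * XCHG with a memory operand is implicitly locked on amd64, so no explicit
 * LOCK prefix is needed.
 *
 * @param val Atomic variable to set.
 *
 * @return Value of the variable before it was set.
 */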
NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v = 1;

	asm volatile (
		"xchgq %[v], %[count]\n"
		: [v] "+r" (v),
		  [count] "+m" (val->count)
	);

	return v;
}

/** amd64 specific fast spinlock.
 *
 * Spins with plain (non-locked) reads and the PAUSE hint while the lock is
 * held, and only attempts the atomic XCHG once the lock appears to be free.
 * Preemption is disabled before the lock is taken and remains disabled on
 * return.
 *
 * @param val Atomic variable representing the lock; zero means unlocked.
 */
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp;

	preemption_disable();
	asm volatile (
		"0:\n"
		"	pause\n"
		"	mov %[count], %[tmp]\n"
		"	testq %[tmp], %[tmp]\n"
		"	jnz 0b\n"       /* lightweight looping on locked spinlock */

		"	incq %[tmp]\n"  /* now use the atomic operation */
		"	xchgq %[count], %[tmp]\n"
		"	testq %[tmp], %[tmp]\n"
		"	jnz 0b\n"
		: [count] "+m" (val->count),
		  [tmp] "=&r" (tmp)
	);

	/*
	 * Prevent code from the critical section from bleeding out
	 * above the lock acquisition.
	 */
	CS_ENTER_BARRIER();
}

#endif

/** @}
 */