source: mainline/kernel/arch/sparc64/include/atomic.h@4a4c8bcf

Last change on this file since 4a4c8bcf was 7a0359b, checked in by Martin Decky <martin@…>, 15 years ago

improve kernel function tracing

  • add support for more generic kernel sources
  • replace attribute((no_instrument_function)) with the NO_TRACE macro (shorter, and easier to keep compatible with different compilers in the future)
  • to be on the safe side, do not instrument most of the inline and static functions (plus some specific non-static functions)

collateral code cleanup (no change in functionality)
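The NO_TRACE macro mentioned above is not defined in this file; it comes from <trace.h>, which the header below includes. As a minimal sketch, assuming GCC-style function attributes, such a macro would simply wrap the attribute it replaces:

    #define NO_TRACE  __attribute__((no_instrument_function))

This keeps the annotation short at every use site and leaves a single place to change if another compiler spells the attribute differently.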

  • Property mode set to 100644
File size: 3.7 KB
/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <typedefs.h>
#include <preemption.h>
#include <trace.h>

/** Atomic add operation.
 *
 * Use the atomic compare-and-swap operation to atomically add a signed value.
 *
 * @param val Atomic variable.
 * @param i   Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before the addition.
 *
 */
NO_TRACE static inline atomic_count_t atomic_add(atomic_t *val,
    atomic_count_t i)
{
        atomic_count_t a;
        atomic_count_t b;

        do {
                volatile uintptr_t ptr = (uintptr_t) &val->count;

                a = *((atomic_count_t *) ptr);
                b = a + i;

                asm volatile (
                        "casx %0, %2, %1\n"
                        : "+m" (*((atomic_count_t *) ptr)),
                          "+r" (b)
                        : "r" (a)
                );
        } while (a != b);

        return a;
}
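
/*
 * In the loop above, casx compares the value at the memory operand (%0) with
 * the expected value in %2 (a) and, only when they match, stores %1 (b == a + i)
 * back to memory; in either case %1 is overwritten with the value that was in
 * memory before the instruction.  The loop therefore retries whenever another
 * CPU modified the variable between the plain load of a and the casx (b != a
 * afterwards), and exits once the swap has taken effect (b == a), returning
 * the pre-addition value.
 *
 * Illustrative use only (not part of this header; initialization via the
 * generic atomic API is omitted):
 *
 *     atomic_count_t before = atomic_add(&events, 1);
 */
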
NO_TRACE static inline atomic_count_t atomic_preinc(atomic_t *val)
{
        return atomic_add(val, 1) + 1;
}

NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
{
        return atomic_add(val, 1);
}

NO_TRACE static inline atomic_count_t atomic_predec(atomic_t *val)
{
        return atomic_add(val, -1) - 1;
}

NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
{
        return atomic_add(val, -1);
}

NO_TRACE static inline void atomic_inc(atomic_t *val)
{
        (void) atomic_add(val, 1);
}

NO_TRACE static inline void atomic_dec(atomic_t *val)
{
        (void) atomic_add(val, -1);
}

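/*
 * All of the helpers above are thin wrappers around atomic_add(): the
 * pre-increment/pre-decrement variants return the new value, the
 * post-increment/post-decrement variants return the value the variable
 * held before the update, and atomic_inc()/atomic_dec() discard it.
 */
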
NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
{
        atomic_count_t v = 1;
        volatile uintptr_t ptr = (uintptr_t) &val->count;

        asm volatile (
                "casx %0, %2, %1\n"
                : "+m" (*((atomic_count_t *) ptr)),
                  "+r" (v)
                : "r" (0)
        );

        return v;
}
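
/*
 * test_and_set() attempts to change the variable from 0 to 1 with a single
 * casx: the expected value is 0 (%2) and the new value is 1 (%1).  The
 * return value is the previous contents of the variable, so 0 means the
 * caller won the race and now owns the flag, while non-zero means the flag
 * was already set.
 */
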
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
        atomic_count_t tmp1 = 1;
        atomic_count_t tmp2 = 0;

        volatile uintptr_t ptr = (uintptr_t) &val->count;

        preemption_disable();

        asm volatile (
                "0:\n"
                "casx %0, %3, %1\n"
                "brz %1, 2f\n"
                "nop\n"
                "1:\n"
                "ldx %0, %2\n"
                "brz %2, 0b\n"
                "nop\n"
                "ba,a %%xcc, 1b\n"
                "2:\n"
                : "+m" (*((atomic_count_t *) ptr)),
                  "+r" (tmp1),
                  "+r" (tmp2)
                : "r" (0)
        );

        /*
         * Prevent critical section code from bleeding out this way up.
         */
        CS_ENTER_BARRIER();
}
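
/*
 * atomic_lock_arch() first disables preemption and then tries to flip the
 * lock word from 0 to 1 with casx (label 0).  If the old value returned in
 * tmp1 is zero, the word was free and the lock is now held (branch to label
 * 2).  Otherwise the loop at label 1 spins using plain ldx loads until the
 * word reads zero again, and only then retries the casx, keeping the
 * expensive atomic operation off the bus while the lock is contended.
 * CS_ENTER_BARRIER() then keeps memory accesses belonging to the critical
 * section from being reordered before the lock acquisition.
 */
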
#endif

/** @}
 */