/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64
 * @{
 */
/** @file
 */

#ifndef KERN_sparc64_ATOMIC_H_
#define KERN_sparc64_ATOMIC_H_

#include <arch/barrier.h>
#include <typedefs.h>
#include <preemption.h>
#include <trace.h>

/** Atomic add operation.
 *
 * Use the atomic compare-and-swap operation to atomically add a signed
 * value.
 *
 * @param val Atomic variable.
 * @param i   Signed value to be added.
 *
 * @return Value of the atomic variable as it existed before addition.
 *
 */
NO_TRACE static inline atomic_count_t atomic_add(atomic_t *val,
    atomic_count_t i)
{
	atomic_count_t a;
	atomic_count_t b;

	do {
		volatile uintptr_t ptr = (uintptr_t) &val->count;

		a = *((atomic_count_t *) ptr);
		b = a + i;

		/*
		 * CASX stores the former value of *ptr into b;
		 * b == a therefore means the swap took place.
		 */
		asm volatile (
			"casx %0, %2, %1\n"
			: "+m" (*((atomic_count_t *) ptr)),
			  "+r" (b)
			: "r" (a)
		);
	} while (a != b);

	return a;
}

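/** Atomic pre-increment.
 *
 * @param val Atomic variable.
 *
 * @return Value of the atomic variable after the increment.
 *
 */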
NO_TRACE static inline atomic_count_t atomic_preinc(atomic_t *val)
{
	return atomic_add(val, 1) + 1;
}

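/** Atomic post-increment.
 *
 * @param val Atomic variable.
 *
 * @return Value of the atomic variable before the increment.
 *
 */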
NO_TRACE static inline atomic_count_t atomic_postinc(atomic_t *val)
{
	return atomic_add(val, 1);
}

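/** Atomic pre-decrement.
 *
 * @param val Atomic variable.
 *
 * @return Value of the atomic variable after the decrement.
 *
 */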
NO_TRACE static inline atomic_count_t atomic_predec(atomic_t *val)
{
	return atomic_add(val, -1) - 1;
}

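/** Atomic post-decrement.
 *
 * @param val Atomic variable.
 *
 * @return Value of the atomic variable before the decrement.
 *
 */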
NO_TRACE static inline atomic_count_t atomic_postdec(atomic_t *val)
{
	return atomic_add(val, -1);
}

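/** Atomic increment; the new value is discarded.
 *
 * @param val Atomic variable.
 *
 */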
NO_TRACE static inline void atomic_inc(atomic_t *val)
{
	(void) atomic_add(val, 1);
}

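/** Atomic decrement; the new value is discarded.
 *
 * @param val Atomic variable.
 *
 */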
NO_TRACE static inline void atomic_dec(atomic_t *val)
{
	(void) atomic_add(val, -1);
}

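/** Atomic test-and-set.
 *
 * Use the CASX instruction to atomically write 1 to the atomic variable,
 * provided that it currently contains 0.
 *
 * @param val Atomic variable.
 *
 * @return Zero if the variable was clear and has been set to 1,
 *         its previous non-zero value otherwise.
 *
 */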
NO_TRACE static inline atomic_count_t test_and_set(atomic_t *val)
{
	atomic_count_t v = 1;
	volatile uintptr_t ptr = (uintptr_t) &val->count;

	asm volatile (
		"casx %0, %2, %1\n"
		: "+m" (*((atomic_count_t *) ptr)),
		  "+r" (v)
		: "r" (0)
	);

	return v;
}

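/** Spin until the atomic variable is set from 0 to 1.
 *
 * Try to grab the lock with CASX; while the attempt fails, spin on
 * ordinary loads and retry the CASX only after the variable reads zero
 * again. Preemption is disabled before the lock is taken.
 *
 * @param val Atomic variable representing the lock.
 *
 */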
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
{
	atomic_count_t tmp1 = 1;
	atomic_count_t tmp2 = 0;

	volatile uintptr_t ptr = (uintptr_t) &val->count;

	preemption_disable();

	asm volatile (
		"0:\n"
		"casx %0, %3, %1\n"
		"brz %1, 2f\n"
		"nop\n"
		"1:\n"
		"ldx %0, %2\n"
		"brz %2, 0b\n"
		"nop\n"
		"ba,a %%xcc, 1b\n"
		"2:\n"
		: "+m" (*((atomic_count_t *) ptr)),
		  "+r" (tmp1),
		  "+r" (tmp2)
		: "r" (0)
	);

	/*
	 * Prevent the critical section code from being hoisted
	 * above the lock acquisition.
	 */
	CS_ENTER_BARRIER();
}

#endif

/** @}
 */