source: mainline/kernel/generic/include/synch/spinlock.h@ c28413a9

Last change on this file since c28413a9 was cc106e4, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

Fixed build for sparc64/ultra, sparc64/niagara, ppc32, mips32/GXemul.

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */
/** @file
 */

#ifndef KERN_SPINLOCK_H_
#define KERN_SPINLOCK_H_

#include <typedefs.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <atomic.h>
#include <debug.h>
#include <arch/asm.h>

#ifdef CONFIG_SMP

typedef struct spinlock {
        atomic_t val;

#ifdef CONFIG_DEBUG_SPINLOCK
        const char *name;
#endif /* CONFIG_DEBUG_SPINLOCK */
} spinlock_t;

/*
 * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks,
 * where the lock gets initialized at run time.
 */
#define SPINLOCK_DECLARE(lock_name)  spinlock_t lock_name
#define SPINLOCK_EXTERN(lock_name)   extern spinlock_t lock_name

/*
 * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used
 * for statically allocated spinlocks. They declare the symbol (either
 * global or static) and initialize the lock.
 */
#ifdef CONFIG_DEBUG_SPINLOCK

#define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
        spinlock_t lock_name = { \
                .name = desc_name, \
                .val = { 0 } \
        }

#define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
        static spinlock_t lock_name = { \
                .name = desc_name, \
                .val = { 0 } \
        }

#define ASSERT_SPINLOCK(expr, lock) \
        ASSERT_VERBOSE(expr, (lock)->name)

#define spinlock_lock(lock)    spinlock_lock_debug((lock))
#define spinlock_unlock(lock)  spinlock_unlock_debug((lock))

#else /* CONFIG_DEBUG_SPINLOCK */

#define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
        spinlock_t lock_name = { \
                .val = { 0 } \
        }

#define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
        static spinlock_t lock_name = { \
                .val = { 0 } \
        }

#define ASSERT_SPINLOCK(expr, lock) \
        ASSERT(expr)

#define spinlock_lock(lock)    atomic_lock_arch(&(lock)->val)
#define spinlock_unlock(lock)  spinlock_unlock_nondebug((lock))

#endif /* CONFIG_DEBUG_SPINLOCK */

#define SPINLOCK_INITIALIZE(lock_name) \
        SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)

#define SPINLOCK_STATIC_INITIALIZE(lock_name) \
        SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
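
/*
 * Usage sketch: a statically allocated lock versus a lock embedded in a
 * dynamically allocated structure. The identifiers example_lock,
 * example_device_t and example_device_init are hypothetical and do not
 * exist in the kernel.
 *
 *      SPINLOCK_INITIALIZE(example_lock);
 *
 *      typedef struct {
 *              SPINLOCK_DECLARE(lock);
 *      } example_device_t;
 *
 *      void example_device_init(example_device_t *dev)
 *      {
 *              spinlock_initialize(&dev->lock, "example_device_t.lock");
 *      }
 */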
112
[2b4a9f26]113extern void spinlock_initialize(spinlock_t *, const char *);
114extern int spinlock_trylock(spinlock_t *);
115extern void spinlock_lock_debug(spinlock_t *);
116extern void spinlock_unlock_debug(spinlock_t *);
[ffe4a87]117extern bool spinlock_locked(spinlock_t *);
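
/*
 * Usage sketch of the locking protocol (illustrative only; example_lock and
 * example_counter are hypothetical). Note that spinlock_unlock() re-enables
 * preemption, so a held spinlock also implies a preemption-disabled section.
 *
 *      spinlock_lock(&example_lock);
 *      ASSERT_SPINLOCK(spinlock_locked(&example_lock), &example_lock);
 *      example_counter++;
 *      spinlock_unlock(&example_lock);
 *
 *      // spinlock_trylock() presumably returns non-zero on success,
 *      // as its UP definition below suggests.
 *      if (spinlock_trylock(&example_lock)) {
 *              example_counter++;
 *              spinlock_unlock(&example_lock);
 *      }
 */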

/** Unlock spinlock
 *
 * Unlock spinlock for non-debug kernels.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
NO_TRACE static inline void spinlock_unlock_nondebug(spinlock_t *lock)
{
        /*
         * Prevent critical section code from bleeding out of the critical
         * section past this point.
         */
        CS_LEAVE_BARRIER();

        atomic_set(&lock->val, 0);
        preemption_enable();
}

#ifdef CONFIG_DEBUG_SPINLOCK

#include <print.h>

#define DEADLOCK_THRESHOLD  100000000

#define DEADLOCK_PROBE_INIT(pname)  size_t pname = 0

#define DEADLOCK_PROBE(pname, value) \
        if ((pname)++ > (value)) { \
                (pname) = 0; \
                printf("Deadlock probe %s: exceeded threshold %u\n" \
                    "cpu%u: function=%s, line=%u\n", \
                    #pname, (value), CPU->id, __func__, __LINE__); \
        }

#else /* CONFIG_DEBUG_SPINLOCK */

#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)

#endif /* CONFIG_DEBUG_SPINLOCK */
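
/*
 * Illustrative sketch of how the deadlock probe macros are meant to be used
 * in a busy-wait loop. This is a simplified stand-in, not the actual
 * spinlock_lock_debug() implementation; test_and_set() stands in for
 * whatever atomic primitive the architecture provides.
 *
 *      DEADLOCK_PROBE_INIT(probe);
 *      while (test_and_set(&lock->val)) {
 *              // prints a report and resets the counter once the number
 *              // of spins exceeds DEADLOCK_THRESHOLD
 *              DEADLOCK_PROBE(probe, DEADLOCK_THRESHOLD);
 *      }
 */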

#else /* CONFIG_SMP */

/* On UP systems, spinlocks are effectively left out. */

/* Allow the use of spinlock_t as an incomplete type. */
typedef struct spinlock spinlock_t;

#define SPINLOCK_DECLARE(name)
#define SPINLOCK_EXTERN(name)

#define SPINLOCK_INITIALIZE(name)
#define SPINLOCK_STATIC_INITIALIZE(name)

#define SPINLOCK_INITIALIZE_NAME(name, desc_name)
#define SPINLOCK_STATIC_INITIALIZE_NAME(name, desc_name)

#define ASSERT_SPINLOCK(expr, lock)  ASSERT(expr)

#define spinlock_initialize(lock, name)

#define spinlock_lock(lock)      preemption_disable()
#define spinlock_trylock(lock)   ({ preemption_disable(); 1; })
#define spinlock_unlock(lock)    preemption_enable()
#define spinlock_locked(lock)    1
#define spinlock_unlocked(lock)  1

#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)

#endif /* CONFIG_SMP */

typedef struct {
        SPINLOCK_DECLARE(lock);  /**< Spinlock */
        bool guard;              /**< Flag whether ipl is valid */
        ipl_t ipl;               /**< Original interrupt level */
} irq_spinlock_t;

#define IRQ_SPINLOCK_DECLARE(lock_name)  irq_spinlock_t lock_name
#define IRQ_SPINLOCK_EXTERN(lock_name)   extern irq_spinlock_t lock_name

#ifdef CONFIG_SMP

#define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
        ASSERT_SPINLOCK(expr, &((irq_lock)->lock))

/*
 * IRQ_SPINLOCK_INITIALIZE and IRQ_SPINLOCK_STATIC_INITIALIZE are to be used
 * for statically allocated interrupts-disabled spinlocks. They declare the
 * symbol (either global or static) and initialize the lock.
 */
#ifdef CONFIG_DEBUG_SPINLOCK

#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
        irq_spinlock_t lock_name = { \
                .lock = { \
                        .name = desc_name, \
                        .val = { 0 } \
                }, \
                .guard = false, \
                .ipl = 0 \
        }

#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
        static irq_spinlock_t lock_name = { \
                .lock = { \
                        .name = desc_name, \
                        .val = { 0 } \
                }, \
                .guard = false, \
                .ipl = 0 \
        }

#else /* CONFIG_DEBUG_SPINLOCK */

#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
        irq_spinlock_t lock_name = { \
                .lock = { \
                        .val = { 0 } \
                }, \
                .guard = false, \
                .ipl = 0 \
        }

#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
        static irq_spinlock_t lock_name = { \
                .lock = { \
                        .val = { 0 } \
                }, \
                .guard = false, \
                .ipl = 0 \
        }

#endif /* CONFIG_DEBUG_SPINLOCK */

#else /* CONFIG_SMP */

/*
 * Since the spinlocks are void on UP systems, we also need
 * a special variant of the interrupts-disabled spinlock macros
 * which takes this into account.
 */

#define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
        ASSERT_SPINLOCK(expr, NULL)

#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
        irq_spinlock_t lock_name = { \
                .guard = false, \
                .ipl = 0 \
        }

#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
        static irq_spinlock_t lock_name = { \
                .guard = false, \
                .ipl = 0 \
        }

#endif /* CONFIG_SMP */

#define IRQ_SPINLOCK_INITIALIZE(lock_name) \
        IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)

#define IRQ_SPINLOCK_STATIC_INITIALIZE(lock_name) \
        IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)

extern void irq_spinlock_initialize(irq_spinlock_t *, const char *);
extern void irq_spinlock_lock(irq_spinlock_t *, bool);
extern void irq_spinlock_unlock(irq_spinlock_t *, bool);
extern int irq_spinlock_trylock(irq_spinlock_t *);
extern void irq_spinlock_pass(irq_spinlock_t *, irq_spinlock_t *);
extern void irq_spinlock_exchange(irq_spinlock_t *, irq_spinlock_t *);
extern bool irq_spinlock_locked(irq_spinlock_t *);
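
/*
 * Usage sketch for the interrupts-disabled spinlock API (example_irq_lock is
 * hypothetical). Passing true as the second argument presumably makes the
 * lock operation disable interrupts and the unlock operation restore the
 * original interrupt level, which is what the guard/ipl members of
 * irq_spinlock_t appear to track.
 *
 *      IRQ_SPINLOCK_INITIALIZE(example_irq_lock);
 *
 *      irq_spinlock_lock(&example_irq_lock, true);
 *      // ... critical section, protected against local interrupt handlers ...
 *      irq_spinlock_unlock(&example_irq_lock, true);
 */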

#endif

/** @}
 */