source: mainline/kernel/generic/src/synch/spinlock.c@ 3bb732b

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 3bb732b was 1066041, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

preemption_disable: Turned functions into macros. Moved THREAD, AS, TASK, CPU into thread.h, as.h, task.h, cpu.h to fix the include hell that ensued.

  • Property mode set to 100644
File size: 8.4 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sync
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Spinlocks.
36 */
37
38#include <synch/spinlock.h>
39#include <atomic.h>
40#include <arch/barrier.h>
41#include <arch.h>
42#include <preemption.h>
43#include <print.h>
44#include <debug.h>
45#include <symtab.h>
46#include <stacktrace.h>
47#include <cpu.h>
48
49#ifdef CONFIG_SMP
50
51/** Initialize spinlock
52 *
53 * @param sl Pointer to spinlock_t structure.
54 *
55 */
56void spinlock_initialize(spinlock_t *lock, const char *name)
57{
58 atomic_set(&lock->val, 0);
59#ifdef CONFIG_DEBUG_SPINLOCK
60 lock->name = name;
61#endif
62}
63
64#ifdef CONFIG_DEBUG_SPINLOCK
65
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * possible occurrence of deadlock.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock_debug(spinlock_t *lock)
{
	size_t i = 0;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&lock->val)) {
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" will be
		 * ignored by this deadlock detection routine
		 * as this might cause an infinite recursion.
		 * We trust our code that there is no possible deadlock
		 * caused by these locks (except when an exception
		 * is triggered for instance by printf()).
		 *
		 * We encountered false positives caused by very
		 * slow framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printf_lock and the framebuffer lock.
		 */
		if (lock->name[0] == '*')
			continue;

		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %p:%s, "
			    "caller=%p (%s)\n", CPU->id, lock, lock->name,
			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));
			stack_trace();

			/*
			 * Reset the counter so the warning repeats
			 * periodically while we keep spinning.
			 */
			i = 0;
			deadlock_reported = true;
		}
	}

	/* If we warned earlier, note that the lock was acquired after all. */
	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
123
/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock_debug(spinlock_t *lock)
{
	/* Unlocking a lock that is not held is a bug. */
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();

	/* The releasing store must come only after the barrier above. */
	atomic_set(&lock->val, 0);
	preemption_enable();
}
142
143#endif
144
145/** Lock spinlock conditionally
146 *
147 * Lock spinlock conditionally. If the spinlock is not available
148 * at the moment, signal failure.
149 *
150 * @param lock Pointer to spinlock_t structure.
151 *
152 * @return Zero on failure, non-zero otherwise.
153 *
154 */
155int spinlock_trylock(spinlock_t *lock)
156{
157 preemption_disable();
158 int rc = !test_and_set(&lock->val);
159
160 /*
161 * Prevent critical section code from bleeding out this way up.
162 */
163 CS_ENTER_BARRIER();
164
165 if (!rc)
166 preemption_enable();
167
168 return rc;
169}
170
171/** Find out whether the spinlock is currently locked.
172 *
173 * @param lock Spinlock.
174 * @return True if the spinlock is locked, false otherwise.
175 */
176bool spinlock_locked(spinlock_t *lock)
177{
178 return atomic_get(&lock->val) != 0;
179}
180
181#endif
182
183/** Initialize interrupts-disabled spinlock
184 *
185 * @param lock IRQ spinlock to be initialized.
186 * @param name IRQ spinlock name.
187 *
188 */
189void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
190{
191 spinlock_initialize(&(lock->lock), name);
192 lock->guard = false;
193 lock->ipl = 0;
194}
195
/** Lock interrupts-disabled spinlock
 *
 * Lock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be locked.
 * @param irq_dis If true, interrupts are actually disabled
 *                prior locking the spinlock. If false, interrupts
 *                are expected to be already disabled.
 *
 */
void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
{
	if (irq_dis) {
		/* Interrupts must go off before we start spinning. */
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&(lock->lock));

		/*
		 * Record that this acquisition owns interrupt restoration
		 * and stash the previous level for irq_spinlock_unlock().
		 */
		lock->guard = true;
		lock->ipl = ipl;
	} else {
		/* Caller promised interrupts are already disabled. */
		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

		spinlock_lock(&(lock->lock));
		/* A guarded lock would mean someone else owns restoration. */
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
	}
}
221
/** Unlock interrupts-disabled spinlock
 *
 * Unlock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be unlocked.
 * @param irq_res If true, interrupts are restored to previously
 *                saved interrupt level.
 *
 */
void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

	if (irq_res) {
		/* Restoring requires that this lock holds the guard. */
		ASSERT_IRQ_SPINLOCK(lock->guard, lock);

		/*
		 * Read the saved level before releasing the lock;
		 * afterwards another CPU may overwrite it.
		 */
		lock->guard = false;
		ipl_t ipl = lock->ipl;

		spinlock_unlock(&(lock->lock));
		interrupts_restore(ipl);
	} else {
		/* The guard (and thus restoration duty) stays elsewhere. */
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
		spinlock_unlock(&(lock->lock));
	}
}
248
249/** Lock interrupts-disabled spinlock
250 *
251 * Lock an interrupts-disabled spinlock conditionally. If the
252 * spinlock is not available at the moment, signal failure.
253 * Interrupts are expected to be already disabled.
254 *
255 * @param lock IRQ spinlock to be locked conditionally.
256 *
257 * @return Zero on failure, non-zero otherwise.
258 *
259 */
260int irq_spinlock_trylock(irq_spinlock_t *lock)
261{
262 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
263 int rc = spinlock_trylock(&(lock->lock));
264
265 ASSERT_IRQ_SPINLOCK((!rc) || (!lock->guard), lock);
266 return rc;
267}
268
/** Pass lock from one interrupts-disabled spinlock to another
 *
 * Pass lock from one IRQ spinlock to another IRQ spinlock
 * without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * Note the ordering: the old lock is released BEFORE the new
 * one is acquired (unlike irq_spinlock_exchange()).
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	/* Pass guard from unlock to lock */
	bool guard = unlock->guard;
	ipl_t ipl = unlock->ipl;
	unlock->guard = false;

	spinlock_unlock(&(unlock->lock));
	spinlock_lock(&(lock->lock));

	/* The destination must not already own interrupt restoration. */
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	if (guard) {
		/* The new lock now owns restoring the saved level. */
		lock->guard = true;
		lock->ipl = ipl;
	}
}
299
/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * Note the ordering: the new lock is acquired BEFORE the old
 * one is released (unlike irq_spinlock_pass()).
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	spinlock_lock(&(lock->lock));
	/* The destination must not already own interrupt restoration. */
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard from unlock to lock */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	spinlock_unlock(&(unlock->lock));
}
327
328/** Find out whether the IRQ spinlock is currently locked.
329 *
330 * @param lock IRQ spinlock.
331 * @return True if the IRQ spinlock is locked, false otherwise.
332 */
333bool irq_spinlock_locked(irq_spinlock_t *ilock)
334{
335 return spinlock_locked(&ilock->lock);
336}
337
338/** @}
339 */
Note: See TracBrowser for help on using the repository browser.