source: mainline/kernel/generic/src/synch/spinlock.c@ 45b4300

Last change on this file since 45b4300 was 4e5dabf, checked in by Martin Decky <martin@…>, 13 years ago

cstyle (no change in functionality)

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Spinlocks.
 */

#include <synch/spinlock.h>
#include <atomic.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
#include <print.h>
#include <debug.h>
#include <symtab.h>
#include <stacktrace.h>

#ifdef CONFIG_SMP

/** Initialize spinlock
 *
 * @param lock Pointer to spinlock_t structure.
 * @param name Spinlock name.
 *
 */
void spinlock_initialize(spinlock_t *lock, const char *name)
{
	atomic_set(&lock->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
	lock->name = name;
#endif
}
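
/*
 * Illustrative usage sketch (not part of the original file): a plain
 * spinlock protecting a shared counter. The identifiers example_lock,
 * example_counter and the two example_* functions are hypothetical and
 * exist only to demonstrate the initialize/lock/unlock pattern; as
 * implemented in this file, locking also disables preemption and
 * unlocking re-enables it.
 */
static spinlock_t example_lock;
static size_t example_counter;

static void example_counter_init(void)
{
	spinlock_initialize(&example_lock, "example_lock");
	example_counter = 0;
}

static void example_counter_inc(void)
{
	/* Preemption stays disabled while the lock is held. */
	spinlock_lock(&example_lock);
	example_counter++;
	spinlock_unlock(&example_lock);
}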

#ifdef CONFIG_DEBUG_SPINLOCK

/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * a possible occurrence of deadlock.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock_debug(spinlock_t *lock)
{
	size_t i = 0;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&lock->val)) {
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" will be
		 * ignored by this deadlock detection routine
		 * as this might cause an infinite recursion.
		 * We trust our code that there is no possible deadlock
		 * caused by these locks (except when an exception
		 * is triggered for instance by printf()).
		 *
		 * We encountered false positives caused by very
		 * slow framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printf_lock and the framebuffer lock.
		 */
		if (lock->name[0] == '*')
			continue;

		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %p:%s, "
			    "caller=%p (%s)\n", CPU->id, lock, lock->name,
			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));
			stack_trace();

			i = 0;
			deadlock_reported = true;
		}
	}

	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
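
/*
 * Illustrative sketch (not part of the original file): as described in the
 * comment above, a lock whose name begins with '*' is skipped by the
 * deadlock detection in spinlock_lock_debug(), which matters for locks
 * taken by the reporting path itself (printf_lock, the framebuffer lock).
 * The identifier example_console_lock is hypothetical.
 */
static spinlock_t example_console_lock;

static void example_console_lock_init(void)
{
	/* The leading '*' suppresses deadlock reporting for this lock. */
	spinlock_initialize(&example_console_lock, "*example_console_lock");
}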

/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock_debug(spinlock_t *lock)
{
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();

	atomic_set(&lock->val, 0);
	preemption_enable();
}

#endif /* CONFIG_DEBUG_SPINLOCK */

/** Lock spinlock conditionally
 *
 * Lock spinlock conditionally. If the spinlock is not available
 * at the moment, signal failure.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 * @return Zero on failure, non-zero otherwise.
 *
 */
int spinlock_trylock(spinlock_t *lock)
{
	preemption_disable();
	int rc = !test_and_set(&lock->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();

	if (!rc)
		preemption_enable();

	return rc;
}
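
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * backs off instead of spinning. On success spinlock_trylock() returns
 * non-zero and leaves preemption disabled; on failure it has already
 * re-enabled preemption, so the caller simply skips the update. The
 * identifiers example_stat_lock, example_stat and example_try_set are
 * hypothetical.
 */
static spinlock_t example_stat_lock;
static size_t example_stat;

static bool example_try_set(size_t value)
{
	if (!spinlock_trylock(&example_stat_lock))
		return false;

	example_stat = value;
	spinlock_unlock(&example_stat_lock);
	return true;
}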

/** Find out whether the spinlock is currently locked.
 *
 * @param lock Spinlock.
 * @return True if the spinlock is locked, false otherwise.
 */
bool spinlock_locked(spinlock_t *lock)
{
	return atomic_get(&lock->val) != 0;
}

#endif /* CONFIG_SMP */

/** Initialize interrupts-disabled spinlock
 *
 * @param lock IRQ spinlock to be initialized.
 * @param name IRQ spinlock name.
 *
 */
void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
{
	spinlock_initialize(&(lock->lock), name);
	lock->guard = false;
	lock->ipl = 0;
}

/** Lock interrupts-disabled spinlock
 *
 * Lock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be locked.
 * @param irq_dis If true, interrupts are actually disabled
 *                prior to locking the spinlock. If false, interrupts
 *                are expected to be already disabled.
 *
 */
void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
{
	if (irq_dis) {
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&(lock->lock));

		lock->guard = true;
		lock->ipl = ipl;
	} else {
		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

		spinlock_lock(&(lock->lock));
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
	}
}

/** Unlock interrupts-disabled spinlock
 *
 * Unlock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be unlocked.
 * @param irq_res If true, interrupts are restored to the previously
 *                saved interrupt level.
 *
 */
void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

	if (irq_res) {
		ASSERT_IRQ_SPINLOCK(lock->guard, lock);

		lock->guard = false;
		ipl_t ipl = lock->ipl;

		spinlock_unlock(&(lock->lock));
		interrupts_restore(ipl);
	} else {
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
		spinlock_unlock(&(lock->lock));
	}
}
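
/*
 * Illustrative usage sketch (not part of the original file): the common
 * pairing where true is passed on both sides, so the lock operation saves
 * the interrupt level and disables interrupts, and the unlock operation
 * restores that level again. The identifiers example_state_lock,
 * example_state and example_set_state are hypothetical.
 */
static irq_spinlock_t example_state_lock;
static unsigned int example_state;

static void example_set_state(unsigned int state)
{
	/* Disables interrupts and records the previous level in the lock. */
	irq_spinlock_lock(&example_state_lock, true);
	example_state = state;
	/* Unlocks and restores the interrupt level saved above. */
	irq_spinlock_unlock(&example_state_lock, true);
}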

/** Lock interrupts-disabled spinlock conditionally
 *
 * Lock an interrupts-disabled spinlock conditionally. If the
 * spinlock is not available at the moment, signal failure.
 * Interrupts are expected to be already disabled.
 *
 * @param lock IRQ spinlock to be locked conditionally.
 *
 * @return Zero on failure, non-zero otherwise.
 *
 */
int irq_spinlock_trylock(irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
	int rc = spinlock_trylock(&(lock->lock));

	ASSERT_IRQ_SPINLOCK((!rc) || (!lock->guard), lock);
	return rc;
}
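
/*
 * Illustrative usage sketch (not part of the original file): a conditional
 * acquire on a path where interrupts are already disabled, e.g. while
 * another IRQ spinlock is held. The identifiers example_aux_lock and
 * example_poll_aux are hypothetical.
 */
static irq_spinlock_t example_aux_lock;

static bool example_poll_aux(void)
{
	/* The caller must already have interrupts disabled. */
	if (!irq_spinlock_trylock(&example_aux_lock))
		return false;

	/* ... touch the state protected by example_aux_lock ... */

	/* No guard was taken, so interrupts are not restored here. */
	irq_spinlock_unlock(&example_aux_lock, false);
	return true;
}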

/** Pass lock from one interrupts-disabled spinlock to another
 *
 * Pass lock from one IRQ spinlock to another IRQ spinlock
 * without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	/* Pass guard from unlock to lock */
	bool guard = unlock->guard;
	ipl_t ipl = unlock->ipl;
	unlock->guard = false;

	spinlock_unlock(&(unlock->lock));
	spinlock_lock(&(lock->lock));

	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	if (guard) {
		lock->guard = true;
		lock->ipl = ipl;
	}
}
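
/*
 * Illustrative usage sketch (not part of the original file): the caller
 * holds one IRQ spinlock and wants to continue under a different one
 * without re-enabling interrupts in between; the interrupt-level guard
 * travels with the lock ownership. The identifiers example_old_lock,
 * example_new_lock and example_requeue are hypothetical.
 */
static irq_spinlock_t example_old_lock;
static irq_spinlock_t example_new_lock;

static void example_requeue(void)
{
	irq_spinlock_lock(&example_old_lock, true);

	/* ... decide that the work belongs under the other lock ... */

	/* Unlocks example_old_lock first, then locks example_new_lock. */
	irq_spinlock_pass(&example_old_lock, &example_new_lock);

	/* ... continue under example_new_lock ... */

	irq_spinlock_unlock(&example_new_lock, true);
}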

/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	spinlock_lock(&(lock->lock));
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard from unlock to lock */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	spinlock_unlock(&(unlock->lock));
}
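
/*
 * Illustrative usage sketch (not part of the original file): hand-over-hand
 * stepping from one IRQ spinlock to the next. Unlike irq_spinlock_pass(),
 * the next lock is acquired before the current one is released, so at least
 * one of the two locks is held at every point. The identifiers
 * example_lock_a, example_lock_b and example_step are hypothetical.
 */
static irq_spinlock_t example_lock_a;
static irq_spinlock_t example_lock_b;

static void example_step(void)
{
	irq_spinlock_lock(&example_lock_a, true);

	/* ... work under example_lock_a ... */

	/* Locks example_lock_b first, then unlocks example_lock_a. */
	irq_spinlock_exchange(&example_lock_a, &example_lock_b);

	/* ... work under example_lock_b ... */

	irq_spinlock_unlock(&example_lock_b, true);
}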

/** Find out whether the IRQ spinlock is currently locked.
 *
 * @param ilock IRQ spinlock.
 * @return True if the IRQ spinlock is locked, false otherwise.
 */
bool irq_spinlock_locked(irq_spinlock_t *ilock)
{
	return spinlock_locked(&ilock->lock);
}

/** @}
 */