source: mainline/kernel/generic/src/synch/spinlock.c@ ffe4a87

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since ffe4a87 was ffe4a87, checked in by Jakub Jermar <jakub@…>, 15 years ago

Add interfaces for testing the status of plain spinlocks and the IRQ spinlocks.

Note that because of the non-SMP version of spinlocks, the status must be
checked only in the affirmative manner. Instead of:

ASSERT(!spinlock_locked(...));


one needs to do:

ASSERT(spinlock_unlocked(...));

Otherwise the assertion will be hit on debug non-SMP kernels.

  • Property mode set to 100644
File size: 8.8 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup sync
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Spinlocks.
36 */
37
38#include <synch/spinlock.h>
39#include <atomic.h>
40#include <arch/barrier.h>
41#include <arch.h>
42#include <preemption.h>
43#include <print.h>
44#include <debug.h>
45#include <symtab.h>
46
47#ifdef CONFIG_SMP
48
/** Initialize spinlock
 *
 * Put the lock into the unlocked state and, on debug builds,
 * remember its symbolic name for deadlock reporting.
 *
 * @param lock Pointer to spinlock_t structure.
 * @param name Symbolic name of the lock (stored only when
 *             CONFIG_DEBUG_SPINLOCK is enabled).
 *
 */
void spinlock_initialize(spinlock_t *lock, const char *name)
{
	atomic_set(&lock->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
	lock->name = name;
#endif
}
61
62#ifdef CONFIG_DEBUG_SPINLOCK
63
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * possible occurrence of deadlock.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock_debug(spinlock_t *lock)
{
	/* Number of spins since the last deadlock report. */
	size_t i = 0;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&lock->val)) {
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" will be
		 * ignored by this deadlock detection routine
		 * as this might cause an infinite recursion.
		 * We trust our code that there is no possible deadlock
		 * caused by these locks (except when an exception
		 * is triggered for instance by printf()).
		 *
		 * We encountered false positives caused by very
		 * slow framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printf_lock and the framebuffer lock.
		 *
		 */
		if (lock->name[0] == '*')
			continue;

		/* Report a suspected deadlock once per DEADLOCK_THRESHOLD spins. */
		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %" PRIp ":%s, "
			    "caller=%" PRIp "(%s)\n", CPU->id, lock, lock->name,
			    CALLER, symtab_fmt_name_lookup(CALLER));

			i = 0;
			deadlock_reported = true;
		}
	}

	/* The lock was acquired after all, so the report was a false alarm. */
	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
121
/** Unlock spinlock
 *
 * Unlock spinlock. The lock must currently be held
 * (checked via ASSERT_SPINLOCK on debug builds).
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock_debug(spinlock_t *lock)
{
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();

	atomic_set(&lock->val, 0);
	preemption_enable();
}
140
141#endif
142
143/** Lock spinlock conditionally
144 *
145 * Lock spinlock conditionally. If the spinlock is not available
146 * at the moment, signal failure.
147 *
148 * @param lock Pointer to spinlock_t structure.
149 *
150 * @return Zero on failure, non-zero otherwise.
151 *
152 */
153int spinlock_trylock(spinlock_t *lock)
154{
155 preemption_disable();
156 int rc = !test_and_set(&lock->val);
157
158 /*
159 * Prevent critical section code from bleeding out this way up.
160 */
161 CS_ENTER_BARRIER();
162
163 if (!rc)
164 preemption_enable();
165
166 return rc;
167}
168
169/** Find out whether the spinlock is currently locked.
170 *
171 * @param lock Spinlock.
172 * @return True if the spinlock is locked, false otherwise.
173 */
174bool spinlock_locked(spinlock_t *lock)
175{
176 return atomic_get(&lock->val) != 0;
177}
178
179/** Find out whether the spinlock is currently unlocked.
180 *
181 * @param lock Spinlock.
182 * @return True if the spinlock is not locked, false otherwise.
183 */
184bool spinlock_unlocked(spinlock_t *lock)
185{
186 return atomic_get(&lock->val) == 0;
187}
188
189#endif
190
191/** Initialize interrupts-disabled spinlock
192 *
193 * @param lock IRQ spinlock to be initialized.
194 * @param name IRQ spinlock name.
195 *
196 */
197void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
198{
199 spinlock_initialize(&(lock->lock), name);
200 lock->guard = false;
201 lock->ipl = 0;
202}
203
204/** Lock interrupts-disabled spinlock
205 *
206 * Lock a spinlock which requires disabled interrupts.
207 *
208 * @param lock IRQ spinlock to be locked.
209 * @param irq_dis If true, interrupts are actually disabled
210 * prior locking the spinlock. If false, interrupts
211 * are expected to be already disabled.
212 *
213 */
214void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
215{
216 if (irq_dis) {
217 ipl_t ipl = interrupts_disable();
218 spinlock_lock(&(lock->lock));
219
220 lock->guard = true;
221 lock->ipl = ipl;
222 } else {
223 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
224
225 spinlock_lock(&(lock->lock));
226 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
227 }
228}
229
230/** Unlock interrupts-disabled spinlock
231 *
232 * Unlock a spinlock which requires disabled interrupts.
233 *
234 * @param lock IRQ spinlock to be unlocked.
235 * @param irq_res If true, interrupts are restored to previously
236 * saved interrupt level.
237 *
238 */
239void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
240{
241 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
242
243 if (irq_res) {
244 ASSERT_IRQ_SPINLOCK(lock->guard, lock);
245
246 lock->guard = false;
247 ipl_t ipl = lock->ipl;
248
249 spinlock_unlock(&(lock->lock));
250 interrupts_restore(ipl);
251 } else {
252 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
253 spinlock_unlock(&(lock->lock));
254 }
255}
256
257/** Lock interrupts-disabled spinlock
258 *
259 * Lock an interrupts-disabled spinlock conditionally. If the
260 * spinlock is not available at the moment, signal failure.
261 * Interrupts are expected to be already disabled.
262 *
263 * @param lock IRQ spinlock to be locked conditionally.
264 *
265 * @return Zero on failure, non-zero otherwise.
266 *
267 */
268int irq_spinlock_trylock(irq_spinlock_t *lock)
269{
270 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
271 int rc = spinlock_trylock(&(lock->lock));
272
273 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
274 return rc;
275}
276
/** Pass lock from one interrupts-disabled spinlock to another
 *
 * Pass lock from one IRQ spinlock to another IRQ spinlock
 * without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * Note: the old lock is released BEFORE the new one is taken,
 * so this does not provide hand-over-hand ordering (see
 * irq_spinlock_exchange() for that).
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	/* Pass guard from unlock to lock */
	/* Snapshot the guard state while we still hold the old lock. */
	bool guard = unlock->guard;
	ipl_t ipl = unlock->ipl;
	unlock->guard = false;

	spinlock_unlock(&(unlock->lock));
	spinlock_lock(&(lock->lock));

	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* The new lock inherits responsibility for restoring interrupts. */
	if (guard) {
		lock->guard = true;
		lock->ipl = ipl;
	}
}
307
/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * Unlike irq_spinlock_pass(), the new lock is acquired BEFORE the
 * old one is released, so both are briefly held at once.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	spinlock_lock(&(lock->lock));
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard from unlock to lock */
	/* Transfer responsibility for restoring the interrupt level. */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	spinlock_unlock(&(unlock->lock));
}
335
336/** Find out whether the IRQ spinlock is currently locked.
337 *
338 * @param lock IRQ spinlock.
339 * @return True if the IRQ spinlock is locked, false otherwise.
340 */
341bool irq_spinlock_locked(irq_spinlock_t *ilock)
342{
343 return spinlock_locked(&ilock->lock);
344}
345
346/** Find out whether the IRQ spinlock is currently unlocked.
347 *
348 * @param lock IRQ spinlock.
349 * @return True if the IRQ spinlock is not locked, false otherwise.
350 */
351bool irq_spinlock_unlocked(irq_spinlock_t *ilock)
352{
353 return spinlock_unlocked(&ilock->lock);
354}
355
356/** @}
357 */
Note: See TracBrowser for help on using the repository browser.