source: mainline/kernel/generic/src/synch/spinlock.c@ c263c77

Last change on this file since c263c77 was c263c77, checked in by Martin Decky <martin@…>, 14 years ago

revert poisonous part of changeset mainline,971
(sadly, this reopens #243)

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sync
 * @{
 */

/**
 * @file
 * @brief Spinlocks.
 */

#include <synch/spinlock.h>
#include <atomic.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
#include <print.h>
#include <debug.h>
#include <symtab.h>

#ifdef CONFIG_SMP

/** Initialize spinlock
 *
 * @param lock Pointer to spinlock_t structure.
 * @param name Symbolic name of the spinlock (stored only with
 *             CONFIG_DEBUG_SPINLOCK).
 *
 */
void spinlock_initialize(spinlock_t *lock, const char *name)
{
	atomic_set(&lock->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
	lock->name = name;
#endif
}

#ifdef CONFIG_DEBUG_SPINLOCK

/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limited ability to report
 * a possible occurrence of deadlock.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock_debug(spinlock_t *lock)
{
	size_t i = 0;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&lock->val)) {
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" will be
		 * ignored by this deadlock detection routine
		 * as this might cause an infinite recursion.
		 * We trust our code that there is no possible deadlock
		 * caused by these locks (except when an exception
		 * is triggered for instance by printf()).
		 *
		 * We encountered false positives caused by very
		 * slow framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printf_lock and the framebuffer lock.
		 */
		if (lock->name[0] == '*')
			continue;

		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %p:%s, "
			    "caller=%p (%s)\n", CPU->id, lock, lock->name,
			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));

			i = 0;
			deadlock_reported = true;
		}
	}

	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
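
/*
 * Illustrative sketch (not part of the upstream file): the "*" convention
 * described above is simply part of the name passed at initialization time.
 * The identifier example_output_lock is hypothetical.
 */
static spinlock_t example_output_lock;

static void example_output_lock_init(void)
{
	/* The leading "*" exempts this lock from the deadlock reporting above. */
	spinlock_initialize(&example_output_lock, "*example_output_lock");
}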

/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_unlock_debug(spinlock_t *lock)
{
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();

	atomic_set(&lock->val, 0);
	preemption_enable();
}

#endif
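
/*
 * Illustrative sketch (not part of the upstream file): the usual caller-side
 * pattern for the plain spinlock API. As in spinlock_lock_debug() above,
 * preemption stays disabled for the whole critical section. The identifiers
 * example_lock and example_counter are hypothetical.
 */
static spinlock_t example_lock;
static size_t example_counter;

static void example_counter_init(void)
{
	spinlock_initialize(&example_lock, "example_lock");
	example_counter = 0;
}

static void example_counter_increment(void)
{
	spinlock_lock(&example_lock);
	example_counter++;
	spinlock_unlock(&example_lock);
}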

/** Lock spinlock conditionally
 *
 * Lock spinlock conditionally. If the spinlock is not available
 * at the moment, signal failure.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 * @return Zero on failure, non-zero otherwise.
 *
 */
int spinlock_trylock(spinlock_t *lock)
{
	preemption_disable();
	int rc = !test_and_set(&lock->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();

	if (!rc)
		preemption_enable();

	return rc;
}
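
/*
 * Illustrative sketch (not part of the upstream file): conditional locking
 * with spinlock_trylock(). The helper example_try_increment() and its
 * arguments are hypothetical.
 */
static bool example_try_increment(spinlock_t *lock, size_t *counter)
{
	/* On failure, spinlock_trylock() has already re-enabled preemption. */
	if (!spinlock_trylock(lock))
		return false;

	(*counter)++;
	spinlock_unlock(lock);
	return true;
}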

/** Find out whether the spinlock is currently locked.
 *
 * @param lock Spinlock.
 * @return True if the spinlock is locked, false otherwise.
 */
bool spinlock_locked(spinlock_t *lock)
{
	return atomic_get(&lock->val) != 0;
}

#endif

/** Initialize interrupts-disabled spinlock
 *
 * @param lock IRQ spinlock to be initialized.
 * @param name IRQ spinlock name.
 *
 */
void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
{
	spinlock_initialize(&(lock->lock), name);
	lock->guard = false;
	lock->ipl = 0;
}

/** Lock interrupts-disabled spinlock
 *
 * Lock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be locked.
 * @param irq_dis If true, interrupts are actually disabled
 *                prior to locking the spinlock. If false, interrupts
 *                are expected to be already disabled.
 *
 */
void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
{
	if (irq_dis) {
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&(lock->lock));

		lock->guard = true;
		lock->ipl = ipl;
	} else {
		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

		spinlock_lock(&(lock->lock));
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
	}
}

/** Unlock interrupts-disabled spinlock
 *
 * Unlock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be unlocked.
 * @param irq_res If true, interrupts are restored to the
 *                previously saved interrupt level.
 *
 */
void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

	if (irq_res) {
		ASSERT_IRQ_SPINLOCK(lock->guard, lock);

		lock->guard = false;
		ipl_t ipl = lock->ipl;

		spinlock_unlock(&(lock->lock));
		interrupts_restore(ipl);
	} else {
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
		spinlock_unlock(&(lock->lock));
	}
}
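
/*
 * Illustrative sketch (not part of the upstream file): pairing
 * irq_spinlock_lock() and irq_spinlock_unlock(). The identifiers
 * example_irq_lock and example_irq_data are hypothetical.
 */
static irq_spinlock_t example_irq_lock;
static size_t example_irq_data;

static void example_irq_data_update(void)
{
	/*
	 * true: disable interrupts here; the previous interrupt level is
	 * remembered in the lock's guard/ipl fields.
	 */
	irq_spinlock_lock(&example_irq_lock, true);
	example_irq_data++;
	/* true: restore the interrupt level saved by the matching lock call. */
	irq_spinlock_unlock(&example_irq_lock, true);
}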

/** Lock interrupts-disabled spinlock conditionally
 *
 * Lock an interrupts-disabled spinlock conditionally. If the
 * spinlock is not available at the moment, signal failure.
 * Interrupts are expected to be already disabled.
 *
 * @param lock IRQ spinlock to be locked conditionally.
 *
 * @return Zero on failure, non-zero otherwise.
 *
 */
int irq_spinlock_trylock(irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
	int rc = spinlock_trylock(&(lock->lock));

	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
	return rc;
}

/** Pass lock from one interrupts-disabled spinlock to another
 *
 * Pass lock from one IRQ spinlock to another IRQ spinlock
 * without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	/* Pass guard from unlock to lock */
	bool guard = unlock->guard;
	ipl_t ipl = unlock->ipl;
	unlock->guard = false;

	spinlock_unlock(&(unlock->lock));
	spinlock_lock(&(lock->lock));

	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	if (guard) {
		lock->guard = true;
		lock->ipl = ipl;
	}
}
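
/*
 * Illustrative sketch (not part of the upstream file): irq_spinlock_pass()
 * drops the old lock before taking the new one, so there is a short window
 * in which neither lock is held. Both parameters are hypothetical and the
 * source lock is assumed to have been locked with irq_dis = true.
 */
static void example_pass(irq_spinlock_t *src, irq_spinlock_t *dest)
{
	/* Interrupts stay disabled; the guard moves from src to dest. */
	irq_spinlock_pass(src, dest);

	/* ... operate on data protected by dest ... */

	irq_spinlock_unlock(dest, true);
}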

/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	spinlock_lock(&(lock->lock));
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard from unlock to lock */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	spinlock_unlock(&(unlock->lock));
}
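
/*
 * Illustrative sketch (not part of the upstream file): hand-over-hand
 * stepping with irq_spinlock_exchange(). Unlike irq_spinlock_pass(), the
 * next lock is acquired before the current one is released, so the caller
 * always holds at least one of the two. The parameters are hypothetical and
 * the current lock is assumed to have been locked with irq_dis = true.
 */
static void example_step(irq_spinlock_t *current, irq_spinlock_t *next)
{
	irq_spinlock_exchange(current, next);

	/* ... operate on data protected by next; current is already released ... */

	irq_spinlock_unlock(next, true);
}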

/** Find out whether the IRQ spinlock is currently locked.
 *
 * @param ilock IRQ spinlock.
 * @return True if the IRQ spinlock is locked, false otherwise.
 */
bool irq_spinlock_locked(irq_spinlock_t *ilock)
{
	return spinlock_locked(&ilock->lock);
}
334
335/** @}
336 */