source: mainline/kernel/generic/src/synch/irq_spinlock.c

Last change on this file was b076dfb, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Implement more elaborate debug checking for spinlocks

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * Copyright (c) 2023 Jiří Zárevúcky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sync
 * @{
 */

/**
 * @file
 * @brief IRQ Spinlocks.
 */

#include <arch/asm.h>
#include <synch/spinlock.h>

#include <cpu.h>

#ifdef CONFIG_DEBUG_SPINLOCK

/*
 * Ownership tracking for debug builds. CPU may still be NULL (e.g. early
 * during boot); since a NULL owner means "unowned", a distinct sentinel
 * value is used to record ownership in that case.
 */
#define CPU_OWNER ((CPU == NULL) ? (cpu_t *) UINTPTR_MAX : CPU)

/** Return true if the lock is recorded as held by the current CPU. */
static inline bool owned_by_me(irq_spinlock_t *lock)
{
	return atomic_load_explicit(&lock->owner, memory_order_relaxed) == CPU_OWNER;
}

/** Return true if the lock is not recorded as held by the current CPU. */
static inline bool not_owned_by_me(irq_spinlock_t *lock)
{
	return !owned_by_me(lock);
}

/** Record the current CPU as owner of a freshly acquired lock. */
static inline void claim(irq_spinlock_t *lock)
{
	cpu_t *cpu = CPU_OWNER;
	atomic_store_explicit(&lock->owner, cpu, memory_order_relaxed);
	CURRENT->mutex_locks++;
}

/** Clear the ownership record before the lock is released. */
static inline void unclaim(irq_spinlock_t *lock)
{
	CURRENT->mutex_locks--;
	atomic_store_explicit(&lock->owner, NULL, memory_order_relaxed);
}

#else

/* Without CONFIG_DEBUG_SPINLOCK, the ownership checks compile away. */

static inline bool owned_by_me(irq_spinlock_t *lock)
{
	return true;
}

static inline bool not_owned_by_me(irq_spinlock_t *lock)
{
	return true;
}

static inline void claim(irq_spinlock_t *lock)
{
}

static inline void unclaim(irq_spinlock_t *lock)
{
}

#endif
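
/*
 * Illustrative sketch (not part of the original file; the lock name is
 * hypothetical): with CONFIG_DEBUG_SPINLOCK enabled, an attempt to take a
 * lock that the calling CPU already holds is caught by the not_owned_by_me()
 * assertion in irq_spinlock_lock() below, rather than spinning forever:
 *
 *     irq_spinlock_lock(&example_lock, true);
 *     irq_spinlock_lock(&example_lock, true);  // assertion failure: already owned
 */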

/** Initialize interrupts-disabled spinlock
 *
 * @param lock IRQ spinlock to be initialized.
 * @param name IRQ spinlock name.
 *
 */
void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
{
	*lock = (irq_spinlock_t) IRQ_SPINLOCK_INITIALIZER(name);
}
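
/*
 * Usage sketch (illustrative, not part of the original file; the structure
 * and names are hypothetical): an IRQ spinlock is typically embedded in the
 * object it protects and initialized once before first use.
 *
 *     typedef struct {
 *         irq_spinlock_t lock;
 *         size_t counter;
 *     } example_t;
 *
 *     irq_spinlock_initialize(&example->lock, "example_t.lock");
 */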

/** Lock interrupts-disabled spinlock
 *
 * Lock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be locked.
 * @param irq_dis If true, disables interrupts before locking the spinlock.
 *                If false, interrupts are expected to be already disabled.
 *
 */
void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
{
	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);

	if (irq_dis) {
		ipl_t ipl = interrupts_disable();
		spinlock_lock(&(lock->lock));

		lock->guard = true;
		lock->ipl = ipl;
	} else {
		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);

		spinlock_lock(&(lock->lock));
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
	}

	claim(lock);
}

/** Unlock interrupts-disabled spinlock
 *
 * Unlock a spinlock which requires disabled interrupts.
 *
 * @param lock    IRQ spinlock to be unlocked.
 * @param irq_res If true, interrupts are restored to the previously
 *                saved interrupt level.
 *
 */
void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
	ASSERT_IRQ_SPINLOCK(owned_by_me(lock), lock);

	unclaim(lock);

	if (irq_res) {
		ASSERT_IRQ_SPINLOCK(lock->guard, lock);

		lock->guard = false;
		ipl_t ipl = lock->ipl;

		spinlock_unlock(&(lock->lock));
		interrupts_restore(ipl);
	} else {
		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
		spinlock_unlock(&(lock->lock));
	}
}
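
/*
 * Usage sketch (illustrative, not part of the original file; the lock names
 * are hypothetical). Only the outermost lock manipulates the interrupt
 * state: it is taken with irq_dis = true and released with irq_res = true,
 * while a lock nested inside it relies on interrupts already being disabled
 * and uses false for both:
 *
 *     irq_spinlock_lock(&outer_lock, true);    // disables interrupts, saves ipl
 *     irq_spinlock_lock(&inner_lock, false);   // interrupts already disabled
 *
 *     // ... critical section ...
 *
 *     irq_spinlock_unlock(&inner_lock, false);
 *     irq_spinlock_unlock(&outer_lock, true);  // restores the saved ipl
 */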

/** Lock interrupts-disabled spinlock conditionally
 *
 * Lock an interrupts-disabled spinlock conditionally. If the
 * spinlock is not available at the moment, signal failure.
 * Interrupts are expected to be already disabled.
 *
 * @param lock IRQ spinlock to be locked conditionally.
 *
 * @return False on failure, true otherwise.
 *
 */
bool irq_spinlock_trylock(irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
	bool ret = spinlock_trylock(&(lock->lock));
	if (ret)
		claim(lock);

	ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
	return ret;
}
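
/*
 * Usage sketch (illustrative, not part of the original file; the lock names
 * are hypothetical). Since irq_spinlock_trylock() expects interrupts to be
 * disabled already, it is typically called while another IRQ spinlock is
 * held, e.g. to avoid a lock-ordering deadlock:
 *
 *     irq_spinlock_lock(&first_lock, true);
 *     if (irq_spinlock_trylock(&second_lock)) {
 *         // ... both locks held ...
 *         irq_spinlock_unlock(&second_lock, false);
 *     }
 *     irq_spinlock_unlock(&first_lock, true);
 */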

/** Pass lock from one interrupts-disabled spinlock to another
 *
 * Pass lock from one IRQ spinlock to another IRQ spinlock
 * without enabling interrupts during the process. Note that the
 * first spinlock is unlocked before the second is locked, so unlike
 * irq_spinlock_exchange() this does not provide hand-over-hand locking.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
	ASSERT_IRQ_SPINLOCK(owned_by_me(unlock), unlock);
	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);

	/* Pass guard from unlock to lock */
	bool guard = unlock->guard;
	ipl_t ipl = unlock->ipl;
	unlock->guard = false;

	unclaim(unlock);

	spinlock_unlock(&(unlock->lock));
	spinlock_lock(&(lock->lock));

	claim(lock);

	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	if (guard) {
		lock->guard = true;
		lock->ipl = ipl;
	}
}
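
/*
 * Usage sketch (illustrative, not part of the original file; the names are
 * hypothetical). The interrupt-disabled state travels with the lock, so the
 * caller keeps interrupts off across the hand-over even though the first
 * lock is dropped before the second is acquired:
 *
 *     irq_spinlock_lock(&a->lock, true);
 *     // decide that b, not a, is the object of interest
 *     irq_spinlock_pass(&a->lock, &b->lock);
 *     // only b->lock is held now, interrupts are still disabled
 *     irq_spinlock_unlock(&b->lock, true);
 */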

/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 *
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
	ASSERT_IRQ_SPINLOCK(owned_by_me(unlock), unlock);
	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);

	spinlock_lock(&(lock->lock));
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard from unlock to lock */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	claim(lock);
	unclaim(unlock);

	spinlock_unlock(&(unlock->lock));
}
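
/*
 * Usage sketch (illustrative, not part of the original file; the node
 * structure is hypothetical). Unlike irq_spinlock_pass(), the second lock is
 * acquired before the first is released, so both are briefly held; this is
 * the classic hand-over-hand pattern for walking a linked structure:
 *
 *     irq_spinlock_lock(&node->lock, true);
 *     while (node->next != NULL) {
 *         irq_spinlock_exchange(&node->lock, &node->next->lock);
 *         node = node->next;
 *     }
 *     irq_spinlock_unlock(&node->lock, true);
 */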

/** Find out whether the IRQ spinlock is currently locked.
 *
 * @param lock IRQ spinlock.
 * @return True if the IRQ spinlock is locked (and, with
 *         CONFIG_DEBUG_SPINLOCK, held by the current CPU), false otherwise.
 */
bool irq_spinlock_locked(irq_spinlock_t *lock)
{
	return owned_by_me(lock) && spinlock_locked(&lock->lock);
}
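
/*
 * Usage sketch (illustrative, not part of the original file): the predicate
 * is suited to assertions that the caller indeed holds a given lock, e.g.
 *
 *     assert(irq_spinlock_locked(&example_lock));
 */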

/** @}
 */