/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * Copyright (c) 2023 Jiří Zárevúcky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_sync
 * @{
 */

/**
 * @file
 * @brief Spinlocks.
 */

#ifdef CONFIG_SMP

#include <arch/asm.h>
#include <synch/spinlock.h>
#include <atomic.h>
#include <barrier.h>
#include <arch.h>
#include <preemption.h>
#include <stdio.h>
#include <debug.h>
#include <symtab.h>
#include <stacktrace.h>
#include <cpu.h>

/** Initialize spinlock
 *
 * @param lock Pointer to spinlock_t structure.
 * @param name Symbolic name of the lock, stored and reported when spinlock
 *             debugging (CONFIG_DEBUG_SPINLOCK) is enabled.
 *
 */
void spinlock_initialize(spinlock_t *lock, const char *name)
{
	atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
#ifdef CONFIG_DEBUG_SPINLOCK
	lock->name = name;
#endif
}
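
/*
 * A minimal sketch of the intended initialization pattern; the variable
 * `my_lock` is hypothetical and not part of this file:
 *
 *     spinlock_t my_lock;
 *     spinlock_initialize(&my_lock, "my_lock");
 *
 * Locks whose name starts with "*" are skipped by the deadlock detection
 * loop in spinlock_lock() below.
 */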

/** Lock spinlock
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock(spinlock_t *lock)
{
	preemption_disable();

	bool deadlock_reported = false;
	size_t i = 0;

	while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
		cpu_spin_hint();

#ifdef CONFIG_DEBUG_SPINLOCK
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" is
		 * ignored by this deadlock detection routine,
		 * as reporting on it might cause infinite recursion.
		 * We trust that our code cannot deadlock on these
		 * locks (except when an exception is triggered,
		 * for instance by printf()).
		 *
		 * We have seen false positives caused by very slow
		 * framebuffer interaction (especially when run in a
		 * simulator), affecting both printf_lock and the
		 * framebuffer lock.
		 */
		if (lock->name[0] == '*')
			continue;

		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %p:%s, "
			    "caller=%p (%s)\n", CPU->id, lock, lock->name,
			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));
			stack_trace();

			i = 0;
			deadlock_reported = true;
		}
#endif
	}

	/* Avoid compiler warning with debug disabled. */
	(void) i;

	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);
}
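
/*
 * A minimal sketch of typical usage, assuming a hypothetical lock `my_lock`
 * protecting a hypothetical counter `shared_data`. spinlock_lock() disables
 * preemption before spinning, so every path must release the lock with
 * spinlock_unlock() to re-enable preemption:
 *
 *     spinlock_lock(&my_lock);
 *     shared_data++;        // critical section, preemption disabled
 *     spinlock_unlock(&my_lock);
 */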

/** Unlock spinlock
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);
#endif

	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
	preemption_enable();
}

/**
 * Lock spinlock conditionally. If the spinlock is not available
 * at the moment, signal failure instead of spinning.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 * @return true if the lock was acquired, false otherwise.
 *
 */
bool spinlock_trylock(spinlock_t *lock)
{
	preemption_disable();

	bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);

	if (!ret)
		preemption_enable();

	return ret;
}
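
/*
 * A minimal sketch of conditional locking, assuming a hypothetical lock
 * `my_lock`. spinlock_trylock() leaves preemption disabled only when it
 * returns true, so only the success path calls spinlock_unlock():
 *
 *     if (spinlock_trylock(&my_lock)) {
 *         // ... critical section ...
 *         spinlock_unlock(&my_lock);
 *     } else {
 *         // lock busy; fall back or retry later
 *     }
 */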

/** Find out whether the spinlock is currently locked.
 *
 * @param lock Spinlock.
 * @return True if the spinlock is locked, false otherwise.
 */
bool spinlock_locked(spinlock_t *lock)
{
	/*
	 * NOTE: atomic_flag does not support a plain atomic read (by design),
	 * so instead we test_and_set and then clear again if the flag was
	 * previously clear. This function is only used in assertions, so no
	 * preemption_disable()/preemption_enable() pair is needed here.
	 */

	bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
	if (!ret)
		atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
	return ret;
}
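
/*
 * A minimal sketch of the intended use in assertions, mirroring the
 * ASSERT_SPINLOCK() check in spinlock_unlock() above (`my_lock` is a
 * hypothetical lock):
 *
 *     ASSERT_SPINLOCK(spinlock_locked(&my_lock), &my_lock);
 */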

#endif /* CONFIG_SMP */

/** @}
 */