source: mainline/kernel/generic/src/synch/spinlock.c@ 64e9cf4

ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 64e9cf4 was 2b264c4, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Add architecture-specific spinlock optimization

  • Property mode set to 100644
File size: 5.0 KB
Line 
1/*
2 * Copyright (c) 2001-2004 Jakub Jermar
3 * Copyright (c) 2023 Jiří Zárevúcky
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/** @addtogroup kernel_sync
31 * @{
32 */
33
34/**
35 * @file
36 * @brief Spinlocks.
37 */
38
39#include <arch/asm.h>
40#include <synch/spinlock.h>
41#include <atomic.h>
42#include <barrier.h>
43#include <arch.h>
44#include <preemption.h>
45#include <stdio.h>
46#include <debug.h>
47#include <symtab.h>
48#include <stacktrace.h>
49#include <cpu.h>
50
/*
 * Fallback for architectures that do not provide a CPU spin-wait hint
 * (e.g. x86 PAUSE). Expands to a no-op so the busy-wait loop below
 * compiles unchanged everywhere.
 */
#ifndef ARCH_SPIN_HINT
#define ARCH_SPIN_HINT() ((void)0)
#endif
54
/** Initialize spinlock
 *
 * Puts the lock into the unlocked state. On non-SMP builds the
 * structure carries no state, so this is effectively a no-op.
 *
 * @param lock Pointer to spinlock_t structure.
 * @param name Human-readable lock name, recorded only when
 *             CONFIG_DEBUG_SPINLOCK is enabled (used by the deadlock
 *             detection report in spinlock_lock()).
 *
 */
void spinlock_initialize(spinlock_t *lock, const char *name)
{
#ifdef CONFIG_SMP
	/* Relaxed order suffices: the lock is not yet visible to other CPUs. */
	atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
#ifdef CONFIG_DEBUG_SPINLOCK
	lock->name = name;
#endif
#endif
}
69
/** Lock spinlock
 *
 * Busy-wait until the lock is acquired, disabling preemption for the
 * duration of the critical section (spinlock_unlock() re-enables it).
 * On non-SMP builds acquiring the lock reduces to disabling preemption.
 *
 * With CONFIG_DEBUG_SPINLOCK enabled, a diagnostic report is printed
 * when the CPU spins for longer than DEADLOCK_THRESHOLD iterations.
 *
 * @param lock Pointer to spinlock_t structure.
 *
 */
void spinlock_lock(spinlock_t *lock)
{
	preemption_disable();

#ifdef CONFIG_SMP
	bool deadlock_reported = false;
	size_t i = 0;

	/* Acquire order: make the critical section's reads/writes ordered
	 * after the lock acquisition. */
	while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
		ARCH_SPIN_HINT();

#ifdef CONFIG_DEBUG_SPINLOCK
		/*
		 * We need to be careful about particular locks
		 * which are directly used to report deadlocks
		 * via printf() (and recursively other functions).
		 * This concerns especially printf_lock and the
		 * framebuffer lock.
		 *
		 * Any lock whose name is prefixed by "*" will be
		 * ignored by this deadlock detection routine
		 * as this might cause an infinite recursion.
		 * We trust our code that there is no possible deadlock
		 * caused by these locks (except when an exception
		 * is triggered for instance by printf()).
		 *
		 * We encountered false positives caused by very
		 * slow framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printf_lock and the framebuffer lock.
		 */
		if (lock->name[0] == '*')
			continue;

		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%u: looping on spinlock %p:%s, "
			    "caller=%p (%s)\n", CPU->id, lock, lock->name,
			    (void *) CALLER, symtab_fmt_name_lookup(CALLER));
			stack_trace();

			/* Restart the count so another report fires if we
			 * keep spinning past the threshold again. */
			i = 0;
			deadlock_reported = true;
		}
#endif
	}

	/* Avoid compiler warning with debug disabled. */
	(void) i;

	/* Confirm recovery so an earlier "looping" report is not mistaken
	 * for a genuine deadlock. */
	if (deadlock_reported)
		printf("cpu%u: not deadlocked\n", CPU->id);

#endif
}
129
/** Unlock spinlock
 *
 * Releases the lock and re-enables preemption. Must be called by the
 * holder of the lock; with CONFIG_DEBUG_SPINLOCK this is asserted.
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock(spinlock_t *lock)
{
#ifdef CONFIG_SMP
#ifdef CONFIG_DEBUG_SPINLOCK
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);
#endif

	/* Release order: flush the critical section's writes before the
	 * lock becomes available to other CPUs. */
	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
#endif

	preemption_enable();
}
146
147/**
148 * Lock spinlock conditionally. If the spinlock is not available
149 * at the moment, signal failure.
150 *
151 * @param lock Pointer to spinlock_t structure.
152 *
153 * @return true on success.
154 *
155 */
156bool spinlock_trylock(spinlock_t *lock)
157{
158 preemption_disable();
159
160#ifdef CONFIG_SMP
161 bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
162
163 if (!ret)
164 preemption_enable();
165
166 return ret;
167#else
168 return true;
169#endif
170}
171
172/** Find out whether the spinlock is currently locked.
173 *
174 * @param lock Spinlock.
175 * @return True if the spinlock is locked, false otherwise.
176 */
177bool spinlock_locked(spinlock_t *lock)
178{
179#ifdef CONFIG_SMP
180 // NOTE: Atomic flag doesn't support simple atomic read (by design),
181 // so instead we test_and_set and then clear if necessary.
182 // This function is only used inside assert, so we don't need
183 // any preemption_disable/enable here.
184
185 bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
186 if (!ret)
187 atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
188 return ret;
189#else
190 return true;
191#endif
192}
193
194/** @}
195 */
Note: See TracBrowser for help on using the repository browser.