Changeset b076dfb in mainline
- Timestamp: 2023-02-02T21:58:36Z (22 months ago)
- Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 64e9cf4
- Parents: 2b264c4
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-01-20 19:05:09)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-02 21:58:36)
- Location: kernel/generic
- Files: 3 edited
kernel/generic/include/arch.h
r2b264c4 → rb076dfb

 typedef struct {
 	size_t preemption;     /**< Preemption disabled counter and flag. */
+	size_t mutex_locks;
 	struct thread *thread; /**< Current thread. */
 	struct task *task;     /**< Current task. */
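The new mutex_locks field gives CURRENT a running count of debug-tracked spinlocks held by the current context; it is incremented and decremented by the claim()/unclaim() helpers added in irq_spinlock.c below. As a purely illustrative sketch that is not part of this changeset, such a counter could back an assertion that no tracked lock is still held at a point where blocking is allowed; assert_no_tracked_locks_held() is a hypothetical name:

/* Hypothetical debug helper, not from this changeset. Relies only on
 * CURRENT (arch.h) and the mutex_locks counter maintained by the new
 * claim() and unclaim() helpers in irq_spinlock.c. */
#include <assert.h>
#include <arch.h>

static inline void assert_no_tracked_locks_held(void)
{
	/* Nonzero means some irq_spinlock claimed by this context was
	 * never unclaimed, i.e. it is still held. */
	assert(CURRENT->mutex_locks == 0);
}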
kernel/generic/include/synch/spinlock.h
r2b264c4 → rb076dfb

 
 typedef struct {
-	spinlock_t lock;  /**< Spinlock */
-	bool guard;       /**< Flag whether ipl is valid */
-	ipl_t ipl;        /**< Original interrupt level */
+	spinlock_t lock;              /**< Spinlock */
+	bool guard;                   /**< Flag whether ipl is valid */
+	ipl_t ipl;                    /**< Original interrupt level */
+#ifdef CONFIG_DEBUG_SPINLOCK
+	_Atomic(struct cpu *) owner;  /**< Which cpu currently owns this lock */
+#endif
 } irq_spinlock_t;
 
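The owner field is only ever compared against the identity of the CPU doing the check, so the relaxed atomic accesses used in irq_spinlock.c below are sufficient: atomicity prevents torn reads, and no cross-CPU ordering is needed for a "do I own this?" query. Below is a standalone C11 mock of that pattern; all names in it (fake_cpu, dbg_lock_t, owned_by, and so on) are illustrative stand-ins, not HelenOS APIs:

/* Standalone user-space mock of the ownership-tagging pattern. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_cpu { int id; };

typedef struct {
	_Atomic(struct fake_cpu *) owner;   /* mirrors the new owner field */
} dbg_lock_t;

static bool owned_by(dbg_lock_t *l, struct fake_cpu *me)
{
	/* Relaxed is enough: the value is only ever compared against the
	 * caller's own identity, which the caller itself stored. */
	return atomic_load_explicit(&l->owner, memory_order_relaxed) == me;
}

static void claim(dbg_lock_t *l, struct fake_cpu *me)
{
	atomic_store_explicit(&l->owner, me, memory_order_relaxed);
}

static void unclaim(dbg_lock_t *l)
{
	atomic_store_explicit(&l->owner, NULL, memory_order_relaxed);
}

int main(void)
{
	struct fake_cpu cpu0 = { .id = 0 };
	dbg_lock_t lock = { .owner = NULL };

	claim(&lock, &cpu0);
	bool mine = owned_by(&lock, &cpu0);   /* true on this "CPU" */
	unclaim(&lock);
	return mine ? 0 : 1;
}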
kernel/generic/src/synch/irq_spinlock.c
r2b264c4 → rb076dfb

 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
… …
 #include <synch/spinlock.h>
 
+#include <cpu.h>
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+#define CPU_OWNER ((CPU == NULL) ? (cpu_t *) UINTPTR_MAX : CPU)
+
+static inline bool owned_by_me(irq_spinlock_t *lock)
+{
+	return atomic_load_explicit(&lock->owner, memory_order_relaxed) == CPU_OWNER;
+}
+
+static inline bool not_owned_by_me(irq_spinlock_t *lock)
+{
+	return !owned_by_me(lock);
+}
+
+static inline void claim(irq_spinlock_t *lock)
+{
+	cpu_t *cpu = CPU_OWNER;
+	atomic_store_explicit(&lock->owner, cpu, memory_order_relaxed);
+	CURRENT->mutex_locks++;
+}
+
+static inline void unclaim(irq_spinlock_t *lock)
+{
+	CURRENT->mutex_locks--;
+	atomic_store_explicit(&lock->owner, NULL, memory_order_relaxed);
+}
+
+#else
+
+static inline bool owned_by_me(irq_spinlock_t *lock)
+{
+	return true;
+}
+
+static inline bool not_owned_by_me(irq_spinlock_t *lock)
+{
+	return true;
+}
+
+static inline void claim(irq_spinlock_t *lock)
+{
+}
+
+static inline void unclaim(irq_spinlock_t *lock)
+{
+}
+
+#endif
+
 /** Initialize interrupts-disabled spinlock
  *
… …
 void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
 {
-	spinlock_initialize(&(lock->lock), name);
-	lock->guard = false;
-	lock->ipl = 0;
+	*lock = (irq_spinlock_t) IRQ_SPINLOCK_INITIALIZER(name);
 }
 
… …
 void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
 {
+	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);
+
 	if (irq_dis) {
 		ipl_t ipl = interrupts_disable();
… …
 		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
 	}
+
+	claim(lock);
 }
 
… …
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
+	ASSERT_IRQ_SPINLOCK(owned_by_me(lock), lock);
+
+	unclaim(lock);
 
 	if (irq_res) {
… …
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
 	bool ret = spinlock_trylock(&(lock->lock));
+	if (ret)
+		claim(lock);
 
 	ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
… …
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
+	ASSERT_IRQ_SPINLOCK(owned_by_me(unlock), unlock);
+	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);
 
 	/* Pass guard from unlock to lock */
… …
 	unlock->guard = false;
 
+	unclaim(unlock);
+
 	spinlock_unlock(&(unlock->lock));
 	spinlock_lock(&(lock->lock));
+
+	claim(lock);
 
 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
… …
 {
 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
+	ASSERT_IRQ_SPINLOCK(owned_by_me(unlock), unlock);
+	ASSERT_IRQ_SPINLOCK(not_owned_by_me(lock), lock);
 
 	spinlock_lock(&(lock->lock));
… …
 	}
 
+	claim(lock);
+	unclaim(unlock);
+
 	spinlock_unlock(&(unlock->lock));
 }
… …
  *
  * @return True if the IRQ spinlock is locked, false otherwise.
  */
-bool irq_spinlock_locked(irq_spinlock_t *ilock)
-{
-	return spinlock_locked(&ilock->lock);
+bool irq_spinlock_locked(irq_spinlock_t *lock)
+{
+	return owned_by_me(lock) && spinlock_locked(&lock->lock);
 }
 
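With ownership tracking in place, irq_spinlock_locked() reports a lock as held only when the calling CPU actually owns it (in CONFIG_DEBUG_SPINLOCK builds), which makes it usable in "caller must hold this lock" checks without false positives from other CPUs holding the same lock. A hypothetical usage sketch, not taken from this changeset; example_lock and example_update() are invented names, and the lock is assumed to be set up elsewhere with irq_spinlock_initialize():

/* Illustrative only: example_lock is assumed to be initialized elsewhere
 * with irq_spinlock_initialize(). */
static irq_spinlock_t example_lock;

static void example_update(void)
{
	/* The caller is expected to hold example_lock on this CPU. */
	ASSERT_IRQ_SPINLOCK(irq_spinlock_locked(&example_lock), &example_lock);

	/* ... modify state protected by example_lock ... */
}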