Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/spinlock.c

    r2b264c4 r95d45482  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
    3  * Copyright (c) 2023 Jiří Zárevúcky
    43 * All rights reserved.
    54 *
     
    3736 */
    3837
    39 #include <arch/asm.h>
    4038#include <synch/spinlock.h>
    4139#include <atomic.h>
     
    4947#include <cpu.h>
    5048
    51 #ifndef ARCH_SPIN_HINT
    52 #define ARCH_SPIN_HINT() ((void)0)
    53 #endif
     49#ifdef CONFIG_SMP
    5450
    5551/** Initialize spinlock
     
    6056void spinlock_initialize(spinlock_t *lock, const char *name)
    6157{
    62 #ifdef CONFIG_SMP
    6358        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    6459#ifdef CONFIG_DEBUG_SPINLOCK
    6560        lock->name = name;
    6661#endif
    67 #endif
    68 }
     62}
     63
     64#ifdef CONFIG_DEBUG_SPINLOCK
    6965
    7066/** Lock spinlock
    7167 *
     68 * Lock spinlock.
      69 * This version has limited ability to report
      70 * possible occurrence of deadlock.
     71 *
    7272 * @param lock Pointer to spinlock_t structure.
    7373 *
    7474 */
    75 void spinlock_lock(spinlock_t *lock)
    76 {
     75void spinlock_lock_debug(spinlock_t *lock)
     76{
     77        size_t i = 0;
     78        bool deadlock_reported = false;
     79
    7780        preemption_disable();
    78 
    79 #ifdef CONFIG_SMP
    80         bool deadlock_reported = false;
    81         size_t i = 0;
    82 
    8381        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
    84                 ARCH_SPIN_HINT();
    85 
    86 #ifdef CONFIG_DEBUG_SPINLOCK
    8782                /*
    8883                 * We need to be careful about particular locks
     
    116111                        deadlock_reported = true;
    117112                }
    118 #endif
    119         }
    120 
    121         /* Avoid compiler warning with debug disabled. */
    122         (void) i;
     113        }
    123114
    124115        if (deadlock_reported)
    125116                printf("cpu%u: not deadlocked\n", CPU->id);
     117}
     118
/** Unlock spinlock
 *
 * Debug variant: verifies the lock is actually held before releasing it.
 *
 * @param lock Pointer to spinlock_t structure.
 */
void spinlock_unlock_debug(spinlock_t *lock)
{
	/* Releasing a lock that is not held indicates a serious bug. */
	ASSERT_SPINLOCK(spinlock_locked(lock), lock);

	/* Release ordering publishes all writes made inside the critical section. */
	atomic_flag_clear_explicit(&lock->flag, memory_order_release);
	preemption_enable();
}
    126132
    127133#endif
    128 }
    129 
    130 /** Unlock spinlock
    131  *
    132  * @param sl Pointer to spinlock_t structure.
    133  */
    134 void spinlock_unlock(spinlock_t *lock)
    135 {
    136 #ifdef CONFIG_SMP
    137 #ifdef CONFIG_DEBUG_SPINLOCK
    138         ASSERT_SPINLOCK(spinlock_locked(lock), lock);
    139 #endif
    140 
    141         atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    142 #endif
    143 
    144         preemption_enable();
    145 }
    146 
    147 /**
     134
     135/** Lock spinlock conditionally
     136 *
    148137 * Lock spinlock conditionally. If the spinlock is not available
    149138 * at the moment, signal failure.
     
    151140 * @param lock Pointer to spinlock_t structure.
    152141 *
    153  * @return true on success.
     142 * @return Zero on failure, non-zero otherwise.
    154143 *
    155144 */
     
    157146{
    158147        preemption_disable();
    159 
    160 #ifdef CONFIG_SMP
    161148        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    162149
     
    165152
    166153        return ret;
    167 #else
    168         return true;
    169 #endif
    170154}
    171155
     
    177161bool spinlock_locked(spinlock_t *lock)
    178162{
    179 #ifdef CONFIG_SMP
    180163        // NOTE: Atomic flag doesn't support simple atomic read (by design),
    181164        //       so instead we test_and_set and then clear if necessary.
     
    187170                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    188171        return ret;
    189 #else
    190         return true;
     172}
     173
    191174#endif
     175
     176/** Initialize interrupts-disabled spinlock
     177 *
     178 * @param lock IRQ spinlock to be initialized.
     179 * @param name IRQ spinlock name.
     180 *
     181 */
     182void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
     183{
     184        spinlock_initialize(&(lock->lock), name);
     185        lock->guard = false;
     186        lock->ipl = 0;
     187}
     188
     189/** Lock interrupts-disabled spinlock
     190 *
     191 * Lock a spinlock which requires disabled interrupts.
     192 *
     193 * @param lock    IRQ spinlock to be locked.
     194 * @param irq_dis If true, disables interrupts before locking the spinlock.
     195 *                If false, interrupts are expected to be already disabled.
     196 *
     197 */
     198void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
     199{
     200        if (irq_dis) {
     201                ipl_t ipl = interrupts_disable();
     202                spinlock_lock(&(lock->lock));
     203
     204                lock->guard = true;
     205                lock->ipl = ipl;
     206        } else {
     207                ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
     208
     209                spinlock_lock(&(lock->lock));
     210                ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
     211        }
     212}
     213
     214/** Unlock interrupts-disabled spinlock
     215 *
     216 * Unlock a spinlock which requires disabled interrupts.
     217 *
     218 * @param lock    IRQ spinlock to be unlocked.
     219 * @param irq_res If true, interrupts are restored to previously
     220 *                saved interrupt level.
     221 *
     222 */
     223void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
     224{
     225        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
     226
     227        if (irq_res) {
     228                ASSERT_IRQ_SPINLOCK(lock->guard, lock);
     229
     230                lock->guard = false;
     231                ipl_t ipl = lock->ipl;
     232
     233                spinlock_unlock(&(lock->lock));
     234                interrupts_restore(ipl);
     235        } else {
     236                ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
     237                spinlock_unlock(&(lock->lock));
     238        }
     239}
     240
     241/** Lock interrupts-disabled spinlock
     242 *
     243 * Lock an interrupts-disabled spinlock conditionally. If the
     244 * spinlock is not available at the moment, signal failure.
     245 * Interrupts are expected to be already disabled.
     246 *
     247 * @param lock IRQ spinlock to be locked conditionally.
     248 *
     249 * @return Zero on failure, non-zero otherwise.
     250 *
     251 */
     252bool irq_spinlock_trylock(irq_spinlock_t *lock)
     253{
     254        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
     255        bool ret = spinlock_trylock(&(lock->lock));
     256
     257        ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
     258        return ret;
     259}
     260
     261/** Pass lock from one interrupts-disabled spinlock to another
     262 *
     263 * Pass lock from one IRQ spinlock to another IRQ spinlock
     264 * without enabling interrupts during the process.
     265 *
     266 * The first IRQ spinlock is supposed to be locked.
     267 *
     268 * @param unlock IRQ spinlock to be unlocked.
     269 * @param lock   IRQ spinlock to be locked.
     270 *
     271 */
     272void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
     273{
     274        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
     275
     276        /* Pass guard from unlock to lock */
     277        bool guard = unlock->guard;
     278        ipl_t ipl = unlock->ipl;
     279        unlock->guard = false;
     280
     281        spinlock_unlock(&(unlock->lock));
     282        spinlock_lock(&(lock->lock));
     283
     284        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
     285
     286        if (guard) {
     287                lock->guard = true;
     288                lock->ipl = ipl;
     289        }
     290}
     291
/** Hand-over-hand locking of interrupts-disabled spinlocks
 *
 * Implement hand-over-hand locking between two interrupts-disabled
 * spinlocks without enabling interrupts during the process.
 *
 * The first IRQ spinlock is supposed to be locked.
 *
 * @param unlock IRQ spinlock to be unlocked.
 * @param lock   IRQ spinlock to be locked.
 */
void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);

	/* Acquire the new lock while still holding the old one (hand-over-hand). */
	spinlock_lock(&(lock->lock));
	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);

	/* Pass guard (saved interrupt level) from unlock to lock. */
	if (unlock->guard) {
		lock->guard = true;
		lock->ipl = unlock->ipl;
		unlock->guard = false;
	}

	/* Only now is it safe to drop the old lock. */
	spinlock_unlock(&(unlock->lock));
}
     319
     320/** Find out whether the IRQ spinlock is currently locked.
     321 *
     322 * @param lock          IRQ spinlock.
     323 * @return              True if the IRQ spinlock is locked, false otherwise.
     324 */
     325bool irq_spinlock_locked(irq_spinlock_t *ilock)
     326{
     327        return spinlock_locked(&ilock->lock);
    192328}
    193329
Note: See TracChangeset for help on using the changeset viewer.