Changeset 64e9cf4 in mainline for kernel/generic/src/synch/spinlock.c


Ignore:
Timestamp:
2023-02-02T22:23:23Z (2 years ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8addb24a
Parents:
f114d40 (diff), b076dfb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge a few changes to spinlock code

Changed organization of the code a little bit, so that less of
the implementation is crammed in the header.
Also made all functions proper functions instead of macros.

Added a bit of debugging code for checking that we own the spinlock
we're unlocking. It also directly detects recursive locks.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/spinlock.c

    rf114d40 r64e9cf4  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
     3 * Copyright (c) 2023 Jiří Zárevúcky
    34 * All rights reserved.
    45 *
     
    3637 */
    3738
     39#include <arch/asm.h>
    3840#include <synch/spinlock.h>
    3941#include <atomic.h>
     
    4749#include <cpu.h>
    4850
    49 #ifdef CONFIG_SMP
     51#ifndef ARCH_SPIN_HINT
     52#define ARCH_SPIN_HINT() ((void)0)
     53#endif
    5054
    5155/** Initialize spinlock
     
    5660void spinlock_initialize(spinlock_t *lock, const char *name)
    5761{
     62#ifdef CONFIG_SMP
    5863        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    5964#ifdef CONFIG_DEBUG_SPINLOCK
    6065        lock->name = name;
    6166#endif
     67#endif
    6268}
    6369
    64 #ifdef CONFIG_DEBUG_SPINLOCK
    65 
    6670/** Lock spinlock
    67  *
    68  * Lock spinlock.
    69  * This version has limited ability to report
    70  * possible occurrence of deadlock.
    7171 *
    7272 * @param lock Pointer to spinlock_t structure.
    7373 *
    7474 */
    75 void spinlock_lock_debug(spinlock_t *lock)
     75void spinlock_lock(spinlock_t *lock)
    7676{
     77        preemption_disable();
     78
     79#ifdef CONFIG_SMP
     80        bool deadlock_reported = false;
    7781        size_t i = 0;
    78         bool deadlock_reported = false;
    7982
    80         preemption_disable();
    8183        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
     84                ARCH_SPIN_HINT();
     85
     86#ifdef CONFIG_DEBUG_SPINLOCK
    8287                /*
    8388                 * We need to be careful about particular locks
     
    111116                        deadlock_reported = true;
    112117                }
     118#endif
    113119        }
     120
     121        /* Avoid compiler warning with debug disabled. */
     122        (void) i;
    114123
    115124        if (deadlock_reported)
    116125                printf("cpu%u: not deadlocked\n", CPU->id);
     126
     127#endif
    117128}
    118129
    119130/** Unlock spinlock
    120131 *
    121  * Unlock spinlock.
    122  *
    123132 * @param sl Pointer to spinlock_t structure.
    124133 */
    125 void spinlock_unlock_debug(spinlock_t *lock)
     134void spinlock_unlock(spinlock_t *lock)
    126135{
     136#ifdef CONFIG_SMP
     137#ifdef CONFIG_DEBUG_SPINLOCK
    127138        ASSERT_SPINLOCK(spinlock_locked(lock), lock);
     139#endif
    128140
    129141        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
     142#endif
     143
    130144        preemption_enable();
    131145}
    132146
    133 #endif
    134 
    135 /** Lock spinlock conditionally
    136  *
     147/**
    137148 * Lock spinlock conditionally. If the spinlock is not available
    138149 * at the moment, signal failure.
     
    140151 * @param lock Pointer to spinlock_t structure.
    141152 *
    142  * @return Zero on failure, non-zero otherwise.
     153 * @return true on success.
    143154 *
    144155 */
     
    146157{
    147158        preemption_disable();
     159
     160#ifdef CONFIG_SMP
    148161        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    149162
     
    152165
    153166        return ret;
     167#else
     168        return true;
     169#endif
    154170}
    155171
     
    161177bool spinlock_locked(spinlock_t *lock)
    162178{
     179#ifdef CONFIG_SMP
    163180        // NOTE: Atomic flag doesn't support simple atomic read (by design),
    164181        //       so instead we test_and_set and then clear if necessary.
     
    170187                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    171188        return ret;
    172 }
    173 
     189#else
     190        return true;
    174191#endif
    175 
    176 /** Initialize interrupts-disabled spinlock
    177  *
    178  * @param lock IRQ spinlock to be initialized.
    179  * @param name IRQ spinlock name.
    180  *
    181  */
    182 void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
    183 {
    184         spinlock_initialize(&(lock->lock), name);
    185         lock->guard = false;
    186         lock->ipl = 0;
    187 }
    188 
    189 /** Lock interrupts-disabled spinlock
    190  *
    191  * Lock a spinlock which requires disabled interrupts.
    192  *
    193  * @param lock    IRQ spinlock to be locked.
    194  * @param irq_dis If true, disables interrupts before locking the spinlock.
    195  *                If false, interrupts are expected to be already disabled.
    196  *
    197  */
    198 void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
    199 {
    200         if (irq_dis) {
    201                 ipl_t ipl = interrupts_disable();
    202                 spinlock_lock(&(lock->lock));
    203 
    204                 lock->guard = true;
    205                 lock->ipl = ipl;
    206         } else {
    207                 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    208 
    209                 spinlock_lock(&(lock->lock));
    210                 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    211         }
    212 }
    213 
    214 /** Unlock interrupts-disabled spinlock
    215  *
    216  * Unlock a spinlock which requires disabled interrupts.
    217  *
    218  * @param lock    IRQ spinlock to be unlocked.
    219  * @param irq_res If true, interrupts are restored to previously
    220  *                saved interrupt level.
    221  *
    222  */
    223 void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
    224 {
    225         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    226 
    227         if (irq_res) {
    228                 ASSERT_IRQ_SPINLOCK(lock->guard, lock);
    229 
    230                 lock->guard = false;
    231                 ipl_t ipl = lock->ipl;
    232 
    233                 spinlock_unlock(&(lock->lock));
    234                 interrupts_restore(ipl);
    235         } else {
    236                 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    237                 spinlock_unlock(&(lock->lock));
    238         }
    239 }
    240 
    241 /** Lock interrupts-disabled spinlock
    242  *
    243  * Lock an interrupts-disabled spinlock conditionally. If the
    244  * spinlock is not available at the moment, signal failure.
    245  * Interrupts are expected to be already disabled.
    246  *
    247  * @param lock IRQ spinlock to be locked conditionally.
    248  *
    249  * @return Zero on failure, non-zero otherwise.
    250  *
    251  */
    252 bool irq_spinlock_trylock(irq_spinlock_t *lock)
    253 {
    254         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    255         bool ret = spinlock_trylock(&(lock->lock));
    256 
    257         ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
    258         return ret;
    259 }
    260 
    261 /** Pass lock from one interrupts-disabled spinlock to another
    262  *
    263  * Pass lock from one IRQ spinlock to another IRQ spinlock
    264  * without enabling interrupts during the process.
    265  *
    266  * The first IRQ spinlock is supposed to be locked.
    267  *
    268  * @param unlock IRQ spinlock to be unlocked.
    269  * @param lock   IRQ spinlock to be locked.
    270  *
    271  */
    272 void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    273 {
    274         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    275 
    276         /* Pass guard from unlock to lock */
    277         bool guard = unlock->guard;
    278         ipl_t ipl = unlock->ipl;
    279         unlock->guard = false;
    280 
    281         spinlock_unlock(&(unlock->lock));
    282         spinlock_lock(&(lock->lock));
    283 
    284         ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    285 
    286         if (guard) {
    287                 lock->guard = true;
    288                 lock->ipl = ipl;
    289         }
    290 }
    291 
    292 /** Hand-over-hand locking of interrupts-disabled spinlocks
    293  *
    294  * Implement hand-over-hand locking between two interrupts-disabled
    295  * spinlocks without enabling interrupts during the process.
    296  *
    297  * The first IRQ spinlock is supposed to be locked.
    298  *
    299  * @param unlock IRQ spinlock to be unlocked.
    300  * @param lock   IRQ spinlock to be locked.
    301  *
    302  */
    303 void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    304 {
    305         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    306 
    307         spinlock_lock(&(lock->lock));
    308         ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    309 
    310         /* Pass guard from unlock to lock */
    311         if (unlock->guard) {
    312                 lock->guard = true;
    313                 lock->ipl = unlock->ipl;
    314                 unlock->guard = false;
    315         }
    316 
    317         spinlock_unlock(&(unlock->lock));
    318 }
    319 
    320 /** Find out whether the IRQ spinlock is currently locked.
    321  *
    322  * @param lock          IRQ spinlock.
    323  * @return              True if the IRQ spinlock is locked, false otherwise.
    324  */
    325 bool irq_spinlock_locked(irq_spinlock_t *ilock)
    326 {
    327         return spinlock_locked(&ilock->lock);
    328192}
    329193
Note: See TracChangeset for help on using the changeset viewer.