Changeset 64e9cf4 in mainline


Ignore:
Timestamp:
2023-02-02T22:23:23Z (15 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8addb24a
Parents:
f114d40 (diff), b076dfb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge a few changes to spinlock code

Changed organization of the code a little bit, so that less of
the implementation is crammed in the header.
Also made all functions proper functions instead of macros.

Added a bit of debugging code for checking that we own the spinlock
we're unlocking; this also directly detects recursive locks.

Location:
kernel
Files:
1 added
13 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/include/arch/asm.h

    rf114d40 r64e9cf4  
    5858        }
    5959}
     60
     61#define ARCH_SPIN_HINT() asm volatile ("pause\n")
    6062
    6163/** Byte from port
  • kernel/arch/arm32/include/arch/asm.h

    rf114d40 r64e9cf4  
    6565}
    6666
     67#ifdef PROCESSOR_ARCH_armv7_a
     68#define ARCH_SPIN_HINT() asm volatile ("yield")
     69#endif
     70
    6771_NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t v)
    6872{
  • kernel/arch/arm64/include/arch/asm.h

    rf114d40 r64e9cf4  
    6060                ;
    6161}
     62
     63#define ARCH_SPIN_HINT() asm volatile ("yield")
    6264
    6365/** Output byte to port.
  • kernel/arch/ia32/include/arch/asm.h

    rf114d40 r64e9cf4  
    6363        );
    6464}
     65
     66#define ARCH_SPIN_HINT() asm volatile ("pause\n")
    6567
    6668#define GEN_READ_REG(reg) _NO_TRACE static inline sysarg_t read_ ##reg (void) \
  • kernel/arch/mips32/src/mips32.c

    rf114d40 r64e9cf4  
    4141#include <str.h>
    4242#include <mem.h>
     43#include <preemption.h>
    4344#include <userspace.h>
    4445#include <stdbool.h>
  • kernel/arch/ppc32/src/mm/frame.c

    rf114d40 r64e9cf4  
    3333 */
    3434
     35#include <arch/asm.h>
    3536#include <arch/boot/boot.h>
    3637#include <arch/mm/frame.h>
  • kernel/generic/include/arch.h

    rf114d40 r64e9cf4  
    7575typedef struct {
    7676        size_t preemption;      /**< Preemption disabled counter and flag. */
     77        size_t mutex_locks;
    7778        struct thread *thread;  /**< Current thread. */
    7879        struct task *task;      /**< Current task. */
  • kernel/generic/include/mm/tlb.h

    rf114d40 r64e9cf4  
    3636#define KERN_TLB_H_
    3737
     38#include <arch/asm.h>
    3839#include <arch/mm/asid.h>
    3940#include <typedefs.h>
  • kernel/generic/include/synch/spinlock.h

    rf114d40 r64e9cf4  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
     3 * Copyright (c) 2023 Jiří Zárevúcky
    34 * All rights reserved.
    45 *
     
    3637#define KERN_SPINLOCK_H_
    3738
    38 #include <assert.h>
    3939#include <stdatomic.h>
    4040#include <stdbool.h>
    41 #include <preemption.h>
    42 #include <arch/asm.h>
    4341
    44 #ifdef CONFIG_SMP
     42#include <arch/types.h>
     43#include <assert.h>
    4544
    46 typedef struct spinlock {
    47         atomic_flag flag;
     45#define DEADLOCK_THRESHOLD  100000000
    4846
    49 #ifdef CONFIG_DEBUG_SPINLOCK
    50         const char *name;
    51 #endif /* CONFIG_DEBUG_SPINLOCK */
    52 } spinlock_t;
    53 
    54 /*
    55  * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks,
    56  * where the lock gets initialized in run time.
    57  */
    58 #define SPINLOCK_DECLARE(lock_name)  spinlock_t lock_name
    59 #define SPINLOCK_EXTERN(lock_name)   extern spinlock_t lock_name
    60 
    61 /*
    62  * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used
    63  * for statically allocated spinlocks. They declare (either as global
    64  * or static) symbol and initialize the lock.
    65  */
    66 #ifdef CONFIG_DEBUG_SPINLOCK
    67 
    68 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
    69         spinlock_t lock_name = { \
    70                 .name = desc_name, \
    71                 .flag = ATOMIC_FLAG_INIT \
    72         }
    73 
    74 #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
    75         static spinlock_t lock_name = { \
    76                 .name = desc_name, \
    77                 .flag = ATOMIC_FLAG_INIT \
    78         }
    79 
    80 #define ASSERT_SPINLOCK(expr, lock) \
    81         assert_verbose(expr, (lock)->name)
    82 
    83 #define spinlock_lock(lock)    spinlock_lock_debug((lock))
    84 #define spinlock_unlock(lock)  spinlock_unlock_debug((lock))
    85 
    86 #else /* CONFIG_DEBUG_SPINLOCK */
    87 
    88 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
    89         spinlock_t lock_name = { \
    90                 .flag = ATOMIC_FLAG_INIT \
    91         }
    92 
    93 #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
    94         static spinlock_t lock_name = { \
    95                 .flag = ATOMIC_FLAG_INIT \
    96         }
    97 
    98 #define ASSERT_SPINLOCK(expr, lock) \
    99         assert(expr)
    100 
    101 /** Acquire spinlock
    102  *
    103  * @param lock  Pointer to spinlock_t structure.
    104  */
    105 _NO_TRACE static inline void spinlock_lock(spinlock_t *lock)
    106 {
    107         preemption_disable();
    108         while (atomic_flag_test_and_set_explicit(&lock->flag,
    109             memory_order_acquire))
    110                 ;
    111 }
    112 
    113 /** Release spinlock
    114  *
    115  * @param lock  Pointer to spinlock_t structure.
    116  */
    117 _NO_TRACE static inline void spinlock_unlock(spinlock_t *lock)
    118 {
    119         atomic_flag_clear_explicit(&lock->flag, memory_order_release);
    120         preemption_enable();
    121 }
    122 
    123 #endif /* CONFIG_DEBUG_SPINLOCK */
    124 
    125 #define SPINLOCK_INITIALIZE(lock_name) \
    126         SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)
    127 
    128 #define SPINLOCK_STATIC_INITIALIZE(lock_name) \
    129         SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
    130 
    131 extern void spinlock_initialize(spinlock_t *, const char *);
    132 extern bool spinlock_trylock(spinlock_t *);
    133 extern void spinlock_lock_debug(spinlock_t *);
    134 extern void spinlock_unlock_debug(spinlock_t *);
    135 extern bool spinlock_locked(spinlock_t *);
    136 
    137 #ifdef CONFIG_DEBUG_SPINLOCK
     47#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
    13848
    13949#include <log.h>
    140 
    141 #define DEADLOCK_THRESHOLD  100000000
    14250
    14351#define DEADLOCK_PROBE_INIT(pname)  size_t pname = 0
     
    15967#endif /* CONFIG_DEBUG_SPINLOCK */
    16068
    161 #else /* CONFIG_SMP */
     69typedef struct spinlock {
     70#ifdef CONFIG_SMP
     71        atomic_flag flag;
    16272
    163 /* On UP systems, spinlocks are effectively left out. */
     73#ifdef CONFIG_DEBUG_SPINLOCK
     74        const char *name;
     75#endif /* CONFIG_DEBUG_SPINLOCK */
     76#endif
     77} spinlock_t;
    16478
    165 /* Allow the use of spinlock_t as an incomplete type. */
    166 typedef struct spinlock spinlock_t;
     79/*
     80 * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks,
     81 * where the lock gets initialized in run time.
     82 */
     83#define SPINLOCK_DECLARE(lock_name)  spinlock_t lock_name
     84#define SPINLOCK_EXTERN(lock_name)   extern spinlock_t lock_name
    16785
    168 #define SPINLOCK_DECLARE(name)
    169 #define SPINLOCK_EXTERN(name)
     86#ifdef CONFIG_SMP
     87#ifdef CONFIG_DEBUG_SPINLOCK
     88#define SPINLOCK_INITIALIZER(desc_name) { .name = (desc_name), .flag = ATOMIC_FLAG_INIT }
     89#else
     90#define SPINLOCK_INITIALIZER(desc_name) { .flag = ATOMIC_FLAG_INIT }
     91#endif
     92#else
     93#define SPINLOCK_INITIALIZER(desc_name) {}
     94#endif
    17095
    171 #define SPINLOCK_INITIALIZE(name)
    172 #define SPINLOCK_STATIC_INITIALIZE(name)
     96/*
     97 * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used
     98 * for statically allocated spinlocks. They declare (either as global
     99 * or static) symbol and initialize the lock.
     100 */
     101#define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
     102        spinlock_t lock_name = SPINLOCK_INITIALIZER(desc_name)
    173103
    174 #define SPINLOCK_INITIALIZE_NAME(name, desc_name)
    175 #define SPINLOCK_STATIC_INITIALIZE_NAME(name, desc_name)
     104#define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
     105        static spinlock_t lock_name = SPINLOCK_INITIALIZER(desc_name)
    176106
    177 #define ASSERT_SPINLOCK(expr, lock)  assert(expr)
     107#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
     108#define ASSERT_SPINLOCK(expr, lock) assert_verbose(expr, (lock)->name)
     109#else /* CONFIG_DEBUG_SPINLOCK */
     110#define ASSERT_SPINLOCK(expr, lock) assert(expr)
     111#endif /* CONFIG_DEBUG_SPINLOCK */
    178112
    179 #define spinlock_initialize(lock, name)
     113#define SPINLOCK_INITIALIZE(lock_name) \
     114        SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)
    180115
    181 #define spinlock_lock(lock)     preemption_disable()
    182 #define spinlock_trylock(lock)  ({ preemption_disable(); 1; })
    183 #define spinlock_unlock(lock)   preemption_enable()
    184 #define spinlock_locked(lock)   1
    185 #define spinlock_unlocked(lock) 1
     116#define SPINLOCK_STATIC_INITIALIZE(lock_name) \
     117        SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
    186118
    187 #define DEADLOCK_PROBE_INIT(pname)
    188 #define DEADLOCK_PROBE(pname, value)
    189 
    190 #endif /* CONFIG_SMP */
     119extern void spinlock_initialize(spinlock_t *, const char *);
     120extern bool spinlock_trylock(spinlock_t *);
     121extern void spinlock_lock(spinlock_t *);
     122extern void spinlock_unlock(spinlock_t *);
     123extern bool spinlock_locked(spinlock_t *);
    191124
    192125typedef struct {
    193         SPINLOCK_DECLARE(lock);  /**< Spinlock */
    194         bool guard;              /**< Flag whether ipl is valid */
    195         ipl_t ipl;               /**< Original interrupt level */
     126        spinlock_t lock;              /**< Spinlock */
     127        bool guard;                   /**< Flag whether ipl is valid */
     128        ipl_t ipl;                    /**< Original interrupt level */
     129#ifdef CONFIG_DEBUG_SPINLOCK
     130        _Atomic(struct cpu *) owner;  /**< Which cpu currently owns this lock */
     131#endif
    196132} irq_spinlock_t;
    197133
     
    199135#define IRQ_SPINLOCK_EXTERN(lock_name)   extern irq_spinlock_t lock_name
    200136
    201 #ifdef CONFIG_SMP
    202 
    203137#define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
    204138        ASSERT_SPINLOCK(expr, &((irq_lock)->lock))
     139
     140#define IRQ_SPINLOCK_INITIALIZER(desc_name) \
     141        { \
     142                .lock = SPINLOCK_INITIALIZER(desc_name), \
     143                .guard = false, \
     144                .ipl = 0, \
     145        }
    205146
    206147/*
     
    209150 * as global or static symbol) and initialize the lock.
    210151 */
    211 #ifdef CONFIG_DEBUG_SPINLOCK
    212 
    213152#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
    214         irq_spinlock_t lock_name = { \
    215                 .lock = { \
    216                         .name = desc_name, \
    217                         .flag = ATOMIC_FLAG_INIT \
    218                 }, \
    219                 .guard = false, \
    220                 .ipl = 0 \
    221         }
     153        irq_spinlock_t lock_name = IRQ_SPINLOCK_INITIALIZER(desc_name)
    222154
    223155#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
    224         static irq_spinlock_t lock_name = { \
    225                 .lock = { \
    226                         .name = desc_name, \
    227                         .flag = ATOMIC_FLAG_INIT \
    228                 }, \
    229                 .guard = false, \
    230                 .ipl = 0 \
    231         }
    232 
    233 #else /* CONFIG_DEBUG_SPINLOCK */
    234 
    235 #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
    236         irq_spinlock_t lock_name = { \
    237                 .lock = { \
    238                         .flag = ATOMIC_FLAG_INIT \
    239                 }, \
    240                 .guard = false, \
    241                 .ipl = 0 \
    242         }
    243 
    244 #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
    245         static irq_spinlock_t lock_name = { \
    246                 .lock = { \
    247                         .flag = ATOMIC_FLAG_INIT \
    248                 }, \
    249                 .guard = false, \
    250                 .ipl = 0 \
    251         }
    252 
    253 #endif /* CONFIG_DEBUG_SPINLOCK */
    254 
    255 #else /* CONFIG_SMP */
    256 
    257 /*
    258  * Since the spinlocks are void on UP systems, we also need
    259  * to have a special variant of interrupts-disabled spinlock
    260  * macros which take this into account.
    261  */
    262 
    263 #define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
    264         ASSERT_SPINLOCK(expr, NULL)
    265 
    266 #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
    267         irq_spinlock_t lock_name = { \
    268                 .guard = false, \
    269                 .ipl = 0 \
    270         }
    271 
    272 #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
    273         static irq_spinlock_t lock_name = { \
    274                 .guard = false, \
    275                 .ipl = 0 \
    276         }
    277 
    278 #endif /* CONFIG_SMP */
     156        static irq_spinlock_t lock_name = IRQ_SPINLOCK_INITIALIZER(desc_name)
    279157
    280158#define IRQ_SPINLOCK_INITIALIZE(lock_name) \
  • kernel/generic/meson.build

    rf114d40 r64e9cf4  
    101101        'src/smp/smp.c',
    102102        'src/synch/condvar.c',
     103        'src/synch/irq_spinlock.c',
    103104        'src/synch/mutex.c',
    104105        'src/synch/semaphore.c',
  • kernel/generic/src/synch/spinlock.c

    rf114d40 r64e9cf4  
    11/*
    22 * Copyright (c) 2001-2004 Jakub Jermar
     3 * Copyright (c) 2023 Jiří Zárevúcky
    34 * All rights reserved.
    45 *
     
    3637 */
    3738
     39#include <arch/asm.h>
    3840#include <synch/spinlock.h>
    3941#include <atomic.h>
     
    4749#include <cpu.h>
    4850
    49 #ifdef CONFIG_SMP
     51#ifndef ARCH_SPIN_HINT
     52#define ARCH_SPIN_HINT() ((void)0)
     53#endif
    5054
    5155/** Initialize spinlock
     
    5660void spinlock_initialize(spinlock_t *lock, const char *name)
    5761{
     62#ifdef CONFIG_SMP
    5863        atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    5964#ifdef CONFIG_DEBUG_SPINLOCK
    6065        lock->name = name;
    6166#endif
     67#endif
    6268}
    6369
    64 #ifdef CONFIG_DEBUG_SPINLOCK
    65 
    6670/** Lock spinlock
    67  *
    68  * Lock spinlock.
    68  * This version has limited ability to report
    69  * possible occurrence of deadlock
    7171 *
    7272 * @param lock Pointer to spinlock_t structure.
    7373 *
    7474 */
    75 void spinlock_lock_debug(spinlock_t *lock)
     75void spinlock_lock(spinlock_t *lock)
    7676{
     77        preemption_disable();
     78
     79#ifdef CONFIG_SMP
     80        bool deadlock_reported = false;
    7781        size_t i = 0;
    78         bool deadlock_reported = false;
    7982
    80         preemption_disable();
    8183        while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
     84                ARCH_SPIN_HINT();
     85
     86#ifdef CONFIG_DEBUG_SPINLOCK
    8287                /*
    8388                 * We need to be careful about particular locks
     
    111116                        deadlock_reported = true;
    112117                }
     118#endif
    113119        }
     120
     121        /* Avoid compiler warning with debug disabled. */
     122        (void) i;
    114123
    115124        if (deadlock_reported)
    116125                printf("cpu%u: not deadlocked\n", CPU->id);
     126
     127#endif
    117128}
    118129
    119130/** Unlock spinlock
    120131 *
    121  * Unlock spinlock.
    122  *
    123132 * @param sl Pointer to spinlock_t structure.
    124133 */
    125 void spinlock_unlock_debug(spinlock_t *lock)
     134void spinlock_unlock(spinlock_t *lock)
    126135{
     136#ifdef CONFIG_SMP
     137#ifdef CONFIG_DEBUG_SPINLOCK
    127138        ASSERT_SPINLOCK(spinlock_locked(lock), lock);
     139#endif
    128140
    129141        atomic_flag_clear_explicit(&lock->flag, memory_order_release);
     142#endif
     143
    130144        preemption_enable();
    131145}
    132146
    133 #endif
    134 
    135 /** Lock spinlock conditionally
    136  *
     147/**
    137148 * Lock spinlock conditionally. If the spinlock is not available
    138149 * at the moment, signal failure.
     
    140151 * @param lock Pointer to spinlock_t structure.
    141152 *
    142  * @return Zero on failure, non-zero otherwise.
     153 * @return true on success.
    143154 *
    144155 */
     
    146157{
    147158        preemption_disable();
     159
     160#ifdef CONFIG_SMP
    148161        bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
    149162
     
    152165
    153166        return ret;
     167#else
     168        return true;
     169#endif
    154170}
    155171
     
    161177bool spinlock_locked(spinlock_t *lock)
    162178{
     179#ifdef CONFIG_SMP
    163180        // NOTE: Atomic flag doesn't support simple atomic read (by design),
    164181        //       so instead we test_and_set and then clear if necessary.
     
    170187                atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
    171188        return ret;
    172 }
    173 
     189#else
     190        return true;
    174191#endif
    175 
    176 /** Initialize interrupts-disabled spinlock
    177  *
    178  * @param lock IRQ spinlock to be initialized.
    179  * @param name IRQ spinlock name.
    180  *
    181  */
    182 void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
    183 {
    184         spinlock_initialize(&(lock->lock), name);
    185         lock->guard = false;
    186         lock->ipl = 0;
    187 }
    188 
    189 /** Lock interrupts-disabled spinlock
    190  *
    191  * Lock a spinlock which requires disabled interrupts.
    192  *
    193  * @param lock    IRQ spinlock to be locked.
    194  * @param irq_dis If true, disables interrupts before locking the spinlock.
    195  *                If false, interrupts are expected to be already disabled.
    196  *
    197  */
    198 void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
    199 {
    200         if (irq_dis) {
    201                 ipl_t ipl = interrupts_disable();
    202                 spinlock_lock(&(lock->lock));
    203 
    204                 lock->guard = true;
    205                 lock->ipl = ipl;
    206         } else {
    207                 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    208 
    209                 spinlock_lock(&(lock->lock));
    210                 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    211         }
    212 }
    213 
    214 /** Unlock interrupts-disabled spinlock
    215  *
    216  * Unlock a spinlock which requires disabled interrupts.
    217  *
    218  * @param lock    IRQ spinlock to be unlocked.
    219  * @param irq_res If true, interrupts are restored to previously
    220  *                saved interrupt level.
    221  *
    222  */
    223 void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
    224 {
    225         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    226 
    227         if (irq_res) {
    228                 ASSERT_IRQ_SPINLOCK(lock->guard, lock);
    229 
    230                 lock->guard = false;
    231                 ipl_t ipl = lock->ipl;
    232 
    233                 spinlock_unlock(&(lock->lock));
    234                 interrupts_restore(ipl);
    235         } else {
    236                 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    237                 spinlock_unlock(&(lock->lock));
    238         }
    239 }
    240 
    241 /** Lock interrupts-disabled spinlock
    242  *
    243  * Lock an interrupts-disabled spinlock conditionally. If the
    244  * spinlock is not available at the moment, signal failure.
    245  * Interrupts are expected to be already disabled.
    246  *
    247  * @param lock IRQ spinlock to be locked conditionally.
    248  *
    249  * @return Zero on failure, non-zero otherwise.
    250  *
    251  */
    252 bool irq_spinlock_trylock(irq_spinlock_t *lock)
    253 {
    254         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
    255         bool ret = spinlock_trylock(&(lock->lock));
    256 
    257         ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);
    258         return ret;
    259 }
    260 
    261 /** Pass lock from one interrupts-disabled spinlock to another
    262  *
    263  * Pass lock from one IRQ spinlock to another IRQ spinlock
    264  * without enabling interrupts during the process.
    265  *
    266  * The first IRQ spinlock is supposed to be locked.
    267  *
    268  * @param unlock IRQ spinlock to be unlocked.
    269  * @param lock   IRQ spinlock to be locked.
    270  *
    271  */
    272 void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    273 {
    274         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    275 
    276         /* Pass guard from unlock to lock */
    277         bool guard = unlock->guard;
    278         ipl_t ipl = unlock->ipl;
    279         unlock->guard = false;
    280 
    281         spinlock_unlock(&(unlock->lock));
    282         spinlock_lock(&(lock->lock));
    283 
    284         ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    285 
    286         if (guard) {
    287                 lock->guard = true;
    288                 lock->ipl = ipl;
    289         }
    290 }
    291 
    292 /** Hand-over-hand locking of interrupts-disabled spinlocks
    293  *
    294  * Implement hand-over-hand locking between two interrupts-disabled
    295  * spinlocks without enabling interrupts during the process.
    296  *
    297  * The first IRQ spinlock is supposed to be locked.
    298  *
    299  * @param unlock IRQ spinlock to be unlocked.
    300  * @param lock   IRQ spinlock to be locked.
    301  *
    302  */
    303 void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
    304 {
    305         ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
    306 
    307         spinlock_lock(&(lock->lock));
    308         ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
    309 
    310         /* Pass guard from unlock to lock */
    311         if (unlock->guard) {
    312                 lock->guard = true;
    313                 lock->ipl = unlock->ipl;
    314                 unlock->guard = false;
    315         }
    316 
    317         spinlock_unlock(&(unlock->lock));
    318 }
    319 
    320 /** Find out whether the IRQ spinlock is currently locked.
    321  *
    322  * @param lock          IRQ spinlock.
    323  * @return              True if the IRQ spinlock is locked, false otherwise.
    324  */
    325 bool irq_spinlock_locked(irq_spinlock_t *ilock)
    326 {
    327         return spinlock_locked(&ilock->lock);
    328192}
    329193
  • kernel/generic/src/synch/waitq.c

    rf114d40 r64e9cf4  
    4848#include <synch/waitq.h>
    4949#include <synch/spinlock.h>
     50#include <preemption.h>
    5051#include <proc/thread.h>
    5152#include <proc/scheduler.h>
  • kernel/generic/src/time/clock.c

    rf114d40 r64e9cf4  
    5959#include <ddi/ddi.h>
    6060#include <arch/cycle.h>
     61#include <preemption.h>
    6162
    6263/* Pointer to variable with uptime */
Note: See TracChangeset for help on using the changeset viewer.