  • kernel/arch/ia32/include/atomic.h (r9d58539 → rb17518e)
}

/** ia32 specific fast spinlock */
NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
…
}

#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
        asm volatile ( \
                prefix " cmpxchgl %[newval], %[ptr]\n" \
                : /* Output operands. */ \
                /* Old/current value is returned in eax. */ \
                [oldval] "=a" (old_val), \
                /* (*ptr) will be read and written to, hence "+" */ \
                [ptr] "+m" (*pptr) \
                : /* Input operands. */ \
                /* Expected value must be in eax. */ \
                [expval] "a" (exp_val), \
                /* The new value may be in any register. */ \
                [newval] "r" (new_val) \
                : "memory" \
        )

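For readers not fluent in cmpxchgl: the instruction compares *pptr with the expected value held in eax and stores new_val into *pptr only when they match; in either case the value previously in *pptr ends up in eax, which the macro passes back through old_val. A rough C-level rendering of those semantics follows (illustration only; the helper name is made up here and, unlike the asm, this version is not atomic):

static inline void *cas_ptr_semantics(void **pptr, void *exp_val, void *new_val)
{
        /* cmpxchgl always yields the current value of *pptr (via eax). */
        void *old_val = *pptr;
        if (old_val == exp_val)
                *pptr = new_val;  /* the store happens only on a match */
        return old_val;
}
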
/** Atomically compares and swaps the pointer at pptr. */
NO_TRACE static inline void * atomic_cas_ptr(void **pptr,
        void *exp_val, void *new_val)
{
        void *old_val;
        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
        return old_val;
}
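A typical caller wraps atomic_cas_ptr() in a retry loop so that a new value is published only if the location has not changed between the read and the swap. A minimal sketch, assuming a hypothetical node_t with a next field and a hypothetical shared list_head:

typedef struct node {
        struct node *next;
} node_t;

static node_t *list_head;  /* hypothetical shared list head */

static void list_push(node_t *item)
{
        void *old;
        do {
                old = list_head;
                item->next = (node_t *) old;
                /* Retry if somebody else changed list_head since we read it. */
        } while (atomic_cas_ptr((void **) &list_head, old, item) != old);
}
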

/** Compare-and-swap of a pointer that is atomic with respect to the local
 * cpu's interrupts.
 *
 * This function is NOT smp safe and is not atomic with respect to other cpus.
 */
NO_TRACE static inline void * atomic_cas_ptr_local(void **pptr,
        void *exp_val, void *new_val)
{
        void *old_val;
        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
        return old_val;
}
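The _local variant is meant for state shared only between thread-level code and interrupt handlers on the same cpu: because cmpxchgl is a single instruction, a local interrupt cannot split the read-modify-write even without the lock prefix. A minimal sketch, assuming a hypothetical per-cpu slot:

static void *percpu_slot;  /* hypothetical: touched only by this cpu and its interrupt handlers */

/* Claim the slot if it is free; returns the previous owner (NULL on success). */
static void *slot_try_claim(void *owner)
{
        return atomic_cas_ptr_local(&percpu_slot, NULL, owner);
}
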

#define _atomic_swap_impl(pptr, new_val) \
({ \
        typeof(*(pptr)) new_in_old_out = new_val; \
        asm volatile ( \
                "xchgl %[val], %[p_ptr]\n" \
                : [val] "+r" (new_in_old_out), \
                  [p_ptr] "+m" (*pptr) \
        ); \
        \
        new_in_old_out; \
})

/*
 * Issuing an xchg instruction always implies lock prefix semantics.
 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
 * in a loop.
 */
#define _atomic_swap_local_impl(pptr, new_val) \
({ \
        typeof(*(pptr)) exp_val; \
        typeof(*(pptr)) old_val; \
        \
        do { \
                exp_val = *pptr; \
                _atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
        } while (old_val != exp_val); \
        \
        old_val; \
})

/** Atomically sets *pptr to val and returns the previous value. */
NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
{
        return _atomic_swap_impl(pptr, val);
}
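An unconditional swap fits cases where the old value has to be consumed exactly once, e.g. taking over a pending item while leaving the slot empty. A minimal sketch, assuming a hypothetical pending_work pointer:

static void *pending_work;  /* hypothetical: filled in by producers */

/* Grab whatever is pending (possibly NULL) and clear the slot atomically. */
static void *take_pending(void)
{
        return atomic_set_return_ptr(&pending_work, NULL);
}
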

/** Sets *pptr to new_val and returns the previous value. NOT smp safe.
 *
 * This function is only atomic with respect to local interrupts and it is
 * NOT atomic with respect to other cpus.
 */
NO_TRACE static inline void * atomic_set_return_ptr_local(
        void **pptr, void *new_val)
{
        return _atomic_swap_local_impl(pptr, new_val);
}

/** Atomically sets *p to val and returns the previous value. */
NO_TRACE static inline native_t atomic_set_return_native_t(
        native_t *p, native_t val)
{
        return _atomic_swap_impl(p, val);
}

/** Sets *p to new_val and returns the previous value. NOT smp safe.
 *
 * This function is only atomic with respect to local interrupts and it is
 * NOT atomic with respect to other cpus.
 */
NO_TRACE static inline native_t atomic_set_return_native_t_local(
        native_t *p, native_t new_val)
{
        return _atomic_swap_local_impl(p, new_val);
}
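The native_t variants provide the same swap for a plain machine word; a common local-only use is collecting flag bits accumulated by interrupt handlers on the current cpu. A minimal sketch, assuming a hypothetical pending_flags word:

static native_t pending_flags;  /* hypothetical: set from local interrupt context */

/* Fetch and clear the accumulated flags; atomic wrt local interrupts only. */
static native_t collect_flags(void)
{
        return atomic_set_return_native_t_local(&pending_flags, 0);
}
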

#undef _atomic_cas_impl
#undef _atomic_swap_impl
#undef _atomic_swap_local_impl

#endif