Fork us on GitHub Follow us on Facebook Follow us on Twitter

Changeset b17518e in mainline


Ignore:
Timestamp:
2012-08-05T01:18:21Z (9 years ago)
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master
Children:
bc216a0
Parents:
f1c7755
Message:

Renamed atomic_swap_* to atomic_set_return_* and added a local-CPU native_t variant.

Location:
kernel
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/include/atomic.h

    rf1c7755 rb17518e  
    141141
    142142
    143 #define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
     143#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
    144144        asm volatile ( \
    145145                prefix " cmpxchgq %[newval], %[ptr]\n" \
     
    162162{
    163163        void *old_val;
    164         _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
     164        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
    165165        return old_val;
    166166}
     
    174174{
    175175        void *old_val;
    176         _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
     176        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
    177177        return old_val;
    178178}
    179179
    180 /** Atomicaly sets *ptr to new_val and returns the previous value. */
    181 NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
    182 {
    183         void *new_in_old_out = new_val;
    184        
    185         asm volatile (
    186                 "xchgq %[val], %[pptr]\n"
    187                 : [val] "+r" (new_in_old_out),
    188                   [pptr] "+m" (*pptr)
    189         );
    190        
    191         return new_in_old_out;
     180
     181#define _atomic_swap_impl(pptr, new_val) \
     182({ \
     183        typeof(*(pptr)) new_in_old_out = new_val; \
     184        asm volatile ( \
     185                "xchgq %[val], %[p_ptr]\n" \
     186                : [val] "+r" (new_in_old_out), \
     187                  [p_ptr] "+m" (*pptr) \
     188        ); \
     189        \
     190        new_in_old_out; \
     191})
     192
     193/*
     194 * Issuing a xchg instruction always implies lock prefix semantics.
     195 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
     196 * in a loop.
     197 */
     198#define _atomic_swap_local_impl(pptr, new_val) \
     199({ \
     200        typeof(*(pptr)) exp_val; \
     201        typeof(*(pptr)) old_val; \
     202        \
     203        do { \
     204                exp_val = *pptr; \
     205                _atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
     206        } while (old_val != exp_val); \
     207        \
     208        old_val; \
     209})
     210
     211
     212/** Atomicaly sets *ptr to val and returns the previous value. */
     213NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
     214{
     215        return _atomic_swap_impl(pptr, val);
    192216}
    193217
     
    197221 * NOT atomic wrt to other cpus.
    198222 */
    199 NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
    200 {
    201         /*
    202          * Issuing a xchg instruction always implies lock prefix semantics.
    203          * Therefore, it is cheaper to use a cmpxchg without a lock prefix
    204          * in a loop.
    205          */
    206         void *exp_val;
    207         void *old_val;
    208        
    209         do {
    210                 exp_val = *pptr;
    211                 old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
    212         } while (old_val != exp_val);
    213        
    214         return old_val;
     223NO_TRACE static inline void * atomic_set_return_ptr_local(
     224        void **pptr, void *new_val)
     225{
     226        return _atomic_swap_local_impl(pptr, new_val);
     227}
     228
     229/** Atomicaly sets *ptr to val and returns the previous value. */
     230NO_TRACE static inline native_t atomic_set_return_native_t(
     231        native_t *p, native_t val)
     232{
     233        return _atomic_swap_impl(p, val);
     234}
     235
     236/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
     237 *
     238 * This function is only atomic wrt to local interrupts and it is
     239 * NOT atomic wrt to other cpus.
     240 */
     241NO_TRACE static inline native_t atomic_set_return_native_t_local(
     242        native_t *p, native_t new_val)
     243{
     244        return _atomic_swap_local_impl(p, new_val);
    215245}
    216246
    217247
    218248#undef _atomic_cas_ptr_impl
    219 
     249#undef _atomic_swap_impl
     250#undef _atomic_swap_local_impl
    220251
    221252#endif
  • kernel/arch/ia32/include/atomic.h

    rf1c7755 rb17518e  
    144144
    145145
    146 #define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
     146#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
    147147        asm volatile ( \
    148148                prefix " cmpxchgl %[newval], %[ptr]\n" \
     
    165165{
    166166        void *old_val;
    167         _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
     167        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
    168168        return old_val;
    169169}
     
    177177{
    178178        void *old_val;
    179         _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
     179        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
    180180        return old_val;
    181181}
    182182
    183183
    184 /** Atomicaly sets *ptr to new_val and returns the previous value. */
    185 NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
    186 {
    187         void *new_in_old_out = new_val;
    188        
    189         asm volatile (
    190                 "xchgl %[val], %[pptr]\n"
    191                 : [val] "+r" (new_in_old_out),
    192                   [pptr] "+m" (*pptr)
    193         );
    194        
    195         return new_in_old_out;
     184#define _atomic_swap_impl(pptr, new_val) \
     185({ \
     186        typeof(*(pptr)) new_in_old_out = new_val; \
     187        asm volatile ( \
     188                "xchgl %[val], %[p_ptr]\n" \
     189                : [val] "+r" (new_in_old_out), \
     190                  [p_ptr] "+m" (*pptr) \
     191        ); \
     192        \
     193        new_in_old_out; \
     194})
     195
     196/*
     197 * Issuing a xchg instruction always implies lock prefix semantics.
     198 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
     199 * in a loop.
     200 */
     201#define _atomic_swap_local_impl(pptr, new_val) \
     202({ \
     203        typeof(*(pptr)) exp_val; \
     204        typeof(*(pptr)) old_val; \
     205        \
     206        do { \
     207                exp_val = *pptr; \
     208                _atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
     209        } while (old_val != exp_val); \
     210        \
     211        old_val; \
     212})
     213
     214
     215/** Atomicaly sets *ptr to val and returns the previous value. */
     216NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
     217{
     218        return _atomic_swap_impl(pptr, val);
    196219}
    197220
     
    201224 * NOT atomic wrt to other cpus.
    202225 */
    203 NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
    204 {
    205         /*
    206          * Issuing a xchg instruction always implies lock prefix semantics.
    207          * Therefore, it is cheaper to use a cmpxchg without a lock prefix
    208          * in a loop.
    209          */
    210         void *exp_val;
    211         void *old_val;
    212        
    213         do {
    214                 exp_val = *pptr;
    215                 old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
    216         } while (old_val != exp_val);
    217        
    218         return old_val;
    219 }
     226NO_TRACE static inline void * atomic_set_return_ptr_local(
     227        void **pptr, void *new_val)
     228{
     229        return _atomic_swap_local_impl(pptr, new_val);
     230}
     231
     232/** Atomicaly sets *ptr to val and returns the previous value. */
     233NO_TRACE static inline native_t atomic_set_return_native_t(
     234        native_t *p, native_t val)
     235{
     236        return _atomic_swap_impl(p, val);
     237}
     238
     239/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
     240 *
     241 * This function is only atomic wrt to local interrupts and it is
     242 * NOT atomic wrt to other cpus.
     243 */
     244NO_TRACE static inline native_t atomic_set_return_native_t_local(
     245        native_t *p, native_t new_val)
     246{
     247        return _atomic_swap_local_impl(p, new_val);
     248}
     249
    220250
    221251#undef _atomic_cas_ptr_impl
     252#undef _atomic_swap_impl
     253#undef _atomic_swap_local_impl
    222254
    223255#endif
  • kernel/test/atomic/atomic1.c

    rf1c7755 rb17518e  
    7272       
    7373        ptr = 0;
    74         if (atomic_swap_ptr(&ptr, a_ptr) != 0)
    75                 return "Failed atomic_swap_ptr()";
    76         if (atomic_swap_ptr_local(&ptr, 0) != a_ptr || ptr != 0)
    77                 return "Failed atomic_swap_ptr_local()";
     74        if (atomic_set_return_ptr(&ptr, a_ptr) != 0)
     75                return "Failed atomic_set_return_ptr()";
     76        if (atomic_set_return_ptr_local(&ptr, 0) != a_ptr || ptr != 0)
     77                return "Failed atomic_set_return_ptr_local()";
    7878       
    7979        return NULL;
Note: See TracChangeset for help on using the changeset viewer.