Changeset 228666c in mainline for kernel/arch/amd64/include/atomic.h

Timestamp:
2010-02-20T18:41:53Z
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
b03a666
Parents:
bc9da2a
Message:

introduce atomic_count_t as the explicit type of the internal value in atomic_t (this is probably better than the chaotic mix of int/long)
atomic_count_t is defined as unsigned; for signed semantics, cast the value to atomic_signed_t
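
For orientation, the names the message introduces can be pictured with a minimal mock. The definitions below are assumptions inferred from the commit message (unsigned internal counter, signed cast type), not lines taken from this changeset; the real definitions live elsewhere in the HelenOS kernel headers:

    #include <stdint.h>

    /* Assumed shape of the types named in the commit message (amd64).
     * Hypothetical mock; the actual definitions are in the kernel headers. */
    typedef uint64_t atomic_count_t;    /* internal value, unsigned by definition */
    typedef int64_t  atomic_signed_t;   /* cast target for signed semantics */

    typedef struct {
            volatile atomic_count_t count;
    } atomic_t;

    /* Signed reading of the unsigned counter, as the message suggests:
     * atomic_signed_t s = (atomic_signed_t) a.count; */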

File:
1 edited

  • kernel/arch/amd64/include/atomic.h

    rbc9da2a → r228666c

     #include <preemption.h>

    -static inline void atomic_inc(atomic_t *val) {
    +static inline void atomic_inc(atomic_t *val)
    +{
     #ifdef CONFIG_SMP
             asm volatile (
    …
     }

    -static inline void atomic_dec(atomic_t *val) {
    +static inline void atomic_dec(atomic_t *val)
    +{
     #ifdef CONFIG_SMP
             asm volatile (
    …
     }

    -static inline long atomic_postinc(atomic_t *val)
    +static inline atomic_count_t atomic_postinc(atomic_t *val)
     {
    -        long r = 1;
    +        atomic_count_t r = 1;

             asm volatile (
                     "lock xaddq %[r], %[count]\n"
    -                : [count] "+m" (val->count), [r] "+r" (r)
    +                : [count] "+m" (val->count),
    +                  [r] "+r" (r)
             );

    …
     }

    -static inline long atomic_postdec(atomic_t *val)
    +static inline atomic_count_t atomic_postdec(atomic_t *val)
     {
    -        long r = -1;
    +        atomic_count_t r = -1;

             asm volatile (
                     "lock xaddq %[r], %[count]\n"
    -                : [count] "+m" (val->count), [r] "+r" (r)
    +                : [count] "+m" (val->count),
    +                  [r] "+r" (r)
             );

    …
     #define atomic_predec(val)  (atomic_postdec(val) - 1)

    -static inline uint64_t test_and_set(atomic_t *val) {
    -        uint64_t v;
    +static inline atomic_count_t test_and_set(atomic_t *val)
    +{
    +        atomic_count_t v;

             asm volatile (
                     "movq $1, %[v]\n"
                     "xchgq %[v], %[count]\n"
    -                : [v] "=r" (v), [count] "+m" (val->count)
    +                : [v] "=r" (v),
    +                  [count] "+m" (val->count)
             );

    …
     }

    -
     /** amd64 specific fast spinlock */
     static inline void atomic_lock_arch(atomic_t *val)
     {
    -        uint64_t tmp;
    +        atomic_count_t tmp;

             preemption_disable();
    …
                     "testq %[tmp], %[tmp]\n"
                     "jnz 0b\n"
    -                : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    +                : [count] "+m" (val->count),
    +                  [tmp] "=&r" (tmp)
             );
    +
             /*
              * Prevent critical section code from bleeding out this way up.
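
A note on the post/pre split visible in the diff: lock xaddq leaves the old value of the memory operand in the register, so atomic_postinc()/atomic_postdec() return the counter's value from before the update, and the pre- variants are derived by adjusting the return value (as the atomic_predec macro above does). The following stand-alone sketch reproduces the same observable semantics with GCC's __atomic builtins instead of the kernel's inline assembly; the type definitions and the atomic_preinc macro are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t atomic_count_t;                    /* mock of the kernel type */
    typedef struct { volatile atomic_count_t count; } atomic_t;

    /* Same observable behavior as the xaddq-based kernel routine:
     * atomically add 1 and return the value *before* the increment. */
    static inline atomic_count_t atomic_postinc(atomic_t *val)
    {
            return __atomic_fetch_add(&val->count, 1, __ATOMIC_SEQ_CST);
    }

    /* Assumed to mirror the atomic_predec definition shown in the diff. */
    #define atomic_preinc(val)  (atomic_postinc(val) + 1)

    int main(void)
    {
            atomic_t a = { .count = 41 };
            printf("post: %lu\n", (unsigned long) atomic_postinc(&a));  /* 41 */
            printf("now:  %lu\n", (unsigned long) a.count);             /* 42 */
            printf("pre:  %lu\n", (unsigned long) atomic_preinc(&a));   /* 43 */
            return 0;
    }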