Changeset 7328ff4 in mainline

Timestamp: 2018-09-06T18:18:52Z
Author: Jiří Zárevúcky <jiri.zarevucky@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: ffa73c6
Parents: d51cca8
git-author: Jiří Zárevúcky <jiri.zarevucky@…> (2018-08-13 01:29:17)
git-committer: Jiří Zárevúcky <jiri.zarevucky@…> (2018-09-06 18:18:52)
Message:

Use builtin memory fences for kernel barriers, and convert smc_coherence() into a regular function
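In outline (a sketch assembled from the hunks below, not verbatim changeset code): the per-architecture barrier macros are replaced by portable inline functions built on C11 fences, and smc_coherence() changes from a macro into an ordinary function with a fixed signature, implemented per architecture in arch/$(KARCH)/src/smc.c.

    /* Before (per architecture, e.g. ia64), barriers were macros:
     *     #define memory_barrier()  asm volatile ("mf\n" ::: "memory")
     * After, the generic <barrier.h> defines them once on top of C11 fences:
     */
    #include <stdatomic.h>
    #include <stddef.h>

    static inline void memory_barrier(void)
    {
            atomic_thread_fence(memory_order_seq_cst);
    }

    /* smc_coherence() is now declared once and defined per architecture: */
    extern void smc_coherence(void *, size_t);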

Location: kernel
Files: 5 added, 2 deleted, 24 edited, 4 moved

Legend:

    - removed line
    + added line
      unchanged context (… marks elided lines)
  • kernel/Makefile

    d51cca8 → 7328ff4

      COMMON_CFLAGS = $(INCLUDES_FLAGS) -O$(OPTIMIZATION) -imacros $(CONFIG_HEADER) \
    -         -ffreestanding -nostdlib -nostdinc \
    +         -ffreestanding -nostdlib \
              -fexec-charset=UTF-8 -finput-charset=UTF-8 -fno-common \
              -fdebug-prefix-map=$(realpath $(ROOT_PATH))=.
  • kernel/arch/abs32le/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/abs32le.c \
      arch/$(KARCH)/src/userspace.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/cpu/cpu.c \
      arch/$(KARCH)/src/smp/smp.c \
  • kernel/arch/abs32le/src/smc.c

    d51cca8 → 7328ff4

      /*
    - * Copyright (c) 2016 Martin Decky
    + * Copyright (c) 2005 Jakub Jermar
       * All rights reserved.
       *
    …
       */

    - /** @addtogroup riscv64
    -  * @{
    -  */
    - /** @file
    -  */
    + #include <barrier.h>

    - #ifndef KERN_riscv64_BARRIER_H_
    - #define KERN_riscv64_BARRIER_H_
    + void smc_coherence(void *a, size_t l)
    + {
    +         compiler_barrier();
    + }

    - #include <trace.h>
    -
    - // FIXME
    -
    - #define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    - #define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
    -
    - #define memory_barrier()  asm volatile ("" ::: "memory")
    - #define read_barrier()    asm volatile ("" ::: "memory")
    - #define write_barrier()   asm volatile ("" ::: "memory")
    -
    - #ifdef KERNEL
    -
    - #define smc_coherence(addr, size)
    -
    - #endif /* KERNEL */
    -
    - #endif
    -
    - /** @}
    -  */
  • kernel/arch/amd64/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/proc/thread.c \
      arch/$(KARCH)/src/userspace.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/syscall.c
  • kernel/arch/arm32/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/exception.c \
      arch/$(KARCH)/src/userspace.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/debug/stacktrace.c \
      arch/$(KARCH)/src/debug/stacktrace_asm.S \
  • kernel/arch/arm32/include/arch/barrier.h

    d51cca8 → 7328ff4

      #define KERN_arm32_BARRIER_H_

    - #ifdef KERNEL
      #include <arch/cache.h>
      #include <arch/cp15.h>
      #include <align.h>
    - #else
    - #include <libarch/cp15.h>
    - #endif
    -
    - #define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    - #define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")

      #if defined PROCESSOR_ARCH_armv7_a
    +
      /*
       * ARMv7 uses instructions for memory barriers see ARM Architecture reference
    …
       * and functionality on armv7 architecture.
       */
    - #define memory_barrier()  asm volatile ("dmb" ::: "memory")
    - #define read_barrier()    asm volatile ("dsb" ::: "memory")
    - #define write_barrier()   asm volatile ("dsb st" ::: "memory")
    - #define inst_barrier()    asm volatile ("isb" ::: "memory")
    - #elif defined PROCESSOR_ARCH_armv6 | defined KERNEL
    + #define dmb()    asm volatile ("dmb" ::: "memory")
    + #define dsb()    asm volatile ("dsb" ::: "memory")
    + #define isb()    asm volatile ("isb" ::: "memory")
    +
    + #elif defined PROCESSOR_ARCH_armv6
    +
      /*
       * ARMv6 introduced user access of the following commands:
    …
       * CP15 implementation is mandatory only for armv6+.
       */
    - #if defined(PROCESSOR_ARCH_armv6) || defined(PROCESSOR_ARCH_armv7_a)
    - #define memory_barrier()  CP15DMB_write(0)
    + #define dmb()    CP15DMB_write(0)
    + #define dsb()    CP15DSB_write(0)
    + #define isb()    CP15ISB_write(0)
    +
      #else
    - #define memory_barrier()  CP15DSB_write(0)
    - #endif
    - #define read_barrier()    CP15DSB_write(0)
    - #define write_barrier()   read_barrier()
    - #if defined(PROCESSOR_ARCH_armv6) || defined(PROCESSOR_ARCH_armv7_a)
    - #define inst_barrier()    CP15ISB_write(0)
    - #else
    - #define inst_barrier()
    - #endif
    - #else
    - /*
    -  * Older manuals mention syscalls as a way to implement cache coherency and
    -  * barriers. See for example ARM Architecture Reference Manual Version D
    -  * chapter 2.7.4 Prefetching and self-modifying code (p. A2-28)
    -  */
    - // TODO implement on per PROCESSOR basis or via syscalls
    - #define memory_barrier()  asm volatile ("" ::: "memory")
    - #define read_barrier()    asm volatile ("" ::: "memory")
    - #define write_barrier()   asm volatile ("" ::: "memory")
    - #define inst_barrier()    asm volatile ("" ::: "memory")
    - #endif

    - #ifdef KERNEL
    -
    - /*
    -  * There are multiple ways ICache can be implemented on ARM machines. Namely
    -  * PIPT, VIPT, and ASID and VMID tagged VIVT (see ARM Architecture Reference
    -  * Manual B3.11.2 (p. 1383).  However, CortexA8 Manual states: "For maximum
    -  * compatibility across processors, ARM recommends that operating systems target
    -  * the ARMv7 base architecture that uses ASID-tagged VIVT instruction caches,
    -  * and do not assume the presence of the IVIPT extension. Software that relies
    -  * on the IVIPT extension might fail in an unpredictable way on an ARMv7
    -  * implementation that does not include the IVIPT extension." (7.2.6 p. 245).
    -  * Only PIPT invalidates cache for all VA aliases if one block is invalidated.
    -  *
    -  * @note: Supporting ASID and VMID tagged VIVT may need to add ICache
    -  * maintenance to other places than just smc.
    -  */
    -
    - #ifdef KERNEL
    -
    - /*
    -  * @note: Cache type register is not available in uspace. We would need
    -  * to export the cache line value, or use syscall for uspace smc_coherence
    -  */
    - #define smc_coherence(a, l) \
    - do { \
    -         for (uintptr_t addr = (uintptr_t) a; addr < (uintptr_t) a + l; \
    -             addr += CP15_C7_MVA_ALIGN) \
    -                 dcache_clean_mva_pou(ALIGN_DOWN((uintptr_t) a, CP15_C7_MVA_ALIGN)); \
    -         write_barrier();               /* Wait for completion */\
    -         icache_invalidate();\
    -         write_barrier();\
    -         inst_barrier();                /* Wait for Inst refetch */\
    - } while (0)
    + #define dmb()    CP15DSB_write(0)
    + #define dsb()    CP15DSB_write(0)
    + #define isb()

      #endif
    -
    - #endif  /* KERNEL */

      #endif
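The header now exposes the three ARM primitives directly instead of generic wrapper macros: dmb orders memory accesses, dsb additionally waits until outstanding accesses and maintenance operations complete, and isb flushes the pipeline so subsequent instructions are refetched. A minimal sketch of the usual pattern after page-table or TLB maintenance, mirroring the tlb.c hunks below (update_page_table() and invalidate_tlb_entry() are hypothetical placeholders, not HelenOS functions):

    static inline void example_remap_finish(void)
    {
            /* update_page_table(...); */
            /* invalidate_tlb_entry(...); */
            dsb();   /* wait until the maintenance operation completes */
            isb();   /* refetch instructions under the new mapping */
    }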
  • kernel/arch/arm32/include/arch/mm/page.h

    d51cca8 → 7328ff4

      #include <arch/exception.h>
      #include <barrier.h>
    + #include <arch/barrier.h>
      #include <arch/cp15.h>
      #include <trace.h>
  • kernel/arch/arm32/include/arch/mm/page_armv4.h

    d51cca8 → 7328ff4

      #ifndef KERN_arm32_PAGE_armv4_H_
      #define KERN_arm32_PAGE_armv4_H_
    +
    + #include <arch/cache.h>

      #ifndef KERN_arm32_PAGE_H_
  • kernel/arch/arm32/src/atomic.c

    d51cca8 → 7328ff4

      #include <synch/spinlock.h>
    + #include <arch/barrier.h>

    …
              return cur_val;
    + }
    +
    + void __sync_synchronize(void)
    + {
    +         dsb();
      }
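Providing __sync_synchronize() as a real symbol matters because GCC may emit a call to a function of that name when it cannot expand a __sync_* builtin into inline barrier instructions on older ARM targets; the kernel now satisfies that reference with a dsb(). A hedged illustration (publish() and the ready flag are hypothetical, for illustration only):

    volatile int ready;

    void publish(int *data)
    {
            *data = 42;
            __sync_synchronize();   /* may lower to a call to the function above */
            ready = 1;
    }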
  • kernel/arch/arm32/src/mm/tlb.c

    d51cca8 → 7328ff4

      #include <arch/mm/page.h>
      #include <arch/cache.h>
    + #include <arch/barrier.h>

      /** Invalidate all entries in TLB.
    …
               * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
               */
    -         read_barrier();
    -         inst_barrier();
    +         dsb();
    +         isb();
      }

    …
               * ARM Architecture reference Manual ch. B3.10.1 p. B3-1374 B3-1375
               */
    -         read_barrier();
    -         inst_barrier();
    +         dsb();
    +         isb();
      }
  • kernel/arch/ia32/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/pm.c \
      arch/$(KARCH)/src/userspace.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/cpu/cpu.c \
      arch/$(KARCH)/src/mm/km.c \
  • kernel/arch/ia64/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/proc/scheduler.c \
      arch/$(KARCH)/src/ddi/ddi.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/smp/smp.c \
      arch/$(KARCH)/src/smp/smp_call.c \
  • kernel/arch/ia64/include/arch/barrier.h

    d51cca8 → 7328ff4

      #define KERN_ia64_BARRIER_H_

    - /*
    -  * TODO: Implement true IA-64 memory barriers for macros below.
    -  */
    - #define CS_ENTER_BARRIER()      memory_barrier()
    - #define CS_LEAVE_BARRIER()      memory_barrier()
    -
    - #define memory_barrier()        asm volatile ("mf\n" ::: "memory")
    - #define read_barrier()          memory_barrier()
    - #define write_barrier()         memory_barrier()
    + #define mf()    asm volatile ("mf\n" ::: "memory")

      #define srlz_i()                \
    …
              asm volatile (";; sync.i\n" ::: "memory")

    - #ifdef KERNEL
    -
    - #define FC_INVAL_MIN            32
    - #define smc_coherence(a, l)             \
    - {                                               \
    -         unsigned long i;                        \
    -         for (i = 0; i < (l); i += FC_INVAL_MIN) \
    -                 fc_i((void *)(a) + i);          \
    -         sync_i();                               \
    -         srlz_i();                               \
    - }
    -
    - #endif  /* KERNEL */
    -
      #endif
  • kernel/arch/ia64/include/arch/cpu.h

    d51cca8 → 7328ff4

      #include <arch/register.h>
      #include <arch/asm.h>
    + #include <arch/barrier.h>
      #include <arch/bootinfo.h>
      #include <stdint.h>
  • kernel/arch/ia64/src/mm/vhpt.c

    d51cca8 → 7328ff4

      #include <mem.h>
    + #include <arch/barrier.h>
      #include <arch/mm/vhpt.h>
      #include <mm/frame.h>
  • kernel/arch/ia64/src/smc.c

    d51cca8 → 7328ff4

       */

    - /** @addtogroup abs32le
    -  * @{
    -  */
    - /** @file
    -  */
    + #include <barrier.h>
    + #include <arch/barrier.h>

    - #ifndef KERN_abs32le_BARRIER_H_
    - #define KERN_abs32le_BARRIER_H_
    + #define FC_INVAL_MIN            32

    - /*
    -  * Provisions are made to prevent compiler from reordering instructions itself.
    -  */
    + void smc_coherence(void *a, size_t l)
    + {
    +         unsigned long i;
    +         for (i = 0; i < (l); i += FC_INVAL_MIN)
    +                 fc_i(a + i);
    +         sync_i();
    +         srlz_i();
    + }

    - #define CS_ENTER_BARRIER()
    - #define CS_LEAVE_BARRIER()
    -
    - #define memory_barrier()
    - #define read_barrier()
    - #define write_barrier()
    -
    - #ifdef KERNEL
    -
    - #define smc_coherence(addr, size)
    -
    - #endif  /* KERNEL*/
    -
    - #endif
    -
    - /** @}
    -  */
  • kernel/arch/mips32/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/mm/as.c \
      arch/$(KARCH)/src/fpu_context.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/smp/smp.c \
      arch/$(KARCH)/src/smp/smp_call.c \
  • kernel/arch/mips32/src/debugger.c

    d51cca8 → 7328ff4

      }

    + static inline void write_inst(uintptr_t addr, uint32_t inst)
    + {
    +         *((uint32_t *) addr) = inst;
    +         smc_coherence((uint32_t *) addr, 4);
    + }
    +
      #ifdef CONFIG_KCONSOLE

    …
              /* Set breakpoint */
    -         *((sysarg_t *) cur->address) = 0x0d;
    -         smc_coherence(cur->address, 4);
    +         write_inst(cur->address, 0x0d);

              irq_spinlock_unlock(&bkpoint_lock, true);
    …
              }

    -         ((uint32_t *) cur->address)[0] = cur->instruction;
    -         smc_coherence(((uint32_t *) cur->address)[0], 4);
    -         ((uint32_t *) cur->address)[1] = cur->nextinstruction;
    -         smc_coherence(((uint32_t *) cur->address)[1], 4);
    +         write_inst(cur->address, cur->instruction);
    +         write_inst(cur->address + 4, cur->nextinstruction);

              cur->address = (uintptr_t) NULL;
    …
                      if (cur->flags & BKPOINT_REINST) {
                              /* Set breakpoint on first instruction */
    -                         ((uint32_t *) cur->address)[0] = 0x0d;
    -                         smc_coherence(((uint32_t *)cur->address)[0], 4);
    +                         write_inst(cur->address, 0x0d);

                              /* Return back the second */
    -                         ((uint32_t *) cur->address)[1] = cur->nextinstruction;
    -                         smc_coherence(((uint32_t *) cur->address)[1], 4);
    +                         write_inst(cur->address + 4, cur->nextinstruction);

                              cur->flags &= ~BKPOINT_REINST;
    …
                      /* Return first instruction back */
    -                 ((uint32_t *)cur->address)[0] = cur->instruction;
    -                 smc_coherence(cur->address, 4);
    +                 write_inst(cur->address, cur->instruction);

                      if (!(cur->flags & BKPOINT_ONESHOT)) {
                              /* Set Breakpoint on next instruction */
    -                         ((uint32_t *)cur->address)[1] = 0x0d;
    +                         write_inst(cur->address + 4, 0x0d);
                              cur->flags |= BKPOINT_REINST;
                      }
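Besides removing duplication, the write_inst() helper fixes the older call sites that passed the fetched instruction word to smc_coherence() instead of its address, as the removed lines above show:

    /* Old (note the dereference: the value, not a pointer, was passed): */
    ((uint32_t *) cur->address)[0] = cur->instruction;
    smc_coherence(((uint32_t *) cur->address)[0], 4);

    /* New: the helper always hands smc_coherence() the patched address: */
    write_inst(cur->address, cur->instruction);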
  • kernel/arch/ppc32/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/cpu/cpu.c \
      arch/$(KARCH)/src/proc/scheduler.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/mm/km.c \
      arch/$(KARCH)/src/mm/as.c \
  • kernel/arch/ppc32/src/smc.c

    d51cca8 → 7328ff4

       */

    - /** @addtogroup ppc32
    -  * @{
    -  */
    - /** @file
    -  */

    - #ifndef KERN_ppc32_BARRIER_H_
    - #define KERN_ppc32_BARRIER_H_
    -
    - #include <trace.h>
    -
    - #define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    - #define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
    -
    - #define memory_barrier()  asm volatile ("sync" ::: "memory")
    - #define read_barrier()    asm volatile ("sync" ::: "memory")
    - #define write_barrier()   asm volatile ("eieio" ::: "memory")
    -
    - #define instruction_barrier() \
    -         asm volatile ( \
    -                 "sync\n" \
    -                 "isync\n" \
    -         )
    -
    - #ifdef KERNEL
    + #include <barrier.h>

      #define COHERENCE_INVAL_MIN  4
    …
       */

    - NO_TRACE static inline void smc_coherence(void *addr, unsigned int len)
    + void smc_coherence(void *addr, size_t len)
      {
              unsigned int i;
    …
              );

    -         memory_barrier();
    +         asm volatile ("sync" ::: "memory");

              for (i = 0; i < len; i += COHERENCE_INVAL_MIN)
    …
              );

    -         instruction_barrier();
    +         asm volatile ("sync" ::: "memory");
    +         asm volatile ("isync" ::: "memory");
      }

    - #endif  /* KERNEL */
    -
    - #endif
    -
    - /** @}
    -  */
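The bodies of the two asm loops are elided above. For context, the PowerPC architecture's documented sequence for making modified code visible to instruction fetch is dcbst, sync, icbi, sync, isync; a sketch of that canonical sequence follows (architecture background only, not necessarily the exact elided code):

    /* Flush one code block to memory and refetch it (canonical sequence). */
    static void flush_code_block(void *addr)
    {
            asm volatile ("dcbst 0, %0" :: "r" (addr) : "memory"); /* flush D-cache block */
            asm volatile ("sync" ::: "memory");                    /* wait for the flush */
            asm volatile ("icbi 0, %0" :: "r" (addr) : "memory");  /* invalidate I-cache block */
            asm volatile ("sync" ::: "memory");
            asm volatile ("isync" ::: "memory");                   /* refetch instructions */
    }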
  • kernel/arch/riscv64/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/riscv64.c \
      arch/$(KARCH)/src/userspace.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/cpu/cpu.c \
      arch/$(KARCH)/src/mm/km.c \
  • kernel/arch/sparc64/Makefile.inc

    d51cca8 → 7328ff4

      arch/$(KARCH)/src/proc/$(USARCH)/scheduler.c \
      arch/$(KARCH)/src/proc/thread.c \
    + arch/$(KARCH)/src/smc.c \
      arch/$(KARCH)/src/trap/$(USARCH)/mmu.S \
      arch/$(KARCH)/src/trap/$(USARCH)/trap_table.S \
  • kernel/arch/sparc64/include/arch/barrier.h

    d51cca8 → 7328ff4

      #include <trace.h>

    - /*
    -  * Our critical section barriers are prepared for the weakest RMO memory model.
    -  */
    - #define CS_ENTER_BARRIER() \
    -         asm volatile ( \
    -                 "membar #LoadLoad | #LoadStore\n" \
    -                 ::: "memory" \
    -         )
    -
    - #define CS_LEAVE_BARRIER() \
    -         asm volatile ( \
    -                 "membar #StoreStore\n" \
    -                 "membar #LoadStore\n" \
    -                 ::: "memory" \
    -         )
    -
    - #define memory_barrier() \
    -         asm volatile ( \
    -                 "membar #LoadLoad | #StoreStore\n" \
    -                 ::: "memory" \
    -         )
    -
    - #define read_barrier() \
    -         asm volatile ( \
    -                 "membar #LoadLoad\n" \
    -                 ::: "memory" \
    -         )
    -
    - #define write_barrier() \
    -         asm volatile ( \
    -                 "membar #StoreStore\n" \
    -                 ::: "memory" \
    -         )
    -
    - #define flush(a) \
    -         asm volatile ( \
    -                 "flush %[reg]\n" \
    -                 :: [reg] "r" ((a)) \
    -                 : "memory" \
    -         )
    -
      /** Flush Instruction pipeline. */
      NO_TRACE static inline void flush_pipeline(void)
    …
      }

    - #ifdef KERNEL
    -
    - #if defined(US)
    -
    - #define FLUSH_INVAL_MIN  4
    -
    - #define smc_coherence(a, l) \
    -         do { \
    -                 unsigned long i; \
    -                 write_barrier(); \
    -                 \
    -                 for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
    -                         flush((void *)(a) + i); \
    -         } while (0)
    -
    - #elif defined (US3)
    -
    - #define smc_coherence(a, l) \
    -         do { \
    -                 write_barrier(); \
    -                 flush_pipeline(); \
    -         } while (0)
    -
    - #endif  /* defined(US3) */
    -
    - #endif  /* KERNEL */
    -
      #endif
  • kernel/arch/sparc64/include/arch/mm/sun4u/tlb.h

    d51cca8 → 7328ff4

      #include <arch/mm/page.h>
      #include <arch/asm.h>
    + #include <arch/barrier.h>
      #include <barrier.h>
      #include <typedefs.h>
  • kernel/arch/sparc64/src/smc.c

    d51cca8 → 7328ff4

       */

    - /** @addtogroup mips32
    -  * @{
    -  */
    - /** @file
    -  */
    + #include <barrier.h>
    + #include <arch/barrier.h>

    - #ifndef KERN_mips32_BARRIER_H_
    - #define KERN_mips32_BARRIER_H_
    + #if defined(US)

    - /*
    -  * TODO: implement true MIPS memory barriers for macros below.
    -  */
    - #define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    - #define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
    + #define FLUSH_INVAL_MIN  4

    - #define memory_barrier() asm volatile ("" ::: "memory")
    - #define read_barrier()   asm volatile ("" ::: "memory")
    - #define write_barrier()  asm volatile ("" ::: "memory")
    + void smc_coherence(void *a, size_t l)
    + {
    +         asm volatile ("membar #StoreStore\n" ::: "memory");

    - #ifdef KERNEL
    +         for (size_t i = 0; i < l; i += FLUSH_INVAL_MIN) {
    +                 asm volatile (
    +                     "flush %[reg]\n"
    +                     :: [reg] "r" (a + i)
    +                     : "memory"
    +                 );
    +         }
    + }

    - #define smc_coherence(a, l)
    + #elif defined (US3)

    - #endif  /* KERNEL */
    -
    - #endif
    + void smc_coherence(void *a, size_t l)
    + {
    +         asm volatile ("membar #StoreStore\n" ::: "memory");

    - /** @}
    -  */
    +         flush_pipeline();
    + }
    +
    + #endif  /* defined(US3) */
  • kernel/arch/sparc64/src/smp/sun4u/ipi.c

    d51cca8 → 7328ff4

      #include <smp/ipi.h>
    + #include <arch/barrier.h>
      #include <arch/smp/sun4u/ipi.h>
      #include <assert.h>
  • kernel/arch/sparc64/src/trap/sun4u/interrupt.c

    d51cca8 → 7328ff4

       */

    + #include <arch/barrier.h>
      #include <arch/interrupt.h>
      #include <arch/sparc64.h>
  • kernel/generic/include/barrier.h

    d51cca8 → 7328ff4

      #define KERN_COMPILER_BARRIER_H_

    - #include <arch/barrier.h>
    + #include <stdatomic.h>
    + #include <stddef.h>

    - #define compiler_barrier() asm volatile ("" ::: "memory")
    + static inline void compiler_barrier(void)
    + {
    +         atomic_signal_fence(memory_order_seq_cst);
    + }
    +
    + static inline void memory_barrier(void)
    + {
    +         atomic_thread_fence(memory_order_seq_cst);
    + }
    +
    + static inline void read_barrier(void)
    + {
    +         atomic_thread_fence(memory_order_acquire);
    + }
    +
    + static inline void write_barrier(void)
    + {
    +         atomic_thread_fence(memory_order_release);
    + }
    +
    + #define CS_ENTER_BARRIER() atomic_thread_fence(memory_order_acquire)
    + #define CS_LEAVE_BARRIER() atomic_thread_fence(memory_order_release)

      /** Forces the compiler to access (ie load/store) the variable only once. */
      #define ACCESS_ONCE(var) (*((volatile typeof(var)*)&(var)))

    + extern void smc_coherence(void *, size_t);
    +
      #endif /* KERN_COMPILER_BARRIER_H_ */
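A minimal usage sketch of the new generic fences (a hypothetical producer/consumer pair, not code from the changeset):

    static int payload;
    static volatile int ready;

    static void producer(void)
    {
            payload = 42;
            write_barrier();        /* release: payload becomes visible before ready */
            ready = 1;
    }

    static int consumer(void)
    {
            while (!ready)
                    ;               /* spin until the flag is observed */
            read_barrier();         /* acquire: ready observed before payload is read */
            return payload;
    }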