Changeset 7328ff4 in mainline for kernel/arch/arm32/include


Timestamp: 2018-09-06T18:18:52Z (7 years ago)
Author: Jiří Zárevúcky <jiri.zarevucky@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: ffa73c6
Parents: d51cca8
git-author: Jiří Zárevúcky <jiri.zarevucky@…> (2018-08-13 01:29:17)
git-committer: Jiří Zárevúcky <jiri.zarevucky@…> (2018-09-06 18:18:52)
Message:

Use builtin memory fences for kernel barriers, and convert smc_coherence() into a regular function

Location: kernel/arch/arm32/include/arch
Files: 3 edited
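The "builtin memory fences" in the message refer to expressing the generic kernel barriers with compiler builtins instead of per-architecture inline assembly. As a rough, hypothetical sketch only (the generic barrier header this work targets is not part of this changeset), such barriers could be written with the GCC/Clang __atomic fence builtins; on ARMv7 the sequentially consistent thread fence is lowered to a dmb instruction, while the architecture-specific dmb()/dsb()/isb() macros in the diff below remain for cache maintenance that compiler fences cannot express:

    /*
     * Illustrative sketch only -- not code from this changeset. Generic
     * barrier macros built on the GCC/Clang __atomic fence builtins; the
     * macro names here are hypothetical.
     */

    /* Full hardware memory fence; emitted as "dmb" (or stronger) on ARMv7. */
    #define memory_barrier()    __atomic_thread_fence(__ATOMIC_SEQ_CST)

    /* Acquire/release fences where only one-way ordering is needed. */
    #define read_barrier()      __atomic_thread_fence(__ATOMIC_ACQUIRE)
    #define write_barrier()     __atomic_thread_fence(__ATOMIC_RELEASE)

    /* Compiler-only barrier: blocks reordering, emits no instruction. */
    #define compiler_barrier()  __atomic_signal_fence(__ATOMIC_SEQ_CST)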

  • kernel/arch/arm32/include/arch/barrier.h

    rd51cca8 r7328ff4  
     #define KERN_arm32_BARRIER_H_
     
    -#ifdef KERNEL
     #include <arch/cache.h>
     #include <arch/cp15.h>
     #include <align.h>
    -#else
    -#include <libarch/cp15.h>
    -#endif
    -
    -#define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    -#define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
     
     #if defined PROCESSOR_ARCH_armv7_a
    +
     /*
      * ARMv7 uses instructions for memory barriers see ARM Architecture reference
    …
      * and functionality on armv7 architecture.
      */
    -#define memory_barrier()  asm volatile ("dmb" ::: "memory")
    -#define read_barrier()    asm volatile ("dsb" ::: "memory")
    -#define write_barrier()   asm volatile ("dsb st" ::: "memory")
    -#define inst_barrier()    asm volatile ("isb" ::: "memory")
    -#elif defined PROCESSOR_ARCH_armv6 | defined KERNEL
    +#define dmb()    asm volatile ("dmb" ::: "memory")
    +#define dsb()    asm volatile ("dsb" ::: "memory")
    +#define isb()    asm volatile ("isb" ::: "memory")
    +
    +#elif defined PROCESSOR_ARCH_armv6
    +
     /*
      * ARMv6 introduced user access of the following commands:
    …
      * CP15 implementation is mandatory only for armv6+.
      */
    -#if defined(PROCESSOR_ARCH_armv6) || defined(PROCESSOR_ARCH_armv7_a)
    -#define memory_barrier()  CP15DMB_write(0)
    +#define dmb()    CP15DMB_write(0)
    +#define dsb()    CP15DSB_write(0)
    +#define isb()    CP15ISB_write(0)
    +
     #else
    -#define memory_barrier()  CP15DSB_write(0)
    -#endif
    -#define read_barrier()    CP15DSB_write(0)
    -#define write_barrier()   read_barrier()
    -#if defined(PROCESSOR_ARCH_armv6) || defined(PROCESSOR_ARCH_armv7_a)
    -#define inst_barrier()    CP15ISB_write(0)
    -#else
    -#define inst_barrier()
    -#endif
    -#else
    -/*
    - * Older manuals mention syscalls as a way to implement cache coherency and
    - * barriers. See for example ARM Architecture Reference Manual Version D
    - * chapter 2.7.4 Prefetching and self-modifying code (p. A2-28)
    - */
    -// TODO implement on per PROCESSOR basis or via syscalls
    -#define memory_barrier()  asm volatile ("" ::: "memory")
    -#define read_barrier()    asm volatile ("" ::: "memory")
    -#define write_barrier()   asm volatile ("" ::: "memory")
    -#define inst_barrier()    asm volatile ("" ::: "memory")
    -#endif
     
    -#ifdef KERNEL
    -
    -/*
    - * There are multiple ways ICache can be implemented on ARM machines. Namely
    - * PIPT, VIPT, and ASID and VMID tagged VIVT (see ARM Architecture Reference
    - * Manual B3.11.2 (p. 1383).  However, CortexA8 Manual states: "For maximum
    - * compatibility across processors, ARM recommends that operating systems target
    - * the ARMv7 base architecture that uses ASID-tagged VIVT instruction caches,
    - * and do not assume the presence of the IVIPT extension. Software that relies
    - * on the IVIPT extension might fail in an unpredictable way on an ARMv7
    - * implementation that does not include the IVIPT extension." (7.2.6 p. 245).
    - * Only PIPT invalidates cache for all VA aliases if one block is invalidated.
    - *
    - * @note: Supporting ASID and VMID tagged VIVT may need to add ICache
    - * maintenance to other places than just smc.
    - */
    -
    -#ifdef KERNEL
    -
    -/*
    - * @note: Cache type register is not available in uspace. We would need
    - * to export the cache line value, or use syscall for uspace smc_coherence
    - */
    -#define smc_coherence(a, l) \
    -do { \
    -        for (uintptr_t addr = (uintptr_t) a; addr < (uintptr_t) a + l; \
    -            addr += CP15_C7_MVA_ALIGN) \
    -                dcache_clean_mva_pou(ALIGN_DOWN((uintptr_t) a, CP15_C7_MVA_ALIGN)); \
    -        write_barrier();               /* Wait for completion */\
    -        icache_invalidate();\
    -        write_barrier();\
    -        inst_barrier();                /* Wait for Inst refetch */\
    -} while (0)
    +#define dmb()    CP15DSB_write(0)
    +#define dsb()    CP15DSB_write(0)
    +#define isb()
     
     #endif
    -
    -#endif  /* KERNEL */
     
     #endif
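The smc_coherence() macro deleted above made freshly written code visible to the instruction stream: it cleaned the affected D-cache lines to the point of unification, then invalidated the I-cache and resynchronized. Per the commit message it becomes a regular function; its new body lives outside this header and is not shown in this diff. A hedged sketch of what such a function could look like, reusing the dsb()/isb() primitives kept in this header and the helpers the removed macro already used (dcache_clean_mva_pou(), icache_invalidate(), CP15_C7_MVA_ALIGN); the (void *, size_t) signature is an assumption:

    /*
     * Sketch only -- the real post-changeset implementation is not part of
     * this diff. Unlike the removed macro, the loop advances `addr`, so every
     * cache line covering [a, a + l) is cleaned, not just the first one.
     */
    static void smc_coherence(void *a, size_t l)
    {
            uintptr_t begin = ALIGN_DOWN((uintptr_t) a, CP15_C7_MVA_ALIGN);

            for (uintptr_t addr = begin; addr < (uintptr_t) a + l;
                addr += CP15_C7_MVA_ALIGN)
                    dcache_clean_mva_pou(addr);  /* clean D-cache line to PoU */

            dsb();                /* wait for the cleans to complete */
            icache_invalidate();  /* discard stale instructions */
            dsb();                /* wait for the invalidation to complete */
            isb();                /* refetch the instruction stream */
    }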
  • kernel/arch/arm32/include/arch/mm/page.h

    rd51cca8 r7328ff4  
     #include <arch/exception.h>
     #include <barrier.h>
    +#include <arch/barrier.h>
     #include <arch/cp15.h>
     #include <trace.h>
  • kernel/arch/arm32/include/arch/mm/page_armv4.h

    rd51cca8 r7328ff4  
     #ifndef KERN_arm32_PAGE_armv4_H_
     #define KERN_arm32_PAGE_armv4_H_
    +
    +#include <arch/cache.h>
     
     #ifndef KERN_arm32_PAGE_H_