Changeset 69146b93 in mainline for kernel


Ignore:
Timestamp:
2012-11-26T19:02:45Z (13 years ago)
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
04552324
Parents:
5d230a30 (diff), 7462674 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merged mainline,1723.

Location:
kernel
Files:
8 added
42 edited
1 moved

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/Makefile.inc

    r5d230a30 r69146b93  
    3333
    3434FPU_NO_CFLAGS = -mno-sse -mno-sse2
    35 
    36 #
    37 # FIXME:
    38 #
    39 # The -fno-optimize-sibling-calls should be removed as soon as a bug
    40 # in GCC concerning the "large" memory model and tail call optimization
    41 # is fixed.
    42 #
    43 # When GCC generates a code for tail call, instead of generating ..
    44 #
    45 #   jmp *fnc
    46 #
    47 # it generates an assembly code with an illegal immediate prefix:
    48 #
    49 #   jmp *$fnc
    50 #
    51 # See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=48385 for reference.
    52 #
    53 
    54 CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer -fno-optimize-sibling-calls
     35CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer
    5536GCC_CFLAGS += $(CMN1)
    5637ICC_CFLAGS += $(CMN1)
  • kernel/arch/amd64/src/mm/page.c

    r5d230a30 r69146b93  
    9292                access = PF_ACCESS_READ;
    9393       
    94         if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
    95                 fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
    96                 panic_memtrap(istate, access, page, NULL);
    97         }
     94        as_page_fault(page, access, istate);
    9895}
    9996
  • kernel/arch/arm32/Makefile.inc

    r5d230a30 r69146b93  
    3333ATSIGN = %
    3434
    35 GCC_CFLAGS += -march=armv4 -fno-omit-frame-pointer -mapcs-frame
     35GCC_CFLAGS += -fno-omit-frame-pointer -mapcs-frame -march=$(subst _,-,$(PROCESSOR)) -mno-unaligned-access
    3636
    3737BITS = 32
     
    7575endif
    7676
     77ifeq ($(MACHINE),beagleboardxm)
     78        ARCH_SOURCES += arch/$(KARCH)/src/mach/beagleboardxm/beagleboardxm.c
     79endif
     80
    7781ifeq ($(CONFIG_PL050),y)
    7882        ARCH_SOURCES += genarch/src/drivers/pl050/pl050.c
  • kernel/arch/arm32/_link.ld.in

    r5d230a30 r69146b93  
    99#ifdef MACHINE_gta02
    1010#define KERNEL_LOAD_ADDRESS 0xb0a08000
     11#elif defined MACHINE_beagleboardxm
     12#define KERNEL_LOAD_ADDRESS 0x80a00000
    1113#else
    1214#define KERNEL_LOAD_ADDRESS 0x80a00000
  • kernel/arch/arm32/include/asm.h

    r5d230a30 r69146b93  
    4343#include <trace.h>
    4444
    45 /** No such instruction on ARM to sleep CPU. */
     45/** No such instruction on old ARM to sleep CPU.
     46 *
     47 * ARMv7 introduced wait for event and wait for interrupt (wfe/wfi).
     48 * ARM920T has custom coprocessor action to do the same. See ARM920T Technical
     49 * Reference Manual ch 4.9 p. 4-23 (103 in the PDF)
     50 */
    4651NO_TRACE static inline void cpu_sleep(void)
    4752{
     53#ifdef PROCESSOR_armv7_a
     54        asm volatile ( "wfe" :: );
     55#elif defined(MACHINE_gta02)
     56        asm volatile ( "mcr p15,0,R0,c7,c0,4" :: );
     57#endif
    4858}
    4959
  • kernel/arch/arm32/include/barrier.h

    r5d230a30 r69146b93  
    4747#define write_barrier()   asm volatile ("" ::: "memory")
    4848
    49 #define smc_coherence(a)
    50 #define smc_coherence_block(a, l)
     49/*
     50 * There are multiple ways ICache can be implemented on ARM machines. Namely
     51 * PIPT, VIPT, and ASID and VMID tagged VIVT (see ARM Architecture Reference
     52 * Manual B3.11.2 (p. 1383).  However, CortexA8 Manual states: "For maximum
     53 * compatibility across processors, ARM recommends that operating systems target
     54 * the ARMv7 base architecture that uses ASID-tagged VIVT instruction caches,
     55 * and do not assume the presence of the IVIPT extension. Software that relies
     56 * on the IVIPT extension might fail in an unpredictable way on an ARMv7
     57 * implementation that does not include the IVIPT extension." (7.2.6 p. 245).
     58 * Only PIPT invalidates cache for all VA aliases if one block is invalidated.
     59 *
     60 * @note: Supporting ASID and VMID tagged VIVT may need to add ICache
     61 * maintenance to other places than just smc.
     62 */
     63
     64/* Available on all supported ARMs,
     65 * invalidates entire ICache so the written value does not matter. */
     66#define smc_coherence(a) asm volatile ( "mcr p15, 0, r0, c7, c5, 0")
     67#define smc_coherence_block(a, l) smc_coherence(a)
     68
    5169
    5270#endif
  • kernel/arch/arm32/include/cpu.h

    r5d230a30 r69146b93  
    4141
    4242
    43 /** Struct representing ARM CPU identifiaction. */
     43/** Struct representing ARM CPU identification. */
    4444typedef struct {
    4545        /** Implementator (vendor) number. */
  • kernel/arch/arm32/include/mach/beagleboardxm/beagleboardxm.h

    r5d230a30 r69146b93  
    11/*
    2  * Copyright (c) 2011 Martin Sucha
     2 * Copyright (c) 2012 Jan Vesely
    33 * All rights reserved.
    44 *
     
    2626 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    2727 */
     28/** @addtogroup arm32beagleboardxm beagleboardxm
     29 *  @brief BeagleBoard-xM platform.
     30 *  @ingroup arm32
     31 * @{
     32 */
     33/** @file
     34 *  @brief BeagleBoard platform driver.
     35 */
    2836
    29 /** @addtogroup fs
    30  * @{
    31  */
     37#ifndef KERN_arm32_beagleboardxm_H_
     38#define KERN_arm32_beagleboardxm_H_
    3239
    33 #ifndef EXT2FS_EXT2FS_H_
    34 #define EXT2FS_EXT2FS_H_
     40#include <arch/machine_func.h>
    3541
    36 #include <libext2.h>
    37 #include <libfs.h>
    38 #include <sys/types.h>
    39 
    40 #define min(a, b)               ((a) < (b) ? (a) : (b))
    41 
    42 extern vfs_out_ops_t ext2fs_ops;
    43 extern libfs_ops_t ext2fs_libfs_ops;
    44 
    45 extern int ext2fs_global_init(void);
    46 extern int ext2fs_global_fini(void);
     42extern struct arm_machine_ops bbxm_machine_ops;
    4743
    4844#endif
    4945
    50 /**
    51  * @}
     46/** @}
    5247 */
     48
  • kernel/arch/arm32/include/machine_func.h

    r5d230a30 r69146b93  
    108108extern size_t machine_get_irq_count(void);
    109109
     110extern const char * machine_get_platform_name(void);
     111
    110112#endif
    111113
  • kernel/arch/arm32/include/mm/frame.h

    r5d230a30 r69146b93  
    4848#ifdef MACHINE_gta02
    4949#define BOOT_PAGE_TABLE_ADDRESS  0x30010000
     50#elif defined MACHINE_beagleboardxm
     51#define BOOT_PAGE_TABLE_ADDRESS  0x80008000
    5052#else
    5153#define BOOT_PAGE_TABLE_ADDRESS  0x00008000
     
    5759#ifdef MACHINE_gta02
    5860#define PHYSMEM_START_ADDR      0x30008000
     61#elif defined MACHINE_beagleboardxm
     62#define PHYSMEM_START_ADDR      0x80000000
    5963#else
    6064#define PHYSMEM_START_ADDR      0x00000000
  • kernel/arch/arm32/include/mm/page.h

    r5d230a30 r69146b93  
    4646#define PAGE_SIZE       FRAME_SIZE
    4747
     48#ifdef MACHINE_beagleboardxm
     49#ifndef __ASM__
     50#       define KA2PA(x) ((uintptr_t) (x))
     51#       define PA2KA(x) ((uintptr_t) (x))
     52#else
     53#       define KA2PA(x) (x)
     54#       define PA2KA(x) (x)
     55#endif
     56#else
    4857#ifndef __ASM__
    4958#       define KA2PA(x) (((uintptr_t) (x)) - 0x80000000)
     
    5362#       define PA2KA(x) ((x) + 0x80000000)
    5463#endif
     64#endif
    5565
    5666/* Number of entries in each level. */
    57 #define PTL0_ENTRIES_ARCH       (1 << 12)       /* 4096 */
    58 #define PTL1_ENTRIES_ARCH       0
    59 #define PTL2_ENTRIES_ARCH       0
     67#define PTL0_ENTRIES_ARCH       (1 << 12)       /* 4096 */
     68#define PTL1_ENTRIES_ARCH       0
     69#define PTL2_ENTRIES_ARCH       0
    6070/* coarse page tables used (256 * 4 = 1KB per page) */
    61 #define PTL3_ENTRIES_ARCH       (1 << 8)        /* 256 */
     71#define PTL3_ENTRIES_ARCH       (1 << 8)        /* 256 */
    6272
    6373/* Page table sizes for each level. */
    64 #define PTL0_SIZE_ARCH          FOUR_FRAMES
    65 #define PTL1_SIZE_ARCH          0
    66 #define PTL2_SIZE_ARCH          0
    67 #define PTL3_SIZE_ARCH          ONE_FRAME
     74#define PTL0_SIZE_ARCH          FOUR_FRAMES
     75#define PTL1_SIZE_ARCH          0
     76#define PTL2_SIZE_ARCH          0
     77#define PTL3_SIZE_ARCH          ONE_FRAME
    6878
    6979/* Macros calculating indices into page tables for each level. */
    70 #define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 20) & 0xfff)
    71 #define PTL1_INDEX_ARCH(vaddr)  0
    72 #define PTL2_INDEX_ARCH(vaddr)  0
    73 #define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x0ff)
     80#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 20) & 0xfff)
     81#define PTL1_INDEX_ARCH(vaddr)  0
     82#define PTL2_INDEX_ARCH(vaddr)  0
     83#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x0ff)
    7484
    7585/* Get PTE address accessors for each level. */
    7686#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
    77         ((pte_t *) ((((pte_t *)(ptl0))[(i)].l0).coarse_table_addr << 10))
     87        ((pte_t *) ((((pte_t *)(ptl0))[(i)].l0).coarse_table_addr << 10))
    7888#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
    79         (ptl1)
     89        (ptl1)
    8090#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
    81         (ptl2)
     91        (ptl2)
    8292#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
    83         ((uintptr_t) ((((pte_t *)(ptl3))[(i)].l1).frame_base_addr << 12))
     93        ((uintptr_t) ((((pte_t *)(ptl3))[(i)].l1).frame_base_addr << 12))
    8494
    8595/* Set PTE address accessors for each level. */
    8696#define SET_PTL0_ADDRESS_ARCH(ptl0) \
    87         (set_ptl0_addr((pte_t *) (ptl0)))
     97        (set_ptl0_addr((pte_t *) (ptl0)))
    8898#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
    89         (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
     99        (((pte_t *) (ptl0))[(i)].l0.coarse_table_addr = (a) >> 10)
    90100#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
    91101#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
    92102#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
    93         (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
     103        (((pte_t *) (ptl3))[(i)].l1.frame_base_addr = (a) >> 12)
    94104
    95105/* Get PTE flags accessors for each level. */
    96106#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
    97         get_pt_level0_flags((pte_t *) (ptl0), (size_t) (i))
     107        get_pt_level0_flags((pte_t *) (ptl0), (size_t) (i))
    98108#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
    99         PAGE_PRESENT
     109        PAGE_PRESENT
    100110#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
    101         PAGE_PRESENT
     111        PAGE_PRESENT
    102112#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
    103         get_pt_level1_flags((pte_t *) (ptl3), (size_t) (i))
     113        get_pt_level1_flags((pte_t *) (ptl3), (size_t) (i))
    104114
    105115/* Set PTE flags accessors for each level. */
    106116#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
    107         set_pt_level0_flags((pte_t *) (ptl0), (size_t) (i), (x))
     117        set_pt_level0_flags((pte_t *) (ptl0), (size_t) (i), (x))
    108118#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
    109119#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
     
    119129        set_pt_level1_present((pte_t *) (ptl3), (size_t) (i))
    120130
    121 /* Macros for querying the last-level PTE entries. */
    122 #define PTE_VALID_ARCH(pte) \
    123         (*((uint32_t *) (pte)) != 0)
    124 #define PTE_PRESENT_ARCH(pte) \
    125         (((pte_t *) (pte))->l0.descriptor_type != 0)
    126 #define PTE_GET_FRAME_ARCH(pte) \
    127         (((pte_t *) (pte))->l1.frame_base_addr << FRAME_WIDTH)
    128 #define PTE_WRITABLE_ARCH(pte) \
    129         (((pte_t *) (pte))->l1.access_permission_0 == PTE_AP_USER_RW_KERNEL_RW)
    130 #define PTE_EXECUTABLE_ARCH(pte) \
    131         1
    132 
    133 #ifndef __ASM__
    134 
    135 /** Level 0 page table entry. */
    136 typedef struct {
    137         /* 0b01 for coarse tables, see below for details */
    138         unsigned descriptor_type : 2;
    139         unsigned impl_specific : 3;
    140         unsigned domain : 4;
    141         unsigned should_be_zero : 1;
    142 
    143         /* Pointer to the coarse 2nd level page table (holding entries for small
    144          * (4KB) or large (64KB) pages. ARM also supports fine 2nd level page
    145          * tables that may hold even tiny pages (1KB) but they are bigger (4KB
    146          * per table in comparison with 1KB per the coarse table)
    147          */
    148         unsigned coarse_table_addr : 22;
    149 } ATTRIBUTE_PACKED pte_level0_t;
    150 
    151 /** Level 1 page table entry (small (4KB) pages used). */
    152 typedef struct {
    153 
    154         /* 0b10 for small pages */
    155         unsigned descriptor_type : 2;
    156         unsigned bufferable : 1;
    157         unsigned cacheable : 1;
    158 
    159         /* access permissions for each of 4 subparts of a page
    160          * (for each 1KB when small pages used */
    161         unsigned access_permission_0 : 2;
    162         unsigned access_permission_1 : 2;
    163         unsigned access_permission_2 : 2;
    164         unsigned access_permission_3 : 2;
    165         unsigned frame_base_addr : 20;
    166 } ATTRIBUTE_PACKED pte_level1_t;
    167 
    168 typedef union {
    169         pte_level0_t l0;
    170         pte_level1_t l1;
    171 } pte_t;
    172 
    173 /* Level 1 page tables access permissions */
    174 
    175 /** User mode: no access, privileged mode: no access. */
    176 #define PTE_AP_USER_NO_KERNEL_NO        0
    177 
    178 /** User mode: no access, privileged mode: read/write. */
    179 #define PTE_AP_USER_NO_KERNEL_RW        1
    180 
    181 /** User mode: read only, privileged mode: read/write. */
    182 #define PTE_AP_USER_RO_KERNEL_RW        2
    183 
    184 /** User mode: read/write, privileged mode: read/write. */
    185 #define PTE_AP_USER_RW_KERNEL_RW        3
    186 
    187 
    188 /* pte_level0_t and pte_level1_t descriptor_type flags */
    189 
    190 /** pte_level0_t and pte_level1_t "not present" flag (used in descriptor_type). */
    191 #define PTE_DESCRIPTOR_NOT_PRESENT      0
    192 
    193 /** pte_level0_t coarse page table flag (used in descriptor_type). */
    194 #define PTE_DESCRIPTOR_COARSE_TABLE     1
    195 
    196 /** pte_level1_t small page table flag (used in descriptor type). */
    197 #define PTE_DESCRIPTOR_SMALL_PAGE       2
    198 
    199 
    200 /** Sets the address of level 0 page table.
    201  *
    202  * @param pt Pointer to the page table to set.
    203  *
    204  */
    205 NO_TRACE static inline void set_ptl0_addr(pte_t *pt)
    206 {
    207         asm volatile (
    208                 "mcr p15, 0, %[pt], c2, c0, 0\n"
    209                 :: [pt] "r" (pt)
    210         );
    211 }
    212 
    213 
    214 /** Returns level 0 page table entry flags.
    215  *
    216  * @param pt Level 0 page table.
    217  * @param i  Index of the entry to return.
    218  *
    219  */
    220 NO_TRACE static inline int get_pt_level0_flags(pte_t *pt, size_t i)
    221 {
    222         pte_level0_t *p = &pt[i].l0;
    223         int np = (p->descriptor_type == PTE_DESCRIPTOR_NOT_PRESENT);
    224        
    225         return (np << PAGE_PRESENT_SHIFT) | (1 << PAGE_USER_SHIFT) |
    226             (1 << PAGE_READ_SHIFT) | (1 << PAGE_WRITE_SHIFT) |
    227             (1 << PAGE_EXEC_SHIFT) | (1 << PAGE_CACHEABLE_SHIFT);
    228 }
    229 
    230 /** Returns level 1 page table entry flags.
    231  *
    232  * @param pt Level 1 page table.
    233  * @param i  Index of the entry to return.
    234  *
    235  */
    236 NO_TRACE static inline int get_pt_level1_flags(pte_t *pt, size_t i)
    237 {
    238         pte_level1_t *p = &pt[i].l1;
    239        
    240         int dt = p->descriptor_type;
    241         int ap = p->access_permission_0;
    242        
    243         return ((dt == PTE_DESCRIPTOR_NOT_PRESENT) << PAGE_PRESENT_SHIFT) |
    244             ((ap == PTE_AP_USER_RO_KERNEL_RW) << PAGE_READ_SHIFT) |
    245             ((ap == PTE_AP_USER_RW_KERNEL_RW) << PAGE_READ_SHIFT) |
    246             ((ap == PTE_AP_USER_RW_KERNEL_RW) << PAGE_WRITE_SHIFT) |
    247             ((ap != PTE_AP_USER_NO_KERNEL_RW) << PAGE_USER_SHIFT) |
    248             ((ap == PTE_AP_USER_NO_KERNEL_RW) << PAGE_READ_SHIFT) |
    249             ((ap == PTE_AP_USER_NO_KERNEL_RW) << PAGE_WRITE_SHIFT) |
    250             (1 << PAGE_EXEC_SHIFT) |
    251             (p->bufferable << PAGE_CACHEABLE);
    252 }
    253 
    254 /** Sets flags of level 0 page table entry.
    255  *
    256  * @param pt    level 0 page table
    257  * @param i     index of the entry to be changed
    258  * @param flags new flags
    259  *
    260  */
    261 NO_TRACE static inline void set_pt_level0_flags(pte_t *pt, size_t i, int flags)
    262 {
    263         pte_level0_t *p = &pt[i].l0;
    264        
    265         if (flags & PAGE_NOT_PRESENT) {
    266                 p->descriptor_type = PTE_DESCRIPTOR_NOT_PRESENT;
    267                 /*
    268                  * Ensures that the entry will be recognized as valid when
    269                  * PTE_VALID_ARCH applied.
    270                  */
    271                 p->should_be_zero = 1;
    272         } else {
    273                 p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
    274                 p->should_be_zero = 0;
    275         }
    276 }
    277 
    278 NO_TRACE static inline void set_pt_level0_present(pte_t *pt, size_t i)
    279 {
    280         pte_level0_t *p = &pt[i].l0;
    281 
    282         p->should_be_zero = 0;
    283         write_barrier();
    284         p->descriptor_type = PTE_DESCRIPTOR_COARSE_TABLE;
    285 }
    286 
    287 /** Sets flags of level 1 page table entry.
    288  *
    289  * We use same access rights for the whole page. When page
    290  * is not preset we store 1 in acess_rigts_3 so that at least
    291  * one bit is 1 (to mark correct page entry, see #PAGE_VALID_ARCH).
    292  *
    293  * @param pt    Level 1 page table.
    294  * @param i     Index of the entry to be changed.
    295  * @param flags New flags.
    296  *
    297  */
    298 NO_TRACE static inline void set_pt_level1_flags(pte_t *pt, size_t i, int flags)
    299 {
    300         pte_level1_t *p = &pt[i].l1;
    301        
    302         if (flags & PAGE_NOT_PRESENT)
    303                 p->descriptor_type = PTE_DESCRIPTOR_NOT_PRESENT;
    304         else
    305                 p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
    306        
    307         p->cacheable = p->bufferable = (flags & PAGE_CACHEABLE) != 0;
    308        
    309         /* default access permission */
    310         p->access_permission_0 = p->access_permission_1 =
    311             p->access_permission_2 = p->access_permission_3 =
    312             PTE_AP_USER_NO_KERNEL_RW;
    313        
    314         if (flags & PAGE_USER)  {
    315                 if (flags & PAGE_READ) {
    316                         p->access_permission_0 = p->access_permission_1 =
    317                             p->access_permission_2 = p->access_permission_3 =
    318                             PTE_AP_USER_RO_KERNEL_RW;
    319                 }
    320                 if (flags & PAGE_WRITE) {
    321                         p->access_permission_0 = p->access_permission_1 =
    322                             p->access_permission_2 = p->access_permission_3 =
    323                             PTE_AP_USER_RW_KERNEL_RW;
    324                 }
    325         }
    326 }
    327 
    328 NO_TRACE static inline void set_pt_level1_present(pte_t *pt, size_t i)
    329 {
    330         pte_level1_t *p = &pt[i].l1;
    331 
    332         p->descriptor_type = PTE_DESCRIPTOR_SMALL_PAGE;
    333 }
    334        
    335 extern void page_arch_init(void);
    336 
    337 #endif /* __ASM__ */
     131#if defined(PROCESSOR_armv6) | defined(PROCESSOR_armv7_a)
     132#include "page_armv6.h"
     133#elif defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
     134#include "page_armv4.h"
     135#else
     136#error "Unsupported architecture"
     137#endif
    338138
    339139#endif
  • kernel/arch/arm32/include/mm/page_fault.h

    r5d230a30 r69146b93  
    4040
    4141
    42 /** Decribes CP15 "fault status register" (FSR). */
    43 typedef struct {
    44         unsigned status : 3;
    45         unsigned domain : 4;
    46         unsigned zero : 1;
    47         unsigned should_be_zero : 24;
    48 } ATTRIBUTE_PACKED fault_status_t;
    49 
    50 
    51 /** Help union used for casting integer value into #fault_status_t. */
     42/** Describes CP15 "fault status register" (FSR).
     43 *
     44 * See ARM Architecture Reference Manual ch. B4.9.6 (pdf p.743).
     45 */
    5246typedef union {
    53         fault_status_t fs;
    54         uint32_t dummy;
    55 } fault_status_union_t;
     47        struct {
     48                unsigned status : 4;
     49                unsigned domain : 4;
     50                unsigned zero : 1;
     51                unsigned lpae : 1; /**< Needs LPAE support implemented */
     52                unsigned fs : 1; /**< armv6+ mandated, earlier IPLM. DEFINED */
     53                unsigned wr : 1; /**< armv6+ only */
     54                unsigned ext : 1 ; /**< external abort */
     55                unsigned cm : 1; /**< Cache maintenance, needs LPAE support */
     56                unsigned should_be_zero : 18;
     57        } data;
     58        struct {
     59                unsigned status : 4;
     60                unsigned sbz0 : 6;
     61                unsigned fs : 1;
     62                unsigned should_be_zero : 21;
     63        } inst;
     64        uint32_t raw;
     65} fault_status_t;
    5666
    5767
  • kernel/arch/arm32/include/regutils.h

    r5d230a30 r69146b93  
    4141#define STATUS_REG_MODE_MASK         0x1f
    4242
    43 #define CP15_R1_HIGH_VECTORS_BIT     (1 << 13)
     43/* Control register bit values, see ch. B4.1.130 of ARM Architecture Reference
     44 * Manual ARMv7-A and ARMv7-R edition, page 1687 */
     45#define CP15_R1_MMU_EN            (1 << 0)
     46#define CP15_R1_ALIGN_CHECK_EN    (1 << 1)  /* Allow alignment check */
     47#define CP15_R1_CACHE_EN          (1 << 2)
     48#define CP15_R1_CP15_BARRIER_EN   (1 << 5)
     49#define CP15_R1_B_EN              (1 << 7)  /* ARMv6- only big endian switch */
     50#define CP15_R1_SWAP_EN           (1 << 10)
     51#define CP15_R1_BRANCH_PREDICT_EN (1 << 11)
     52#define CP15_R1_INST_CACHE_EN     (1 << 12)
     53#define CP15_R1_HIGH_VECTORS_EN   (1 << 13)
     54#define CP15_R1_ROUND_ROBIN_EN    (1 << 14)
     55#define CP15_R1_HW_ACCESS_FLAG_EN (1 << 17)
     56#define CP15_R1_WRITE_XN_EN       (1 << 19) /* Only if virt. supported */
     57#define CP15_R1_USPCE_WRITE_XN_EN (1 << 20) /* Only if virt. supported */
     58#define CP15_R1_FAST_IRQ_EN       (1 << 21) /* Disable impl.specific features */
     59#define CP15_R1_UNALIGNED_EN      (1 << 22) /* Must be 1 on armv7 */
     60#define CP15_R1_IRQ_VECTORS_EN    (1 << 24)
     61#define CP15_R1_BIG_ENDIAN_EXC    (1 << 25)
     62#define CP15_R1_NMFI_EN           (1 << 27)
     63#define CP15_R1_TEX_REMAP_EN      (1 << 28)
     64#define CP15_R1_ACCESS_FLAG_EN    (1 << 29)
     65#define CP15_R1_THUMB_EXC_EN      (1 << 30)
    4466
    4567/* ARM Processor Operation Modes */
  • kernel/arch/arm32/src/arm32.c

    r5d230a30 r69146b93  
    4949#include <str.h>
    5050#include <arch/ras.h>
     51#include <sysinfo/sysinfo.h>
    5152
    5253/** Performs arm32-specific initialization before main_bsp() is called. */
     
    116117{
    117118        machine_input_init();
     119        const char *platform = machine_get_platform_name();
     120
     121        sysinfo_set_item_data("platform", NULL, (void *) platform,
     122            str_size(platform));
    118123}
    119124
  • kernel/arch/arm32/src/cpu/cpu.c

    r5d230a30 r69146b93  
    4444/** Implementators (vendor) names */
    4545static const char *imp_data[] = {
    46         "?",                                    /* IMP_DATA_START_OFFSET */
    47         "ARM Ltd",                              /* 0x41 */
    48         "",                                     /* 0x42 */
    49         "",                                     /* 0x43 */
    50         "Digital Equipment Corporation",        /* 0x44 */
    51         "", "", "", "", "", "", "", "", "", "", /* 0x45 - 0x4e */
    52         "", "", "", "", "", "", "", "", "", "", /* 0x4f - 0x58 */
    53         "", "", "", "", "", "", "", "", "", "", /* 0x59 - 0x62 */
    54         "", "", "", "", "", "",                 /* 0x63 - 0x68 */
    55         "Intel Corporation"                     /* 0x69 */
     46        "?",                                     /* IMP_DATA_START_OFFSET */
     47        "ARM Limited",                           /* 0x41 */
     48        "", "",                                  /* 0x42 - 0x43 */
     49        "Digital Equipment Corporation",         /* 0x44 */
     50        "", "", "", "", "", "", "", "",          /* 0x45 - 0x4c */
     51        "Motorola, Freescale Semicondutor Inc.", /* 0x4d */
     52        "", "", "",                              /* 0x4e - 0x50 */
     53        "Qualcomm Inc.",                         /* 0x51 */
     54        "", "", "", "",                          /* 0x52 - 0x55 */
     55        "Marvell Semiconductor",                 /* 0x56 */
     56        "", "", "", "", "", "", "", "", "", "",  /* 0x57 - 0x60 */
     57        "", "", "", "", "", "", "", "",          /* 0x61 - 0x68 */
     58        "Intel Corporation"                      /* 0x69 */
    5659};
    5760
     
    9497}
    9598
    96 /** Does nothing on ARM. */
     99/** Enables unaligned access and caching for armv6+ */
    97100void cpu_arch_init(void)
    98101{
     102#if defined(PROCESSOR_armv7_a) | defined(PROCESSOR_armv6)
     103        uint32_t control_reg = 0;
     104        asm volatile (
     105                "mrc p15, 0, %[control_reg], c1, c0"
     106                : [control_reg] "=r" (control_reg)
     107        );
     108       
     109        /* Turn off tex remap, RAZ ignores writes prior to armv7 */
     110        control_reg &= ~CP15_R1_TEX_REMAP_EN;
     111        /* Turn off accessed flag, RAZ ignores writes prior to armv7 */
     112        control_reg &= ~(CP15_R1_ACCESS_FLAG_EN | CP15_R1_HW_ACCESS_FLAG_EN);
     113        /* Enable unaligned access, RAZ ignores writes prior to armv6
     114         * switchable on armv6, RAO ignores writes on armv7,
     115         * see ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition
     116         * L.3.1 (p. 2456) */
     117        control_reg |= CP15_R1_UNALIGNED_EN;
     118        /* Disable alignment checks, this turns unaligned access to undefined,
     119         * unless U bit is set. */
     120        control_reg &= ~CP15_R1_ALIGN_CHECK_EN;
     121        /* Enable caching, On arm prior to armv7 there is only one level
     122         * of caches. Data cache is coherent.
     123         * "This means that the behavior of accesses from the same observer to
     124         * different VAs, that are translated to the same PA
     125         * with the same memory attributes, is fully coherent."
     126         *    ARM Architecture Reference Manual ARMv7-A and ARMv7-R Edition
     127         *    B3.11.1 (p. 1383)
     128 * ICache coherency is elaborated on in barrier.h.
     129         * We are safe to turn these on.
     130         */
     131        control_reg |= CP15_R1_CACHE_EN | CP15_R1_INST_CACHE_EN;
     132       
     133        asm volatile (
     134                "mcr p15, 0, %[control_reg], c1, c0"
     135                :: [control_reg] "r" (control_reg)
     136        );
     137#endif
    99138}
    100139
    101140/** Retrieves processor identification and stores it to #CPU.arch */
    102 void cpu_identify(void) 
     141void cpu_identify(void)
    103142{
    104143        arch_cpu_identify(&CPU->arch);
     
    112151        cpu_arch_t * cpu_arch = &m->arch;
    113152
    114         if ((cpu_arch->imp_num) > 0 &&
    115             (cpu_arch->imp_num < (imp_data_length + IMP_DATA_START_OFFSET))) {
     153        const unsigned imp_offset = cpu_arch->imp_num - IMP_DATA_START_OFFSET;
     154
     155        if (imp_offset < imp_data_length) {
    116156                vendor = imp_data[cpu_arch->imp_num - IMP_DATA_START_OFFSET];
    117157        }
    118158
    119         if ((cpu_arch->arch_num) > 0 &&
    120             (cpu_arch->arch_num < arch_data_length)) {
     159        // TODO CPUs with arch_num == 0xf use CPUID scheme for identification
     160        if (cpu_arch->arch_num < arch_data_length) {
    121161                architecture = arch_data[cpu_arch->arch_num];
    122162        }
  • kernel/arch/arm32/src/exception.c

    r5d230a30 r69146b93  
    117117
    118118#ifdef HIGH_EXCEPTION_VECTORS
    119 /** Activates use of high exception vectors addresses. */
     119/** Activates use of high exception vectors addresses.
     120 *
     121 * "High vectors were introduced into some implementations of ARMv4 and are
     122 * required in ARMv6 implementations. High vectors allow the exception vector
     123 * locations to be moved from their normal address range 0x00000000-0x0000001C
     124 * at the bottom of the 32-bit address space, to an alternative address range
     125 * 0xFFFF0000-0xFFFF001C near the top of the address space. These alternative
     126 * locations are known as the high vectors.
     127 *
     128 * Prior to ARMv6, it is IMPLEMENTATION DEFINED whether the high vectors are
     129 * supported. When they are, a hardware configuration input selects whether
     130 * the normal vectors or the high vectors are to be used from
     131 * reset." ARM Architecture Reference Manual A2.6.11 (p. 64 in the PDF).
     132 *
     133 * ARM920T (gta02) TRM A2.3.5 (PDF p. 36) and ARM926EJ-S (icp) 2.3.2 (PDF p. 42)
     134 * say that armv4 an armv5 chips that we support implement this.
     135 */
    120136static void high_vectors(void)
    121137{
    122         uint32_t control_reg;
    123        
     138        uint32_t control_reg = 0;
    124139        asm volatile (
    125140                "mrc p15, 0, %[control_reg], c1, c0"
     
    128143       
    129144        /* switch on the high vectors bit */
    130         control_reg |= CP15_R1_HIGH_VECTORS_BIT;
     145        control_reg |= CP15_R1_HIGH_VECTORS_EN;
    131146       
    132147        asm volatile (
     
    153168void exception_init(void)
    154169{
     170        // TODO check for availability of high vectors for <= armv5
    155171#ifdef HIGH_EXCEPTION_VECTORS
    156172        high_vectors();
  • kernel/arch/arm32/src/machine_func.c

    r5d230a30 r69146b93  
    4242#include <arch/mach/integratorcp/integratorcp.h>
    4343#include <arch/mach/testarm/testarm.h>
     44#include <arch/mach/beagleboardxm/beagleboardxm.h>
    4445
    4546/** Pointer to machine_ops structure being used. */
     
    5556#elif defined(MACHINE_integratorcp)
    5657        machine_ops = &icp_machine_ops;
     58#elif defined(MACHINE_beagleboardxm)
     59        machine_ops = &bbxm_machine_ops;
    5760#else
    5861#error Machine type not defined.
     
    131134}
    132135
     136const char * machine_get_platform_name(void)
     137{
     138        if (machine_ops->machine_get_platform_name)
     139                return machine_ops->machine_get_platform_name();
     140        return NULL;
     141}
    133142/** @}
    134143 */
  • kernel/arch/arm32/src/mm/page.c

    r5d230a30 r69146b93  
    5252void page_arch_init(void)
    5353{
    54         int flags = PAGE_CACHEABLE;
     54        int flags = PAGE_CACHEABLE | PAGE_EXEC;
    5555        page_mapping_operations = &pt_mapping_operations;
    5656
    5757        page_table_lock(AS_KERNEL, true);
    5858       
    59         uintptr_t cur;
    60 
    6159        /* Kernel identity mapping */
    62         for (cur = PHYSMEM_START_ADDR;
    63             cur < min(config.identity_size, config.physmem_end);
     60        //FIXME: We need to consider the possibility that
     61        //identity_base > identity_size and physmem_end.
     62        //This might lead to overflow if identity_size is too big.
     63        for (uintptr_t cur = PHYSMEM_START_ADDR;
     64            cur < min(KA2PA(config.identity_base) +
     65                config.identity_size, config.physmem_end);
    6466            cur += FRAME_SIZE)
    6567                page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
  • kernel/arch/arm32/src/mm/page_fault.c

    r5d230a30 r69146b93  
    4242#include <print.h>
    4343
    44 /** Returns value stored in fault status register.
     44
     45/**
     46 * FSR encoding ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition.
     47 *
     48 * B3.13.3 page B3-1406 (PDF page 1406)
     49 */
     50typedef enum {
     51        DFSR_SOURCE_ALIGN = 0x0001,
     52        DFSR_SOURCE_CACHE_MAINTENANCE = 0x0004,
     53        DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1 = 0x000c,
     54        DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2 = 0x000e,
     55        DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1 = 0x040c,
     56        DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2 = 0x040e,
     57        DFSR_SOURCE_TRANSLATION_L1 = 0x0005,
     58        DFSR_SOURCE_TRANSLATION_L2 = 0x0007,
     59        DFSR_SOURCE_ACCESS_FLAG_L1 = 0x0003,  /**< @note: This used to be alignment enc. */
     60        DFSR_SOURCE_ACCESS_FLAG_L2 = 0x0006,
     61        DFSR_SOURCE_DOMAIN_L1 = 0x0009,
     62        DFSR_SOURCE_DOMAIN_L2 = 0x000b,
     63        DFSR_SOURCE_PERMISSION_L1 = 0x000d,
     64        DFSR_SOURCE_PERMISSION_L2 = 0x000f,
     65        DFSR_SOURCE_DEBUG = 0x0002,
     66        DFSR_SOURCE_SYNC_EXTERNAL = 0x0008,
     67        DFSR_SOURCE_TLB_CONFLICT = 0x0400,
     68        DFSR_SOURCE_LOCKDOWN = 0x0404, /**< @note: Implementation defined */
     69        DFSR_SOURCE_COPROCESSOR = 0x040a, /**< @note Implementation defined */
     70        DFSR_SOURCE_SYNC_PARITY = 0x0409,
     71        DFSR_SOURCE_ASYNC_EXTERNAL = 0x0406,
     72        DFSR_SOURCE_ASYNC_PARITY = 0x0408,
     73        DFSR_SOURCE_MASK = 0x0000040f,
     74} dfsr_source_t;
     75
     76static inline const char * dfsr_source_to_str(dfsr_source_t source)
     77{
     78        switch (source) {
     79        case DFSR_SOURCE_TRANSLATION_L1:
     80                return "Translation fault L1";
     81        case DFSR_SOURCE_TRANSLATION_L2:
     82                return "Translation fault L2";
     83        case DFSR_SOURCE_PERMISSION_L1:
     84                return "Permission fault L1";
     85        case DFSR_SOURCE_PERMISSION_L2:
     86                return "Permission fault L2";
     87        case DFSR_SOURCE_ALIGN:
     88                return "Alignment fault";
     89        case DFSR_SOURCE_CACHE_MAINTENANCE:
     90                return "Instruction cache maintenance fault";
     91        case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1:
     92                return "Synchronous external abort on translation table walk level 1";
     93        case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2:
     94                return "Synchronous external abort on translation table walk level 2";
     95        case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1:
     96                return "Synchronous parity error on translation table walk level 1";
     97        case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2:
     98                return "Synchronous parity error on translation table walk level 2";
     99        case DFSR_SOURCE_ACCESS_FLAG_L1:
     100                return "Access flag fault L1";
     101        case DFSR_SOURCE_ACCESS_FLAG_L2:
     102                return "Access flag fault L2";
     103        case DFSR_SOURCE_DOMAIN_L1:
     104                return "Domain fault L1";
     105        case DFSR_SOURCE_DOMAIN_L2:
      106                return "Domain fault L2";
     107        case DFSR_SOURCE_DEBUG:
     108                return "Debug event";
     109        case DFSR_SOURCE_SYNC_EXTERNAL:
     110                return "Synchronous external abort";
     111        case DFSR_SOURCE_TLB_CONFLICT:
     112                return "TLB conflict abort";
     113        case DFSR_SOURCE_LOCKDOWN:
     114                return "Lockdown (Implementation defined)";
     115        case DFSR_SOURCE_COPROCESSOR:
     116                return "Coprocessor abort (Implementation defined)";
     117        case DFSR_SOURCE_SYNC_PARITY:
     118                return "Synchronous parity error on memory access";
     119        case DFSR_SOURCE_ASYNC_EXTERNAL:
     120                return "Asynchronous external abort";
     121        case DFSR_SOURCE_ASYNC_PARITY:
     122                return "Asynchronous parity error on memory access";
     123        case DFSR_SOURCE_MASK:
     124                break;
     125        }
     126        return "Unknown data abort";
     127}
     128
     129
      130/** Returns value stored in combined/data fault status register.
    45131 *
    46132 *  @return Value stored in CP15 fault status register (FSR).
    47  */
    48 static inline fault_status_t read_fault_status_register(void)
    49 {
    50         fault_status_union_t fsu;
     133 *
     134 *  "VMSAv6 added a fifth fault status bit (bit[10]) to both the IFSR and DFSR.
     135 *  It is IMPLEMENTATION DEFINED how this bit is encoded in earlier versions of
     136 *  the architecture. A write flag (bit[11] of the DFSR) has also been
     137 *  introduced."
     138 *  ARM Architecture Reference Manual version i ch. B4.6 (PDF p. 719)
     139 *
     140 *  See ch. B4.9.6 for location of data/instruction FSR.
     141 *
     142 */
     143static inline fault_status_t read_data_fault_status_register(void)
     144{
     145        fault_status_t fsu;
    51146       
    52         /* fault status is stored in CP15 register 5 */
     147        /* Combined/Data fault status is stored in CP15 register 5, c0. */
    53148        asm volatile (
    54149                "mrc p15, 0, %[dummy], c5, c0, 0"
    55                 : [dummy] "=r" (fsu.dummy)
     150                : [dummy] "=r" (fsu.raw)
    56151        );
    57152       
    58         return fsu.fs;
    59 }
    60 
    61 /** Returns FAR (fault address register) content.
    62  *
    63  * @return FAR (fault address register) content (address that caused a page
     153        return fsu;
     154}
     155
     156/** Returns DFAR (fault address register) content.
     157 *
     158 * This register is equivalent to FAR on pre armv6 machines.
     159 *
     160 * @return DFAR (fault address register) content (address that caused a page
    64161 *         fault)
    65162 */
    66 static inline uintptr_t read_fault_address_register(void)
     163static inline uintptr_t read_data_fault_address_register(void)
    67164{
    68165        uintptr_t ret;
     
    77174}
    78175
     176#if defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
    79177/** Decides whether read or write into memory is requested.
    80178 *
     
    97195                panic("page_fault - instruction does not access memory "
    98196                    "(instr_code: %#0" PRIx32 ", badvaddr:%p).",
    99                     instr_union.pc, (void *) badvaddr);
     197                    *(uint32_t*)instr_union.instr, (void *) badvaddr);
    100198                return PF_ACCESS_EXEC;
    101199        }
     
    136234            inst, (void *) badvaddr);
    137235}
     236#endif
    138237
    139238/** Handles "data abort" exception (load or store at invalid address).
     
    145244void data_abort(unsigned int exc_no, istate_t *istate)
    146245{
    147         fault_status_t fsr __attribute__ ((unused)) =
    148             read_fault_status_register();
    149         uintptr_t badvaddr = read_fault_address_register();
    150 
    151         pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
    152 
    153         int ret = as_page_fault(badvaddr, access, istate);
    154 
    155         if (ret == AS_PF_FAULT) {
    156                 fault_if_from_uspace(istate, "Page fault: %#x.", badvaddr);
    157                 panic_memtrap(istate, access, badvaddr, NULL);
     246        const uintptr_t badvaddr = read_data_fault_address_register();
     247        const fault_status_t fsr = read_data_fault_status_register();
     248        const dfsr_source_t source = fsr.raw & DFSR_SOURCE_MASK;
     249
     250        switch (source) {
     251        case DFSR_SOURCE_TRANSLATION_L1:
     252        case DFSR_SOURCE_TRANSLATION_L2:
     253        case DFSR_SOURCE_PERMISSION_L1:
     254        case DFSR_SOURCE_PERMISSION_L2:
     255                /* Page fault is handled further down */
     256                break;
     257        case DFSR_SOURCE_ALIGN:
     258        case DFSR_SOURCE_CACHE_MAINTENANCE:
     259        case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L1:
     260        case DFSR_SOURCE_SYNC_EXTERNAL_TRANSLATION_L2:
     261        case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L1:
     262        case DFSR_SOURCE_SYNC_PARITY_TRANSLATION_L2:
     263        case DFSR_SOURCE_ACCESS_FLAG_L1:
     264        case DFSR_SOURCE_ACCESS_FLAG_L2:
     265        case DFSR_SOURCE_DOMAIN_L1:
     266        case DFSR_SOURCE_DOMAIN_L2:
     267        case DFSR_SOURCE_DEBUG:
     268        case DFSR_SOURCE_SYNC_EXTERNAL:
     269        case DFSR_SOURCE_TLB_CONFLICT:
     270        case DFSR_SOURCE_LOCKDOWN:
     271        case DFSR_SOURCE_COPROCESSOR:
     272        case DFSR_SOURCE_SYNC_PARITY:
     273        case DFSR_SOURCE_ASYNC_EXTERNAL:
     274        case DFSR_SOURCE_ASYNC_PARITY:
     275        case DFSR_SOURCE_MASK:
     276                /* Weird abort stuff */
     277                fault_if_from_uspace(istate, "Unhandled abort %s at address: "
     278                    "%#x.", dfsr_source_to_str(source), badvaddr);
     279                panic("Unhandled abort %s at address: %#x.",
     280                    dfsr_source_to_str(source), badvaddr);
    158281        }
     282
     283#if defined(PROCESSOR_armv6) | defined(PROCESSOR_armv7_a)
     284        const pf_access_t access =
     285            fsr.data.wr ? PF_ACCESS_WRITE : PF_ACCESS_READ;
     286#elif defined(PROCESSOR_armv4) | defined(PROCESSOR_armv5)
     287        const pf_access_t access = get_memory_access_type(istate->pc, badvaddr);
     288#else
     289#error "Unsupported architecture"
     290#endif
     291        as_page_fault(badvaddr, access, istate);
    159292}
    160293
     
    167300void prefetch_abort(unsigned int exc_no, istate_t *istate)
    168301{
    169         int ret = as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
    170 
    171         if (ret == AS_PF_FAULT) {
    172                 fault_if_from_uspace(istate,
    173                     "Page fault - prefetch_abort: %#x.", istate->pc);
    174                 panic_memtrap(istate, PF_ACCESS_EXEC, istate->pc, NULL);
    175         }
     302        as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
    176303}
    177304
  • kernel/arch/ia64/include/mm/as.h

    r5d230a30 r69146b93  
    4343#define USER_ADDRESS_SPACE_END_ARCH      UINT64_C(0xdfffffffffffffff)
    4444
    45 #define USTACK_ADDRESS_ARCH  UINT64_C(0x0000000ff0000000)
    46 
    4745typedef struct {
    4846} as_arch_t;
  • kernel/arch/ia64/src/mm/tlb.c

    r5d230a30 r69146b93  
    113113        va = page;
    114114       
    115         rr.word = rr_read(VA2VRN(va));
    116         if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
     115        rr.word = rr_read(VA2VRN(page));
     116        if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
    117117                /*
    118118                 * The selected region register does not contain required RID.
     
    122122               
    123123                rr0 = rr;
    124                 rr0.map.rid = ASID2RID(asid, VA2VRN(va));
    125                 rr_write(VA2VRN(va), rr0.word);
     124                rr0.map.rid = ASID2RID(asid, VA2VRN(page));
     125                rr_write(VA2VRN(page), rr0.word);
    126126                srlz_d();
    127127                srlz_i();
     
    139139        case 1: /* cnt 4 - 15 */
    140140                ps = PAGE_WIDTH + 2;
    141                 va &= ~((1 << ps) - 1);
     141                va &= ~((1UL << ps) - 1);
    142142                break;
    143143        case 2: /* cnt 16 - 63 */
    144144                ps = PAGE_WIDTH + 4;
    145                 va &= ~((1 << ps) - 1);
     145                va &= ~((1UL << ps) - 1);
    146146                break;
    147147        case 3: /* cnt 64 - 255 */
    148148                ps = PAGE_WIDTH + 6;
    149                 va &= ~((1 << ps) - 1);
     149                va &= ~((1UL << ps) - 1);
    150150                break;
    151151        case 4: /* cnt 256 - 1023 */
    152152                ps = PAGE_WIDTH + 8;
    153                 va &= ~((1 << ps) - 1);
     153                va &= ~((1UL << ps) - 1);
    154154                break;
    155155        case 5: /* cnt 1024 - 4095 */
    156156                ps = PAGE_WIDTH + 10;
    157                 va &= ~((1 << ps) - 1);
     157                va &= ~((1UL << ps) - 1);
    158158                break;
    159159        case 6: /* cnt 4096 - 16383 */
    160160                ps = PAGE_WIDTH + 12;
    161                 va &= ~((1 << ps) - 1);
     161                va &= ~((1UL << ps) - 1);
    162162                break;
    163163        case 7: /* cnt 16384 - 65535 */
    164164        case 8: /* cnt 65536 - (256K - 1) */
    165165                ps = PAGE_WIDTH + 14;
    166                 va &= ~((1 << ps) - 1);
     166                va &= ~((1UL << ps) - 1);
    167167                break;
    168168        default:
    169169                ps = PAGE_WIDTH + 18;
    170                 va &= ~((1 << ps) - 1);
    171                 break;
    172         }
    173        
    174         for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
     170                va &= ~((1UL << ps) - 1);
     171                break;
     172        }
     173       
     174        for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
    175175                asm volatile (
    176176                        "ptc.l %[va], %[ps] ;;"
     
    183183       
    184184        if (restore_rr) {
    185                 rr_write(VA2VRN(va), rr.word);
     185                rr_write(VA2VRN(page), rr.word);
    186186                srlz_d();
    187187                srlz_i();
     
    501501                 * Forward the page fault to address space page fault handler.
    502502                 */
    503                 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
    504                         fault_if_from_uspace(istate, "Page fault at %p.",
    505                             (void *) va);
    506                         panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
    507                 }
     503                as_page_fault(va, PF_ACCESS_EXEC, istate);
    508504        }
    509505}
     
    619615                 * handler.
    620616                 */
    621                 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
    622                         fault_if_from_uspace(istate, "Page fault at %p.",
    623                             (void *) va);
    624                         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
    625                 }
     617                as_page_fault(va, PF_ACCESS_READ, istate);
    626618        }
    627619}
     
    667659                dtc_pte_copy(t);
    668660        } else {
    669                 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
    670                         fault_if_from_uspace(istate, "Page fault at %p.",
    671                             (void *) va);
    672                         panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
    673                 }
     661                as_page_fault(va, PF_ACCESS_WRITE, istate);
    674662        }
    675663}
     
    700688                itc_pte_copy(t);
    701689        } else {
    702                 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
    703                         fault_if_from_uspace(istate, "Page fault at %p.",
    704                             (void *) va);
    705                         panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
    706                 }
     690                as_page_fault(va, PF_ACCESS_EXEC, istate);
    707691        }
    708692}
     
    764748        ASSERT((t) && (t->p));
    765749        ASSERT(!t->w);
    766         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
    767                 fault_if_from_uspace(istate, "Page fault at %p.",
    768                     (void *) va);
    769                 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
    770         }
     750        as_page_fault(va, PF_ACCESS_WRITE, istate);
    771751}
    772752
     
    799779                        dtc_pte_copy(t);
    800780        } else {
    801                 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
    802                         fault_if_from_uspace(istate, "Page fault at %p.",
    803                             (void *) va);
    804                         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
    805                 }
     781                as_page_fault(va, PF_ACCESS_READ, istate);
    806782        }
    807783}
  • kernel/arch/mips32/src/mm/tlb.c

    r5d230a30 r69146b93  
    4848#include <symtab.h>
    4949
    50 static void tlb_refill_fail(istate_t *);
    51 static void tlb_invalid_fail(istate_t *);
    52 static void tlb_modified_fail(istate_t *);
    53 
    54 static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *, int *);
     50static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *);
    5551
    5652/** Initialize TLB.
     
    9288        uintptr_t badvaddr;
    9389        pte_t *pte;
    94         int pfrc;
    9590       
    9691        badvaddr = cp0_badvaddr_read();
    9792        asid = AS->asid;
    9893       
    99         pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    100         if (!pte) {
    101                 switch (pfrc) {
    102                 case AS_PF_FAULT:
    103                         goto fail;
    104                         break;
    105                 case AS_PF_DEFER:
    106                         /*
    107                          * The page fault came during copy_from_uspace()
    108                          * or copy_to_uspace().
    109                          */
    110                         return;
    111                 default:
    112                         panic("Unexpected pfrc (%d).", pfrc);
     94        pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
     95        if (pte) {
     96                /*
     97                 * Record access to PTE.
     98                 */
     99                pte->a = 1;
     100
     101                tlb_prepare_entry_hi(&hi, asid, badvaddr);
     102                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d,
     103                    pte->cacheable, pte->pfn);
     104
     105                /*
     106                 * New entry is to be inserted into TLB
     107                 */
     108                cp0_entry_hi_write(hi.value);
     109                if ((badvaddr / PAGE_SIZE) % 2 == 0) {
     110                        cp0_entry_lo0_write(lo.value);
     111                        cp0_entry_lo1_write(0);
     112                } else {
     113                        cp0_entry_lo0_write(0);
     114                        cp0_entry_lo1_write(lo.value);
    113115                }
    114         }
    115 
    116         /*
    117          * Record access to PTE.
    118          */
    119         pte->a = 1;
    120 
    121         tlb_prepare_entry_hi(&hi, asid, badvaddr);
    122         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
    123             pte->pfn);
    124 
    125         /*
    126          * New entry is to be inserted into TLB
    127          */
    128         cp0_entry_hi_write(hi.value);
    129         if ((badvaddr / PAGE_SIZE) % 2 == 0) {
    130                 cp0_entry_lo0_write(lo.value);
    131                 cp0_entry_lo1_write(0);
    132         }
    133         else {
    134                 cp0_entry_lo0_write(0);
    135                 cp0_entry_lo1_write(lo.value);
    136         }
    137         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    138         tlbwr();
    139 
    140         return;
    141        
    142 fail:
    143         tlb_refill_fail(istate);
     116                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     117                tlbwr();
     118        }
    144119}
    145120
     
    155130        entry_hi_t hi;
    156131        pte_t *pte;
    157         int pfrc;
    158132
    159133        badvaddr = cp0_badvaddr_read();
     
    168142        index.value = cp0_index_read();
    169143
    170         /*
    171          * Fail if the entry is not in TLB.
    172          */
    173         if (index.p) {
    174                 printf("TLB entry not found.\n");
    175                 goto fail;
    176         }
    177 
    178         pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    179         if (!pte) {
    180                 switch (pfrc) {
    181                 case AS_PF_FAULT:
    182                         goto fail;
    183                         break;
    184                 case AS_PF_DEFER:
    185                         /*
    186                          * The page fault came during copy_from_uspace()
    187                          * or copy_to_uspace().
    188                          */
    189                         return;
    190                 default:
    191                         panic("Unexpected pfrc (%d).", pfrc);
    192                 }
    193         }
    194 
    195         /*
    196          * Read the faulting TLB entry.
    197          */
    198         tlbr();
    199 
    200         /*
    201          * Record access to PTE.
    202          */
    203         pte->a = 1;
    204 
    205         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
    206             pte->pfn);
    207 
    208         /*
    209          * The entry is to be updated in TLB.
    210          */
    211         if ((badvaddr / PAGE_SIZE) % 2 == 0)
    212                 cp0_entry_lo0_write(lo.value);
    213         else
    214                 cp0_entry_lo1_write(lo.value);
    215         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    216         tlbwi();
    217 
    218         return;
    219        
    220 fail:
    221         tlb_invalid_fail(istate);
     144        ASSERT(!index.p);
     145
     146        pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
     147        if (pte) {
     148                /*
     149                 * Read the faulting TLB entry.
     150                 */
     151                tlbr();
     152
     153                /*
     154                 * Record access to PTE.
     155                 */
     156                pte->a = 1;
     157
     158                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d,
     159                    pte->cacheable, pte->pfn);
     160
     161                /*
     162                 * The entry is to be updated in TLB.
     163                 */
     164                if ((badvaddr / PAGE_SIZE) % 2 == 0)
     165                        cp0_entry_lo0_write(lo.value);
     166                else
     167                        cp0_entry_lo1_write(lo.value);
     168                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     169                tlbwi();
     170        }
    222171}
    223172
     
    233182        entry_hi_t hi;
    234183        pte_t *pte;
    235         int pfrc;
    236184
    237185        badvaddr = cp0_badvaddr_read();
     
    249197         * Fail if the entry is not in TLB.
    250198         */
    251         if (index.p) {
    252                 printf("TLB entry not found.\n");
    253                 goto fail;
    254         }
    255 
    256         pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
    257         if (!pte) {
    258                 switch (pfrc) {
    259                 case AS_PF_FAULT:
    260                         goto fail;
    261                         break;
    262                 case AS_PF_DEFER:
    263                         /*
    264                          * The page fault came during copy_from_uspace()
    265                          * or copy_to_uspace().
    266                          */
    267                         return;
    268                 default:
    269                         panic("Unexpected pfrc (%d).", pfrc);
    270                 }
    271         }
    272 
    273         /*
    274          * Read the faulting TLB entry.
    275          */
    276         tlbr();
    277 
    278         /*
    279          * Record access and write to PTE.
    280          */
    281         pte->a = 1;
    282         pte->d = 1;
    283 
    284         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable,
    285             pte->pfn);
    286 
    287         /*
    288          * The entry is to be updated in TLB.
    289          */
    290         if ((badvaddr / PAGE_SIZE) % 2 == 0)
    291                 cp0_entry_lo0_write(lo.value);
    292         else
    293                 cp0_entry_lo1_write(lo.value);
    294         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    295         tlbwi();
    296 
    297         return;
    298        
    299 fail:
    300         tlb_modified_fail(istate);
    301 }
    302 
    303 void tlb_refill_fail(istate_t *istate)
    304 {
    305         uintptr_t va = cp0_badvaddr_read();
    306        
    307         fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
    308             (void *) va);
    309         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
    310 }
    311 
    312 
    313 void tlb_invalid_fail(istate_t *istate)
    314 {
    315         uintptr_t va = cp0_badvaddr_read();
    316        
    317         fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
    318             (void *) va);
    319         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
    320 }
    321 
    322 void tlb_modified_fail(istate_t *istate)
    323 {
    324         uintptr_t va = cp0_badvaddr_read();
    325        
    326         fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
    327             (void *) va);
    328         panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
     199        ASSERT(!index.p);
     200
     201        pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate);
     202        if (pte) {
     203                /*
     204                 * Read the faulting TLB entry.
     205                 */
     206                tlbr();
     207
     208                /*
     209                 * Record access and write to PTE.
     210                 */
     211                pte->a = 1;
     212                pte->d = 1;
     213
     214                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w,
     215                    pte->cacheable, pte->pfn);
     216
     217                /*
     218                 * The entry is to be updated in TLB.
     219                 */
     220                if ((badvaddr / PAGE_SIZE) % 2 == 0)
     221                        cp0_entry_lo0_write(lo.value);
     222                else
     223                        cp0_entry_lo1_write(lo.value);
     224                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     225                tlbwi();
     226        }
    329227}
    330228
     
    334232 * @param access        Access mode that caused the fault.
    335233 * @param istate        Pointer to interrupted state.
    336  * @param pfrc          Pointer to variable where as_page_fault() return code
    337  *                      will be stored.
    338234 *
    339235 * @return              PTE on success, NULL otherwise.
    340236 */
    341 pte_t *
    342 find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate,
    343     int *pfrc)
     237pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate)
    344238{
    345239        entry_hi_t hi;
     
    348242        hi.value = cp0_entry_hi_read();
    349243
    350         /*
    351          * Handler cannot succeed if the ASIDs don't match.
    352          */
    353         if (hi.asid != AS->asid) {
    354                 printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
    355                 return NULL;
    356         }
     244        ASSERT(hi.asid == AS->asid);
    357245
    358246        /*
     
    366254                 */
    367255                return pte;
    368         } else {
    369                 int rc;
    370                
    371                 /*
    372                  * Mapping not found in page tables.
    373                  * Resort to higher-level page fault handler.
    374                  */
    375                 switch (rc = as_page_fault(badvaddr, access, istate)) {
    376                 case AS_PF_OK:
    377                         /*
    378                          * The higher-level page fault handler succeeded,
    379                          * The mapping ought to be in place.
    380                          */
    381                         pte = page_mapping_find(AS, badvaddr, true);
    382                         ASSERT(pte && pte->p);
    383                         ASSERT(pte->w || access != PF_ACCESS_WRITE);
    384                         return pte;
    385                 case AS_PF_DEFER:
    386                         *pfrc = AS_PF_DEFER;
    387                         return NULL;
    388                 case AS_PF_FAULT:
    389                         *pfrc = AS_PF_FAULT;
    390                         return NULL;
    391                 default:
    392                         panic("Unexpected rc (%d).", rc);
    393                 }
    394                
    395         }
     256        }
     257
     258        /*
     259         * Mapping not found in page tables.
     260         * Resort to higher-level page fault handler.
     261         */
     262        if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
     263                pte = page_mapping_find(AS, badvaddr, true);
     264                ASSERT(pte && pte->p);
     265                ASSERT(pte->w || access != PF_ACCESS_WRITE);
     266                return pte;
     267        }
     268
     269        return NULL;
    396270}
    397271
  • kernel/arch/mips64/src/mm/tlb.c

    r5d230a30 r69146b93  
    7979 * @param access   Access mode that caused the fault.
    8080 * @param istate   Pointer to interrupted state.
    81  * @param pfrc     Pointer to variable where as_page_fault()
    82  *                 return code will be stored.
    8381 *
    8482 * @return PTE on success, NULL otherwise.
     
    8684 */
    8785static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access,
    88     istate_t *istate, int *pfrc)
     86    istate_t *istate)
    8987{
    9088        entry_hi_t hi;
    9189        hi.value = cp0_entry_hi_read();
    9290       
    93         /*
    94          * Handler cannot succeed if the ASIDs don't match.
    95          */
    96         if (hi.asid != AS->asid) {
    97                 printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
    98                 return NULL;
    99         }
     91        ASSERT(hi.asid == AS->asid);
    10092       
    10193        /*
     
    109101                 */
    110102                return pte;
    111         } else {
    112                 int rc;
    113                
    114                 /*
    115                  * Mapping not found in page tables.
    116                  * Resort to higher-level page fault handler.
    117                  */
    118                 switch (rc = as_page_fault(badvaddr, access, istate)) {
    119                 case AS_PF_OK:
    120                         /*
    121                          * The higher-level page fault handler succeeded,
    122                          * The mapping ought to be in place.
    123                          */
    124                         pte = page_mapping_find(AS, badvaddr, true);
    125                         ASSERT(pte);
    126                         ASSERT(pte->p);
    127                         ASSERT((pte->w) || (access != PF_ACCESS_WRITE));
    128                         return pte;
    129                 case AS_PF_DEFER:
    130                         *pfrc = AS_PF_DEFER;
    131                         return NULL;
    132                 case AS_PF_FAULT:
    133                         *pfrc = AS_PF_FAULT;
    134                         return NULL;
    135                 default:
    136                         panic("Unexpected return code (%d).", rc);
    137                 }
    138         }
     103        }
     104
     105        /*
     106         * Mapping not found in page tables.
     107         * Resort to higher-level page fault handler.
     108         */
     109        if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
     110                /*
     111                 * The higher-level page fault handler succeeded,
     112                 * The mapping ought to be in place.
     113                 */
     114                pte = page_mapping_find(AS, badvaddr, true);
     115                ASSERT(pte);
     116                ASSERT(pte->p);
     117                ASSERT((pte->w) || (access != PF_ACCESS_WRITE));
     118                return pte;
     119        }
     120
     121        return NULL;
    139122}
    140123
     
    156139}
    157140
    158 static void tlb_refill_fail(istate_t *istate)
    159 {
    160         uintptr_t va = cp0_badvaddr_read();
    161        
    162         fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
    163             (void *) va);
    164         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
    165 }
    166 
    167 static void tlb_invalid_fail(istate_t *istate)
    168 {
    169         uintptr_t va = cp0_badvaddr_read();
    170        
    171         fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
    172             (void *) va);
    173         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
    174 }
    175 
    176 static void tlb_modified_fail(istate_t *istate)
    177 {
    178         uintptr_t va = cp0_badvaddr_read();
    179        
    180         fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
    181             (void *) va);
    182         panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
    183 }
    184 
    185141/** Process TLB Refill Exception.
    186142 *
     
    196152        mutex_unlock(&AS->lock);
    197153       
    198         int pfrc;
    199         pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
    200             istate, &pfrc);
    201         if (!pte) {
    202                 switch (pfrc) {
    203                 case AS_PF_FAULT:
    204                         goto fail;
    205                         break;
    206                 case AS_PF_DEFER:
    207                         /*
    208                          * The page fault came during copy_from_uspace()
    209                          * or copy_to_uspace().
    210                          */
    211                         return;
    212                 default:
    213                         panic("Unexpected pfrc (%d).", pfrc);
     154        pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
     155        if (pte) {
     156                /*
     157                 * Record access to PTE.
     158                 */
     159                pte->a = 1;
     160       
     161                entry_lo_t lo;
     162                entry_hi_t hi;
     163       
     164                tlb_prepare_entry_hi(&hi, asid, badvaddr);
     165                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
     166                    pte->frame);
     167       
     168                /*
     169                 * New entry is to be inserted into TLB
     170                 */
     171                cp0_entry_hi_write(hi.value);
     172       
     173                if ((badvaddr / PAGE_SIZE) % 2 == 0) {
     174                        cp0_entry_lo0_write(lo.value);
     175                        cp0_entry_lo1_write(0);
     176                } else {
     177                        cp0_entry_lo0_write(0);
     178                        cp0_entry_lo1_write(lo.value);
    214179                }
    215         }
    216        
    217         /*
    218          * Record access to PTE.
    219          */
    220         pte->a = 1;
    221        
    222         entry_lo_t lo;
    223         entry_hi_t hi;
    224        
    225         tlb_prepare_entry_hi(&hi, asid, badvaddr);
    226         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
    227             pte->frame);
    228        
    229         /*
    230          * New entry is to be inserted into TLB
    231          */
    232         cp0_entry_hi_write(hi.value);
    233        
    234         if ((badvaddr / PAGE_SIZE) % 2 == 0) {
    235                 cp0_entry_lo0_write(lo.value);
    236                 cp0_entry_lo1_write(0);
    237         } else {
    238                 cp0_entry_lo0_write(0);
    239                 cp0_entry_lo1_write(lo.value);
    240         }
    241        
    242         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    243         tlbwr();
    244        
    245         return;
    246        
    247 fail:
    248         tlb_refill_fail(istate);
     180       
     181                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     182                tlbwr();
     183        }
    249184}
    250185
     
    271206        index.value = cp0_index_read();
    272207       
    273         /*
    274          * Fail if the entry is not in TLB.
    275          */
    276         if (index.p) {
    277                 printf("TLB entry not found.\n");
    278                 goto fail;
    279         }
    280        
    281         int pfrc;
    282         pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
    283             istate, &pfrc);
    284         if (!pte) {
    285                 switch (pfrc) {
    286                 case AS_PF_FAULT:
    287                         goto fail;
    288                         break;
    289                 case AS_PF_DEFER:
    290                         /*
    291                          * The page fault came during copy_from_uspace()
    292                          * or copy_to_uspace().
    293                          */
    294                         return;
    295                 default:
    296                         panic("Unexpected pfrc (%d).", pfrc);
    297                 }
    298         }
    299        
    300         /*
    301          * Read the faulting TLB entry.
    302          */
    303         tlbr();
    304        
    305         /*
    306          * Record access to PTE.
    307          */
    308         pte->a = 1;
    309        
    310         entry_lo_t lo;
    311         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
    312             pte->frame);
    313        
    314         /*
    315          * The entry is to be updated in TLB.
    316          */
    317         if ((badvaddr / PAGE_SIZE) % 2 == 0)
    318                 cp0_entry_lo0_write(lo.value);
    319         else
    320                 cp0_entry_lo1_write(lo.value);
    321        
    322         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    323         tlbwi();
    324        
    325         return;
    326        
    327 fail:
    328         tlb_invalid_fail(istate);
     208        ASSERT(!index.p);
     209       
     210        pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
     211        if (pte) {
     212                /*
     213                 * Read the faulting TLB entry.
     214                 */
     215                tlbr();
     216       
     217                /*
     218                 * Record access to PTE.
     219                 */
     220                pte->a = 1;
     221
     222                entry_lo_t lo;
     223                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
     224                    pte->frame);
     225       
     226                /*
     227                 * The entry is to be updated in TLB.
     228                 */
     229                if ((badvaddr / PAGE_SIZE) % 2 == 0)
     230                        cp0_entry_lo0_write(lo.value);
     231                else
     232                        cp0_entry_lo1_write(lo.value);
     233       
     234                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     235                tlbwi();
     236        }
     237       
    329238}
    330239
     
    351260        index.value = cp0_index_read();
    352261       
    353         /*
    354          * Fail if the entry is not in TLB.
    355          */
    356         if (index.p) {
    357                 printf("TLB entry not found.\n");
    358                 goto fail;
    359         }
    360        
    361         int pfrc;
    362         pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE,
    363             istate, &pfrc);
    364         if (!pte) {
    365                 switch (pfrc) {
    366                 case AS_PF_FAULT:
    367                         goto fail;
    368                         break;
    369                 case AS_PF_DEFER:
    370                         /*
    371                          * The page fault came during copy_from_uspace()
    372                          * or copy_to_uspace().
    373                          */
    374                         return;
    375                 default:
    376                         panic("Unexpected pfrc (%d).", pfrc);
    377                 }
    378         }
    379        
    380         /*
    381          * Read the faulting TLB entry.
    382          */
    383         tlbr();
    384        
    385         /*
    386          * Record access and write to PTE.
    387          */
    388         pte->a = 1;
    389         pte->d = 1;
    390        
    391         entry_lo_t lo;
    392         tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c,
    393             pte->frame);
    394        
    395         /*
    396          * The entry is to be updated in TLB.
    397          */
    398         if ((badvaddr / PAGE_SIZE) % 2 == 0)
    399                 cp0_entry_lo0_write(lo.value);
    400         else
    401                 cp0_entry_lo1_write(lo.value);
    402        
    403         cp0_pagemask_write(TLB_PAGE_MASK_16K);
    404         tlbwi();
    405        
    406         return;
    407        
    408 fail:
    409         tlb_modified_fail(istate);
     262        ASSERT(!index.p);
     263       
     264        pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate);
     265        if (pte) {
     266                /*
     267                 * Read the faulting TLB entry.
     268                 */
     269                tlbr();
     270       
     271                /*
     272                 * Record access and write to PTE.
     273                 */
     274                pte->a = 1;
     275                pte->d = 1;
     276       
     277                entry_lo_t lo;
     278                tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c,
     279                    pte->frame);
     280       
     281                /*
     282                 * The entry is to be updated in TLB.
     283                 */
     284                if ((badvaddr / PAGE_SIZE) % 2 == 0)
     285                        cp0_entry_lo0_write(lo.value);
     286                else
     287                        cp0_entry_lo1_write(lo.value);
     288       
     289                cp0_pagemask_write(TLB_PAGE_MASK_16K);
     290                tlbwi();
     291        }
    410292}
    411293
  • kernel/arch/ppc32/src/mm/pht.c

    r5d230a30 r69146b93  
    4949 * @param access   Access mode that caused the fault.
    5050 * @param istate   Pointer to interrupted state.
    51  * @param pfrc     Pointer to variable where as_page_fault() return code
    52  *                 will be stored.
    5351 *
    5452 * @return PTE on success, NULL otherwise.
     
    5654 */
    5755static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access,
    58     istate_t *istate, int *pfrc)
     56    istate_t *istate)
    5957{
    6058        /*
     
    6866                 */
    6967                return pte;
    70         } else {
     68        }
     69        /*
     70         * Mapping not found in page tables.
     71         * Resort to higher-level page fault handler.
     72         */
     73        if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
    7174                /*
    72                  * Mapping not found in page tables.
    73                  * Resort to higher-level page fault handler.
     75                 * The higher-level page fault handler succeeded,
     76                 * The mapping ought to be in place.
    7477                 */
    75                 int rc = as_page_fault(badvaddr, access, istate);
    76                 switch (rc) {
    77                 case AS_PF_OK:
    78                         /*
    79                          * The higher-level page fault handler succeeded,
    80                          * The mapping ought to be in place.
    81                          */
    82                         pte = page_mapping_find(as, badvaddr, true);
    83                         ASSERT((pte) && (pte->present));
    84                         *pfrc = 0;
    85                         return pte;
    86                 case AS_PF_DEFER:
    87                         *pfrc = rc;
    88                         return NULL;
    89                 case AS_PF_FAULT:
    90                         *pfrc = rc;
    91                         return NULL;
    92                 default:
    93                         panic("Unexpected rc (%d).", rc);
    94                 }
    95         }
    96 }
    97 
    98 static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
    99 {
    100         fault_if_from_uspace(istate, "PHT Refill Exception on %p.",
    101             (void *) badvaddr);
    102         panic_memtrap(istate, PF_ACCESS_UNKNOWN, badvaddr,
    103             "PHT Refill Exception.");
     78                pte = page_mapping_find(as, badvaddr, true);
     79                ASSERT((pte) && (pte->present));
     80                return pte;
     81        }
     82
     83        return NULL;
    10484}
    10585
     
    202182                badvaddr = istate->pc;
    203183       
    204         int pfrc;
    205184        pte_t *pte = find_mapping_and_check(AS, badvaddr,
    206             PF_ACCESS_READ /* FIXME */, istate, &pfrc);
    207        
    208         if (!pte) {
    209                 switch (pfrc) {
    210                 case AS_PF_FAULT:
    211                         pht_refill_fail(badvaddr, istate);
    212                         return;
    213                 case AS_PF_DEFER:
    214                         /*
    215                          * The page fault came during copy_from_uspace()
    216                          * or copy_to_uspace().
    217                          */
    218                         return;
    219                 default:
    220                         panic("Unexpected pfrc (%d).", pfrc);
    221                 }
    222         }
    223        
    224         /* Record access to PTE */
    225         pte->accessed = 1;
    226         pht_insert(badvaddr, pte);
     185            PF_ACCESS_READ /* FIXME */, istate);
     186       
     187        if (pte) {
     188                /* Record access to PTE */
     189                pte->accessed = 1;
     190                pht_insert(badvaddr, pte);
     191        }
    227192}
    228193
  • kernel/arch/ppc32/src/ppc32.c

    r5d230a30 r69146b93  
    103103}
    104104
     105#ifdef CONFIG_FB
    105106static bool display_register(ofw_tree_node_t *node, void *arg)
    106107{
     
    169170        return true;
    170171}
     172#endif
    171173
    172174void arch_post_mm_init(void)
  • kernel/arch/sparc64/src/mm/sun4u/tlb.c

    r5d230a30 r69146b93  
    5858static void dtlb_pte_copy(pte_t *, size_t, bool);
    5959static void itlb_pte_copy(pte_t *, size_t);
    60 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
    61     const char *);
    62 static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    63     const char *);
    64 static void do_fast_data_access_protection_fault(istate_t *,
    65     tlb_tag_access_reg_t, const char *);
    6660
    6761const char *context_encoding[] = {
     
    222216                 * handler.
    223217                 */
    224                 if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
    225                     AS_PF_FAULT) {
    226                         do_fast_instruction_access_mmu_miss_fault(istate,
    227                             istate->tpc, __func__);
    228                 }
     218                as_page_fault(page_16k, PF_ACCESS_EXEC, istate);
    229219        }
    230220}
     
    256246                if (!tag.vpn) {
    257247                        /* NULL access in kernel */
    258                         do_fast_data_access_mmu_miss_fault(istate, tag,
    259                             "Dereferencing NULL pointer.");
     248                        panic("NULL pointer dereference.");
    260249                } else if (page_8k >= end_of_identity) {
    261250                        /* Kernel non-identity. */
    262251                        as = AS_KERNEL;
    263252                } else {
    264                         do_fast_data_access_mmu_miss_fault(istate, tag,
    265                     "Unexpected kernel page fault.");
     253                        panic("Unexpected kernel page fault.");
    266254                }
    267255        }
     
    283271                 * handler.
    284272                 */
    285                 if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
    286                     AS_PF_FAULT) {
    287                         do_fast_data_access_mmu_miss_fault(istate, tag,
    288                             __func__);
    289                 }
     273                as_page_fault(page_16k, PF_ACCESS_READ, istate);
    290274        }
    291275}
     
    332316                 * handler.
    333317                 */             
    334                 if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
    335                     AS_PF_FAULT) {
    336                         do_fast_data_access_protection_fault(istate, tag,
    337                             __func__);
    338                 }
     318                as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
    339319        }
    340320}
     
    428408
    429409#endif
    430 
    431 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    432     uintptr_t va, const char *str)
    433 {
    434         fault_if_from_uspace(istate, "%s, address=%p.", str, (void *) va);
    435         panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
    436 }
    437 
    438 void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    439     tlb_tag_access_reg_t tag, const char *str)
    440 {
    441         uintptr_t va;
    442 
    443         va = tag.vpn << MMU_PAGE_WIDTH;
    444         fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
    445             (void *) va, tag.context);
    446         panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, str);
    447 }
    448 
    449 void do_fast_data_access_protection_fault(istate_t *istate,
    450     tlb_tag_access_reg_t tag, const char *str)
    451 {
    452         uintptr_t va;
    453 
    454         va = tag.vpn << MMU_PAGE_WIDTH;
    455         fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
    456             (void *) va, tag.context);
    457         panic_memtrap(istate, PF_ACCESS_WRITE, va, str);
    458 }
    459410
    460411void describe_dmmu_fault(void)
  • kernel/arch/sparc64/src/mm/sun4v/tlb.c

    r5d230a30 r69146b93  
    6262static void itlb_pte_copy(pte_t *);
    6363static void dtlb_pte_copy(pte_t *, bool);
    64 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
    65     const char *);
    66 static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
    67     const char *);
    68 static void do_fast_data_access_protection_fault(istate_t *,
    69     uint64_t, const char *);
    7064
    7165/*
     
    235229                 * handler.
    236230                 */
    237                 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
    238                         do_fast_instruction_access_mmu_miss_fault(istate,
    239                             istate->tpc, __func__);
    240                 }
     231                as_page_fault(va, PF_ACCESS_EXEC, istate);
    241232        }
    242233}
     
    264255                if (va == 0) {
    265256                        /* NULL access in kernel */
    266                         do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
    267                             __func__);
     257                        panic("NULL pointer dereference.");
    268258                }
    269                 do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
    270                     "kernel page fault.");
     259                panic("Unexpected kernel page fault.");
    271260        }
    272261
     
    287276                 * handler.
    288277                 */             
    289                 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
    290                         do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
    291                             __func__);
    292                 }
     278                as_page_fault(va, PF_ACCESS_READ, istate);
    293279        }
    294280}
     
    329315                 * handler.
    330316                 */             
    331                 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
    332                         do_fast_data_access_protection_fault(istate, page_and_ctx,
    333                             __func__);
    334                 }
     317                as_page_fault(va, PF_ACCESS_WRITE, istate);
    335318        }
    336319}
     
    346329}
    347330
    348 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, uintptr_t va,
    349     const char *str)
    350 {
    351         fault_if_from_uspace(istate, "%s, address=%p.", str,
    352             (void *) va);
    353         panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
    354 }
    355 
    356 void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    357     uint64_t page_and_ctx, const char *str)
    358 {
    359         fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,
    360             (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    361         panic_memtrap(istate, PF_ACCESS_UNKNOWN, DMISS_ADDRESS(page_and_ctx),
    362             str);
    363 }
    364 
    365 void do_fast_data_access_protection_fault(istate_t *istate,
    366     uint64_t page_and_ctx, const char *str)
    367 {
    368         fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,
    369             (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    370         panic_memtrap(istate, PF_ACCESS_WRITE, DMISS_ADDRESS(page_and_ctx),
    371             str);
    372 }
    373 
    374331/**
    375332 * Describes the exact condition which caused the last DMMU fault.
  • kernel/genarch/Makefile.inc

    r5d230a30 r69146b93  
    106106endif
    107107
     108ifeq ($(CONFIG_AMDM37X_UART),y)
     109        GENARCH_SOURCES += \
     110                genarch/src/drivers/amdm37x_uart/amdm37x_uart.c
     111endif
     112
    108113ifeq ($(CONFIG_VIA_CUDA),y)
    109114        GENARCH_SOURCES += \
  • kernel/genarch/src/mm/page_ht.c

    r5d230a30 r69146b93  
    209209                pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);
    210210
     211                /*
     212                 * Make sure that a concurrent ht_mapping_find() will see the
     213                 * new entry only after it is fully initialized.
     214                 */
    211215                write_barrier();
    212216               
  • kernel/genarch/src/mm/page_pt.c

    r5d230a30 r69146b93  
    8989                    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
    9090                    PAGE_WRITE);
     91                /*
     92                 * Make sure that a concurrent hardware page table walk or
     93                 * pt_mapping_find() will see the new PTL1 only after it is
     94                 * fully initialized.
     95                 */
    9196                write_barrier();
    9297                SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
     
    103108                    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
    104109                    PAGE_WRITE);
     110                /*
     111                 * Make the new PTL2 visible only after it is fully initialized.
     112                 */
    105113                write_barrier();
    106114                SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));       
     
    117125                    PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
    118126                    PAGE_WRITE);
     127                /*
     128                 * Make the new PTL3 visible only after it is fully initialized.
     129                 */
    119130                write_barrier();
    120131                SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
     
    125136        SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
    126137        SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);
     138        /*
     139         * Make the new mapping visible only after it is fully initialized.
     140         */
    127141        write_barrier();
    128142        SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
     
    296310
    297311#if (PTL1_ENTRIES != 0)
     312        /*
     313         * Always read ptl2 only after we are sure it is present.
     314         */
    298315        read_barrier();
    299316#endif
     
    304321
    305322#if (PTL2_ENTRIES != 0)
     323        /*
     324         * Always read ptl3 only after we are sure it is present.
     325         */
    306326        read_barrier();
    307327#endif
  • kernel/generic/include/config.h

    r5d230a30 r69146b93  
    4444#define STACK_FRAMES  TWO_FRAMES
    4545#define STACK_SIZE    ((1 << STACK_FRAMES) << PAGE_WIDTH)
     46
     47#define STACK_SIZE_USER (1 * 1024 * 1024)
    4648
    4749#define CONFIG_INIT_TASKS        32
  • kernel/generic/include/mm/as.h

    r5d230a30 r69146b93  
    6565#define USER_ADDRESS_SPACE_END      USER_ADDRESS_SPACE_END_ARCH
    6666
    67 #ifdef USTACK_ADDRESS_ARCH
    68         #define USTACK_ADDRESS  USTACK_ADDRESS_ARCH
    69 #else
    70         #define USTACK_ADDRESS  (USER_ADDRESS_SPACE_END - (STACK_SIZE - 1))
    71 #endif
    72 
    7367/** Kernel address space. */
    7468#define FLAG_AS_KERNEL  (1 << 0)
     
    7872#define AS_AREA_ATTR_PARTIAL  1  /**< Not fully initialized area. */
    7973
     74/** The page fault was resolved by as_page_fault(). */
     75#define AS_PF_OK     0
     76
     77/** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
     78#define AS_PF_DEFER  1
     79
    8080/** The page fault was not resolved by as_page_fault(). */
    81 #define AS_PF_FAULT  0
    82 
    83 /** The page fault was resolved by as_page_fault(). */
    84 #define AS_PF_OK  1
    85 
    86 /** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
    87 #define AS_PF_DEFER  2
     81#define AS_PF_FAULT  2
     82
     83/** The page fault was not resolved by as_page_fault(). Non-verbose version. */
     84#define AS_PF_SILENT 3
    8885
    8986/** Address space structure.
     
    228225        void (* destroy)(as_area_t *);
    229226
     227        bool (* is_resizable)(as_area_t *);
     228        bool (* is_shareable)(as_area_t *);
     229
    230230        int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
    231231        void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
  • kernel/generic/src/console/cmd.c

    r5d230a30 r69146b93  
    5656#include <cpu.h>
    5757#include <mm/tlb.h>
     58#include <mm/km.h>
    5859#include <arch/mm/tlb.h>
    5960#include <mm/frame.h>
     
    8384        .func = cmd_help,
    8485        .argc = 0
     86};
     87
     88/* Data and methods for pio_read_8 command */
     89static int cmd_pio_read_8(cmd_arg_t *argv);
     90static cmd_arg_t pio_read_8_argv[] = { { .type = ARG_TYPE_INT } };
     91static cmd_info_t pio_read_8_info = {
     92        .name = "pio_read_8",
     93        .description = "pio_read_8 <address> Read 1 byte from memory (or port).",
     94        .func = cmd_pio_read_8,
     95        .argc = 1,
     96        .argv = pio_read_8_argv
     97};
     98
     99/* Data and methods for pio_read_16 command */
     100static int cmd_pio_read_16(cmd_arg_t *argv);
     101static cmd_arg_t pio_read_16_argv[] = { { .type = ARG_TYPE_INT } };
     102static cmd_info_t pio_read_16_info = {
     103        .name = "pio_read_16",
     104        .description = "pio_read_16 <address> Read 2 bytes from memory (or port).",
     105        .func = cmd_pio_read_16,
     106        .argc = 1,
     107        .argv = pio_read_16_argv
     108};
     109
     110/* Data and methods for pio_read_32 command */
     111static int cmd_pio_read_32(cmd_arg_t *argv);
     112static cmd_arg_t pio_read_32_argv[] = { { .type = ARG_TYPE_INT } };
     113static cmd_info_t pio_read_32_info = {
     114        .name = "pio_read_32",
     115        .description = "pio_read_32 <address> Read 4 bytes from memory (or port).",
     116        .func = cmd_pio_read_32,
     117        .argc = 1,
     118        .argv = pio_read_32_argv
     119};
     120
     121/* Data and methods for pio_write_8 command */
     122static int cmd_pio_write_8(cmd_arg_t *argv);
     123static cmd_arg_t pio_write_8_argv[] = {
     124        { .type = ARG_TYPE_INT },
     125        { .type = ARG_TYPE_INT }
     126};
     127static cmd_info_t pio_write_8_info = {
     128        .name = "pio_write_8",
     129        .description = "pio_write_8 <address> <value> Write 1 byte to memory (or port).",
     130        .func = cmd_pio_write_8,
     131        .argc = 2,
     132        .argv = pio_write_8_argv
     133};
     134
     135/* Data and methods for pio_write_16 command */
     136static int cmd_pio_write_16(cmd_arg_t *argv);
     137static cmd_arg_t pio_write_16_argv[] = {
     138        { .type = ARG_TYPE_INT },
     139        { .type = ARG_TYPE_INT }
     140};
     141static cmd_info_t pio_write_16_info = {
     142        .name = "pio_write_16",
     143        .description = "pio_write_16 <address> <value> Write 2 bytes to memory (or port).",
     144        .func = cmd_pio_write_16,
     145        .argc = 2,
     146        .argv = pio_write_16_argv
     147};
     148
     149/* Data and methods for pio_write_32 command */
     150static int cmd_pio_write_32(cmd_arg_t *argv);
     151static cmd_arg_t pio_write_32_argv[] = {
     152        { .type = ARG_TYPE_INT },
     153        { .type = ARG_TYPE_INT }
     154};
     155static cmd_info_t pio_write_32_info = {
     156        .name = "pio_write_32",
     157        .description = "pio_write_32 <address> <value> Write 4 bytes to memory (or port).",
     158        .func = cmd_pio_write_32,
     159        .argc = 2,
     160        .argv = pio_write_32_argv
    85161};
    86162
     
    553629        &btrace_info,
    554630#endif
     631        &pio_read_8_info,
     632        &pio_read_16_info,
     633        &pio_read_32_info,
     634        &pio_write_8_info,
     635        &pio_write_16_info,
     636        &pio_write_32_info,
    555637        NULL
    556638};
     
    623705        spinlock_unlock(&cmd_lock);
    624706       
     707        return 1;
     708}
     709
     710/** Read 1 byte from phys memory or io port.
     711 *
     712 * @param argv Argument vector.
     713 *
     714 * @return 0 on failure, 1 on success.
     715 */
     716static int cmd_pio_read_8(cmd_arg_t *argv)
     717{
     718        uint8_t *ptr = NULL;
     719       
     720#ifdef IO_SPACE_BOUNDARY
     721        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     722                ptr = (void *) argv[0].intval;
     723        else
     724#endif
     725                ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
     726                    PAGE_NOT_CACHEABLE);
     727       
     728        const uint8_t val = pio_read_8(ptr);
     729        printf("read %" PRIxn ": %" PRIx8 "\n", argv[0].intval, val);
     730       
     731#ifdef IO_SPACE_BOUNDARY
     732        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     733                return 1;
     734#endif
     735       
     736        km_unmap((uintptr_t) ptr, sizeof(uint8_t));
     737        return 1;
     738}
     739
     740/** Read 2 bytes from phys memory or io port.
     741 *
     742 * @param argv Argument vector.
     743 *
     744 * @return 0 on failure, 1 on success.
     745 */
     746static int cmd_pio_read_16(cmd_arg_t *argv)
     747{
     748        uint16_t *ptr = NULL;
     749       
     750#ifdef IO_SPACE_BOUNDARY
     751        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     752                ptr = (void *) argv[0].intval;
     753        else
     754#endif
     755                ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
     756                    PAGE_NOT_CACHEABLE);
     757       
     758        const uint16_t val = pio_read_16(ptr);
     759        printf("read %" PRIxn ": %" PRIx16 "\n", argv[0].intval, val);
     760       
     761#ifdef IO_SPACE_BOUNDARY
     762        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     763                return 1;
     764#endif
     765       
     766        km_unmap((uintptr_t) ptr, sizeof(uint16_t));
     767        return 1;
     768}
     769
     770/** Read 4 bytes from phys memory or io port.
     771 *
     772 * @param argv Argument vector.
     773 *
     774 * @return 0 on failure, 1 on success.
     775 */
     776static int cmd_pio_read_32(cmd_arg_t *argv)
     777{
     778        uint32_t *ptr = NULL;
     779       
     780#ifdef IO_SPACE_BOUNDARY
     781        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     782                ptr = (void *) argv[0].intval;
     783        else
     784#endif
     785                ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
     786                    PAGE_NOT_CACHEABLE);
     787       
     788        const uint32_t val = pio_read_32(ptr);
     789        printf("read %" PRIxn ": %" PRIx32 "\n", argv[0].intval, val);
     790       
     791#ifdef IO_SPACE_BOUNDARY
     792        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     793                return 1;
     794#endif
     795       
     796        km_unmap((uintptr_t) ptr, sizeof(uint32_t));
     797        return 1;
     798}
     799
     800/** Write 1 byte to phys memory or io port.
     801 *
     802 * @param argv Argument vector.
     803 *
     804 * @return 0 on failure, 1 on success.
     805 */
     806static int cmd_pio_write_8(cmd_arg_t *argv)
     807{
     808        uint8_t *ptr = NULL;
     809       
     810#ifdef IO_SPACE_BOUNDARY
     811        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     812                ptr = (void *) argv[0].intval;
     813        else
     814#endif
     815                ptr = (uint8_t *) km_map(argv[0].intval, sizeof(uint8_t),
     816                    PAGE_NOT_CACHEABLE);
     817       
     818        printf("write %" PRIxn ": %" PRIx8 "\n", argv[0].intval,
     819            (uint8_t) argv[1].intval);
     820        pio_write_8(ptr, (uint8_t) argv[1].intval);
     821       
     822#ifdef IO_SPACE_BOUNDARY
     823        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     824                return 1;
     825#endif
     826       
     827        km_unmap((uintptr_t) ptr, sizeof(uint8_t));
     828        return 1;
     829}
     830
     831/** Write 2 bytes to phys memory or io port.
     832 *
     833 * @param argv Argument vector.
     834 *
     835 * @return 0 on failure, 1 on success.
     836 */
     837static int cmd_pio_write_16(cmd_arg_t *argv)
     838{
     839        uint16_t *ptr = NULL;
     840       
     841#ifdef IO_SPACE_BOUNDARY
     842        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     843                ptr = (void *) argv[0].intval;
     844        else
     845#endif
     846                ptr = (uint16_t *) km_map(argv[0].intval, sizeof(uint16_t),
     847                    PAGE_NOT_CACHEABLE);
     848       
     849        printf("write %" PRIxn ": %" PRIx16 "\n", argv[0].intval,
     850            (uint16_t) argv[1].intval);
     851        pio_write_16(ptr, (uint16_t) argv[1].intval);
     852       
     853#ifdef IO_SPACE_BOUNDARY
     854        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     855                return 1;
     856#endif
     857       
     858        km_unmap((uintptr_t) ptr, sizeof(uint16_t));
     859        return 1;
     860}
     861
     862/** Write 4 bytes to phys memory or io port.
     863 *
     864 * @param argv Argument vector.
     865 *
     866 * @return 0 on failure, 1 on success.
     867 */
     868static int cmd_pio_write_32(cmd_arg_t *argv)
     869{
     870        uint32_t *ptr = NULL;
     871       
     872#ifdef IO_SPACE_BOUNDARY
     873        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     874                ptr = (void *) argv[0].intval;
     875        else
     876#endif
     877                ptr = (uint32_t *) km_map(argv[0].intval, sizeof(uint32_t),
     878                    PAGE_NOT_CACHEABLE);
     879       
     880        printf("write %" PRIxn ": %" PRIx32 "\n", argv[0].intval,
     881            (uint32_t) argv[1].intval);
     882        pio_write_32(ptr, (uint32_t) argv[1].intval);
     883       
     884#ifdef IO_SPACE_BOUNDARY
     885        if ((void *) argv->intval < IO_SPACE_BOUNDARY)
     886                return 1;
     887#endif
     888       
     889        km_unmap((uintptr_t) ptr, sizeof(uint32_t));
    625890        return 1;
    626891}
  • kernel/generic/src/console/kconsole.c

    r5d230a30 r69146b93  
    525525                /* It's a number - convert it */
    526526                uint64_t value;
    527                 int rc = str_uint64_t(text, NULL, 0, true, &value);
     527                char *end;
     528                int rc = str_uint64_t(text, &end, 0, false, &value);
     529                if (end != text + len)
     530                        rc = EINVAL;
    528531                switch (rc) {
    529532                case EINVAL:
    530                         printf("Invalid number.\n");
     533                        printf("Invalid number '%s'.\n", text);
    531534                        return false;
    532535                case EOVERFLOW:
    533                         printf("Integer overflow.\n");
     536                        printf("Integer overflow in '%s'.\n", text);
    534537                        return false;
    535538                case EOK:
     
    539542                        break;
    540543                default:
    541                         printf("Unknown error.\n");
     544                        printf("Unknown error parsing '%s'.\n", text);
    542545                        return false;
    543546                }
  • kernel/generic/src/interrupt/interrupt.c

    r5d230a30 r69146b93  
    164164}
    165165
    166 static NO_TRACE void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
     166static NO_TRACE
     167void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
    167168{
    168169        printf("Task %s (%" PRIu64 ") killed due to an exception at "
  • kernel/generic/src/main/kinit.c

    r5d230a30 r69146b93  
    182182#endif /* CONFIG_KCONSOLE */
    183183       
     184        /*
     185         * Store the default stack size in sysinfo so that uspace can create
     186         * stack with this default size.
     187         */
     188        sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);
     189       
    184190        interrupts_enable();
    185191       
  • kernel/generic/src/mm/as.c

    r5d230a30 r69146b93  
    7979#include <syscall/copy.h>
    8080#include <arch/interrupt.h>
     81#include <interrupt.h>
    8182
    8283/**
     
    426427        /*
    427428         * So far, the area does not conflict with other areas.
    428          * Check if it doesn't conflict with kernel address space.
     429         * Check if it is contained in the user address space.
    429430         */
    430431        if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
    431                 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
    432                     KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
     432                return iswithin(USER_ADDRESS_SPACE_START,
     433                    (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
     434                    addr, P2SZ(count));
    433435        }
    434436       
     
    696698                return ENOENT;
    697699        }
    698        
    699         if (area->backend == &phys_backend) {
    700                 /*
    701                  * Remapping of address space areas associated
    702                  * with memory mapped devices is not supported.
     700
     701        if (!area->backend->is_resizable(area)) {
     702                /*
     703                 * The backend does not support resizing for this area.
    703704                 */
    704705                mutex_unlock(&area->lock);
     
    10571058        }
    10581059       
    1059         if ((!src_area->backend) || (!src_area->backend->share)) {
    1060                 /*
    1061                  * There is no backend or the backend does not
    1062                  * know how to share the area.
     1060        if (!src_area->backend->is_shareable(src_area)) {
     1061                /*
     1062                 * The backend does not permit sharing of this area.
    10631063                 */
    10641064                mutex_unlock(&src_area->lock);
     
    13631363int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
    13641364{
     1365        int rc = AS_PF_FAULT;
     1366
    13651367        if (!THREAD)
    1366                 return AS_PF_FAULT;
     1368                goto page_fault;
    13671369       
    13681370        if (!AS)
    1369                 return AS_PF_FAULT;
     1371                goto page_fault;
    13701372       
    13711373        mutex_lock(&AS->lock);
     
    14231425         * Resort to the backend page fault handler.
    14241426         */
    1425         if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
     1427        rc = area->backend->page_fault(area, page, access);
     1428        if (rc != AS_PF_OK) {
    14261429                page_table_unlock(AS, false);
    14271430                mutex_unlock(&area->lock);
     
    14441447                istate_set_retaddr(istate,
    14451448                    (uintptr_t) &memcpy_to_uspace_failover_address);
     1449        } else if (rc == AS_PF_SILENT) {
     1450                printf("Killing task %" PRIu64 " due to a "
     1451                    "failed late reservation request.\n", TASK->taskid);
     1452                task_kill_self(true);
    14461453        } else {
    1447                 return AS_PF_FAULT;
     1454                fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
     1455                panic_memtrap(istate, access, page, NULL);
    14481456        }
    14491457       
     
    21322140{
    21332141        uintptr_t virt = base;
    2134         as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
     2142        as_area_t *area = as_area_create(AS, flags, size,
    21352143            AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
    21362144        if (area == NULL)
  • kernel/generic/src/mm/backend_anon.c

    r5d230a30 r69146b93  
    5959static void anon_destroy(as_area_t *);
    6060
     61static bool anon_is_resizable(as_area_t *);
     62static bool anon_is_shareable(as_area_t *);
     63
    6164static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
    6265static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);
     
    6871        .destroy = anon_destroy,
    6972
     73        .is_resizable = anon_is_resizable,
     74        .is_shareable = anon_is_shareable,
     75
    7076        .page_fault = anon_page_fault,
    7177        .frame_free = anon_frame_free,
     
    7480bool anon_create(as_area_t *area)
    7581{
     82        if (area->flags & AS_AREA_LATE_RESERVE)
     83                return true;
     84
    7685        return reserve_try_alloc(area->pages);
    7786}
     
    7988bool anon_resize(as_area_t *area, size_t new_pages)
    8089{
     90        if (area->flags & AS_AREA_LATE_RESERVE)
     91                return true;
     92
    8193        if (new_pages > area->pages)
    8294                return reserve_try_alloc(new_pages - area->pages);
     
    100112        ASSERT(mutex_locked(&area->as->lock));
    101113        ASSERT(mutex_locked(&area->lock));
     114        ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));
    102115
    103116        /*
     
    139152void anon_destroy(as_area_t *area)
    140153{
     154        if (area->flags & AS_AREA_LATE_RESERVE)
     155                return;
     156
    141157        reserve_free(area->pages);
    142158}
    143159
     160bool anon_is_resizable(as_area_t *area)
     161{
     162        return true;
     163}
     164
     165bool anon_is_shareable(as_area_t *area)
     166{
     167        return !(area->flags & AS_AREA_LATE_RESERVE);
     168}
    144169
    145170/** Service a page fault in the anonymous memory address space area.
     
    225250                 *   the different causes
    226251                 */
     252
     253                if (area->flags & AS_AREA_LATE_RESERVE) {
     254                        /*
     255                         * Reserve the memory for this page now.
     256                         */
     257                        if (!reserve_try_alloc(1))
     258                                return AS_PF_SILENT;
     259                }
     260
    227261                kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
    228262                memsetb((void *) kpage, PAGE_SIZE, 0);
     
    255289        ASSERT(mutex_locked(&area->lock));
    256290
    257         frame_free_noreserve(frame);
     291        if (area->flags & AS_AREA_LATE_RESERVE) {
     292                /*
     293                 * In case of the late reserve areas, physical memory will not
     294                 * be unreserved when the area is destroyed so we need to use
     295                 * the normal unreserving frame_free().
     296                 */
     297                frame_free(frame);
     298        } else {
     299                /*
     300                 * The reserve will be given back when the area is destroyed or
     301                 * resized, so use the frame_free_noreserve() which does not
     302                 * manipulate the reserve or it would be given back twice.
     303                 */
     304                frame_free_noreserve(frame);
     305        }
    258306}
    259307
  • kernel/generic/src/mm/backend_elf.c

    r5d230a30 r69146b93  
    5858static void elf_destroy(as_area_t *);
    5959
     60static bool elf_is_resizable(as_area_t *);
     61static bool elf_is_shareable(as_area_t *);
     62
    6063static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
    6164static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
     
    6669        .share = elf_share,
    6770        .destroy = elf_destroy,
     71
     72        .is_resizable = elf_is_resizable,
     73        .is_shareable = elf_is_shareable,
    6874
    6975        .page_fault = elf_page_fault,
     
    213219}
    214220
     221bool elf_is_resizable(as_area_t *area)
     222{
     223        return true;
     224}
     225
     226bool elf_is_shareable(as_area_t *area)
     227{
     228        return true;
     229}
     230
     231
    215232/** Service a page fault in the ELF backend address space area.
    216233 *
  • kernel/generic/src/mm/backend_phys.c

    r5d230a30 r69146b93  
    5252static void phys_destroy(as_area_t *);
    5353
     54static bool phys_is_resizable(as_area_t *);
     55static bool phys_is_shareable(as_area_t *);
     56
     57
    5458static int phys_page_fault(as_area_t *, uintptr_t, pf_access_t);
    5559
     
    5963        .share = phys_share,
    6064        .destroy = phys_destroy,
     65
     66        .is_resizable = phys_is_resizable,
     67        .is_shareable = phys_is_shareable,
    6168
    6269        .page_fault = phys_page_fault,
     
    8794        /* Nothing to do. */
    8895}
     96
     97bool phys_is_resizable(as_area_t *area)
     98{
     99        return false;
     100}
     101
     102bool phys_is_shareable(as_area_t *area)
     103{
     104        return true;
     105}
     106
    89107
    90108/** Service a page fault in the address space area backed by physical memory.
  • kernel/generic/src/mm/km.c

    r5d230a30 r69146b93  
    234234 * @param[inout] framep Pointer to a variable which will receive the physical
    235235 *                      address of the allocated frame.
    236  * @param[in] flags     Frame allocation flags. FRAME_NONE or FRAME_NO_RESERVE.
     236 * @param[in] flags     Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
     237 *                      and FRAME_ATOMIC bits are allowed.
    237238 * @return              Virtual address of the allocated frame.
    238239 */
     
    244245        ASSERT(THREAD);
    245246        ASSERT(framep);
    246         ASSERT(!(flags & ~FRAME_NO_RESERVE));
     247        ASSERT(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));
    247248
    248249        /*
     
    256257                ASSERT(page);   // FIXME
    257258        } else {
    258                 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME,
    259                     FRAME_LOWMEM);
     259                frame = (uintptr_t) frame_alloc(ONE_FRAME,
     260                    FRAME_LOWMEM | flags);
     261                if (!frame)
     262                        return (uintptr_t) NULL;
    260263                page = PA2KA(frame);
    261264        }
  • kernel/generic/src/proc/program.c

    r5d230a30 r69146b93  
    7979         * Create the stack address space area.
    8080         */
    81         uintptr_t virt = USTACK_ADDRESS;
     81        uintptr_t virt = (uintptr_t) -1;
     82        uintptr_t bound = USER_ADDRESS_SPACE_END - (STACK_SIZE_USER - 1);
     83
     84        /* Adjust bound to create space for the desired guard page. */
     85        bound -= PAGE_SIZE;
     86
    8287        as_area_t *area = as_area_create(as,
    83             AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
    84             STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
     88            AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
     89            AS_AREA_LATE_RESERVE, STACK_SIZE_USER, AS_AREA_ATTR_NONE,
     90            &anon_backend, NULL, &virt, bound);
    8591        if (!area) {
    8692                task_destroy(prg->task);
     
    9399        kernel_uarg->uspace_entry = (void *) entry_addr;
    94100        kernel_uarg->uspace_stack = (void *) virt;
    95         kernel_uarg->uspace_stack_size = STACK_SIZE;
     101        kernel_uarg->uspace_stack_size = STACK_SIZE_USER;
    96102        kernel_uarg->uspace_thread_function = NULL;
    97103        kernel_uarg->uspace_thread_arg = NULL;
  • kernel/generic/src/proc/task.c

    r5d230a30 r69146b93  
    196196        task->ucycles = 0;
    197197        task->kcycles = 0;
    198        
     198
    199199        task->ipc_info.call_sent = 0;
    200200        task->ipc_info.call_received = 0;
Note: See TracChangeset for help on using the changeset viewer.