Changeset 46c20c8 in mainline for kernel/arch/abs32le/include


Timestamp: 2010-11-26T20:08:10Z (15 years ago)
Author: Jiri Svoboda <jiri@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 45df59a
Parents: fb150d78 (diff), ffdd2b9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge mainline changes.
Location: kernel/arch/abs32le/include
Files: 1 added, 17 edited

  • kernel/arch/abs32le/include/asm.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_ASM_H_

    -#include <arch/types.h>
     #include <typedefs.h>
     #include <config.h>
    -
    -extern void interrupt_handlers(void);
    -
    -extern void enable_l_apic_in_msr(void);
    -
    -
    -extern void asm_delay_loop(uint32_t);
    -extern void asm_fake_loop(uint32_t);
    -
    -
    -static inline __attribute__((noreturn)) void cpu_halt(void)
    +#include <trace.h>
    +
    +NO_TRACE static inline void asm_delay_loop(uint32_t usec)
    +{
    +}
    +
    +NO_TRACE static inline __attribute__((noreturn)) void cpu_halt(void)
     {
             /* On real hardware this should stop processing further
    …
     }

    -static inline void cpu_sleep(void)
    +NO_TRACE static inline void cpu_sleep(void)
     {
             /* On real hardware this should put the CPU into low-power
    …
     }

    -static inline void pio_write_8(ioport8_t *port, uint8_t val)
    +NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t val)
     {
     }
    …
      *
      */
    -static inline void pio_write_16(ioport16_t *port, uint16_t val)
    +NO_TRACE static inline void pio_write_16(ioport16_t *port, uint16_t val)
     {
     }
    …
      *
      */
    -static inline void pio_write_32(ioport32_t *port, uint32_t val)
    +NO_TRACE static inline void pio_write_32(ioport32_t *port, uint32_t val)
     {
     }
    …
      *
      */
    -static inline uint8_t pio_read_8(ioport8_t *port)
    +NO_TRACE static inline uint8_t pio_read_8(ioport8_t *port)
     {
             return 0;
    …
      *
      */
    -static inline uint16_t pio_read_16(ioport16_t *port)
    +NO_TRACE static inline uint16_t pio_read_16(ioport16_t *port)
     {
             return 0;
    …
      *
      */
    -static inline uint32_t pio_read_32(ioport32_t *port)
    -{
    -        return 0;
    -}
    -
    -static inline ipl_t interrupts_enable(void)
    -{
    -        /* On real hardware this unconditionally enables preemption
    -           by internal and external interrupts.
    -
    -           The return value stores the previous interrupt level. */
    -
    -        return 0;
    -}
    -
    -static inline ipl_t interrupts_disable(void)
    -{
    -        /* On real hardware this disables preemption by the usual
    -           set of internal and external interrupts. This does not
    -           apply to special non-maskable interrupts and sychronous
    -           CPU exceptions.
    -
    -           The return value stores the previous interrupt level. */
    -
    -        return 0;
    -}
    -
    -static inline void interrupts_restore(ipl_t ipl)
    -{
    -        /* On real hardware this either enables or disables preemption
    -           according to the interrupt level value from the argument. */
    -}
    -
    -static inline ipl_t interrupts_read(void)
    -{
    -        /* On real hardware the return value stores the current interrupt
    -           level. */
    -
    -        return 0;
    -}
    -
    -static inline uintptr_t get_stack_base(void)
    -{
    -        /* On real hardware this returns the address of the bottom
    -           of the current CPU stack. The the_t structure is stored
    -           on the bottom of stack and this is used to identify the
    -           current CPU, current task, current thread and current
    -           address space. */
    -
    -        return 0;
    -}
    -
    -static inline uintptr_t *get_ip()
    -{
    -        /* On real hardware this returns the current instruction
    -           pointer value. The value certainly changes with each
    -           instruction, but it can be still used to identify
    -           a specific function. */
    +NO_TRACE static inline uint32_t pio_read_32(ioport32_t *port)
    +{
    +        return 0;
    +}
    +
    +NO_TRACE static inline ipl_t interrupts_enable(void)
    +{
    +        /*
    +         * On real hardware this unconditionally enables preemption
    +         * by internal and external interrupts.
    +         *
    +         * The return value stores the previous interrupt level.
    +         */
    +
    +        return 0;
    +}
    +
    +NO_TRACE static inline ipl_t interrupts_disable(void)
    +{
    +        /*
    +         * On real hardware this disables preemption by the usual
    +         * set of internal and external interrupts. This does not
    +         * apply to special non-maskable interrupts and sychronous
    +         * CPU exceptions.
    +         *
    +         * The return value stores the previous interrupt level.
    +         */
    +
    +        return 0;
    +}
    +
    +NO_TRACE static inline void interrupts_restore(ipl_t ipl)
    +{
    +        /*
    +         * On real hardware this either enables or disables preemption
    +         * according to the interrupt level value from the argument.
    +         */
    +}
    +
    +NO_TRACE static inline ipl_t interrupts_read(void)
    +{
    +        /*
    +         * On real hardware the return value stores the current interrupt
    +         * level.
    +         */
    +
    +        return 0;
    +}
    +
    +NO_TRACE static inline bool interrupts_disabled(void)
    +{
    +        /*
    +         * On real hardware the return value is true iff interrupts are
    +         * disabled.
    +         */
    +
    +        return false;
    +}
    +
    +NO_TRACE static inline uintptr_t get_stack_base(void)
    +{
    +        /*
    +         * On real hardware this returns the address of the bottom
    +         * of the current CPU stack. The the_t structure is stored
    +         * on the bottom of stack and this is used to identify the
    +         * current CPU, current task, current thread and current
    +         * address space.
    +         */

             return 0;
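    The rewritten asm.h stubs keep the calling convention the rest of the kernel relies on: interrupts_disable() hands back the previous interrupt priority level and interrupts_restore() takes it again. The fragment below is a minimal caller-side sketch of that idiom; it is not part of the changeset, and the function name and include path are illustrative only.

        #include <arch/asm.h>   /* assumed path for the stubs above */

        /* Hypothetical caller: protect a short section from interrupt preemption. */
        static void example_critical_section(void)
        {
                ipl_t ipl = interrupts_disable();   /* returns the previous level */

                /* ... work that must not be interrupted ... */

                interrupts_restore(ipl);   /* re-enables only if it was enabled before */
        }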
  • kernel/arch/abs32le/include/atomic.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_ATOMIC_H_

    -#include <arch/types.h>
    +#include <typedefs.h>
     #include <arch/barrier.h>
     #include <preemption.h>
    +#include <verify.h>
    +#include <trace.h>

    -static inline void atomic_inc(atomic_t *val) {
    +NO_TRACE ATOMIC static inline void atomic_inc(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
    +    REQUIRES(val->count < ATOMIC_COUNT_MAX)
    +{
             /* On real hardware the increment has to be done
                as an atomic action. */
    …
     }

    -static inline void atomic_dec(atomic_t *val) {
    +NO_TRACE ATOMIC static inline void atomic_dec(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
    +    REQUIRES(val->count > ATOMIC_COUNT_MIN)
    +{
             /* On real hardware the decrement has to be done
                as an atomic action. */

    -        val->count++;
    +        val->count--;
     }

    -static inline long atomic_postinc(atomic_t *val)
    +NO_TRACE ATOMIC static inline atomic_count_t atomic_postinc(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
    +    REQUIRES(val->count < ATOMIC_COUNT_MAX)
     {
             /* On real hardware both the storing of the previous
    …
                atomic action. */

    -        long prev = val->count;
    +        atomic_count_t prev = val->count;

             val->count++;
    …
     }

    -static inline long atomic_postdec(atomic_t *val)
    +NO_TRACE ATOMIC static inline atomic_count_t atomic_postdec(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
    +    REQUIRES(val->count > ATOMIC_COUNT_MIN)
     {
             /* On real hardware both the storing of the previous
    …
                atomic action. */

    -        long prev = val->count;
    +        atomic_count_t prev = val->count;

             val->count--;
    …
     #define atomic_predec(val)  (atomic_postdec(val) - 1)

    -static inline uint32_t test_and_set(atomic_t *val) {
    -        uint32_t v;
    +NO_TRACE ATOMIC static inline atomic_count_t test_and_set(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
    +{
    +        /* On real hardware the retrieving of the original
    +           value and storing 1 have to be done as a single
    +           atomic action. */

    -        asm volatile (
    -                "movl $1, %[v]\n"
    -                "xchgl %[v], %[count]\n"
    -                : [v] "=r" (v), [count] "+m" (val->count)
    -        );
    -
    -        return v;
    +        atomic_count_t prev = val->count;
    +        val->count = 1;
    +        return prev;
     }

    -/** ia32 specific fast spinlock */
    -static inline void atomic_lock_arch(atomic_t *val)
    +NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
    +    WRITES(&val->count)
    +    REQUIRES_EXTENT_MUTABLE(val)
     {
    -        uint32_t tmp;
    -
    -        preemption_disable();
    -        asm volatile (
    -                "0:\n"
    -                "pause\n"        /* Pentium 4's HT love this instruction */
    -                "mov %[count], %[tmp]\n"
    -                "testl %[tmp], %[tmp]\n"
    -                "jnz 0b\n"       /* lightweight looping on locked spinlock */
    -
    -                "incl %[tmp]\n"  /* now use the atomic operation */
    -                "xchgl %[count], %[tmp]\n"
    -                "testl %[tmp], %[tmp]\n"
    -                "jnz 0b\n"
    -                : [count] "+m" (val->count), [tmp] "=&r" (tmp)
    -        );
    -        /*
    -         * Prevent critical section code from bleeding out this way up.
    -         */
    -        CS_ENTER_BARRIER();
    +        do {
    +                while (val->count);
    +        } while (test_and_set(val));
     }

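    The merge drops the leftover ia32 inline assembly and rebuilds atomic_lock_arch() on top of test_and_set(): spin read-only while the lock appears taken, then attempt the atomic swap. The stand-alone sketch below mirrors that pattern with hypothetical toy_* names so it compiles outside the kernel; a real port would need a genuinely atomic exchange instead of the plain read-modify-write shown here.

        typedef struct {
                volatile unsigned int count;   /* 0 = unlocked, 1 = locked */
        } toy_lock_t;

        static unsigned int toy_test_and_set(toy_lock_t *lock)
        {
                /* On real hardware this read-modify-write must be one atomic step. */
                unsigned int prev = lock->count;
                lock->count = 1;
                return prev;
        }

        static void toy_lock(toy_lock_t *lock)
        {
                do {
                        while (lock->count);          /* cheap read-only spin */
                } while (toy_test_and_set(lock));     /* contend only when it looks free */
        }

        static void toy_unlock(toy_lock_t *lock)
        {
                lock->count = 0;
        }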
  • kernel/arch/abs32le/include/barrier.h (rfb150d78 → r46c20c8)

      */

    -/** @addtogroup ia32
    +/** @addtogroup abs32le
      * @{
      */
    …
      */

    -#ifndef KERN_ia32_BARRIER_H_
    -#define KERN_ia32_BARRIER_H_
    -
    -/*
    - * NOTE:
    - * No barriers for critical section (i.e. spinlock) on IA-32 are needed:
    - * - spinlock_lock() and spinlock_trylock() use serializing XCHG instruction
    - * - writes cannot pass reads on IA-32 => spinlock_unlock() needs no barriers
    - */
    +#ifndef KERN_abs32le_BARRIER_H_
    +#define KERN_abs32le_BARRIER_H_

     /*
    …
      */

    -#define CS_ENTER_BARRIER()  asm volatile ("" ::: "memory")
    -#define CS_LEAVE_BARRIER()  asm volatile ("" ::: "memory")
    +#define CS_ENTER_BARRIER()
    +#define CS_LEAVE_BARRIER()

    -static inline void cpuid_serialization(void)
    -{
    -        asm volatile (
    -                "xorl %%eax, %%eax\n"
    -                "cpuid\n"
    -                ::: "eax", "ebx", "ecx", "edx", "memory"
    -        );
    -}
    +#define memory_barrier()
    +#define read_barrier()
    +#define write_barrier()

    -#if defined(CONFIG_FENCES_P4)
    -        #define memory_barrier()  asm volatile ("mfence\n" ::: "memory")
    -        #define read_barrier()    asm volatile ("lfence\n" ::: "memory")
    -        #ifdef CONFIG_WEAK_MEMORY
    -                #define write_barrier()  asm volatile ("sfence\n" ::: "memory")
    -        #else
    -                #define write_barrier()  asm volatile ("" ::: "memory");
    -        #endif
    -#elif defined(CONFIG_FENCES_P3)
    -        #define memory_barrier()  cpuid_serialization()
    -        #define read_barrier()    cpuid_serialization()
    -        #ifdef CONFIG_WEAK_MEMORY
    -                #define write_barrier()  asm volatile ("sfence\n" ::: "memory")
    -        #else
    -                #define write_barrier()  asm volatile ("" ::: "memory");
    -        #endif
    -#else
    -        #define memory_barrier()  cpuid_serialization()
    -        #define read_barrier()    cpuid_serialization()
    -        #ifdef CONFIG_WEAK_MEMORY
    -                #define write_barrier()  cpuid_serialization()
    -        #else
    -                #define write_barrier()  asm volatile ("" ::: "memory");
    -        #endif
    -#endif
    -
    -/*
    - * On ia32, the hardware takes care about instruction and data cache coherence,
    - * even on SMP systems.  We issue a write barrier to be sure that writes
    - * queueing in the store buffer drain to the memory (even though it would be
    - * sufficient for them to drain to the D-cache).
    - */
    -#define smc_coherence(a)           write_barrier()
    -#define smc_coherence_block(a, l)  write_barrier()
    +#define smc_coherence(addr)
    +#define smc_coherence_block(addr, size)

     #endif
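    On abs32le every barrier expands to nothing, since no code ever executes on this abstract architecture; on a real SMP port write_barrier() and read_barrier() would pair up in the usual publish/consume pattern. The snippet below only illustrates that pairing with hypothetical variables and is not taken from the changeset.

        static int payload;
        static volatile int ready;

        static void publisher(void)
        {
                payload = 42;
                write_barrier();   /* publish the payload before raising the flag */
                ready = 1;
        }

        static void consumer(void)
        {
                while (!ready)
                        ;
                read_barrier();    /* do not read the payload ahead of the flag */
                (void) payload;
        }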
  • kernel/arch/abs32le/include/context.h (rfb150d78 → r46c20c8)


     #define context_set(ctx, pc, stack, size) \
    -    context_set_generic(ctx, pc, stack, size)
    +        context_set_generic(ctx, pc, stack, size)

     /*
  • kernel/arch/abs32le/include/context_offset.h (rfb150d78 → r46c20c8)


     #define OFFSET_PC  0x00
    -
    -#ifdef KERNEL
    -        #define OFFSET_IPL 0x04
    -#else
    -        #define OFFSET_TLS 0x04
    -#endif
    +#define OFFSET_IPL 0x04

     #endif
  • kernel/arch/abs32le/include/cycle.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_CYCLE_H_

    -static inline uint64_t get_cycle(void)
    +#include <trace.h>
    +
    +NO_TRACE static inline uint64_t get_cycle(void)
     {
             return 0;
  • kernel/arch/abs32le/include/faddr.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_FADDR_H_

    -#include <arch/types.h>
    +#include <typedefs.h>

     #define FADDR(fptr)  ((uintptr_t) (fptr))
  • kernel/arch/abs32le/include/fpu_context.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_FPU_CONTEXT_H_

    -#include <arch/types.h>
    +#include <typedefs.h>

     #define FPU_CONTEXT_ALIGN  16
  • kernel/arch/abs32le/include/interrupt.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_INTERRUPT_H_

    -#include <arch/types.h>
    +#include <typedefs.h>
    +#include <arch/istate.h>

     #define IVT_ITEMS  0
    …
     #define VECTOR_TLB_SHOOTDOWN_IPI  0

    -/*
    - * On real hardware this stores the registers which
    - * need to be preserved during interupts.
    - */
    -typedef struct istate {
    -        uintptr_t ip;
    -        uintptr_t fp;
    -        uint32_t stack[];
    -} istate_t;
    -
    -static inline int istate_from_uspace(istate_t *istate)
    -{
    -        /* On real hardware this checks whether the interrupted
    -           context originated from user space. */
    -
    -        return !(istate->ip & 0x80000000);
    -}
    -
    -static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
    -{
    -        /* On real hardware this sets the instruction pointer. */
    -
    -        istate->ip = retaddr;
    -}
    -
    -static inline unative_t istate_get_pc(istate_t *istate)
    -{
    -        /* On real hardware this returns the instruction pointer. */
    -
    -        return istate->ip;
    -}
    -
    -static inline unative_t istate_get_fp(istate_t *istate)
    -{
    -        /* On real hardware this returns the frame pointer. */
    -
    -        return istate->fp;
    -}
    -
     #endif

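    The istate_t structure and its accessors leave interrupt.h and are now pulled in through <arch/istate.h> (presumably the one added file in this changeset). The removed istate_from_uspace() decided user versus kernel origin purely by bit 31 of the saved instruction pointer, matching the 0x80000000 split declared in mm/as.h further below. A self-contained restatement of that test, with a hypothetical helper name:

        #include <stdint.h>

        /* Hypothetical helper restating the removed check: addresses with
         * bit 31 set belong to the kernel half of the 4 GiB address space. */
        static int ip_is_uspace(uintptr_t ip)
        {
                return !(ip & UINT32_C(0x80000000));
        }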
  • kernel/arch/abs32le/include/memstr.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_MEMSTR_H_

    -#define memcpy(dst, src, cnt)  __builtin_memcpy((dst), (src), (cnt))
    -
    -extern void memsetw(void *, size_t, uint16_t);
    -extern void memsetb(void *, size_t, uint8_t);
    -
    -extern int memcmp(const void *, const void *, size_t);
    +#define memcpy(dst, src, cnt)   _memcpy((dst), (src), (cnt))
    +#define memsetb(dst, cnt, val)  _memsetb((dst), (cnt), (val))
    +#define memsetw(dst, cnt, val)  _memsetw((dst), (cnt), (val))

     #endif
  • kernel/arch/abs32le/include/mm/as.h (rfb150d78 → r46c20c8)

     #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0

    -#define KERNEL_ADDRESS_SPACE_START_ARCH  ((unsigned long) 0x80000000)
    -#define KERNEL_ADDRESS_SPACE_END_ARCH    ((unsigned long) 0xffffffff)
    -#define USER_ADDRESS_SPACE_START_ARCH    ((unsigned long) 0x00000000)
    -#define USER_ADDRESS_SPACE_END_ARCH      ((unsigned long) 0x7fffffff)
    +#define KERNEL_ADDRESS_SPACE_START_ARCH  UINT32_C(0x80000000)
    +#define KERNEL_ADDRESS_SPACE_END_ARCH    UINT32_C(0xffffffff)
    +#define USER_ADDRESS_SPACE_START_ARCH    UINT32_C(0x00000000)
    +#define USER_ADDRESS_SPACE_END_ARCH      UINT32_C(0x7fffffff)

     #define USTACK_ADDRESS_ARCH  (USER_ADDRESS_SPACE_END_ARCH - (PAGE_SIZE - 1))
  • kernel/arch/abs32le/include/mm/asid.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_ASID_H_

    -#include <arch/types.h>
    +#include <typedefs.h>

     typedef uint32_t asid_t;
  • kernel/arch/abs32le/include/mm/frame.h (rfb150d78 → r46c20c8)


     #ifdef KERNEL
    -#ifndef __ASM__

    -#include <arch/types.h>
    +#include <typedefs.h>

     extern void frame_arch_init(void);
     extern void physmem_print(void);

    -#endif /* __ASM__ */
     #endif /* KERNEL */

  • kernel/arch/abs32le/include/mm/page.h (rfb150d78 → r46c20c8)


     #include <arch/mm/frame.h>
    +#include <trace.h>

     #define PAGE_WIDTH  FRAME_WIDTH
    …
     #ifdef KERNEL

    -#ifndef __ASM__
    -        #define KA2PA(x)  (((uintptr_t) (x)) - 0x80000000)
    -        #define PA2KA(x)  (((uintptr_t) (x)) + 0x80000000)
    -#else
    -        #define KA2PA(x)  ((x) - 0x80000000)
    -        #define PA2KA(x)  ((x) + 0x80000000)
    -#endif
    +#define KA2PA(x)  (((uintptr_t) (x)) - UINT32_C(0x80000000))
    +#define PA2KA(x)  (((uintptr_t) (x)) + UINT32_C(0x80000000))

     /*
    …

     /* Macros calculating indices for each level. */
    -#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
    +#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ffU)
     #define PTL1_INDEX_ARCH(vaddr)  0
     #define PTL2_INDEX_ARCH(vaddr)  0
    -#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)
    +#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ffU)

     /* Get PTE address accessors for each level. */
    …
     #define PTE_EXECUTABLE_ARCH(p)  1

    -#ifndef __ASM__
    -
     #include <mm/mm.h>
     #include <arch/interrupt.h>
    -#include <arch/types.h>
     #include <typedefs.h>
    -
    -/* Page fault error codes. */
    -
    -/** When bit on this position is 0, the page fault was caused by a not-present
    - * page.
    - */
    -#define PFERR_CODE_P            (1 << 0)
    -
    -/** When bit on this position is 1, the page fault was caused by a write. */
    -#define PFERR_CODE_RW           (1 << 1)
    -
    -/** When bit on this position is 1, the page fault was caused in user mode. */
    -#define PFERR_CODE_US           (1 << 2)
    -
    -/** When bit on this position is 1, a reserved bit was set in page directory. */
    -#define PFERR_CODE_RSVD         (1 << 3)

     /** Page Table Entry. */
     typedef struct {
    -        unsigned present : 1;
    -        unsigned writeable : 1;
    -        unsigned uaccessible : 1;
    -        unsigned page_write_through : 1;
    -        unsigned page_cache_disable : 1;
    -        unsigned accessed : 1;
    -        unsigned dirty : 1;
    -        unsigned pat : 1;
    -        unsigned global : 1;
    -        unsigned soft_valid : 1;        /**< Valid content even if the present bit is not set. */
    -        unsigned avl : 2;
    -        unsigned frame_address : 20;
    -} __attribute__ ((packed)) pte_t;
    +        unsigned int present : 1;
    +        unsigned int writeable : 1;
    +        unsigned int uaccessible : 1;
    +        unsigned int page_write_through : 1;
    +        unsigned int page_cache_disable : 1;
    +        unsigned int accessed : 1;
    +        unsigned int dirty : 1;
    +        unsigned int pat : 1;
    +        unsigned int global : 1;
    +
    +        /** Valid content even if the present bit is not set. */
    +        unsigned int soft_valid : 1;
    +        unsigned int avl : 2;
    +        unsigned int frame_address : 20;
    +} __attribute__((packed)) pte_t;

    -static inline unsigned int get_pt_flags(pte_t *pt, size_t i)
    +NO_TRACE static inline unsigned int get_pt_flags(pte_t *pt, size_t i)
    +    REQUIRES_ARRAY_MUTABLE(pt, PTL0_ENTRIES_ARCH)
     {
             pte_t *p = &pt[i];

    -        return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
    -            (!p->present) << PAGE_PRESENT_SHIFT |
    -            p->uaccessible << PAGE_USER_SHIFT |
    -            1 << PAGE_READ_SHIFT |
    -            p->writeable << PAGE_WRITE_SHIFT |
    -            1 << PAGE_EXEC_SHIFT |
    -            p->global << PAGE_GLOBAL_SHIFT);
    +        return (
    +            ((unsigned int) (!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT) |
    +            ((unsigned int) (!p->present) << PAGE_PRESENT_SHIFT) |
    +            ((unsigned int) p->uaccessible << PAGE_USER_SHIFT) |
    +            (1 << PAGE_READ_SHIFT) |
    +            ((unsigned int) p->writeable << PAGE_WRITE_SHIFT) |
    +            (1 << PAGE_EXEC_SHIFT) |
    +            ((unsigned int) p->global << PAGE_GLOBAL_SHIFT)
    +        );
     }

    -static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
    +NO_TRACE static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
    +    WRITES(ARRAY_RANGE(pt, PTL0_ENTRIES_ARCH))
    +    REQUIRES_ARRAY_MUTABLE(pt, PTL0_ENTRIES_ARCH)
     {
             pte_t *p = &pt[i];
    …

     extern void page_arch_init(void);
    -extern void page_fault(int n, istate_t *istate);
    -
    -#endif /* __ASM__ */
    +extern void page_fault(unsigned int, istate_t *);

     #endif /* KERNEL */
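    The index macros treat a 32-bit virtual address as a 10-bit PTL0 index, a 10-bit PTL3 index and a 12-bit offset into a 4 KiB page. A small stand-alone example of that split; the address is arbitrary and the program merely restates the macro arithmetic:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t vaddr = UINT32_C(0x8048a123);    /* arbitrary example address */

                uint32_t ptl0   = (vaddr >> 22) & 0x3ffU; /* as in PTL0_INDEX_ARCH() */
                uint32_t ptl3   = (vaddr >> 12) & 0x3ffU; /* as in PTL3_INDEX_ARCH() */
                uint32_t offset = vaddr & 0xfffU;         /* low 12 bits: page offset */

                printf("ptl0=%u ptl3=%u offset=0x%x\n",
                    (unsigned) ptl0, (unsigned) ptl3, (unsigned) offset);
                return 0;
        }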
  • kernel/arch/abs32le/include/proc/task.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_TASK_H_

    -#include <arch/types.h>
    +#include <typedefs.h>
     #include <adt/bitmap.h>

  • kernel/arch/abs32le/include/proc/thread.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_THREAD_H_

    -#include <arch/types.h>
    +#include <typedefs.h>

     typedef struct {
  • kernel/arch/abs32le/include/types.h (rfb150d78 → r46c20c8)

     #define KERN_abs32le_TYPES_H_

    -typedef signed char int8_t;
    -typedef signed short int16_t;
    -typedef signed long int32_t;
    -typedef signed long long int64_t;
    -
    -typedef unsigned char uint8_t;
    -typedef unsigned short uint16_t;
    -typedef unsigned long uint32_t;
    -typedef unsigned long long uint64_t;
    +#define ATOMIC_COUNT_MIN  UINT32_MIN
    +#define ATOMIC_COUNT_MAX  UINT32_MAX

     typedef uint32_t size_t;
    …
     typedef uint32_t unative_t;
     typedef int32_t native_t;
    +typedef uint32_t atomic_count_t;

     typedef struct {
     } fncptr_t;

    -#define PRIp  "x"  /**< Format for uintptr_t. */
    -#define PRIs  "u"  /**< Format for size_t. */
    +#define INTN_C(c)   INT32_C(c)
    +#define UINTN_C(c)  UINT32_C(c)

    -#define PRId8   "d"    /**< Format for int8_t. */
    -#define PRId16  "d"    /**< Format for int16_t. */
    -#define PRId32  "d"    /**< Format for int32_t. */
    -#define PRId64  "lld"  /**< Format for int64_t. */
    -#define PRIdn   "d"    /**< Format for native_t. */
    -
    -#define PRIu8   "u"    /**< Format for uint8_t. */
    -#define PRIu16  "u"    /**< Format for uint16_t. */
    -#define PRIu32  "u"    /**< Format for uint32_t. */
    -#define PRIu64  "llu"  /**< Format for uint64_t. */
    -#define PRIun   "u"    /**< Format for unative_t. */
    -
    -#define PRIx8   "x"    /**< Format for hexadecimal (u)int8_t. */
    -#define PRIx16  "x"    /**< Format for hexadecimal (u)int16_t. */
    -#define PRIx32  "x"    /**< Format for hexadecimal (u)uint32_t. */
    -#define PRIx64  "llx"  /**< Format for hexadecimal (u)int64_t. */
    -#define PRIxn   "x"    /**< Format for hexadecimal (u)native_t. */
    +#define PRIdn  PRId32  /**< Format for native_t. */
    +#define PRIun  PRIu32  /**< Format for unative_t. */
    +#define PRIxn  PRIx32  /**< Format for hexadecimal unative_t. */
    +#define PRIua  PRIu32  /**< Format for atomic_count_t. */

     #endif
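    The per-width PRI* macros disappear from types.h; the remaining PRIdn, PRIun, PRIxn and PRIua simply forward to the 32-bit variants. A stand-alone analogue using the C99 <inttypes.h> macros they mirror, showing the string-literal concatenation these format macros rely on:

        #include <inttypes.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t value = UINT32_C(0xdeadbeef);   /* unative_t is uint32_t on abs32le */

                /* PRIun/PRIxn expand to PRIu32/PRIx32, so this is the same pattern. */
                printf("value = %" PRIu32 " (0x%" PRIx32 ")\n", value, value);
                return 0;
        }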