Changeset 3396f59 in mainline for arch/amd64/include


Ignore:
Timestamp:
2005-09-04T08:28:55Z (20 years ago)
Author:
Ondrej Palkovsky <ondrap@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8ff2f3f
Parents:
005384ad
Message:

Fixed gdtr naming issues after ia32 changes.
Fixed stack alignment on new thread to be a multiple of 16,
we are now ABI-correct and we do not #GP on va_arg to boot.
Fixed bad exception register names reporting.
Fixed bad _hardcoded_load_addr, which caused allocation of kernel text
frames.

Location:
arch/amd64/include
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • arch/amd64/include/asm.h

    r005384ad r3396f59  
    141141}
    142142
     143/** Read CR0
     144 *
     145 * Return value in CR0
     146 *
     147 * @return Value read.
     148 */
     149static inline __u64 read_cr0(void)
     150{
     151        __u64 v;
     152        __asm__ volatile ("movq %%cr0,%0" : "=r" (v));
     153        return v;
     154}
     155
    143156/** Read CR2
    144157 *
     
    147160 * @return Value read.
    148161 */
    149 static inline __u64 read_cr2(void) { __u64 v; __asm__ volatile ("movq %%cr2,%0" : "=r" (v)); return v; }
     162static inline __u64 read_cr2(void)
     163{
     164        __u64 v;
     165        __asm__ volatile ("movq %%cr2,%0" : "=r" (v));
     166        return v;
     167}
    150168
    151169/** Write CR3
     
    155173 * @param v Value to be written.
    156174 */
    157 static inline void write_cr3(__u64 v) { __asm__ volatile ("movq %0,%%cr3\n" : : "r" (v)); }
     175static inline void write_cr3(__u64 v)
     176{
     177        __asm__ volatile ("movq %0,%%cr3\n" : : "r" (v));
     178}
    158179
    159180/** Read CR3
     
    163184 * @return Value read.
    164185 */
    165 static inline __u64 read_cr3(void) { __u64 v; __asm__ volatile ("movq %%cr3,%0" : "=r" (v)); return v; }
     186static inline __u64 read_cr3(void)
     187{
     188        __u64 v;
     189        __asm__ volatile ("movq %%cr3,%0" : "=r" (v));
     190        return v;
     191}
    166192
    167193
  • arch/amd64/include/context.h

    r005384ad r3396f59  
    3434#endif
    3535
    36 #define SP_DELTA     8
     36
     37/* According to ABI the stack MUST be aligned on
     38 * 16-byte boundary. If it is not, the va_arg calling will
     39 * panic sooner or later
     40 */
     41#define SP_DELTA     16
    3742
    3843struct context {
  • arch/amd64/include/cpu.h

    r005384ad r3396f59  
    5656extern void set_efer_flag(int flag);
    5757extern __u64 read_efer_flag(void);
     58void cpu_setup_fpu(void);
    5859
    5960#endif /* __ASM__ */
  • arch/amd64/include/cpuid.h

    r005384ad r3396f59  
    3030#define __CPUID_H__
    3131
    32 #define AMD_CPUID_EXTENDED 0x80000001
     32#define AMD_CPUID_EXTENDED   0x80000001
    3333#define AMD_EXT_NOEXECUTE    20
     34
     35#define INTEL_CPUID_STANDARD 0x1
     36#define INTEL_SSE2           26
     37#define INTEL_FXSAVE         24
    3438
    3539#ifndef __ASM__
  • arch/amd64/include/pm.h

    r005384ad r3396f59  
    146146
    147147extern struct ptr_16_64 gdtr;
    148 extern struct ptr_16_32 bsp_bootstrap_gdtr;
    149 extern struct ptr_16_32 ap_bootstrap_gdtr;
     148extern struct ptr_16_32 real_bootstrap_gdtr;
    150149
    151150extern void pm_init(void);
Note: See TracChangeset for help on using the changeset viewer.