Changeset a1f60f3 in mainline


Ignore:
Timestamp:
2010-06-27T23:04:20Z (14 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
64f6ef04
Parents:
33dac7d
Message:

move from "kernel" memory model to "large" memory model
get rid of the extra identity mapping of the physical memory at -2 GB

Location:
kernel/arch/amd64
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/Makefile.inc

    r33dac7d ra1f60f3  
    3333
    3434FPU_NO_CFLAGS = -mno-sse -mno-sse2
    35 CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer
     35CMN1 = -m64 -mcmodel=large -mno-red-zone -fno-unwind-tables -fno-omit-frame-pointer
    3636GCC_CFLAGS += $(CMN1)
    3737ICC_CFLAGS += $(CMN1)
  • kernel/arch/amd64/include/mm/as.h

    r33dac7d ra1f60f3  
    2727 */
    2828
    29 /** @addtogroup amd64mm 
     29/** @addtogroup amd64mm
    3030 * @{
    3131 */
     
    3636#define KERN_amd64_AS_H_
    3737
    38 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH      0
     38#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0
    3939
    40 #define KERNEL_ADDRESS_SPACE_START_ARCH         (unsigned long) 0xffff800000000000
    41 #define KERNEL_ADDRESS_SPACE_END_ARCH           (unsigned long) 0xffffffff80000000
    42 #define USER_ADDRESS_SPACE_START_ARCH           (unsigned long) 0x0000000000000000
    43 #define USER_ADDRESS_SPACE_END_ARCH             (unsigned long) 0x00007fffffffffff
     40#define KERNEL_ADDRESS_SPACE_START_ARCH  (unsigned long) 0xffff800000000000
     41#define KERNEL_ADDRESS_SPACE_END_ARCH    (unsigned long) 0xffffffffffffffff
    4442
    45 #define USTACK_ADDRESS_ARCH     (USER_ADDRESS_SPACE_END_ARCH-(PAGE_SIZE-1))
     43#define USER_ADDRESS_SPACE_START_ARCH    (unsigned long) 0x0000000000000000
     44#define USER_ADDRESS_SPACE_END_ARCH      (unsigned long) 0x00007fffffffffff
    4645
    47 #define as_constructor_arch(as, flags)          (as != as)
    48 #define as_destructor_arch(as)                  (as != as)
    49 #define as_create_arch(as, flags)               (as != as)
     46#define USTACK_ADDRESS_ARCH  (USER_ADDRESS_SPACE_END_ARCH - (PAGE_SIZE - 1))
     47
     48#define as_constructor_arch(as, flags)  (as != as)
     49#define as_destructor_arch(as)          (as != as)
     50#define as_create_arch(as, flags)       (as != as)
     51
    5052#define as_install_arch(as)
    5153#define as_deinstall_arch(as)
  • kernel/arch/amd64/include/mm/page.h

    r33dac7d ra1f60f3  
    3535/** Paging on AMD64
    3636 *
    37  * The space is divided in positive numbers - userspace and
    38  * negative numbers - kernel space. The 'negative' space starting
    39  * with 0xffff800000000000 and ending with 0xffffffff80000000
    40  * (-2GB) is identically mapped physical memory. The area
    41  * (0xffffffff80000000 ... 0xffffffffffffffff is again identically
    42  * mapped first 2GB.
    43  *
    44  * ATTENTION - PA2KA(KA2PA(x)) != x if 'x' is in kernel
     37 * The space is divided in positive numbers (uspace) and
     38 * negative numbers (kernel). The 'negative' space starting
     39 * with 0xffff800000000000 and ending with 0xffffffffffffffff
     40 * is identically mapped physical memory.
     41 *
    4542 */
    4643
     
    5047#include <arch/mm/frame.h>
    5148
    52 #define PAGE_WIDTH      FRAME_WIDTH
    53 #define PAGE_SIZE       FRAME_SIZE
     49#define PAGE_WIDTH  FRAME_WIDTH
     50#define PAGE_SIZE   FRAME_SIZE
    5451
    5552#ifdef KERNEL
    5653
    5754#ifndef __ASM__
    58 #       include <mm/mm.h>
    59 #       include <typedefs.h>
    60 #       include <arch/interrupt.h>
    61 
    62 static inline uintptr_t ka2pa(uintptr_t x)
    63 {
    64         if (x > 0xffffffff80000000)
    65                 return x - 0xffffffff80000000;
    66         else
    67                 return x - 0xffff800000000000;
    68 }
    69 
    70 #       define KA2PA(x)         ka2pa((uintptr_t) x)
    71 #       define PA2KA_CODE(x)    (((uintptr_t) (x)) + 0xffffffff80000000)
    72 #       define PA2KA(x)         (((uintptr_t) (x)) + 0xffff800000000000)
    73 #else
    74 #       define KA2PA(x)         ((x) - 0xffffffff80000000)
    75 #       define PA2KA(x)         ((x) + 0xffffffff80000000)
    76 #endif
     55
     56#define KA2PA(x)  (((uintptr_t) (x)) - 0xffff800000000000)
     57#define PA2KA(x)  (((uintptr_t) (x)) + 0xffff800000000000)
     58
     59#else /* __ASM__ */
     60
     61#define KA2PA(x)  ((x) - 0xffff800000000000)
     62#define PA2KA(x)  ((x) + 0xffff800000000000)
     63
     64#endif /* __ASM__ */
    7765
    7866/* Number of entries in each level. */
    79 #define PTL0_ENTRIES_ARCH       512
    80 #define PTL1_ENTRIES_ARCH       512
    81 #define PTL2_ENTRIES_ARCH       512
    82 #define PTL3_ENTRIES_ARCH       512
     67#define PTL0_ENTRIES_ARCH  512
     68#define PTL1_ENTRIES_ARCH  512
     69#define PTL2_ENTRIES_ARCH  512
     70#define PTL3_ENTRIES_ARCH  512
    8371
    8472/* Page table sizes for each level. */
    85 #define PTL0_SIZE_ARCH          ONE_FRAME
    86 #define PTL1_SIZE_ARCH          ONE_FRAME
    87 #define PTL2_SIZE_ARCH          ONE_FRAME
    88 #define PTL3_SIZE_ARCH          ONE_FRAME
     73#define PTL0_SIZE_ARCH  ONE_FRAME
     74#define PTL1_SIZE_ARCH  ONE_FRAME
     75#define PTL2_SIZE_ARCH  ONE_FRAME
     76#define PTL3_SIZE_ARCH  ONE_FRAME
    8977
    9078/* Macros calculating indices into page tables in each level. */
    91 #define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 39) & 0x1ff)
    92 #define PTL1_INDEX_ARCH(vaddr)  (((vaddr) >> 30) & 0x1ff)
    93 #define PTL2_INDEX_ARCH(vaddr)  (((vaddr) >> 21) & 0x1ff)
    94 #define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x1ff)
     79#define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 39) & 0x1ff)
     80#define PTL1_INDEX_ARCH(vaddr)  (((vaddr) >> 30) & 0x1ff)
     81#define PTL2_INDEX_ARCH(vaddr)  (((vaddr) >> 21) & 0x1ff)
     82#define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x1ff)
    9583
    9684/* Get PTE address accessors for each level. */
     
    156144#ifndef __ASM__
    157145
     146#include <mm/mm.h>
     147#include <arch/interrupt.h>
     148#include <typedefs.h>
     149
    158150/* Page fault error codes. */
    159151
     
    161153 * page.
    162154 */
    163 #define PFERR_CODE_P            (1 << 0) 
     155#define PFERR_CODE_P  (1 << 0)
    164156
    165157/** When bit on this position is 1, the page fault was caused by a write. */
    166 #define PFERR_CODE_RW           (1 << 1)
     158#define PFERR_CODE_RW  (1 << 1)
    167159
    168160/** When bit on this position is 1, the page fault was caused in user mode. */
    169 #define PFERR_CODE_US           (1 << 2)
     161#define PFERR_CODE_US  (1 << 2)
    170162
    171163/** When bit on this position is 1, a reserved bit was set in page directory. */
    172 #define PFERR_CODE_RSVD         (1 << 3)
     164#define PFERR_CODE_RSVD  (1 << 3)
    173165
    174166/** When bit on this position is 1, the page fault was caused during instruction
    175167 * fetch.
    176168 */
    177 #define PFERR_CODE_ID           (1 << 4)
     169#define PFERR_CODE_ID  (1 << 4)
    178170
    179171/** Page Table Entry. */
    180172typedef struct {
    181         unsigned present : 1;
    182         unsigned writeable : 1;
    183         unsigned uaccessible : 1;
    184         unsigned page_write_through : 1;
    185         unsigned page_cache_disable : 1;
    186         unsigned accessed : 1;
    187         unsigned dirty : 1;
    188         unsigned unused: 1;
    189         unsigned global : 1;
    190         unsigned soft_valid : 1;                /**< Valid content even if present bit is cleared. */
    191         unsigned avl : 2;
    192         unsigned addr_12_31 : 30;
    193         unsigned addr_32_51 : 21;
    194         unsigned no_execute : 1;
     173        unsigned int present : 1;
     174        unsigned int writeable : 1;
     175        unsigned int uaccessible : 1;
     176        unsigned int page_write_through : 1;
     177        unsigned int page_cache_disable : 1;
     178        unsigned int accessed : 1;
     179        unsigned int dirty : 1;
     180        unsigned int unused: 1;
     181        unsigned int global : 1;
     182        unsigned int soft_valid : 1;  /**< Valid content even if present bit is cleared. */
     183        unsigned int avl : 2;
     184        unsigned int addr_12_31 : 30;
     185        unsigned int addr_32_51 : 21;
     186        unsigned int no_execute : 1;
    195187} __attribute__ ((packed)) pte_t;
    196188
     
    211203{
    212204        pte_t *p = &pt[i];
    213 
     205       
    214206        p->addr_12_31 = (a >> 12) & 0xfffff;
    215207        p->addr_32_51 = a >> 32;
  • kernel/arch/amd64/src/asm_utils.S

    r33dac7d ra1f60f3  
    2727#
    2828
    29 #define IREGISTER_SPACE 80
    30 
    31 #define IOFFSET_RAX     0x0
    32 #define IOFFSET_RCX     0x8
    33 #define IOFFSET_RDX     0x10
    34 #define IOFFSET_RSI     0x18
    35 #define IOFFSET_RDI     0x20
    36 #define IOFFSET_R8      0x28
    37 #define IOFFSET_R9      0x30
    38 #define IOFFSET_R10     0x38
    39 #define IOFFSET_R11     0x40
    40 #define IOFFSET_RBP     0x48
    41 
    42 #  Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
    43 # and 1 means interrupt with error word
    44 #define ERROR_WORD_INTERRUPT_LIST 0x00027D00
     29#define IREGISTER_SPACE  80
     30
     31#define IOFFSET_RAX  0x00
     32#define IOFFSET_RCX  0x08
     33#define IOFFSET_RDX  0x10
     34#define IOFFSET_RSI  0x18
     35#define IOFFSET_RDI  0x20
     36#define IOFFSET_R8   0x28
     37#define IOFFSET_R9   0x30
     38#define IOFFSET_R10  0x38
     39#define IOFFSET_R11  0x40
     40#define IOFFSET_RBP  0x48
     41
     42# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
     43# has no error word and 1 means interrupt with error word
     44
     45#define ERROR_WORD_INTERRUPT_LIST  0x00027D00
    4546
    4647#include <arch/pm.h>
    4748#include <arch/mm/page.h>
    48        
     49
    4950.text
    5051.global interrupt_handlers
     
    5354
    5455panic_printf:
    55         movq $halt, (%rsp)
     56        movabsq $halt, %rax
     57        movq %rax, (%rsp)
    5658        jmp printf
    5759
     
    7678        jmp _memsetw
    7779
    78 #define MEMCPY_DST      %rdi
    79 #define MEMCPY_SRC      %rsi
    80 #define MEMCPY_SIZE     %rdx
     80#define MEMCPY_DST   %rdi
     81#define MEMCPY_SRC   %rsi
     82#define MEMCPY_SIZE  %rdx
    8183
    8284/**
     
    8991 * or copy_to_uspace().
    9092 *
    91  * @param MEMCPY_DST    Destination address.
    92  * @param MEMCPY_SRC    Source address.
    93  * @param MEMCPY_SIZE   Number of bytes to copy.
     93 * @param MEMCPY_DST  Destination address.
     94 * @param MEMCPY_SRC  Source address.
     95 * @param MEMCPY_SIZE Number of bytes to copy.
    9496 *
    9597 * @return MEMCPY_DST on success, 0 on failure.
     98 *
    9699 */
    97100memcpy:
     
    99102memcpy_to_uspace:
    100103        movq MEMCPY_DST, %rax
    101 
     104       
    102105        movq MEMCPY_SIZE, %rcx
    103         shrq $3, %rcx                   /* size / 8 */
    104        
    105         rep movsq                       /* copy as much as possible word by word */
    106 
     106        shrq $3, %rcx           /* size / 8 */
     107       
     108        rep movsq               /* copy as much as possible word by word */
     109       
    107110        movq MEMCPY_SIZE, %rcx
    108         andq $7, %rcx                   /* size % 8 */
     111        andq $7, %rcx           /* size % 8 */
    109112        jz 0f
    110113       
    111         rep movsb                       /* copy the rest byte by byte */
    112        
    113 0:
    114         ret                             /* return MEMCPY_SRC, success */
     114        rep movsb               /* copy the rest byte by byte */
     115       
     116        0:
     117                ret                 /* return MEMCPY_SRC, success */
    115118
    116119memcpy_from_uspace_failover_address:
    117120memcpy_to_uspace_failover_address:
    118         xorq %rax, %rax                 /* return 0, failure */
     121        xorq %rax, %rax         /* return 0, failure */
    119122        ret
    120123
     
    124127#
    125128has_cpuid:
    126         pushfq                  # store flags
    127         popq %rax               # read flags
    128         movq %rax,%rdx          # copy flags
    129         btcl $21,%edx           # swap the ID bit
     129        pushfq                 # store flags
     130        popq %rax              # read flags
     131        movq %rax, %rdx        # copy flags
     132        btcl $21, %edx         # swap the ID bit
    130133        pushq %rdx
    131         popfq                   # propagate the change into flags
     134        popfq                  # propagate the change into flags
    132135        pushfq
    133         popq %rdx               # read flags   
    134         andl $(1<<21),%eax      # interested only in ID bit
    135         andl $(1<<21),%edx
    136         xorl %edx,%eax          # 0 if not supported, 1 if supported
     136        popq %rdx              # read flags
     137        andl $(1 << 21), %eax  # interested only in ID bit
     138        andl $(1 << 21), %edx
     139        xorl %edx, %eax        # 0 if not supported, 1 if supported
    137140        ret
    138141
    139142cpuid:
    140         movq %rbx, %r10  # we have to preserve rbx across function calls
    141 
    142         movl %edi,%eax  # load the command into %eax
    143 
    144         cpuid   
    145         movl %eax,0(%rsi)
    146         movl %ebx,4(%rsi)
    147         movl %ecx,8(%rsi)
    148         movl %edx,12(%rsi)
    149 
     143        movq %rbx, %r10        # we have to preserve rbx across function calls
     144       
     145        movl %edi,%eax         # load the command into %eax
     146       
     147        cpuid
     148        movl %eax, 0(%rsi)
     149        movl %ebx, 4(%rsi)
     150        movl %ecx, 8(%rsi)
     151        movl %edx, 12(%rsi)
     152       
    150153        movq %r10, %rbx
    151154        ret
     
    157160        wrmsr
    158161        ret
    159        
     162
    160163read_efer_flag:
    161164        movq $0xc0000080, %rcx
    162165        rdmsr
    163         ret             
     166        ret
    164167
    165168# Push all volatile general purpose registers on stack
     
    190193.endm
    191194
    192 #define INTERRUPT_ALIGN 128
    193        
     195#define INTERRUPT_ALIGN  128
     196
    194197## Declare interrupt handlers
    195198#
     
    200203#
    201204.macro handler i n
    202 
     205       
    203206        /*
    204207         * Choose between version with error code and version without error
     
    209212         * Therefore we align the interrupt handlers.
    210213         */
    211 
     214       
    212215        .iflt \i-32
    213216                .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
     
    220223                         * Version without error word,
    221224                         */
    222                         subq $(IREGISTER_SPACE+8), %rsp
     225                        subq $(IREGISTER_SPACE + 8), %rsp
    223226                .endif
    224227        .else
     
    226229                 * Version without error word,
    227230                 */
    228                 subq $(IREGISTER_SPACE+8), %rsp
    229         .endif 
    230 
     231                subq $(IREGISTER_SPACE + 8), %rsp
     232        .endif
     233       
    231234        save_all_gpr
    232235        cld
    233 
     236       
    234237        # Stop stack traces here
    235238        xorq %rbp, %rbp
    236 
    237         movq $(\i), %rdi        # %rdi - first parameter
    238         movq %rsp, %rsi         # %rsi - pointer to istate
    239         call exc_dispatch       # exc_dispatch(i, istate)
     239       
     240        movq $(\i), %rdi    # %rdi - first parameter
     241        movq %rsp, %rsi     # %rsi - pointer to istate
     242        call exc_dispatch   # exc_dispatch(i, istate)
    240243       
    241244        restore_all_gpr
    242245        # $8 = Skip error word
    243         addq $(IREGISTER_SPACE+8), %rsp
     246        addq $(IREGISTER_SPACE + 8), %rsp
    244247        iretq
    245 
     248       
    246249        .align INTERRUPT_ALIGN
    247         .if (\n-\i)-1
    248         handler "(\i+1)",\n
     250        .if (\n - \i) - 1
     251                handler "(\i + 1)", \n
    249252        .endif
    250253.endm
     
    252255.align INTERRUPT_ALIGN
    253256interrupt_handlers:
    254 h_start:
    255         handler 0 IDT_ITEMS
    256 h_end:
     257        h_start:
     258                handler 0 IDT_ITEMS
     259        h_end:
    257260
    258261## Low-level syscall handler
    259 # 
     262#
    260263# Registers on entry:
    261264#
    262 # @param rcx            Userspace return address.
    263 # @param r11            Userspace RFLAGS.
    264 #
    265 # @param rax            Syscall number.
    266 # @param rdi            1st syscall argument.
    267 # @param rsi            2nd syscall argument.
    268 # @param rdx            3rd syscall argument.
    269 # @param r10            4th syscall argument. Used instead of RCX because the
    270 #                       SYSCALL instruction clobbers it.
    271 # @param r8             5th syscall argument.
    272 # @param r9             6th syscall argument.
    273 #
    274 # @return               Return value is in rax.
     265# @param rcx Userspace return address.
     266# @param r11 Userspace RFLAGS.
     267#
     268# @param rax Syscall number.
     269# @param rdi 1st syscall argument.
     270# @param rsi 2nd syscall argument.
     271# @param rdx 3rd syscall argument.
     272# @param r10 4th syscall argument. Used instead of RCX because
     273#            the SYSCALL instruction clobbers it.
     274# @param r8  5th syscall argument.
     275# @param r9  6th syscall argument.
     276#
     277# @return Return value is in rax.
    275278#
    276279syscall_entry:
    277         swapgs                  # Switch to hidden gs   
    278         #
    279         # %gs:0                 Scratch space for this thread's user RSP
    280         # %gs:8                 Address to be used as this thread's kernel RSP
     280        swapgs            # Switch to hidden gs
    281281        #
    282         movq %rsp, %gs:0        # Save this thread's user RSP
    283         movq %gs:8, %rsp        # Set this thread's kernel RSP
    284         swapgs                  # Switch back to remain consistent
     282        # %gs:0 Scratch space for this thread's user RSP
     283        # %gs:8 Address to be used as this thread's kernel RSP
     284        #
     285        movq %rsp, %gs:0  # Save this thread's user RSP
     286        movq %gs:8, %rsp  # Set this thread's kernel RSP
     287        swapgs            # Switch back to remain consistent
    285288        sti
    286289       
    287290        pushq %rcx
    288291        pushq %r11
    289 
    290         movq %r10, %rcx         # Copy the 4th argument where it is expected
     292       
     293        movq %r10, %rcx   # Copy the 4th argument where it is expected
    291294        pushq %rax
    292295        call syscall_handler
    293296        addq $8, %rsp
    294                
     297       
    295298        popq %r11
    296299        popq %rcx
    297 
     300       
    298301        cli
    299302        swapgs
    300         movq %gs:0, %rsp        # Restore the user RSP
     303        movq %gs:0, %rsp  # Restore the user RSP
    301304        swapgs
    302 
     305       
    303306        sysretq
    304307
     
    306309.global interrupt_handler_size
    307310
    308 interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
     311interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS
  • kernel/arch/amd64/src/boot/boot.S

    r33dac7d ra1f60f3  
    3131#include <arch/boot/boot.h>
    3232#include <arch/boot/memmap.h>
    33 #include <arch/mm/page.h>       
     33#include <arch/mm/page.h>
    3434#include <arch/mm/ptl.h>
    3535#include <arch/pm.h>
     
    172172        xorq %rsi, %rsi
    173173        movl grub_ebx, %esi
    174         call arch_pre_main
     174       
     175        movabsq $arch_pre_main, %rax
     176        callq *%rax
    175177       
    176178        # create the first stack frame
    177179        pushq $0
    178180        movq %rsp, %rbp
    179 
    180         call main_bsp
     181       
     182        movabsq $main_bsp, %rax
     183        call *%rax
    181184       
    182185        # not reached
     
    256259#
    257260# Macro for generating initial page table contents.
    258 # @param cnt Number of entries to generat. Must be multiple of 8.
     261# @param cnt Number of entries to generate. Must be multiple of 8.
    259262# @param g   Number of GB that will be added to the mapping.
    260263#
    261 .macro ptl2gen cnt g 
     264.macro ptl2gen cnt g
    262265.if \cnt
    263         ptl2gen "\cnt - 8" \g 
     266        ptl2gen "\cnt - 8" \g
    264267        .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    265268        .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     
    276279.align 4096
    277280.global ptl_2_0g
    278 ptl_2_0g:       
     281ptl_2_0g:
    279282        ptl2gen 512 0
    280283
     
    302305        # Identity mapping for [0; 4G)
    303306        .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
    304         .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 
     307        .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT)
    305308        .quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT)
    306309        .quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT)
    307         .fill 506, 8, 0
    308         # Mapping of [0; 1G) at -2G
    309         .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
    310         .fill 1, 8, 0
     310        .fill 508, 8, 0
    311311
    312312.align 4096
     
    314314ptl_0:
    315315        .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
    316         .fill 255,8,0
     316        .fill 255, 8, 0
    317317        .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
    318         .fill 254,8,0
    319         .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
     318        .fill 255, 8, 0
    320319
    321320.section K_DATA_START, "aw", @progbits
  • kernel/arch/amd64/src/context.S

    r33dac7d ra1f60f3  
    4141context_save_arch:
    4242        movq (%rsp), %rdx     # the caller's return %eip
    43 
    44         # In %edi is passed 1st argument
    45         CONTEXT_SAVE_ARCH_CORE %rdi %rdx
    4643       
    47         xorq %rax,%rax          # context_save returns 1
     44        # 1st argument passed in %edi
     45        CONTEXT_SAVE_ARCH_CORE %rdi %rdx
     46       
     47        xorq %rax, %rax       # context_save returns 1
    4848        incq %rax
    4949        ret
     
    5555# pointed by the 1st argument. Returns 0 in EAX.
    5656#
    57 context_restore_arch:   
    58 
     57context_restore_arch:
    5958        CONTEXT_RESTORE_ARCH_CORE %rdi %rdx
    60 
    61         movq %rdx,(%rsp)
    62 
    63         xorq %rax,%rax          # context_restore returns 0
     59       
     60        movq %rdx, (%rsp)
     61       
     62        xorq %rax, %rax       # context_restore returns 0
    6463        ret
  • kernel/arch/amd64/src/fpu_context.c

    r33dac7d ra1f60f3  
    2727 */
    2828
    29 /** @addtogroup amd64   
     29/** @addtogroup amd64
    3030 * @{
    3131 */
  • kernel/arch/amd64/src/mm/page.c

    r33dac7d ra1f60f3  
    5252pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
    5353pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
    54 extern pte_t ptl_0; /* From boot.S */
    55 
    56 #define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    57 #define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    58 #define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    59 
    60 #define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
    61 #define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
    62 #define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))
    63 
    64 #define SETUP_PTL1(ptl0, page, tgt)  {  \
    65         SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    66         SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    67     }
    68 #define SETUP_PTL2(ptl1, page, tgt)  {  \
    69         SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    70         SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    71     }
    72 #define SETUP_PTL3(ptl2, page, tgt)  {  \
    73         SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    74         SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    75     }
    76 #define SETUP_FRAME(ptl3, page, tgt)  { \
    77         SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    78         SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    79     }
    80 
     54
     55static uintptr_t oldpage = 0;
     56
     57extern pte_t ptl_0;  /* From boot.S */
     58
     59#define PTL1_PRESENT(ptl0, page) \
     60        (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
     61
     62#define PTL2_PRESENT(ptl1, page) \
     63        (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
     64
     65#define PTL3_PRESENT(ptl2, page) \
     66        (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
     67
     68#define PTL1_ADDR(ptl0, page) \
     69        ((pte_t *) PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
     70
     71#define PTL2_ADDR(ptl1, page) \
     72        ((pte_t *) PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
     73
     74#define PTL3_ADDR(ptl2, page) \
     75        ((pte_t *) PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))
     76
     77#define SETUP_PTL1(ptl0, page, tgt) \
     78        { \
     79                SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
     80                SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
     81        }
     82
     83#define SETUP_PTL2(ptl1, page, tgt) \
     84        { \
     85                SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
     86                SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
     87        }
     88
     89#define SETUP_PTL3(ptl2, page, tgt) \
     90        { \
     91                SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
     92                SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
     93        }
     94
     95#define SETUP_FRAME(ptl3, page, tgt) \
     96        { \
     97                SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
     98                SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
     99        }
    81100
    82101void page_arch_init(void)
    83102{
    84         uintptr_t cur;
    85         unsigned int i;
    86         int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
    87 
    88103        if (config.cpu_active == 1) {
     104                uintptr_t cur;
     105                unsigned int identity_flags =
     106                    PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
     107               
    89108                page_mapping_operations = &pt_mapping_operations;
    90 
     109               
    91110                page_table_lock(AS_KERNEL, true);
    92 
     111               
    93112                /*
    94113                 * PA2KA(identity) mapping for all frames.
    95114                 */
    96                 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
    97                         /* Standard identity mapping */
     115                for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
    98116                        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
    99                 }
    100                
    101                 /* Upper kernel mapping
    102                  * - from zero to top of kernel (include bottom addresses
    103                  *   because some are needed for init)
    104                  */
    105                 for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE)
    106                         page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
    107                
    108                 for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)
    109                         page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
    110                
    111                 for (i = 0; i < init.cnt; i++) {
    112                         for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)
    113                                 page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
    114                 }
    115 
     117               
    116118                page_table_unlock(AS_KERNEL, true);
    117 
     119               
    118120                exc_register(14, "page_fault", true, (iroutine_t) page_fault);
    119121                write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
     
    122124}
    123125
    124 
    125126/** Identity page mapper
    126127 *
     
    128129 * is initialized. This thing clears page table and fills in the specific
    129130 * items.
     131 *
    130132 */
    131133void ident_page_fault(unsigned int n, istate_t *istate)
    132134{
    133         uintptr_t page;
    134         static uintptr_t oldpage = 0;
    135         pte_t *aptl_1, *aptl_2, *aptl_3;
    136 
    137         page = read_cr2();
     135        pte_t *aptl_1;
     136        pte_t *aptl_2;
     137        pte_t *aptl_3;
     138       
     139        uintptr_t page = read_cr2();
     140       
     141        /* Unmap old address */
    138142        if (oldpage) {
    139                 /* Unmap old address */
    140                 aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
    141                 aptl_2 = PTL2_ADDR(aptl_1, oldpage);
    142                 aptl_3 = PTL3_ADDR(aptl_2, oldpage);
    143 
     143                pte_t *aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
     144                pte_t *aptl_2 = PTL2_ADDR(aptl_1, oldpage);
     145                pte_t *aptl_3 = PTL3_ADDR(aptl_2, oldpage);
     146               
    144147                SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
     148               
    145149                if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
    146150                        SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
     151               
    147152                if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
    148153                        SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
     154               
    149155                if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
    150156                        SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    151157        }
     158       
    152159        if (PTL1_PRESENT(&ptl_0, page))
    153160                aptl_1 = PTL1_ADDR(&ptl_0, page);
     
    156163                aptl_1 = helper_ptl1;
    157164        }
    158            
     165       
    159166        if (PTL2_PRESENT(aptl_1, page))
    160167                aptl_2 = PTL2_ADDR(aptl_1, page);
     
    163170                aptl_2 = helper_ptl2;
    164171        }
    165 
     172       
    166173        if (PTL3_PRESENT(aptl_2, page))
    167174                aptl_3 = PTL3_ADDR(aptl_2, page);
     
    172179       
    173180        SETUP_FRAME(aptl_3, page, page);
    174 
     181       
    175182        oldpage = page;
    176183}
    177184
    178 
    179185void page_fault(unsigned int n, istate_t *istate)
    180186{
    181         uintptr_t page;
    182         pf_access_t access;
    183        
    184         page = read_cr2();
     187        uintptr_t page = read_cr2();
    185188       
    186189        if (istate->error_word & PFERR_CODE_RSVD)
    187190                panic("Reserved bit set in page table entry.");
     191       
     192        pf_access_t access;
    188193       
    189194        if (istate->error_word & PFERR_CODE_RW)
     
    195200       
    196201        if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
    197                 fault_if_from_uspace(istate, "Page fault: %#x.", page);
    198 
     202                fault_if_from_uspace(istate, "Page fault: %p.", page);
    199203                decode_istate(n, istate);
    200                 printf("Page fault address: %llx.\n", page);
    201                 panic("Page fault.");
    202         }
    203 }
    204 
     204                panic("Page fault: %p", page);
     205        }
     206}
    205207
    206208uintptr_t hw_map(uintptr_t physaddr, size_t size)
    207209{
    208210        if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
    209                 panic("Unable to map physical memory %p (%d bytes).", physaddr,
     211                panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr,
    210212                    size);
    211213       
    212214        uintptr_t virtaddr = PA2KA(last_frame);
    213215        pfn_t i;
    214 
     216       
    215217        page_table_lock(AS_KERNEL, true);
     218       
    216219        for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
    217220                page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
     221       
    218222        page_table_unlock(AS_KERNEL, true);
    219223       
  • kernel/arch/amd64/src/smp/ap.S

    r33dac7d ra1f60f3  
    5555        xorw %ax, %ax
    5656        movw %ax, %ds
    57 
    58         lgdtl ap_gdtr           # initialize Global Descriptor Table register
     57       
     58        lgdtl ap_gdtr       # initialize Global Descriptor Table register
    5959       
    6060        movl %cr0, %eax
    6161        orl $1, %eax
    62         movl %eax, %cr0         # switch to protected mode
     62        movl %eax, %cr0     # switch to protected mode
    6363        jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET
    64        
     64
    6565jump_to_kernel:
    6666.code32
     
    7272        movw %ax, %gs
    7373       
    74         # Enable 64-bit page transaltion entries - CR4.PAE = 1.
     74        # Enable 64-bit page transaltion entries (CR4.PAE = 1).
    7575        # Paging is not enabled until after long mode is enabled
    7676       
     
    7878        btsl $5, %eax
    7979        movl %eax, %cr4
    80 
     80       
    8181        leal ptl_0, %eax
    8282        movl %eax, %cr3
    8383       
    8484        # Enable long mode
    85         movl $EFER_MSR_NUM, %ecx        # EFER MSR number
    86         rdmsr                           # Read EFER
    87         btsl $AMD_LME_FLAG, %eax        # Set LME=1
    88         wrmsr                           # Write EFER
     85        movl $EFER_MSR_NUM, %ecx  # EFER MSR number
     86        rdmsr                     # Read EFER
     87        btsl $AMD_LME_FLAG, %eax  # Set LME=1
     88        wrmsr                     # Write EFER
    8989       
    90         # Enable paging to activate long mode (set CR0.PG=1)
     90        # Enable paging to activate long mode (set CR0.PG = 1)
    9191        movl %cr0, %eax
    9292        btsl $31, %eax
     
    9898.code64
    9999start64:
    100         movq (ctx), %rsp
     100        movabsq $ctx, %rsp
     101        movq (%rsp), %rsp
     102       
    101103        pushq $0
    102104        movq %rsp, %rbp
    103         call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET   # never returns
     105       
     106        movabsq $main_ap, %rax
     107        callq *%rax   # never returns
    104108
    105109#endif /* CONFIG_SMP */
Note: See TracChangeset for help on using the changeset viewer.