Changeset f56e897f in mainline for kernel/arch/amd64/src


Ignore:
Timestamp:
2010-06-29T17:43:38Z (16 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
6473d41
Parents:
e4a4b44 (diff), 793cf029 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge mainline changes.

Location:
kernel/arch/amd64/src
Files:
14 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/src/amd64.c

    re4a4b44 rf56e897f  
    122122        /* Enable FPU */
    123123        cpu_setup_fpu();
    124 
     124       
    125125        /* Initialize segmentation */
    126126        pm_init();
     
    132132        /* Disable alignment check */
    133133        clean_AM_flag();
    134 
     134       
    135135        if (config.cpu_active == 1) {
    136136                interrupt_init();
     
    260260        THREAD->arch.tls = addr;
    261261        write_msr(AMD_MSR_FS, addr);
     262       
    262263        return 0;
    263264}
  • kernel/arch/amd64/src/asm_utils.S

    re4a4b44 rf56e897f  
    2727#
    2828
    29 #define IREGISTER_SPACE 80
    30 
    31 #define IOFFSET_RAX     0x0
    32 #define IOFFSET_RCX     0x8
    33 #define IOFFSET_RDX     0x10
    34 #define IOFFSET_RSI     0x18
    35 #define IOFFSET_RDI     0x20
    36 #define IOFFSET_R8      0x28
    37 #define IOFFSET_R9      0x30
    38 #define IOFFSET_R10     0x38
    39 #define IOFFSET_R11     0x40
    40 #define IOFFSET_RBP     0x48
    41 
    42 #  Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
    43 # and 1 means interrupt with error word
    44 #define ERROR_WORD_INTERRUPT_LIST 0x00027D00
     29#define IREGISTER_SPACE  80
     30
     31#define IOFFSET_RAX  0x00
     32#define IOFFSET_RCX  0x08
     33#define IOFFSET_RDX  0x10
     34#define IOFFSET_RSI  0x18
     35#define IOFFSET_RDI  0x20
     36#define IOFFSET_R8   0x28
     37#define IOFFSET_R9   0x30
     38#define IOFFSET_R10  0x38
     39#define IOFFSET_R11  0x40
     40#define IOFFSET_RBP  0x48
     41
     42# Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
     43# has no error word  and 1 means interrupt with error word
     44
     45#define ERROR_WORD_INTERRUPT_LIST  0x00027D00
    4546
    4647#include <arch/pm.h>
    4748#include <arch/mm/page.h>
    48        
     49
    4950.text
    5051.global interrupt_handlers
    5152.global syscall_entry
    52 
    5353.global cpuid
    5454.global has_cpuid
     
    7171        jmp _memsetw
    7272
    73 #define MEMCPY_DST      %rdi
    74 #define MEMCPY_SRC      %rsi
    75 #define MEMCPY_SIZE     %rdx
     73#define MEMCPY_DST   %rdi
     74#define MEMCPY_SRC   %rsi
     75#define MEMCPY_SIZE  %rdx
    7676
    7777/**
     
    8484 * or copy_to_uspace().
    8585 *
    86  * @param MEMCPY_DST    Destination address.
    87  * @param MEMCPY_SRC    Source address.
    88  * @param MEMCPY_SIZE   Number of bytes to copy.
     86 * @param MEMCPY_DST  Destination address.
     87 * @param MEMCPY_SRC  Source address.
     88 * @param MEMCPY_SIZE Number of bytes to copy.
    8989 *
    9090 * @return MEMCPY_DST on success, 0 on failure.
     91 *
    9192 */
    9293memcpy:
     
    9495memcpy_to_uspace:
    9596        movq MEMCPY_DST, %rax
    96 
     97       
    9798        movq MEMCPY_SIZE, %rcx
    98         shrq $3, %rcx                   /* size / 8 */
    99        
    100         rep movsq                       /* copy as much as possible word by word */
    101 
     99        shrq $3, %rcx           /* size / 8 */
     100       
     101        rep movsq               /* copy as much as possible word by word */
     102       
    102103        movq MEMCPY_SIZE, %rcx
    103         andq $7, %rcx                   /* size % 8 */
     104        andq $7, %rcx           /* size % 8 */
    104105        jz 0f
    105106       
    106         rep movsb                       /* copy the rest byte by byte */
    107        
    108 0:
    109         ret                             /* return MEMCPY_SRC, success */
     107        rep movsb               /* copy the rest byte by byte */
     108       
     109        0:
      110                ret                 /* return MEMCPY_DST, success */
    110111
    111112memcpy_from_uspace_failover_address:
    112113memcpy_to_uspace_failover_address:
    113         xorq %rax, %rax                 /* return 0, failure */
     114        xorq %rax, %rax         /* return 0, failure */
    114115        ret
    115116
     
    119120#
    120121has_cpuid:
    121         pushfq                  # store flags
    122         popq %rax               # read flags
    123         movq %rax,%rdx          # copy flags
    124         btcl $21,%edx           # swap the ID bit
     122        pushfq                 # store flags
     123        popq %rax              # read flags
     124        movq %rax, %rdx        # copy flags
     125        btcl $21, %edx         # swap the ID bit
    125126        pushq %rdx
    126         popfq                   # propagate the change into flags
     127        popfq                  # propagate the change into flags
    127128        pushfq
    128         popq %rdx               # read flags   
    129         andl $(1<<21),%eax      # interested only in ID bit
    130         andl $(1<<21),%edx
    131         xorl %edx,%eax          # 0 if not supported, 1 if supported
     129        popq %rdx              # read flags
     130        andl $(1 << 21), %eax  # interested only in ID bit
     131        andl $(1 << 21), %edx
     132        xorl %edx, %eax        # 0 if not supported, 1 if supported
    132133        ret
    133134
    134135cpuid:
    135         movq %rbx, %r10  # we have to preserve rbx across function calls
    136 
    137         movl %edi,%eax  # load the command into %eax
    138 
    139         cpuid   
    140         movl %eax,0(%rsi)
    141         movl %ebx,4(%rsi)
    142         movl %ecx,8(%rsi)
    143         movl %edx,12(%rsi)
    144 
     136        movq %rbx, %r10        # we have to preserve rbx across function calls
     137       
     138        movl %edi,%eax         # load the command into %eax
     139       
     140        cpuid
     141        movl %eax, 0(%rsi)
     142        movl %ebx, 4(%rsi)
     143        movl %ecx, 8(%rsi)
     144        movl %edx, 12(%rsi)
     145       
    145146        movq %r10, %rbx
    146147        ret
     
    152153        wrmsr
    153154        ret
    154        
     155
    155156read_efer_flag:
    156157        movq $0xc0000080, %rcx
    157158        rdmsr
    158         ret             
     159        ret
    159160
    160161# Push all volatile general purpose registers on stack
     
    185186.endm
    186187
    187 #define INTERRUPT_ALIGN 128
    188        
     188#define INTERRUPT_ALIGN  128
     189
    189190## Declare interrupt handlers
    190191#
     
    195196#
    196197.macro handler i n
    197 
     198       
    198199        /*
    199200         * Choose between version with error code and version without error
     
    204205         * Therefore we align the interrupt handlers.
    205206         */
    206 
     207       
    207208        .iflt \i-32
    208209                .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
     
    215216                         * Version without error word,
    216217                         */
    217                         subq $(IREGISTER_SPACE+8), %rsp
     218                        subq $(IREGISTER_SPACE + 8), %rsp
    218219                .endif
    219220        .else
     
    221222                 * Version without error word,
    222223                 */
    223                 subq $(IREGISTER_SPACE+8), %rsp
    224         .endif 
    225 
     224                subq $(IREGISTER_SPACE + 8), %rsp
     225        .endif
     226       
    226227        save_all_gpr
    227228        cld
     
    241242        restore_all_gpr
    242243        # $8 = Skip error word
    243         addq $(IREGISTER_SPACE+8), %rsp
     244        addq $(IREGISTER_SPACE + 8), %rsp
    244245        iretq
    245 
     246       
    246247        .align INTERRUPT_ALIGN
    247         .if (\n-\i)-1
    248         handler "(\i+1)",\n
     248        .if (\n - \i) - 1
     249                handler "(\i + 1)", \n
    249250        .endif
    250251.endm
     
    252253.align INTERRUPT_ALIGN
    253254interrupt_handlers:
    254 h_start:
    255         handler 0 IDT_ITEMS
    256 h_end:
     255        h_start:
     256                handler 0 IDT_ITEMS
     257        h_end:
    257258
    258259## Low-level syscall handler
    259 # 
     260#
    260261# Registers on entry:
    261262#
    262 # @param rcx            Userspace return address.
    263 # @param r11            Userspace RLFAGS.
    264 #
    265 # @param rax            Syscall number.
    266 # @param rdi            1st syscall argument.
    267 # @param rsi            2nd syscall argument.
    268 # @param rdx            3rd syscall argument.
    269 # @param r10            4th syscall argument. Used instead of RCX because the
    270 #                       SYSCALL instruction clobbers it.
    271 # @param r8             5th syscall argument.
    272 # @param r9             6th syscall argument.
    273 #
    274 # @return               Return value is in rax.
     263# @param rcx Userspace return address.
      264# @param r11 Userspace RFLAGS.
     265#
     266# @param rax Syscall number.
     267# @param rdi 1st syscall argument.
     268# @param rsi 2nd syscall argument.
     269# @param rdx 3rd syscall argument.
     270# @param r10 4th syscall argument. Used instead of RCX because
     271#            the SYSCALL instruction clobbers it.
     272# @param r8  5th syscall argument.
     273# @param r9  6th syscall argument.
     274#
     275# @return Return value is in rax.
    275276#
    276277syscall_entry:
    277         swapgs                  # Switch to hidden gs   
    278         #
    279         # %gs:0                 Scratch space for this thread's user RSP
    280         # %gs:8                 Address to be used as this thread's kernel RSP
     278        swapgs            # Switch to hidden gs
    281279        #
    282         movq %rsp, %gs:0        # Save this thread's user RSP
    283         movq %gs:8, %rsp        # Set this thread's kernel RSP
    284         swapgs                  # Switch back to remain consistent
     280        # %gs:0 Scratch space for this thread's user RSP
     281        # %gs:8 Address to be used as this thread's kernel RSP
     282        #
     283        movq %rsp, %gs:0  # Save this thread's user RSP
     284        movq %gs:8, %rsp  # Set this thread's kernel RSP
     285        swapgs            # Switch back to remain consistent
    285286        sti
    286287       
     
    299300        popq %r11
    300301        popq %rcx
    301 
     302       
    302303        cli
    303304        swapgs
    304         movq %gs:0, %rsp        # Restore the user RSP
     305        movq %gs:0, %rsp  # Restore the user RSP
    305306        swapgs
    306 
     307       
    307308        sysretq
    308309
     
    310311.global interrupt_handler_size
    311312
    312 interrupt_handler_size: .quad (h_end-h_start)/IDT_ITEMS
     313interrupt_handler_size: .quad (h_end - h_start) / IDT_ITEMS
  • kernel/arch/amd64/src/boot/boot.S

    re4a4b44 rf56e897f  
    3131#include <arch/boot/boot.h>
    3232#include <arch/boot/memmap.h>
    33 #include <arch/mm/page.h>       
     33#include <arch/mm/page.h>
    3434#include <arch/mm/ptl.h>
    3535#include <arch/pm.h>
     
    172172        xorq %rsi, %rsi
    173173        movl grub_ebx, %esi
    174         call arch_pre_main
     174       
     175        movabsq $arch_pre_main, %rax
     176        callq *%rax
    175177       
    176178        # create the first stack frame
    177179        pushq $0
    178180        movq %rsp, %rbp
    179 
    180         call main_bsp
     181       
     182        movabsq $main_bsp, %rax
     183        call *%rax
    181184       
    182185        # not reached
     
    256259#
    257260# Macro for generating initial page table contents.
    258 # @param cnt Number of entries to generat. Must be multiple of 8.
     261# @param cnt Number of entries to generate. Must be multiple of 8.
    259262# @param g   Number of GB that will be added to the mapping.
    260263#
    261 .macro ptl2gen cnt g 
    262 .if \cnt
    263         ptl2gen "\cnt - 8" \g
    264         .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    265         .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    266         .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    267         .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    268         .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    269         .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    270         .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    271         .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
    272 .endif
     264.macro ptl2gen cnt g
     265        .if \cnt
     266                ptl2gen "\cnt - 8" \g
     267                .quad ((\cnt - 8) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     268                .quad ((\cnt - 7) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     269                .quad ((\cnt - 6) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     270                .quad ((\cnt - 5) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     271                .quad ((\cnt - 4) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     272                .quad ((\cnt - 3) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     273                .quad ((\cnt - 2) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     274                .quad ((\cnt - 1) * 0x200000) + (\g * 1024 * 1024 * 1024) | (PTL_WRITABLE | PTL_PRESENT | PTL_2MB_PAGE)
     275        .endif
    273276.endm
    274277
    275 # Page table for pages in the first gigabyte.
    276 .align 4096
    277 .global ptl_2_0g
    278 ptl_2_0g:       
     278# Page table for pages in the 1st gigabyte.
     279.align 4096
     280ptl_2_0g:
    279281        ptl2gen 512 0
    280282
    281 # Page table for pages in the second gigabyte.
    282 .align 4096
    283 .global ptl_2_1g
     283# Page table for pages in the 2nd gigabyte.
     284.align 4096
    284285ptl_2_1g:
    285286        ptl2gen 512 1
    286287
    287 # Page table for pages in the third gigabyte.
    288 .align 4096
    289 .global ptl_2_2g
     288# Page table for pages in the 3rd gigabyte.
     289.align 4096
    290290ptl_2_2g:
    291291        ptl2gen 512 2
    292292
    293 # Page table for pages in the fourth gigabyte.
    294 .align 4096
    295 .global ptl_2_3g
     293# Page table for pages in the 4th gigabyte.
     294.align 4096
    296295ptl_2_3g:
    297296        ptl2gen 512 3
    298297
    299 .align 4096
    300 .global ptl_1
     298# Page table for pages in the 5th gigabyte.
     299.align 4096
     300ptl_2_4g:
      301        ptl2gen 512 4
     302
     303# Page table for pages in the 6th gigabyte.
     304.align 4096
     305ptl_2_5g:
      306        ptl2gen 512 5
     307
     308# Page table for pages in the 7th gigabyte.
     309.align 4096
     310ptl_2_6g:
      311        ptl2gen 512 6
     312
     313# Page table for pages in the 8th gigabyte.
     314.align 4096
     315ptl_2_7g:
      316        ptl2gen 512 7
     317
     318.align 4096
    301319ptl_1:
    302         # Identity mapping for [0; 4G)
     320        # Identity mapping for [0; 8G)
    303321        .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
    304         .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT) 
     322        .quad ptl_2_1g + (PTL_WRITABLE | PTL_PRESENT)
    305323        .quad ptl_2_2g + (PTL_WRITABLE | PTL_PRESENT)
    306324        .quad ptl_2_3g + (PTL_WRITABLE | PTL_PRESENT)
    307         .fill 506, 8, 0
    308         # Mapping of [0; 1G) at -2G
    309         .quad ptl_2_0g + (PTL_WRITABLE | PTL_PRESENT)
    310         .fill 1, 8, 0
     325        .quad ptl_2_4g + (PTL_WRITABLE | PTL_PRESENT)
     326        .quad ptl_2_5g + (PTL_WRITABLE | PTL_PRESENT)
     327        .quad ptl_2_6g + (PTL_WRITABLE | PTL_PRESENT)
     328        .quad ptl_2_7g + (PTL_WRITABLE | PTL_PRESENT)
     329        .fill 504, 8, 0
    311330
    312331.align 4096
     
    314333ptl_0:
    315334        .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
    316         .fill 255,8,0
     335        .fill 255, 8, 0
    317336        .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
    318         .fill 254,8,0
    319         .quad ptl_1 + (PTL_WRITABLE | PTL_PRESENT)
     337        .fill 255, 8, 0
    320338
    321339.section K_DATA_START, "aw", @progbits
  • kernel/arch/amd64/src/context.S

    re4a4b44 rf56e897f  
    4141context_save_arch:
    4242        movq (%rsp), %rdx     # the caller's return %eip
    43 
    44         # In %edi is passed 1st argument
    45         CONTEXT_SAVE_ARCH_CORE %rdi %rdx
    4643       
    47         xorq %rax,%rax          # context_save returns 1
     44        # 1st argument passed in %edi
     45        CONTEXT_SAVE_ARCH_CORE %rdi %rdx
     46       
     47        xorq %rax, %rax       # context_save returns 1
    4848        incq %rax
    4949        ret
     
    5555# pointed by the 1st argument. Returns 0 in EAX.
    5656#
    57 context_restore_arch:   
    58 
     57context_restore_arch:
    5958        CONTEXT_RESTORE_ARCH_CORE %rdi %rdx
    60 
    61         movq %rdx,(%rsp)
    62 
    63         xorq %rax,%rax          # context_restore returns 0
     59       
     60        movq %rdx, (%rsp)
     61       
     62        xorq %rax, %rax       # context_restore returns 0
    6463        ret
  • kernel/arch/amd64/src/cpu/cpu.c

    re4a4b44 rf56e897f  
    4747 * Contains only non-MP-Specification specific SMP code.
    4848 */
    49 #define AMD_CPUID_EBX   0x68747541
    50 #define AMD_CPUID_ECX   0x444d4163
    51 #define AMD_CPUID_EDX   0x69746e65
     49#define AMD_CPUID_EBX  0x68747541
     50#define AMD_CPUID_ECX  0x444d4163
     51#define AMD_CPUID_EDX  0x69746e65
    5252
    53 #define INTEL_CPUID_EBX 0x756e6547
    54 #define INTEL_CPUID_ECX 0x6c65746e
    55 #define INTEL_CPUID_EDX 0x49656e69
     53#define INTEL_CPUID_EBX  0x756e6547
     54#define INTEL_CPUID_ECX  0x6c65746e
     55#define INTEL_CPUID_EDX  0x49656e69
    5656
    5757
     
    127127{
    128128        cpu_info_t info;
    129 
     129       
    130130        CPU->arch.vendor = VendorUnknown;
    131131        if (has_cpuid()) {
    132132                cpuid(INTEL_CPUID_LEVEL, &info);
    133 
     133               
    134134                /*
    135135                 * Check for AMD processor.
    136136                 */
    137                 if (info.cpuid_ebx == AMD_CPUID_EBX &&
    138                     info.cpuid_ecx == AMD_CPUID_ECX &&
    139                     info.cpuid_edx == AMD_CPUID_EDX) {
     137                if ((info.cpuid_ebx == AMD_CPUID_EBX) &&
     138                    (info.cpuid_ecx == AMD_CPUID_ECX) &&
     139                    (info.cpuid_edx == AMD_CPUID_EDX)) {
    140140                        CPU->arch.vendor = VendorAMD;
    141141                }
    142 
     142               
    143143                /*
    144144                 * Check for Intel processor.
    145                  */             
    146                 if (info.cpuid_ebx == INTEL_CPUID_EBX &&
    147                     info.cpuid_ecx == INTEL_CPUID_ECX &&
    148                     info.cpuid_edx == INTEL_CPUID_EDX) {
     145                 */
     146                if ((info.cpuid_ebx == INTEL_CPUID_EBX) &&
     147                    (info.cpuid_ecx == INTEL_CPUID_ECX) &&
     148                    (info.cpuid_edx == INTEL_CPUID_EDX)) {
    149149                        CPU->arch.vendor = VendorIntel;
    150150                }
    151                                
     151               
    152152                cpuid(INTEL_CPUID_STANDARD, &info);
    153153                CPU->arch.family = (info.cpuid_eax >> 8) & 0xf;
    154154                CPU->arch.model = (info.cpuid_eax >> 4) & 0xf;
    155                 CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf;                                               
     155                CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf;
    156156        }
    157157}
  • kernel/arch/amd64/src/debug/stacktrace.c

    re4a4b44 rf56e897f  
    3737#include <typedefs.h>
    3838
    39 #define FRAME_OFFSET_FP_PREV    0
    40 #define FRAME_OFFSET_RA         1
     39#define FRAME_OFFSET_FP_PREV  0
     40#define FRAME_OFFSET_RA       1
    4141
    4242bool kernel_frame_pointer_validate(uintptr_t fp)
     
    4949        uint64_t *stack = (void *) fp;
    5050        *prev = stack[FRAME_OFFSET_FP_PREV];
     51       
    5152        return true;
    5253}
     
    5657        uint64_t *stack = (void *) fp;
    5758        *ra = stack[FRAME_OFFSET_RA];
     59       
    5860        return true;
    5961}
  • kernel/arch/amd64/src/delay.S

    re4a4b44 rf56e897f  
    3737
    3838asm_delay_loop:
    39 0:      dec %rdi
    40         jnz 0b
     39        0:
     40                dec %rdi
     41                jnz 0b
     42       
    4143        ret
    4244
    4345asm_fake_loop:
    44 0:      dec %rdi
    45         jz 0b
     46        0:
     47                dec %rdi
     48                jz 0b
     49       
    4650        ret
  • kernel/arch/amd64/src/fpu_context.c

    re4a4b44 rf56e897f  
    2727 */
    2828
    29 /** @addtogroup amd64   
     29/** @addtogroup amd64
    3030 * @{
    3131 */
  • kernel/arch/amd64/src/interrupt.c

    re4a4b44 rf56e897f  
    202202        exc_register(12, "ss_fault", true, (iroutine_t) ss_fault);
    203203        exc_register(13, "gp_fault", true, (iroutine_t) gp_fault);
    204         exc_register(14, "ident_mapper", true, (iroutine_t) ident_page_fault);
    205204       
    206205#ifdef CONFIG_SMP
  • kernel/arch/amd64/src/mm/page.c

    re4a4b44 rf56e897f  
    3939#include <mm/frame.h>
    4040#include <mm/as.h>
    41 #include <arch/interrupt.h>
    4241#include <arch/asm.h>
    4342#include <config.h>
     
    4847#include <align.h>
    4948
    50 /* Definitions for identity page mapper */
    51 pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));
    52 pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
    53 pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
    54 extern pte_t ptl_0; /* From boot.S */
    55 
    56 #define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    57 #define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    58 #define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
    59 
    60 #define PTL1_ADDR(ptl0, page) ((pte_t *)PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
    61 #define PTL2_ADDR(ptl1, page) ((pte_t *)PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
    62 #define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))
    63 
    64 #define SETUP_PTL1(ptl0, page, tgt)  {  \
    65         SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    66         SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    67     }
    68 #define SETUP_PTL2(ptl1, page, tgt)  {  \
    69         SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    70         SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    71     }
    72 #define SETUP_PTL3(ptl2, page, tgt)  {  \
    73         SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    74         SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    75     }
    76 #define SETUP_FRAME(ptl3, page, tgt)  { \
    77         SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
    78         SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
    79     }
    80 
    81 
    8249void page_arch_init(void)
    8350{
    84         uintptr_t cur;
    85         unsigned int i;
    86         int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
    87 
    8851        if (config.cpu_active == 1) {
     52                uintptr_t cur;
     53                unsigned int identity_flags =
     54                    PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
     55               
    8956                page_mapping_operations = &pt_mapping_operations;
    90 
     57               
    9158                page_table_lock(AS_KERNEL, true);
    92 
     59               
    9360                /*
    9461                 * PA2KA(identity) mapping for all frames.
    9562                 */
    96                 for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
    97                         /* Standard identity mapping */
     63                for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
    9864                        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
    99                 }
    10065               
    101                 /* Upper kernel mapping
    102                  * - from zero to top of kernel (include bottom addresses
    103                  *   because some are needed for init)
    104                  */
    105                 for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE)
    106                         page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
     66                page_table_unlock(AS_KERNEL, true);
    10767               
    108                 for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)
    109                         page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);
    110                
    111                 for (i = 0; i < init.cnt; i++) {
    112                         for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)
    113                                 page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
    114                 }
    115 
    116                 page_table_unlock(AS_KERNEL, true);
    117 
    11868                exc_register(14, "page_fault", true, (iroutine_t) page_fault);
    11969                write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
     
    12272}
    12373
    124 
    125 /** Identity page mapper
    126  *
    127  * We need to map whole physical memory identically before the page subsystem
    128  * is initializaed. This thing clears page table and fills in the specific
    129  * items.
    130  */
    131 void ident_page_fault(unsigned int n, istate_t *istate)
    132 {
    133         uintptr_t page;
    134         static uintptr_t oldpage = 0;
    135         pte_t *aptl_1, *aptl_2, *aptl_3;
    136 
    137         page = read_cr2();
    138         if (oldpage) {
    139                 /* Unmap old address */
    140                 aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
    141                 aptl_2 = PTL2_ADDR(aptl_1, oldpage);
    142                 aptl_3 = PTL3_ADDR(aptl_2, oldpage);
    143 
    144                 SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    145                 if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
    146                         SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    147                 if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
    148                         SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    149                 if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
    150                         SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    151         }
    152         if (PTL1_PRESENT(&ptl_0, page))
    153                 aptl_1 = PTL1_ADDR(&ptl_0, page);
    154         else {
    155                 SETUP_PTL1(&ptl_0, page, helper_ptl1);
    156                 aptl_1 = helper_ptl1;
    157         }
    158            
    159         if (PTL2_PRESENT(aptl_1, page))
    160                 aptl_2 = PTL2_ADDR(aptl_1, page);
    161         else {
    162                 SETUP_PTL2(aptl_1, page, helper_ptl2);
    163                 aptl_2 = helper_ptl2;
    164         }
    165 
    166         if (PTL3_PRESENT(aptl_2, page))
    167                 aptl_3 = PTL3_ADDR(aptl_2, page);
    168         else {
    169                 SETUP_PTL3(aptl_2, page, helper_ptl3);
    170                 aptl_3 = helper_ptl3;
    171         }
    172        
    173         SETUP_FRAME(aptl_3, page, page);
    174 
    175         oldpage = page;
    176 }
    177 
    178 
    17974void page_fault(unsigned int n, istate_t *istate)
    18075{
    181         uintptr_t page;
    182         pf_access_t access;
    183        
    184         page = read_cr2();
     76        uintptr_t page = read_cr2();
    18577       
    18678        if (istate->error_word & PFERR_CODE_RSVD)
    18779                panic("Reserved bit set in page table entry.");
     80       
     81        pf_access_t access;
    18882       
    18983        if (istate->error_word & PFERR_CODE_RW)
     
    20094}
    20195
    202 
    20396uintptr_t hw_map(uintptr_t physaddr, size_t size)
    20497{
    20598        if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
    206                 panic("Unable to map physical memory %p (%d bytes).", physaddr,
     99                panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr,
    207100                    size);
    208101       
    209102        uintptr_t virtaddr = PA2KA(last_frame);
    210103        pfn_t i;
    211 
     104       
    212105        page_table_lock(AS_KERNEL, true);
     106       
    213107        for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
    214108                page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
     109       
    215110        page_table_unlock(AS_KERNEL, true);
    216111       
  • kernel/arch/amd64/src/proc/scheduler.c

    re4a4b44 rf56e897f  
    3838#include <proc/thread.h>
    3939#include <arch.h>
    40 #include <arch/context.h>       /* SP_DELTA */
     40#include <arch/context.h>
    4141#include <arch/asm.h>
    4242#include <print.h>
     
    5858        CPU->arch.tss->rsp0 =
    5959            (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA];
    60 
     60       
    6161        /*
    6262         * Syscall support.
    6363         */
    6464        swapgs();
    65         write_msr(AMD_MSR_GS, (uintptr_t)THREAD->arch.syscall_rsp);
     65        write_msr(AMD_MSR_GS, (uintptr_t) THREAD->arch.syscall_rsp);
    6666        swapgs();
    67 
     67       
    6868        /* TLS support - set FS to thread local storage */
    6969        write_msr(AMD_MSR_FS, THREAD->arch.tls);
  • kernel/arch/amd64/src/proc/task.c

    re4a4b44 rf56e897f  
    3939/** Perform amd64 specific task initialization.
    4040 *
    41  * @param t Task to be initialized.
     41 * @param task Task to be initialized.
     42 *
    4243 */
    43 void task_create_arch(task_t *t)
     44void task_create_arch(task_t *task)
    4445{
    45         t->arch.iomapver = 0;
    46         bitmap_initialize(&t->arch.iomap, NULL, 0);
     46        task->arch.iomapver = 0;
     47        bitmap_initialize(&task->arch.iomap, NULL, 0);
    4748}
    4849
    4950/** Perform amd64 specific task destruction.
    5051 *
    51  * @param t Task to be initialized.
     52 * @param task Task to be initialized.
     53 *
    5254 */
    53 void task_destroy_arch(task_t *t)
     55void task_destroy_arch(task_t *task)
    5456{
    55         if (t->arch.iomap.map)
    56                 free(t->arch.iomap.map);
     57        if (task->arch.iomap.map)
     58                free(task->arch.iomap.map);
    5759}
    5860
  • kernel/arch/amd64/src/proc/thread.c

    re4a4b44 rf56e897f  
    3737/** Perform amd64 specific thread initialization.
    3838 *
    39  * @param t Thread to be initialized.
     39 * @param thread Thread to be initialized.
     40 *
    4041 */
    41 void thread_create_arch(thread_t *t)
     42void thread_create_arch(thread_t *thread)
    4243{
    43         t->arch.tls = 0;
    44         t->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0;
     44        thread->arch.tls = 0;
     45        thread->arch.syscall_rsp[SYSCALL_USTACK_RSP] = 0;
     46       
    4547        /*
    4648         * Kernel RSP can be precalculated at thread creation time.
    4749         */
    48         t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
    49             (uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)];
     50        thread->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
     51            (uintptr_t) &thread->kstack[PAGE_SIZE - sizeof(uint64_t)];
    5052}
    5153
  • kernel/arch/amd64/src/smp/ap.S

    re4a4b44 rf56e897f  
    5555        xorw %ax, %ax
    5656        movw %ax, %ds
    57 
    58         lgdtl ap_gdtr           # initialize Global Descriptor Table register
     57       
     58        lgdtl ap_gdtr       # initialize Global Descriptor Table register
    5959       
    6060        movl %cr0, %eax
    6161        orl $1, %eax
    62         movl %eax, %cr0         # switch to protected mode
     62        movl %eax, %cr0     # switch to protected mode
    6363        jmpl $gdtselector(KTEXT32_DES), $jump_to_kernel - BOOT_OFFSET + AP_BOOT_OFFSET
    64        
     64
    6565jump_to_kernel:
    6666.code32
     
    7272        movw %ax, %gs
    7373       
    74         # Enable 64-bit page transaltion entries - CR4.PAE = 1.
     74        # Enable 64-bit page transaltion entries (CR4.PAE = 1).
    7575        # Paging is not enabled until after long mode is enabled
    7676       
     
    7878        btsl $5, %eax
    7979        movl %eax, %cr4
    80 
     80       
    8181        leal ptl_0, %eax
    8282        movl %eax, %cr3
    8383       
    8484        # Enable long mode
    85         movl $EFER_MSR_NUM, %ecx        # EFER MSR number
    86         rdmsr                           # Read EFER
    87         btsl $AMD_LME_FLAG, %eax        # Set LME=1
    88         wrmsr                           # Write EFER
     85        movl $EFER_MSR_NUM, %ecx  # EFER MSR number
     86        rdmsr                     # Read EFER
     87        btsl $AMD_LME_FLAG, %eax  # Set LME=1
     88        wrmsr                     # Write EFER
    8989       
    90         # Enable paging to activate long mode (set CR0.PG=1)
     90        # Enable paging to activate long mode (set CR0.PG = 1)
    9191        movl %cr0, %eax
    9292        btsl $31, %eax
     
    9898.code64
    9999start64:
    100         movq (ctx), %rsp
     100        movabsq $ctx, %rsp
     101        movq (%rsp), %rsp
     102       
    101103        pushq $0
    102104        movq %rsp, %rbp
    103         call main_ap - AP_BOOT_OFFSET + BOOT_OFFSET   # never returns
     105       
     106        movabsq $main_ap, %rax
     107        callq *%rax   # never returns
    104108
    105109#endif /* CONFIG_SMP */
Note: See TracChangeset for help on using the changeset viewer.