Fork us on GitHub Follow us on Facebook Follow us on Twitter

Changeset 2bf4936 in mainline


Ignore:
Timestamp:
2009-04-14T15:50:56Z (13 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master
Children:
47a6708
Parents:
577b531
Message:

On sparc64, do fast identity mapping only for physical memory.
For addresses above physical memory, such as I/O devices,
fall through to the C miss handler and map the memory noncacheably.
Replace deprecated Bicc instructions with proper Bcc instructions.

Location:
kernel/arch/sparc64
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/sparc64/include/mm/frame.h

    r577b531 r2bf4936  
    7474
    7575extern uintptr_t last_frame;
     76extern uintptr_t end_of_identity;
     77
    7678extern void frame_arch_init(void);
    7779#define physmem_print()
  • kernel/arch/sparc64/include/trap/mmu.h

    r577b531 r2bf4936  
    104104         */
    1051050:
    106         mov VA_DMMU_TAG_ACCESS, %g1
    107         ldxa [%g1] ASI_DMMU, %g1                        ! read the faulting Context and VPN
     106        sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
     107        wr %g0, ASI_DMMU, %asi
     108        ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1             ! read the faulting Context and VPN
    108109        set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
    109110        andcc %g1, %g2, %g3                             ! get Context
    110         bnz 0f                                          ! Context is non-zero
     111        bnz %xcc, 0f                                    ! Context is non-zero
    111112        andncc %g1, %g2, %g3                            ! get page address into %g3
    112         bz 0f                                           ! page address is zero
     113        bz  %xcc, 0f                                    ! page address is zero
     114        ldx [%g7 + %lo(end_of_identity)], %g4
     115        cmp %g3, %g4
     116        bgeu %xcc, 0f
    113117
    114         sethi %hi(kernel_8k_tlb_data_template), %g2
    115         ldx [%g2 + %lo(kernel_8k_tlb_data_template)], %g2
     118        ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
    116119        or %g3, %g2, %g2
    117120        stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG            ! identity map the kernel page
     
    139142         * This is necessary to survive nested DTLB misses.
    140143         */     
    141         mov VA_DMMU_TAG_ACCESS, %g2
    142         ldxa [%g2] ASI_DMMU, %g2
     144        ldxa [VA_DMMU_TAG_ACCESS] %asi, %g2
    143145
    144146        /*
  • kernel/arch/sparc64/src/mm/frame.c

    r577b531 r2bf4936  
    8080                frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
    8181        }
     82
     83        end_of_identity = PA2KA(last_frame);
    8284}
    8385
  • kernel/arch/sparc64/src/mm/page.c

    r577b531 r2bf4936  
    6363uintptr_t hw_map(uintptr_t physaddr, size_t size)
    6464{
    65         if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
    66                 panic("Unable to map physical memory %p (%d bytes).", physaddr, size)
    67        
    68         uintptr_t virtaddr = PA2KA(last_frame);
    69         pfn_t i;
    70         for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
    71                 uintptr_t addr = PFN2ADDR(i);
    72                 page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr, PAGE_NOT_CACHEABLE | PAGE_WRITE);
    73         }
    74        
    75         last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
    76        
    77         return virtaddr;
     65        return KA2PA(physaddr);
    7866}
    7967
  • kernel/arch/sparc64/src/mm/tlb.c

    r577b531 r2bf4936  
    200200void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
    201201{
    202         uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
     202        uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    203203        index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
    204204        pte_t *t;
    205205
    206206        page_table_lock(AS, true);
    207         t = page_mapping_find(AS, va);
     207        t = page_mapping_find(AS, page_16k);
    208208        if (t && PTE_EXECUTABLE(t)) {
    209209                /*
     
    223223                 */             
    224224                page_table_unlock(AS, true);
    225                 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
     225                if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
     226                    AS_PF_FAULT) {
    226227                        do_fast_instruction_access_mmu_miss_fault(istate,
    227228                            __func__);
     
    243244void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
    244245{
    245         uintptr_t va;
     246        uintptr_t page_8k;
     247        uintptr_t page_16k;
    246248        index_t index;
    247249        pte_t *t;
    248250
    249         va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
     251        page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
     252        page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
    250253        index = tag.vpn % MMU_PAGES_PER_PAGE;
    251254
     
    255258                        do_fast_data_access_mmu_miss_fault(istate, tag,
    256259                            __func__);
     260                } else if (page_8k >= end_of_identity) {
     261                        /*
     262                         * The kernel is accessing the I/O space.
     263                         * We still do identity mapping for I/O,
     264                         * but without caching.
     265                         */
     266                        dtlb_insert_mapping(page_8k, KA2PA(page_8k),
     267                            PAGESIZE_8K, false, false);
     268                        return;
    257269                }
    258270                do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
     
    261273
    262274        page_table_lock(AS, true);
    263         t = page_mapping_find(AS, va);
     275        t = page_mapping_find(AS, page_16k);
    264276        if (t) {
    265277                /*
     
    279291                 */             
    280292                page_table_unlock(AS, true);
    281                 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
     293                if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
     294                    AS_PF_FAULT) {
    282295                        do_fast_data_access_mmu_miss_fault(istate, tag,
    283296                            __func__);
     
    296309void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
    297310{
    298         uintptr_t va;
     311        uintptr_t page_16k;
    299312        index_t index;
    300313        pte_t *t;
    301314
    302         va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
     315        page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    303316        index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */
    304317
    305318        page_table_lock(AS, true);
    306         t = page_mapping_find(AS, va);
     319        t = page_mapping_find(AS, page_16k);
    307320        if (t && PTE_WRITABLE(t)) {
    308321                /*
     
    314327                t->d = true;
    315328                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
    316                     va + index * MMU_PAGE_SIZE);
     329                    page_16k + index * MMU_PAGE_SIZE);
    317330                dtlb_pte_copy(t, index, false);
    318331#ifdef CONFIG_TSB
     
    326339                 */             
    327340                page_table_unlock(AS, true);
    328                 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
     341                if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
     342                    AS_PF_FAULT) {
    329343                        do_fast_data_access_protection_fault(istate, tag,
    330344                            __func__);
  • kernel/arch/sparc64/src/start.S

    r577b531 r2bf4936  
    8585        sllx %l5, 13 + (63 - (PHYSMEM_ADDR_SIZE - 1)), %l5
    8686        srlx %l5, 63 - (PHYSMEM_ADDR_SIZE - 1), %l5     
    87        
     87
    8888        /*
    8989         * Setup basic runtime environment.
     
    334334        ldx [%g2], %g3
    335335        cmp %g3, %g1
    336         bne 2b
     336        bne %xcc, 2b
    337337        nop
    338338
     
    382382
    383383/*
    384  * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it
    385  * is further modified to reflect the starting address of physical memory.
    386  */
     384 * The fast_data_access_mmu_miss_data_hi label and the end_of_identity and
     385 * kernel_8k_tlb_data_template variables are meant to stay together,
     386 * aligned on 16B boundary.
     387 */
     388.global fast_data_access_mmu_miss_data_hi
     389.global end_of_identity
    387390.global kernel_8k_tlb_data_template
     391
     392.align 16
     393/*
     394 * This label is used by the fast_data_access_MMU_miss trap handler.
     395 */
     396fast_data_access_mmu_miss_data_hi:
     397/*
     398 * This variable is used by the fast_data_access_MMU_miss trap handler.
     399 * In runtime, it is modified to contain the address of the end of physical
     400 * memory.
     401 */
     402end_of_identity:
     403        .quad -1
     404/*
     405 * This variable is used by the fast_data_access_MMU_miss trap handler.
     406 * In runtime, it is further modified to reflect the starting address of
     407 * physical memory.
     408 */
    388409kernel_8k_tlb_data_template:
    389410#ifdef CONFIG_VIRT_IDX_DCACHE
Note: See TracChangeset for help on using the changeset viewer.