Changeset 2bf4936 in mainline for kernel/arch/sparc64/src/mm/tlb.c


Timestamp: 2009-04-14T15:50:56Z
Author:    Jakub Jermar <jakub@…>
Branches:  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:  47a6708
Parents:   577b531
Message:

On sparc64, do fast identity mapping only for physical memory.
For addresses above physical memory, such as I/O devices,
fall through to the C miss handler and map the memory noncacheably.
Replace deprecated Bicc instructions with proper Bcc instructions.
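
The heart of the change is the new test in fast_data_access_mmu_miss():
kernel addresses below end_of_identity keep taking the fast, cacheable
identity mapping, while addresses above it are now identity-mapped with
caching disabled. A minimal user-space sketch of that classification;
the end_of_identity value and the sample addresses are assumptions for
illustration, not values from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed boundary: the first kernel virtual address that is no
     * longer backed by physical memory. In the kernel this is the
     * end_of_identity variable. */
    static const uint64_t end_of_identity = 4ULL << 30;    /* 4 GiB, assumed */

    static const char *classify(uint64_t page_8k)
    {
            if (page_8k < end_of_identity)
                    return "physical memory: cacheable identity mapping";
            return "I/O space: noncacheable identity mapping (C handler)";
    }

    int main(void)
    {
            printf("%s\n", classify(0x2000));       /* an ordinary RAM page */
            printf("%s\n", classify(5ULL << 30));   /* hypothetical device */
            return 0;
    }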

File:
1 edited

  • kernel/arch/sparc64/src/mm/tlb.c

--- kernel/arch/sparc64/src/mm/tlb.c (r577b531)
+++ kernel/arch/sparc64/src/mm/tlb.c (r2bf4936)
@@ -200,10 +200,10 @@
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
-	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
 	index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
 	pte_t *t;
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_EXECUTABLE(t)) {
 		/*
     
@@ -223,5 +223,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_instruction_access_mmu_miss_fault(istate,
 			    __func__);
     
@@ -243,9 +244,11 @@
 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_8k;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
+	page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;
 
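The renames from va to page_8k and page_16k make the 16K-page emulation
explicit: the hardware works with 8K MMU pages while the kernel page
size is 16K, so each kernel page spans MMU_PAGES_PER_PAGE hardware
entries and index picks the half that faulted. A standalone sketch of
the arithmetic; the constants are assumptions chosen to match the
8K-under-16K configuration suggested by the code:

    #include <stdint.h>
    #include <stdio.h>

    #define MMU_PAGE_WIDTH     13                  /* 8K hardware (MMU) page */
    #define MMU_PAGE_SIZE      (1ULL << MMU_PAGE_WIDTH)
    #define MMU_PAGES_PER_PAGE 2                   /* 8K entries per 16K page */
    #define PAGE_SIZE          (MMU_PAGES_PER_PAGE * MMU_PAGE_SIZE)

    #define ALIGN_DOWN(a, s)   ((a) & ~((uint64_t) (s) - 1))

    int main(void)
    {
            uint64_t vpn = 0x12345;        /* example VPN from the TLB tag */

            /* The same three derived values as in the hunk above. */
            uint64_t page_8k = vpn << MMU_PAGE_WIDTH;
            uint64_t page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
            uint64_t index = vpn % MMU_PAGES_PER_PAGE;

            printf("page_8k  = %#llx\n", (unsigned long long) page_8k);
            printf("page_16k = %#llx\n", (unsigned long long) page_16k);
            printf("index    = %llu\n", (unsigned long long) index);
            return 0;
    }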
     
@@ -255,4 +258,13 @@
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
+		} else if (page_8k >= end_of_identity) {
+			/*
+			 * The kernel is accessing the I/O space.
+			 * We still do identity mapping for I/O,
+			 * but without caching.
+			 */
+			dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+			    PAGESIZE_8K, false, false);
+			return;
 		}
 		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
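A note on the two boolean arguments in the new I/O branch: the
prototype of dtlb_insert_mapping() is not part of this hunk, so the
parameter names below are presumed from the call site, but the intent
of the change is clear: the entry is inserted unlocked, and with
caching disabled as memory-mapped device registers require.

    dtlb_insert_mapping(page_8k, KA2PA(page_8k), PAGESIZE_8K,
        false,      /* presumably "locked": entry may be evicted and redone */
        false);     /* presumably "cacheable": I/O must bypass the caches */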
     
@@ -261,5 +273,5 @@
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t) {
 		/*
     
@@ -279,5 +291,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_mmu_miss_fault(istate, tag,
 			    __func__);
     
@@ -296,13 +309,13 @@
 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
 {
-	uintptr_t va;
+	uintptr_t page_16k;
 	index_t index;
 	pte_t *t;
 
-	va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
+	page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
 	index = tag.vpn % MMU_PAGES_PER_PAGE;	/* 16K-page emulation */
 
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_WRITABLE(t)) {
 		/*
     
@@ -314,5 +327,5 @@
 		t->d = true;
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
-		    va + index * MMU_PAGE_SIZE);
+		    page_16k + index * MMU_PAGE_SIZE);
 		dtlb_pte_copy(t, index, false);
 #ifdef CONFIG_TSB
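In the protection handler the same emulation arithmetic picks the demap
target: page_16k + index * MMU_PAGE_SIZE is the 8K half that faulted,
whose stale read-only DTLB entry has to go before the now-writable PTE
is copied in. An annotated reading of the sequence (the comments are
interpretation, not part of the change):

    t->d = true;                            /* record the page as dirty */
    dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
        page_16k + index * MMU_PAGE_SIZE);  /* evict stale read-only entry */
    dtlb_pte_copy(t, index, false);         /* install writable translation */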
     
@@ -326,5 +339,6 @@
 		 */
 		page_table_unlock(AS, true);
-		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
+		if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
+		    AS_PF_FAULT) {
 			do_fast_data_access_protection_fault(istate, tag,
 			    __func__);