Changeset 0fd9b35 in mainline for kernel/arch/ia64/src/mm/tlb.c


Ignore:
Timestamp:
2011-12-30T18:30:47Z (14 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
41deb2a
Parents:
aaa6af2
Message:

Limit kernel identity on ia64 to low memory and make sure to use
the kernel address space for kernel non-identity page table
lookups.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/ia64/src/mm/tlb.c

    raaa6af2 r0fd9b35  
    467467}
    468468
     469static bool is_kernel_fault(uintptr_t va)
     470{
     471        region_register_t rr;
     472
     473        rr.word = rr_read(VA2VRN(va));
     474        rid_t rid = rr.map.rid;
     475        return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);
     476}
     477
    469478/** Instruction TLB fault handler for faults with VHPT turned off.
    470479 *
     
    480489        va = istate->cr_ifa; /* faulting address */
    481490       
     491        ASSERT(!is_kernel_fault(va));
     492
    482493        t = page_mapping_find(AS, va, true);
    483494        if (t) {
     
    567578{
    568579        if (istate->cr_isr.sp) {
    569                 /* Speculative load. Defer the exception
    570                    until a more clever approach can be used.
    571                   
    572                    Currently if we try to find the mapping
    573                    for the speculative load while in the kernel,
    574                    we might introduce a livelock because of
    575                    the possibly invalid values of the address. */
     580                /*
     581                 * Speculative load. Defer the exception until a more clever
     582                 * approach can be used. Currently if we try to find the
     583                 * mapping for the speculative load while in the kernel, we
     584                 * might introduce a livelock because of the possibly invalid
     585                 * values of the address.
     586                 */
    576587                istate->cr_ipsr.ed = true;
    577588                return;
     
    579590       
    580591        uintptr_t va = istate->cr_ifa;  /* faulting address */
    581        
    582         region_register_t rr;
    583         rr.word = rr_read(VA2VRN(va));
    584         rid_t rid = rr.map.rid;
    585         if (RID2ASID(rid) == ASID_KERNEL) {
    586                 if (VA2VRN(va) == VRN_KERNEL) {
     592        as_t *as = AS;
     593       
     594        if (is_kernel_fault(va)) {
     595                if (va < end_of_identity) {
    587596                        /*
    588                          * Provide KA2PA(identity) mapping for faulting piece of
    589                          * kernel address space.
     597                         * Create kernel identity mapping for low memory.
    590598                         */
    591599                        dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
    592600                        return;
     601                } else {
     602                        as = AS_KERNEL;
    593603                }
    594604        }
    595605       
    596606       
    597         pte_t *entry = page_mapping_find(AS, va, true);
     607        pte_t *entry = page_mapping_find(as, va, true);
    598608        if (entry) {
    599609                /*
     
    641651        uintptr_t va;
    642652        pte_t *t;
     653        as_t *as = AS;
    643654       
    644655        va = istate->cr_ifa;  /* faulting address */
    645656       
    646         t = page_mapping_find(AS, va, true);
     657        if (is_kernel_fault(va))
     658                as = AS_KERNEL;
     659
     660        t = page_mapping_find(as, va, true);
    647661        ASSERT((t) && (t->p));
    648662        if ((t) && (t->p) && (t->w)) {
     
    674688       
    675689        va = istate->cr_ifa;  /* faulting address */
     690
     691        ASSERT(!is_kernel_fault(va));
    676692       
    677693        t = page_mapping_find(AS, va, true);
     
    703719        uintptr_t va;
    704720        pte_t *t;
     721        as_t *as = AS;
    705722       
    706723        va = istate->cr_ifa;  /* faulting address */
    707724       
    708         t = page_mapping_find(AS, va, true);
     725        if (is_kernel_fault(va))
     726                as = AS_KERNEL;
     727
     728        t = page_mapping_find(as, va, true);
    709729        ASSERT((t) && (t->p));
    710730        if ((t) && (t->p)) {
     
    736756       
    737757        va = istate->cr_ifa;  /* faulting address */
     758
     759        ASSERT(!is_kernel_fault(va));
    738760       
    739761        /*
     
    763785        va = istate->cr_ifa;  /* faulting address */
    764786       
     787        ASSERT(!is_kernel_fault(va));
     788
    765789        t = page_mapping_find(AS, va, true);
    766790        ASSERT(t);
Note: See TracChangeset for help on using the changeset viewer.