/* * Copyright (C) 2006 Jakub Jermar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * TLB management. */ #include #include #include #include #include #include #include #include #include #include #include #include #include /** Invalidate all TLB entries. 
*/
/*
 * NOTE(review): this region was mangled by an extraction pass that ate text
 * between '<' and '>' characters (see the for-loop headers and the shift
 * expressions below).  The body of tlb_invalidate_all(), the
 * invalidate-by-pages logic that follows it, and the head of
 * dtlb_kernel_mapping_insert() are incomplete as seen here; the code is
 * kept byte-identical rather than reconstructed by guesswork.  Restore it
 * from revision history before building.
 */
void tlb_invalidate_all(void) { __address adr; __u32 count1,count2,stride1,stride2; int i,j; adr=PAL_PTCE_INFO_BASE(); count1=PAL_PTCE_INFO_COUNT1(); count2=PAL_PTCE_INFO_COUNT2(); stride1=PAL_PTCE_INFO_STRIDE1(); stride2=PAL_PTCE_INFO_STRIDE2(); interrupts_disable(); for(i=0;i>=1) b++; b>>=1; __u64 ps; switch(b) { case 0: /*cnt 1-3*/ { ps=PAGE_WIDTH; break; } case 1: /*cnt 4-15*/ { cnt=(cnt/4)+1; ps=PAGE_WIDTH+2; va&=~((1<> PPN_SHIFT; entry.ps = PAGE_WIDTH; if (dtr) dtr_mapping_insert(page, ASID_KERNEL, entry, tr); else dtc_mapping_insert(page, ASID_KERNEL, entry); }
/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t) { tlb_entry_t entry; entry.word[0] = 0; entry.word[1] = 0; entry.p = t->p; entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; entry.a = t->a; entry.d = t->d; entry.pl = t->k ? PL_KERNEL : PL_USER; entry.ar = t->w ? AR_WRITE : AR_READ; entry.ppn = t->frame >> PPN_SHIFT; entry.ps = PAGE_WIDTH; dtc_mapping_insert(t->page, t->as->asid, entry); }
/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t) { tlb_entry_t entry; entry.word[0] = 0; entry.word[1] = 0; ASSERT(t->x); entry.p = t->p; entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE; entry.a = t->a; entry.pl = t->k ? PL_KERNEL : PL_USER; entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; entry.ppn = t->frame >> PPN_SHIFT; entry.ps = PAGE_WIDTH; itc_mapping_insert(t->page, t->as->asid, entry); }
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate) { region_register rr; __address va; pte_t *t;
/*
 * NOTE(review): rr is never initialized in this handler, yet rr.map.rid is
 * passed to panic() below — that reads an indeterminate value.  Compare
 * alternate_data_tlb_fault(), which does rr.word = rr_read(VA2VRN(va))
 * before using rr.map.rid.
 */
va = pstate->cr_ifa; /* faulting address */ t = page_mapping_find(AS, va); if (t) { /*
	 * The mapping was found in software page hash table.
	 * Insert it into instruction translation cache.
	 */ itc_pte_copy(t); } else { /*
	 * Forward the page fault to address space page fault handler.
	 */ if (!as_page_fault(va)) { panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid); } } }
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate) { region_register rr; rid_t rid; __address va; pte_t *t; va = pstate->cr_ifa; /* faulting address */ rr.word = rr_read(VA2VRN(va)); rid = rr.map.rid; if (RID2ASID(rid) == ASID_KERNEL) { if (VA2VRN(va) == VRN_KERNEL) { /*
	 * Provide KA2PA(identity) mapping for faulting piece of
	 * kernel address space.
	 */ dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); return; } } t = page_mapping_find(AS, va); if (t) { /*
	 * The mapping was found in software page hash table.
	 * Insert it into data translation cache.
	 */ dtc_pte_copy(t); } else { /*
	 * Forward the page fault to address space page fault handler.
	 */ if (!as_page_fault(va)) { panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid); } } }
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate) { panic("%s\n", __FUNCTION__); }
/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate) { pte_t *t; t = page_mapping_find(AS, pstate->cr_ifa); ASSERT(t && t->p); if (t && t->p) { /*
	 * Update the Dirty bit in page tables and reinsert
	 * the mapping into DTC.
	 */ t->d = true; dtc_pte_copy(t); } }
/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
*/
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
}

/** Page not present fault handler.
 *
 * Either copies a Present PTE from the software page hash table into
 * ITC/DTC, or forwards the fault to the address space page fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */

	/*
	 * Read the region register covering the faulting address so that
	 * rr.map.rid is valid for the panic() message below.  The original
	 * code left rr uninitialized and printed an indeterminate rid
	 * (undefined behavior); alternate_data_tlb_fault() does the same
	 * read before using rr.map.rid.
	 */
	rr.word = rr_read(VA2VRN(va));

	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}