Changeset 29b2bbf in mainline for kernel/arch/sparc64/src


Ignore:
Timestamp:
2006-09-18T22:10:20Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
19dba2b
Parents:
57da95c
Message:

sparc64 work:

  • Experimental support for TSB (Translation Storage Buffer).
Location:
kernel/arch/sparc64/src/mm
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/sparc64/src/mm/as.c

    r57da95c r29b2bbf  
    4141#ifdef CONFIG_TSB
    4242#include <arch/mm/tsb.h>
     43#include <arch/memstr.h>
     44#include <synch/mutex.h>
     45#include <arch/asm.h>
     46#include <mm/frame.h>
     47#include <bitops.h>
     48#include <macros.h>
    4349#endif
    4450
     
    4854        as_operations = &as_ht_operations;
    4955        asid_fifo_init();
     56}
     57
     58int as_constructor_arch(as_t *as, int flags)
     59{
     60#ifdef CONFIG_TSB
     61        int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
     62        uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
     63
     64        if (!tsb)
     65                return -1;
     66
     67        as->arch.itsb = (tsb_entry_t *) tsb;
     68        as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
     69#endif
     70        return 0;
     71}
     72
     73int as_destructor_arch(as_t *as)
     74{
     75#ifdef CONFIG_TSB
     76        count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
     77        frame_free((uintptr_t) as->arch.itsb);
     78        return cnt;
     79#else
     80        return 0;
     81#endif
     82}
     83
     84int as_create_arch(as_t *as, int flags)
     85{
     86#ifdef CONFIG_TSB
     87        ipl_t ipl;
     88
     89        memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
     90        ipl = interrupts_disable();
     91        mutex_lock_active(&as->lock);   /* completely unnecessary, but polite */
     92        tsb_invalidate(as, 0, (count_t) -1);
     93        mutex_unlock(&as->lock);
     94        interrupts_restore(ipl);
     95#endif
     96        return 0;
    5097}
    5198
     
    79126
    80127#ifdef CONFIG_TSB       
    81         if (as != AS_KERNEL) {
    82                 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
     128        uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
    83129
    84                 ASSERT(as->arch.itsb && as->arch.dtsb);
     130        ASSERT(as->arch.itsb && as->arch.dtsb);
    85131
    86                 uintptr_t tsb = as->arch.itsb;
     132        uintptr_t tsb = (uintptr_t) as->arch.itsb;
    87133               
    88                 if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    89                         /*
    90                         * TSBs were allocated from memory not covered
    91                         * by the locked 4M kernel DTLB entry. We need
    92                         * to map both TSBs explicitly.
    93                         */
    94                         dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    95                         dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    96                 }
     134        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     135                /*
     136                * TSBs were allocated from memory not covered
     137                * by the locked 4M kernel DTLB entry. We need
     138                * to map both TSBs explicitly.
     139                */
     140                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
     141                dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
     142        }
    97143               
    98                 /*
    99                 * Setup TSB Base registers.
    100                 */
    101                 tsb_base_reg_t tsb_base;
     144        /*
     145        * Setup TSB Base registers.
     146        */
     147        tsb_base_reg_t tsb_base;
    102148               
    103                 tsb_base.value = 0;
    104                 tsb_base.size = TSB_SIZE;
    105                 tsb_base.split = 0;
     149        tsb_base.value = 0;
     150        tsb_base.size = TSB_SIZE;
     151        tsb_base.split = 0;
    106152
    107                 tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
    108                 itsb_base_write(tsb_base.value);
    109                 tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
    110                 dtsb_base_write(tsb_base.value);
    111         }
     153        tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
     154        itsb_base_write(tsb_base.value);
     155        tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
     156        dtsb_base_write(tsb_base.value);
    112157#endif
    113158}
     
    130175
    131176#ifdef CONFIG_TSB
    132         if (as != AS_KERNEL) {
    133                 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
     177        uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
    134178
    135                 ASSERT(as->arch.itsb && as->arch.dtsb);
     179        ASSERT(as->arch.itsb && as->arch.dtsb);
    136180
    137                 uintptr_t tsb = as->arch.itsb;
     181        uintptr_t tsb = (uintptr_t) as->arch.itsb;
    138182               
    139                 if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    140                         /*
    141                          * TSBs were allocated from memory not covered
    142                          * by the locked 4M kernel DTLB entry. We need
    143                          * to demap the entry installed by as_install_arch().
    144                          */
    145                         dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    146                 }
    147                
     183        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     184                /*
     185                 * TSBs were allocated from memory not covered
     186                 * by the locked 4M kernel DTLB entry. We need
     187                 * to demap the entry installed by as_install_arch().
     188                 */
     189                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    148190        }
    149191#endif
  • kernel/arch/sparc64/src/mm/tlb.c

    r57da95c r29b2bbf  
    5252#include <arch/asm.h>
    5353
     54#ifdef CONFIG_TSB
     55#include <arch/mm/tsb.h>
     56#endif
     57
    5458static void dtlb_pte_copy(pte_t *t, bool ro);
    5559static void itlb_pte_copy(pte_t *t);
     
    145149}
    146150
     151/** Copy PTE to ITLB.
     152 *
     153 * @param t Page Table Entry to be copied.
     154 */
    147155void itlb_pte_copy(pte_t *t)
    148156{
     
    190198                t->a = true;
    191199                itlb_pte_copy(t);
     200#ifdef CONFIG_TSB
     201                itsb_pte_copy(t);
     202#endif
    192203                page_table_unlock(AS, true);
    193204        } else {
     
    234245                t->a = true;
    235246                dtlb_pte_copy(t, true);
     247#ifdef CONFIG_TSB
     248                dtsb_pte_copy(t, true);
     249#endif
    236250                page_table_unlock(AS, true);
    237251        } else {
     
    267281                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
    268282                dtlb_pte_copy(t, false);
     283#ifdef CONFIG_TSB
     284                dtsb_pte_copy(t, false);
     285#endif
    269286                page_table_unlock(AS, true);
    270287        } else {
  • kernel/arch/sparc64/src/mm/tsb.c

    r57da95c r29b2bbf  
    3434
    3535#include <arch/mm/tsb.h>
     36#include <arch/mm/tlb.h>
     37#include <arch/barrier.h>
    3638#include <mm/as.h>
    3739#include <arch/types.h>
    3840#include <typedefs.h>
     41#include <macros.h>
     42#include <debug.h>
     43
     44#define TSB_INDEX_MASK          ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
    3945
    4046/** Invalidate portion of TSB.
    4147 *
    4248 * We assume that the address space is already locked.
     49 * Note that respective portions of both TSBs
     50 * are invalidated at a time.
    4351 *
    4452 * @param as Address space.
    4553 * @param page First page to invalidate in TSB.
    46  * @param pages Number of pages to invalidate.
     54 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the whole TSB.
    4755 */
    4856void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
    4957{
     58        index_t i0, i;
     59        count_t cnt;
     60       
     61        ASSERT(as->arch.itsb && as->arch.dtsb);
     62       
     63        i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
     64        cnt = min(pages, ITSB_ENTRY_COUNT);
     65       
     66        for (i = 0; i < cnt; i++) {
     67                as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = 0;
     68                as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = 0;
     69        }
     70}
     71
/** Copy software PTE to ITSB.
 *
 * The entry is first invalidated, then filled in, then revalidated,
 * with write barriers separating the three phases so that a
 * concurrent TSB lookup never sees a half-written entry.
 *
 * @param t Software PTE.
 */
void itsb_pte_copy(pte_t *t)
{
	as_t *as;
	tsb_entry_t *tsb;
	
	as = t->as;
	/* Index the ITSB by the entry's virtual page number. */
	tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];

	/*
	 * We use write barriers to make sure that the TSB load
	 * won't use inconsistent data or that the fault will
	 * be repeated.
	 */

	tsb->tag.invalid = 1;	/* invalidate the entry (tag target has this set to 0) */

	write_barrier();

	tsb->tag.context = as->asid;
	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
	tsb->data.value = 0;
	tsb->data.size = PAGESIZE_8K;
	tsb->data.pfn = t->frame >> PAGE_WIDTH;
	tsb->data.cp = t->c;
	tsb->data.cv = t->c;
	tsb->data.p = t->k;	/* p as privileged */
	tsb->data.v = t->p;
	
	write_barrier();
	
	tsb->tag.invalid = 0;	/* mark the entry as valid */
}
     108
/** Copy software PTE to DTSB.
 *
 * The entry is first invalidated, then filled in, then revalidated,
 * with write barriers separating the three phases so that a
 * concurrent TSB lookup never sees a half-written entry.
 *
 * @param t Software PTE.
 * @param ro If true, the mapping is copied read-only (the writable
 *           bit is forced off regardless of the software PTE).
 */
void dtsb_pte_copy(pte_t *t, bool ro)
{
	as_t *as;
	tsb_entry_t *tsb;
	
	as = t->as;
	/* Index the DTSB by the entry's virtual page number. */
	tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];

	/*
	 * We use write barriers to make sure that the TSB load
	 * won't use inconsistent data or that the fault will
	 * be repeated.
	 */

	tsb->tag.invalid = 1;	/* invalidate the entry (tag target has this set to 0) */

	write_barrier();

	tsb->tag.context = as->asid;
	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
	tsb->data.value = 0;
	tsb->data.size = PAGESIZE_8K;
	tsb->data.pfn = t->frame >> PAGE_WIDTH;
	tsb->data.cp = t->c;
	tsb->data.cv = t->c;
	tsb->data.p = t->k;	/* p as privileged */
	tsb->data.w = ro ? false : t->w;	/* honor the read-only request */
	tsb->data.v = t->p;
	
	write_barrier();
	
	tsb->tag.invalid = 0;	/* mark the entry as valid */
}
    51147
Note: See TracChangeset for help on using the changeset viewer.