Changeset f8ddd17 in mainline for kernel/generic/src


Timestamp:
2006-12-09T20:20:50Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
b82a13c
Parents:
9ab9c2ec
Message:

Rework support for virtually indexed caches.
Instead of repeatedly flushing the data cache, which was huge overkill, refuse to create illegal address aliases in the kernel (again) and instead allocate pages of the appropriate color in userspace. Extend the detection to the SYS_PHYSMEM_MAP syscall as well.

Add support for tracking physical memory areas mappable by SYS_PHYSMEM_MAP.

Lots of coding style changes.
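
The core of the change is the page-color comparison that now guards both as_area_share() and SYS_PHYSMEM_MAP. The following is a minimal sketch of the idea only, assuming a hypothetical virtually indexed D-cache whose index uses two virtual address bits above the page offset; the real PAGE_COLOR definition is architecture specific and is not part of this changeset.

    /*
     * Sketch only, not HelenOS code. Assumes hypothetical 8 KiB pages and a
     * D-cache index that uses 2 virtual address bits above the page offset.
     */
    #include <stdint.h>
    #include <errno.h>

    #define PAGE_WIDTH      13                              /* hypothetical 8 KiB pages */
    #define COLOR_BITS      2                               /* hypothetical index bits above the offset */
    #define PAGE_COLOR(va)  (((va) >> PAGE_WIDTH) & ((1 << COLOR_BITS) - 1))

    /*
     * Two virtual mappings of the same physical frame hit the same cache
     * lines only when their page colors match; otherwise the alias is
     * refused, as as_area_share() and ddi_physmem_map() now do below.
     */
    static int check_alias(uintptr_t existing_va, uintptr_t new_va)
    {
            if (PAGE_COLOR(existing_va) != PAGE_COLOR(new_va))
                    return ENOTSUP;         /* illegal address alias */
            return 0;
    }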

Location:
kernel/generic/src
Files:
7 edited

  • kernel/generic/src/console/klog.c

    r9ab9c2ec rf8ddd17  
    3939#include <ddi/device.h>
    4040#include <ddi/irq.h>
     41#include <ddi/ddi.h>
    4142#include <ipc/irq.h>
    4243
     44/** Physical memory area used for klog. */
     45static parea_t klog_parea;
     46       
    4347/*
    4448 * For now, we use 0 as INR.
    45  * However, on some architectures 0 is the clock interrupt (e.g. amd64 and ia32).
    46  * It is therefore desirable to have architecture specific definition of KLOG_VIRT_INR
    47  * in the future.
     49 * However, on some architectures 0 is the clock interrupt (e.g. amd64 and
     50 * ia32). It is therefore desirable to have architecture specific definition of
     51 * KLOG_VIRT_INR in the future.
    4852 */
    4953#define KLOG_VIRT_INR   0
     
    7680        if (!faddr)
    7781                panic("Cannot allocate page for klog");
    78         klog = (char *)PA2KA(faddr);
     82        klog = (char *) PA2KA(faddr);
    7983       
    8084        devno_t devno = device_assign_devno();
    8185       
    82         sysinfo_set_item_val("klog.faddr", NULL, (unative_t)faddr);
     86        klog_parea.pbase = (uintptr_t) faddr;
     87        klog_parea.vbase = (uintptr_t) klog;
     88        klog_parea.frames = 1 << KLOG_ORDER;
     89        klog_parea.cacheable = true;
     90        ddi_parea_register(&klog_parea);
     91
     92        sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
     93        sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
     94                PAGE_COLOR((uintptr_t) klog));
    8395        sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
    8496        sysinfo_set_item_val("klog.devno", NULL, devno);
  • kernel/generic/src/ddi/ddi.c

    r9ab9c2ec rf8ddd17  
    4848#include <synch/spinlock.h>
    4949#include <syscall/copy.h>
     50#include <adt/btree.h>
    5051#include <arch.h>
    5152#include <align.h>
    5253#include <errno.h>
    5354
     55/** This lock protects the parea_btree. */
     56SPINLOCK_INITIALIZE(parea_lock);
     57
     58/** B+tree with enabled physical memory areas. */
     59static btree_t parea_btree;
     60
     61/** Initialize DDI. */
     62void ddi_init(void)
     63{
     64        btree_create(&parea_btree);
     65}
     66
     67/** Enable piece of physical memory for mapping by physmem_map().
     68 *
     69 * @param parea Pointer to physical area structure.
     70 *
     71 * @todo This function doesn't check for overlaps. It depends on the kernel to
     72 * create disjunct physical memory areas.
     73 */
     74void ddi_parea_register(parea_t *parea)
     75{
     76        ipl_t ipl;
     77
     78        ipl = interrupts_disable();
     79        spinlock_lock(&parea_lock);
     80       
     81        /*
     82         * TODO: we should really check for overlaps here.
     83         * However, we should be safe because the kernel is pretty sane and
     84         * memory of different devices doesn't overlap.
     85         */
     86        btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
     87
     88        spinlock_unlock(&parea_lock);
     89        interrupts_restore(ipl);       
     90}
     91
    5492/** Map piece of physical memory into virtual address space of current task.
    5593 *
    56  * @param pf Physical frame address of the starting frame.
    57  * @param vp Virtual page address of the starting page.
     94 * @param pf Physical address of the starting frame.
     95 * @param vp Virtual address of the starting page.
    5896 * @param pages Number of pages to map.
    5997 * @param flags Address space area flags for the mapping.
    6098 *
    61  * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
    62  *         ENOENT if there is no task matching the specified ID and ENOMEM if
    63  *         there was a problem in creating address space area.
     99 * @return 0 on success, EPERM if the caller lacks capabilities to use this
     100 *      syscall, ENOENT if there is no task matching the specified ID or the
     101 *      physical address space is not enabled for mapping and ENOMEM if there
     102 *      was a problem in creating address space area. ENOTSUP is returned when
     103 *      an attempt to create an illegal address alias is detected.
    64104 */
    65105static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
     
    80120
    81121        ipl = interrupts_disable();
     122
     123        /*
     124         * Check if the physical memory area is enabled for mapping.
     125         * If the architecture supports virtually indexed caches, intercept
     126         * attempts to create an illegal address alias.
     127         */
     128        spinlock_lock(&parea_lock);
     129        parea_t *parea;
     130        btree_node_t *nodep;
     131        parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep);
     132        if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
     133                !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
     134                parea->cacheable)) {
     135                /*
     136                 * This physical memory area cannot be mapped.
     137                 */
     138                spinlock_unlock(&parea_lock);
     139                interrupts_restore(ipl);
     140                return ENOENT;
     141        }
     142
     143#ifdef CONFIG_VIRT_IDX_DCACHE
     144        if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
     145                /*
     146                 * Refuse to create an illegal address alias.
     147                 */
     148                spinlock_unlock(&parea_lock);
     149                interrupts_restore(ipl);
     150                return ENOTSUP;
     151        }
     152#endif /* CONFIG_VIRT_IDX_DCACHE */
     153
     154        spinlock_unlock(&parea_lock);
     155
    82156        spinlock_lock(&TASK->lock);
    83157       
     
    108182 * @param size Size of the enabled I/O space..
    109183 *
    110  * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
    111  *        ENOENT if there is no task matching the specified ID.
     184 * @return 0 on success, EPERM if the caller lacks capabilities to use this
     185 *      syscall, ENOENT if there is no task matching the specified ID.
    112186 */
    113187static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
     
    161235 * @return 0 on success, otherwise it returns error code found in errno.h
    162236 */
    163 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages,
    164                         unative_t flags)
    165 {
    166         return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, FRAME_SIZE),
    167                                           ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), (count_t) pages,
    168                                           (int) flags);
     237unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t
     238        pages, unative_t flags)
     239{
     240        return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
     241                FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
     242                (count_t) pages, (int) flags);
    169243}
    170244
     
    184258                return (unative_t) rc;
    185259               
    186         return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, (uintptr_t) arg.ioaddr, (size_t) arg.size);
     260        return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
     261                (uintptr_t) arg.ioaddr, (size_t) arg.size);
    187262}
    188263
    189264/** Disable or enable preemption.
    190265 *
    191  * @param enable If non-zero, the preemption counter will be decremented, leading to potential
    192  *               enabling of preemption. Otherwise the preemption counter will be incremented,
    193  *              preventing preemption from occurring.
     266 * @param enable If non-zero, the preemption counter will be decremented,
     267 *      leading to potential enabling of preemption. Otherwise the preemption
     268 *      counter will be incremented, preventing preemption from occurring.
    194269 *
    195270 * @return Zero on success or EPERM if callers capabilities are not sufficient.
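
The klog, rd and clock subsystems below all register their pages through the new parea interface. The parea_t structure itself is defined in <ddi/ddi.h>, which is not part of this diff; the sketch below reconstructs it from the fields used in this changeset (the field layout is therefore an assumption) and shows the registration pattern.

    /* Reconstructed from the fields used in this changeset; <ddi/ddi.h> may differ. */
    typedef struct {
            uintptr_t pbase;        /* physical base address of the area */
            uintptr_t vbase;        /* kernel virtual base of the area */
            count_t frames;         /* number of frames covered by the area */
            bool cacheable;         /* true if the area may be mapped cacheable */
    } parea_t;

    /* Typical registration by a kernel subsystem (cf. klog.c, rd.c, clock.c): */
    static parea_t example_parea;

    static void example_register(uintptr_t faddr, void *vaddr, count_t frames)
    {
            example_parea.pbase = faddr;
            example_parea.vbase = (uintptr_t) vaddr;
            example_parea.frames = frames;
            example_parea.cacheable = true;
            ddi_parea_register(&example_parea);     /* area becomes mappable via SYS_PHYSMEM_MAP */
    }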
  • kernel/generic/src/lib/rd.c

    r9ab9c2ec rf8ddd17  
    4242#include <mm/frame.h>
    4343#include <sysinfo/sysinfo.h>
     44#include <ddi/ddi.h>
     45
     46static parea_t rd_parea;                /**< Physical memory area for rd. */
    4447
    4548int init_rd(rd_header * header, size_t size)
    4649{
    4750        /* Identify RAM disk */
    48         if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
     51        if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) ||
     52                (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
    4953                return RE_INVALID;
    5054       
     
    7781                dsize = size - hsize;
    7882       
     83        rd_parea.pbase = KA2PA((void *) header + hsize);
     84        rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
     85        rd_parea.frames = SIZE2FRAMES(dsize);
     86        rd_parea.cacheable = true;
     87        ddi_parea_register(&rd_parea);
     88
    7989        sysinfo_set_item_val("rd", NULL, true);
    8090        sysinfo_set_item_val("rd.size", NULL, dsize);
    81         sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) KA2PA((void *) header + hsize));
     91        sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
     92                KA2PA((void *) header + hsize));
     93        sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
     94                PAGE_COLOR((uintptr_t) header + hsize));
    8295
    8396        return RE_OK;
  • kernel/generic/src/main/main.c

    r9ab9c2ec rf8ddd17  
    8181#include <console/klog.h>
    8282#include <smp/smp.h>
     83#include <ddi/ddi.h>
    8384
    8485/** Global configuration structure. */
     
    103104 * appropriate sizes and addresses.
    104105 */
    105 uintptr_t hardcoded_load_address = 0;   /**< Virtual address of where the kernel is loaded. */
    106 size_t hardcoded_ktext_size = 0;        /**< Size of the kernel code in bytes. */
    107 size_t hardcoded_kdata_size = 0;        /**< Size of the kernel data in bytes. */
    108 
    109 uintptr_t stack_safe = 0;               /**< Lowest safe stack virtual address */
     106uintptr_t hardcoded_load_address = 0;   /**< Virtual address of where the kernel
     107                                          *  is loaded. */
     108size_t hardcoded_ktext_size = 0;        /**< Size of the kernel code in bytes.
     109                                          */
     110size_t hardcoded_kdata_size = 0;        /**< Size of the kernel data in bytes.
     111                                         */
     112uintptr_t stack_safe = 0;               /**< Lowest safe stack virtual address.
     113                                          */
    110114
    111115void main_bsp(void);
     
    142146        config.memory_size = get_memory_size();
    143147       
    144         config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
     148        config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
     149                hardcoded_kdata_size, PAGE_SIZE);
    145150        config.stack_size = CONFIG_STACK_SIZE;
    146151       
     
    151156        count_t i;
    152157        for (i = 0; i < init.cnt; i++) {
    153                 if (PA_overlaps(config.stack_base, config.stack_size, init.tasks[i].addr, init.tasks[i].size))
    154                         config.stack_base = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, config.stack_size);
     158                if (PA_overlaps(config.stack_base, config.stack_size,
     159                        init.tasks[i].addr, init.tasks[i].size))
     160                        config.stack_base = ALIGN_UP(init.tasks[i].addr +
     161                                init.tasks[i].size, config.stack_size);
    155162        }
    156163
    157164        /* Avoid placing stack on top of boot allocations. */
    158165        if (ballocs.size) {
    159                 if (PA_overlaps(config.stack_base, config.stack_size, ballocs.base, ballocs.size))
    160                         config.stack_base = ALIGN_UP(ballocs.base + ballocs.size, PAGE_SIZE);
     166                if (PA_overlaps(config.stack_base, config.stack_size,
     167                        ballocs.base, ballocs.size))
     168                        config.stack_base = ALIGN_UP(ballocs.base +
     169                                ballocs.size, PAGE_SIZE);
    161170        }
    162171       
     
    165174       
    166175        context_save(&ctx);
    167         context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, THREAD_STACK_SIZE);
     176        context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
     177                THREAD_STACK_SIZE);
    168178        context_restore(&ctx);
    169179        /* not reached */
     
    201211         */     
    202212        arch_pre_mm_init();
    203         frame_init();           /* Initialize at least 1 memory segment big enough for slab to work */
     213        frame_init();           
     214        /* Initialize at least 1 memory segment big enough for slab to work. */
    204215        slab_cache_init();
    205216        btree_init();
     
    207218        page_init();
    208219        tlb_init();
     220        ddi_init();
    209221        arch_post_mm_init();
    210222
    211223        version_print();
    212         printf("kernel: %.*p hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10);
    213         printf("stack:  %.*p size=%zdK\n", sizeof(uintptr_t) * 2, config.stack_base, config.stack_size >> 10);
     224        printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
     225                "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
     226                config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
     227                10);
     228        printf("stack:  %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
     229                config.stack_base, config.stack_size >> 10);
    214230
    215231        arch_pre_smp_init();
    216232        smp_init();
    217        
    218         slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */
     233        /* Slab must be initialized after we know the number of processors. */
     234        slab_enable_cpucache();
    219235
    220236        printf("config.memory_size=%zdM\n", config.memory_size >> 20);
     
    233249        if (init.cnt > 0) {
    234250                for (i = 0; i < init.cnt; i++)
    235                         printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(uintptr_t) * 2, init.tasks[i].addr, i, init.tasks[i].size);
     251                        printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
     252                                sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
     253                                init.tasks[i].size);
    236254        } else
    237255                printf("No init binaries found\n");
     
    305323         * switch to this cpu's private stack prior to waking kmp up.
    306324         */
    307         context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     325        context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
     326                (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    308327        context_restore(&CPU->saved_context);
    309328        /* not reached */
  • kernel/generic/src/mm/as.c

    r9ab9c2ec rf8ddd17  
    167167        as->page_table = page_table_create(flags);
    168168
    169 #ifdef CONFIG_VIRT_IDX_DCACHE
    170         as->dcache_flush_on_install = false;
    171         as->dcache_flush_on_deinstall = false;
    172 #endif  /* CONFIG_VIRT_IDX_DCACHE */
    173 
    174169        return as;
    175170}
     
    278273        else
    279274                memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
    280 
    281 #ifdef CONFIG_VIRT_IDX_DCACHE
    282         /*
    283          * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
    284          * orig_color is probably wrong until the flag is reset. In other words, it is
    285          * initialized with the color of the area being created and not with the color
    286          * of the original address space area at the beginning of the share chain. Of
    287          * course, the correct color is set by as_area_share() before the flag is
    288          * reset.
    289          */
    290         a->orig_color = PAGE_COLOR(base);
    291 #endif /* CONFIG_VIRT_IDX_DCACHE */
    292275
    293276        btree_create(&a->used_space);
     
    576559 * or ENOMEM if there was a problem in allocating destination address space
    577560 * area. ENOTSUP is returned if the address space area backend does not support
    578  * sharing.
     561 * sharing or if the kernel detects an attempt to create an illegal address
     562 * alias.
    579563 */
    580564int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
     
    584568        int src_flags;
    585569        size_t src_size;
    586         int src_orig_color;
    587570        as_area_t *src_area, *dst_area;
    588571        share_info_t *sh_info;
     
    601584                return ENOENT;
    602585        }
    603        
    604586
    605587        if (!src_area->backend || !src_area->backend->share) {
     
    618600        src_backend = src_area->backend;
    619601        src_backend_data = src_area->backend_data;
    620         src_orig_color = src_area->orig_color;
    621602
    622603        /* Share the cacheable flag from the original mapping */
     
    630611                return EPERM;
    631612        }
     613
     614#ifdef CONFIG_VIRT_IDX_DCACHE
     615        if (!(dst_flags_mask & AS_AREA_EXEC)) {
     616                if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
     617                        /*
     618                         * Refuse to create an illegal address alias.
     619                         */
     620                        mutex_unlock(&src_area->lock);
     621                        mutex_unlock(&src_as->lock);
     622                        interrupts_restore(ipl);
     623                        return ENOTSUP;
     624                }
     625        }
     626#endif /* CONFIG_VIRT_IDX_DCACHE */
    632627
    633628        /*
     
    683678        dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    684679        dst_area->sh_info = sh_info;
    685         dst_area->orig_color = src_orig_color;
    686 #ifdef CONFIG_VIRT_IDX_DCACHE
    687         if (src_orig_color != PAGE_COLOR(dst_base)) {
    688                 /*
    689                  * We have just detected an attempt to create an invalid address
    690                  * alias. We allow this and set a special flag that tells the
    691                  * architecture specific code to flush the D-cache when the
    692                  * offending address space is installed and deinstalled
    693                  * (cleanup).
    694                  *
    695                  * In order for the flags to take effect immediately, we also
    696                  * perform a global D-cache shootdown.
    697                  */
    698                 dcache_shootdown_start();
    699                 dst_as->dcache_flush_on_install = true;
    700                 dst_as->dcache_flush_on_deinstall = true;
    701                 dcache_flush();
    702                 dcache_shootdown_finalize();
    703         }
    704 #endif /* CONFIG_VIRT_IDX_DCACHE */
    705680        mutex_unlock(&dst_area->lock);
    706681        mutex_unlock(&dst_as->lock);   
  • kernel/generic/src/sysinfo/sysinfo.c

    r9ab9c2ec rf8ddd17  
    231231               
    232232                switch (root->val_type) {
    233                         case SYSINFO_VAL_UNDEFINED:
    234                                 val = 0;
    235                                 vtype = "UND";
    236                                 break;
    237                         case SYSINFO_VAL_VAL:
    238                                 val = root->val.val;
    239                                 vtype = "VAL";
    240                                 break;
    241                         case SYSINFO_VAL_FUNCTION:
    242                                 val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
    243                                 vtype = "FUN";
    244                                 break;
     233                case SYSINFO_VAL_UNDEFINED:
     234                        val = 0;
     235                        vtype = "UND";
     236                        break;
     237                case SYSINFO_VAL_VAL:
     238                        val = root->val.val;
     239                        vtype = "VAL";
     240                        break;
     241                case SYSINFO_VAL_FUNCTION:
     242                        val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
     243                        vtype = "FUN";
     244                        break;
    245245                }
    246246               
    247                 printf("%s    %s val:%d(%x) sub:%s\n", root->name, vtype, val, val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? "TAB" : "FUN"));
     247                printf("%s    %s val:%d(%x) sub:%s\n", root->name, vtype, val,
     248                        val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ?
     249                        "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ?
     250                        "TAB" : "FUN"));
    248251               
    249252                if (root->subinfo_type == SYSINFO_SUBINFO_TABLE)
  • kernel/generic/src/time/clock.c

    r9ab9c2ec rf8ddd17  
    5555#include <sysinfo/sysinfo.h>
    5656#include <arch/barrier.h>
     57#include <mm/frame.h>
     58#include <ddi/ddi.h>
     59
     60/** Physical memory area of the real time clock. */
     61static parea_t clock_parea;
    5762
    5863/* Pointers to public variables with time */
     
    7378 * information about realtime data. We allocate 1 page with these
    7479 * data and update it periodically.
    75  *
    76  *
    7780 */
    7881void clock_counter_init(void)
     
    8083        void *faddr;
    8184
    82         faddr = frame_alloc(0, FRAME_ATOMIC);
     85        faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
    8386        if (!faddr)
    8487                panic("Cannot allocate page for clock");
    8588       
    86         public_time = (struct ptime *)PA2KA(faddr);
     89        public_time = (struct ptime *) PA2KA(faddr);
    8790
    8891        /* TODO: We would need some arch dependent settings here */
     
    9194        public_time->useconds = 0;
    9295
    93         sysinfo_set_item_val("clock.faddr", NULL, (unative_t)faddr);
     96        clock_parea.pbase = (uintptr_t) faddr;
     97        clock_parea.vbase = (uintptr_t) public_time;
     98        clock_parea.frames = 1;
     99        clock_parea.cacheable = true;
     100        ddi_parea_register(&clock_parea);
     101
     102        /*
     103         * Prepare information for the userspace so that it can successfully
     104         * physmem_map() the clock_parea.
     105         */
     106        sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
     107        sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
     108                PAGE_COLOR(clock_parea.vbase));
     109        sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
    94110}
    95111
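
The new clock.fcolor and clock.cacheable sysinfo items exist so that userspace can pick a virtual page of the matching color before calling SYS_PHYSMEM_MAP; with a mismatched color the syscall now fails with ENOTSUP on virtually indexed cache architectures. A rough userspace-side sketch follows; the wrapper names, flag values and constants are assumptions for illustration, not the actual libc interface of this changeset.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical userspace wrappers; the real libc interface may differ. */
    extern unsigned long sysinfo_value(const char *name);                   /* assumed */
    extern int physmem_map(void *phys, void *virt, unsigned long pages,
            int flags);                                                     /* assumed */

    #define PAGE_SIZE               8192            /* hypothetical page size */
    #define PAGE_COLOR(va)          (((uintptr_t) (va) >> 13) & 0x3)        /* hypothetical, 4 colors */
    #define FREE_REGION             0x40000000UL    /* hypothetical free virtual region */
    #define AS_AREA_READ            1               /* flag values assumed for this sketch */
    #define AS_AREA_CACHEABLE       8

    void *map_clock_page(void)
    {
            uintptr_t faddr = (uintptr_t) sysinfo_value("clock.faddr");
            unsigned long fcolor = sysinfo_value("clock.fcolor");
            int flags = AS_AREA_READ;

            if (sysinfo_value("clock.cacheable"))
                    flags |= AS_AREA_CACHEABLE;

            /* Advance to the first page in the region with the matching color. */
            uintptr_t vp = FREE_REGION;
            while (PAGE_COLOR(vp) != fcolor)
                    vp += PAGE_SIZE;

            if (physmem_map((void *) faddr, (void *) vp, 1, flags) != 0)
                    return NULL;
            return (void *) vp;
    }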