Changeset ae318d3 in mainline for kernel/generic/src


Ignore:
Timestamp:
2009-02-16T18:50:48Z (16 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
06da55b
Parents:
17f168e
Message:

overhaul pareas: use a single physical area for the part of the physical address space that does not belong to physical memory

Location:
kernel/generic/src
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/adt/avl.c

    r17f168e rae318d3  
    4444 * Every node has a pointer to its parent which allows insertion of multiple
    4545 * identical keys into the tree.
    46  * 
     46 *
    4747 * Be careful when using this tree because of the base attribute which is added
    4848 * to every inserted node key. There is no rule in which order nodes with the
  • kernel/generic/src/console/cmd.c

    r17f168e rae318d3  
    530530        }
    531531       
    532         spinlock_unlock(&cmd_lock);     
     532        spinlock_unlock(&cmd_lock);
    533533
    534534        return 1;
  • kernel/generic/src/console/console.c

    r17f168e rae318d3  
    127127       
    128128        klog_parea.pbase = (uintptr_t) faddr;
    129         klog_parea.vbase = (uintptr_t) klog;
    130129        klog_parea.frames = SIZE2FRAMES(KLOG_SIZE);
    131         klog_parea.cacheable = true;
    132130        ddi_parea_register(&klog_parea);
    133131
  • kernel/generic/src/ddi/ddi.c

    r17f168e rae318d3  
    4848#include <synch/spinlock.h>
    4949#include <syscall/copy.h>
    50 #include <adt/btree.h>
     50#include <adt/list.h>
    5151#include <arch.h>
    5252#include <align.h>
     
    5656SPINLOCK_INITIALIZE(parea_lock);
    5757
    58 /** B+tree with enabled physical memory areas. */
    59 static btree_t parea_btree;
     58/** List with enabled physical memory areas. */
     59static LIST_INITIALIZE(parea_head);
     60
     61/** Physical memory area for devices. */
     62static parea_t dev_area;
    6063
    6164/** Initialize DDI. */
    6265void ddi_init(void)
    6366{
    64         btree_create(&parea_btree);
     67        hw_area(&dev_area.pbase, &dev_area.frames);
     68        ddi_parea_register(&dev_area);
    6569}
    6670
     
    7579{
    7680        ipl_t ipl;
    77 
     81       
    7882        ipl = interrupts_disable();
    7983        spinlock_lock(&parea_lock);
     
    8185        /*
    8286         * TODO: we should really check for overlaps here.
    83          * However, we should be safe because the kernel is pretty sane and
    84          * memory of different devices doesn't overlap.
    85          */
    86         btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
    87 
     87         * However, we should be safe because the kernel is pretty sane.
     88         */
     89        link_initialize(&parea->link);
     90        list_append(&parea->link, &parea_head);
     91       
    8892        spinlock_unlock(&parea_lock);
    89         interrupts_restore(ipl);       
     93        interrupts_restore(ipl);
    9094}
    9195
     
    98102 *
    99103 * @return 0 on success, EPERM if the caller lacks capabilities to use this
    100  *      syscall, ENOENT if there is no task matching the specified ID or the
    101  *      physical address space is not enabled for mapping and ENOMEM if there
    102  *      was a problem in creating address space area.
    103  */
    104 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
     104 *  syscall, ENOENT if there is no task matching the specified ID or the
     105 *  physical address space is not enabled for mapping and ENOMEM if there
     106 *  was a problem in creating address space area.
     107 */
     108static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags)
    105109{
    106110        ipl_t ipl;
    107111        cap_t caps;
    108112        mem_backend_data_t backend_data;
    109 
     113       
    110114        backend_data.base = pf;
    111115        backend_data.frames = pages;
     
    117121        if (!(caps & CAP_MEM_MANAGER))
    118122                return EPERM;
    119 
     123       
    120124        ipl = interrupts_disable();
    121 
     125       
    122126        /*
    123127         * Check if the physical memory area is enabled for mapping.
    124          * If the architecture supports virtually indexed caches, intercept
    125          * attempts to create an illegal address alias.
    126128         */
    127129        spinlock_lock(&parea_lock);
    128         parea_t *parea;
    129         btree_node_t *nodep;
    130         parea = (parea_t *) btree_search(&parea_btree, (btree_key_t) pf, &nodep);
    131         if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
    132             !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
    133             parea->cacheable)) {
     130       
     131        bool fnd = false;
     132        link_t *cur;
     133       
     134        for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
     135                parea_t *parea = list_get_instance(cur, parea_t, link);
     136                if ((parea->pbase <= pf) && (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) {
     137                        fnd = true;
     138                        break;
     139                }
     140        }
     141       
     142        spinlock_unlock(&parea_lock);
     143       
     144        if (!fnd) {
    134145                /*
    135                  * This physical memory area cannot be mapped.
     146                 * Physical memory area cannot be mapped.
    136147                 */
    137                 spinlock_unlock(&parea_lock);
    138148                interrupts_restore(ipl);
    139149                return ENOENT;
    140150        }
    141         spinlock_unlock(&parea_lock);
    142 
     151       
    143152        spinlock_lock(&TASK->lock);
    144153       
     
    227236        return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
    228237            FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
    229             (count_t) pages, (int) flags);
     238            (pfn_t) pages, (int) flags);
    230239}
    231240
     
    259268unative_t sys_preempt_control(int enable)
    260269{
    261         if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
    262                 return EPERM;
    263         if (enable)
    264                 preemption_enable();
    265         else
    266                 preemption_disable();
    267         return 0;
     270        if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
     271                return EPERM;
     272        if (enable)
     273                preemption_enable();
     274        else
     275                preemption_disable();
     276        return 0;
    268277}
    269278
  • kernel/generic/src/lib/rd.c

    r17f168e rae318d3  
    8989        rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize),
    9090            FRAME_SIZE);
    91         rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
    9291        rd_parea.frames = SIZE2FRAMES(dsize);
    93         rd_parea.cacheable = true;
    9492        ddi_parea_register(&rd_parea);
    9593
  • kernel/generic/src/proc/task.c

    r17f168e rae318d3  
    340340               
    341341                thr = list_get_instance(cur, thread_t, th_link);
    342                        
     342               
    343343                spinlock_lock(&thr->lock);
    344344                thr->interrupted = true;
  • kernel/generic/src/time/clock.c

    r17f168e rae318d3  
    8989
    9090        clock_parea.pbase = (uintptr_t) faddr;
    91         clock_parea.vbase = (uintptr_t) uptime;
    9291        clock_parea.frames = 1;
    93         clock_parea.cacheable = true;
    9492        ddi_parea_register(&clock_parea);
    9593
Note: See TracChangeset for help on using the changeset viewer.