Changeset e49e234 in mainline for kernel/generic/src/ddi


Ignore:
Timestamp:
2009-02-27T11:32:31Z (17 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
c1f7f6ea
Parents:
5f0f29ce
Message:

kernel memory management revisited (phase 2): map physical memory according to zones

  • ia32: register reserved and ACPI zones
  • pareas are now used only for mapping of present physical memory (hw_area() is gone)
  • firmware zones and physical addresses outside any zones are allowed to be mapped generally
  • fix nasty ancient bug in zones_insert_zone()
File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/ddi/ddi.c

    r5f0f29ce re49e234  
    3030 * @{
    3131 */
    32  
     32
    3333/**
    3434 * @file
    35  * @brief       Device Driver Interface functions.
     35 * @brief Device Driver Interface functions.
    3636 *
    3737 * This file contains functions that comprise the Device Driver Interface.
     
    4848#include <synch/spinlock.h>
    4949#include <syscall/copy.h>
    50 #include <adt/list.h>
     50#include <adt/btree.h>
    5151#include <arch.h>
    5252#include <align.h>
     
    5656SPINLOCK_INITIALIZE(parea_lock);
    5757
    58 /** List with enabled physical memory areas. */
    59 static LIST_INITIALIZE(parea_head);
     58/** B+tree with enabled physical memory areas. */
     59static btree_t parea_btree;
    6060
    6161/** Initialize DDI. */
    6262void ddi_init(void)
    6363{
    64         hw_area();
     64        btree_create(&parea_btree);
    6565}
    6666
     
    6969 * @param parea Pointer to physical area structure.
    7070 *
    71  * @todo This function doesn't check for overlaps. It depends on the kernel to
    72  * create disjunct physical memory areas.
    7371 */
    7472void ddi_parea_register(parea_t *parea)
    7573{
    76         ipl_t ipl;
    77        
    78         ipl = interrupts_disable();
     74        ipl_t ipl = interrupts_disable();
    7975        spinlock_lock(&parea_lock);
    8076       
    8177        /*
    82          * TODO: we should really check for overlaps here.
    83          * However, we should be safe because the kernel is pretty sane.
    84          */
    85         link_initialize(&parea->link);
    86         list_append(&parea->link, &parea_head);
     78         * We don't check for overlaps here as the kernel is pretty sane.
     79         */
     80        btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
    8781       
    8882        spinlock_unlock(&parea_lock);
     
    9286/** Map piece of physical memory into virtual address space of current task.
    9387 *
    94  * @param pf Physical address of the starting frame.
    95  * @param vp Virtual address of the starting page.
     88 * @param pf    Physical address of the starting frame.
     89 * @param vp    Virtual address of the starting page.
    9690 * @param pages Number of pages to map.
    9791 * @param flags Address space area flags for the mapping.
    9892 *
    9993 * @return 0 on success, EPERM if the caller lacks capabilities to use this
    100  *  syscall, ENOENT if there is no task matching the specified ID or the
    101  *  physical address space is not enabled for mapping and ENOMEM if there
    102  *  was a problem in creating address space area.
    103  */
    104 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags)
    105 {
    106         ipl_t ipl;
    107         cap_t caps;
     94 *         syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there
     95 *         is no task matching the specified ID or the physical address space
     96 *         is not enabled for mapping and ENOMEM if there was a problem in
     97 *         creating address space area.
     98 *
     99 */
     100static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
     101{
     102        ASSERT(TASK);
     103        ASSERT((pf % FRAME_SIZE) == 0);
     104        ASSERT((vp % PAGE_SIZE) == 0);
     105       
     106        /*
     107         * Make sure the caller is authorised to make this syscall.
     108         */
     109        cap_t caps = cap_get(TASK);
     110        if (!(caps & CAP_MEM_MANAGER))
     111                return EPERM;
     112       
    108113        mem_backend_data_t backend_data;
    109        
    110114        backend_data.base = pf;
    111115        backend_data.frames = pages;
    112116       
    113         /*
    114          * Make sure the caller is authorised to make this syscall.
    115          */
    116         caps = cap_get(TASK);
    117         if (!(caps & CAP_MEM_MANAGER))
    118                 return EPERM;
    119        
    120         ipl = interrupts_disable();
    121        
    122         /*
    123          * Check if the physical memory area is enabled for mapping.
    124          */
    125         spinlock_lock(&parea_lock);
    126        
    127         bool fnd = false;
    128         link_t *cur;
    129        
    130         for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
    131                 parea_t *parea = list_get_instance(cur, parea_t, link);
    132                 if ((parea->pbase <= pf) && (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) {
    133                         fnd = true;
    134                         break;
    135                 }
    136         }
    137        
    138         spinlock_unlock(&parea_lock);
    139        
    140         if (!fnd) {
    141                 /*
    142                  * Physical memory area cannot be mapped.
    143                  */
    144                 interrupts_restore(ipl);
    145                 return ENOENT;
    146         }
    147        
     117        ipl_t ipl = interrupts_disable();
     118       
     119        /* Find the zone of the physical memory */
     120        spinlock_lock(&zones.lock);
     121        count_t znum = find_zone(ADDR2PFN(pf), pages, 0);
     122       
     123        if (znum == (count_t) -1) {
     124                /* Frames not found in any zones
     125                 * -> assume it is hardware device and allow mapping
     126                 */
     127                spinlock_unlock(&zones.lock);
     128                goto map;
     129        }
     130       
     131        if (zones.info[znum].flags & ZONE_FIRMWARE) {
     132                /* Frames are part of firmware */
     133                spinlock_unlock(&zones.lock);
     134                goto map;
     135        }
     136       
     137        if (zone_flags_available(zones.info[znum].flags)) {
     138                /* Frames are part of physical memory, check if the memory
     139                 * region is enabled for mapping.
     140                 */
     141                spinlock_unlock(&zones.lock);
     142               
     143                spinlock_lock(&parea_lock);
     144                btree_node_t *nodep;
     145                parea_t *parea = (parea_t *) btree_search(&parea_btree,
     146                    (btree_key_t) pf, &nodep);
     147               
     148                if ((!parea) || (parea->frames < pages))
     149                        goto err;
     150               
     151                spinlock_unlock(&parea_lock);
     152                goto map;
     153        }
     154       
     155err:
     156        spinlock_unlock(&zones.lock);
     157        interrupts_restore(ipl);
     158        return ENOENT;
     159       
     160map:
    148161        spinlock_lock(&TASK->lock);
    149162       
    150         if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
    151                 &phys_backend, &backend_data)) {
     163        if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
     164            AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
    152165                /*
    153166                 * The address space area could not have been created.
     
    175188 *
    176189 * @return 0 on success, EPERM if the caller lacks capabilities to use this
    177  *      syscall, ENOENT if there is no task matching the specified ID.
     190 *           syscall, ENOENT if there is no task matching the specified ID.
     191 *
    178192 */
    179193static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
    180194{
    181         ipl_t ipl;
    182         cap_t caps;
    183         task_t *t;
    184         int rc;
    185        
    186195        /*
    187196         * Make sure the caller is authorised to make this syscall.
    188197         */
    189         caps = cap_get(TASK);
     198        cap_t caps = cap_get(TASK);
    190199        if (!(caps & CAP_IO_MANAGER))
    191200                return EPERM;
    192201       
    193         ipl = interrupts_disable();
     202        ipl_t ipl = interrupts_disable();
    194203        spinlock_lock(&tasks_lock);
    195204       
    196         t = task_find_by_id(id);
    197        
    198         if ((!t) || (!context_check(CONTEXT, t->context))) {
     205        task_t *task = task_find_by_id(id);
     206       
     207        if ((!task) || (!context_check(CONTEXT, task->context))) {
    199208                /*
    200209                 * There is no task with the specified ID
     
    206215                return ENOENT;
    207216        }
    208 
     217       
    209218        /* Lock the task and release the lock protecting tasks_btree. */
    210         spinlock_lock(&t->lock);
     219        spinlock_lock(&task->lock);
    211220        spinlock_unlock(&tasks_lock);
    212 
    213         rc = ddi_iospace_enable_arch(t, ioaddr, size);
    214        
    215         spinlock_unlock(&t->lock);
    216         interrupts_restore(ipl);
     221       
     222        int rc = ddi_iospace_enable_arch(task, ioaddr, size);
     223       
     224        spinlock_unlock(&task->lock);
     225        interrupts_restore(ipl);
     226       
    217227        return rc;
    218228}
     
    226236 *
    227237 * @return 0 on success, otherwise it returns error code found in errno.h
    228  */
     238 *
     239 */
    229240unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    230241    unative_t pages, unative_t flags)
     
    232243        return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
    233244            FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
    234             (pfn_t) pages, (int) flags);
     245            (count_t) pages, (int) flags);
    235246}
    236247
     
    240251 *
    241252 * @return 0 on success, otherwise it returns error code found in errno.h
    242  */
     253 *
     254 */
    243255unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
    244256{
    245257        ddi_ioarg_t arg;
    246         int rc;
    247        
    248         rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
     258        int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    249259        if (rc != 0)
    250260                return (unative_t) rc;
    251                
     261       
    252262        return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
    253263            (uintptr_t) arg.ioaddr, (size_t) arg.size);
     
    257267 *
    258268 * @param enable If non-zero, the preemption counter will be decremented,
    259  *      leading to potential enabling of preemption. Otherwise the preemption
    260  *      counter will be incremented, preventing preemption from occurring.
     269 *               leading to potential enabling of preemption. Otherwise
     270 *               the preemption counter will be incremented, preventing
     271 *               preemption from occurring.
    261272 *
    262273 * @return Zero on success or EPERM if callers capabilities are not sufficient.
    263  */
     274 *
     275 */
    264276unative_t sys_preempt_control(int enable)
    265277{
    266278        if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
    267279                return EPERM;
     280       
    268281        if (enable)
    269282                preemption_enable();
    270283        else
    271284                preemption_disable();
     285       
    272286        return 0;
    273287}
Note: See TracChangeset for help on using the changeset viewer.