Changeset fbcdeb8 in mainline for kernel/generic/src


Timestamp: 2011-12-19T17:30:39Z
Author: Martin Decky <martin@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 58f6229
Parents: 24cf31f1
Message:

Remove the two-phase way of creating virtual memory areas (first asking for a mappable address and then mapping it), which was prone to race conditions when two or more calls to as_get_mappable_page() and as_area_create() were interleaved. This caused, for example, the e1k driver to fail randomly.

All memory-area-related syscalls and IPC calls have been altered to accept the special value (void *) -1, representing a request to atomically search for a mappable address space "hole" and map into it.

Individual changes:

  • IPC_M_SHARE_OUT: the destination address space area is supplied by the kernel; the callee only specifies the lower bound (the address is returned to the callee via a pointer in an IPC reply argument)
  • IPC_M_SHARE_IN: the destination address space area is supplied by the kernel; the callee only specifies the lower bound (the address is returned to the caller as usual via an IPC argument)
  • SYS_AS_GET_UNMAPPED_AREA was removed
  • dummy implementations of SYS_PHYSMEM_UNMAP and SYS_IOSPACE_DISABLE were added for the sake of symmetry (they do nothing yet)
  • SYS_PHYSMEM_MAP and SYS_DMAMEM_MAP were altered to accept (void *) -1 as the address space area base, together with a lower bound
  • kernel as_area_create() and as_area_share() were altered to accept (void *) -1 as the address space area base, together with a lower bound
  • uspace libraries and programs were altered to reflect the new API (a usage sketch follows this list)
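
As a concrete illustration of the new convention, here is a minimal sketch (not code from this changeset; it assumes the post-change as_area_create() signature shown in the diffs below, with size and bound in scope):

    /* Passing -1 as the base asks as_area_create() to find a suitable
     * hole at or above bound and map it, all under the address space
     * lock, so no other thread can race for the same hole. */
    uintptr_t virt = (uintptr_t) -1;
    as_area_t *area = as_area_create(TASK->as,
        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, size,
        AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
    if (area == NULL)
            return ENOMEM;
    /* On success, virt holds the base address chosen by the kernel. */

Because the hole search and the mapping happen under a single address space lock, the interleaving that made the e1k driver fail can no longer occur.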
Location: kernel/generic/src
Files: 6 edited

Legend: lines prefixed with "-" were removed (r24cf31f1), lines prefixed with "+" were added (rfbcdeb8); unprefixed lines are unmodified context and "..." marks omitted unchanged lines.
  • kernel/generic/src/ddi/ddi.c

--- r24cf31f1
+++ rfbcdeb8

  *
  * @param phys  Physical address of the starting frame.
- * @param virt  Virtual address of the starting page.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
+ * @param virt  Virtual address of the starting page.
+ * @param bound Lowest virtual address bound.
  *
  * @return EOK on success.
  * @return EPERM if the caller lacks capabilities to use this syscall.
- * @return EBADMEM if phys or virt is not page aligned.
+ * @return EBADMEM if phys is not page aligned.
  * @return ENOENT if there is no task matching the specified ID or
  *         the physical address space is not enabled for mapping.
...
  *
  */
-NO_TRACE static int ddi_physmem_map(uintptr_t phys, uintptr_t virt, size_t pages,
-    unsigned int flags)
+NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
+    unsigned int flags, uintptr_t *virt, uintptr_t bound)
 {
         ASSERT(TASK);

         if ((phys % FRAME_SIZE) != 0)
-                return EBADMEM;
-
-        if ((virt % PAGE_SIZE) != 0)
                 return EBADMEM;

...
 map:
-        if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages), virt,
-            AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
+        if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
+            AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
                 /*
                  * The address space area was not created.
...
 }

+NO_TRACE static int physmem_unmap(uintptr_t virt)
+{
+        // TODO: implement unmap
+        return EOK;
+}
+
+/** Wrapper for SYS_PHYSMEM_MAP syscall.
+ *
+ * @param phys     Physical base address to map
+ * @param pages    Number of pages
+ * @param flags    Flags of newly mapped pages
+ * @param virt_ptr Destination virtual address
+ * @param bound    Lowest virtual address bound.
+ *
+ * @return 0 on success, otherwise it returns error code found in errno.h
+ *
+ */
+sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
+    void *virt_ptr, uintptr_t bound)
+{
+        uintptr_t virt = (uintptr_t) -1;
+        int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
+            &virt, bound);
+        if (rc != EOK)
+                return rc;
+
+        rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+        if (rc != EOK) {
+                physmem_unmap((uintptr_t) virt);
+                return rc;
+        }
+
+        return EOK;
+}
+
+sysarg_t sys_physmem_unmap(uintptr_t virt)
+{
+        return physmem_unmap(virt);
+}
+
 /** Enable range of I/O space for task.
  *
...
  *
  */
-NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
-    size_t size)
+NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
         /*
...
         /* Lock the task and release the lock protecting tasks_btree. */
         irq_spinlock_exchange(&tasks_lock, &task->lock);
-
         int rc = ddi_iospace_enable_arch(task, ioaddr, size);
-
         irq_spinlock_unlock(&task->lock, true);

         return rc;
-}
-
-/** Wrapper for SYS_PHYSMEM_MAP syscall.
- *
- * @param phys  Physical base address to map
- * @param virt  Destination virtual address
- * @param pages Number of pages
- * @param flags Flags of newly mapped pages
- *
- * @return 0 on success, otherwise it returns error code found in errno.h
- *
- */
-sysarg_t sys_physmem_map(uintptr_t phys, uintptr_t virt,
-    size_t pages, unsigned int flags)
-{
-        return (sysarg_t)
-            ddi_physmem_map(ALIGN_DOWN(phys, FRAME_SIZE),
-            ALIGN_DOWN(virt, PAGE_SIZE), pages, flags);
 }

...
                 return (sysarg_t) rc;

-        return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
+        return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
             (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }

-NO_TRACE static int dmamem_map(uintptr_t virt, size_t size,
-    unsigned int map_flags, unsigned int flags, void **phys)
+sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
+{
+        // TODO: implement
+        return ENOTSUP;
+}
+
+NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys)
 {
         ASSERT(TASK);

+        // TODO: implement locking of non-anonymous mapping
+        return page_find_mapping(virt, phys);
+}
+
+NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+{
+        ASSERT(TASK);
+
+        size_t pages = SIZE2FRAMES(size);
+        uint8_t order;
+
+        /* We need the 2^order >= pages */
+        if (pages == 1)
+                order = 0;
+        else
+                order = fnzb(pages - 1) + 1;
+
+        *phys = frame_alloc_noreserve(order, 0);
+        if (*phys == NULL)
+                return ENOMEM;
+
+        mem_backend_data_t backend_data;
+        backend_data.base = (uintptr_t) *phys;
+        backend_data.frames = pages;
+
+        if (!as_area_create(TASK->as, map_flags, size,
+            AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
+                frame_free_noreserve((uintptr_t) *phys);
+                return ENOMEM;
+        }
+
+        return EOK;
+}
+
+NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
+{
+        // TODO: implement unlocking & unmap
+        return EOK;
+}
+
+NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
+{
+        // TODO: implement unlocking & unmap
+        return EOK;
+}
+
+sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
+    void *phys_ptr, void *virt_ptr, uintptr_t bound)
+{
         if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
-                // TODO: implement locking of non-anonymous mapping
-                return page_find_mapping(virt, phys);
+                /*
+                 * Non-anonymous DMA mapping
+                 */
+
+                void *phys;
+                int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
+                    flags, &phys);
+
+                if (rc != EOK)
+                        return rc;
+
+                rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+                if (rc != EOK) {
+                        dmamem_unmap((uintptr_t) virt_ptr, size);
+                        return rc;
+                }
         } else {
-                // TODO: implement locking
-
-                if ((virt % PAGE_SIZE) != 0)
-                        return EBADMEM;
-
-                size_t pages = SIZE2FRAMES(size);
-                uint8_t order;
-
-                /* We need the 2^order >= pages */
-                if (pages == 1)
-                        order = 0;
-                else
-                        order = fnzb(pages - 1) + 1;
-
-                *phys = frame_alloc_noreserve(order, 0);
-                if (*phys == NULL)
-                        return ENOMEM;
-
-                mem_backend_data_t backend_data;
-                backend_data.base = (uintptr_t) *phys;
-                backend_data.frames = pages;
-
-                if (!as_area_create(TASK->as, map_flags, size, virt,
-                    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
-                        frame_free_noreserve((uintptr_t) *phys);
-                        return ENOMEM;
+                /*
+                 * Anonymous DMA mapping
+                 */
+
+                void *phys;
+                uintptr_t virt = (uintptr_t) -1;
+                int rc = dmamem_map_anonymous(size, map_flags, flags,
+                    &phys, &virt, bound);
+                if (rc != EOK)
+                        return rc;
+
+                rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+                if (rc != EOK) {
+                        dmamem_unmap_anonymous((uintptr_t) virt);
+                        return rc;
                 }

-                return EOK;
-        }
-}
-
-NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size,
-    unsigned int flags)
-{
-        // TODO: implement unlocking & unmap
-        return EOK;
-}
-
-sysarg_t sys_dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
-    unsigned int flags, void *phys_ptr)
-{
-        void *phys;
-        int rc = dmamem_map(virt, size, map_flags, flags, &phys);
-        if (rc != EOK)
-                return rc;
-
-        rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
-        if (rc != EOK) {
-                dmamem_unmap(virt, size, flags);
-                return rc;
+                rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+                if (rc != EOK) {
+                        dmamem_unmap_anonymous((uintptr_t) virt);
+                        return rc;
+                }
         }

...
 sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
 {
-        return dmamem_unmap(virt, size, flags);
+        if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
+                return dmamem_unmap(virt, size);
+        else
+                return dmamem_unmap_anonymous(virt);
 }
  • kernel/generic/src/ipc/sysipc.c

--- r24cf31f1
+++ rfbcdeb8

                         irq_spinlock_unlock(&answer->sender->lock, true);

+                        uintptr_t dst_base = (uintptr_t) -1;
                         int rc = as_area_share(as, IPC_GET_ARG1(*olddata),
-                            IPC_GET_ARG2(*olddata), AS,
-                            IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
+                            IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata),
+                            &dst_base, IPC_GET_ARG1(answer->data));
+
+                        if (rc == EOK)
+                                rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data),
+                                    &dst_base, sizeof(dst_base));
+
                         IPC_SET_RETVAL(answer->data, rc);
                         return rc;
...
                         irq_spinlock_unlock(&answer->sender->lock, true);

+                        uintptr_t dst_base = (uintptr_t) -1;
                         int rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
-                            IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
-                            IPC_GET_ARG2(answer->data));
+                            IPC_GET_ARG1(*olddata), as, IPC_GET_ARG2(answer->data),
+                            &dst_base, IPC_GET_ARG3(answer->data));
+                        IPC_SET_ARG4(answer->data, dst_base);
                         IPC_SET_RETVAL(answer->data, rc);
                 }
  • kernel/generic/src/lib/elf.c

--- r24cf31f1
+++ rfbcdeb8

         size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);

-        as_area_t *area = as_area_create(as, flags, mem_sz, base,
-            AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
+        as_area_t *area = as_area_create(as, flags, mem_sz,
+            AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
         if (!area)
                 return EE_MEMORY;
  • kernel/generic/src/mm/as.c

--- r24cf31f1
+++ rfbcdeb8

 }

+/** Return pointer to unmapped address space area
+ *
+ * The address space must be already locked when calling
+ * this function.
+ *
+ * @param as    Address space.
+ * @param bound Lowest address bound.
+ * @param size  Requested size of the allocation.
+ *
+ * @return Address of the beginning of unmapped address space area.
+ * @return -1 if no suitable address space area was found.
+ *
+ */
+NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
+    size_t size)
+{
+        ASSERT(mutex_locked(&as->lock));
+
+        if (size == 0)
+                return (uintptr_t) -1;
+
+        /*
+         * Make sure we allocate from page-aligned
+         * address. Check for possible overflow in
+         * each step.
+         */
+
+        size_t pages = SIZE2FRAMES(size);
+
+        /*
+         * Find the lowest unmapped address aligned on the size
+         * boundary, not smaller than bound and of the required size.
+         */
+
+        /* First check the bound address itself */
+        uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
+        if ((addr >= bound) &&
+            (check_area_conflicts(as, addr, pages, NULL)))
+                return addr;
+
+        /* Eventually check the addresses behind each area */
+        list_foreach(as->as_area_btree.leaf_list, cur) {
+                btree_node_t *node =
+                    list_get_instance(cur, btree_node_t, leaf_link);
+
+                for (btree_key_t i = 0; i < node->keys; i++) {
+                        as_area_t *area = (as_area_t *) node->value[i];
+
+                        mutex_lock(&area->lock);
+
+                        addr =
+                            ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+                        bool avail =
+                            ((addr >= bound) && (addr >= area->base) &&
+                            (check_area_conflicts(as, addr, pages, area)));
+
+                        mutex_unlock(&area->lock);
+
+                        if (avail)
+                                return addr;
+                }
+        }
+
+        /* No suitable address space area found */
+        return (uintptr_t) -1;
+}
+
 /** Create address space area of common attributes.
  *
...
  * @param flags        Flags of the area memory.
  * @param size         Size of area.
- * @param base         Base address of area.
  * @param attrs        Attributes of the area.
  * @param backend      Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param base         Starting virtual address of the area.
+ *                     If set to -1, a suitable mappable area is found.
+ * @param bound        Lowest address bound if base is set to -1.
+ *                     Otherwise ignored.
  *
  * @return Address space area on success or NULL on failure.
...
  */
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data)
-{
-        if ((base % PAGE_SIZE) != 0)
+    unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
+{
+        if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
                 return NULL;

...
         mutex_lock(&as->lock);

-        if (!check_area_conflicts(as, base, pages, NULL)) {
+        if (*base == (uintptr_t) -1) {
+                *base = as_get_unmapped_area(as, bound, size);
+                if (*base == (uintptr_t) -1) {
+                        mutex_unlock(&as->lock);
+                        return NULL;
+                }
+        }
+
+        if (!check_area_conflicts(as, *base, pages, NULL)) {
                 mutex_unlock(&as->lock);
                 return NULL;
...
         area->pages = pages;
         area->resident = 0;
-        area->base = base;
+        area->base = *base;
         area->sh_info = NULL;
         area->backend = backend;
...

         btree_create(&area->used_space);
-        btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+        btree_insert(&as->as_area_btree, *base, (void *) area,
+            NULL);

         mutex_unlock(&as->lock);
...
  * @param acc_size       Expected size of the source area.
  * @param dst_as         Pointer to destination address space.
- * @param dst_base       Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
+ * @param dst_base       Target base address. If set to -1,
+ *                       a suitable mappable area is found.
+ * @param bound          Lowest address bound if dst_base is set to -1.
+ *                       Otherwise ignored.
  *
  * @return Zero on success.
...
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
+    uintptr_t bound)
 {
         mutex_lock(&src_as->lock);
...
          * to support sharing in less privileged mode.
          */
-        as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
-            dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+        as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
+            src_size, AS_AREA_ATTR_PARTIAL, src_backend,
+            &src_backend_data, dst_base, bound);
         if (!dst_area) {
                 /*
...
  */

-/** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
-{
-        if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
-            AS_AREA_ATTR_NONE, &anon_backend, NULL))
-                return (sysarg_t) address;
-        else
+sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
+    uintptr_t bound)
+{
+        uintptr_t virt = base;
+        as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+            AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+        if (area == NULL)
                 return (sysarg_t) -1;
-}
-
-/** Wrapper for as_area_resize(). */
+
+        return (sysarg_t) virt;
+}
+
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
...
 }

-/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
...
 }

-/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
         return (sysarg_t) as_area_destroy(AS, address);
-}
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-        if (size == 0)
-                return 0;
-
-        /*
-         * Make sure we allocate from page-aligned
-         * address. Check for possible overflow in
-         * each step.
-         */
-
-        size_t pages = SIZE2FRAMES(size);
-        uintptr_t ret = 0;
-
-        /*
-         * Find the lowest unmapped address aligned on the sz
-         * boundary, not smaller than base and of the required size.
-         */
-
-        mutex_lock(&AS->lock);
-
-        /* First check the base address itself */
-        uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-        if ((addr >= base) &&
-            (check_area_conflicts(AS, addr, pages, NULL)))
-                ret = addr;
-
-        /* Eventually check the addresses behind each area */
-        list_foreach(AS->as_area_btree.leaf_list, cur) {
-                if (ret != 0)
-                        break;
-
-                btree_node_t *node =
-                    list_get_instance(cur, btree_node_t, leaf_link);
-
-                btree_key_t i;
-                for (i = 0; (ret == 0) && (i < node->keys); i++) {
-                        uintptr_t addr;
-
-                        as_area_t *area = (as_area_t *) node->value[i];
-
-                        mutex_lock(&area->lock);
-
-                        addr = ALIGN_UP(area->base + P2SZ(area->pages),
-                            PAGE_SIZE);
-
-                        if ((addr >= base) && (addr >= area->base) &&
-                            (check_area_conflicts(AS, addr, pages, area)))
-                                ret = addr;
-
-                        mutex_unlock(&area->lock);
-                }
-        }
-
-        mutex_unlock(&AS->lock);
-
-        return (sysarg_t) ret;
 }

  • kernel/generic/src/proc/program.c

--- r24cf31f1
+++ rfbcdeb8

          * Create the stack address space area.
          */
+        uintptr_t virt = USTACK_ADDRESS;
         as_area_t *area = as_area_create(as,
             AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-            STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE,
-            &anon_backend, NULL);
+            STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
         if (!area)
                 return ENOMEM;
  • kernel/generic/src/syscall/syscall.c

--- r24cf31f1
+++ rfbcdeb8

         (syshandler_t) sys_as_area_change_flags,
         (syshandler_t) sys_as_area_destroy,
-        (syshandler_t) sys_as_get_unmapped_area,

         /* Page mapping related syscalls. */
...
         (syshandler_t) sys_device_assign_devno,
         (syshandler_t) sys_physmem_map,
+        (syshandler_t) sys_physmem_unmap,
         (syshandler_t) sys_dmamem_map,
         (syshandler_t) sys_dmamem_unmap,
         (syshandler_t) sys_iospace_enable,
+        (syshandler_t) sys_iospace_disable,
         (syshandler_t) sys_irq_register,
         (syshandler_t) sys_irq_unregister,