Changeset e5a015b in mainline


Timestamp:
2011-04-16T20:45:36Z
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
a7dbd49
Parents:
b2fb47f (diff), 9e953bda (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links next to each parent above to see all changes relative to that parent.
Message:

Merge the memory reservation feature (Phase 1) from
lp:~jakub/helenos/mm.

This merge makes the testcase from ticket #114 non-reproducible. The
testcase is now available as the tester's malloc2 test. The merge also
appears to make it harder for the system to run out of memory during
kconsole 'test *' and 'tester *', although I still saw several hangs
with this feature in place. See below for what is still missing to make
the hangs less probable or even impossible.

In Phase 1, I am targeting just the low-hanging fruit. In particular,
only anonymous and ELF backend pages have physical memory reserved for
them, at the time of as_area_create() and as_area_resize(). The memory
is unreserved on as_area_destroy(). In all other cases, memory is
reserved at the same time as it is allocated, making those calls
subject to indefinite blocking unless FRAME_ATOMIC is used.
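
To illustrate the new semantics at an allocation site (a minimal
sketch, not code from this merge; it uses only the frame_alloc()
interface and flags introduced below in mm/frame.h):

    #include <mm/frame.h>

    static void *grab_one_frame(void)
    {
            /*
             * With FRAME_ATOMIC, the allocator first calls
             * reserve_try_alloc() and returns NULL right away if the
             * memory cannot be reserved (or allocated).
             */
            void *frame = frame_alloc(0, FRAME_ATOMIC);  /* order 0 = one frame */

            if (!frame) {
                    /*
                     * Without FRAME_ATOMIC, the allocator calls
                     * reserve_force_alloc() instead and the call may
                     * block indefinitely until memory becomes available.
                     */
                    frame = frame_alloc(0, 0);
            }
            return frame;
    }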

Possible sources of memory overcommit not addressed in this merge:

  • As mentioned above, only backend pages are reserved; pages for supporting structures such as B+tree nodes and TTEs are not reserved or otherwise accounted for. Kernel heap allocator fragmentation is not included in the reservations either.
  • The initial amount of reservable memory is fed from zone_construct(). Zone merging is not taken into account, which can make the reservable-memory tracking inaccurate.
Files:
3 added
21 edited
1 moved

Legend:

In the hunks below, a leading '+' marks an added line, a leading '-' marks a
removed line, and unmarked lines are unchanged context. Each hunk compares the
first parent (b2fb47f) with the merge result (e5a015b); the @@ headers give
the old and new line ranges.
  • kernel/Makefile

    @@ -228,4 +228,5 @@
             generic/src/syscall/syscall.c \
             generic/src/syscall/copy.c \
    +        generic/src/mm/reserve.c \
             generic/src/mm/buddy.c \
             generic/src/mm/frame.c \
  • kernel/arch/abs32le/include/types.h

    @@ -40,4 +40,5 @@
     
     typedef uint32_t size_t;
    +typedef int32_t ssize_t;
     
     typedef uint32_t uintptr_t;
  • kernel/arch/amd64/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint64_t size_t;
    +typedef int64_t ssize_t;
     
     typedef uint64_t uintptr_t;
  • kernel/arch/arm32/include/types.h

    @@ -44,4 +44,5 @@
     
     typedef uint32_t size_t;
    +typedef int32_t ssize_t;
     
     typedef uint32_t uintptr_t;
  • kernel/arch/ia32/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint32_t size_t;
    +typedef int32_t ssize_t;
     
     typedef uint32_t uintptr_t;
  • kernel/arch/ia64/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint64_t size_t;
    +typedef int64_t ssize_t;
     
     typedef uint64_t uintptr_t;
  • kernel/arch/mips32/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint32_t size_t;
    +typedef int32_t ssize_t;
     
     typedef uint32_t uintptr_t;
  • kernel/arch/ppc32/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint32_t size_t;
    +typedef int32_t ssize_t;
     
     typedef uint32_t uintptr_t;
  • kernel/arch/sparc64/include/cpu.h

    @@ -59,9 +59,4 @@
     #include <arch/asm.h>
     
    -#ifdef CONFIG_SMP
    -#include <arch/mm/cache.h>
    -#endif
    -
    -
     #if defined (SUN4U)
     #include <arch/sun4u/cpu.h>
  • kernel/arch/sparc64/include/sun4u/cpu.h

    @@ -60,8 +60,4 @@
     #include <trace.h>
     
    -#ifdef CONFIG_SMP
    -#include <arch/mm/cache.h>
    -#endif
    -
     typedef struct {
             uint32_t mid;              /**< Processor ID as read from
  • kernel/arch/sparc64/include/types.h

    @@ -37,4 +37,5 @@
     
     typedef uint64_t size_t;
    +typedef int64_t ssize_t;
     
     typedef uint64_t uintptr_t;
  • kernel/generic/include/mm/as.h

    @@ -238,7 +238,11 @@
     /** Address space area backend structure. */
     typedef struct mem_backend {
    +        bool (* create)(as_area_t *);
    +        bool (* resize)(as_area_t *, size_t);
    +        void (* share)(as_area_t *);
    +        void (* destroy)(as_area_t *);
    +
             int (* page_fault)(as_area_t *, uintptr_t, pf_access_t);
             void (* frame_free)(as_area_t *, uintptr_t, uintptr_t);
    -        void (* share)(as_area_t *);
     } mem_backend_t;
     
  • kernel/generic/include/mm/frame.h

    @@ -62,16 +62,18 @@
     
     /** Convert the frame address to kernel VA. */
    -#define FRAME_KA          0x01
    +#define FRAME_KA          0x1
     /** Do not panic and do not sleep on failure. */
    -#define FRAME_ATOMIC      0x02
    +#define FRAME_ATOMIC      0x2
     /** Do not start reclaiming when no free memory. */
    -#define FRAME_NO_RECLAIM  0x04
    +#define FRAME_NO_RECLAIM  0x4
    +/** Do not reserve / unreserve memory. */
    +#define FRAME_NO_RESERVE  0x8
     
     typedef uint8_t zone_flags_t;
     
     /** Available zone (free for allocation) */
    -#define ZONE_AVAILABLE  0x00
    +#define ZONE_AVAILABLE  0x0
     /** Zone is reserved (not available for allocation) */
    -#define ZONE_RESERVED   0x08
    +#define ZONE_RESERVED   0x8
     /** Zone is used by firmware (not available for allocation) */
     #define ZONE_FIRMWARE   0x10
    @@ -85,5 +87,5 @@
             uint8_t buddy_order;  /**< Buddy system block order */
             link_t buddy_link;    /**< Link to the next free block inside
    -                               one order */
    +                                  one order */
             void *parent;         /**< If allocated by slab, this points there */
     } frame_t;
    @@ -91,14 +93,14 @@
     typedef struct {
             pfn_t base;                    /**< Frame_no of the first frame
    -                                        in the frames array */
    +                                           in the frames array */
             size_t count;                  /**< Size of zone */
             size_t free_count;             /**< Number of free frame_t
    -                                        structures */
    +                                           structures */
             size_t busy_count;             /**< Number of busy frame_t
    -                                        structures */
    +                                           structures */
             zone_flags_t flags;            /**< Type of the zone */
     
             frame_t *frames;               /**< Array of frame_t structures
    -                                        in this zone */
    +                                           in this zone */
             buddy_system_t *buddy_system;  /**< Buddy system for the zone */
     } zone_t;
    @@ -146,21 +148,22 @@
         ((~(((sysarg_t) -1) << (order)) & (index)) == 0)
     #define IS_BUDDY_LEFT_BLOCK(zone, frame) \
    -    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
    +    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
     #define IS_BUDDY_RIGHT_BLOCK(zone, frame) \
    -    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
    +    (((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
     #define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \
    -    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 0)
    +    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
     #define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \
    -    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x01) == 1)
    -
    -#define frame_alloc(order, flags) \
    -    frame_alloc_generic(order, flags, NULL)
    +    (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
     
     extern void frame_init(void);
     extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
    +extern void *frame_alloc(uint8_t, frame_flags_t);
    +extern void *frame_alloc_noreserve(uint8_t, frame_flags_t);
    +extern void frame_free_generic(uintptr_t, frame_flags_t);
     extern void frame_free(uintptr_t);
    +extern void frame_free_noreserve(uintptr_t);
     extern void frame_reference_add(pfn_t);
     
    -extern size_t find_zone(pfn_t frame, size_t count, size_t hint);
    +extern size_t find_zone(pfn_t, size_t, size_t);
     extern size_t zone_create(pfn_t, size_t, pfn_t, zone_flags_t);
     extern void *frame_get_parent(pfn_t, size_t);
  • kernel/generic/include/mm/reserve.h

    @@ -1,4 +1,4 @@
     /*
    - * Copyright (c) 2006 Jakub Jermar
    + * Copyright (c) 2011 Jakub Jermar
      * All rights reserved.
      *
    @@ -27,5 +27,5 @@
      */
     
    -/** @addtogroup sparc64mm
    +/** @addtogroup genericmm
      * @{
      */
    @@ -33,9 +33,12 @@
      */
     
    -#ifndef KERN_sparc64_CACHE_H_
    -#define KERN_sparc64_CACHE_H_
    +#ifndef KERN_RESERVE_H_
    +#define KERN_RESERVE_H_
     
    -#include <mm/page.h>
    -#include <mm/frame.h>
    +#include <typedefs.h>
    +
    +extern bool reserve_try_alloc(size_t);
    +extern void reserve_force_alloc(size_t);
    +extern void reserve_free(size_t);
     
     #endif
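
For context, a minimal sketch of how a backend is meant to drive this
interface (modeled on the anonymous backend further below; the my_*
names are hypothetical):

    #include <mm/as.h>
    #include <mm/reserve.h>

    /* Reserve physical memory for the whole area up front; fail the
     * area creation if the reservation cannot be satisfied. */
    static bool my_create(as_area_t *area)
    {
            return reserve_try_alloc(area->pages);
    }

    /* Return the reservation when the area is destroyed. */
    static void my_destroy(as_area_t *area)
    {
            reserve_free(area->pages);
    }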
  • kernel/generic/src/mm/as.c

    @@ -80,8 +80,4 @@
     #include <arch/interrupt.h>
     
    -#ifdef CONFIG_VIRT_IDX_DCACHE
    -#include <arch/mm/cache.h>
    -#endif /* CONFIG_VIRT_IDX_DCACHE */
    -
     /**
      * Each architecture decides what functions will be used to carry out
    @@ -447,4 +443,12 @@
             else
                     memsetb(&area->backend_data, sizeof(area->backend_data), 0);
    +
    +        if (area->backend && area->backend->create) {
    +                if (!area->backend->create(area)) {
    +                        free(area);
    +                        mutex_unlock(&as->lock);
    +                        return NULL;
    +                }
    +        }
     
             btree_create(&area->used_space);
    @@ -690,4 +694,12 @@
             }
     
    +        if (area->backend && area->backend->resize) {
    +                if (!area->backend->resize(area, pages)) {
    +                        mutex_unlock(&area->lock);
    +                        mutex_unlock(&as->lock);
    +                        return ENOMEM;
    +                }
    +        }
    +
             area->pages = pages;
     
    @@ -756,4 +768,7 @@
                     return ENOENT;
             }
    +
    +        if (area->backend && area->backend->destroy)
    +                area->backend->destroy(area);
     
             uintptr_t base = area->base;
  • kernel/generic/src/mm/backend_anon.c

    @@ -39,4 +39,5 @@
     #include <mm/as.h>
     #include <mm/page.h>
    +#include <mm/reserve.h>
     #include <genarch/mm/page_pt.h>
     #include <genarch/mm/page_ht.h>
    @@ -51,17 +52,95 @@
     #include <arch.h>
     
    -#ifdef CONFIG_VIRT_IDX_DCACHE
    -#include <arch/mm/cache.h>
    -#endif
    +static bool anon_create(as_area_t *);
    +static bool anon_resize(as_area_t *, size_t);
    +static void anon_share(as_area_t *);
    +static void anon_destroy(as_area_t *);
     
     static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
     static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
    -static void anon_share(as_area_t *area);
     
     mem_backend_t anon_backend = {
    +        .create = anon_create,
    +        .resize = anon_resize,
    +        .share = anon_share,
    +        .destroy = anon_destroy,
    +
             .page_fault = anon_page_fault,
             .frame_free = anon_frame_free,
    -        .share = anon_share
     };
    +
    +bool anon_create(as_area_t *area)
    +{
    +        return reserve_try_alloc(area->pages);
    +}
    +
    +bool anon_resize(as_area_t *area, size_t new_pages)
    +{
    +        if (new_pages > area->pages)
    +                return reserve_try_alloc(new_pages - area->pages);
    +        else if (new_pages < area->pages)
    +                reserve_free(area->pages - new_pages);
    +
    +        return true;
    +}
    +
    +/** Share the anonymous address space area.
    + *
    + * Sharing of anonymous area is done by duplicating its entire mapping
    + * to the pagemap. Page faults will primarily search for frames there.
    + *
    + * The address space and address space area must be already locked.
    + *
    + * @param area Address space area to be shared.
    + */
    +void anon_share(as_area_t *area)
    +{
    +        link_t *cur;
    +
    +        ASSERT(mutex_locked(&area->as->lock));
    +        ASSERT(mutex_locked(&area->lock));
    +
    +        /*
    +         * Copy used portions of the area to sh_info's page map.
    +         */
    +        mutex_lock(&area->sh_info->lock);
    +        for (cur = area->used_space.leaf_head.next;
    +            cur != &area->used_space.leaf_head; cur = cur->next) {
    +                btree_node_t *node;
    +                unsigned int i;
    +
    +                node = list_get_instance(cur, btree_node_t, leaf_link);
    +                for (i = 0; i < node->keys; i++) {
    +                        uintptr_t base = node->key[i];
    +                        size_t count = (size_t) node->value[i];
    +                        unsigned int j;
    +
    +                        for (j = 0; j < count; j++) {
    +                                pte_t *pte;
    +
    +                                page_table_lock(area->as, false);
    +                                pte = page_mapping_find(area->as,
    +                                    base + j * PAGE_SIZE);
    +                                ASSERT(pte && PTE_VALID(pte) &&
    +                                    PTE_PRESENT(pte));
    +                                btree_insert(&area->sh_info->pagemap,
    +                                    (base + j * PAGE_SIZE) - area->base,
    +                                    (void *) PTE_GET_FRAME(pte), NULL);
    +                                page_table_unlock(area->as, false);
    +
    +                                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
    +                                frame_reference_add(pfn);
    +                        }
    +
    +                }
    +        }
    +        mutex_unlock(&area->sh_info->lock);
    +}
    +
    +void anon_destroy(as_area_t *area)
    +{
    +        reserve_free(area->pages);
    +}
    +
     
     /** Service a page fault in the anonymous memory address space area.
    @@ -115,5 +194,6 @@
                             }
                             if (allocate) {
    -                                frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
    +                                frame = (uintptr_t) frame_alloc_noreserve(
    +                                    ONE_FRAME, 0);
                                     memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
     
    @@ -145,5 +225,5 @@
                      *   the different causes
                      */
    -                frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
    +                frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
                     memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
             }
    @@ -174,58 +254,5 @@
             ASSERT(mutex_locked(&area->lock));
     
    -        frame_free(frame);
    -}
    -
    -/** Share the anonymous address space area.
    - *
    - * Sharing of anonymous area is done by duplicating its entire mapping
    - * to the pagemap. Page faults will primarily search for frames there.
    - *
    - * The address space and address space area must be already locked.
    - *
    - * @param area Address space area to be shared.
    - */
    -void anon_share(as_area_t *area)
    -{
    -        link_t *cur;
    -
    -        ASSERT(mutex_locked(&area->as->lock));
    -        ASSERT(mutex_locked(&area->lock));
    -
    -        /*
    -         * Copy used portions of the area to sh_info's page map.
    -         */
    -        mutex_lock(&area->sh_info->lock);
    -        for (cur = area->used_space.leaf_head.next;
    -            cur != &area->used_space.leaf_head; cur = cur->next) {
    -                btree_node_t *node;
    -                unsigned int i;
    -
    -                node = list_get_instance(cur, btree_node_t, leaf_link);
    -                for (i = 0; i < node->keys; i++) {
    -                        uintptr_t base = node->key[i];
    -                        size_t count = (size_t) node->value[i];
    -                        unsigned int j;
    -
    -                        for (j = 0; j < count; j++) {
    -                                pte_t *pte;
    -
    -                                page_table_lock(area->as, false);
    -                                pte = page_mapping_find(area->as,
    -                                    base + j * PAGE_SIZE);
    -                                ASSERT(pte && PTE_VALID(pte) &&
    -                                    PTE_PRESENT(pte));
    -                                btree_insert(&area->sh_info->pagemap,
    -                                    (base + j * PAGE_SIZE) - area->base,
    -                                    (void *) PTE_GET_FRAME(pte), NULL);
    -                                page_table_unlock(area->as, false);
    -
    -                                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
    -                                frame_reference_add(pfn);
    -                        }
    -
    -                }
    -        }
    -        mutex_unlock(&area->sh_info->lock);
    +        frame_free_noreserve(frame);
     }
     
  • kernel/generic/src/mm/backend_elf.c

    @@ -43,4 +43,5 @@
     #include <mm/slab.h>
     #include <mm/page.h>
    +#include <mm/reserve.h>
     #include <genarch/mm/page_pt.h>
     #include <genarch/mm/page_ht.h>
    @@ -51,219 +52,53 @@
     #include <arch/barrier.h>
     
    -#ifdef CONFIG_VIRT_IDX_DCACHE
    -#include <arch/mm/cache.h>
    -#endif
    +static bool elf_create(as_area_t *);
    +static bool elf_resize(as_area_t *, size_t);
    +static void elf_share(as_area_t *);
    +static void elf_destroy(as_area_t *);
     
     static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
     static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
    -static void elf_share(as_area_t *area);
     
     mem_backend_t elf_backend = {
    +        .create = elf_create,
    +        .resize = elf_resize,
    +        .share = elf_share,
    +        .destroy = elf_destroy,
    +
             .page_fault = elf_page_fault,
             .frame_free = elf_frame_free,
    -        .share = elf_share
     };
     
    -/** Service a page fault in the ELF backend address space area.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area          Pointer to the address space area.
    - * @param addr          Faulting virtual address.
    - * @param access        Access mode that caused the fault (i.e.
    - *                      read/write/exec).
    - *
    - * @return              AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
    - *                      on success (i.e. serviced).
    - */
    -int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    -{
    -        elf_header_t *elf = area->backend_data.elf;
    -        elf_segment_header_t *entry = area->backend_data.segment;
    -        btree_node_t *leaf;
    -        uintptr_t base, frame, page, start_anon;
    -        size_t i;
    -        bool dirty = false;
    -
    -        ASSERT(page_table_locked(AS));
    -        ASSERT(mutex_locked(&area->lock));
    -
    -        if (!as_area_check_access(area, access))
    -                return AS_PF_FAULT;
    +bool elf_create(as_area_t *area)
    +{
    +        elf_segment_header_t *entry = area->backend_data.segment;
    +        size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
    +
    +        if (area->pages <= nonanon_pages)
    +                return true;
     
    -        if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
    -                return AS_PF_FAULT;
    +        return reserve_try_alloc(area->pages - nonanon_pages);
    +}
    +
    +bool elf_resize(as_area_t *area, size_t new_pages)
    +{
    +        elf_segment_header_t *entry = area->backend_data.segment;
    +        size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
    +
    +        if (new_pages > area->pages) {
    +                /* The area is growing. */
    +                if (area->pages >= nonanon_pages)
    +                        return reserve_try_alloc(new_pages - area->pages);
    +                else if (new_pages > nonanon_pages)
    +                        return reserve_try_alloc(new_pages - nonanon_pages);
    +        } else if (new_pages < area->pages) {
    +                /* The area is shrinking. */
    +                if (new_pages >= nonanon_pages)
    +                        reserve_free(area->pages - new_pages);
    +                else if (area->pages > nonanon_pages)
    +                        reserve_free(nonanon_pages - new_pages);
    +        }
     
    -        if (addr >= entry->p_vaddr + entry->p_memsz)
    -                return AS_PF_FAULT;
    -
    -        i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    -        base = (uintptr_t)
    -            (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
    -
    -        /* Virtual address of faulting page*/
    -        page = ALIGN_DOWN(addr, PAGE_SIZE);
    -
    -        /* Virtual address of the end of initialized part of segment */
    -        start_anon = entry->p_vaddr + entry->p_filesz;
    -
    -        if (area->sh_info) {
    -                bool found = false;
    -
    -                /*
    -                 * The address space area is shared.
    -                 */
    -
    -                mutex_lock(&area->sh_info->lock);
    -                frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
    -                    page - area->base, &leaf);
    -                if (!frame) {
    -                        unsigned int i;
    -
    -                        /*
    -                         * Workaround for valid NULL address.
    -                         */
    -
    -                        for (i = 0; i < leaf->keys; i++) {
    -                                if (leaf->key[i] == page - area->base) {
    -                                        found = true;
    -                                        break;
    -                                }
    -                        }
    -                }
    -                if (frame || found) {
    -                        frame_reference_add(ADDR2PFN(frame));
    -                        page_mapping_insert(AS, addr, frame,
    -                            as_area_get_flags(area));
    -                        if (!used_space_insert(area, page, 1))
    -                                panic("Cannot insert used space.");
    -                        mutex_unlock(&area->sh_info->lock);
    -                        return AS_PF_OK;
    -                }
    -        }
    -
    -        /*
    -         * The area is either not shared or the pagemap does not contain the
    -         * mapping.
    -         */
    -        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    -                /*
    -                 * Initialized portion of the segment. The memory is backed
    -                 * directly by the content of the ELF image. Pages are
    -                 * only copied if the segment is writable so that there
    -                 * can be more instantions of the same memory ELF image
    -                 * used at a time. Note that this could be later done
    -                 * as COW.
    -                 */
    -                if (entry->p_flags & PF_W) {
    -                        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
    -                        memcpy((void *) PA2KA(frame),
    -                            (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
    -                        if (entry->p_flags & PF_X) {
    -                                smc_coherence_block((void *) PA2KA(frame),
    -                                    FRAME_SIZE);
    -                        }
    -                        dirty = true;
    -                } else {
    -                        frame = KA2PA(base + i * FRAME_SIZE);
    -                }
    -        } else if (page >= start_anon) {
    -                /*
    -                 * This is the uninitialized portion of the segment.
    -                 * It is not physically present in the ELF image.
    -                 * To resolve the situation, a frame must be allocated
    -                 * and cleared.
    -                 */
    -                frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
    -                memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
    -                dirty = true;
    -        } else {
    -                size_t pad_lo, pad_hi;
    -                /*
    -                 * The mixed case.
    -                 *
    -                 * The middle part is backed by the ELF image and
    -                 * the lower and upper parts are anonymous memory.
    -                 * (The segment can be and often is shorter than 1 page).
    -                 */
    -                if (page < entry->p_vaddr)
    -                        pad_lo = entry->p_vaddr - page;
    -                else
    -                        pad_lo = 0;
    -
    -                if (start_anon < page + PAGE_SIZE)
    -                        pad_hi = page + PAGE_SIZE - start_anon;
    -                else
    -                        pad_hi = 0;
    -
    -                frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
    -                memcpy((void *) (PA2KA(frame) + pad_lo),
    -                    (void *) (base + i * FRAME_SIZE + pad_lo),
    -                    FRAME_SIZE - pad_lo - pad_hi);
    -                if (entry->p_flags & PF_X) {
    -                        smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
    -                            FRAME_SIZE - pad_lo - pad_hi);
    -                }
    -                memsetb((void *) PA2KA(frame), pad_lo, 0);
    -                memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
    -                    0);
    -                dirty = true;
    -        }
    -
    -        if (dirty && area->sh_info) {
    -                frame_reference_add(ADDR2PFN(frame));
    -                btree_insert(&area->sh_info->pagemap, page - area->base,
    -                    (void *) frame, leaf);
    -        }
    -
    -        if (area->sh_info)
    -                mutex_unlock(&area->sh_info->lock);
    -
    -        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    -        if (!used_space_insert(area, page, 1))
    -                panic("Cannot insert used space.");
    -
    -        return AS_PF_OK;
    -}
    -
    -/** Free a frame that is backed by the ELF backend.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area          Pointer to the address space area.
    - * @param page          Page that is mapped to frame. Must be aligned to
    - *                      PAGE_SIZE.
    - * @param frame         Frame to be released.
    - *
    - */
    -void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
    -{
    -        elf_segment_header_t *entry = area->backend_data.segment;
    -        uintptr_t start_anon;
    -
    -        ASSERT(page_table_locked(area->as));
    -        ASSERT(mutex_locked(&area->lock));
    -
    -        ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
    -        ASSERT(page < entry->p_vaddr + entry->p_memsz);
    -
    -        start_anon = entry->p_vaddr + entry->p_filesz;
    -
    -        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    -                if (entry->p_flags & PF_W) {
    -                        /*
    -                         * Free the frame with the copy of writable segment
    -                         * data.
    -                         */
    -                        frame_free(frame);
    -                }
    -        } else {
    -                /*
    -                 * The frame is either anonymous memory or the mixed case (i.e.
    -                 * lower part is backed by the ELF image and the upper is
    -                 * anonymous). In any case, a frame needs to be freed.
    -                 */
    -                frame_free(frame);
    -        }
    +        return true;
     }
     
    @@ -356,4 +191,216 @@
     }
     
    +void elf_destroy(as_area_t *area)
    +{
    +        elf_segment_header_t *entry = area->backend_data.segment;
    +        size_t nonanon_pages = ALIGN_DOWN(entry->p_filesz, PAGE_SIZE);
    +
    +        if (area->pages > nonanon_pages)
    +                reserve_free(area->pages - nonanon_pages);
    +}
    +
    +/** Service a page fault in the ELF backend address space area.
    + *
    + * The address space area and page tables must be already locked.
    + *
    + * @param area          Pointer to the address space area.
    + * @param addr          Faulting virtual address.
    + * @param access        Access mode that caused the fault (i.e.
    + *                      read/write/exec).
    + *
    + * @return              AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
    + *                      on success (i.e. serviced).
    + */
    +int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    +{
    +        elf_header_t *elf = area->backend_data.elf;
    +        elf_segment_header_t *entry = area->backend_data.segment;
    +        btree_node_t *leaf;
    +        uintptr_t base, frame, page, start_anon;
    +        size_t i;
    +        bool dirty = false;
    +
    +        ASSERT(page_table_locked(AS));
    +        ASSERT(mutex_locked(&area->lock));
    +
    +        if (!as_area_check_access(area, access))
    +                return AS_PF_FAULT;
    +
    +        if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
    +                return AS_PF_FAULT;
    +
    +        if (addr >= entry->p_vaddr + entry->p_memsz)
    +                return AS_PF_FAULT;
    +
    +        i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
    +        base = (uintptr_t)
    +            (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
    +
    +        /* Virtual address of faulting page*/
    +        page = ALIGN_DOWN(addr, PAGE_SIZE);
    +
    +        /* Virtual address of the end of initialized part of segment */
    +        start_anon = entry->p_vaddr + entry->p_filesz;
    +
    +        if (area->sh_info) {
    +                bool found = false;
    +
    +                /*
    +                 * The address space area is shared.
    +                 */
    +
    +                mutex_lock(&area->sh_info->lock);
    +                frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
    +                    page - area->base, &leaf);
    +                if (!frame) {
    +                        unsigned int i;
    +
    +                        /*
    +                         * Workaround for valid NULL address.
    +                         */
    +
    +                        for (i = 0; i < leaf->keys; i++) {
    +                                if (leaf->key[i] == page - area->base) {
    +                                        found = true;
    +                                        break;
    +                                }
    +                        }
    +                }
    +                if (frame || found) {
    +                        frame_reference_add(ADDR2PFN(frame));
    +                        page_mapping_insert(AS, addr, frame,
    +                            as_area_get_flags(area));
    +                        if (!used_space_insert(area, page, 1))
    +                                panic("Cannot insert used space.");
    +                        mutex_unlock(&area->sh_info->lock);
    +                        return AS_PF_OK;
    +                }
    +        }
    +
    +        /*
    +         * The area is either not shared or the pagemap does not contain the
    +         * mapping.
    +         */
    +        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    +                /*
    +                 * Initialized portion of the segment. The memory is backed
    +                 * directly by the content of the ELF image. Pages are
    +                 * only copied if the segment is writable so that there
    +                 * can be more instantions of the same memory ELF image
    +                 * used at a time. Note that this could be later done
    +                 * as COW.
    +                 */
    +                if (entry->p_flags & PF_W) {
    +                        frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);
    +                        memcpy((void *) PA2KA(frame),
    +                            (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
    +                        if (entry->p_flags & PF_X) {
    +                                smc_coherence_block((void *) PA2KA(frame),
    +                                    FRAME_SIZE);
    +                        }
    +                        dirty = true;
    +                } else {
    +                        frame = KA2PA(base + i * FRAME_SIZE);
    +                }
    +        } else if (page >= start_anon) {
    +                /*
    +                 * This is the uninitialized portion of the segment.
    +                 * It is not physically present in the ELF image.
    +                 * To resolve the situation, a frame must be allocated
    +                 * and cleared.
    +                 */
    +                frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
    +                memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
    +                dirty = true;
    +        } else {
    +                size_t pad_lo, pad_hi;
    +                /*
    +                 * The mixed case.
    +                 *
    +                 * The middle part is backed by the ELF image and
    +                 * the lower and upper parts are anonymous memory.
    +                 * (The segment can be and often is shorter than 1 page).
    +                 */
    +                if (page < entry->p_vaddr)
    +                        pad_lo = entry->p_vaddr - page;
    +                else
    +                        pad_lo = 0;
    +
    +                if (start_anon < page + PAGE_SIZE)
    +                        pad_hi = page + PAGE_SIZE - start_anon;
    +                else
    +                        pad_hi = 0;
    +
    +                frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
    +                memcpy((void *) (PA2KA(frame) + pad_lo),
    +                    (void *) (base + i * FRAME_SIZE + pad_lo),
    +                    FRAME_SIZE - pad_lo - pad_hi);
    +                if (entry->p_flags & PF_X) {
    +                        smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
    +                            FRAME_SIZE - pad_lo - pad_hi);
    +                }
    +                memsetb((void *) PA2KA(frame), pad_lo, 0);
    +                memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
    +                    0);
    +                dirty = true;
    +        }
    +
    +        if (dirty && area->sh_info) {
    +                frame_reference_add(ADDR2PFN(frame));
    +                btree_insert(&area->sh_info->pagemap, page - area->base,
    +                    (void *) frame, leaf);
    +        }
    +
    +        if (area->sh_info)
    +                mutex_unlock(&area->sh_info->lock);
    +
    +        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    +        if (!used_space_insert(area, page, 1))
    +                panic("Cannot insert used space.");
    +
    +        return AS_PF_OK;
    +}
    +
    +/** Free a frame that is backed by the ELF backend.
    + *
    + * The address space area and page tables must be already locked.
    + *
    + * @param area          Pointer to the address space area.
    + * @param page          Page that is mapped to frame. Must be aligned to
    + *                      PAGE_SIZE.
    + * @param frame         Frame to be released.
    + *
    + */
    +void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
    +{
    +        elf_segment_header_t *entry = area->backend_data.segment;
    +        uintptr_t start_anon;
    +
    +        ASSERT(page_table_locked(area->as));
    +        ASSERT(mutex_locked(&area->lock));
    +
    +        ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
    +        ASSERT(page < entry->p_vaddr + entry->p_memsz);
    +
    +        start_anon = entry->p_vaddr + entry->p_filesz;
    +
    +        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
    +                if (entry->p_flags & PF_W) {
    +                        /*
    +                         * Free the frame with the copy of writable segment
    +                         * data.
    +                         */
    +                        frame_free_noreserve(frame);
    +                }
    +        } else {
    +                /*
    +                 * The frame is either anonymous memory or the mixed case (i.e.
    +                 * lower part is backed by the ELF image and the upper is
    +                 * anonymous). In any case, a frame needs to be freed.
    +                 */
    +                frame_free_noreserve(frame);
    +        }
    +}
    +
     /** @}
      */
  • kernel/generic/src/mm/backend_phys.c

    @@ -48,12 +48,43 @@
     #include <align.h>
     
    +static bool phys_create(as_area_t *);
    +static void phys_share(as_area_t *);
    +static void phys_destroy(as_area_t *);
    +
     static int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
    -static void phys_share(as_area_t *area);
     
     mem_backend_t phys_backend = {
    +        .create = phys_create,
    +        .resize = NULL,
    +        .share = phys_share,
    +        .destroy = phys_destroy,
    +
             .page_fault = phys_page_fault,
             .frame_free = NULL,
    -        .share = phys_share
     };
    +
    +bool phys_create(as_area_t *area)
    +{
    +        return true;
    +}
    +
    +/** Share address space area backed by physical memory.
    + *
    + * Do actually nothing as sharing of address space areas
    + * that are backed up by physical memory is very easy.
    + * Note that the function must be defined so that
    + * as_area_share() will succeed.
    + */
    +void phys_share(as_area_t *area)
    +{
    +        ASSERT(mutex_locked(&area->as->lock));
    +        ASSERT(mutex_locked(&area->lock));
    +}
    +
    +
    +void phys_destroy(as_area_t *area)
    +{
    +        /* Nothing to do. */
    +}
     
     /** Service a page fault in the address space area backed by physical memory.
    @@ -88,17 +119,4 @@
     }
     
    -/** Share address space area backed by physical memory.
    - *
    - * Do actually nothing as sharing of address space areas
    - * that are backed up by physical memory is very easy.
    - * Note that the function must be defined so that
    - * as_area_share() will succeed.
    - */
    -void phys_share(as_area_t *area)
    -{
    -        ASSERT(mutex_locked(&area->as->lock));
    -        ASSERT(mutex_locked(&area->lock));
    -}
    -
     /** @}
      */
  • kernel/generic/src/mm/frame.c

    @@ -45,4 +45,5 @@
     #include <typedefs.h>
     #include <mm/frame.h>
    +#include <mm/reserve.h>
     #include <mm/as.h>
     #include <panic.h>
    @@ -472,13 +473,13 @@
      * @param frame_idx Frame index relative to zone.
      *
    - */
    -NO_TRACE static void zone_frame_free(zone_t *zone, size_t frame_idx)
    + * @return          Number of freed frames.
    + *
    + */
    +NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
     {
             ASSERT(zone_flags_available(zone->flags));
     
             frame_t *frame = &zone->frames[frame_idx];
    -
    -        /* Remember frame order */
    -        uint8_t order = frame->buddy_order;
    +        size_t size = 1 << frame->buddy_order;
     
             ASSERT(frame->refcount);
    @@ -488,7 +489,9 @@
     
                     /* Update zone information. */
    -                zone->free_count += (1 << order);
    -                zone->busy_count -= (1 << order);
    -        }
    +                zone->free_count += size;
    +                zone->busy_count -= size;
    +        }
    +
    +        return size;
     }
     
    @@ -516,4 +519,5 @@
             ASSERT(link);
             zone->free_count--;
    +        reserve_force_alloc(1);
     }
     
    @@ -645,5 +649,5 @@
             for (i = 0; i < cframes; i++) {
                     zones.info[znum].busy_count++;
    -                zone_frame_free(&zones.info[znum],
    +                (void) zone_frame_free(&zones.info[znum],
                         pfn - zones.info[znum].base + i);
             }
    @@ -683,5 +687,5 @@
             /* Free unneeded frames */
             for (i = count; i < (size_t) (1 << order); i++)
    -                zone_frame_free(&zones.info[znum], i + frame_idx);
    +                (void) zone_frame_free(&zones.info[znum], i + frame_idx);
     }
     
    @@ -695,6 +699,4 @@
      * not to be 2^order size. Once the allocator is running it is no longer
      * possible, merged configuration data occupies more space :-/
    - *
    - * The function uses
      *
      */
    @@ -837,4 +839,7 @@
                             buddy_system_free(zone->buddy_system, &zone->frames[i].buddy_link);
                     }
    +
    +                /* "Unreserve" new frames. */
    +                reserve_free(count);
             } else
                     zone->frames = NULL;
    @@ -999,4 +1004,16 @@
             size_t hint = pzone ? (*pzone) : 0;
     
    +        /*
    +         * If not told otherwise, we must first reserve the memory.
    +         */
    +        if (!(flags & FRAME_NO_RESERVE)) {
    +                if (flags & FRAME_ATOMIC) {
    +                        if (!reserve_try_alloc(size))
    +                                return NULL;
    +                } else {
    +                        reserve_force_alloc(size);
    +                }
    +        }
    +
     loop:
             irq_spinlock_lock(&zones.lock, true);
    @@ -1033,4 +1050,6 @@
                     if (flags & FRAME_ATOMIC) {
                             irq_spinlock_unlock(&zones.lock, true);
    +                        if (!(flags & FRAME_NO_RESERVE))
    +                                reserve_free(size);
                             return NULL;
                     }
    @@ -1088,4 +1107,14 @@
     }
     
    +void *frame_alloc(uint8_t order, frame_flags_t flags)
    +{
    +        return frame_alloc_generic(order, flags, NULL);
    +}
    +
    +void *frame_alloc_noreserve(uint8_t order, frame_flags_t flags)
    +{
    +        return frame_alloc_generic(order, flags | FRAME_NO_RESERVE, NULL);
    +}
    +
     /** Free a frame.
      *
    @@ -1095,8 +1124,11 @@
      *
      * @param frame Physical Address of of the frame to be freed.
    - *
    - */
    -void frame_free(uintptr_t frame)
    -{
    + * @param flags Flags to control memory reservation.
    + *
    + */
    +void frame_free_generic(uintptr_t frame, frame_flags_t flags)
    +{
    +        size_t size;
    +
             irq_spinlock_lock(&zones.lock, true);
     
    @@ -1106,8 +1138,9 @@
             pfn_t pfn = ADDR2PFN(frame);
             size_t znum = find_zone(pfn, 1, 0);
    +
     
             ASSERT(znum != (size_t) -1);
     
    -        zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
    +        size = zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
     
             irq_spinlock_unlock(&zones.lock, true);
    @@ -1118,5 +1151,5 @@
             mutex_lock(&mem_avail_mtx);
             if (mem_avail_req > 0)
    -                mem_avail_req--;
    +                mem_avail_req -= min(mem_avail_req, size);
     
             if (mem_avail_req == 0) {
    @@ -1125,4 +1158,17 @@
             }
             mutex_unlock(&mem_avail_mtx);
    +
    +        if (!(flags & FRAME_NO_RESERVE))
    +                reserve_free(size);
    +}
    +
    +void frame_free(uintptr_t frame)
    +{
    +        frame_free_generic(frame, 0);
    +}
    +
    +void frame_free_noreserve(uintptr_t frame)
    +{
    +        frame_free_generic(frame, FRAME_NO_RESERVE);
     }
     
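
To make the two allocation/free pairs visible side by side, a hedged
sketch of a caller (the function is hypothetical; the interfaces are
the ones declared in mm/frame.h above):

    #include <mm/frame.h>

    static void reservation_demo(void)
    {
            /* Reserving pair: reserves on allocation, unreserves on free. */
            uintptr_t a = (uintptr_t) frame_alloc(0, FRAME_ATOMIC);
            if (a)
                    frame_free(a);

            /*
             * Non-reserving pair: skips the reservation accounting, for
             * memory already reserved earlier, e.g. by the anonymous and
             * ELF backends at as_area_create()/as_area_resize() time.
             */
            uintptr_t b = (uintptr_t) frame_alloc_noreserve(0, FRAME_ATOMIC);
            if (b)
                    frame_free_noreserve(b);
    }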
  • uspace/app/tester/Makefile

    @@ -49,4 +49,5 @@
             loop/loop1.c \
             mm/malloc1.c \
    +        mm/malloc2.c \
             devs/devman1.c \
             hw/misc/virtchar1.c \
  • uspace/app/tester/tester.c

    @@ -62,4 +62,5 @@
     #include "loop/loop1.def"
     #include "mm/malloc1.def"
    +#include "mm/malloc2.def"
     #include "hw/serial/serial1.def"
     #include "hw/misc/virtchar1.def"
  • uspace/app/tester/tester.h

    @@ -78,4 +78,5 @@
     extern const char *test_loop1(void);
     extern const char *test_malloc1(void);
    +extern const char *test_malloc2(void);
     extern const char *test_serial1(void);
     extern const char *test_virtchar1(void);