Changeset e5a015b in mainline for kernel/generic/src/mm/backend_anon.c


Timestamp:
2011-04-16T20:45:36Z
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
a7dbd49
Parents:
b2fb47f (diff), 9e953bda (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge the memory reservation feature (Phase 1) from
lp:~jakub/helenos/mm.

This merge makes the testcase from ticket #114 non-reproducible; the
testcase is now available as the tester's malloc2 test. The merge also
seems to make it harder for the system to run out of memory during
kconsole 'test *' and 'tester *', although I have still seen several
hangs with this feature in place. See below for what is still missing
to make the hangs less probable, or impossible altogether.

In Phase 1, I am targeting just the low-hanging fruit. In particular,
physical memory is reserved only for anonymous and ELF backend pages,
at the time of as_area_create() and as_area_resize(). Memory is
unreserved on as_area_destroy(). In all other cases, memory is
reserved at the same time as it is allocated, making those calls
subject to infinite blocking unless FRAME_ATOMIC is used.
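
To make these semantics concrete, here is a minimal model of the
reservation accounting described above. The names reserve_try_alloc()
and reserve_free() are taken from the diff below; the single global
counter, the lack of locking, and the blocking reserve_force_alloc()
variant are assumptions made purely for illustration, not the actual
contents of <mm/reserve.h>.

#include <stdbool.h>
#include <stddef.h>

/* Frames still available for reservation; in the kernel this would be
 * seeded from zone_construct() and protected by a lock. */
static size_t reservable_frames;

/* FRAME_ATOMIC-style path: fail immediately instead of blocking. */
bool reserve_try_alloc(size_t size)
{
	if (reservable_frames < size)
		return false;
	reservable_frames -= size;
	return true;
}

/* Hypothetical blocking path: waits until enough frames are returned,
 * which is why callers that do not use FRAME_ATOMIC can block forever
 * when the reservation can never be satisfied. */
void reserve_force_alloc(size_t size)
{
	while (!reserve_try_alloc(size))
		;	/* in the kernel: sleep until reserve_free() */
}

/* Return previously reserved frames to the pool. */
void reserve_free(size_t size)
{
	reservable_frames += size;
}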

Possible sources of memory overcommit not addressed in this merge:

  • As mentioned above, only backend pages are reserved; pages for supporting structures such as B+tree nodes and TTEs are neither reserved nor otherwise accounted for. Kernel heap allocator fragmentation is not included in the reservations either.
  • The initial amount of reservable memory is seeded from zone_construct(). Zone merging is not taken into account, which can make the reservable-memory accounting inaccurate.
File:
1 edited

  • kernel/generic/src/mm/backend_anon.c

--- kernel/generic/src/mm/backend_anon.c	(rb2fb47f)
+++ kernel/generic/src/mm/backend_anon.c	(re5a015b)
@@ -39,4 +39,5 @@
 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -51,17 +52,95 @@
 #include <arch.h>
 
-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *);
+static void anon_destroy(as_area_t *);
 
 static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
 
 mem_backend_t anon_backend = {
+	.create = anon_create,
+	.resize = anon_resize,
+	.share = anon_share,
+	.destroy = anon_destroy,
+
 	.page_fault = anon_page_fault,
 	.frame_free = anon_frame_free,
-	.share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+	return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+	if (new_pages > area->pages)
+		return reserve_try_alloc(new_pages - area->pages);
+	else if (new_pages < area->pages)
+		reserve_free(area->pages - new_pages);
+
+	return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+	link_t *cur;
+
+	ASSERT(mutex_locked(&area->as->lock));
+	ASSERT(mutex_locked(&area->lock));
+
+	/*
+	 * Copy used portions of the area to sh_info's page map.
+	 */
+	mutex_lock(&area->sh_info->lock);
+	for (cur = area->used_space.leaf_head.next;
+	    cur != &area->used_space.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+		unsigned int i;
+
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+		for (i = 0; i < node->keys; i++) {
+			uintptr_t base = node->key[i];
+			size_t count = (size_t) node->value[i];
+			unsigned int j;
+
+			for (j = 0; j < count; j++) {
+				pte_t *pte;
+
+				page_table_lock(area->as, false);
+				pte = page_mapping_find(area->as,
+				    base + j * PAGE_SIZE);
+				ASSERT(pte && PTE_VALID(pte) &&
+				    PTE_PRESENT(pte));
+				btree_insert(&area->sh_info->pagemap,
+				    (base + j * PAGE_SIZE) - area->base,
+				    (void *) PTE_GET_FRAME(pte), NULL);
+				page_table_unlock(area->as, false);
+
+				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+				frame_reference_add(pfn);
+			}
+
+		}
+	}
+	mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+	reserve_free(area->pages);
+}
+
 
 /** Service a page fault in the anonymous memory address space area.
@@ -115,5 +194,6 @@
 			}
 			if (allocate) {
-				frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+				frame = (uintptr_t) frame_alloc_noreserve(
+				    ONE_FRAME, 0);
 				memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 
@@ -145,5 +225,5 @@
 		 *   the different causes
 		 */
-		frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
 		memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
 	}
@@ -174,58 +254,5 @@
 	ASSERT(mutex_locked(&area->lock));
 
-	frame_free(frame);
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-	link_t *cur;
-
-	ASSERT(mutex_locked(&area->as->lock));
-	ASSERT(mutex_locked(&area->lock));
-
-	/*
-	 * Copy used portions of the area to sh_info's page map.
-	 */
-	mutex_lock(&area->sh_info->lock);
-	for (cur = area->used_space.leaf_head.next;
-	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node;
-		unsigned int i;
-
-		node = list_get_instance(cur, btree_node_t, leaf_link);
-		for (i = 0; i < node->keys; i++) {
-			uintptr_t base = node->key[i];
-			size_t count = (size_t) node->value[i];
-			unsigned int j;
-
-			for (j = 0; j < count; j++) {
-				pte_t *pte;
-
-				page_table_lock(area->as, false);
-				pte = page_mapping_find(area->as,
-				    base + j * PAGE_SIZE);
-				ASSERT(pte && PTE_VALID(pte) &&
-				    PTE_PRESENT(pte));
-				btree_insert(&area->sh_info->pagemap,
-				    (base + j * PAGE_SIZE) - area->base,
-				    (void *) PTE_GET_FRAME(pte), NULL);
-				page_table_unlock(area->as, false);
-
-				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-				frame_reference_add(pfn);
-			}
-
-		}
-	}
-	mutex_unlock(&area->sh_info->lock);
+	frame_free_noreserve(frame);
 }
 
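
An aside on the dispatch mechanism: anon_backend is a struct of
function pointers, so the reservation policy only takes effect where
the address space code invokes the new hooks. The sketch below models
the caller side as a self-contained user-space program; the simplified
types, the toy reservation pool, and the area_create() logic are
assumptions for illustration, not the code from
kernel/generic/src/mm/as.c (which this changeset does not show).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel types. */
typedef struct as_area as_area_t;

typedef struct mem_backend {
	bool (*create)(as_area_t *);
	void (*destroy)(as_area_t *);
} mem_backend_t;

struct as_area {
	size_t pages;
	mem_backend_t *backend;
};

/* Toy reservation pool, as in the model sketched earlier. */
static size_t reservable_frames = 64;

static bool reserve_try_alloc(size_t size)
{
	if (reservable_frames < size)
		return false;
	reservable_frames -= size;
	return true;
}

static void reserve_free(size_t size)
{
	reservable_frames += size;
}

/* Backend hooks mirroring the shape of those added in the diff. */
static bool anon_create(as_area_t *area)
{
	return reserve_try_alloc(area->pages);
}

static void anon_destroy(as_area_t *area)
{
	reserve_free(area->pages);
}

static mem_backend_t anon_backend = {
	.create = anon_create,
	.destroy = anon_destroy,
};

/* Caller side: refuse to create the area if the backend cannot
 * reserve physical memory for it up front (no overcommit). */
static as_area_t *area_create(size_t pages)
{
	as_area_t *area = malloc(sizeof(*area));
	if (area == NULL)
		return NULL;
	area->pages = pages;
	area->backend = &anon_backend;
	if (!area->backend->create(area)) {
		free(area);
		return NULL;
	}
	return area;
}

int main(void)
{
	as_area_t *a = area_create(48);	/* succeeds: 48 <= 64 */
	as_area_t *b = area_create(48);	/* fails: only 16 frames left */
	printf("a=%s b=%s\n", a ? "created" : "refused",
	    b ? "created" : "refused");
	if (a != NULL) {
		a->backend->destroy(a);
		free(a);
	}
	return 0;
}

The property this models is the one the merge is after: failure
surfaces at area-creation time rather than as an out-of-memory
condition at page-fault time.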