as.c

Go to the documentation of this file.
00001 /*
00002  * Copyright (C) 2001-2006 Jakub Jermar
00003  * All rights reserved.
00004  *
00005  * Redistribution and use in source and binary forms, with or without
00006  * modification, are permitted provided that the following conditions
00007  * are met:
00008  *
00009  * - Redistributions of source code must retain the above copyright
00010  *   notice, this list of conditions and the following disclaimer.
00011  * - Redistributions in binary form must reproduce the above copyright
00012  *   notice, this list of conditions and the following disclaimer in the
00013  *   documentation and/or other materials provided with the distribution.
00014  * - The name of the author may not be used to endorse or promote products
00015  *   derived from this software without specific prior written permission.
00016  *
00017  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
00018  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
00019  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
00020  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
00021  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
00022  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00023  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00024  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00025  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
00026  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00027  */
00028 
00049 #include <mm/as.h>
00050 #include <arch/mm/as.h>
00051 #include <mm/page.h>
00052 #include <mm/frame.h>
00053 #include <mm/slab.h>
00054 #include <mm/tlb.h>
00055 #include <arch/mm/page.h>
00056 #include <genarch/mm/page_pt.h>
00057 #include <genarch/mm/page_ht.h>
00058 #include <mm/asid.h>
00059 #include <arch/mm/asid.h>
00060 #include <synch/spinlock.h>
00061 #include <synch/mutex.h>
00062 #include <adt/list.h>
00063 #include <adt/btree.h>
00064 #include <proc/task.h>
00065 #include <proc/thread.h>
00066 #include <arch/asm.h>
00067 #include <panic.h>
00068 #include <debug.h>
00069 #include <print.h>
00070 #include <memstr.h>
00071 #include <macros.h>
00072 #include <arch.h>
00073 #include <errno.h>
00074 #include <config.h>
00075 #include <align.h>
00076 #include <arch/types.h>
00077 #include <typedefs.h>
00078 #include <syscall/copy.h>
00079 #include <arch/interrupt.h>
00080 
/** Architecture-specific address space operations (set up by as_arch_init()). */
as_operations_t *as_operations = NULL;

/** Protects the list of inactive address spaces that still hold a valid ASID. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * List of address spaces that are not active on any processor but still own
 * an ASID (see as_switch(); entries are removed again on reactivation or in
 * as_destroy()).
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space, created once by as_init(). */
as_t *AS_KERNEL = NULL;

/* Forward declarations of file-local helpers. */
static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);
00101 void as_init(void)
00102 {
00103         as_arch_init();
00104         AS_KERNEL = as_create(FLAG_AS_KERNEL);
00105         if (!AS_KERNEL)
00106                 panic("can't create kernel address space\n");
00107         
00108 }
00109 
00114 as_t *as_create(int flags)
00115 {
00116         as_t *as;
00117 
00118         as = (as_t *) malloc(sizeof(as_t), 0);
00119         link_initialize(&as->inactive_as_with_asid_link);
00120         mutex_initialize(&as->lock);
00121         btree_create(&as->as_area_btree);
00122         
00123         if (flags & FLAG_AS_KERNEL)
00124                 as->asid = ASID_KERNEL;
00125         else
00126                 as->asid = ASID_INVALID;
00127         
00128         as->refcount = 0;
00129         as->cpu_refcount = 0;
00130         as->page_table = page_table_create(flags);
00131 
00132         return as;
00133 }
00134 
/** Destroy an address space.
 *
 * Tears down an address space with no remaining references: returns its ASID,
 * destroys every address space area, destroys the page table and frees the
 * structure itself.
 *
 * @param as Address space to be destroyed; as->refcount must be zero.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);
	
	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		/* Only inactive address spaces (cpu_refcount == 0) sit on the
		 * inactive_as_with_asid list; see as_switch(). */
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */	
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

		/* Repeatedly destroy the first area recorded in the first
		 * leaf until no keys remain. */
		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);
	
	free(as);
}
00183 
00198 as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
00199                mem_backend_t *backend, mem_backend_data_t *backend_data)
00200 {
00201         ipl_t ipl;
00202         as_area_t *a;
00203         
00204         if (base % PAGE_SIZE)
00205                 return NULL;
00206 
00207         if (!size)
00208                 return NULL;
00209 
00210         /* Writeable executable areas are not supported. */
00211         if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
00212                 return NULL;
00213         
00214         ipl = interrupts_disable();
00215         mutex_lock(&as->lock);
00216         
00217         if (!check_area_conflicts(as, base, size, NULL)) {
00218                 mutex_unlock(&as->lock);
00219                 interrupts_restore(ipl);
00220                 return NULL;
00221         }
00222         
00223         a = (as_area_t *) malloc(sizeof(as_area_t), 0);
00224 
00225         mutex_initialize(&a->lock);
00226         
00227         a->as = as;
00228         a->flags = flags;
00229         a->attributes = attrs;
00230         a->pages = SIZE2FRAMES(size);
00231         a->base = base;
00232         a->sh_info = NULL;
00233         a->backend = backend;
00234         if (backend_data)
00235                 a->backend_data = *backend_data;
00236         else
00237                 memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
00238 
00239         btree_create(&a->used_space);
00240         
00241         btree_insert(&as->as_area_btree, base, (void *) a, NULL);
00242 
00243         mutex_unlock(&as->lock);
00244         interrupts_restore(ipl);
00245 
00246         return a;
00247 }
00248 
00258 int as_area_resize(as_t *as, __address address, size_t size, int flags)
00259 {
00260         as_area_t *area;
00261         ipl_t ipl;
00262         size_t pages;
00263         
00264         ipl = interrupts_disable();
00265         mutex_lock(&as->lock);
00266         
00267         /*
00268          * Locate the area.
00269          */
00270         area = find_area_and_lock(as, address);
00271         if (!area) {
00272                 mutex_unlock(&as->lock);
00273                 interrupts_restore(ipl);
00274                 return ENOENT;
00275         }
00276 
00277         if (area->backend == &phys_backend) {
00278                 /*
00279                  * Remapping of address space areas associated
00280                  * with memory mapped devices is not supported.
00281                  */
00282                 mutex_unlock(&area->lock);
00283                 mutex_unlock(&as->lock);
00284                 interrupts_restore(ipl);
00285                 return ENOTSUP;
00286         }
00287         if (area->sh_info) {
00288                 /*
00289                  * Remapping of shared address space areas 
00290                  * is not supported.
00291                  */
00292                 mutex_unlock(&area->lock);
00293                 mutex_unlock(&as->lock);
00294                 interrupts_restore(ipl);
00295                 return ENOTSUP;
00296         }
00297 
00298         pages = SIZE2FRAMES((address - area->base) + size);
00299         if (!pages) {
00300                 /*
00301                  * Zero size address space areas are not allowed.
00302                  */
00303                 mutex_unlock(&area->lock);
00304                 mutex_unlock(&as->lock);
00305                 interrupts_restore(ipl);
00306                 return EPERM;
00307         }
00308         
00309         if (pages < area->pages) {
00310                 bool cond;
00311                 __address start_free = area->base + pages*PAGE_SIZE;
00312 
00313                 /*
00314                  * Shrinking the area.
00315                  * No need to check for overlaps.
00316                  */
00317 
00318                 /*
00319                  * Start TLB shootdown sequence.
00320                  */
00321                 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
00322 
00323                 /*
00324                  * Remove frames belonging to used space starting from
00325                  * the highest addresses downwards until an overlap with
00326                  * the resized address space area is found. Note that this
00327                  * is also the right way to remove part of the used_space
00328                  * B+tree leaf list.
00329                  */             
00330                 for (cond = true; cond;) {
00331                         btree_node_t *node;
00332                 
00333                         ASSERT(!list_empty(&area->used_space.leaf_head));
00334                         node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
00335                         if ((cond = (bool) node->keys)) {
00336                                 __address b = node->key[node->keys - 1];
00337                                 count_t c = (count_t) node->value[node->keys - 1];
00338                                 int i = 0;
00339                         
00340                                 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
00341                                         
00342                                         if (b + c*PAGE_SIZE <= start_free) {
00343                                                 /*
00344                                                  * The whole interval fits completely
00345                                                  * in the resized address space area.
00346                                                  */
00347                                                 break;
00348                                         }
00349                 
00350                                         /*
00351                                          * Part of the interval corresponding to b and c
00352                                          * overlaps with the resized address space area.
00353                                          */
00354                 
00355                                         cond = false;   /* we are almost done */
00356                                         i = (start_free - b) >> PAGE_WIDTH;
00357                                         if (!used_space_remove(area, start_free, c - i))
00358                                                 panic("Could not remove used space.");
00359                                 } else {
00360                                         /*
00361                                          * The interval of used space can be completely removed.
00362                                          */
00363                                         if (!used_space_remove(area, b, c))
00364                                                 panic("Could not remove used space.\n");
00365                                 }
00366                         
00367                                 for (; i < c; i++) {
00368                                         pte_t *pte;
00369                         
00370                                         page_table_lock(as, false);
00371                                         pte = page_mapping_find(as, b + i*PAGE_SIZE);
00372                                         ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
00373                                         if (area->backend && area->backend->frame_free) {
00374                                                 area->backend->frame_free(area,
00375                                                         b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
00376                                         }
00377                                         page_mapping_remove(as, b + i*PAGE_SIZE);
00378                                         page_table_unlock(as, false);
00379                                 }
00380                         }
00381                 }
00382 
00383                 /*
00384                  * Finish TLB shootdown sequence.
00385                  */
00386                 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
00387                 tlb_shootdown_finalize();
00388         } else {
00389                 /*
00390                  * Growing the area.
00391                  * Check for overlaps with other address space areas.
00392                  */
00393                 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
00394                         mutex_unlock(&area->lock);
00395                         mutex_unlock(&as->lock);                
00396                         interrupts_restore(ipl);
00397                         return EADDRNOTAVAIL;
00398                 }
00399         } 
00400 
00401         area->pages = pages;
00402         
00403         mutex_unlock(&area->lock);
00404         mutex_unlock(&as->lock);
00405         interrupts_restore(ipl);
00406 
00407         return 0;
00408 }
00409 
00417 int as_area_destroy(as_t *as, __address address)
00418 {
00419         as_area_t *area;
00420         __address base;
00421         link_t *cur;
00422         ipl_t ipl;
00423 
00424         ipl = interrupts_disable();
00425         mutex_lock(&as->lock);
00426 
00427         area = find_area_and_lock(as, address);
00428         if (!area) {
00429                 mutex_unlock(&as->lock);
00430                 interrupts_restore(ipl);
00431                 return ENOENT;
00432         }
00433 
00434         base = area->base;
00435 
00436         /*
00437          * Start TLB shootdown sequence.
00438          */
00439         tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
00440 
00441         /*
00442          * Visit only the pages mapped by used_space B+tree.
00443          */
00444         for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
00445                 btree_node_t *node;
00446                 int i;
00447                 
00448                 node = list_get_instance(cur, btree_node_t, leaf_link);
00449                 for (i = 0; i < node->keys; i++) {
00450                         __address b = node->key[i];
00451                         count_t j;
00452                         pte_t *pte;
00453                         
00454                         for (j = 0; j < (count_t) node->value[i]; j++) {
00455                                 page_table_lock(as, false);
00456                                 pte = page_mapping_find(as, b + j*PAGE_SIZE);
00457                                 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
00458                                 if (area->backend && area->backend->frame_free) {
00459                                         area->backend->frame_free(area,
00460                                                 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
00461                                 }
00462                                 page_mapping_remove(as, b + j*PAGE_SIZE);
00463                                 page_table_unlock(as, false);
00464                         }
00465                 }
00466         }
00467 
00468         /*
00469          * Finish TLB shootdown sequence.
00470          */
00471         tlb_invalidate_pages(AS->asid, area->base, area->pages);
00472         tlb_shootdown_finalize();
00473         
00474         btree_destroy(&area->used_space);
00475 
00476         area->attributes |= AS_AREA_ATTR_PARTIAL;
00477         
00478         if (area->sh_info)
00479                 sh_info_remove_reference(area->sh_info);
00480                 
00481         mutex_unlock(&area->lock);
00482 
00483         /*
00484          * Remove the empty area from address space.
00485          */
00486         btree_remove(&AS->as_area_btree, base, NULL);
00487         
00488         free(area);
00489         
00490         mutex_unlock(&AS->lock);
00491         interrupts_restore(ipl);
00492         return 0;
00493 }
00494 
/** Share an address space area with another (or the same) address space.
 *
 * If the source area has not been shared before, a new share_info structure
 * is created with refcount 2 (source + destination); otherwise the existing
 * one has its refcount incremented. The actual duplication of the mapping is
 * delegated to the source backend's share function.
 *
 * @param src_as Source address space.
 * @param src_base Base address of the source area.
 * @param acc_size Expected size of the source area; must match exactly.
 * @param dst_as Destination address space.
 * @param dst_base Base address for the destination area.
 * @param dst_flags_mask Mask applied to the source flags for the new area.
 *
 * @return Zero on success; ENOENT if the source area does not exist, ENOTSUP
 *	   if its backend cannot share, EPERM on size/flags mismatch, ENOMEM
 *	   if the destination area cannot be created.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
		  as_t *dst_as, __address dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;
	
	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	
	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	
	/* Snapshot the source attributes while the area is locked. */
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		/* First share: refcount 2 covers both source and destination. */
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
				  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);
		
		interrupts_restore(ipl);
		return ENOMEM;
	}
	
	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */	
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	
	interrupts_restore(ipl);
	
	return 0;
}
00624 
00634 bool as_area_check_access(as_area_t *area, pf_access_t access)
00635 {
00636         int flagmap[] = {
00637                 [PF_ACCESS_READ] = AS_AREA_READ,
00638                 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
00639                 [PF_ACCESS_EXEC] = AS_AREA_EXEC
00640         };
00641 
00642         if (!(area->flags & flagmap[access]))
00643                 return false;
00644         
00645         return true;
00646 }
00647 
/** Handle a page fault within the current address space.
 *
 * High-level page fault handler: locate the faulting area in the current
 * address space AS, verify it can service the fault and dispatch to its
 * backend. If the fault cannot be resolved but happened during
 * copy_from_uspace()/copy_to_uspace(), redirect execution to the respective
 * failover address instead of reporting a hard fault.
 *
 * @param page Faulting virtual address.
 * @param access Access mode that caused the fault (read/write/exec).
 * @param istate Saved state of the interrupted context.
 *
 * @return AS_PF_OK if resolved, AS_PF_DEFER if redirected to a failover
 *	   address, AS_PF_FAULT otherwise.
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;
	
	/* Faults taken before threading is up cannot be serviced here. */
	if (!THREAD)
		return AS_PF_FAULT;
		
	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);	
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;		
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;		
	}

	page_table_lock(AS, false);
	
	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			/* A present mapping that already allows the requested
			 * access means another thread resolved this fault. */
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
				(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
				(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}
	
	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}
	
	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	/* Unresolvable fault: defer to a copy-routine failover address if one
	 * applies, otherwise report a hard fault. */
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
00753 
/** Switch address spaces on the current processor.
 *
 * Deactivates 'old' and activates 'new', maintaining the per-CPU reference
 * counts and the list of inactive address spaces with valid ASIDs. The
 * statement ordering here is deliberate: asid_get() is deferred until
 * new->lock has been released to avoid deadlock (see the inline comments).
 *
 * @param old Outgoing address space; may be NULL (e.g. on the first switch).
 * @param new Incoming address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;
	
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */	
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			 ASSERT(old->asid != ASID_INVALID);
			 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		/* Becoming active: either leave the inactive list or note
		 * that an ASID still has to be allocated. */
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;
		
		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);
	
	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);
	
	AS = new;
}
00825 
00832 int area_flags_to_page_flags(int aflags)
00833 {
00834         int flags;
00835 
00836         flags = PAGE_USER | PAGE_PRESENT;
00837         
00838         if (aflags & AS_AREA_READ)
00839                 flags |= PAGE_READ;
00840                 
00841         if (aflags & AS_AREA_WRITE)
00842                 flags |= PAGE_WRITE;
00843         
00844         if (aflags & AS_AREA_EXEC)
00845                 flags |= PAGE_EXEC;
00846         
00847         if (aflags & AS_AREA_CACHEABLE)
00848                 flags |= PAGE_CACHEABLE;
00849                 
00850         return flags;
00851 }
00852 
00862 int as_area_get_flags(as_area_t *a)
00863 {
00864         return area_flags_to_page_flags(a->flags);
00865 }
00866 
00876 pte_t *page_table_create(int flags)
00877 {
00878         ASSERT(as_operations);
00879         ASSERT(as_operations->page_table_create);
00880 
00881         return as_operations->page_table_create(flags);
00882 }
00883 
00890 void page_table_destroy(pte_t *page_table)
00891 {
00892         ASSERT(as_operations);
00893         ASSERT(as_operations->page_table_destroy);
00894 
00895         as_operations->page_table_destroy(page_table);
00896 }
00897 
00910 void page_table_lock(as_t *as, bool lock)
00911 {
00912         ASSERT(as_operations);
00913         ASSERT(as_operations->page_table_lock);
00914 
00915         as_operations->page_table_lock(as, lock);
00916 }
00917 
00923 void page_table_unlock(as_t *as, bool unlock)
00924 {
00925         ASSERT(as_operations);
00926         ASSERT(as_operations->page_table_unlock);
00927 
00928         as_operations->page_table_unlock(as, unlock);
00929 }
00930 
00931 
00941 as_area_t *find_area_and_lock(as_t *as, __address va)
00942 {
00943         as_area_t *a;
00944         btree_node_t *leaf, *lnode;
00945         int i;
00946         
00947         a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
00948         if (a) {
00949                 /* va is the base address of an address space area */
00950                 mutex_lock(&a->lock);
00951                 return a;
00952         }
00953         
00954         /*
00955          * Search the leaf node and the righmost record of its left neighbour
00956          * to find out whether this is a miss or va belongs to an address
00957          * space area found there.
00958          */
00959         
00960         /* First, search the leaf node itself. */
00961         for (i = 0; i < leaf->keys; i++) {
00962                 a = (as_area_t *) leaf->value[i];
00963                 mutex_lock(&a->lock);
00964                 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
00965                         return a;
00966                 }
00967                 mutex_unlock(&a->lock);
00968         }
00969 
00970         /*
00971          * Second, locate the left neighbour and test its last record.
00972          * Because of its position in the B+tree, it must have base < va.
00973          */
00974         if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
00975                 a = (as_area_t *) lnode->value[lnode->keys - 1];
00976                 mutex_lock(&a->lock);
00977                 if (va < a->base + a->pages * PAGE_SIZE) {
00978                         return a;
00979                 }
00980                 mutex_unlock(&a->lock);
00981         }
00982 
00983         return NULL;
00984 }
00985 
/** Check whether a new address space area would conflict with existing ones.
 *
 * The address space must be locked and interrupts disabled
 * by the caller.
 *
 * @param as Address space to check.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Area to exclude from the check (presumably the area
 *	being resized — TODO confirm against callers).
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;
	
	/*
	 * We don't want any area to have conflicts with NULL page.
	 * (NULL here denotes virtual address 0.)
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;
	
	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */
	
	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		/* An area already starts exactly at va. */
		if (a != avoid_area)
			return false;
	}
	
	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	
	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
	
		if (a == avoid_area)
			continue;
	
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */	 
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size, 
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
01068 
01070 size_t as_get_size(__address base)
01071 {
01072         ipl_t ipl;
01073         as_area_t *src_area;
01074         size_t size;
01075 
01076         ipl = interrupts_disable();
01077         src_area = find_area_and_lock(AS, base);
01078         if (src_area){
01079                 size = src_area->pages * PAGE_SIZE;
01080                 mutex_unlock(&src_area->lock);
01081         } else {
01082                 size = 0;
01083         }
01084         interrupts_restore(ipl);
01085         return size;
01086 }
01087 
/** Mark a portion of an address space area as used.
 *
 * The interval [page, page + count*PAGE_SIZE) is recorded in the area's
 * used_space B+tree, merging with adjacent intervals where possible.
 * The address space area must be already locked by the caller.
 *
 * @param a Address space area.
 * @param page First page to be marked; must be page-aligned.
 * @param count Number of pages to be marked; must be non-zero.
 *
 * @return 0 on failure (the interval overlaps already used space)
 *	and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		/* The tree is empty; this becomes the first interval. */
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
		
		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */
		 
		if (page >= right_pg) {
			/* Do nothing; handled by the right-neighbour pass below. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;			
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1; 
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];
	
		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */
		 
		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
		
		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing; handled by the in-leaf pass below. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;			
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1; 
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] +=  count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
	
		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */
		 
		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}
	
	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;			
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1; 
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}
01314 
01325 int used_space_remove(as_area_t *a, __address page, count_t count)
01326 {
01327         btree_node_t *leaf, *node;
01328         count_t pages;
01329         int i;
01330 
01331         ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
01332         ASSERT(count);
01333 
01334         pages = (count_t) btree_search(&a->used_space, page, &leaf);
01335         if (pages) {
01336                 /*
01337                  * We are lucky, page is the beginning of some interval.
01338                  */
01339                 if (count > pages) {
01340                         return 0;
01341                 } else if (count == pages) {
01342                         btree_remove(&a->used_space, page, leaf);
01343                         return 1;
01344                 } else {
01345                         /*
01346                          * Find the respective interval.
01347                          * Decrease its size and relocate its start address.
01348                          */
01349                         for (i = 0; i < leaf->keys; i++) {
01350                                 if (leaf->key[i] == page) {
01351                                         leaf->key[i] += count*PAGE_SIZE;
01352                                         leaf->value[i] -= count;
01353                                         return 1;
01354                                 }
01355                         }
01356                         goto error;
01357                 }
01358         }
01359 
01360         node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
01361         if (node && page < leaf->key[0]) {
01362                 __address left_pg = node->key[node->keys - 1];
01363                 count_t left_cnt = (count_t) node->value[node->keys - 1];
01364 
01365                 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
01366                         if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
01367                                 /*
01368                                  * The interval is contained in the rightmost interval
01369                                  * of the left neighbour and can be removed by
01370                                  * updating the size of the bigger interval.
01371                                  */
01372                                 node->value[node->keys - 1] -= count;
01373                                 return 1;
01374                         } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
01375                                 count_t new_cnt;
01376                                 
01377                                 /*
01378                                  * The interval is contained in the rightmost interval
01379                                  * of the left neighbour but its removal requires
01380                                  * both updating the size of the original interval and
01381                                  * also inserting a new interval.
01382                                  */
01383                                 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
01384                                 node->value[node->keys - 1] -= count + new_cnt;
01385                                 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
01386                                 return 1;
01387                         }
01388                 }
01389                 return 0;
01390         } else if (page < leaf->key[0]) {
01391                 return 0;
01392         }
01393         
01394         if (page > leaf->key[leaf->keys - 1]) {
01395                 __address left_pg = leaf->key[leaf->keys - 1];
01396                 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
01397 
01398                 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
01399                         if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
01400                                 /*
01401                                  * The interval is contained in the rightmost interval
01402                                  * of the leaf and can be removed by updating the size
01403                                  * of the bigger interval.
01404                                  */
01405                                 leaf->value[leaf->keys - 1] -= count;
01406                                 return 1;
01407                         } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
01408                                 count_t new_cnt;
01409                                 
01410                                 /*
01411                                  * The interval is contained in the rightmost interval
01412                                  * of the leaf but its removal requires both updating
01413                                  * the size of the original interval and
01414                                  * also inserting a new interval.
01415                                  */
01416                                 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
01417                                 leaf->value[leaf->keys - 1] -= count + new_cnt;
01418                                 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
01419                                 return 1;
01420                         }
01421                 }
01422                 return 0;
01423         }       
01424         
01425         /*
01426          * The border cases have been already resolved.
01427          * Now the interval can be only between intervals of the leaf. 
01428          */
01429         for (i = 1; i < leaf->keys - 1; i++) {
01430                 if (page < leaf->key[i]) {
01431                         __address left_pg = leaf->key[i - 1];
01432                         count_t left_cnt = (count_t) leaf->value[i - 1];
01433 
01434                         /*
01435                          * Now the interval is between intervals corresponding to (i - 1) and i.
01436                          */
01437                         if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
01438                                 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
01439                                         /*
01440                                         * The interval is contained in the interval (i - 1)
01441                                          * of the leaf and can be removed by updating the size
01442                                          * of the bigger interval.
01443                                          */
01444                                         leaf->value[i - 1] -= count;
01445                                         return 1;
01446                                 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
01447                                         count_t new_cnt;
01448                                 
01449                                         /*
01450                                          * The interval is contained in the interval (i - 1)
01451                                          * of the leaf but its removal requires both updating
01452                                          * the size of the original interval and
01453                                          * also inserting a new interval.
01454                                          */
01455                                         new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
01456                                         leaf->value[i - 1] -= count + new_cnt;
01457                                         btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
01458                                         return 1;
01459                                 }
01460                         }
01461                         return 0;
01462                 }
01463         }
01464 
01465 error:
01466         panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
01467 }
01468 
01475 void sh_info_remove_reference(share_info_t *sh_info)
01476 {
01477         bool dealloc = false;
01478 
01479         mutex_lock(&sh_info->lock);
01480         ASSERT(sh_info->refcount);
01481         if (--sh_info->refcount == 0) {
01482                 dealloc = true;
01483                 link_t *cur;
01484                 
01485                 /*
01486                  * Now walk carefully the pagemap B+tree and free/remove
01487                  * reference from all frames found there.
01488                  */
01489                 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
01490                         btree_node_t *node;
01491                         int i;
01492                         
01493                         node = list_get_instance(cur, btree_node_t, leaf_link);
01494                         for (i = 0; i < node->keys; i++) 
01495                                 frame_free(ADDR2PFN((__address) node->value[i]));
01496                 }
01497                 
01498         }
01499         mutex_unlock(&sh_info->lock);
01500         
01501         if (dealloc) {
01502                 btree_destroy(&sh_info->pagemap);
01503                 free(sh_info);
01504         }
01505 }
01506 
01507 /*
01508  * Address space related syscalls.
01509  */
01510 
01512 __native sys_as_area_create(__address address, size_t size, int flags)
01513 {
01514         if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
01515                 return (__native) address;
01516         else
01517                 return (__native) -1;
01518 }
01519 
/** Wrapper for as_area_resize() syscall.
 *
 * Resizes the area starting at address in the current address space.
 * Note that the flags argument is currently ignored; 0 is passed to
 * as_area_resize() instead.
 *
 * @param address Base address of the area to resize.
 * @param size New size of the area.
 * @param flags Unused.
 *
 * @return Return value of as_area_resize().
 */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}
01525 
/** Wrapper for as_area_destroy() syscall.
 *
 * Destroys the area starting at address in the current address space.
 *
 * @param address Base address of the area to destroy.
 *
 * @return Return value of as_area_destroy().
 */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}
01531 

Generated on Sun Jun 18 17:01:58 2006 for HelenOS Kernel (mips32) by  doxygen 1.4.6