source: mainline/kernel/generic/src/mm/as.c@ 7bf7ef7

Last change on this file since 7bf7ef7 was 8440473, checked in by Jakub Jermar <jakub@…>, 19 years ago

Minor changes. Some coding style fixes and also a type (tee vs. tree).
One AS → as change.

[20d50a1]1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Address space related functions.
36 *
[20d50a1]37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
[9179d0a]40 *
41 * Functionality provided by this file allows one to
[cc73a8a1]42 * create address spaces and create, resize and share
[9179d0a]43 * address space areas.
44 *
45 * @see page.c
46 *
[20d50a1]47 */
48
49#include <mm/as.h>
[ef67bab]50#include <arch/mm/as.h>
[20d50a1]51#include <mm/page.h>
52#include <mm/frame.h>
[085d973]53#include <mm/slab.h>
[20d50a1]54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
[2802767]57#include <genarch/mm/page_ht.h>
[4512d7e]58#include <mm/asid.h>
[20d50a1]59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
[1068f6a]61#include <synch/mutex.h>
[5c9a08b]62#include <adt/list.h>
[252127e]63#include <adt/btree.h>
[df0103f7]64#include <proc/task.h>
[e3c762cd]65#include <proc/thread.h>
[20d50a1]66#include <arch/asm.h>
[df0103f7]67#include <panic.h>
[20d50a1]68#include <debug.h>
[df0103f7]69#include <print.h>
[20d50a1]70#include <memstr.h>
[5a7d9d1]71#include <macros.h>
[20d50a1]72#include <arch.h>
[df0103f7]73#include <errno.h>
74#include <config.h>
[25bf215]75#include <align.h>
[df0103f7]76#include <arch/types.h>
77#include <typedefs.h>
[e3c762cd]78#include <syscall/copy.h>
79#include <arch/interrupt.h>
[20d50a1]80
[cc73a8a1]81/**
82 * Each architecture decides what functions will be used to carry out
83 * address space operations such as creating or locking page tables.
84 */
[ef67bab]85as_operations_t *as_operations = NULL;
[20d50a1]86
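/*
 * Illustrative sketch, not part of the original file: an architecture is
 * expected to point as_operations at its own table, e.g. from its
 * arch/mm/as.c. The initializer below uses hypothetical function names;
 * only the four operations asserted by the page_table_*() wrappers later
 * in this file are assumed:
 *
 *	as_operations_t pt_as_operations = {
 *		.page_table_create = ptl0_create,
 *		.page_table_destroy = ptl0_destroy,
 *		.page_table_lock = ptl0_lock,
 *		.page_table_unlock = ptl0_unlock
 *	};
 *
 *	as_operations = &pt_as_operations;
 */
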
[57da95c]87/**
88 * Slab for as_t objects.
89 */
90static slab_cache_t *as_slab;
91
[47800e0]92/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
93SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
[7e4e532]94
95/**
96 * This list contains address spaces that are not active on any
97 * processor and that have valid ASID.
98 */
99LIST_INITIALIZE(inactive_as_with_asid_head);
100
[071a8ae6]101/** Kernel address space. */
102as_t *AS_KERNEL = NULL;
103
[df0103f7]104static int area_flags_to_page_flags(int aflags);
[7f1c620]105static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
106static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
[8182031]107static void sh_info_remove_reference(share_info_t *sh_info);
[20d50a1]108
[29b2bbf]109static int as_constructor(void *obj, int flags)
110{
111 as_t *as = (as_t *) obj;
112 int rc;
113
114 link_initialize(&as->inactive_as_with_asid_link);
115 mutex_initialize(&as->lock);
116
117 rc = as_constructor_arch(as, flags);
118
119 return rc;
120}
121
122static int as_destructor(void *obj)
123{
124 as_t *as = (as_t *) obj;
125
126 return as_destructor_arch(as);
127}
128
[ef67bab]129/** Initialize address space subsystem. */
130void as_init(void)
131{
132 as_arch_init();
[57da95c]133
[29b2bbf]134 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
135 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[57da95c]136
[8e1ea655]137 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]138 if (!AS_KERNEL)
139 panic("can't create kernel address space\n");
140
[ef67bab]141}
142
[071a8ae6]143/** Create address space.
144 *
145 * @param flags Flags that influence the way in which the address space is created.
146 */
[ef67bab]147as_t *as_create(int flags)
[20d50a1]148{
149 as_t *as;
150
[57da95c]151 as = (as_t *) slab_alloc(as_slab, 0);
[29b2bbf]152 (void) as_create_arch(as, 0);
153
[252127e]154 btree_create(&as->as_area_btree);
[bb68433]155
156 if (flags & FLAG_AS_KERNEL)
157 as->asid = ASID_KERNEL;
158 else
159 as->asid = ASID_INVALID;
160
[482826d]161 as->refcount = 0;
[47800e0]162 as->cpu_refcount = 0;
[bb68433]163 as->page_table = page_table_create(flags);
[20d50a1]164
165 return as;
166}
167
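/*
 * Illustrative sketch, not part of the original file: the expected
 * lifecycle of an address space as seen by a caller, e.g. task creation
 * and destruction code (assumed, not shown here).
 */
static void example_as_lifecycle(void)
{
	as_t *as;

	/* Userspace address space; an ASID is assigned lazily on switch. */
	as = as_create(0);

	/* ... a task would take a reference and use the space here ... */

	/* as_destroy() below asserts that no task references it anymore. */
	ASSERT(as->refcount == 0);
	as_destroy(as);
}
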
[482826d]168/** Destroy address space.
169 *
170 * When there are no tasks referencing this address space (i.e. its refcount is zero),
171 * the address space can be destroyed.
172 */
173void as_destroy(as_t *as)
[5be1923]174{
[482826d]175 ipl_t ipl;
[6f9a9bc]176 bool cond;
[482826d]177
178 ASSERT(as->refcount == 0);
179
180 /*
181 * Since there is no reference to this area,
182 * it is safe not to lock its mutex.
183 */
184 ipl = interrupts_disable();
185 spinlock_lock(&inactive_as_with_asid_lock);
[31e8ddd]186 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
[6f9a9bc]187 if (as != AS && as->cpu_refcount == 0)
[31e8ddd]188 list_remove(&as->inactive_as_with_asid_link);
[482826d]189 asid_put(as->asid);
190 }
191 spinlock_unlock(&inactive_as_with_asid_lock);
192
193 /*
194 * Destroy address space areas of the address space.
[8440473]195 * The B+tree must be walked carefully because it is
[6f9a9bc]196 * also being destroyed.
[482826d]197 */
[6f9a9bc]198 for (cond = true; cond; ) {
[482826d]199 btree_node_t *node;
[6f9a9bc]200
201 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
202 node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
203
204 if ((cond = node->keys)) {
205 as_area_destroy(as, node->key[0]);
206 }
[482826d]207 }
[f8d069e8]208
[152b2b0]209 btree_destroy(&as->as_area_btree);
[482826d]210 page_table_destroy(as->page_table);
[5be1923]211
[482826d]212 interrupts_restore(ipl);
213
[57da95c]214 slab_free(as_slab, as);
[5be1923]215}
216
[20d50a1]217/** Create address space area of common attributes.
218 *
219 * The created address space area is added to the target address space.
220 *
221 * @param as Target address space.
[a9e8b39]222 * @param flags Flags of the area memory.
[37e7d2b9]223 * @param size Size of area.
[20d50a1]224 * @param base Base address of area.
[a9e8b39]225 * @param attrs Attributes of the area.
[8182031]226 * @param backend Address space area backend. NULL if no backend is used.
227 * @param backend_data NULL or a pointer to an array holding two void *.
[20d50a1]228 *
229 * @return Address space area on success or NULL on failure.
230 */
[7f1c620]231as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
[0ee077ee]232 mem_backend_t *backend, mem_backend_data_t *backend_data)
[20d50a1]233{
234 ipl_t ipl;
235 as_area_t *a;
236
237 if (base % PAGE_SIZE)
[37e7d2b9]238 return NULL;
239
[dbbeb26]240 if (!size)
241 return NULL;
242
[37e7d2b9]243 /* Writeable executable areas are not supported. */
244 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
245 return NULL;
[20d50a1]246
247 ipl = interrupts_disable();
[1068f6a]248 mutex_lock(&as->lock);
[20d50a1]249
[37e7d2b9]250 if (!check_area_conflicts(as, base, size, NULL)) {
[1068f6a]251 mutex_unlock(&as->lock);
[37e7d2b9]252 interrupts_restore(ipl);
253 return NULL;
254 }
[20d50a1]255
[bb68433]256 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
257
[1068f6a]258 mutex_initialize(&a->lock);
[bb68433]259
[0ee077ee]260 a->as = as;
[c23502d]261 a->flags = flags;
[a9e8b39]262 a->attributes = attrs;
[37e7d2b9]263 a->pages = SIZE2FRAMES(size);
[bb68433]264 a->base = base;
[8182031]265 a->sh_info = NULL;
266 a->backend = backend;
[0ee077ee]267 if (backend_data)
268 a->backend_data = *backend_data;
269 else
[7f1c620]270 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
[0ee077ee]271
[25bf215]272 btree_create(&a->used_space);
[bb68433]273
[252127e]274 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
[20d50a1]275
[1068f6a]276 mutex_unlock(&as->lock);
[20d50a1]277 interrupts_restore(ipl);
[f9425006]278
[20d50a1]279 return a;
280}
281
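/*
 * Illustrative sketch, not part of the original file: creating a
 * four-page anonymous read/write area at a caller-chosen, page-aligned
 * base address. Compare sys_as_area_create() near the end of this file.
 */
static as_area_t *example_create_anon_area(as_t *as, uintptr_t base)
{
	/* NULL is returned on misalignment, zero size or a conflict. */
	return as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
}
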
[df0103f7]282/** Find address space area and change it.
283 *
284 * @param as Address space.
285 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
286 * @param size New size of the virtual memory block starting at address.
287 * @param flags Flags influencing the remap operation. Currently unused.
288 *
[7242a78e]289 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]290 */
[7f1c620]291int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
[df0103f7]292{
[7242a78e]293 as_area_t *area;
[df0103f7]294 ipl_t ipl;
295 size_t pages;
296
297 ipl = interrupts_disable();
[1068f6a]298 mutex_lock(&as->lock);
[df0103f7]299
300 /*
301 * Locate the area.
302 */
303 area = find_area_and_lock(as, address);
304 if (!area) {
[1068f6a]305 mutex_unlock(&as->lock);
[df0103f7]306 interrupts_restore(ipl);
[7242a78e]307 return ENOENT;
[df0103f7]308 }
309
[0ee077ee]310 if (area->backend == &phys_backend) {
[df0103f7]311 /*
312 * Remapping of address space areas associated
313 * with memory mapped devices is not supported.
314 */
[1068f6a]315 mutex_unlock(&area->lock);
316 mutex_unlock(&as->lock);
[df0103f7]317 interrupts_restore(ipl);
[7242a78e]318 return ENOTSUP;
[df0103f7]319 }
[8182031]320 if (area->sh_info) {
321 /*
322 * Remapping of shared address space areas
323 * is not supported.
324 */
325 mutex_unlock(&area->lock);
326 mutex_unlock(&as->lock);
327 interrupts_restore(ipl);
328 return ENOTSUP;
329 }
[df0103f7]330
331 pages = SIZE2FRAMES((address - area->base) + size);
332 if (!pages) {
333 /*
334 * Zero size address space areas are not allowed.
335 */
[1068f6a]336 mutex_unlock(&area->lock);
337 mutex_unlock(&as->lock);
[df0103f7]338 interrupts_restore(ipl);
[7242a78e]339 return EPERM;
[df0103f7]340 }
341
342 if (pages < area->pages) {
[56789125]343 bool cond;
[7f1c620]344 uintptr_t start_free = area->base + pages*PAGE_SIZE;
[df0103f7]345
346 /*
347 * Shrinking the area.
348 * No need to check for overlaps.
349 */
350
[5552d60]351 /*
352 * Start TLB shootdown sequence.
353 */
354 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
355
[56789125]356 /*
357 * Remove frames belonging to used space starting from
358 * the highest addresses downwards until an overlap with
359 * the resized address space area is found. Note that this
360 * is also the right way to remove part of the used_space
361 * B+tree leaf list.
362 */
363 for (cond = true; cond;) {
364 btree_node_t *node;
365
366 ASSERT(!list_empty(&area->used_space.leaf_head));
367 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
368 if ((cond = (bool) node->keys)) {
[7f1c620]369 uintptr_t b = node->key[node->keys - 1];
[56789125]370 count_t c = (count_t) node->value[node->keys - 1];
371 int i = 0;
372
373 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
374
375 if (b + c*PAGE_SIZE <= start_free) {
376 /*
377 * The whole interval fits completely
378 * in the resized address space area.
379 */
380 break;
381 }
382
383 /*
384 * Part of the interval corresponding to b and c
385 * overlaps with the resized address space area.
386 */
387
388 cond = false; /* we are almost done */
389 i = (start_free - b) >> PAGE_WIDTH;
390 if (!used_space_remove(area, start_free, c - i))
[f1d1f5d3]391 panic("Could not remove used space.\n");
[56789125]392 } else {
393 /*
394 * The interval of used space can be completely removed.
395 */
396 if (!used_space_remove(area, b, c))
397 panic("Could not remove used space.\n");
398 }
399
400 for (; i < c; i++) {
401 pte_t *pte;
402
403 page_table_lock(as, false);
404 pte = page_mapping_find(as, b + i*PAGE_SIZE);
405 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
[0ee077ee]406 if (area->backend && area->backend->frame_free) {
407 area->backend->frame_free(area,
[8182031]408 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
409 }
[56789125]410 page_mapping_remove(as, b + i*PAGE_SIZE);
411 page_table_unlock(as, false);
412 }
[df0103f7]413 }
414 }
[5552d60]415
[df0103f7]416 /*
[5552d60]417 * Finish TLB shootdown sequence.
[df0103f7]418 */
[8440473]419 tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
[df0103f7]420 tlb_shootdown_finalize();
[f1d1f5d3]421
422 /*
423 * Invalidate software translation caches (e.g. TSB on sparc64).
424 */
425 as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
[df0103f7]426 } else {
427 /*
428 * Growing the area.
429 * Check for overlaps with other address space areas.
430 */
431 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
[1068f6a]432 mutex_unlock(&area->lock);
433 mutex_unlock(&as->lock);
[df0103f7]434 interrupts_restore(ipl);
[7242a78e]435 return EADDRNOTAVAIL;
[df0103f7]436 }
437 }
438
439 area->pages = pages;
440
[1068f6a]441 mutex_unlock(&area->lock);
442 mutex_unlock(&as->lock);
[df0103f7]443 interrupts_restore(ipl);
444
[7242a78e]445 return 0;
446}
447
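/*
 * Illustrative sketch, not part of the original file: growing an area by
 * one page and decoding the error values documented above.
 */
static int example_grow_area(as_t *as, uintptr_t base, size_t old_size)
{
	int rc;

	rc = as_area_resize(as, base, old_size + PAGE_SIZE, 0);
	switch (rc) {
	case 0:			/* resized */
		break;
	case ENOENT:		/* no area contains base */
	case ENOTSUP:		/* device-mapped or shared area */
	case EADDRNOTAVAIL:	/* growth conflicts with a neighbouring area */
	default:
		break;
	}
	return rc;
}
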
448/** Destroy address space area.
449 *
450 * @param as Address space.
451 * @param address Address within the area to be deleted.
452 *
453 * @return Zero on success or a value from @ref errno.h on failure.
454 */
[7f1c620]455int as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]456{
457 as_area_t *area;
[7f1c620]458 uintptr_t base;
[f8d069e8]459 link_t *cur;
[7242a78e]460 ipl_t ipl;
461
462 ipl = interrupts_disable();
[1068f6a]463 mutex_lock(&as->lock);
[7242a78e]464
465 area = find_area_and_lock(as, address);
466 if (!area) {
[1068f6a]467 mutex_unlock(&as->lock);
[7242a78e]468 interrupts_restore(ipl);
469 return ENOENT;
470 }
471
[56789125]472 base = area->base;
473
[5552d60]474 /*
475 * Start TLB shootdown sequence.
476 */
[f1d1f5d3]477 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
[5552d60]478
[567807b1]479 /*
480 * Visit only the pages mapped by used_space B+tree.
481 */
[f8d069e8]482 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
[567807b1]483 btree_node_t *node;
[f8d069e8]484 int i;
[56789125]485
[f8d069e8]486 node = list_get_instance(cur, btree_node_t, leaf_link);
487 for (i = 0; i < node->keys; i++) {
[7f1c620]488 uintptr_t b = node->key[i];
[f8d069e8]489 count_t j;
[567807b1]490 pte_t *pte;
[56789125]491
[f8d069e8]492 for (j = 0; j < (count_t) node->value[i]; j++) {
[567807b1]493 page_table_lock(as, false);
[f8d069e8]494 pte = page_mapping_find(as, b + j*PAGE_SIZE);
[567807b1]495 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
[0ee077ee]496 if (area->backend && area->backend->frame_free) {
497 area->backend->frame_free(area,
[f8d069e8]498 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
[56789125]499 }
[f1d1f5d3]500 page_mapping_remove(as, b + j*PAGE_SIZE);
[567807b1]501 page_table_unlock(as, false);
[7242a78e]502 }
503 }
504 }
[56789125]505
[7242a78e]506 /*
[5552d60]507 * Finish TLB shootdown sequence.
[7242a78e]508 */
[f1d1f5d3]509 tlb_invalidate_pages(as->asid, area->base, area->pages);
[7242a78e]510 tlb_shootdown_finalize();
[5552d60]511
[f1d1f5d3]512 /*
513 * Invalidate potential software translation caches (e.g. TSB on sparc64).
514 */
515 as_invalidate_translation_cache(as, area->base, area->pages);
516
[5552d60]517 btree_destroy(&area->used_space);
[7242a78e]518
[8d4f2ae]519 area->attributes |= AS_AREA_ATTR_PARTIAL;
[8182031]520
521 if (area->sh_info)
522 sh_info_remove_reference(area->sh_info);
523
[1068f6a]524 mutex_unlock(&area->lock);
[7242a78e]525
526 /*
527 * Remove the empty area from address space.
528 */
[f1d1f5d3]529 btree_remove(&as->as_area_btree, base, NULL);
[7242a78e]530
[8d4f2ae]531 free(area);
532
[f1d1f5d3]533 mutex_unlock(&as->lock);
[7242a78e]534 interrupts_restore(ipl);
535 return 0;
[df0103f7]536}
537
[8d6bc2d5]538/** Share address space area with another or the same address space.
[df0103f7]539 *
[0ee077ee]540 * Address space area mapping is shared with a new address space area.
541 * If the source address space area has not been shared so far,
542 * a new sh_info is created. The new address space area simply gets the
543 * sh_info of the source area. The process of duplicating the
544 * mapping is done through the backend share function.
[8d6bc2d5]545 *
[fd4d8c0]546 * @param src_as Pointer to source address space.
[a9e8b39]547 * @param src_base Base address of the source address space area.
[fd4d8c0]548 * @param acc_size Expected size of the source area.
[46fc2f9]549 * @param dst_as Pointer to destination address space.
[fd4d8c0]550 * @param dst_base Target base address.
551 * @param dst_flags_mask Destination address space area flags mask.
[df0103f7]552 *
[7242a78e]553 * @return Zero on success or ENOENT if there is no such
[df0103f7]554 * address space area,
555 * EPERM if there was a problem in accepting the area or
556 * ENOMEM if there was a problem in allocating destination
[8d6bc2d5]557 * address space area. ENOTSUP is returned if an attempt
558 * to share non-anonymous address space area is detected.
[df0103f7]559 */
[7f1c620]560int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
561 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
[df0103f7]562{
563 ipl_t ipl;
[a9e8b39]564 int src_flags;
565 size_t src_size;
566 as_area_t *src_area, *dst_area;
[8d6bc2d5]567 share_info_t *sh_info;
[0ee077ee]568 mem_backend_t *src_backend;
569 mem_backend_data_t src_backend_data;
[d6e5cbc]570
[7c23af9]571 ipl = interrupts_disable();
[1068f6a]572 mutex_lock(&src_as->lock);
[7c23af9]573 src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]574 if (!src_area) {
[6fa476f7]575 /*
576 * Could not find the source address space area.
577 */
[1068f6a]578 mutex_unlock(&src_as->lock);
[6fa476f7]579 interrupts_restore(ipl);
580 return ENOENT;
581 }
[8d6bc2d5]582
[0ee077ee]583 if (!src_area->backend || !src_area->backend->share) {
[8d6bc2d5]584 /*
[f47fd19]585 * There is no backend or the backend does not
[0ee077ee]586 * know how to share the area.
[8d6bc2d5]587 */
588 mutex_unlock(&src_area->lock);
589 mutex_unlock(&src_as->lock);
590 interrupts_restore(ipl);
591 return ENOTSUP;
592 }
593
[a9e8b39]594 src_size = src_area->pages * PAGE_SIZE;
595 src_flags = src_area->flags;
[0ee077ee]596 src_backend = src_area->backend;
597 src_backend_data = src_area->backend_data;
[1ec1fd8]598
599 /* Share the cacheable flag from the original mapping */
600 if (src_flags & AS_AREA_CACHEABLE)
601 dst_flags_mask |= AS_AREA_CACHEABLE;
602
[76d7305]603 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
[8d6bc2d5]604 mutex_unlock(&src_area->lock);
605 mutex_unlock(&src_as->lock);
[df0103f7]606 interrupts_restore(ipl);
607 return EPERM;
608 }
[8d6bc2d5]609
610 /*
611 * Now we are committed to sharing the area.
[8440473]612 * First, prepare the area for sharing.
[8d6bc2d5]613 * Then it will be safe to unlock it.
614 */
615 sh_info = src_area->sh_info;
616 if (!sh_info) {
617 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
618 mutex_initialize(&sh_info->lock);
619 sh_info->refcount = 2;
620 btree_create(&sh_info->pagemap);
621 src_area->sh_info = sh_info;
622 } else {
623 mutex_lock(&sh_info->lock);
624 sh_info->refcount++;
625 mutex_unlock(&sh_info->lock);
626 }
627
[0ee077ee]628 src_area->backend->share(src_area);
[8d6bc2d5]629
630 mutex_unlock(&src_area->lock);
631 mutex_unlock(&src_as->lock);
632
[df0103f7]633 /*
[a9e8b39]634 * Create copy of the source address space area.
635 * The destination area is created with AS_AREA_ATTR_PARTIAL
636 * attribute set which prevents race condition with
637 * preliminary as_page_fault() calls.
[fd4d8c0]638 * The flags of the source area are masked against dst_flags_mask
639 * to support sharing in less privileged mode.
[df0103f7]640 */
[76d7305]641 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
[0ee077ee]642 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
[a9e8b39]643 if (!dst_area) {
[df0103f7]644 /*
645 * Destination address space area could not be created.
646 */
[8d6bc2d5]647 sh_info_remove_reference(sh_info);
648
[df0103f7]649 interrupts_restore(ipl);
650 return ENOMEM;
651 }
652
[a9e8b39]653 /*
654 * Now the destination address space area has been
655 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]656 * attribute and set the sh_info.
[a9e8b39]657 */
[1068f6a]658 mutex_lock(&dst_area->lock);
[a9e8b39]659 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]660 dst_area->sh_info = sh_info;
[1068f6a]661 mutex_unlock(&dst_area->lock);
[df0103f7]662
663 interrupts_restore(ipl);
664
665 return 0;
666}
667
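/*
 * Illustrative sketch, not part of the original file: giving another
 * address space a read-only view of an area. Note that acc_size must
 * match the source area size exactly, otherwise EPERM is returned.
 */
static int example_share_read_only(as_t *src_as, uintptr_t src_base,
    size_t size, as_t *dst_as, uintptr_t dst_base)
{
	/* AS_AREA_CACHEABLE is propagated from the source automatically. */
	return as_area_share(src_as, src_base, size, dst_as, dst_base,
	    AS_AREA_READ);
}
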
[fb84455]668/** Check access mode for address space area.
669 *
670 * The address space area must be locked prior to this call.
671 *
672 * @param area Address space area.
673 * @param access Access mode.
674 *
675 * @return False if access violates area's permissions, true otherwise.
676 */
677bool as_area_check_access(as_area_t *area, pf_access_t access)
678{
679 int flagmap[] = {
680 [PF_ACCESS_READ] = AS_AREA_READ,
681 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
682 [PF_ACCESS_EXEC] = AS_AREA_EXEC
683 };
684
685 if (!(area->flags & flagmap[access]))
686 return false;
687
688 return true;
689}
690
[20d50a1]691/** Handle page fault within the current address space.
692 *
[8182031]693 * This is the high-level page fault handler. It decides
694 * whether the page fault can be resolved by any backend
695 * and if so, it invokes the backend to resolve the page
696 * fault.
697 *
[20d50a1]698 * Interrupts are assumed disabled.
699 *
700 * @param page Faulting page.
[567807b1]701 * @param access Access mode that caused the fault (i.e. read/write/exec).
[e3c762cd]702 * @param istate Pointer to interrupted state.
[20d50a1]703 *
[8182031]704 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
705 * fault was caused by copy_to_uspace() or copy_from_uspace().
[20d50a1]706 */
[7f1c620]707int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
[20d50a1]708{
[2299914]709 pte_t *pte;
[d3e7ff4]710 as_area_t *area;
[20d50a1]711
[1068f6a]712 if (!THREAD)
[8182031]713 return AS_PF_FAULT;
[1068f6a]714
[20d50a1]715 ASSERT(AS);
[2299914]716
[1068f6a]717 mutex_lock(&AS->lock);
[d3e7ff4]718 area = find_area_and_lock(AS, page);
[20d50a1]719 if (!area) {
720 /*
721 * No area contained mapping for 'page'.
722 * Signal page fault to low-level handler.
723 */
[1068f6a]724 mutex_unlock(&AS->lock);
[e3c762cd]725 goto page_fault;
[20d50a1]726 }
727
[a9e8b39]728 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
729 /*
730 * The address space area is not fully initialized.
731 * Avoid possible race by returning error.
732 */
[1068f6a]733 mutex_unlock(&area->lock);
734 mutex_unlock(&AS->lock);
[e3c762cd]735 goto page_fault;
[a9e8b39]736 }
737
[0ee077ee]738 if (!area->backend || !area->backend->page_fault) {
[8182031]739 /*
740 * The address space area is not backed by any backend
741 * or the backend cannot handle page faults.
742 */
743 mutex_unlock(&area->lock);
744 mutex_unlock(&AS->lock);
745 goto page_fault;
746 }
[1ace9ea]747
[2299914]748 page_table_lock(AS, false);
749
750 /*
751 * To avoid race condition between two page faults
752 * on the same address, we need to make sure
753 * the mapping has not been already inserted.
754 */
755 if ((pte = page_mapping_find(AS, page))) {
756 if (PTE_PRESENT(pte)) {
[fb84455]757 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
758 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
759 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
760 page_table_unlock(AS, false);
761 mutex_unlock(&area->lock);
762 mutex_unlock(&AS->lock);
763 return AS_PF_OK;
764 }
[2299914]765 }
766 }
[20d50a1]767
768 /*
[8182031]769 * Resort to the backend page fault handler.
[20d50a1]770 */
[0ee077ee]771 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
[8182031]772 page_table_unlock(AS, false);
773 mutex_unlock(&area->lock);
774 mutex_unlock(&AS->lock);
775 goto page_fault;
776 }
[20d50a1]777
[8182031]778 page_table_unlock(AS, false);
[1068f6a]779 mutex_unlock(&area->lock);
780 mutex_unlock(&AS->lock);
[e3c762cd]781 return AS_PF_OK;
782
783page_fault:
784 if (THREAD->in_copy_from_uspace) {
785 THREAD->in_copy_from_uspace = false;
[7f1c620]786 istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
[e3c762cd]787 } else if (THREAD->in_copy_to_uspace) {
788 THREAD->in_copy_to_uspace = false;
[7f1c620]789 istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
[e3c762cd]790 } else {
791 return AS_PF_FAULT;
792 }
793
794 return AS_PF_DEFER;
[20d50a1]795}
796
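/*
 * Illustrative sketch, not part of the original file: how an
 * architecture's low-level fault handler (assumed, not shown here) might
 * drive this function for a faulting read access.
 */
static void example_handle_read_fault(uintptr_t badvaddr, istate_t *istate)
{
	if (as_page_fault(badvaddr, PF_ACCESS_READ, istate) == AS_PF_FAULT)
		panic("unresolved page fault at %p\n", badvaddr);
	/*
	 * AS_PF_OK means the backend installed a mapping; AS_PF_DEFER means
	 * the return address in istate was redirected to one of the
	 * copy_*_uspace() failover addresses.
	 */
}
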
[7e4e532]797/** Switch address spaces.
[1068f6a]798 *
799 * Note that this function cannot sleep as it is essentially a part of
[47800e0]800 * scheduling. Sleeping here would lead to deadlock on wakeup.
[20d50a1]801 *
[7e4e532]802 * @param old Old address space or NULL.
803 * @param new New address space.
[20d50a1]804 */
[7e4e532]805void as_switch(as_t *old, as_t *new)
[20d50a1]806{
807 ipl_t ipl;
[7e4e532]808 bool needs_asid = false;
[4512d7e]809
[20d50a1]810 ipl = interrupts_disable();
[47800e0]811 spinlock_lock(&inactive_as_with_asid_lock);
[7e4e532]812
813 /*
814 * First, take care of the old address space.
815 */
816 if (old) {
[1068f6a]817 mutex_lock_active(&old->lock);
[47800e0]818 ASSERT(old->cpu_refcount);
819 if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
[7e4e532]820 /*
821 * The old address space is no longer active on
822 * any processor. It can be appended to the
823 * list of inactive address spaces with assigned
824 * ASID.
825 */
826 ASSERT(old->asid != ASID_INVALID);
827 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
828 }
[1068f6a]829 mutex_unlock(&old->lock);
[57da95c]830
831 /*
832 * Perform architecture-specific tasks when the address space
833 * is being removed from the CPU.
834 */
835 as_deinstall_arch(old);
[7e4e532]836 }
837
838 /*
839 * Second, prepare the new address space.
840 */
[1068f6a]841 mutex_lock_active(&new->lock);
[47800e0]842 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
[7e4e532]843 if (new->asid != ASID_INVALID)
844 list_remove(&new->inactive_as_with_asid_link);
845 else
846 needs_asid = true; /* defer call to asid_get() until new->lock is released */
847 }
848 SET_PTL0_ADDRESS(new->page_table);
[1068f6a]849 mutex_unlock(&new->lock);
[20d50a1]850
[7e4e532]851 if (needs_asid) {
852 /*
853 * Allocation of new ASID was deferred
854 * until now in order to avoid deadlock.
855 */
856 asid_t asid;
857
858 asid = asid_get();
[1068f6a]859 mutex_lock_active(&new->lock);
[7e4e532]860 new->asid = asid;
[1068f6a]861 mutex_unlock(&new->lock);
[7e4e532]862 }
[47800e0]863 spinlock_unlock(&inactive_as_with_asid_lock);
[7e4e532]864 interrupts_restore(ipl);
865
[20d50a1]866 /*
867 * Perform architecture-specific steps.
[4512d7e]868 * (e.g. write ASID to hardware register etc.)
[20d50a1]869 */
[7e4e532]870 as_install_arch(new);
[20d50a1]871
[7e4e532]872 AS = new;
[20d50a1]873}
[6a3c9a7]874
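/*
 * Illustrative usage, not part of the original file: the scheduler
 * (assumed, not shown here) is expected to call as_switch() when the
 * next thread belongs to a different task, along the lines of
 *
 *	as_switch(THREAD->task->as, new_task->as);
 *
 * which keeps cpu_refcount, the inactive_as_with_asid_head list and ASID
 * assignment consistent.
 */
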
[df0103f7]875/** Convert address space area flags to page flags.
[6a3c9a7]876 *
[df0103f7]877 * @param aflags Flags of some address space area.
[6a3c9a7]878 *
[df0103f7]879 * @return Flags to be passed to page_mapping_insert().
[6a3c9a7]880 */
[df0103f7]881int area_flags_to_page_flags(int aflags)
[6a3c9a7]882{
883 int flags;
884
[9a8d91b]885 flags = PAGE_USER | PAGE_PRESENT;
[c23502d]886
[df0103f7]887 if (aflags & AS_AREA_READ)
[c23502d]888 flags |= PAGE_READ;
889
[df0103f7]890 if (aflags & AS_AREA_WRITE)
[c23502d]891 flags |= PAGE_WRITE;
892
[df0103f7]893 if (aflags & AS_AREA_EXEC)
[c23502d]894 flags |= PAGE_EXEC;
[6a3c9a7]895
[0ee077ee]896 if (aflags & AS_AREA_CACHEABLE)
[9a8d91b]897 flags |= PAGE_CACHEABLE;
898
[6a3c9a7]899 return flags;
900}
[ef67bab]901
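/*
 * Worked example, not part of the original file: an area created with
 * AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE maps to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which is what eventually reaches page_mapping_insert().
 */
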
[df0103f7]902/** Compute flags for virtual address translation subsystem.
903 *
904 * The address space area must be locked.
905 * Interrupts must be disabled.
906 *
907 * @param a Address space area.
908 *
909 * @return Flags to be used in page_mapping_insert().
910 */
[8182031]911int as_area_get_flags(as_area_t *a)
[df0103f7]912{
913 return area_flags_to_page_flags(a->flags);
914}
915
[ef67bab]916/** Create page table.
917 *
918 * Depending on architecture, create either address space
919 * private or global page table.
920 *
921 * @param flags Flags saying whether the page table is for kernel address space.
922 *
923 * @return First entry of the page table.
924 */
925pte_t *page_table_create(int flags)
926{
927 ASSERT(as_operations);
928 ASSERT(as_operations->page_table_create);
929
930 return as_operations->page_table_create(flags);
931}
[d3e7ff4]932
[482826d]933/** Destroy page table.
934 *
935 * Destroy page table in architecture specific way.
936 *
937 * @param page_table Physical address of PTL0.
938 */
939void page_table_destroy(pte_t *page_table)
940{
941 ASSERT(as_operations);
942 ASSERT(as_operations->page_table_destroy);
943
944 as_operations->page_table_destroy(page_table);
945}
946
[2299914]947/** Lock page table.
948 *
949 * This function should be called before any page_mapping_insert(),
950 * page_mapping_remove() and page_mapping_find().
951 *
952 * Locking order is such that address space areas must be locked
953 * prior to this call. Address space can be locked prior to this
954 * call in which case the lock argument is false.
955 *
956 * @param as Address space.
[9179d0a]957 * @param lock If false, do not attempt to lock as->lock.
[2299914]958 */
959void page_table_lock(as_t *as, bool lock)
960{
961 ASSERT(as_operations);
962 ASSERT(as_operations->page_table_lock);
963
964 as_operations->page_table_lock(as, lock);
965}
966
967/** Unlock page table.
968 *
969 * @param as Address space.
[9179d0a]970 * @param unlock If false, do not attempt to unlock as->lock.
[2299914]971 */
972void page_table_unlock(as_t *as, bool unlock)
973{
974 ASSERT(as_operations);
975 ASSERT(as_operations->page_table_unlock);
976
977 as_operations->page_table_unlock(as, unlock);
978}
979
[d3e7ff4]980
981/** Find address space area and lock it.
982 *
983 * The address space must be locked and interrupts must be disabled.
984 *
985 * @param as Address space.
986 * @param va Virtual address.
987 *
988 * @return Locked address space area containing va on success or NULL on failure.
989 */
[7f1c620]990as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
[d3e7ff4]991{
992 as_area_t *a;
[252127e]993 btree_node_t *leaf, *lnode;
994 int i;
995
996 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
997 if (a) {
998 /* va is the base address of an address space area */
[1068f6a]999 mutex_lock(&a->lock);
[252127e]1000 return a;
1001 }
[d3e7ff4]1002
[252127e]1003 /*
[c47912f]1004 * Search the leaf node and the rightmost record of its left neighbour
[252127e]1005 * to find out whether this is a miss or va belongs to an address
1006 * space area found there.
1007 */
1008
1009 /* First, search the leaf node itself. */
1010 for (i = 0; i < leaf->keys; i++) {
1011 a = (as_area_t *) leaf->value[i];
[1068f6a]1012 mutex_lock(&a->lock);
[252127e]1013 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1014 return a;
1015 }
[1068f6a]1016 mutex_unlock(&a->lock);
[252127e]1017 }
[d3e7ff4]1018
[252127e]1019 /*
[c47912f]1020 * Second, locate the left neighbour and test its last record.
[b26db0c]1021 * Because of its position in the B+tree, it must have base < va.
[252127e]1022 */
[c47912f]1023 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
[252127e]1024 a = (as_area_t *) lnode->value[lnode->keys - 1];
[1068f6a]1025 mutex_lock(&a->lock);
[252127e]1026 if (va < a->base + a->pages * PAGE_SIZE) {
[37e7d2b9]1027 return a;
[252127e]1028 }
[1068f6a]1029 mutex_unlock(&a->lock);
[d3e7ff4]1030 }
1031
1032 return NULL;
1033}
[37e7d2b9]1034
1035/** Check area conflicts with other areas.
1036 *
1037 * The address space must be locked and interrupts must be disabled.
1038 *
1039 * @param as Address space.
1040 * @param va Starting virtual address of the area being tested.
1041 * @param size Size of the area being tested.
1042 * @param avoid_area Do not touch this area.
1043 *
1044 * @return True if there is no conflict, false otherwise.
1045 */
[7f1c620]1046bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
[37e7d2b9]1047{
1048 as_area_t *a;
[252127e]1049 btree_node_t *leaf, *node;
1050 int i;
[37e7d2b9]1051
[5a7d9d1]1052 /*
1053 * We don't want any area to have conflicts with NULL page.
1054 */
1055 if (overlaps(va, size, NULL, PAGE_SIZE))
1056 return false;
1057
[252127e]1058 /*
1059 * The leaf node is found in O(log n), where n is proportional to
1060 * the number of address space areas belonging to as.
1061 * The check for conflicts is then attempted on the rightmost
[c47912f]1062 * record in the left neighbour, the leftmost record in the right
1063 * neighbour and all records in the leaf node itself.
[252127e]1064 */
1065
1066 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1067 if (a != avoid_area)
1068 return false;
1069 }
1070
1071 /* First, check the two border cases. */
[c47912f]1072 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
[252127e]1073 a = (as_area_t *) node->value[node->keys - 1];
[1068f6a]1074 mutex_lock(&a->lock);
[252127e]1075 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1076 mutex_unlock(&a->lock);
[252127e]1077 return false;
1078 }
[1068f6a]1079 mutex_unlock(&a->lock);
[252127e]1080 }
[c47912f]1081 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
[252127e]1082 a = (as_area_t *) node->value[0];
[1068f6a]1083 mutex_lock(&a->lock);
[252127e]1084 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1085 mutex_unlock(&a->lock);
[252127e]1086 return false;
1087 }
[1068f6a]1088 mutex_unlock(&a->lock);
[252127e]1089 }
1090
1091 /* Second, check the leaf node. */
1092 for (i = 0; i < leaf->keys; i++) {
1093 a = (as_area_t *) leaf->value[i];
[37e7d2b9]1094
1095 if (a == avoid_area)
1096 continue;
[252127e]1097
[1068f6a]1098 mutex_lock(&a->lock);
[252127e]1099 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1100 mutex_unlock(&a->lock);
[252127e]1101 return false;
1102 }
[1068f6a]1103 mutex_unlock(&a->lock);
[5a7d9d1]1104 }
[37e7d2b9]1105
[5a7d9d1]1106 /*
1107 * So far, the area does not conflict with other areas.
1108 * Check if it doesn't conflict with kernel address space.
1109 */
1110 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1111 return !overlaps(va, size,
1112 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
[37e7d2b9]1113 }
1114
1115 return true;
1116}
[df0103f7]1117
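/*
 * Worked example, not part of the original file (4 KiB pages assumed):
 * with existing areas [0x10000, 4 pages) and [0x20000, 2 pages), testing
 * va = 0x12000 with size = 2 * PAGE_SIZE overlaps the first area and
 * fails, whereas va = 0x14000 with the same size touches neither
 * neighbour and passes, provided it also stays clear of the kernel
 * address space.
 */
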
[1068f6a]1118/** Return size of the address space area with the given base. */
[7f1c620]1119size_t as_get_size(uintptr_t base)
[7c23af9]1120{
1121 ipl_t ipl;
1122 as_area_t *src_area;
1123 size_t size;
1124
1125 ipl = interrupts_disable();
1126 src_area = find_area_and_lock(AS, base);
1127 if (src_area){
1128 size = src_area->pages * PAGE_SIZE;
[1068f6a]1129 mutex_unlock(&src_area->lock);
[7c23af9]1130 } else {
1131 size = 0;
1132 }
1133 interrupts_restore(ipl);
1134 return size;
1135}
1136
[25bf215]1137/** Mark portion of address space area as used.
1138 *
1139 * The address space area must be already locked.
1140 *
1141 * @param a Address space area.
1142 * @param page First page to be marked.
1143 * @param count Number of pages to be marked.
1144 *
1145 * @return 0 on failure and 1 on success.
1146 */
[7f1c620]1147int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
[25bf215]1148{
1149 btree_node_t *leaf, *node;
1150 count_t pages;
1151 int i;
1152
1153 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1154 ASSERT(count);
1155
1156 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1157 if (pages) {
1158 /*
1159 * We hit the beginning of some used space.
1160 */
1161 return 0;
1162 }
1163
[a6cb8cb]1164 if (!leaf->keys) {
1165 btree_insert(&a->used_space, page, (void *) count, leaf);
1166 return 1;
1167 }
1168
[25bf215]1169 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1170 if (node) {
[7f1c620]1171 uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
[25bf215]1172 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1173
1174 /*
1175 * Examine the possibility that the interval fits
1176 * somewhere between the rightmost interval of
1177 * the left neighbour and the first interval of the leaf.
1178 */
1179
1180 if (page >= right_pg) {
1181 /* Do nothing. */
1182 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1183 /* The interval intersects with the left interval. */
1184 return 0;
1185 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1186 /* The interval intersects with the right interval. */
1187 return 0;
1188 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1189 /* The interval can be added by merging the two already present intervals. */
[56789125]1190 node->value[node->keys - 1] += count + right_cnt;
[25bf215]1191 btree_remove(&a->used_space, right_pg, leaf);
1192 return 1;
1193 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1194 /* The interval can be added by simply growing the left interval. */
[56789125]1195 node->value[node->keys - 1] += count;
[25bf215]1196 return 1;
1197 } else if (page + count*PAGE_SIZE == right_pg) {
1198 /*
1199 * The interval can be added by simply moving the base of the right
1200 * interval down and increasing its size accordingly.
1201 */
[56789125]1202 leaf->value[0] += count;
[25bf215]1203 leaf->key[0] = page;
1204 return 1;
1205 } else {
1206 /*
1207 * The interval is between both neighbouring intervals,
1208 * but cannot be merged with any of them.
1209 */
1210 btree_insert(&a->used_space, page, (void *) count, leaf);
1211 return 1;
1212 }
1213 } else if (page < leaf->key[0]) {
[7f1c620]1214 uintptr_t right_pg = leaf->key[0];
[25bf215]1215 count_t right_cnt = (count_t) leaf->value[0];
1216
1217 /*
1218 * Investigate the border case in which the left neighbour does not
1219 * exist but the interval fits from the left.
1220 */
1221
1222 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1223 /* The interval intersects with the right interval. */
1224 return 0;
1225 } else if (page + count*PAGE_SIZE == right_pg) {
1226 /*
1227 * The interval can be added by moving the base of the right interval down
1228 * and increasing its size accordingly.
1229 */
1230 leaf->key[0] = page;
[56789125]1231 leaf->value[0] += count;
[25bf215]1232 return 1;
1233 } else {
1234 /*
1235 * The interval doesn't adjoin with the right interval.
1236 * It must be added individually.
1237 */
1238 btree_insert(&a->used_space, page, (void *) count, leaf);
1239 return 1;
1240 }
1241 }
1242
1243 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1244 if (node) {
[7f1c620]1245 uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
[25bf215]1246 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1247
1248 /*
1249 * Examine the possibility that the interval fits
1250 * somewhere between the leftmost interval of
1251 * the right neighbour and the last interval of the leaf.
1252 */
1253
1254 if (page < left_pg) {
1255 /* Do nothing. */
1256 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1257 /* The interval intersects with the left interval. */
1258 return 0;
1259 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1260 /* The interval intersects with the right interval. */
1261 return 0;
1262 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1263 /* The interval can be added by merging the two already present intervals. */
[56789125]1264 leaf->value[leaf->keys - 1] += count + right_cnt;
[25bf215]1265 btree_remove(&a->used_space, right_pg, node);
1266 return 1;
1267 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1268 /* The interval can be added by simply growing the left interval. */
[56789125]1269 leaf->value[leaf->keys - 1] += count;
[25bf215]1270 return 1;
1271 } else if (page + count*PAGE_SIZE == right_pg) {
1272 /*
1273 * The interval can be added by simply moving the base of the right
1274 * interval down and increasing its size accordingly.
1275 */
[56789125]1276 node->value[0] += count;
[25bf215]1277 node->key[0] = page;
1278 return 1;
1279 } else {
1280 /*
1281 * The interval is between both neighbouring intervals,
1282 * but cannot be merged with any of them.
1283 */
1284 btree_insert(&a->used_space, page, (void *) count, leaf);
1285 return 1;
1286 }
1287 } else if (page >= leaf->key[leaf->keys - 1]) {
[7f1c620]1288 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[25bf215]1289 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1290
1291 /*
1292 * Investigate the border case in which the right neighbour does not
1293 * exist but the interval fits from the right.
1294 */
1295
1296 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
[56789125]1297 /* The interval intersects with the left interval. */
[25bf215]1298 return 0;
1299 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1300 /* The interval can be added by growing the left interval. */
[56789125]1301 leaf->value[leaf->keys - 1] += count;
[25bf215]1302 return 1;
1303 } else {
1304 /*
1305 * The interval doesn't adjoin with the left interval.
1306 * It must be added individually.
1307 */
1308 btree_insert(&a->used_space, page, (void *) count, leaf);
1309 return 1;
1310 }
1311 }
1312
1313 /*
1314 * Note that if the algorithm made it thus far, the interval can fit only
1315 * between two other intervals of the leaf. The two border cases were already
1316 * resolved.
1317 */
1318 for (i = 1; i < leaf->keys; i++) {
1319 if (page < leaf->key[i]) {
[7f1c620]1320 uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
[25bf215]1321 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1322
1323 /*
1324 * The interval fits between left_pg and right_pg.
1325 */
1326
1327 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1328 /* The interval intersects with the left interval. */
1329 return 0;
1330 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1331 /* The interval intersects with the right interval. */
1332 return 0;
1333 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1334 /* The interval can be added by merging the two already present intervals. */
[56789125]1335 leaf->value[i - 1] += count + right_cnt;
[25bf215]1336 btree_remove(&a->used_space, right_pg, leaf);
1337 return 1;
1338 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1339 /* The interval can be added by simply growing the left interval. */
[56789125]1340 leaf->value[i - 1] += count;
[25bf215]1341 return 1;
1342 } else if (page + count*PAGE_SIZE == right_pg) {
1343 /*
1344 * The interval can be added by simply moving the base of the right
1345 * interval down and increasing its size accordingly.
1346 */
[56789125]1347 leaf->value[i] += count;
[25bf215]1348 leaf->key[i] = page;
1349 return 1;
1350 } else {
1351 /*
1352 * The interval is between both neighbouring intervals,
1353 * but cannot be merged with any of them.
1354 */
1355 btree_insert(&a->used_space, page, (void *) count, leaf);
1356 return 1;
1357 }
1358 }
1359 }
1360
[fbf7b4c]1361 panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
[25bf215]1362}
1363
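/*
 * Worked example, not part of the original file (4 KiB pages assumed):
 * with used_space holding [0x1000, 2 pages] and [0x5000, 1 page],
 * inserting page = 0x4000, count = 1 adjoins the right interval only, so
 * that record becomes [0x4000, 2 pages]. A subsequent insert of
 * page = 0x3000, count = 1 adjoins both records and merges them into a
 * single [0x1000, 5 pages] interval.
 */
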
1364/** Mark portion of address space area as unused.
1365 *
1366 * The address space area must be already locked.
1367 *
1368 * @param a Address space area.
1369 * @param page First page to be marked.
1370 * @param count Number of pages to be marked.
1371 *
1372 * @return 0 on failure and 1 on success.
1373 */
[7f1c620]1374int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
[25bf215]1375{
1376 btree_node_t *leaf, *node;
1377 count_t pages;
1378 int i;
1379
1380 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1381 ASSERT(count);
1382
1383 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1384 if (pages) {
1385 /*
1386 * We are lucky, page is the beginning of some interval.
1387 */
1388 if (count > pages) {
1389 return 0;
1390 } else if (count == pages) {
1391 btree_remove(&a->used_space, page, leaf);
[56789125]1392 return 1;
[25bf215]1393 } else {
1394 /*
1395 * Find the respective interval.
1396 * Decrease its size and relocate its start address.
1397 */
1398 for (i = 0; i < leaf->keys; i++) {
1399 if (leaf->key[i] == page) {
1400 leaf->key[i] += count*PAGE_SIZE;
[56789125]1401 leaf->value[i] -= count;
[25bf215]1402 return 1;
1403 }
1404 }
1405 goto error;
1406 }
1407 }
1408
1409 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1410 if (node && page < leaf->key[0]) {
[7f1c620]1411 uintptr_t left_pg = node->key[node->keys - 1];
[25bf215]1412 count_t left_cnt = (count_t) node->value[node->keys - 1];
1413
1414 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1415 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1416 /*
1417 * The interval is contained in the rightmost interval
1418 * of the left neighbour and can be removed by
1419 * updating the size of the bigger interval.
1420 */
[56789125]1421 node->value[node->keys - 1] -= count;
[25bf215]1422 return 1;
1423 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1424 count_t new_cnt;
[25bf215]1425
1426 /*
1427 * The interval is contained in the rightmost interval
1428 * of the left neighbour but its removal requires
1429 * both updating the size of the original interval and
1430 * also inserting a new interval.
1431 */
[56789125]1432 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1433 node->value[node->keys - 1] -= count + new_cnt;
[25bf215]1434 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1435 return 1;
1436 }
1437 }
1438 return 0;
1439 } else if (page < leaf->key[0]) {
1440 return 0;
1441 }
1442
1443 if (page > leaf->key[leaf->keys - 1]) {
[7f1c620]1444 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[25bf215]1445 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1446
1447 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1448 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1449 /*
1450 * The interval is contained in the rightmost interval
1451 * of the leaf and can be removed by updating the size
1452 * of the bigger interval.
1453 */
[56789125]1454 leaf->value[leaf->keys - 1] -= count;
[25bf215]1455 return 1;
1456 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1457 count_t new_cnt;
[25bf215]1458
1459 /*
1460 * The interval is contained in the rightmost interval
1461 * of the leaf but its removal requires both updating
1462 * the size of the original interval and
1463 * also inserting a new interval.
1464 */
[56789125]1465 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1466 leaf->value[leaf->keys - 1] -= count + new_cnt;
[25bf215]1467 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1468 return 1;
1469 }
1470 }
1471 return 0;
1472 }
1473
1474 /*
1475 * The border cases have been already resolved.
1476 * Now the interval can be only between intervals of the leaf.
1477 */
1478 for (i = 1; i < leaf->keys - 1; i++) {
1479 if (page < leaf->key[i]) {
[7f1c620]1480 uintptr_t left_pg = leaf->key[i - 1];
[25bf215]1481 count_t left_cnt = (count_t) leaf->value[i - 1];
1482
1483 /*
1484 * Now the interval is between intervals corresponding to (i - 1) and i.
1485 */
1486 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1487 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1488 /*
1489 * The interval is contained in the interval (i - 1)
1490 * of the leaf and can be removed by updating the size
1491 * of the bigger interval.
1492 */
[56789125]1493 leaf->value[i - 1] -= count;
[25bf215]1494 return 1;
1495 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1496 count_t new_cnt;
[25bf215]1497
1498 /*
1499 * The interval is contained in the interval (i - 1)
1500 * of the leaf but its removal requires both updating
1501 * the size of the original interval and
1502 * also inserting a new interval.
1503 */
[56789125]1504 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1505 leaf->value[i - 1] -= count + new_cnt;
[25bf215]1506 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1507 return 1;
1508 }
1509 }
1510 return 0;
1511 }
1512 }
1513
1514error:
[fbf7b4c]1515 panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
[25bf215]1516}
1517
[8182031]1518/** Remove reference to address space area share info.
1519 *
1520 * If the reference count drops to 0, the sh_info is deallocated.
1521 *
1522 * @param sh_info Pointer to address space area share info.
1523 */
1524void sh_info_remove_reference(share_info_t *sh_info)
1525{
1526 bool dealloc = false;
1527
1528 mutex_lock(&sh_info->lock);
1529 ASSERT(sh_info->refcount);
1530 if (--sh_info->refcount == 0) {
1531 dealloc = true;
[f8d069e8]1532 link_t *cur;
[8182031]1533
1534 /*
1535 * Now walk carefully the pagemap B+tree and free/remove
1536 * reference from all frames found there.
1537 */
[f8d069e8]1538 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
[8182031]1539 btree_node_t *node;
[f8d069e8]1540 int i;
[8182031]1541
[f8d069e8]1542 node = list_get_instance(cur, btree_node_t, leaf_link);
1543 for (i = 0; i < node->keys; i++)
[7f1c620]1544 frame_free((uintptr_t) node->value[i]);
[8182031]1545 }
1546
1547 }
1548 mutex_unlock(&sh_info->lock);
1549
1550 if (dealloc) {
1551 btree_destroy(&sh_info->pagemap);
1552 free(sh_info);
1553 }
1554}
1555
[df0103f7]1556/*
1557 * Address space related syscalls.
1558 */
1559
1560/** Wrapper for as_area_create(). */
[7f1c620]1561unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
[df0103f7]1562{
[0ee077ee]1563 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
[7f1c620]1564 return (unative_t) address;
[df0103f7]1565 else
[7f1c620]1566 return (unative_t) -1;
[df0103f7]1567}
1568
[c6e314a]1569/** Wrapper for as_area_resize(). */
[7f1c620]1570unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
[df0103f7]1571{
[7f1c620]1572 return (unative_t) as_area_resize(AS, address, size, 0);
[7242a78e]1573}
1574
[c6e314a]1575/** Wrapper for as_area_destroy(). */
[7f1c620]1576unative_t sys_as_area_destroy(uintptr_t address)
[7242a78e]1577{
[7f1c620]1578 return (unative_t) as_area_destroy(AS, address);
[df0103f7]1579}
[b45c443]1580
[64c2ad5]1581/** Print out information about address space.
1582 *
1583 * @param as Address space.
1584 */
1585void as_print(as_t *as)
1586{
1587 ipl_t ipl;
1588
1589 ipl = interrupts_disable();
1590 mutex_lock(&as->lock);
1591
1592 /* print out info about address space areas */
1593 link_t *cur;
1594 for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
[7ba7c6d]1595 btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
[64c2ad5]1596
1597 int i;
1598 for (i = 0; i < node->keys; i++) {
[7ba7c6d]1599 as_area_t *area = node->value[i];
[64c2ad5]1600
1601 mutex_lock(&area->lock);
1602 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1603 area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1604 mutex_unlock(&area->lock);
1605 }
1606 }
1607
1608 mutex_unlock(&as->lock);
1609 interrupts_restore(ipl);
1610}
1611
[cc73a8a1]1612/** @}
[b45c443]1613 */