source: mainline/kernel/generic/src/mm/as.c@ 6f4495f5

Last change on this file since 6f4495f5 was 6f4495f5, checked in by Jakub Jermar <jakub@…>, 18 years ago

Indentation and formatting changes even Martin will like :-)

[20d50a1]1/*
[df4ed85]2 * Copyright (c) 2001-2006 Jakub Jermar
[20d50a1]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Address space related functions.
36 *
[20d50a1]37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
[9179d0a]40 *
41 * Functionality provided by this file allows one to
[cc73a8a1]42 * create address spaces and create, resize and share
[9179d0a]43 * address space areas.
44 *
45 * @see page.c
46 *
[20d50a1]47 */
48
49#include <mm/as.h>
[ef67bab]50#include <arch/mm/as.h>
[20d50a1]51#include <mm/page.h>
52#include <mm/frame.h>
[085d973]53#include <mm/slab.h>
[20d50a1]54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
[2802767]57#include <genarch/mm/page_ht.h>
[4512d7e]58#include <mm/asid.h>
[20d50a1]59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
[1068f6a]61#include <synch/mutex.h>
[5c9a08b]62#include <adt/list.h>
[252127e]63#include <adt/btree.h>
[df0103f7]64#include <proc/task.h>
[e3c762cd]65#include <proc/thread.h>
[20d50a1]66#include <arch/asm.h>
[df0103f7]67#include <panic.h>
[20d50a1]68#include <debug.h>
[df0103f7]69#include <print.h>
[20d50a1]70#include <memstr.h>
[5a7d9d1]71#include <macros.h>
[20d50a1]72#include <arch.h>
[df0103f7]73#include <errno.h>
74#include <config.h>
[25bf215]75#include <align.h>
[df0103f7]76#include <arch/types.h>
77#include <typedefs.h>
[e3c762cd]78#include <syscall/copy.h>
79#include <arch/interrupt.h>
[20d50a1]80
[92778f2]81#ifdef CONFIG_VIRT_IDX_DCACHE
82#include <arch/mm/cache.h>
83#endif /* CONFIG_VIRT_IDX_DCACHE */
84
[cc73a8a1]85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
88 */
[ef67bab]89as_operations_t *as_operations = NULL;
[20d50a1]90
[57da95c]91/**
92 * Slab for as_t objects.
93 */
94static slab_cache_t *as_slab;
95
[6f4495f5]96/**
97 * This lock protects the inactive_as_with_asid_head list. It must be
98 * acquired before any as_t mutex.
99 */
[47800e0]100SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
[7e4e532]101
102/**
103 * This list contains address spaces that are not active on any
104 * processor and that have a valid ASID.
105 */
106LIST_INITIALIZE(inactive_as_with_asid_head);
107
[071a8ae6]108/** Kernel address space. */
109as_t *AS_KERNEL = NULL;
110
[df0103f7]111static int area_flags_to_page_flags(int aflags);
[7f1c620]112static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
[6f4495f5]113static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
114 as_area_t *avoid_area);
[8182031]115static void sh_info_remove_reference(share_info_t *sh_info);
[20d50a1]116
[29b2bbf]117static int as_constructor(void *obj, int flags)
118{
119 as_t *as = (as_t *) obj;
120 int rc;
121
122 link_initialize(&as->inactive_as_with_asid_link);
123 mutex_initialize(&as->lock);
124
125 rc = as_constructor_arch(as, flags);
126
127 return rc;
128}
129
130static int as_destructor(void *obj)
131{
132 as_t *as = (as_t *) obj;
133
134 return as_destructor_arch(as);
135}
136
[ef67bab]137/** Initialize address space subsystem. */
138void as_init(void)
139{
140 as_arch_init();
[57da95c]141
[29b2bbf]142 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
[6f4495f5]143 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[57da95c]144
[8e1ea655]145 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]146 if (!AS_KERNEL)
147 panic("can't create kernel address space\n");
148
[ef67bab]149}
150
[071a8ae6]151/** Create address space.
152 *
153 * @param flags Flags that influence way in wich the address space is created.
154 */
[ef67bab]155as_t *as_create(int flags)
[20d50a1]156{
157 as_t *as;
158
[57da95c]159 as = (as_t *) slab_alloc(as_slab, 0);
[29b2bbf]160 (void) as_create_arch(as, 0);
161
[252127e]162 btree_create(&as->as_area_btree);
[bb68433]163
164 if (flags & FLAG_AS_KERNEL)
165 as->asid = ASID_KERNEL;
166 else
167 as->asid = ASID_INVALID;
168
[482826d]169 as->refcount = 0;
[47800e0]170 as->cpu_refcount = 0;
[bb68433]171 as->page_table = page_table_create(flags);
[20d50a1]172
173 return as;
174}
175
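/*
 * A minimal usage sketch (hypothetical helper, not a kernel API):
 * creating an ordinary user address space. Without FLAG_AS_KERNEL,
 * the ASID stays ASID_INVALID until the address space is first
 * installed on a processor by as_switch().
 */
static as_t *example_create_user_as(void)
{
	as_t *as;

	as = as_create(0);	/* no FLAG_AS_KERNEL */
	ASSERT(as->asid == ASID_INVALID);

	return as;
}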
[482826d]176/** Destroy address space.
177 *
[6f4495f5]178 * When there are no tasks referencing this address space (i.e. its refcount is
179 * zero), the address space can be destroyed.
[482826d]180 */
181void as_destroy(as_t *as)
[5be1923]182{
[482826d]183 ipl_t ipl;
[6f9a9bc]184 bool cond;
[482826d]185
186 ASSERT(as->refcount == 0);
187
188 /*
189 * Since there is no reference to this address space,
190 * it is safe not to lock its mutex.
191 */
192 ipl = interrupts_disable();
193 spinlock_lock(&inactive_as_with_asid_lock);
[31e8ddd]194 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
[6f9a9bc]195 if (as != AS && as->cpu_refcount == 0)
[31e8ddd]196 list_remove(&as->inactive_as_with_asid_link);
[482826d]197 asid_put(as->asid);
198 }
199 spinlock_unlock(&inactive_as_with_asid_lock);
200
201 /*
202 * Destroy address space areas of the address space.
[8440473]203 * The B+tree must be walked carefully because it is
[6f9a9bc]204 * also being destroyed.
[482826d]205 */
[6f9a9bc]206 for (cond = true; cond; ) {
[482826d]207 btree_node_t *node;
[6f9a9bc]208
209 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
[6f4495f5]210 node = list_get_instance(as->as_area_btree.leaf_head.next,
211 btree_node_t, leaf_link);
[6f9a9bc]212
213 if ((cond = node->keys)) {
214 as_area_destroy(as, node->key[0]);
215 }
[482826d]216 }
[f8d069e8]217
[152b2b0]218 btree_destroy(&as->as_area_btree);
[482826d]219 page_table_destroy(as->page_table);
[5be1923]220
[482826d]221 interrupts_restore(ipl);
222
[57da95c]223 slab_free(as_slab, as);
[5be1923]224}
225
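/*
 * A sketch of the intended refcount discipline (hypothetical fragment;
 * the task bookkeeping is simplified here): the last task dropping its
 * reference is the one that may call as_destroy().
 *
 *	if (--task->as->refcount == 0)
 *		as_destroy(task->as);
 */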
[20d50a1]226/** Create address space area of common attributes.
227 *
228 * The created address space area is added to the target address space.
229 *
230 * @param as Target address space.
[a9e8b39]231 * @param flags Flags of the area memory.
[37e7d2b9]232 * @param size Size of area.
[20d50a1]233 * @param base Base address of area.
[a9e8b39]234 * @param attrs Attributes of the area.
[8182031]235 * @param backend Address space area backend. NULL if no backend is used.
236 * @param backend_data NULL or a pointer to an array holding two void *.
[20d50a1]237 *
238 * @return Address space area on success or NULL on failure.
239 */
[c738d65]240as_area_t *
241as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
[0ee077ee]242 mem_backend_t *backend, mem_backend_data_t *backend_data)
[20d50a1]243{
244 ipl_t ipl;
245 as_area_t *a;
246
247 if (base % PAGE_SIZE)
[37e7d2b9]248 return NULL;
249
[dbbeb26]250 if (!size)
251 return NULL;
252
[37e7d2b9]253 /* Writeable executable areas are not supported. */
254 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
255 return NULL;
[20d50a1]256
257 ipl = interrupts_disable();
[1068f6a]258 mutex_lock(&as->lock);
[20d50a1]259
[37e7d2b9]260 if (!check_area_conflicts(as, base, size, NULL)) {
[1068f6a]261 mutex_unlock(&as->lock);
[37e7d2b9]262 interrupts_restore(ipl);
263 return NULL;
264 }
[20d50a1]265
[bb68433]266 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
267
[1068f6a]268 mutex_initialize(&a->lock);
[bb68433]269
[0ee077ee]270 a->as = as;
[c23502d]271 a->flags = flags;
[a9e8b39]272 a->attributes = attrs;
[37e7d2b9]273 a->pages = SIZE2FRAMES(size);
[bb68433]274 a->base = base;
[8182031]275 a->sh_info = NULL;
276 a->backend = backend;
[0ee077ee]277 if (backend_data)
278 a->backend_data = *backend_data;
279 else
[6f4495f5]280 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
281 0);
[0ee077ee]282
[25bf215]283 btree_create(&a->used_space);
[bb68433]284
[252127e]285 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
[20d50a1]286
[1068f6a]287 mutex_unlock(&as->lock);
[20d50a1]288 interrupts_restore(ipl);
[f9425006]289
[20d50a1]290 return a;
291}
292
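/*
 * Illustrative sketch (hypothetical helper): creating an anonymous,
 * cacheable read/write area, analogous to what sys_as_area_create()
 * below does on behalf of userspace. as_area_create() returns NULL
 * if base is not page-aligned, size is zero, or the area would
 * conflict with an existing one.
 */
static as_area_t *example_map_anonymous(as_t *as, uintptr_t base,
    size_t size)
{
	return as_area_create(as, AS_AREA_READ | AS_AREA_WRITE |
	    AS_AREA_CACHEABLE, size, base, AS_AREA_ATTR_NONE,
	    &anon_backend, NULL);
}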
[df0103f7]293/** Find address space area and change it.
294 *
295 * @param as Address space.
[6f4495f5]296 * @param address Virtual address belonging to the area to be changed. Must be
297 * page-aligned.
[df0103f7]298 * @param size New size of the virtual memory block starting at address.
299 * @param flags Flags influencing the remap operation. Currently unused.
300 *
[7242a78e]301 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]302 */
[7f1c620]303int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
[df0103f7]304{
[7242a78e]305 as_area_t *area;
[df0103f7]306 ipl_t ipl;
307 size_t pages;
308
309 ipl = interrupts_disable();
[1068f6a]310 mutex_lock(&as->lock);
[df0103f7]311
312 /*
313 * Locate the area.
314 */
315 area = find_area_and_lock(as, address);
316 if (!area) {
[1068f6a]317 mutex_unlock(&as->lock);
[df0103f7]318 interrupts_restore(ipl);
[7242a78e]319 return ENOENT;
[df0103f7]320 }
321
[0ee077ee]322 if (area->backend == &phys_backend) {
[df0103f7]323 /*
324 * Remapping of address space areas associated
325 * with memory mapped devices is not supported.
326 */
[1068f6a]327 mutex_unlock(&area->lock);
328 mutex_unlock(&as->lock);
[df0103f7]329 interrupts_restore(ipl);
[7242a78e]330 return ENOTSUP;
[df0103f7]331 }
[8182031]332 if (area->sh_info) {
333 /*
334 * Remapping of shared address space areas
335 * is not supported.
336 */
337 mutex_unlock(&area->lock);
338 mutex_unlock(&as->lock);
339 interrupts_restore(ipl);
340 return ENOTSUP;
341 }
[df0103f7]342
343 pages = SIZE2FRAMES((address - area->base) + size);
344 if (!pages) {
345 /*
346 * Zero size address space areas are not allowed.
347 */
[1068f6a]348 mutex_unlock(&area->lock);
349 mutex_unlock(&as->lock);
[df0103f7]350 interrupts_restore(ipl);
[7242a78e]351 return EPERM;
[df0103f7]352 }
353
354 if (pages < area->pages) {
[56789125]355 bool cond;
[7f1c620]356 uintptr_t start_free = area->base + pages * PAGE_SIZE;
[df0103f7]357
358 /*
359 * Shrinking the area.
360 * No need to check for overlaps.
361 */
362
[5552d60]363 /*
364 * Start TLB shootdown sequence.
365 */
[6f4495f5]366 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
367 pages * PAGE_SIZE, area->pages - pages);
[5552d60]368
[56789125]369 /*
370 * Remove frames belonging to used space starting from
371 * the highest addresses downwards until an overlap with
372 * the resized address space area is found. Note that this
373 * is also the right way to remove part of the used_space
374 * B+tree leaf list.
375 */
376 for (cond = true; cond;) {
377 btree_node_t *node;
378
379 ASSERT(!list_empty(&area->used_space.leaf_head));
[6f4495f5]380 node =
381 list_get_instance(area->used_space.leaf_head.prev,
382 btree_node_t, leaf_link);
[56789125]383 if ((cond = (bool) node->keys)) {
[7f1c620]384 uintptr_t b = node->key[node->keys - 1];
[6f4495f5]385 count_t c =
386 (count_t) node->value[node->keys - 1];
[56789125]387 int i = 0;
388
[6f4495f5]389 if (overlaps(b, c * PAGE_SIZE, area->base,
390 pages * PAGE_SIZE)) {
[56789125]391
[6f4495f5]392 if (b + c * PAGE_SIZE <= start_free) {
[56789125]393 /*
[6f4495f5]394 * The whole interval fits
395 * completely in the resized
396 * address space area.
[56789125]397 */
398 break;
399 }
400
401 /*
[6f4495f5]402 * Part of the interval corresponding
403 * to b and c overlaps with the resized
404 * address space area.
[56789125]405 */
406
407 cond = false; /* we are almost done */
408 i = (start_free - b) >> PAGE_WIDTH;
[6f4495f5]409 if (!used_space_remove(area, start_free,
410 c - i))
411 panic("Could not remove used "
412 "space.\n");
[56789125]413 } else {
414 /*
[6f4495f5]415 * The interval of used space can be
416 * completely removed.
[56789125]417 */
418 if (!used_space_remove(area, b, c))
[6f4495f5]419 panic("Could not remove used "
420 "space.\n");
[56789125]421 }
422
423 for (; i < c; i++) {
424 pte_t *pte;
425
426 page_table_lock(as, false);
[6f4495f5]427 pte = page_mapping_find(as, b +
428 i * PAGE_SIZE);
429 ASSERT(pte && PTE_VALID(pte) &&
430 PTE_PRESENT(pte));
431 if (area->backend &&
432 area->backend->frame_free) {
[0ee077ee]433 area->backend->frame_free(area,
[6f4495f5]434 b + i * PAGE_SIZE,
435 PTE_GET_FRAME(pte));
[8182031]436 }
[6f4495f5]437 page_mapping_remove(as, b +
438 i * PAGE_SIZE);
[56789125]439 page_table_unlock(as, false);
440 }
[df0103f7]441 }
442 }
[5552d60]443
[df0103f7]444 /*
[5552d60]445 * Finish TLB shootdown sequence.
[df0103f7]446 */
[6f4495f5]447 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
448 area->pages - pages);
[df0103f7]449 tlb_shootdown_finalize();
[f1d1f5d3]450
451 /*
452 * Invalidate software translation caches (e.g. TSB on sparc64).
453 */
[6f4495f5]454 as_invalidate_translation_cache(as, area->base +
455 pages * PAGE_SIZE, area->pages - pages);
[df0103f7]456 } else {
457 /*
458 * Growing the area.
459 * Check for overlaps with other address space areas.
460 */
[6f4495f5]461 if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
462 area)) {
[1068f6a]463 mutex_unlock(&area->lock);
464 mutex_unlock(&as->lock);
[df0103f7]465 interrupts_restore(ipl);
[7242a78e]466 return EADDRNOTAVAIL;
[df0103f7]467 }
468 }
469
470 area->pages = pages;
471
[1068f6a]472 mutex_unlock(&area->lock);
473 mutex_unlock(&as->lock);
[df0103f7]474 interrupts_restore(ipl);
475
[7242a78e]476 return 0;
477}
478
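/*
 * Illustrative sketch (hypothetical helper): doubling an area of the
 * current address space in place. Note that 'size' is the new size of
 * the block starting at 'address', so growing may fail with
 * EADDRNOTAVAIL when the enlarged area would collide with a neighbour.
 */
static int example_double_current_area(uintptr_t base)
{
	size_t size;

	size = as_get_size(base);
	if (!size)
		return ENOENT;

	return as_area_resize(AS, base, 2 * size, 0);
}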
479/** Destroy address space area.
480 *
481 * @param as Address space.
482 * @param address Address within the area to be deleted.
483 *
484 * @return Zero on success or a value from @ref errno.h on failure.
485 */
[7f1c620]486int as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]487{
488 as_area_t *area;
[7f1c620]489 uintptr_t base;
[f8d069e8]490 link_t *cur;
[7242a78e]491 ipl_t ipl;
492
493 ipl = interrupts_disable();
[1068f6a]494 mutex_lock(&as->lock);
[7242a78e]495
496 area = find_area_and_lock(as, address);
497 if (!area) {
[1068f6a]498 mutex_unlock(&as->lock);
[7242a78e]499 interrupts_restore(ipl);
500 return ENOENT;
501 }
502
[56789125]503 base = area->base;
504
[5552d60]505 /*
506 * Start TLB shootdown sequence.
507 */
[f1d1f5d3]508 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
[5552d60]509
[567807b1]510 /*
511 * Visit only the pages mapped by used_space B+tree.
512 */
[6f4495f5]513 for (cur = area->used_space.leaf_head.next;
514 cur != &area->used_space.leaf_head; cur = cur->next) {
[567807b1]515 btree_node_t *node;
[f8d069e8]516 int i;
[56789125]517
[f8d069e8]518 node = list_get_instance(cur, btree_node_t, leaf_link);
519 for (i = 0; i < node->keys; i++) {
[7f1c620]520 uintptr_t b = node->key[i];
[f8d069e8]521 count_t j;
[567807b1]522 pte_t *pte;
[56789125]523
[f8d069e8]524 for (j = 0; j < (count_t) node->value[i]; j++) {
[567807b1]525 page_table_lock(as, false);
[6f4495f5]526 pte = page_mapping_find(as, b + j * PAGE_SIZE);
527 ASSERT(pte && PTE_VALID(pte) &&
528 PTE_PRESENT(pte));
529 if (area->backend &&
530 area->backend->frame_free) {
531 area->backend->frame_free(area, b +
532 j * PAGE_SIZE, PTE_GET_FRAME(pte));
[56789125]533 }
[6f4495f5]534 page_mapping_remove(as, b + j * PAGE_SIZE);
[567807b1]535 page_table_unlock(as, false);
[7242a78e]536 }
537 }
538 }
[56789125]539
[7242a78e]540 /*
[5552d60]541 * Finish TLB shootdown sequence.
[7242a78e]542 */
[f1d1f5d3]543 tlb_invalidate_pages(as->asid, area->base, area->pages);
[7242a78e]544 tlb_shootdown_finalize();
[5552d60]545
[f1d1f5d3]546 /*
[6f4495f5]547 * Invalidate potential software translation caches (e.g. TSB on
548 * sparc64).
[f1d1f5d3]549 */
550 as_invalidate_translation_cache(as, area->base, area->pages);
551
[5552d60]552 btree_destroy(&area->used_space);
[7242a78e]553
[8d4f2ae]554 area->attributes |= AS_AREA_ATTR_PARTIAL;
[8182031]555
556 if (area->sh_info)
557 sh_info_remove_reference(area->sh_info);
558
[1068f6a]559 mutex_unlock(&area->lock);
[7242a78e]560
561 /*
562 * Remove the empty area from address space.
563 */
[f1d1f5d3]564 btree_remove(&as->as_area_btree, base, NULL);
[7242a78e]565
[8d4f2ae]566 free(area);
567
[f1d1f5d3]568 mutex_unlock(&as->lock);
[7242a78e]569 interrupts_restore(ipl);
570 return 0;
[df0103f7]571}
572
[8d6bc2d5]573/** Share address space area with another or the same address space.
[df0103f7]574 *
[0ee077ee]575 * Address space area mapping is shared with a new address space area.
576 * If the source address space area has not been shared so far,
577 * a new sh_info is created. The new address space area simply gets the
578 * sh_info of the source area. The process of duplicating the
579 * mapping is done through the backend share function.
[8d6bc2d5]580 *
[fd4d8c0]581 * @param src_as Pointer to source address space.
[a9e8b39]582 * @param src_base Base address of the source address space area.
[fd4d8c0]583 * @param acc_size Expected size of the source area.
[46fc2f9]584 * @param dst_as Pointer to destination address space.
[fd4d8c0]585 * @param dst_base Target base address.
586 * @param dst_flags_mask Destination address space area flags mask.
[df0103f7]587 *
[d0485c6]588 * @return Zero on success or ENOENT if there is no such task or if there is no
589 * such address space area, EPERM if there was a problem in accepting the area
590 * or ENOMEM if there was a problem in allocating destination address space
591 * area. ENOTSUP is returned if the address space area backend does not support
[f8ddd17]592 * sharing or if the kernel detects an attempt to create an illegal address
593 * alias.
[df0103f7]594 */
[7f1c620]595int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
596 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
[df0103f7]597{
598 ipl_t ipl;
[a9e8b39]599 int src_flags;
600 size_t src_size;
601 as_area_t *src_area, *dst_area;
[8d6bc2d5]602 share_info_t *sh_info;
[0ee077ee]603 mem_backend_t *src_backend;
604 mem_backend_data_t src_backend_data;
[d6e5cbc]605
[7c23af9]606 ipl = interrupts_disable();
[1068f6a]607 mutex_lock(&src_as->lock);
[7c23af9]608 src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]609 if (!src_area) {
[6fa476f7]610 /*
611 * Could not find the source address space area.
612 */
[1068f6a]613 mutex_unlock(&src_as->lock);
[6fa476f7]614 interrupts_restore(ipl);
615 return ENOENT;
616 }
[d0485c6]617
[0ee077ee]618 if (!src_area->backend || !src_area->backend->share) {
[8d6bc2d5]619 /*
[f47fd19]620 * There is no backend or the backend does not
[0ee077ee]621 * know how to share the area.
[8d6bc2d5]622 */
623 mutex_unlock(&src_area->lock);
624 mutex_unlock(&src_as->lock);
625 interrupts_restore(ipl);
626 return ENOTSUP;
627 }
628
[a9e8b39]629 src_size = src_area->pages * PAGE_SIZE;
630 src_flags = src_area->flags;
[0ee077ee]631 src_backend = src_area->backend;
632 src_backend_data = src_area->backend_data;
[1ec1fd8]633
634 /* Share the cacheable flag from the original mapping */
635 if (src_flags & AS_AREA_CACHEABLE)
636 dst_flags_mask |= AS_AREA_CACHEABLE;
637
[6f4495f5]638 if (src_size != acc_size ||
639 (src_flags & dst_flags_mask) != dst_flags_mask) {
[8d6bc2d5]640 mutex_unlock(&src_area->lock);
641 mutex_unlock(&src_as->lock);
[df0103f7]642 interrupts_restore(ipl);
643 return EPERM;
644 }
[8d6bc2d5]645
[f8ddd17]646#ifdef CONFIG_VIRT_IDX_DCACHE
647 if (!(dst_flags_mask & AS_AREA_EXEC)) {
648 if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
649 /*
650 * Refuse to create an illegal address alias.
651 */
652 mutex_unlock(&src_area->lock);
653 mutex_unlock(&src_as->lock);
654 interrupts_restore(ipl);
655 return ENOTSUP;
656 }
657 }
658#endif /* CONFIG_VIRT_IDX_DCACHE */
659
[8d6bc2d5]660 /*
661 * Now we are committed to sharing the area.
[8440473]662 * First, prepare the area for sharing.
[8d6bc2d5]663 * Then it will be safe to unlock it.
664 */
665 sh_info = src_area->sh_info;
666 if (!sh_info) {
667 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
668 mutex_initialize(&sh_info->lock);
669 sh_info->refcount = 2;
670 btree_create(&sh_info->pagemap);
671 src_area->sh_info = sh_info;
672 } else {
673 mutex_lock(&sh_info->lock);
674 sh_info->refcount++;
675 mutex_unlock(&sh_info->lock);
676 }
677
[0ee077ee]678 src_area->backend->share(src_area);
[8d6bc2d5]679
680 mutex_unlock(&src_area->lock);
681 mutex_unlock(&src_as->lock);
682
[df0103f7]683 /*
[a9e8b39]684 * Create copy of the source address space area.
685 * The destination area is created with AS_AREA_ATTR_PARTIAL
686 * attribute set which prevents race condition with
687 * preliminary as_page_fault() calls.
[fd4d8c0]688 * The flags of the source area are masked against dst_flags_mask
689 * to support sharing in less privileged mode.
[df0103f7]690 */
[76d7305]691 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
[6f4495f5]692 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
[a9e8b39]693 if (!dst_area) {
[df0103f7]694 /*
695 * Destination address space area could not be created.
696 */
[8d6bc2d5]697 sh_info_remove_reference(sh_info);
698
[df0103f7]699 interrupts_restore(ipl);
700 return ENOMEM;
701 }
[92778f2]702
[a9e8b39]703 /*
704 * Now the destination address space area has been
705 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]706 * attribute and set the sh_info.
[a9e8b39]707 */
[92778f2]708 mutex_lock(&dst_as->lock);
[1068f6a]709 mutex_lock(&dst_area->lock);
[a9e8b39]710 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]711 dst_area->sh_info = sh_info;
[1068f6a]712 mutex_unlock(&dst_area->lock);
[92778f2]713 mutex_unlock(&dst_as->lock);
714
[df0103f7]715 interrupts_restore(ipl);
716
717 return 0;
718}
719
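/*
 * Illustrative sketch (hypothetical helper): sharing an existing area
 * read-only with another address space at the same base. acc_size must
 * equal the source area size and the mask must be a subset of the
 * source flags, otherwise EPERM is returned.
 */
static int example_share_readonly(as_t *src_as, uintptr_t base,
    size_t size, as_t *dst_as)
{
	return as_area_share(src_as, base, size, dst_as, base,
	    AS_AREA_READ);
}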
[fb84455]720/** Check access mode for address space area.
721 *
722 * The address space area must be locked prior to this call.
723 *
724 * @param area Address space area.
725 * @param access Access mode.
726 *
727 * @return False if access violates area's permissions, true otherwise.
728 */
729bool as_area_check_access(as_area_t *area, pf_access_t access)
730{
731 int flagmap[] = {
732 [PF_ACCESS_READ] = AS_AREA_READ,
733 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
734 [PF_ACCESS_EXEC] = AS_AREA_EXEC
735 };
736
737 if (!(area->flags & flagmap[access]))
738 return false;
739
740 return true;
741}
742
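/*
 * Typical use in a backend page-fault handler, with the area already
 * locked (hypothetical fragment):
 *
 *	if (!as_area_check_access(area, access))
 *		return AS_PF_FAULT;
 */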
[20d50a1]743/** Handle page fault within the current address space.
744 *
[8182031]745 * This is the high-level page fault handler. It decides
746 * whether the page fault can be resolved by any backend
747 * and if so, it invokes the backend to resolve the page
748 * fault.
749 *
[20d50a1]750 * Interrupts are assumed disabled.
751 *
752 * @param page Faulting page.
[567807b1]753 * @param access Access mode that caused the fault (i.e. read/write/exec).
[e3c762cd]754 * @param istate Pointer to interrupted state.
[20d50a1]755 *
[8182031]756 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
757 * fault was caused by copy_to_uspace() or copy_from_uspace().
[20d50a1]758 */
[7f1c620]759int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
[20d50a1]760{
[2299914]761 pte_t *pte;
[d3e7ff4]762 as_area_t *area;
[20d50a1]763
[1068f6a]764 if (!THREAD)
[8182031]765 return AS_PF_FAULT;
[1068f6a]766
[20d50a1]767 ASSERT(AS);
[2299914]768
[1068f6a]769 mutex_lock(&AS->lock);
[d3e7ff4]770 area = find_area_and_lock(AS, page);
[20d50a1]771 if (!area) {
772 /*
773 * No area contained mapping for 'page'.
774 * Signal page fault to low-level handler.
775 */
[1068f6a]776 mutex_unlock(&AS->lock);
[e3c762cd]777 goto page_fault;
[20d50a1]778 }
779
[a9e8b39]780 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
781 /*
782 * The address space area is not fully initialized.
783 * Avoid possible race by returning error.
784 */
[1068f6a]785 mutex_unlock(&area->lock);
786 mutex_unlock(&AS->lock);
[e3c762cd]787 goto page_fault;
[a9e8b39]788 }
789
[0ee077ee]790 if (!area->backend || !area->backend->page_fault) {
[8182031]791 /*
792 * The address space area is not backed by any backend
793 * or the backend cannot handle page faults.
794 */
795 mutex_unlock(&area->lock);
796 mutex_unlock(&AS->lock);
797 goto page_fault;
798 }
[1ace9ea]799
[2299914]800 page_table_lock(AS, false);
801
802 /*
803 * To avoid race condition between two page faults
804 * on the same address, we need to make sure
805 * the mapping has not been already inserted.
806 */
807 if ((pte = page_mapping_find(AS, page))) {
808 if (PTE_PRESENT(pte)) {
[fb84455]809 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
[6f4495f5]810 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
811 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
[fb84455]812 page_table_unlock(AS, false);
813 mutex_unlock(&area->lock);
814 mutex_unlock(&AS->lock);
815 return AS_PF_OK;
816 }
[2299914]817 }
818 }
[20d50a1]819
820 /*
[8182031]821 * Resort to the backend page fault handler.
[20d50a1]822 */
[0ee077ee]823 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
[8182031]824 page_table_unlock(AS, false);
825 mutex_unlock(&area->lock);
826 mutex_unlock(&AS->lock);
827 goto page_fault;
828 }
[20d50a1]829
[8182031]830 page_table_unlock(AS, false);
[1068f6a]831 mutex_unlock(&area->lock);
832 mutex_unlock(&AS->lock);
[e3c762cd]833 return AS_PF_OK;
834
835page_fault:
836 if (THREAD->in_copy_from_uspace) {
837 THREAD->in_copy_from_uspace = false;
[6f4495f5]838 istate_set_retaddr(istate,
839 (uintptr_t) &memcpy_from_uspace_failover_address);
[e3c762cd]840 } else if (THREAD->in_copy_to_uspace) {
841 THREAD->in_copy_to_uspace = false;
[6f4495f5]842 istate_set_retaddr(istate,
843 (uintptr_t) &memcpy_to_uspace_failover_address);
[e3c762cd]844 } else {
845 return AS_PF_FAULT;
846 }
847
848 return AS_PF_DEFER;
[20d50a1]849}
850
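/*
 * Illustrative sketch of how a low-level architecture handler might
 * call as_page_fault() (hypothetical function and names): the faulting
 * address is rounded down to its page and only AS_PF_FAULT needs
 * further action, e.g. killing the task or panicking.
 */
static void example_data_abort(uintptr_t badvaddr, istate_t *istate)
{
	if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
	    istate) == AS_PF_FAULT)
		panic("page fault at %p\n", badvaddr);
}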
[7e4e532]851/** Switch address spaces.
[1068f6a]852 *
853 * Note that this function cannot sleep as it is essentially a part of
[47800e0]854 * scheduling. Sleeping here would lead to deadlock on wakeup.
[20d50a1]855 *
[7e4e532]856 * @param old Old address space or NULL.
857 * @param new New address space.
[20d50a1]858 */
[7e4e532]859void as_switch(as_t *old, as_t *new)
[20d50a1]860{
861 ipl_t ipl;
[7e4e532]862 bool needs_asid = false;
[4512d7e]863
[20d50a1]864 ipl = interrupts_disable();
[47800e0]865 spinlock_lock(&inactive_as_with_asid_lock);
[7e4e532]866
867 /*
868 * First, take care of the old address space.
869 */
870 if (old) {
[1068f6a]871 mutex_lock_active(&old->lock);
[47800e0]872 ASSERT(old->cpu_refcount);
873 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
[7e4e532]874 /*
875 * The old address space is no longer active on
876 * any processor. It can be appended to the
877 * list of inactive address spaces with assigned
878 * ASID.
879 */
880 ASSERT(old->asid != ASID_INVALID);
[6f4495f5]881 list_append(&old->inactive_as_with_asid_link,
882 &inactive_as_with_asid_head);
[7e4e532]883 }
[1068f6a]884 mutex_unlock(&old->lock);
[57da95c]885
886 /*
887 * Perform architecture-specific tasks when the address space
888 * is being removed from the CPU.
889 */
890 as_deinstall_arch(old);
[7e4e532]891 }
892
893 /*
894 * Second, prepare the new address space.
895 */
[1068f6a]896 mutex_lock_active(&new->lock);
[47800e0]897 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
[6f4495f5]898 if (new->asid != ASID_INVALID) {
[7e4e532]899 list_remove(&new->inactive_as_with_asid_link);
[6f4495f5]900 } else {
901 /*
902 * Defer call to asid_get() until new->lock is released.
903 */
904 needs_asid = true;
905 }
[7e4e532]906 }
907 SET_PTL0_ADDRESS(new->page_table);
[1068f6a]908 mutex_unlock(&new->lock);
[20d50a1]909
[7e4e532]910 if (needs_asid) {
911 /*
912 * Allocation of new ASID was deferred
913 * until now in order to avoid deadlock.
914 */
915 asid_t asid;
916
917 asid = asid_get();
[1068f6a]918 mutex_lock_active(&new->lock);
[7e4e532]919 new->asid = asid;
[1068f6a]920 mutex_unlock(&new->lock);
[7e4e532]921 }
[47800e0]922 spinlock_unlock(&inactive_as_with_asid_lock);
[7e4e532]923 interrupts_restore(ipl);
924
[20d50a1]925 /*
926 * Perform architecture-specific steps.
[4512d7e]927 * (e.g. write ASID to hardware register etc.)
[20d50a1]928 */
[7e4e532]929 as_install_arch(new);
[20d50a1]930
[7e4e532]931 AS = new;
[20d50a1]932}
[6a3c9a7]933
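/*
 * Illustrative sketch (hypothetical fragment): a scheduler switches
 * address spaces only when the new task uses a different one, passing
 * the old address space so that it can be put on the inactive list.
 *
 *	if (TASK->as != new_task->as)
 *		as_switch(TASK->as, new_task->as);
 */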
[df0103f7]934/** Convert address space area flags to page flags.
[6a3c9a7]935 *
[df0103f7]936 * @param aflags Flags of some address space area.
[6a3c9a7]937 *
[df0103f7]938 * @return Flags to be passed to page_mapping_insert().
[6a3c9a7]939 */
[df0103f7]940int area_flags_to_page_flags(int aflags)
[6a3c9a7]941{
942 int flags;
943
[9a8d91b]944 flags = PAGE_USER | PAGE_PRESENT;
[c23502d]945
[df0103f7]946 if (aflags & AS_AREA_READ)
[c23502d]947 flags |= PAGE_READ;
948
[df0103f7]949 if (aflags & AS_AREA_WRITE)
[c23502d]950 flags |= PAGE_WRITE;
951
[df0103f7]952 if (aflags & AS_AREA_EXEC)
[c23502d]953 flags |= PAGE_EXEC;
[6a3c9a7]954
[0ee077ee]955 if (aflags & AS_AREA_CACHEABLE)
[9a8d91b]956 flags |= PAGE_CACHEABLE;
957
[6a3c9a7]958 return flags;
959}
[ef67bab]960
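/*
 * For example, an AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE
 * area translates to PAGE_USER | PAGE_PRESENT | PAGE_READ |
 * PAGE_WRITE | PAGE_CACHEABLE.
 */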
[df0103f7]961/** Compute flags for virtual address translation subsystem.
962 *
963 * The address space area must be locked.
964 * Interrupts must be disabled.
965 *
966 * @param a Address space area.
967 *
968 * @return Flags to be used in page_mapping_insert().
969 */
[8182031]970int as_area_get_flags(as_area_t *a)
[df0103f7]971{
972 return area_flags_to_page_flags(a->flags);
973}
974
[ef67bab]975/** Create page table.
976 *
977 * Depending on architecture, create either address space
978 * private or global page table.
979 *
980 * @param flags Flags saying whether the page table is for kernel address space.
981 *
982 * @return First entry of the page table.
983 */
984pte_t *page_table_create(int flags)
985{
986 ASSERT(as_operations);
987 ASSERT(as_operations->page_table_create);
988
989 return as_operations->page_table_create(flags);
990}
[d3e7ff4]991
[482826d]992/** Destroy page table.
993 *
994 * Destroy page table in architecture specific way.
995 *
996 * @param page_table Physical address of PTL0.
997 */
998void page_table_destroy(pte_t *page_table)
999{
1000 ASSERT(as_operations);
1001 ASSERT(as_operations->page_table_destroy);
1002
1003 as_operations->page_table_destroy(page_table);
1004}
1005
[2299914]1006/** Lock page table.
1007 *
1008 * This function should be called before any page_mapping_insert(),
1009 * page_mapping_remove() and page_mapping_find().
1010 *
1011 * Locking order is such that address space areas must be locked
1012 * prior to this call. Address space can be locked prior to this
1013 * call in which case the lock argument is false.
1014 *
1015 * @param as Address space.
[9179d0a]1016 * @param lock If false, do not attempt to lock as->lock.
[2299914]1017 */
1018void page_table_lock(as_t *as, bool lock)
1019{
1020 ASSERT(as_operations);
1021 ASSERT(as_operations->page_table_lock);
1022
1023 as_operations->page_table_lock(as, lock);
1024}
1025
1026/** Unlock page table.
1027 *
1028 * @param as Address space.
[9179d0a]1029 * @param unlock If false, do not attempt to unlock as->lock.
[2299914]1030 */
1031void page_table_unlock(as_t *as, bool unlock)
1032{
1033 ASSERT(as_operations);
1034 ASSERT(as_operations->page_table_unlock);
1035
1036 as_operations->page_table_unlock(as, unlock);
1037}
1038
[d3e7ff4]1039
1040/** Find address space area and lock it.
1041 *
1042 * The address space must be locked and interrupts must be disabled.
1043 *
1044 * @param as Address space.
1045 * @param va Virtual address.
1046 *
[6f4495f5]1047 * @return Locked address space area containing va on success or NULL on
1048 * failure.
[d3e7ff4]1049 */
[7f1c620]1050as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
[d3e7ff4]1051{
1052 as_area_t *a;
[252127e]1053 btree_node_t *leaf, *lnode;
1054 int i;
1055
1056 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1057 if (a) {
1058 /* va is the base address of an address space area */
[1068f6a]1059 mutex_lock(&a->lock);
[252127e]1060 return a;
1061 }
[d3e7ff4]1062
[252127e]1063 /*
[c47912f]1064 * Search the leaf node and the rightmost record of its left neighbour
[252127e]1065 * to find out whether this is a miss or va belongs to an address
1066 * space area found there.
1067 */
1068
1069 /* First, search the leaf node itself. */
1070 for (i = 0; i < leaf->keys; i++) {
1071 a = (as_area_t *) leaf->value[i];
[1068f6a]1072 mutex_lock(&a->lock);
[252127e]1073 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1074 return a;
1075 }
[1068f6a]1076 mutex_unlock(&a->lock);
[252127e]1077 }
[d3e7ff4]1078
[252127e]1079 /*
[c47912f]1080 * Second, locate the left neighbour and test its last record.
[b26db0c]1081 * Because of its position in the B+tree, it must have base < va.
[252127e]1082 */
[6f4495f5]1083 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1084 if (lnode) {
[252127e]1085 a = (as_area_t *) lnode->value[lnode->keys - 1];
[1068f6a]1086 mutex_lock(&a->lock);
[252127e]1087 if (va < a->base + a->pages * PAGE_SIZE) {
[37e7d2b9]1088 return a;
[252127e]1089 }
[1068f6a]1090 mutex_unlock(&a->lock);
[d3e7ff4]1091 }
1092
1093 return NULL;
1094}
[37e7d2b9]1095
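/*
 * Illustrative sketch (hypothetical helper) of the locking discipline
 * around find_area_and_lock(): interrupts are disabled and the address
 * space locked before the call, and the returned area is unlocked by
 * the caller when it is done with it.
 */
static int example_area_flags(as_t *as, uintptr_t va)
{
	ipl_t ipl;
	as_area_t *area;
	int flags = -1;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);
	area = find_area_and_lock(as, va);
	if (area) {
		flags = area->flags;
		mutex_unlock(&area->lock);
	}
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return flags;
}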
1096/** Check area conflicts with other areas.
1097 *
1098 * The address space must be locked and interrupts must be disabled.
1099 *
1100 * @param as Address space.
1101 * @param va Starting virtual address of the area being tested.
1102 * @param size Size of the area being tested.
1103 * @param avoid_area Do not touch this area.
1104 *
1105 * @return True if there is no conflict, false otherwise.
1106 */
[6f4495f5]1107bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1108 as_area_t *avoid_area)
[37e7d2b9]1109{
1110 as_area_t *a;
[252127e]1111 btree_node_t *leaf, *node;
1112 int i;
[37e7d2b9]1113
[5a7d9d1]1114 /*
1115 * We don't want any area to have conflicts with NULL page.
1116 */
1117 if (overlaps(va, size, NULL, PAGE_SIZE))
1118 return false;
1119
[252127e]1120 /*
1121 * The leaf node is found in O(log n), where n is proportional to
1122 * the number of address space areas belonging to as.
1123 * The check for conflicts is then attempted on the rightmost
[c47912f]1124 * record in the left neighbour, the leftmost record in the right
1125 * neighbour and all records in the leaf node itself.
[252127e]1126 */
1127
1128 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1129 if (a != avoid_area)
1130 return false;
1131 }
1132
1133 /* First, check the two border cases. */
[c47912f]1134 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
[252127e]1135 a = (as_area_t *) node->value[node->keys - 1];
[1068f6a]1136 mutex_lock(&a->lock);
[252127e]1137 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1138 mutex_unlock(&a->lock);
[252127e]1139 return false;
1140 }
[1068f6a]1141 mutex_unlock(&a->lock);
[252127e]1142 }
[6f4495f5]1143 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1144 if (node) {
[252127e]1145 a = (as_area_t *) node->value[0];
[1068f6a]1146 mutex_lock(&a->lock);
[252127e]1147 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1148 mutex_unlock(&a->lock);
[252127e]1149 return false;
1150 }
[1068f6a]1151 mutex_unlock(&a->lock);
[252127e]1152 }
1153
1154 /* Second, check the leaf node. */
1155 for (i = 0; i < leaf->keys; i++) {
1156 a = (as_area_t *) leaf->value[i];
[37e7d2b9]1157
1158 if (a == avoid_area)
1159 continue;
[252127e]1160
[1068f6a]1161 mutex_lock(&a->lock);
[252127e]1162 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1163 mutex_unlock(&a->lock);
[252127e]1164 return false;
1165 }
[1068f6a]1166 mutex_unlock(&a->lock);
[5a7d9d1]1167 }
[37e7d2b9]1168
[5a7d9d1]1169 /*
1170 * So far, the area does not conflict with other areas.
1171 * Check if it doesn't conflict with kernel address space.
1172 */
1173 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1174 return !overlaps(va, size,
[6f4495f5]1175 KERNEL_ADDRESS_SPACE_START,
1176 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
[37e7d2b9]1177 }
1178
1179 return true;
1180}
[df0103f7]1181
[1068f6a]1182/** Return size of the address space area with given base. */
[7f1c620]1183size_t as_get_size(uintptr_t base)
[7c23af9]1184{
1185 ipl_t ipl;
1186 as_area_t *src_area;
1187 size_t size;
1188
1189 ipl = interrupts_disable();
1190 src_area = find_area_and_lock(AS, base);
1191 if (src_area) {
1192 size = src_area->pages * PAGE_SIZE;
[1068f6a]1193 mutex_unlock(&src_area->lock);
[7c23af9]1194 } else {
1195 size = 0;
1196 }
1197 interrupts_restore(ipl);
1198 return size;
1199}
1200
[25bf215]1201/** Mark portion of address space area as used.
1202 *
1203 * The address space area must be already locked.
1204 *
1205 * @param a Address space area.
1206 * @param page First page to be marked.
1207 * @param count Number of pages to be marked.
1208 *
1209 * @return 0 on failure and 1 on success.
1210 */
[7f1c620]1211int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
[25bf215]1212{
1213 btree_node_t *leaf, *node;
1214 count_t pages;
1215 int i;
1216
1217 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1218 ASSERT(count);
1219
1220 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1221 if (pages) {
1222 /*
1223 * We hit the beginning of some used space.
1224 */
1225 return 0;
1226 }
1227
[a6cb8cb]1228 if (!leaf->keys) {
1229 btree_insert(&a->used_space, page, (void *) count, leaf);
1230 return 1;
1231 }
1232
[25bf215]1233 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1234 if (node) {
[6f4495f5]1235 uintptr_t left_pg = node->key[node->keys - 1];
1236 uintptr_t right_pg = leaf->key[0];
1237 count_t left_cnt = (count_t) node->value[node->keys - 1];
1238 count_t right_cnt = (count_t) leaf->value[0];
[25bf215]1239
1240 /*
1241 * Examine the possibility that the interval fits
1242 * somewhere between the rightmost interval of
1243 * the left neighbour and the first interval of the leaf.
1244 */
1245
1246 if (page >= right_pg) {
1247 /* Do nothing. */
[6f4495f5]1248 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1249 left_cnt * PAGE_SIZE)) {
[25bf215]1250 /* The interval intersects with the left interval. */
1251 return 0;
[6f4495f5]1252 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1253 right_cnt * PAGE_SIZE)) {
[25bf215]1254 /* The interval intersects with the right interval. */
1255 return 0;
[6f4495f5]1256 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1257 (page + count * PAGE_SIZE == right_pg)) {
1258 /*
1259 * The interval can be added by merging the two already
1260 * present intervals.
1261 */
[56789125]1262 node->value[node->keys - 1] += count + right_cnt;
[25bf215]1263 btree_remove(&a->used_space, right_pg, leaf);
1264 return 1;
[6f4495f5]1265 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1266 /*
1267 * The interval can be added by simply growing the left
1268 * interval.
1269 */
[56789125]1270 node->value[node->keys - 1] += count;
[25bf215]1271 return 1;
[6f4495f5]1272 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1273 /*
[6f4495f5]1274 * The interval can be added by simply moving the base of
1275 * the right interval down and increasing its size
1276 * accordingly.
[25bf215]1277 */
[56789125]1278 leaf->value[0] += count;
[25bf215]1279 leaf->key[0] = page;
1280 return 1;
1281 } else {
1282 /*
1283 * The interval is between both neighbouring intervals,
1284 * but cannot be merged with any of them.
1285 */
[6f4495f5]1286 btree_insert(&a->used_space, page, (void *) count,
1287 leaf);
[25bf215]1288 return 1;
1289 }
1290 } else if (page < leaf->key[0]) {
[7f1c620]1291 uintptr_t right_pg = leaf->key[0];
[25bf215]1292 count_t right_cnt = (count_t) leaf->value[0];
1293
1294 /*
[6f4495f5]1295 * Investigate the border case in which the left neighbour does
1296 * not exist but the interval fits from the left.
[25bf215]1297 */
1298
[6f4495f5]1299 if (overlaps(page, count * PAGE_SIZE, right_pg,
1300 right_cnt * PAGE_SIZE)) {
[25bf215]1301 /* The interval intersects with the right interval. */
1302 return 0;
[6f4495f5]1303 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1304 /*
[6f4495f5]1305 * The interval can be added by moving the base of the
1306 * right interval down and increasing its size
1307 * accordingly.
[25bf215]1308 */
1309 leaf->key[0] = page;
[56789125]1310 leaf->value[0] += count;
[25bf215]1311 return 1;
1312 } else {
1313 /*
1314 * The interval doesn't adjoin with the right interval.
1315 * It must be added individually.
1316 */
[6f4495f5]1317 btree_insert(&a->used_space, page, (void *) count,
1318 leaf);
[25bf215]1319 return 1;
1320 }
1321 }
1322
1323 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1324 if (node) {
[6f4495f5]1325 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1326 uintptr_t right_pg = node->key[0];
1327 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1328 count_t right_cnt = (count_t) node->value[0];
[25bf215]1329
1330 /*
1331 * Examine the possibility that the interval fits
1332 * somewhere between the leftmost interval of
1333 * the right neighbour and the last interval of the leaf.
1334 */
1335
1336 if (page < left_pg) {
1337 /* Do nothing. */
[6f4495f5]1338 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1339 left_cnt * PAGE_SIZE)) {
[25bf215]1340 /* The interval intersects with the left interval. */
1341 return 0;
[6f4495f5]1342 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1343 right_cnt * PAGE_SIZE)) {
[25bf215]1344 /* The interval intersects with the right interval. */
1345 return 0;
[6f4495f5]1346 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1347 (page + count * PAGE_SIZE == right_pg)) {
1348 /*
1349 * The interval can be added by merging the two already
1350 * present intervals.
1351 */
[56789125]1352 leaf->value[leaf->keys - 1] += count + right_cnt;
[25bf215]1353 btree_remove(&a->used_space, right_pg, node);
1354 return 1;
[6f4495f5]1355 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1356 /*
1357 * The interval can be added by simply growing the left
1358 * interval.
1359 */
[56789125]1360 leaf->value[leaf->keys - 1] += count;
[25bf215]1361 return 1;
[6f4495f5]1362 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1363 /*
[6f4495f5]1364 * The interval can be added by simply moving the base of
1365 * the right interval down and increasing its size
1366 * accordingly.
[25bf215]1367 */
[56789125]1368 node->value[0] += count;
[25bf215]1369 node->key[0] = page;
1370 return 1;
1371 } else {
1372 /*
1373 * The interval is between both neighbouring intervals,
1374 * but cannot be merged with any of them.
1375 */
[6f4495f5]1376 btree_insert(&a->used_space, page, (void *) count,
1377 leaf);
[25bf215]1378 return 1;
1379 }
1380 } else if (page >= leaf->key[leaf->keys - 1]) {
[7f1c620]1381 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[25bf215]1382 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1383
1384 /*
[6f4495f5]1385 * Investigate the border case in which the right neighbour
1386 * does not exist but the interval fits from the right.
[25bf215]1387 */
1388
[6f4495f5]1389 if (overlaps(page, count * PAGE_SIZE, left_pg,
1390 left_cnt * PAGE_SIZE)) {
[56789125]1391 /* The interval intersects with the left interval. */
[25bf215]1392 return 0;
[6f4495f5]1393 } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1394 /*
1395 * The interval can be added by growing the left
1396 * interval.
1397 */
[56789125]1398 leaf->value[leaf->keys - 1] += count;
[25bf215]1399 return 1;
1400 } else {
1401 /*
1402 * The interval doesn't adjoin with the left interval.
1403 * It must be added individually.
1404 */
[6f4495f5]1405 btree_insert(&a->used_space, page, (void *) count,
1406 leaf);
[25bf215]1407 return 1;
1408 }
1409 }
1410
1411 /*
[6f4495f5]1412 * Note that if the algorithm made it thus far, the interval can fit
1413 * only between two other intervals of the leaf. The two border cases
1414 * were already resolved.
[25bf215]1415 */
1416 for (i = 1; i < leaf->keys; i++) {
1417 if (page < leaf->key[i]) {
[6f4495f5]1418 uintptr_t left_pg = leaf->key[i - 1];
1419 uintptr_t right_pg = leaf->key[i];
1420 count_t left_cnt = (count_t) leaf->value[i - 1];
1421 count_t right_cnt = (count_t) leaf->value[i];
[25bf215]1422
1423 /*
1424 * The interval fits between left_pg and right_pg.
1425 */
1426
[6f4495f5]1427 if (overlaps(page, count * PAGE_SIZE, left_pg,
1428 left_cnt * PAGE_SIZE)) {
1429 /*
1430 * The interval intersects with the left
1431 * interval.
1432 */
[25bf215]1433 return 0;
[6f4495f5]1434 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1435 right_cnt * PAGE_SIZE)) {
1436 /*
1437 * The interval intersects with the right
1438 * interval.
1439 */
[25bf215]1440 return 0;
[6f4495f5]1441 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1442 (page + count * PAGE_SIZE == right_pg)) {
1443 /*
1444 * The interval can be added by merging the two
1445 * already present intervals.
1446 */
[56789125]1447 leaf->value[i - 1] += count + right_cnt;
[25bf215]1448 btree_remove(&a->used_space, right_pg, leaf);
1449 return 1;
[6f4495f5]1450 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1451 /*
1452 * The interval can be added by simply growing
1453 * the left interval.
1454 */
[56789125]1455 leaf->value[i - 1] += count;
[25bf215]1456 return 1;
[6f4495f5]1457 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1458 /*
[6f4495f5]1459 * The interval can be added by simply moving
1460 * base of the right interval down and
1461 * increasing its size accordingly.
[25bf215]1462 */
[56789125]1463 leaf->value[i] += count;
[25bf215]1464 leaf->key[i] = page;
1465 return 1;
1466 } else {
1467 /*
[6f4495f5]1468 * The interval is between both neigbouring
1469 * intervals, but cannot be merged with any of
1470 * them.
[25bf215]1471 */
[6f4495f5]1472 btree_insert(&a->used_space, page,
1473 (void *) count, leaf);
[25bf215]1474 return 1;
1475 }
1476 }
1477 }
1478
[6f4495f5]1479 panic("Inconsistency detected while adding %d pages of used space at "
1480 "%p.\n", count, page);
[25bf215]1481}
1482
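/*
 * Typical use from a backend page-fault handler after establishing a
 * new mapping (hypothetical fragment):
 *
 *	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
 *	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
 *		panic("Could not insert used space.\n");
 */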
1483/** Mark portion of address space area as unused.
1484 *
1485 * The address space area must be already locked.
1486 *
1487 * @param a Address space area.
1488 * @param page First page to be marked.
1489 * @param count Number of pages to be marked.
1490 *
1491 * @return 0 on failure and 1 on success.
1492 */
[7f1c620]1493int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
[25bf215]1494{
1495 btree_node_t *leaf, *node;
1496 count_t pages;
1497 int i;
1498
1499 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1500 ASSERT(count);
1501
1502 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1503 if (pages) {
1504 /*
1505 * We are lucky, page is the beginning of some interval.
1506 */
1507 if (count > pages) {
1508 return 0;
1509 } else if (count == pages) {
1510 btree_remove(&a->used_space, page, leaf);
[56789125]1511 return 1;
[25bf215]1512 } else {
1513 /*
1514 * Find the respective interval.
1515 * Decrease its size and relocate its start address.
1516 */
1517 for (i = 0; i < leaf->keys; i++) {
1518 if (leaf->key[i] == page) {
[6f4495f5]1519 leaf->key[i] += count * PAGE_SIZE;
[56789125]1520 leaf->value[i] -= count;
[25bf215]1521 return 1;
1522 }
1523 }
1524 goto error;
1525 }
1526 }
1527
1528 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1529 if (node && page < leaf->key[0]) {
[7f1c620]1530 uintptr_t left_pg = node->key[node->keys - 1];
[25bf215]1531 count_t left_cnt = (count_t) node->value[node->keys - 1];
1532
[6f4495f5]1533 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1534 count * PAGE_SIZE)) {
1535 if (page + count * PAGE_SIZE ==
1536 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1537 /*
[6f4495f5]1538 * The interval is contained in the rightmost
1539 * interval of the left neighbour and can be
1540 * removed by updating the size of the bigger
1541 * interval.
[25bf215]1542 */
[56789125]1543 node->value[node->keys - 1] -= count;
[25bf215]1544 return 1;
[6f4495f5]1545 } else if (page + count * PAGE_SIZE <
1546 left_pg + left_cnt * PAGE_SIZE) {
[56789125]1547 count_t new_cnt;
[25bf215]1548
1549 /*
[6f4495f5]1550 * The interval is contained in the rightmost
1551 * interval of the left neighbour but its
1552 * removal requires both updating the size of
1553 * the original interval and also inserting a
1554 * new interval.
[25bf215]1555 */
[6f4495f5]1556 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1557 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
[56789125]1558 node->value[node->keys - 1] -= count + new_cnt;
[6f4495f5]1559 btree_insert(&a->used_space, page +
1560 count * PAGE_SIZE, (void *) new_cnt, leaf);
[25bf215]1561 return 1;
1562 }
1563 }
1564 return 0;
1565 } else if (page < leaf->key[0]) {
1566 return 0;
1567 }
1568
1569 if (page > leaf->key[leaf->keys - 1]) {
[7f1c620]1570 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[25bf215]1571 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1572
[6f4495f5]1573 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1574 count * PAGE_SIZE)) {
1575 if (page + count * PAGE_SIZE ==
1576 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1577 /*
[6f4495f5]1578 * The interval is contained in the rightmost
1579 * interval of the leaf and can be removed by
1580 * updating the size of the bigger interval.
[25bf215]1581 */
[56789125]1582 leaf->value[leaf->keys - 1] -= count;
[25bf215]1583 return 1;
[6f4495f5]1584 } else if (page + count * PAGE_SIZE < left_pg +
1585 left_cnt * PAGE_SIZE) {
[56789125]1586 count_t new_cnt;
[25bf215]1587
1588 /*
[6f4495f5]1589 * The interval is contained in the rightmost
1590 * interval of the leaf but its removal
1591 * requires both updating the size of the
1592 * original interval and also inserting a new
1593 * interval.
[25bf215]1594 */
[6f4495f5]1595 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1596 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
[56789125]1597 leaf->value[leaf->keys - 1] -= count + new_cnt;
[6f4495f5]1598 btree_insert(&a->used_space, page +
1599 count * PAGE_SIZE, (void *) new_cnt, leaf);
[25bf215]1600 return 1;
1601 }
1602 }
1603 return 0;
1604 }
1605
1606 /*
1607 * The border cases have been already resolved.
1608 * Now the interval can be only between intervals of the leaf.
1609 */
1610 for (i = 1; i < leaf->keys - 1; i++) {
1611 if (page < leaf->key[i]) {
[7f1c620]1612 uintptr_t left_pg = leaf->key[i - 1];
[25bf215]1613 count_t left_cnt = (count_t) leaf->value[i - 1];
1614
1615 /*
[6f4495f5]1616 * Now the interval is between intervals corresponding
1617 * to (i - 1) and i.
[25bf215]1618 */
[6f4495f5]1619 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1620 count * PAGE_SIZE)) {
1621 if (page + count * PAGE_SIZE ==
1622 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1623 /*
[6f4495f5]1624 * The interval is contained in the
1625 * interval (i - 1) of the leaf and can
1626 * be removed by updating the size of
1627 * the bigger interval.
[25bf215]1628 */
[56789125]1629 leaf->value[i - 1] -= count;
[25bf215]1630 return 1;
[6f4495f5]1631 } else if (page + count * PAGE_SIZE <
1632 left_pg + left_cnt * PAGE_SIZE) {
[56789125]1633 count_t new_cnt;
[25bf215]1634
1635 /*
[6f4495f5]1636 * The interval is contained in the
1637 * interval (i - 1) of the leaf but its
1638 * removal requires both updating the
1639 * size of the original interval and
[25bf215]1640 * also inserting a new interval.
1641 */
[6f4495f5]1642 new_cnt = ((left_pg +
1643 left_cnt * PAGE_SIZE) -
1644 (page + count * PAGE_SIZE)) >>
1645 PAGE_WIDTH;
[56789125]1646 leaf->value[i - 1] -= count + new_cnt;
[6f4495f5]1647 btree_insert(&a->used_space, page +
1648 count * PAGE_SIZE, (void *) new_cnt,
1649 leaf);
[25bf215]1650 return 1;
1651 }
1652 }
1653 return 0;
1654 }
1655 }
1656
1657error:
[6f4495f5]1658 panic("Inconsistency detected while removing %d pages of used space "
1659 "from %p.\n", count, page);
[25bf215]1660}
1661
[8182031]1662/** Remove reference to address space area share info.
1663 *
1664 * If the reference count drops to 0, the sh_info is deallocated.
1665 *
1666 * @param sh_info Pointer to address space area share info.
1667 */
1668void sh_info_remove_reference(share_info_t *sh_info)
1669{
1670 bool dealloc = false;
1671
1672 mutex_lock(&sh_info->lock);
1673 ASSERT(sh_info->refcount);
1674 if (--sh_info->refcount == 0) {
1675 dealloc = true;
[f8d069e8]1676 link_t *cur;
[8182031]1677
1678 /*
1679 * Now walk carefully the pagemap B+tree and free/remove
1680 * reference from all frames found there.
1681 */
[6f4495f5]1682 for (cur = sh_info->pagemap.leaf_head.next;
1683 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
[8182031]1684 btree_node_t *node;
[f8d069e8]1685 int i;
[8182031]1686
[f8d069e8]1687 node = list_get_instance(cur, btree_node_t, leaf_link);
1688 for (i = 0; i < node->keys; i++)
[7f1c620]1689 frame_free((uintptr_t) node->value[i]);
[8182031]1690 }
1691
1692 }
1693 mutex_unlock(&sh_info->lock);
1694
1695 if (dealloc) {
1696 btree_destroy(&sh_info->pagemap);
1697 free(sh_info);
1698 }
1699}
1700
[df0103f7]1701/*
1702 * Address space related syscalls.
1703 */
1704
1705/** Wrapper for as_area_create(). */
[7f1c620]1706unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
[df0103f7]1707{
[6f4495f5]1708 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1709 AS_AREA_ATTR_NONE, &anon_backend, NULL))
[7f1c620]1710 return (unative_t) address;
[df0103f7]1711 else
[7f1c620]1712 return (unative_t) -1;
[df0103f7]1713}
1714
[c6e314a]1715/** Wrapper for as_area_resize(). */
[7f1c620]1716unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
[df0103f7]1717{
[7f1c620]1718 return (unative_t) as_area_resize(AS, address, size, 0);
[7242a78e]1719}
1720
[c6e314a]1721/** Wrapper for as_area_destroy(). */
[7f1c620]1722unative_t sys_as_area_destroy(uintptr_t address)
[7242a78e]1723{
[7f1c620]1724 return (unative_t) as_area_destroy(AS, address);
[df0103f7]1725}
[b45c443]1726
[64c2ad5]1727/** Print out information about address space.
1728 *
1729 * @param as Address space.
1730 */
1731void as_print(as_t *as)
1732{
1733 ipl_t ipl;
1734
1735 ipl = interrupts_disable();
1736 mutex_lock(&as->lock);
1737
1738 /* print out info about address space areas */
1739 link_t *cur;
[6f4495f5]1740 for (cur = as->as_area_btree.leaf_head.next;
1741 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1742 btree_node_t *node;
1743
1744 node = list_get_instance(cur, btree_node_t, leaf_link);
[64c2ad5]1745
1746 int i;
1747 for (i = 0; i < node->keys; i++) {
[7ba7c6d]1748 as_area_t *area = node->value[i];
[64c2ad5]1749
1750 mutex_lock(&area->lock);
1751 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
[6f4495f5]1752 area, area->base, area->pages, area->base,
1753 area->base + area->pages * PAGE_SIZE);
[64c2ad5]1754 mutex_unlock(&area->lock);
1755 }
1756 }
1757
1758 mutex_unlock(&as->lock);
1759 interrupts_restore(ipl);
1760}
1761
[cc73a8a1]1762/** @}
[b45c443]1763 */