source: mainline/kernel/generic/src/mm/as.c@ aca4a04

Last change on this file since aca4a04 was e6a78b9, checked in by Jakub Jermar <jakub@…>, 13 years ago

Merge mainline changes.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <bitops.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/** Slab for as_t objects.
 *
 */
static slab_cache_t *as_slab;

/** ASID subsystem lock.
 *
 * This lock protects:
 * - inactive_as_with_asid_list
 * - the as->asid member of each as_t instance
 * - the asids_allocated counter
 *
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * Inactive address spaces (on all processors)
 * that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_list);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

NO_TRACE static int as_constructor(void *obj, unsigned int flags)
{
    as_t *as = (as_t *) obj;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    return as_constructor_arch(as, flags);
}

NO_TRACE static size_t as_destructor(void *obj)
{
    return as_destructor_arch((as_t *) obj);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("Cannot create kernel address space.");

    /*
     * Make sure the kernel address space
     * reference count never drops to zero.
     */
    as_hold(AS_KERNEL);
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address
 *              space is created.
 *
 * @return Newly created address space.
 *
 */
as_t *as_create(unsigned int flags)
{
    as_t *as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;

#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}
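
/*
 * Example (illustrative, not part of the original file): as_init() above
 * creates the kernel address space with FLAG_AS_KERNEL. An ordinary
 * userspace address space is created with no flags; its ASID starts out
 * invalid and is only assigned lazily in as_switch():
 *
 *     as_t *as = as_create(0);
 *     ASSERT(as->asid == ASID_INVALID);
 */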

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 *
 * @param as Address space to be destroyed.
 *
 */
void as_destroy(as_t *as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(as != AS);
    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space, it is safe not to
     * lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asid conditionally and if we don't succeed,
     * we enable interrupts and try again. This is done while preemption is
     * disabled to prevent nested context switches. We also depend on the
     * fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl_t ipl = interrupts_read();

retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }

    /* Interrupts disabled, enable preemption */
    preemption_enable();

    if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
        if (as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);

        asid_put(as->asid);
    }

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    bool cond = true;
    while (cond) {
        ASSERT(!list_empty(&as->as_area_btree.leaf_list));

        btree_node_t *node =
            list_get_instance(list_first(&as->as_area_btree.leaf_list),
            btree_node_t, leaf_link);

        if ((cond = node->keys))
            as_area_destroy(as, node->key[0]);
    }

    btree_destroy(&as->as_area_btree);

#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    slab_free(as_slab, as);
}

/** Hold a reference to an address space.
 *
 * Holding a reference to an address space prevents destruction
 * of that address space.
 *
 * @param as Address space to be held.
 *
 */
NO_TRACE void as_hold(as_t *as)
{
    atomic_inc(&as->refcount);
}

/** Release a reference to an address space.
 *
 * The last one to release a reference to an address space
 * destroys the address space.
 *
 * @param as Address space to be released.
 *
 */
NO_TRACE void as_release(as_t *as)
{
    if (atomic_predec(&as->refcount) == 0)
        as_destroy(as);
}
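
/*
 * Example (illustrative sketch, not part of the original file): the
 * reference-counting protocol implemented by the two functions above.
 * A hypothetical caller that must keep an address space alive across a
 * blocking operation would do:
 *
 *     as_hold(as);        // refcount++, as_destroy() cannot run
 *     ...                 // work with the address space
 *     as_release(as);     // refcount--; the last release destroys it
 */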

/** Check area conflicts with other areas.
 *
 * @param as    Address space.
 * @param addr  Starting virtual address of the area being tested.
 * @param count Number of pages in the area being tested.
 * @param avoid Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 *
 */
NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
    size_t count, as_area_t *avoid)
{
    ASSERT((addr % PAGE_SIZE) == 0);
    ASSERT(mutex_locked(&as->lock));

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */
    btree_node_t *leaf;
    as_area_t *area =
        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
    if (area) {
        if (area != avoid)
            return false;
    }

    /* First, check the two border cases. */
    btree_node_t *node =
        btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[node->keys - 1];

        if (area != avoid) {
            mutex_lock(&area->lock);

            if (overlaps(addr, P2SZ(count), area->base,
                P2SZ(area->pages))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[0];

        if (area != avoid) {
            mutex_lock(&area->lock);

            if (overlaps(addr, P2SZ(count), area->base,
                P2SZ(area->pages))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    /* Second, check the leaf node. */
    btree_key_t i;
    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];

        if (area == avoid)
            continue;

        mutex_lock(&area->lock);

        if (overlaps(addr, P2SZ(count), area->base,
            P2SZ(area->pages))) {
            mutex_unlock(&area->lock);
            return false;
        }

        mutex_unlock(&area->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Find an unmapped address space area.
 *
 * The address space must be already locked when calling
 * this function.
 *
 * @param as    Address space.
 * @param bound Lowest address bound.
 * @param size  Requested size of the allocation.
 *
 * @return Address of the beginning of an unmapped address space area.
 * @return -1 if no suitable address space area was found.
 *
 */
NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
    size_t size)
{
    ASSERT(mutex_locked(&as->lock));

    if (size == 0)
        return (uintptr_t) -1;

    /*
     * Make sure we allocate from a page-aligned
     * address. Check for possible overflow in
     * each step.
     */

    size_t pages = SIZE2FRAMES(size);

    /*
     * Find the lowest unmapped address aligned on the size
     * boundary, not smaller than bound and of the required size.
     */

    /* First check the bound address itself */
    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
    if ((addr >= bound) &&
        (check_area_conflicts(as, addr, pages, NULL)))
        return addr;

    /* Next, check the addresses behind each area */
    list_foreach(as->as_area_btree.leaf_list, cur) {
        btree_node_t *node =
            list_get_instance(cur, btree_node_t, leaf_link);

        for (btree_key_t i = 0; i < node->keys; i++) {
            as_area_t *area = (as_area_t *) node->value[i];

            mutex_lock(&area->lock);

            addr =
                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
            bool avail =
                ((addr >= bound) && (addr >= area->base) &&
                (check_area_conflicts(as, addr, pages, area)));

            mutex_unlock(&area->lock);

            if (avail)
                return addr;
        }
    }

    /* No suitable address space area found */
    return (uintptr_t) -1;
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as           Target address space.
 * @param flags        Flags of the area memory.
 * @param size         Size of area.
 * @param attrs        Attributes of the area.
 * @param backend      Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 * @param base         Starting virtual address of the area.
 *                     If set to -1, a suitable mappable area is found.
 * @param bound        Lowest address bound if base is set to -1.
 *                     Otherwise ignored.
 *
 * @return Address space area on success or NULL on failure.
 *
 */
as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
    unsigned int attrs, mem_backend_t *backend,
    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
{
    if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
        return NULL;

    if (size == 0)
        return NULL;

    size_t pages = SIZE2FRAMES(size);

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    mutex_lock(&as->lock);

    if (*base == (uintptr_t) -1) {
        *base = as_get_unmapped_area(as, bound, size);
        if (*base == (uintptr_t) -1) {
            mutex_unlock(&as->lock);
            return NULL;
        }
    }

    if (!check_area_conflicts(as, *base, pages, NULL)) {
        mutex_unlock(&as->lock);
        return NULL;
    }

    as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&area->lock, MUTEX_PASSIVE);

    area->as = as;
    area->flags = flags;
    area->attributes = attrs;
    area->pages = pages;
    area->resident = 0;
    area->base = *base;
    area->sh_info = NULL;
    area->backend = backend;

    if (backend_data)
        area->backend_data = *backend_data;
    else
        memsetb(&area->backend_data, sizeof(area->backend_data), 0);

    if (area->backend && area->backend->create) {
        if (!area->backend->create(area)) {
            free(area);
            mutex_unlock(&as->lock);
            return NULL;
        }
    }

    btree_create(&area->used_space);
    btree_insert(&as->as_area_btree, *base, (void *) area,
        NULL);

    mutex_unlock(&as->lock);

    return area;
}
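
/*
 * Example (illustrative sketch, not part of the original file): creating
 * a four-page anonymous, read-write area and letting the kernel choose
 * its placement. Passing -1 through *base delegates the search to
 * as_get_unmapped_area(); anon_backend is the anonymous memory backend
 * referenced elsewhere in this file and AS_AREA_ATTR_NONE is assumed to
 * be the "no attributes" constant from mm/as.h.
 *
 *     uintptr_t base = (uintptr_t) -1;
 *     as_area_t *area = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *         AS_AREA_CACHEABLE, 4 * PAGE_SIZE, AS_AREA_ATTR_NONE,
 *         &anon_backend, NULL, &base, 0);
 *     // on success, base holds the chosen starting virtual address
 */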

/** Find address space area and lock it.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or
 *         NULL on failure.
 *
 */
NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    ASSERT(mutex_locked(&as->lock));

    btree_node_t *leaf;
    as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
        &leaf);
    if (area) {
        /* va is the base address of an address space area */
        mutex_lock(&area->lock);
        return area;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    btree_key_t i;

    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];

        mutex_lock(&area->lock);

        if ((area->base <= va) &&
            (va <= area->base + (P2SZ(area->pages) - 1)))
            return area;

        mutex_unlock(&area->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
        leaf);
    if (lnode) {
        area = (as_area_t *) lnode->value[lnode->keys - 1];

        mutex_lock(&area->lock);

        if (va <= area->base + (P2SZ(area->pages) - 1))
            return area;

        mutex_unlock(&area->lock);
    }

    return NULL;
}

/** Find address space area and change it.
 *
 * @param as      Address space.
 * @param address Virtual address belonging to the area to be changed.
 *                Must be page-aligned.
 * @param size    New size of the virtual memory block starting at
 *                address.
 * @param flags   Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 *
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
{
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    size_t pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return EPERM;
    }

    if (pages < area->pages) {
        uintptr_t start_free = area->base + P2SZ(pages);

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        page_table_lock(as, false);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        bool cond = true;
        while (cond) {
            ASSERT(!list_empty(&area->used_space.leaf_list));

            btree_node_t *node =
                list_get_instance(list_last(&area->used_space.leaf_list),
                btree_node_t, leaf_link);

            if ((cond = (bool) node->keys)) {
                uintptr_t ptr = node->key[node->keys - 1];
                size_t size =
                    (size_t) node->value[node->keys - 1];
                size_t i = 0;

                if (overlaps(ptr, P2SZ(size), area->base,
                    P2SZ(pages))) {

                    if (ptr + P2SZ(size) <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval overlaps
                     * with the resized address space
                     * area.
                     */

                    /* We are almost done */
                    cond = false;
                    i = (start_free - ptr) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        size - i))
                        panic("Cannot remove used space.");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, ptr, size))
                        panic("Cannot remove used space.");
                }

                /*
                 * Start TLB shootdown sequence.
                 *
                 * The sequence is rather short and can be
                 * repeated multiple times. The reason is that
                 * we don't want to have used_space_remove()
                 * inside the sequence as it may use a blocking
                 * memory allocation for its B+tree. Blocking
                 * while holding the tlblock spinlock is
                 * forbidden and would hit a kernel assertion.
                 */

                ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
                    as->asid, area->base + P2SZ(pages),
                    area->pages - pages);

                for (; i < size; i++) {
                    pte_t *pte = page_mapping_find(as,
                        ptr + P2SZ(i), false);

                    ASSERT(pte);
                    ASSERT(PTE_VALID(pte));
                    ASSERT(PTE_PRESENT(pte));

                    if ((area->backend) &&
                        (area->backend->frame_free)) {
                        area->backend->frame_free(area,
                            ptr + P2SZ(i),
                            PTE_GET_FRAME(pte));
                    }

                    page_mapping_remove(as, ptr + P2SZ(i));
                }

                /*
                 * Finish TLB shootdown sequence.
                 */

                tlb_invalidate_pages(as->asid,
                    area->base + P2SZ(pages),
                    area->pages - pages);

                /*
                 * Invalidate software translation caches
                 * (e.g. TSB on sparc64, PHT on ppc32).
                 */
                as_invalidate_translation_cache(as,
                    area->base + P2SZ(pages),
                    area->pages - pages);
                tlb_shootdown_finalize(ipl);
            }
        }
        page_table_unlock(as, false);
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return EADDRNOTAVAIL;
        }
    }

    if (area->backend && area->backend->resize) {
        if (!area->backend->resize(area, pages)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return ENOMEM;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
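
/*
 * Example (illustrative sketch, not part of the original file): growing
 * the area created in the earlier example from four to eight pages, as a
 * userspace heap allocator might during an sbrk-like operation. The
 * variable base is hypothetical and the flags argument is currently
 * unused, hence 0.
 *
 *     int rc = as_area_resize(AS, base, 8 * PAGE_SIZE, 0);
 *     if (rc != 0)
 *         // ENOENT, ENOTSUP, EPERM, EADDRNOTAVAIL or ENOMEM
 */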

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 *
 */
NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);

    if (--sh_info->refcount == 0) {
        dealloc = true;

        /*
         * Now carefully walk the pagemap B+tree and free/remove
         * reference from all frames found there.
         */
        list_foreach(sh_info->pagemap.leaf_list, cur) {
            btree_node_t *node
                = list_get_instance(cur, btree_node_t, leaf_link);
            btree_key_t i;

            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i]);
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}

/** Destroy address space area.
 *
 * @param as      Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (area->backend && area->backend->destroy)
        area->backend->destroy(area);

    uintptr_t base = area->base;

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Visit only the pages mapped by the used_space B+tree.
     */
    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node;
        btree_key_t i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                if ((area->backend) &&
                    (area->backend->frame_free)) {
                    area->backend->frame_free(area,
                        ptr + P2SZ(size),
                        PTE_GET_FRAME(pte));
                }

                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as         Pointer to source address space.
 * @param src_base       Base address of the source address space area.
 * @param acc_size       Expected size of the source area.
 * @param dst_as         Pointer to destination address space.
 * @param dst_flags_mask Destination address space area flags mask.
 * @param dst_base       Target base address. If set to -1,
 *                       a suitable mappable area is found.
 * @param bound          Lowest address bound if dst_base is set to -1.
 *                       Otherwise ignored.
 *
 * @return Zero on success.
 * @return ENOENT if there is no such task or such address space.
 * @return EPERM if there was a problem in accepting the area.
 * @return ENOMEM if there was a problem in allocating destination
 *         address space area.
 * @return ENOTSUP if the address space area backend does not support
 *         sharing.
 *
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
    uintptr_t bound)
{
    mutex_lock(&src_as->lock);
    as_area_t *src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        return ENOENT;
    }

    if ((!src_area->backend) || (!src_area->backend->share)) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return ENOTSUP;
    }

    size_t src_size = P2SZ(src_area->pages);
    unsigned int src_flags = src_area->flags;
    mem_backend_t *src_backend = src_area->backend;
    mem_backend_data_t src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if ((src_size != acc_size) ||
        ((src_flags & dst_flags_mask) != dst_flags_mask)) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    share_info_t *sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;

        /*
         * Call the backend to set up sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
        src_size, AS_AREA_ATTR_PARTIAL, src_backend,
        &src_backend_data, dst_base, bound);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    return 0;
}
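
/*
 * Example (illustrative sketch, not part of the original file): sharing a
 * four-page area read-only with another task, roughly the way an IPC
 * memory-sharing request would be honoured. src_base and dst_as are
 * hypothetical; acc_size must match the source area size exactly or
 * EPERM is returned.
 *
 *     uintptr_t dst_base = (uintptr_t) -1;
 *     int rc = as_area_share(AS, src_base, P2SZ(4), dst_as,
 *         AS_AREA_READ | AS_AREA_CACHEABLE, &dst_base, 0);
 */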

/** Check access mode for address space area.
 *
 * @param area   Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true
 *         otherwise.
 *
 */
NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    ASSERT(mutex_locked(&area->lock));

    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 *
 */
NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
{
    unsigned int flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
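
/*
 * Worked example (not part of the original file): for a readable,
 * executable, cacheable area, i.e. aflags == AS_AREA_READ | AS_AREA_EXEC |
 * AS_AREA_CACHEABLE, the function above returns PAGE_USER | PAGE_PRESENT |
 * PAGE_READ | PAGE_EXEC | PAGE_CACHEABLE, which is the form that
 * page_mapping_insert() expects.
 */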

/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as      Address space.
 * @param flags   Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
{
    /* Flags for the new memory mapping */
    unsigned int page_flags = area_flags_to_page_flags(flags);

    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if ((area->sh_info) || (area->backend != &anon_backend)) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    /*
     * Compute total number of used pages in the used_space B+tree
     */
    size_t used_pages = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node
            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++)
            used_pages += (size_t) node->value[i];
    }

    /* An array for storing frame numbers */
    uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    size_t frame_idx = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node = list_get_instance(cur, btree_node_t,
            leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that the memory area could not be accessed with both the old and
     * the new flags at once.
     */
    frame_idx = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node
            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, ptr + P2SZ(size),
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}
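
/*
 * Example (illustrative sketch, not part of the original file): the
 * write-then-execute pattern this function exists for, e.g. in a
 * hypothetical JIT compiler. The area is first created writable and the
 * code is copied in, then the area is flipped to executable; writable
 * and executable at the same time is rejected by as_area_create().
 *
 *     int rc = as_area_change_flags(AS, AS_AREA_READ | AS_AREA_EXEC |
 *         AS_AREA_CACHEABLE, base);    // base is hypothetical
 */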

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides whether the page fault
 * can be resolved by any backend and if so, it invokes the backend to resolve
 * the page fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page   Faulting page.
 * @param access Access mode that caused the page fault (i.e.
 *               read/write/exec).
 * @param istate Pointer to the interrupted state.
 *
 * @return AS_PF_FAULT on page fault.
 * @return AS_PF_OK on success.
 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
 *         or copy_from_uspace().
 *
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    if (!THREAD)
        return AS_PF_FAULT;

    if (!AS)
        return AS_PF_FAULT;

    mutex_lock(&AS->lock);
    as_area_t *area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if ((!area->backend) || (!area->backend->page_fault)) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults on the same address,
     * we need to make sure the mapping has not been already inserted.
     */
    pte_t *pte;
    if ((pte = page_mapping_find(AS, page, false))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
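
/*
 * Example (illustrative sketch, not part of the original file): the shape
 * of a low-level architecture fault handler driving this function. The
 * variable fault_addr and the panic reaction are assumptions; the real
 * handlers live in the architecture-specific mm code.
 *
 *     int rc = as_page_fault(ALIGN_DOWN(fault_addr, PAGE_SIZE),
 *         PF_ACCESS_WRITE, istate);
 *     if (rc == AS_PF_FAULT)
 *         panic("Unresolved page fault at %p.", (void *) fault_addr);
 *     // AS_PF_OK: retry the faulting instruction
 *     // AS_PF_DEFER: resume at the failover address set above
 */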

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 *
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();

retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);

        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);

            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_list);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }

#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}
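
/*
 * Example (hypothetical fragment, not part of the original file) modeled
 * on the scheduler: when a thread belonging to a different task is about
 * to run, the address spaces are switched and AS is updated as a side
 * effect of as_switch().
 *
 *     as_t *old_as = AS;
 *     as_t *new_as = THREAD->task->as;
 *
 *     if (old_as != new_as)
 *         as_switch(old_as, new_as);
 */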

/** Compute flags for virtual address translation subsystem.
 *
 * @param area Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 *
 */
NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
{
    ASSERT(mutex_locked(&area->lock));

    return area_flags_to_page_flags(area->flags);
}

/** Create page table.
 *
 * Depending on the architecture, create either an address-space-private
 * or a global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel
 *              address space.
 *
 * @return First entry of the page table.
 *
 */
NO_TRACE pte_t *page_table_create(unsigned int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy the page table in an architecture-specific way.
 *
 * @param page_table Physical address of PTL0.
 *
 */
NO_TRACE void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as   Address space.
 * @param lock If false, do not attempt to lock as->lock.
 *
 */
NO_TRACE void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as     Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 *
 */
NO_TRACE void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
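
/*
 * Example (illustrative sketch, not part of the original file): the
 * locking discipline described above. With as->lock already held by the
 * caller, lock == false tells the operations to leave as->lock alone:
 *
 *     page_table_lock(as, false);
 *     pte_t *pte = page_mapping_find(as, page, false);
 *     ...
 *     page_table_unlock(as, false);
 */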

/** Test whether page tables are locked.
 *
 * @param as Address space where the page tables belong.
 *
 * @return True if the page tables belonging to the address space
 *         are locked, otherwise false.
 */
NO_TRACE bool page_table_locked(as_t *as)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_locked);

    return as_operations->page_table_locked(as);
}

/** Return size of the address space area with given base.
 *
 * @param base Arbitrary address inside the address space area.
 *
 * @return Size of the address space area in bytes or zero if it
 *         does not exist.
 *
 */
size_t as_area_get_size(uintptr_t base)
{
    size_t size;

    page_table_lock(AS, true);
    as_area_t *src_area = find_area_and_lock(AS, base);

    if (src_area) {
        size = P2SZ(src_area->pages);
        mutex_unlock(&src_area->lock);
    } else
        size = 0;

    page_table_unlock(AS, true);
    return size;
}
1580
[25bf215]1581/** Mark portion of address space area as used.
1582 *
1583 * The address space area must be already locked.
1584 *
[da1bafb]1585 * @param area Address space area.
1586 * @param page First page to be marked.
1587 * @param count Number of page to be marked.
1588 *
[fc47885]1589 * @return False on failure or true on success.
[25bf215]1590 *
1591 */
[fc47885]1592bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
[25bf215]1593{
[1d432f9]1594 ASSERT(mutex_locked(&area->lock));
[25bf215]1595 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1596 ASSERT(count);
[da1bafb]1597
1598 btree_node_t *leaf;
1599 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]1600 if (pages) {
1601 /*
1602 * We hit the beginning of some used space.
1603 */
[fc47885]1604 return false;
[25bf215]1605 }
[da1bafb]1606
[a6cb8cb]1607 if (!leaf->keys) {
[da1bafb]1608 btree_insert(&area->used_space, page, (void *) count, leaf);
[fc47885]1609 goto success;
[a6cb8cb]1610 }
[da1bafb]1611
1612 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
[25bf215]1613 if (node) {
[6f4495f5]1614 uintptr_t left_pg = node->key[node->keys - 1];
1615 uintptr_t right_pg = leaf->key[0];
[98000fb]1616 size_t left_cnt = (size_t) node->value[node->keys - 1];
1617 size_t right_cnt = (size_t) leaf->value[0];
[25bf215]1618
1619 /*
1620 * Examine the possibility that the interval fits
1621 * somewhere between the rightmost interval of
1622 * the left neigbour and the first interval of the leaf.
1623 */
[da1bafb]1624
[25bf215]1625 if (page >= right_pg) {
1626 /* Do nothing. */
[b6f3e7e]1627 } else if (overlaps(page, P2SZ(count), left_pg,
1628 P2SZ(left_cnt))) {
[25bf215]1629 /* The interval intersects with the left interval. */
[fc47885]1630 return false;
[b6f3e7e]1631 } else if (overlaps(page, P2SZ(count), right_pg,
1632 P2SZ(right_cnt))) {
[25bf215]1633 /* The interval intersects with the right interval. */
[fc47885]1634 return false;
[b6f3e7e]1635 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1636 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1637 /*
1638 * The interval can be added by merging the two already
1639 * present intervals.
1640 */
[56789125]1641 node->value[node->keys - 1] += count + right_cnt;
[da1bafb]1642 btree_remove(&area->used_space, right_pg, leaf);
[fc47885]1643 goto success;
[b6f3e7e]1644 } else if (page == left_pg + P2SZ(left_cnt)) {
[da1bafb]1645 /*
[6f4495f5]1646 * The interval can be added by simply growing the left
1647 * interval.
1648 */
[56789125]1649 node->value[node->keys - 1] += count;
[fc47885]1650 goto success;
[b6f3e7e]1651 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1652 /*
[6f4495f5]1653 * The interval can be addded by simply moving base of
1654 * the right interval down and increasing its size
1655 * accordingly.
[25bf215]1656 */
[56789125]1657 leaf->value[0] += count;
[25bf215]1658 leaf->key[0] = page;
[fc47885]1659 goto success;
[25bf215]1660 } else {
1661 /*
1662 * The interval is between both neigbouring intervals,
1663 * but cannot be merged with any of them.
1664 */
[da1bafb]1665 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1666 leaf);
[fc47885]1667 goto success;
[25bf215]1668 }
1669 } else if (page < leaf->key[0]) {
[7f1c620]1670 uintptr_t right_pg = leaf->key[0];
[98000fb]1671 size_t right_cnt = (size_t) leaf->value[0];
[da1bafb]1672
[25bf215]1673 /*
[6f4495f5]1674 * Investigate the border case in which the left neighbour does
1675 * not exist but the interval fits from the left.
[25bf215]1676 */
[da1bafb]1677
[b6f3e7e]1678 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
[25bf215]1679 /* The interval intersects with the right interval. */
[fc47885]1680 return false;
[b6f3e7e]1681 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1682 /*
[6f4495f5]1683 * The interval can be added by moving the base of the
1684 * right interval down and increasing its size
1685 * accordingly.
[25bf215]1686 */
1687 leaf->key[0] = page;
[56789125]1688 leaf->value[0] += count;
[fc47885]1689 goto success;
[25bf215]1690 } else {
1691 /*
1692 * The interval doesn't adjoin with the right interval.
1693 * It must be added individually.
1694 */
[da1bafb]1695 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1696 leaf);
[fc47885]1697 goto success;
[25bf215]1698 }
1699 }
[da1bafb]1700
1701 node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
[25bf215]1702 if (node) {
[6f4495f5]1703 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1704 uintptr_t right_pg = node->key[0];
[98000fb]1705 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1706 size_t right_cnt = (size_t) node->value[0];
[25bf215]1707
1708 /*
1709 * Examine the possibility that the interval fits
1710 * somewhere between the leftmost interval of
1711 * the right neigbour and the last interval of the leaf.
1712 */
[da1bafb]1713
[25bf215]1714 if (page < left_pg) {
1715 /* Do nothing. */
[b6f3e7e]1716 } else if (overlaps(page, P2SZ(count), left_pg,
1717 P2SZ(left_cnt))) {
[25bf215]1718 /* The interval intersects with the left interval. */
[fc47885]1719 return false;
[b6f3e7e]1720 } else if (overlaps(page, P2SZ(count), right_pg,
1721 P2SZ(right_cnt))) {
[25bf215]1722 /* The interval intersects with the right interval. */
[fc47885]1723 return false;
[b6f3e7e]1724 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1725 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1726 /*
1727 * The interval can be added by merging the two already
1728 * present intervals.
[da1bafb]1729 */
[56789125]1730 leaf->value[leaf->keys - 1] += count + right_cnt;
[da1bafb]1731 btree_remove(&area->used_space, right_pg, node);
[fc47885]1732 goto success;
[b6f3e7e]1733 } else if (page == left_pg + P2SZ(left_cnt)) {
[6f4495f5]1734 /*
1735 * The interval can be added by simply growing the left
1736 * interval.
[da1bafb]1737 */
[fc47885]1738 leaf->value[leaf->keys - 1] += count;
1739 goto success;
[b6f3e7e]1740 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1741 /*
[6f4495f5]1742 * The interval can be addded by simply moving base of
1743 * the right interval down and increasing its size
1744 * accordingly.
[25bf215]1745 */
[56789125]1746 node->value[0] += count;
[25bf215]1747 node->key[0] = page;
[fc47885]1748 goto success;
[25bf215]1749 } else {
1750 /*
1751 * The interval is between both neigbouring intervals,
1752 * but cannot be merged with any of them.
1753 */
[da1bafb]1754 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1755 leaf);
[fc47885]1756 goto success;
[25bf215]1757 }
1758 } else if (page >= leaf->key[leaf->keys - 1]) {
[7f1c620]1759 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]1760 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]1761
[25bf215]1762 /*
[6f4495f5]1763 * Investigate the border case in which the right neighbour
1764 * does not exist but the interval fits from the right.
[25bf215]1765 */
[da1bafb]1766
[b6f3e7e]1767 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
[56789125]1768 /* The interval intersects with the left interval. */
[fc47885]1769 return false;
[b6f3e7e]1770 } else if (left_pg + P2SZ(left_cnt) == page) {
[6f4495f5]1771 /*
1772 * The interval can be added by growing the left
1773 * interval.
1774 */
[56789125]1775 leaf->value[leaf->keys - 1] += count;
[fc47885]1776 goto success;
[25bf215]1777 } else {
1778 /*
1779 * The interval doesn't adjoin with the left interval.
1780 * It must be added individually.
1781 */
[da1bafb]1782 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1783 leaf);
[fc47885]1784 goto success;
[25bf215]1785 }
1786 }
1787
1788 /*
[6f4495f5]1789 * Note that if the algorithm made it thus far, the interval can fit
1790 * only between two other intervals of the leaf. The two border cases
1791 * were already resolved.
[25bf215]1792 */
[da1bafb]1793 btree_key_t i;
[25bf215]1794 for (i = 1; i < leaf->keys; i++) {
1795 if (page < leaf->key[i]) {
[6f4495f5]1796 uintptr_t left_pg = leaf->key[i - 1];
1797 uintptr_t right_pg = leaf->key[i];
[98000fb]1798 size_t left_cnt = (size_t) leaf->value[i - 1];
1799 size_t right_cnt = (size_t) leaf->value[i];
[da1bafb]1800
[25bf215]1801 /*
1802 * The interval fits between left_pg and right_pg.
1803 */
[da1bafb]1804
[b6f3e7e]1805 if (overlaps(page, P2SZ(count), left_pg,
1806 P2SZ(left_cnt))) {
[6f4495f5]1807 /*
1808 * The interval intersects with the left
1809 * interval.
1810 */
[fc47885]1811 return false;
[b6f3e7e]1812 } else if (overlaps(page, P2SZ(count), right_pg,
1813 P2SZ(right_cnt))) {
[6f4495f5]1814 /*
1815 * The interval intersects with the right
1816 * interval.
1817 */
[fc47885]1818 return false;
[b6f3e7e]1819 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1820 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1821 /*
1822 * The interval can be added by merging the two
1823 * already present intervals.
1824 */
[56789125]1825 leaf->value[i - 1] += count + right_cnt;
[da1bafb]1826 btree_remove(&area->used_space, right_pg, leaf);
[fc47885]1827 goto success;
[b6f3e7e]1828 } else if (page == left_pg + P2SZ(left_cnt)) {
[6f4495f5]1829 /*
1830 * The interval can be added by simply growing
1831 * the left interval.
1832 */
[56789125]1833 leaf->value[i - 1] += count;
[fc47885]1834 goto success;
[b6f3e7e]1835 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1836 /*
[da1bafb]1837	 * The interval can be added by simply moving the
[6f4495f5]1838 * base of the right interval down and
1839 * increasing its size accordingly.
[da1bafb]1840 */
[56789125]1841 leaf->value[i] += count;
[25bf215]1842 leaf->key[i] = page;
[fc47885]1843 goto success;
[25bf215]1844 } else {
1845 /*
[6f4495f5]1846	 * The interval is between both neighbouring
1847 * intervals, but cannot be merged with any of
1848 * them.
[25bf215]1849 */
[da1bafb]1850 btree_insert(&area->used_space, page,
[6f4495f5]1851 (void *) count, leaf);
[fc47885]1852 goto success;
[25bf215]1853 }
1854 }
1855 }
[da1bafb]1856
[7e752b2]1857 panic("Inconsistency detected while adding %zu pages of used "
1858 "space at %p.", count, (void *) page);
[fc47885]1859
1860success:
1861 area->resident += count;
1862 return true;
[25bf215]1863}
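
/*
 * Illustrative sketch, not part of the build: how the coalescing above
 * behaves, assuming 4 KiB pages and an area whose used_space B+tree
 * starts empty. The addresses are hypothetical.
 *
 *	used_space_insert(area, 0x1000, 2);  // tracks [0x1000, 0x3000)
 *	used_space_insert(area, 0x3000, 2);  // grows it to [0x1000, 0x5000)
 *	used_space_insert(area, 0x2000, 1);  // returns false: overlaps
 */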
1864
1865/** Mark portion of address space area as unused.
1866 *
1867 * The address space area must be already locked.
1868 *
[da1bafb]1869 * @param area Address space area.
1870 * @param page First page to be marked.
 1871	 * @param count Number of pages to be marked.
1872 *
[fc47885]1873 * @return False on failure or true on success.
[25bf215]1874 *
1875 */
[fc47885]1876bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
[25bf215]1877{
[1d432f9]1878 ASSERT(mutex_locked(&area->lock));
[25bf215]1879 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1880 ASSERT(count);
[da1bafb]1881
1882 btree_node_t *leaf;
1883 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]1884 if (pages) {
1885 /*
 1886	 * We are lucky; page is the beginning of some interval.
1887 */
1888 if (count > pages) {
[fc47885]1889 return false;
[25bf215]1890 } else if (count == pages) {
[da1bafb]1891 btree_remove(&area->used_space, page, leaf);
[fc47885]1892 goto success;
[25bf215]1893 } else {
1894 /*
1895 * Find the respective interval.
1896 * Decrease its size and relocate its start address.
1897 */
[da1bafb]1898 btree_key_t i;
[25bf215]1899 for (i = 0; i < leaf->keys; i++) {
1900 if (leaf->key[i] == page) {
[b6f3e7e]1901 leaf->key[i] += P2SZ(count);
[56789125]1902 leaf->value[i] -= count;
[fc47885]1903 goto success;
[25bf215]1904 }
1905 }
[fc47885]1906
[25bf215]1907 goto error;
1908 }
1909 }
[da1bafb]1910
[b6f3e7e]1911 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
1912 leaf);
[da1bafb]1913 if ((node) && (page < leaf->key[0])) {
[7f1c620]1914 uintptr_t left_pg = node->key[node->keys - 1];
[98000fb]1915 size_t left_cnt = (size_t) node->value[node->keys - 1];
[da1bafb]1916
[b6f3e7e]1917 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
1918 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
[25bf215]1919 /*
[6f4495f5]1920 * The interval is contained in the rightmost
1921 * interval of the left neighbour and can be
1922 * removed by updating the size of the bigger
1923 * interval.
[25bf215]1924 */
[56789125]1925 node->value[node->keys - 1] -= count;
[fc47885]1926 goto success;
[b6f3e7e]1927 } else if (page + P2SZ(count) <
1928 left_pg + P2SZ(left_cnt)) {
1929 size_t new_cnt;
1930
[25bf215]1931 /*
[6f4495f5]1932 * The interval is contained in the rightmost
1933 * interval of the left neighbour but its
1934 * removal requires both updating the size of
1935 * the original interval and also inserting a
1936 * new interval.
[25bf215]1937 */
[b6f3e7e]1938 new_cnt = ((left_pg + P2SZ(left_cnt)) -
1939 (page + P2SZ(count))) >> PAGE_WIDTH;
[56789125]1940 node->value[node->keys - 1] -= count + new_cnt;
[da1bafb]1941 btree_insert(&area->used_space, page +
[b6f3e7e]1942 P2SZ(count), (void *) new_cnt, leaf);
[fc47885]1943 goto success;
[25bf215]1944 }
1945 }
[fc47885]1946
1947 return false;
[da1bafb]1948 } else if (page < leaf->key[0])
[fc47885]1949 return false;
[25bf215]1950
1951 if (page > leaf->key[leaf->keys - 1]) {
[7f1c620]1952 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]1953 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]1954
[b6f3e7e]1955 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
1956 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
[25bf215]1957 /*
[6f4495f5]1958 * The interval is contained in the rightmost
1959 * interval of the leaf and can be removed by
1960 * updating the size of the bigger interval.
[25bf215]1961 */
[56789125]1962 leaf->value[leaf->keys - 1] -= count;
[fc47885]1963 goto success;
[b6f3e7e]1964 } else if (page + P2SZ(count) < left_pg +
1965 P2SZ(left_cnt)) {
1966 size_t new_cnt;
1967
[25bf215]1968 /*
[6f4495f5]1969 * The interval is contained in the rightmost
1970 * interval of the leaf but its removal
1971 * requires both updating the size of the
1972 * original interval and also inserting a new
1973 * interval.
[25bf215]1974 */
[b6f3e7e]1975 new_cnt = ((left_pg + P2SZ(left_cnt)) -
1976 (page + P2SZ(count))) >> PAGE_WIDTH;
[56789125]1977 leaf->value[leaf->keys - 1] -= count + new_cnt;
[da1bafb]1978 btree_insert(&area->used_space, page +
[b6f3e7e]1979 P2SZ(count), (void *) new_cnt, leaf);
[fc47885]1980 goto success;
[25bf215]1981 }
1982 }
[fc47885]1983
1984 return false;
[da1bafb]1985 }
[25bf215]1986
1987 /*
1988 * The border cases have been already resolved.
[fc47885]1989 * Now the interval can be only between intervals of the leaf.
[25bf215]1990 */
[da1bafb]1991 btree_key_t i;
[25bf215]1992 for (i = 1; i < leaf->keys - 1; i++) {
1993 if (page < leaf->key[i]) {
[7f1c620]1994 uintptr_t left_pg = leaf->key[i - 1];
[98000fb]1995 size_t left_cnt = (size_t) leaf->value[i - 1];
[da1bafb]1996
[25bf215]1997 /*
[6f4495f5]1998 * Now the interval is between intervals corresponding
1999 * to (i - 1) and i.
[25bf215]2000 */
[b6f3e7e]2001 if (overlaps(left_pg, P2SZ(left_cnt), page,
2002 P2SZ(count))) {
2003 if (page + P2SZ(count) ==
2004 left_pg + P2SZ(left_cnt)) {
[25bf215]2005 /*
[6f4495f5]2006 * The interval is contained in the
2007 * interval (i - 1) of the leaf and can
2008 * be removed by updating the size of
2009 * the bigger interval.
[25bf215]2010 */
[56789125]2011 leaf->value[i - 1] -= count;
[fc47885]2012 goto success;
[b6f3e7e]2013 } else if (page + P2SZ(count) <
2014 left_pg + P2SZ(left_cnt)) {
2015 size_t new_cnt;
2016
[25bf215]2017 /*
[6f4495f5]2018 * The interval is contained in the
2019 * interval (i - 1) of the leaf but its
2020 * removal requires both updating the
2021 * size of the original interval and
[25bf215]2022 * also inserting a new interval.
2023 */
[b6f3e7e]2024 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2025 (page + P2SZ(count))) >>
[6f4495f5]2026 PAGE_WIDTH;
[56789125]2027 leaf->value[i - 1] -= count + new_cnt;
[da1bafb]2028 btree_insert(&area->used_space, page +
[b6f3e7e]2029 P2SZ(count), (void *) new_cnt,
[6f4495f5]2030 leaf);
[fc47885]2031 goto success;
[25bf215]2032 }
2033 }
[fc47885]2034
2035 return false;
[25bf215]2036 }
2037 }
[da1bafb]2038
[25bf215]2039error:
[7e752b2]2040 panic("Inconsistency detected while removing %zu pages of used "
2041 "space from %p.", count, (void *) page);
[fc47885]2042
2043success:
2044 area->resident -= count;
2045 return true;
[25bf215]2046}
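
/*
 * Illustrative sketch, not part of the build: removing pages from the
 * middle of a tracked interval splits it in two, again assuming 4 KiB
 * pages and hypothetical addresses. With used_space tracking
 * [0x1000, 0x5000):
 *
 *	used_space_remove(area, 0x2000, 1);  // leaves [0x1000, 0x2000)
 *	                                     // and [0x3000, 0x5000)
 *	used_space_remove(area, 0x6000, 1);  // returns false: not tracked
 */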
2047
[df0103f7]2048/*
2049 * Address space related syscalls.
2050 */
2051
[fbcdeb8]2052sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
2053 uintptr_t bound)
[df0103f7]2054{
[fbcdeb8]2055 uintptr_t virt = base;
2056 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
2057 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
2058 if (area == NULL)
[96b02eb9]2059 return (sysarg_t) -1;
[fbcdeb8]2060
2061 return (sysarg_t) virt;
[df0103f7]2062}
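
/*
 * Illustrative sketch, not part of the build: a caller would request an
 * anonymous area with AS_AREA_* access flags (the kernel ORs in
 * AS_AREA_CACHEABLE itself) and treat (sysarg_t) -1 as failure. The base
 * address and size below are hypothetical.
 *
 *	sysarg_t virt = sys_as_area_create(0x40000000, 16 * PAGE_SIZE,
 *	    AS_AREA_READ | AS_AREA_WRITE, 0);
 *	if (virt == (sysarg_t) -1)
 *		;  // creation failed, nothing was mapped
 */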
2063
[96b02eb9]2064sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2065{
[96b02eb9]2066 return (sysarg_t) as_area_resize(AS, address, size, 0);
[7242a78e]2067}
2068
[96b02eb9]2069sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
[c98e6ee]2070{
[96b02eb9]2071 return (sysarg_t) as_area_change_flags(AS, flags, address);
[c98e6ee]2072}
2073
[96b02eb9]2074sysarg_t sys_as_area_destroy(uintptr_t address)
[7242a78e]2075{
[96b02eb9]2076 return (sysarg_t) as_area_destroy(AS, address);
[df0103f7]2077}
[b45c443]2078
[336db295]2079/** Get list of address space areas.
2080 *
[da1bafb]2081 * @param as Address space.
2082 * @param obuf Place to save pointer to returned buffer.
2083 * @param osize Place to save size of returned buffer.
2084 *
[336db295]2085 */
2086void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2087{
2088 mutex_lock(&as->lock);
[da1bafb]2089
[336db295]2090 /* First pass, count number of areas. */
[da1bafb]2091
2092 size_t area_cnt = 0;
2093
[55b77d9]2094 list_foreach(as->as_area_btree.leaf_list, cur) {
[da1bafb]2095 btree_node_t *node =
2096 list_get_instance(cur, btree_node_t, leaf_link);
[336db295]2097 area_cnt += node->keys;
2098 }
[da1bafb]2099
2100 size_t isize = area_cnt * sizeof(as_area_info_t);
2101 as_area_info_t *info = malloc(isize, 0);
2102
[336db295]2103 /* Second pass, record data. */
[da1bafb]2104
2105 size_t area_idx = 0;
2106
[55b77d9]2107 list_foreach(as->as_area_btree.leaf_list, cur) {
[da1bafb]2108 btree_node_t *node =
2109 list_get_instance(cur, btree_node_t, leaf_link);
2110 btree_key_t i;
2111
[336db295]2112 for (i = 0; i < node->keys; i++) {
2113 as_area_t *area = node->value[i];
[da1bafb]2114
[336db295]2115 ASSERT(area_idx < area_cnt);
2116 mutex_lock(&area->lock);
[da1bafb]2117
[336db295]2118 info[area_idx].start_addr = area->base;
[b6f3e7e]2119 info[area_idx].size = P2SZ(area->pages);
[336db295]2120 info[area_idx].flags = area->flags;
2121 ++area_idx;
[da1bafb]2122
[336db295]2123 mutex_unlock(&area->lock);
2124 }
2125 }
[da1bafb]2126
[336db295]2127 mutex_unlock(&as->lock);
[da1bafb]2128
[336db295]2129 *obuf = info;
2130 *osize = isize;
2131}
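
/*
 * Illustrative sketch, not part of the build: the caller owns the
 * returned buffer and must free it once done.
 *
 *	as_area_info_t *info;
 *	size_t isize;
 *
 *	as_get_area_info(AS, &info, &isize);
 *
 *	size_t i;
 *	for (i = 0; i < isize / sizeof(as_area_info_t); i++)
 *		printf("area at %p, %zu bytes\n",
 *		    (void *) info[i].start_addr, info[i].size);
 *
 *	free(info);
 */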
2132
[64c2ad5]2133/** Print out information about address space.
2134 *
[da1bafb]2135 * @param as Address space.
2136 *
[64c2ad5]2137 */
2138void as_print(as_t *as)
2139{
2140 mutex_lock(&as->lock);
2141
[0b37882]2142 /* Print out info about address space areas */
[55b77d9]2143 list_foreach(as->as_area_btree.leaf_list, cur) {
[da1bafb]2144 btree_node_t *node
2145 = list_get_instance(cur, btree_node_t, leaf_link);
2146 btree_key_t i;
[64c2ad5]2147
2148 for (i = 0; i < node->keys; i++) {
[7ba7c6d]2149 as_area_t *area = node->value[i];
[da1bafb]2150
[64c2ad5]2151 mutex_lock(&area->lock);
[7e752b2]2152 printf("as_area: %p, base=%p, pages=%zu"
2153 " (%p - %p)\n", area, (void *) area->base,
2154 area->pages, (void *) area->base,
[b6f3e7e]2155 (void *) (area->base + P2SZ(area->pages)));
[64c2ad5]2156 mutex_unlock(&area->lock);
2157 }
2158 }
2159
2160 mutex_unlock(&as->lock);
2161}
2162
[cc73a8a1]2163/** @}
[b45c443]2164 */