source: mainline/kernel/generic/src/mm/as.c@ e3ee9b9

Last change on this file since e3ee9b9 was e3ee9b9, checked in by Martin Decky <martin@…>, 15 years ago

remove forward static function declarations and reorder functions
(if not needed for recursion, forward static function declaration only increases source code size and makes it much harder to instantly tell whether a function is actually static or not)

coding style changes
(no change in functionality)

[20d50a1]1/*
[0321109]2 * Copyright (c) 2010 Jakub Jermar
[20d50a1]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Address space related functions.
[9179d0a]36 *
[20d50a1]37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * Virtual Address Translation (VAT) subsystem.
[9179d0a]40 *
41 * Functionality provided by this file allows one to
[cc73a8a1]42 * create address spaces and create, resize and share
[9179d0a]43 * address space areas.
44 *
45 * @see page.c
46 *
[20d50a1]47 */
48
49#include <mm/as.h>
[ef67bab]50#include <arch/mm/as.h>
[20d50a1]51#include <mm/page.h>
52#include <mm/frame.h>
[085d973]53#include <mm/slab.h>
[20d50a1]54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
[2802767]57#include <genarch/mm/page_ht.h>
[4512d7e]58#include <mm/asid.h>
[20d50a1]59#include <arch/mm/asid.h>
[31d8e10]60#include <preemption.h>
[20d50a1]61#include <synch/spinlock.h>
[1068f6a]62#include <synch/mutex.h>
[5c9a08b]63#include <adt/list.h>
[252127e]64#include <adt/btree.h>
[df0103f7]65#include <proc/task.h>
[e3c762cd]66#include <proc/thread.h>
[20d50a1]67#include <arch/asm.h>
[df0103f7]68#include <panic.h>
[20d50a1]69#include <debug.h>
[df0103f7]70#include <print.h>
[20d50a1]71#include <memstr.h>
[5a7d9d1]72#include <macros.h>
[20d50a1]73#include <arch.h>
[df0103f7]74#include <errno.h>
75#include <config.h>
[25bf215]76#include <align.h>
[d99c1d2]77#include <typedefs.h>
[e3c762cd]78#include <syscall/copy.h>
79#include <arch/interrupt.h>
[20d50a1]80
[92778f2]81#ifdef CONFIG_VIRT_IDX_DCACHE
82#include <arch/mm/cache.h>
83#endif /* CONFIG_VIRT_IDX_DCACHE */
84
[cc73a8a1]85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
[da1bafb]88 *
[cc73a8a1]89 */
[ef67bab]90as_operations_t *as_operations = NULL;
[20d50a1]91
[57da95c]92/**
93 * Slab for as_t objects.
[da1bafb]94 *
[57da95c]95 */
96static slab_cache_t *as_slab;
97
[6f4495f5]98/**
[879585a3]99 * This lock serializes access to the ASID subsystem.
100 * It protects:
101 * - inactive_as_with_asid_head list
102 * - as->asid for each as of the as_t type
103 * - asids_allocated counter
[da1bafb]104 *
[6f4495f5]105 */
[879585a3]106SPINLOCK_INITIALIZE(asidlock);
[7e4e532]107
108/**
109 * This list contains address spaces that are not active on any
110 * processor and that have a valid ASID.
[da1bafb]111 *
[7e4e532]112 */
113LIST_INITIALIZE(inactive_as_with_asid_head);
114
[071a8ae6]115/** Kernel address space. */
116as_t *AS_KERNEL = NULL;
117
[da1bafb]118static int as_constructor(void *obj, unsigned int flags)
[29b2bbf]119{
120 as_t *as = (as_t *) obj;
[da1bafb]121
[29b2bbf]122 link_initialize(&as->inactive_as_with_asid_link);
[7f341820]123 mutex_initialize(&as->lock, MUTEX_PASSIVE);
[29b2bbf]124
[da1bafb]125 int rc = as_constructor_arch(as, flags);
[29b2bbf]126
127 return rc;
128}
129
[da1bafb]130static size_t as_destructor(void *obj)
[29b2bbf]131{
132 as_t *as = (as_t *) obj;
133 return as_destructor_arch(as);
134}
135
[ef67bab]136/** Initialize address space subsystem. */
137void as_init(void)
138{
139 as_arch_init();
[da1bafb]140
[29b2bbf]141 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
[6f4495f5]142 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[57da95c]143
[8e1ea655]144 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]145 if (!AS_KERNEL)
[f651e80]146 panic("Cannot create kernel address space.");
[125e944]147
[76fca31]148 /* Make sure the kernel address space
149 * reference count never drops to zero.
150 */
[6193351]151 as_hold(AS_KERNEL);
[ef67bab]152}
153
[071a8ae6]154/** Create address space.
155 *
[da1bafb]156 * @param flags Flags that influence the way in which the address
157 * space is created.
158 *
[071a8ae6]159 */
[da1bafb]160as_t *as_create(unsigned int flags)
[20d50a1]161{
[da1bafb]162 as_t *as = (as_t *) slab_alloc(as_slab, 0);
[29b2bbf]163 (void) as_create_arch(as, 0);
164
[252127e]165 btree_create(&as->as_area_btree);
[bb68433]166
167 if (flags & FLAG_AS_KERNEL)
168 as->asid = ASID_KERNEL;
169 else
170 as->asid = ASID_INVALID;
171
[31d8e10]172 atomic_set(&as->refcount, 0);
[47800e0]173 as->cpu_refcount = 0;
[da1bafb]174
[b3f8fb7]175#ifdef AS_PAGE_TABLE
[80bcaed]176 as->genarch.page_table = page_table_create(flags);
[b3f8fb7]177#else
178 page_table_create(flags);
179#endif
[76fca31]180
[20d50a1]181 return as;
182}
183
[482826d]184/** Destroy address space.
185 *
[6f4495f5]186 * When there are no tasks referencing this address space (i.e. its refcount is
187 * zero), the address space can be destroyed.
[31d8e10]188 *
189 * We know that we don't hold any spinlock.
[6745592]190 *
[da1bafb]191 * @param as Address space to be destroyed.
192 *
[482826d]193 */
194void as_destroy(as_t *as)
[5be1923]195{
[31d8e10]196 DEADLOCK_PROBE_INIT(p_asidlock);
[482826d]197
[1624aae]198 ASSERT(as != AS);
[31d8e10]199 ASSERT(atomic_get(&as->refcount) == 0);
[482826d]200
201 /*
[663bb537]202 * Since there is no reference to this address space, it is safe not to
203 * lock its mutex.
[482826d]204 */
[879585a3]205
[31d8e10]206 /*
207 * We need to avoid deadlock between TLB shootdown and asidlock.
208 * We therefore try to take asidlock conditionally and if we don't succeed,
209 * we enable interrupts and try again. This is done while preemption is
210 * disabled to prevent nested context switches. We also depend on the
211 * fact that so far no spinlocks are held.
[da1bafb]212 *
[31d8e10]213 */
214 preemption_disable();
[da1bafb]215 ipl_t ipl = interrupts_read();
216
[31d8e10]217retry:
218 interrupts_disable();
219 if (!spinlock_trylock(&asidlock)) {
220 interrupts_enable();
221 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
222 goto retry;
223 }
[da1bafb]224
225 /* Interrupts disabled, enable preemption */
226 preemption_enable();
227
228 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
[1624aae]229 if (as->cpu_refcount == 0)
[31e8ddd]230 list_remove(&as->inactive_as_with_asid_link);
[da1bafb]231
[482826d]232 asid_put(as->asid);
233 }
[da1bafb]234
[879585a3]235 spinlock_unlock(&asidlock);
[fdaad75d]236 interrupts_restore(ipl);
237
[da1bafb]238
[482826d]239 /*
240 * Destroy address space areas of the address space.
[8440473]241 * The B+tree must be walked carefully because it is
[6f9a9bc]242 * also being destroyed.
[da1bafb]243 *
244 */
245 bool cond = true;
246 while (cond) {
[6f9a9bc]247 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
[da1bafb]248
249 btree_node_t *node =
250 list_get_instance(as->as_area_btree.leaf_head.next,
[6f4495f5]251 btree_node_t, leaf_link);
[da1bafb]252
253 if ((cond = node->keys))
[6f9a9bc]254 as_area_destroy(as, node->key[0]);
[482826d]255 }
[da1bafb]256
[152b2b0]257 btree_destroy(&as->as_area_btree);
[da1bafb]258
[b3f8fb7]259#ifdef AS_PAGE_TABLE
[80bcaed]260 page_table_destroy(as->genarch.page_table);
[b3f8fb7]261#else
262 page_table_destroy(NULL);
263#endif
[da1bafb]264
[57da95c]265 slab_free(as_slab, as);
[5be1923]266}
267
[0321109]268/** Hold a reference to an address space.
269 *
270 * Holding a reference to an address space prevents destruction of that address
271 * space.
272 *
[da1bafb]273 * @param as Address space to be held.
274 *
[0321109]275 */
276void as_hold(as_t *as)
277{
278 atomic_inc(&as->refcount);
279}
280
281/** Release a reference to an address space.
282 *
283 * The last one to release a reference to an address space destroys the address
284 * space.
285 *
[da1bafb]286 * @param as Address space to be released.
287 *
[0321109]288 */
289void as_release(as_t *as)
290{
291 if (atomic_predec(&as->refcount) == 0)
292 as_destroy(as);
293}
294
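/*
 * Usage sketch (illustrative; 'task' is a hypothetical task_t pointer, not
 * taken from this file): a subsystem that keeps a long-lived pointer to an
 * address space pins it with as_hold() and unpins it with as_release();
 * the final as_release() is the one that ends up calling as_destroy().
 *
 *	as_hold(task->as);     // make sure the address space stays around
 *	// ... work with task->as ...
 *	as_release(task->as);  // calls as_destroy() if this was the last reference
 */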
[e3ee9b9]295/** Check area conflicts with other areas.
296 *
297 * @param as Address space.
298 * @param va Starting virtual address of the area being tested.
299 * @param size Size of the area being tested.
300 * @param avoid_area Do not touch this area.
301 *
302 * @return True if there is no conflict, false otherwise.
303 *
304 */
305static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
306 as_area_t *avoid_area)
307{
308 ASSERT(mutex_locked(&as->lock));
309
310 /*
311 * We don't want any area to have conflicts with NULL page.
312 *
313 */
314 if (overlaps(va, size, NULL, PAGE_SIZE))
315 return false;
316
317 /*
318 * The leaf node is found in O(log n), where n is proportional to
319 * the number of address space areas belonging to as.
320 * The check for conflicts is then attempted on the rightmost
321 * record in the left neighbour, the leftmost record in the right
322 * neighbour and all records in the leaf node itself.
323 *
324 */
325 btree_node_t *leaf;
326 as_area_t *area =
327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
328 if (area) {
329 if (area != avoid_area)
330 return false;
331 }
332
333 /* First, check the two border cases. */
334 btree_node_t *node =
335 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
336 if (node) {
337 area = (as_area_t *) node->value[node->keys - 1];
338
339 mutex_lock(&area->lock);
340
341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
342 mutex_unlock(&area->lock);
343 return false;
344 }
345
346 mutex_unlock(&area->lock);
347 }
348
349 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
350 if (node) {
351 area = (as_area_t *) node->value[0];
352
353 mutex_lock(&area->lock);
354
355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
356 mutex_unlock(&area->lock);
357 return false;
358 }
359
360 mutex_unlock(&area->lock);
361 }
362
363 /* Second, check the leaf node. */
364 btree_key_t i;
365 for (i = 0; i < leaf->keys; i++) {
366 area = (as_area_t *) leaf->value[i];
367
368 if (area == avoid_area)
369 continue;
370
371 mutex_lock(&area->lock);
372
373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
374 mutex_unlock(&area->lock);
375 return false;
376 }
377
378 mutex_unlock(&area->lock);
379 }
380
381 /*
382 * So far, the area does not conflict with other areas.
383 * Check if it doesn't conflict with kernel address space.
384 *
385 */
386 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
387 return !overlaps(va, size,
388 KERNEL_ADDRESS_SPACE_START,
389 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
390 }
391
392 return true;
393}
394
[20d50a1]395/** Create address space area of common attributes.
396 *
397 * The created address space area is added to the target address space.
398 *
[da1bafb]399 * @param as Target address space.
400 * @param flags Flags of the area memory.
401 * @param size Size of area.
402 * @param base Base address of area.
403 * @param attrs Attributes of the area.
404 * @param backend Address space area backend. NULL if no backend is used.
405 * @param backend_data NULL or a pointer to an array holding two void *.
406 *
407 * @return Address space area on success or NULL on failure.
[20d50a1]408 *
409 */
[da1bafb]410as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
411 uintptr_t base, unsigned int attrs, mem_backend_t *backend,
412 mem_backend_data_t *backend_data)
[20d50a1]413{
414 if (base % PAGE_SIZE)
[37e7d2b9]415 return NULL;
[da1bafb]416
[dbbeb26]417 if (!size)
418 return NULL;
[da1bafb]419
[37e7d2b9]420 /* Writeable executable areas are not supported. */
421 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
422 return NULL;
[20d50a1]423
[1068f6a]424 mutex_lock(&as->lock);
[20d50a1]425
[37e7d2b9]426 if (!check_area_conflicts(as, base, size, NULL)) {
[1068f6a]427 mutex_unlock(&as->lock);
[37e7d2b9]428 return NULL;
429 }
[20d50a1]430
[da1bafb]431 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
432
433 mutex_initialize(&area->lock, MUTEX_PASSIVE);
434
435 area->as = as;
436 area->flags = flags;
437 area->attributes = attrs;
438 area->pages = SIZE2FRAMES(size);
439 area->base = base;
440 area->sh_info = NULL;
441 area->backend = backend;
442
[0ee077ee]443 if (backend_data)
[da1bafb]444 area->backend_data = *backend_data;
[0ee077ee]445 else
[da1bafb]446 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
447
448 btree_create(&area->used_space);
449 btree_insert(&as->as_area_btree, base, (void *) area, NULL);
[bb68433]450
[1068f6a]451 mutex_unlock(&as->lock);
[da1bafb]452
453 return area;
[20d50a1]454}
455
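/*
 * Usage sketch (illustrative; 'as' and 'base' are assumed to be supplied by
 * the caller and AS_AREA_ATTR_NONE is assumed from as.h): create a one-page,
 * readable and writable anonymous area whose frames are provided lazily by
 * anon_backend on page faults.
 *
 *	as_area_t *area = as_area_create(as,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
 *	    base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	if (!area) {
 *		// base not page-aligned, zero size, writable+executable
 *		// combination, or a conflict with an existing area
 *	}
 */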
[e3ee9b9]456/** Find address space area and lock it.
457 *
458 * @param as Address space.
459 * @param va Virtual address.
460 *
461 * @return Locked address space area containing va on success or
462 * NULL on failure.
463 *
464 */
465static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
466{
467 ASSERT(mutex_locked(&as->lock));
468
469 btree_node_t *leaf;
470 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
471 if (area) {
472 /* va is the base address of an address space area */
473 mutex_lock(&area->lock);
474 return area;
475 }
476
477 /*
478 * Search the leaf node and the rightmost record of its left neighbour
479 * to find out whether this is a miss or va belongs to an address
480 * space area found there.
481 *
482 */
483
484 /* First, search the leaf node itself. */
485 btree_key_t i;
486
487 for (i = 0; i < leaf->keys; i++) {
488 area = (as_area_t *) leaf->value[i];
489
490 mutex_lock(&area->lock);
491
492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
493 return area;
494
495 mutex_unlock(&area->lock);
496 }
497
498 /*
499 * Second, locate the left neighbour and test its last record.
500 * Because of its position in the B+tree, it must have base < va.
501 *
502 */
503 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
504 if (lnode) {
505 area = (as_area_t *) lnode->value[lnode->keys - 1];
506
507 mutex_lock(&area->lock);
508
509 if (va < area->base + area->pages * PAGE_SIZE)
510 return area;
511
512 mutex_unlock(&area->lock);
513 }
514
515 return NULL;
516}
517
[df0103f7]518/** Find address space area and change it.
519 *
[da1bafb]520 * @param as Address space.
521 * @param address Virtual address belonging to the area to be changed.
522 * Must be page-aligned.
523 * @param size New size of the virtual memory block starting at
524 * address.
525 * @param flags Flags influencing the remap operation. Currently unused.
526 *
527 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]528 *
[da1bafb]529 */
530int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
[df0103f7]531{
[1068f6a]532 mutex_lock(&as->lock);
[df0103f7]533
534 /*
535 * Locate the area.
[da1bafb]536 *
[df0103f7]537 */
[da1bafb]538 as_area_t *area = find_area_and_lock(as, address);
[df0103f7]539 if (!area) {
[1068f6a]540 mutex_unlock(&as->lock);
[7242a78e]541 return ENOENT;
[df0103f7]542 }
[da1bafb]543
[0ee077ee]544 if (area->backend == &phys_backend) {
[df0103f7]545 /*
546 * Remapping of address space areas associated
547 * with memory mapped devices is not supported.
[da1bafb]548 *
[df0103f7]549 */
[1068f6a]550 mutex_unlock(&area->lock);
551 mutex_unlock(&as->lock);
[7242a78e]552 return ENOTSUP;
[df0103f7]553 }
[da1bafb]554
[8182031]555 if (area->sh_info) {
556 /*
[da1bafb]557 * Remapping of shared address space areas
[8182031]558 * is not supported.
[da1bafb]559 *
[8182031]560 */
561 mutex_unlock(&area->lock);
562 mutex_unlock(&as->lock);
563 return ENOTSUP;
564 }
[da1bafb]565
566 size_t pages = SIZE2FRAMES((address - area->base) + size);
[df0103f7]567 if (!pages) {
568 /*
569 * Zero size address space areas are not allowed.
[da1bafb]570 *
[df0103f7]571 */
[1068f6a]572 mutex_unlock(&area->lock);
573 mutex_unlock(&as->lock);
[7242a78e]574 return EPERM;
[df0103f7]575 }
576
577 if (pages < area->pages) {
[eeb2bde2]578 uintptr_t start_free = area->base + pages * PAGE_SIZE;
[da1bafb]579
[df0103f7]580 /*
581 * Shrinking the area.
582 * No need to check for overlaps.
[da1bafb]583 *
[df0103f7]584 */
[da1bafb]585
[c964521]586 page_table_lock(as, false);
[da1bafb]587
[5552d60]588 /*
589 * Start TLB shootdown sequence.
[da1bafb]590 *
[5552d60]591 */
[402eda5]592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
593 area->base + pages * PAGE_SIZE, area->pages - pages);
[da1bafb]594
[56789125]595 /*
596 * Remove frames belonging to used space starting from
597 * the highest addresses downwards until an overlap with
598 * the resized address space area is found. Note that this
599 * is also the right way to remove part of the used_space
600 * B+tree leaf list.
[da1bafb]601 *
602 */
603 bool cond = true;
604 while (cond) {
[56789125]605 ASSERT(!list_empty(&area->used_space.leaf_head));
[da1bafb]606
607 btree_node_t *node =
[6f4495f5]608 list_get_instance(area->used_space.leaf_head.prev,
609 btree_node_t, leaf_link);
[da1bafb]610
[56789125]611 if ((cond = (bool) node->keys)) {
[da1bafb]612 uintptr_t ptr = node->key[node->keys - 1];
613 size_t size =
[98000fb]614 (size_t) node->value[node->keys - 1];
[da1bafb]615 size_t i = 0;
616
617 if (overlaps(ptr, size * PAGE_SIZE, area->base,
[4638401]618 pages * PAGE_SIZE)) {
[56789125]619
[da1bafb]620 if (ptr + size * PAGE_SIZE <= start_free) {
[56789125]621 /*
[6f4495f5]622 * The whole interval fits
623 * completely in the resized
624 * address space area.
[da1bafb]625 *
[56789125]626 */
627 break;
628 }
[da1bafb]629
[56789125]630 /*
[6f4495f5]631 * Part of the interval overlaps with
 632 * the resized address space area.
[da1bafb]634 *
[56789125]635 */
[da1bafb]636
637 /* We are almost done */
638 cond = false;
639 i = (start_free - ptr) >> PAGE_WIDTH;
[6745592]640 if (!used_space_remove(area, start_free,
[da1bafb]641 size - i))
642 panic("Cannot remove used space.");
[56789125]643 } else {
644 /*
[6f4495f5]645 * The interval of used space can be
646 * completely removed.
[56789125]647 */
[da1bafb]648 if (!used_space_remove(area, ptr, size))
649 panic("Cannot remove used space.");
[56789125]650 }
[da1bafb]651
652 for (; i < size; i++) {
653 pte_t *pte = page_mapping_find(as, ptr +
[6f4495f5]654 i * PAGE_SIZE);
[da1bafb]655
656 ASSERT(pte);
657 ASSERT(PTE_VALID(pte));
658 ASSERT(PTE_PRESENT(pte));
659
660 if ((area->backend) &&
661 (area->backend->frame_free)) {
[0ee077ee]662 area->backend->frame_free(area,
[da1bafb]663 ptr + i * PAGE_SIZE,
[6f4495f5]664 PTE_GET_FRAME(pte));
[8182031]665 }
[da1bafb]666
667 page_mapping_remove(as, ptr +
[6f4495f5]668 i * PAGE_SIZE);
[56789125]669 }
[df0103f7]670 }
671 }
[da1bafb]672
[df0103f7]673 /*
[5552d60]674 * Finish TLB shootdown sequence.
[da1bafb]675 *
[df0103f7]676 */
[da1bafb]677
[6f4495f5]678 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
679 area->pages - pages);
[da1bafb]680
[f1d1f5d3]681 /*
682 * Invalidate software translation caches (e.g. TSB on sparc64).
[da1bafb]683 *
[f1d1f5d3]684 */
[6f4495f5]685 as_invalidate_translation_cache(as, area->base +
686 pages * PAGE_SIZE, area->pages - pages);
[402eda5]687 tlb_shootdown_finalize(ipl);
[31d8e10]688
[da1bafb]689 page_table_unlock(as, false);
[df0103f7]690 } else {
691 /*
692 * Growing the area.
693 * Check for overlaps with other address space areas.
[da1bafb]694 *
[df0103f7]695 */
[6f4495f5]696 if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
697 area)) {
[1068f6a]698 mutex_unlock(&area->lock);
[da1bafb]699 mutex_unlock(&as->lock);
[7242a78e]700 return EADDRNOTAVAIL;
[df0103f7]701 }
[da1bafb]702 }
703
[df0103f7]704 area->pages = pages;
705
[1068f6a]706 mutex_unlock(&area->lock);
707 mutex_unlock(&as->lock);
[da1bafb]708
[7242a78e]709 return 0;
710}
711
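/*
 * Usage sketch (illustrative; 'as' and 'base' come from the caller): the new
 * page count is computed so that the area ends at address + size, rounded up
 * to whole pages, so resizing from the area base behaves as expected.
 *
 *	int rc = as_area_resize(as, base, 4 * PAGE_SIZE, 0);  // grow to 4 pages
 *	if (rc == EADDRNOTAVAIL) {
 *		// the grown range would collide with another area
 *	}
 *	rc = as_area_resize(as, base, PAGE_SIZE, 0);           // shrink back
 */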
[e3ee9b9]712/** Remove reference to address space area share info.
713 *
714 * If the reference count drops to 0, the sh_info is deallocated.
715 *
716 * @param sh_info Pointer to address space area share info.
717 *
718 */
719static void sh_info_remove_reference(share_info_t *sh_info)
720{
721 bool dealloc = false;
722
723 mutex_lock(&sh_info->lock);
724 ASSERT(sh_info->refcount);
725
726 if (--sh_info->refcount == 0) {
727 dealloc = true;
728 link_t *cur;
729
730 /*
731 * Now walk carefully the pagemap B+tree and free/remove
732 * reference from all frames found there.
733 */
734 for (cur = sh_info->pagemap.leaf_head.next;
735 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
736 btree_node_t *node
737 = list_get_instance(cur, btree_node_t, leaf_link);
738 btree_key_t i;
739
740 for (i = 0; i < node->keys; i++)
741 frame_free((uintptr_t) node->value[i]);
742 }
743
744 }
745 mutex_unlock(&sh_info->lock);
746
747 if (dealloc) {
748 btree_destroy(&sh_info->pagemap);
749 free(sh_info);
750 }
751}
752
[7242a78e]753/** Destroy address space area.
754 *
[da1bafb]755 * @param as Address space.
756 * @param address Address within the area to be deleted.
757 *
758 * @return Zero on success or a value from @ref errno.h on failure.
[7242a78e]759 *
760 */
[7f1c620]761int as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]762{
[1068f6a]763 mutex_lock(&as->lock);
[da1bafb]764
765 as_area_t *area = find_area_and_lock(as, address);
[7242a78e]766 if (!area) {
[1068f6a]767 mutex_unlock(&as->lock);
[7242a78e]768 return ENOENT;
769 }
[da1bafb]770
771 uintptr_t base = area->base;
772
[c964521]773 page_table_lock(as, false);
[da1bafb]774
[5552d60]775 /*
776 * Start TLB shootdown sequence.
777 */
[402eda5]778 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
779 area->pages);
[da1bafb]780
[567807b1]781 /*
782 * Visit only the pages mapped by used_space B+tree.
783 */
[da1bafb]784 link_t *cur;
[6f4495f5]785 for (cur = area->used_space.leaf_head.next;
786 cur != &area->used_space.leaf_head; cur = cur->next) {
[567807b1]787 btree_node_t *node;
[da1bafb]788 btree_key_t i;
[56789125]789
[f8d069e8]790 node = list_get_instance(cur, btree_node_t, leaf_link);
791 for (i = 0; i < node->keys; i++) {
[da1bafb]792 uintptr_t ptr = node->key[i];
793 size_t size;
[56789125]794
[da1bafb]795 for (size = 0; size < (size_t) node->value[i]; size++) {
796 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
797
798 ASSERT(pte);
799 ASSERT(PTE_VALID(pte));
800 ASSERT(PTE_PRESENT(pte));
801
802 if ((area->backend) &&
803 (area->backend->frame_free)) {
804 area->backend->frame_free(area,
805 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
[56789125]806 }
[da1bafb]807
808 page_mapping_remove(as, ptr + size * PAGE_SIZE);
[7242a78e]809 }
810 }
811 }
[da1bafb]812
[7242a78e]813 /*
[5552d60]814 * Finish TLB shootdown sequence.
[da1bafb]815 *
[7242a78e]816 */
[da1bafb]817
[f1d1f5d3]818 tlb_invalidate_pages(as->asid, area->base, area->pages);
[da1bafb]819
[f1d1f5d3]820 /*
[6f4495f5]821 * Invalidate potential software translation caches (e.g. TSB on
822 * sparc64).
[da1bafb]823 *
[f1d1f5d3]824 */
825 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]826 tlb_shootdown_finalize(ipl);
[da1bafb]827
[c964521]828 page_table_unlock(as, false);
[f1d1f5d3]829
[5552d60]830 btree_destroy(&area->used_space);
[da1bafb]831
[8d4f2ae]832 area->attributes |= AS_AREA_ATTR_PARTIAL;
[8182031]833
834 if (area->sh_info)
835 sh_info_remove_reference(area->sh_info);
[da1bafb]836
[1068f6a]837 mutex_unlock(&area->lock);
[da1bafb]838
[7242a78e]839 /*
840 * Remove the empty area from address space.
[da1bafb]841 *
[7242a78e]842 */
[f1d1f5d3]843 btree_remove(&as->as_area_btree, base, NULL);
[7242a78e]844
[8d4f2ae]845 free(area);
846
[f1d1f5d3]847 mutex_unlock(&as->lock);
[7242a78e]848 return 0;
[df0103f7]849}
850
[8d6bc2d5]851/** Share address space area with another or the same address space.
[df0103f7]852 *
[0ee077ee]853 * Address space area mapping is shared with a new address space area.
854 * If the source address space area has not been shared so far,
855 * a new sh_info is created. The new address space area simply gets the
856 * sh_info of the source area. The process of duplicating the
857 * mapping is done through the backend share function.
[da1bafb]858 *
859 * @param src_as Pointer to source address space.
860 * @param src_base Base address of the source address space area.
861 * @param acc_size Expected size of the source area.
862 * @param dst_as Pointer to destination address space.
863 * @param dst_base Target base address.
[fd4d8c0]864 * @param dst_flags_mask Destination address space area flags mask.
[df0103f7]865 *
[da1bafb]866 * @return Zero on success.
867 * @return ENOENT if there is no such task or such address space.
868 * @return EPERM if there was a problem in accepting the area.
869 * @return ENOMEM if there was a problem in allocating destination
870 * address space area.
871 * @return ENOTSUP if the address space area backend does not support
872 * sharing.
873 *
[df0103f7]874 */
[7f1c620]875int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
[da1bafb]876 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
[df0103f7]877{
[1068f6a]878 mutex_lock(&src_as->lock);
[da1bafb]879 as_area_t *src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]880 if (!src_area) {
[6fa476f7]881 /*
882 * Could not find the source address space area.
[da1bafb]883 *
[6fa476f7]884 */
[1068f6a]885 mutex_unlock(&src_as->lock);
[6fa476f7]886 return ENOENT;
887 }
[da1bafb]888
889 if ((!src_area->backend) || (!src_area->backend->share)) {
[8d6bc2d5]890 /*
[f47fd19]891 * There is no backend or the backend does not
[0ee077ee]892 * know how to share the area.
[da1bafb]893 *
[8d6bc2d5]894 */
895 mutex_unlock(&src_area->lock);
896 mutex_unlock(&src_as->lock);
897 return ENOTSUP;
898 }
899
[da1bafb]900 size_t src_size = src_area->pages * PAGE_SIZE;
901 unsigned int src_flags = src_area->flags;
902 mem_backend_t *src_backend = src_area->backend;
903 mem_backend_data_t src_backend_data = src_area->backend_data;
904
[1ec1fd8]905 /* Share the cacheable flag from the original mapping */
906 if (src_flags & AS_AREA_CACHEABLE)
907 dst_flags_mask |= AS_AREA_CACHEABLE;
[da1bafb]908
909 if ((src_size != acc_size) ||
910 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
[8d6bc2d5]911 mutex_unlock(&src_area->lock);
912 mutex_unlock(&src_as->lock);
[df0103f7]913 return EPERM;
914 }
[da1bafb]915
[8d6bc2d5]916 /*
917 * Now we are committed to sharing the area.
[8440473]918 * First, prepare the area for sharing.
[8d6bc2d5]919 * Then it will be safe to unlock it.
[da1bafb]920 *
[8d6bc2d5]921 */
[da1bafb]922 share_info_t *sh_info = src_area->sh_info;
[8d6bc2d5]923 if (!sh_info) {
924 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
[08a19ba]925 mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
[8d6bc2d5]926 sh_info->refcount = 2;
927 btree_create(&sh_info->pagemap);
928 src_area->sh_info = sh_info;
[da1bafb]929
[c0697c4c]930 /*
931 * Call the backend to setup sharing.
[da1bafb]932 *
[c0697c4c]933 */
934 src_area->backend->share(src_area);
[8d6bc2d5]935 } else {
936 mutex_lock(&sh_info->lock);
937 sh_info->refcount++;
938 mutex_unlock(&sh_info->lock);
939 }
[da1bafb]940
[8d6bc2d5]941 mutex_unlock(&src_area->lock);
942 mutex_unlock(&src_as->lock);
[da1bafb]943
[df0103f7]944 /*
[a9e8b39]945 * Create copy of the source address space area.
946 * The destination area is created with AS_AREA_ATTR_PARTIAL
947 * attribute set which prevents race condition with
948 * preliminary as_page_fault() calls.
[fd4d8c0]949 * The flags of the source area are masked against dst_flags_mask
950 * to support sharing in less privileged mode.
[da1bafb]951 *
[df0103f7]952 */
[da1bafb]953 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
954 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
[a9e8b39]955 if (!dst_area) {
[df0103f7]956 /*
957 * Destination address space area could not be created.
958 */
[8d6bc2d5]959 sh_info_remove_reference(sh_info);
960
[df0103f7]961 return ENOMEM;
962 }
[da1bafb]963
[a9e8b39]964 /*
965 * Now the destination address space area has been
966 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]967 * attribute and set the sh_info.
[da1bafb]968 *
969 */
970 mutex_lock(&dst_as->lock);
[1068f6a]971 mutex_lock(&dst_area->lock);
[a9e8b39]972 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]973 dst_area->sh_info = sh_info;
[1068f6a]974 mutex_unlock(&dst_area->lock);
[da1bafb]975 mutex_unlock(&dst_as->lock);
976
[df0103f7]977 return 0;
978}
979
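/*
 * Usage sketch (illustrative; the two address spaces and bases are assumed
 * to be known to the caller, e.g. from a memory-sharing request): map the
 * source area read-only into the destination address space.
 *
 *	int rc = as_area_share(src_as, src_base, size, dst_as, dst_base,
 *	    AS_AREA_READ | AS_AREA_CACHEABLE);
 *	if (rc != 0) {
 *		// ENOENT, EPERM, ENOMEM or ENOTSUP as documented above
 *	}
 */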
[fb84455]980/** Check access mode for address space area.
981 *
[da1bafb]982 * @param area Address space area.
983 * @param access Access mode.
984 *
985 * @return False if access violates area's permissions, true
986 * otherwise.
[fb84455]987 *
988 */
989bool as_area_check_access(as_area_t *area, pf_access_t access)
990{
991 int flagmap[] = {
992 [PF_ACCESS_READ] = AS_AREA_READ,
993 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
994 [PF_ACCESS_EXEC] = AS_AREA_EXEC
995 };
[1d432f9]996
997 ASSERT(mutex_locked(&area->lock));
[da1bafb]998
[fb84455]999 if (!(area->flags & flagmap[access]))
1000 return false;
1001
1002 return true;
1003}
1004
[e3ee9b9]1005/** Convert address space area flags to page flags.
1006 *
1007 * @param aflags Flags of some address space area.
1008 *
1009 * @return Flags to be passed to page_mapping_insert().
1010 *
1011 */
1012static unsigned int area_flags_to_page_flags(unsigned int aflags)
1013{
1014 unsigned int flags = PAGE_USER | PAGE_PRESENT;
1015
1016 if (aflags & AS_AREA_READ)
1017 flags |= PAGE_READ;
1018
1019 if (aflags & AS_AREA_WRITE)
1020 flags |= PAGE_WRITE;
1021
1022 if (aflags & AS_AREA_EXEC)
1023 flags |= PAGE_EXEC;
1024
1025 if (aflags & AS_AREA_CACHEABLE)
1026 flags |= PAGE_CACHEABLE;
1027
1028 return flags;
1029}
1030
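/*
 * Worked example (illustrative): an area created with
 * AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which is the value as_area_get_flags() later hands to
 * page_mapping_insert().
 */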
[6745592]1031/** Change address space area flags.
[c98e6ee]1032 *
1033 * The idea is to have the same data, but with a different access mode.
1034 * This is needed e.g. for writing code into memory and then executing it.
1035 * In order for this to work properly, this may copy the data
1036 * into private anonymous memory (unless it's already there).
1037 *
[76fca31]1038 * @param as Address space.
1039 * @param flags Flags of the area memory.
1040 * @param address Address within the area to be changed.
1041 *
1042 * @return Zero on success or a value from @ref errno.h on failure.
[c98e6ee]1043 *
1044 */
[da1bafb]1045int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
[c98e6ee]1046{
1047 /* Flags for the new memory mapping */
[da1bafb]1048 unsigned int page_flags = area_flags_to_page_flags(flags);
1049
[c98e6ee]1050 mutex_lock(&as->lock);
[da1bafb]1051
1052 as_area_t *area = find_area_and_lock(as, address);
[c98e6ee]1053 if (!area) {
1054 mutex_unlock(&as->lock);
1055 return ENOENT;
1056 }
[da1bafb]1057
[76fca31]1058 if ((area->sh_info) || (area->backend != &anon_backend)) {
[c98e6ee]1059 /* Copying shared areas not supported yet */
1060 /* Copying non-anonymous memory not supported yet */
1061 mutex_unlock(&area->lock);
1062 mutex_unlock(&as->lock);
1063 return ENOTSUP;
1064 }
[da1bafb]1065
[c98e6ee]1066 /*
1067 * Compute total number of used pages in the used_space B+tree
[da1bafb]1068 *
[c98e6ee]1069 */
[da1bafb]1070 size_t used_pages = 0;
1071 link_t *cur;
1072
[c98e6ee]1073 for (cur = area->used_space.leaf_head.next;
1074 cur != &area->used_space.leaf_head; cur = cur->next) {
[da1bafb]1075 btree_node_t *node
1076 = list_get_instance(cur, btree_node_t, leaf_link);
1077 btree_key_t i;
[c98e6ee]1078
[da1bafb]1079 for (i = 0; i < node->keys; i++)
[98000fb]1080 used_pages += (size_t) node->value[i];
[c98e6ee]1081 }
[da1bafb]1082
[c98e6ee]1083 /* An array for storing frame numbers */
[da1bafb]1084 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
1085
[c964521]1086 page_table_lock(as, false);
[da1bafb]1087
[c98e6ee]1088 /*
1089 * Start TLB shootdown sequence.
[da1bafb]1090 *
[c98e6ee]1091 */
[402eda5]1092 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1093 area->pages);
[da1bafb]1094
[c98e6ee]1095 /*
1096 * Remove used pages from page tables and remember their frame
1097 * numbers.
[da1bafb]1098 *
[c98e6ee]1099 */
[da1bafb]1100 size_t frame_idx = 0;
1101
[c98e6ee]1102 for (cur = area->used_space.leaf_head.next;
1103 cur != &area->used_space.leaf_head; cur = cur->next) {
[da1bafb]1104 btree_node_t *node
1105 = list_get_instance(cur, btree_node_t, leaf_link);
1106 btree_key_t i;
[c98e6ee]1107
1108 for (i = 0; i < node->keys; i++) {
[da1bafb]1109 uintptr_t ptr = node->key[i];
1110 size_t size;
[c98e6ee]1111
[da1bafb]1112 for (size = 0; size < (size_t) node->value[i]; size++) {
1113 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
1114
1115 ASSERT(pte);
1116 ASSERT(PTE_VALID(pte));
1117 ASSERT(PTE_PRESENT(pte));
1118
[c98e6ee]1119 old_frame[frame_idx++] = PTE_GET_FRAME(pte);
[da1bafb]1120
[c98e6ee]1121 /* Remove old mapping */
[da1bafb]1122 page_mapping_remove(as, ptr + size * PAGE_SIZE);
[c98e6ee]1123 }
1124 }
1125 }
[da1bafb]1126
[c98e6ee]1127 /*
1128 * Finish TLB shootdown sequence.
[da1bafb]1129 *
[c98e6ee]1130 */
[da1bafb]1131
[c98e6ee]1132 tlb_invalidate_pages(as->asid, area->base, area->pages);
[76fca31]1133
[c98e6ee]1134 /*
1135 * Invalidate potential software translation caches (e.g. TSB on
1136 * sparc64).
[da1bafb]1137 *
[c98e6ee]1138 */
1139 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1140 tlb_shootdown_finalize(ipl);
[da1bafb]1141
[c964521]1142 page_table_unlock(as, false);
[da1bafb]1143
[ae7f6fb]1144 /*
1145 * Set the new flags.
1146 */
1147 area->flags = flags;
[da1bafb]1148
[c98e6ee]1149 /*
1150 * Map pages back in with new flags. This step is kept separate
[6745592]1151 * so that the memory area cannot be accessed with both the old and
1152 * the new flags at once.
[c98e6ee]1153 */
1154 frame_idx = 0;
[da1bafb]1155
[c98e6ee]1156 for (cur = area->used_space.leaf_head.next;
1157 cur != &area->used_space.leaf_head; cur = cur->next) {
[da1bafb]1158 btree_node_t *node
1159 = list_get_instance(cur, btree_node_t, leaf_link);
1160 btree_key_t i;
[c98e6ee]1161
1162 for (i = 0; i < node->keys; i++) {
[da1bafb]1163 uintptr_t ptr = node->key[i];
1164 size_t size;
[c98e6ee]1165
[da1bafb]1166 for (size = 0; size < (size_t) node->value[i]; size++) {
[c98e6ee]1167 page_table_lock(as, false);
[da1bafb]1168
[c98e6ee]1169 /* Insert the new mapping */
[da1bafb]1170 page_mapping_insert(as, ptr + size * PAGE_SIZE,
[c98e6ee]1171 old_frame[frame_idx++], page_flags);
[da1bafb]1172
[c98e6ee]1173 page_table_unlock(as, false);
1174 }
1175 }
1176 }
[da1bafb]1177
[c98e6ee]1178 free(old_frame);
[da1bafb]1179
[c98e6ee]1180 mutex_unlock(&area->lock);
1181 mutex_unlock(&as->lock);
[da1bafb]1182
[c98e6ee]1183 return 0;
1184}
1185
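/*
 * Usage sketch (illustrative; 'as' and 'base' come from the caller): flip a
 * writable anonymous area to an executable one after code has been written
 * into it, which is the write-then-execute scenario described above.
 *
 *	int rc = as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC, base);
 *	if (rc == ENOTSUP) {
 *		// the area is shared or not backed by anonymous memory
 *	}
 */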
[20d50a1]1186/** Handle page fault within the current address space.
1187 *
[6745592]1188 * This is the high-level page fault handler. It decides whether the page fault
1189 * can be resolved by any backend and if so, it invokes the backend to resolve
1190 * the page fault.
[8182031]1191 *
[20d50a1]1192 * Interrupts are assumed disabled.
1193 *
[da1bafb]1194 * @param page Faulting page.
1195 * @param access Access mode that caused the page fault (i.e.
1196 * read/write/exec).
1197 * @param istate Pointer to the interrupted state.
1198 *
1199 * @return AS_PF_FAULT on page fault.
1200 * @return AS_PF_OK on success.
1201 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1202 * or copy_from_uspace().
[20d50a1]1203 *
1204 */
[7f1c620]1205int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
[20d50a1]1206{
[1068f6a]1207 if (!THREAD)
[8182031]1208 return AS_PF_FAULT;
[7af8c0e]1209
1210 if (!AS)
1211 return AS_PF_FAULT;
1212
[1068f6a]1213 mutex_lock(&AS->lock);
[da1bafb]1214 as_area_t *area = find_area_and_lock(AS, page);
[20d50a1]1215 if (!area) {
1216 /*
1217 * No area contained mapping for 'page'.
1218 * Signal page fault to low-level handler.
[da1bafb]1219 *
[20d50a1]1220 */
[1068f6a]1221 mutex_unlock(&AS->lock);
[e3c762cd]1222 goto page_fault;
[20d50a1]1223 }
[da1bafb]1224
[a9e8b39]1225 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1226 /*
1227 * The address space area is not fully initialized.
1228 * Avoid possible race by returning error.
1229 */
[1068f6a]1230 mutex_unlock(&area->lock);
1231 mutex_unlock(&AS->lock);
[da1bafb]1232 goto page_fault;
[a9e8b39]1233 }
[da1bafb]1234
1235 if ((!area->backend) || (!area->backend->page_fault)) {
[8182031]1236 /*
1237 * The address space area is not backed by any backend
1238 * or the backend cannot handle page faults.
[da1bafb]1239 *
[8182031]1240 */
1241 mutex_unlock(&area->lock);
1242 mutex_unlock(&AS->lock);
[da1bafb]1243 goto page_fault;
[8182031]1244 }
[da1bafb]1245
[2299914]1246 page_table_lock(AS, false);
1247
1248 /*
[6745592]1249 * To avoid a race condition between two page faults on the same address,
1250 * we need to make sure the mapping has not been already inserted.
[da1bafb]1251 *
[2299914]1252 */
[da1bafb]1253 pte_t *pte;
[2299914]1254 if ((pte = page_mapping_find(AS, page))) {
1255 if (PTE_PRESENT(pte)) {
[fb84455]1256 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
[6f4495f5]1257 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
1258 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
[fb84455]1259 page_table_unlock(AS, false);
1260 mutex_unlock(&area->lock);
1261 mutex_unlock(&AS->lock);
1262 return AS_PF_OK;
1263 }
[2299914]1264 }
1265 }
[20d50a1]1266
1267 /*
[8182031]1268 * Resort to the backend page fault handler.
[da1bafb]1269 *
[20d50a1]1270 */
[0ee077ee]1271 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
[8182031]1272 page_table_unlock(AS, false);
1273 mutex_unlock(&area->lock);
1274 mutex_unlock(&AS->lock);
1275 goto page_fault;
1276 }
[20d50a1]1277
[8182031]1278 page_table_unlock(AS, false);
[1068f6a]1279 mutex_unlock(&area->lock);
1280 mutex_unlock(&AS->lock);
[e3c762cd]1281 return AS_PF_OK;
[da1bafb]1282
[e3c762cd]1283page_fault:
1284 if (THREAD->in_copy_from_uspace) {
1285 THREAD->in_copy_from_uspace = false;
[6f4495f5]1286 istate_set_retaddr(istate,
1287 (uintptr_t) &memcpy_from_uspace_failover_address);
[e3c762cd]1288 } else if (THREAD->in_copy_to_uspace) {
1289 THREAD->in_copy_to_uspace = false;
[6f4495f5]1290 istate_set_retaddr(istate,
1291 (uintptr_t) &memcpy_to_uspace_failover_address);
[e3c762cd]1292 } else {
1293 return AS_PF_FAULT;
1294 }
[da1bafb]1295
[e3c762cd]1296 return AS_PF_DEFER;
[20d50a1]1297}
1298
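/*
 * Usage sketch (illustrative; 'badvaddr' and the reaction to an unresolved
 * fault are assumptions about the architecture-specific caller): a low-level
 * fault handler hands the aligned faulting page and the access type to
 * as_page_fault() and only escalates if the fault could not be resolved or
 * deferred.
 *
 *	int rc = as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE),
 *	    PF_ACCESS_WRITE, istate);
 *	if (rc == AS_PF_FAULT) {
 *		// unresolved fault: report it or kill the offending task
 *	}
 */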
[7e4e532]1299/** Switch address spaces.
[1068f6a]1300 *
1301 * Note that this function cannot sleep as it is essentially a part of
[879585a3]1302 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1303 * thing which is forbidden in this context is locking the address space.
[20d50a1]1304 *
[31d8e10]1305 * When this function is entered, no spinlocks may be held.
1306 *
[da1bafb]1307 * @param old Old address space or NULL.
1308 * @param new New address space.
1309 *
[20d50a1]1310 */
[80bcaed]1311void as_switch(as_t *old_as, as_t *new_as)
[20d50a1]1312{
[31d8e10]1313 DEADLOCK_PROBE_INIT(p_asidlock);
1314 preemption_disable();
[da1bafb]1315
[31d8e10]1316retry:
1317 (void) interrupts_disable();
1318 if (!spinlock_trylock(&asidlock)) {
[da1bafb]1319 /*
[31d8e10]1320 * Avoid deadlock with TLB shootdown.
1321 * We can enable interrupts here because
1322 * preemption is disabled. We should not be
1323 * holding any other lock.
[da1bafb]1324 *
[31d8e10]1325 */
1326 (void) interrupts_enable();
1327 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1328 goto retry;
1329 }
1330 preemption_enable();
[da1bafb]1331
[7e4e532]1332 /*
1333 * First, take care of the old address space.
[da1bafb]1334 */
[80bcaed]1335 if (old_as) {
1336 ASSERT(old_as->cpu_refcount);
[da1bafb]1337
1338 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
[7e4e532]1339 /*
1340 * The old address space is no longer active on
1341 * any processor. It can be appended to the
1342 * list of inactive address spaces with assigned
1343 * ASID.
[da1bafb]1344 *
[7e4e532]1345 */
[2057572]1346 ASSERT(old_as->asid != ASID_INVALID);
[da1bafb]1347
[2057572]1348 list_append(&old_as->inactive_as_with_asid_link,
1349 &inactive_as_with_asid_head);
[7e4e532]1350 }
[da1bafb]1351
[57da95c]1352 /*
1353 * Perform architecture-specific tasks when the address space
1354 * is being removed from the CPU.
[da1bafb]1355 *
[57da95c]1356 */
[80bcaed]1357 as_deinstall_arch(old_as);
[7e4e532]1358 }
[da1bafb]1359
[7e4e532]1360 /*
1361 * Second, prepare the new address space.
[da1bafb]1362 *
[7e4e532]1363 */
[80bcaed]1364 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
[879585a3]1365 if (new_as->asid != ASID_INVALID)
[80bcaed]1366 list_remove(&new_as->inactive_as_with_asid_link);
[879585a3]1367 else
1368 new_as->asid = asid_get();
[7e4e532]1369 }
[da1bafb]1370
[80bcaed]1371#ifdef AS_PAGE_TABLE
1372 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1373#endif
[7e4e532]1374
[20d50a1]1375 /*
1376 * Perform architecture-specific steps.
[4512d7e]1377 * (e.g. write ASID to hardware register etc.)
[da1bafb]1378 *
[20d50a1]1379 */
[80bcaed]1380 as_install_arch(new_as);
[da1bafb]1381
[879585a3]1382 spinlock_unlock(&asidlock);
[20d50a1]1383
[80bcaed]1384 AS = new_as;
[20d50a1]1385}
[6a3c9a7]1386
[e3ee9b9]1387
[ef67bab]1388
[df0103f7]1389/** Compute flags for virtual address translation subsystem.
1390 *
[da1bafb]1391 * @param area Address space area.
1392 *
1393 * @return Flags to be used in page_mapping_insert().
[df0103f7]1394 *
1395 */
[da1bafb]1396unsigned int as_area_get_flags(as_area_t *area)
[df0103f7]1397{
[1d432f9]1398 ASSERT(mutex_locked(&area->lock));
1399
[da1bafb]1400 return area_flags_to_page_flags(area->flags);
[df0103f7]1401}
1402
[ef67bab]1403/** Create page table.
1404 *
[6745592]1405 * Depending on the architecture, create either an address-space-private or
 1406 * a global page table.
[ef67bab]1407 *
[da1bafb]1408 * @param flags Flags saying whether the page table is for the kernel
1409 * address space.
1410 *
1411 * @return First entry of the page table.
[ef67bab]1412 *
1413 */
[da1bafb]1414pte_t *page_table_create(unsigned int flags)
[ef67bab]1415{
[bd1deed]1416 ASSERT(as_operations);
1417 ASSERT(as_operations->page_table_create);
1418
1419 return as_operations->page_table_create(flags);
[ef67bab]1420}
[d3e7ff4]1421
[482826d]1422/** Destroy page table.
1423 *
1424 * Destroy page table in architecture specific way.
1425 *
[da1bafb]1426 * @param page_table Physical address of PTL0.
1427 *
[482826d]1428 */
1429void page_table_destroy(pte_t *page_table)
1430{
[bd1deed]1431 ASSERT(as_operations);
1432 ASSERT(as_operations->page_table_destroy);
1433
1434 as_operations->page_table_destroy(page_table);
[482826d]1435}
1436
[2299914]1437/** Lock page table.
1438 *
1439 * This function should be called before any page_mapping_insert(),
1440 * page_mapping_remove() and page_mapping_find().
[da1bafb]1441 *
[2299914]1442 * Locking order is such that address space areas must be locked
1443 * prior to this call. Address space can be locked prior to this
1444 * call in which case the lock argument is false.
1445 *
[da1bafb]1446 * @param as Address space.
1447 * @param lock If false, do not attempt to lock as->lock.
1448 *
[2299914]1449 */
1450void page_table_lock(as_t *as, bool lock)
1451{
1452 ASSERT(as_operations);
1453 ASSERT(as_operations->page_table_lock);
[bd1deed]1454
[2299914]1455 as_operations->page_table_lock(as, lock);
1456}
1457
1458/** Unlock page table.
1459 *
[da1bafb]1460 * @param as Address space.
1461 * @param unlock If false, do not attempt to unlock as->lock.
1462 *
[2299914]1463 */
1464void page_table_unlock(as_t *as, bool unlock)
1465{
1466 ASSERT(as_operations);
1467 ASSERT(as_operations->page_table_unlock);
[bd1deed]1468
[2299914]1469 as_operations->page_table_unlock(as, unlock);
1470}
1471
[ada559c]1472/** Test whether page tables are locked.
1473 *
[e3ee9b9]1474 * @param as Address space where the page tables belong.
[ada559c]1475 *
[e3ee9b9]1476 * @return True if the page tables belonging to the address space
1477 * are locked, otherwise false.
[ada559c]1478 */
1479bool page_table_locked(as_t *as)
1480{
1481 ASSERT(as_operations);
1482 ASSERT(as_operations->page_table_locked);
1483
1484 return as_operations->page_table_locked(as);
1485}
1486
[b878df3]1487/** Return size of the address space area with given base.
1488 *
[1d432f9]1489 * @param base Arbitrary address inside the address space area.
[da1bafb]1490 *
1491 * @return Size of the address space area in bytes or zero if it
1492 * does not exist.
[b878df3]1493 *
1494 */
1495size_t as_area_get_size(uintptr_t base)
[7c23af9]1496{
1497 size_t size;
[da1bafb]1498
[1d432f9]1499 page_table_lock(AS, true);
[da1bafb]1500 as_area_t *src_area = find_area_and_lock(AS, base);
1501
[6745592]1502 if (src_area) {
[7c23af9]1503 size = src_area->pages * PAGE_SIZE;
[1068f6a]1504 mutex_unlock(&src_area->lock);
[da1bafb]1505 } else
[7c23af9]1506 size = 0;
[da1bafb]1507
[1d432f9]1508 page_table_unlock(AS, true);
[7c23af9]1509 return size;
1510}
1511
[25bf215]1512/** Mark portion of address space area as used.
1513 *
1514 * The address space area must be already locked.
1515 *
[da1bafb]1516 * @param area Address space area.
1517 * @param page First page to be marked.
1518 * @param count Number of pages to be marked.
1519 *
1520 * @return Zero on failure and non-zero on success.
[25bf215]1521 *
1522 */
[da1bafb]1523int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
[25bf215]1524{
[1d432f9]1525 ASSERT(mutex_locked(&area->lock));
[25bf215]1526 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1527 ASSERT(count);
[da1bafb]1528
1529 btree_node_t *leaf;
1530 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]1531 if (pages) {
1532 /*
1533 * We hit the beginning of some used space.
[da1bafb]1534 *
[25bf215]1535 */
1536 return 0;
1537 }
[da1bafb]1538
[a6cb8cb]1539 if (!leaf->keys) {
[da1bafb]1540 btree_insert(&area->used_space, page, (void *) count, leaf);
[a6cb8cb]1541 return 1;
1542 }
[da1bafb]1543
1544 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
[25bf215]1545 if (node) {
[6f4495f5]1546 uintptr_t left_pg = node->key[node->keys - 1];
1547 uintptr_t right_pg = leaf->key[0];
[98000fb]1548 size_t left_cnt = (size_t) node->value[node->keys - 1];
1549 size_t right_cnt = (size_t) leaf->value[0];
[25bf215]1550
1551 /*
1552 * Examine the possibility that the interval fits
1553 * somewhere between the rightmost interval of
1554 * the left neighbour and the first interval of the leaf.
[da1bafb]1555 *
[25bf215]1556 */
[da1bafb]1557
[25bf215]1558 if (page >= right_pg) {
1559 /* Do nothing. */
[6f4495f5]1560 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1561 left_cnt * PAGE_SIZE)) {
[25bf215]1562 /* The interval intersects with the left interval. */
1563 return 0;
[6f4495f5]1564 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1565 right_cnt * PAGE_SIZE)) {
[25bf215]1566 /* The interval intersects with the right interval. */
[da1bafb]1567 return 0;
[6f4495f5]1568 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1569 (page + count * PAGE_SIZE == right_pg)) {
1570 /*
1571 * The interval can be added by merging the two already
1572 * present intervals.
[da1bafb]1573 *
[6f4495f5]1574 */
[56789125]1575 node->value[node->keys - 1] += count + right_cnt;
[da1bafb]1576 btree_remove(&area->used_space, right_pg, leaf);
1577 return 1;
[6f4495f5]1578 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
[da1bafb]1579 /*
[6f4495f5]1580 * The interval can be added by simply growing the left
1581 * interval.
[da1bafb]1582 *
[6f4495f5]1583 */
[56789125]1584 node->value[node->keys - 1] += count;
[25bf215]1585 return 1;
[6f4495f5]1586 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1587 /*
[6f4495f5]1588 * The interval can be added by simply moving the base of
1589 * the right interval down and increasing its size
1590 * accordingly.
[da1bafb]1591 *
[25bf215]1592 */
[56789125]1593 leaf->value[0] += count;
[25bf215]1594 leaf->key[0] = page;
1595 return 1;
1596 } else {
1597 /*
1598 * The interval is between both neighbouring intervals,
1599 * but cannot be merged with any of them.
[da1bafb]1600 *
[25bf215]1601 */
[da1bafb]1602 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1603 leaf);
[25bf215]1604 return 1;
1605 }
1606 } else if (page < leaf->key[0]) {
[7f1c620]1607 uintptr_t right_pg = leaf->key[0];
[98000fb]1608 size_t right_cnt = (size_t) leaf->value[0];
[da1bafb]1609
[25bf215]1610 /*
[6f4495f5]1611 * Investigate the border case in which the left neighbour does
1612 * not exist but the interval fits from the left.
[da1bafb]1613 *
[25bf215]1614 */
[da1bafb]1615
[6f4495f5]1616 if (overlaps(page, count * PAGE_SIZE, right_pg,
1617 right_cnt * PAGE_SIZE)) {
[25bf215]1618 /* The interval intersects with the right interval. */
1619 return 0;
[6f4495f5]1620 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1621 /*
[6f4495f5]1622 * The interval can be added by moving the base of the
1623 * right interval down and increasing its size
1624 * accordingly.
[da1bafb]1625 *
[25bf215]1626 */
1627 leaf->key[0] = page;
[56789125]1628 leaf->value[0] += count;
[25bf215]1629 return 1;
1630 } else {
1631 /*
1632 * The interval doesn't adjoin with the right interval.
1633 * It must be added individually.
[da1bafb]1634 *
[25bf215]1635 */
[da1bafb]1636 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1637 leaf);
[25bf215]1638 return 1;
1639 }
1640 }
[da1bafb]1641
1642 node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
[25bf215]1643 if (node) {
[6f4495f5]1644 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1645 uintptr_t right_pg = node->key[0];
[98000fb]1646 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1647 size_t right_cnt = (size_t) node->value[0];
[25bf215]1648
1649 /*
1650 * Examine the possibility that the interval fits
1651 * somewhere between the leftmost interval of
1652 * the right neighbour and the last interval of the leaf.
[da1bafb]1653 *
[25bf215]1654 */
[da1bafb]1655
[25bf215]1656 if (page < left_pg) {
1657 /* Do nothing. */
[6f4495f5]1658 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1659 left_cnt * PAGE_SIZE)) {
[25bf215]1660 /* The interval intersects with the left interval. */
1661 return 0;
[6f4495f5]1662 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1663 right_cnt * PAGE_SIZE)) {
[25bf215]1664 /* The interval intersects with the right interval. */
[da1bafb]1665 return 0;
[6f4495f5]1666 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1667 (page + count * PAGE_SIZE == right_pg)) {
1668 /*
1669 * The interval can be added by merging the two already
1670 * present intervals.
[da1bafb]1671 *
1672 */
[56789125]1673 leaf->value[leaf->keys - 1] += count + right_cnt;
[da1bafb]1674 btree_remove(&area->used_space, right_pg, node);
1675 return 1;
[6f4495f5]1676 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1677 /*
1678 * The interval can be added by simply growing the left
1679 * interval.
[da1bafb]1680 *
1681 */
[56789125]1682 leaf->value[leaf->keys - 1] += count;
[25bf215]1683 return 1;
[6f4495f5]1684 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1685 /*
[6f4495f5]1686 * The interval can be addded by simply moving base of
1687 * the right interval down and increasing its size
1688 * accordingly.
[da1bafb]1689 *
[25bf215]1690 */
[56789125]1691 node->value[0] += count;
[25bf215]1692 node->key[0] = page;
1693 return 1;
1694 } else {
1695 /*
1696 * The interval is between both neighbouring intervals,
1697 * but cannot be merged with any of them.
[da1bafb]1698 *
[25bf215]1699 */
[da1bafb]1700 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1701 leaf);
[25bf215]1702 return 1;
1703 }
1704 } else if (page >= leaf->key[leaf->keys - 1]) {
[7f1c620]1705 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]1706 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]1707
[25bf215]1708 /*
[6f4495f5]1709 * Investigate the border case in which the right neighbour
1710 * does not exist but the interval fits from the right.
[da1bafb]1711 *
[25bf215]1712 */
[da1bafb]1713
[6f4495f5]1714 if (overlaps(page, count * PAGE_SIZE, left_pg,
1715 left_cnt * PAGE_SIZE)) {
[56789125]1716 /* The interval intersects with the left interval. */
[25bf215]1717 return 0;
[6f4495f5]1718 } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1719 /*
1720 * The interval can be added by growing the left
1721 * interval.
[da1bafb]1722 *
[6f4495f5]1723 */
[56789125]1724 leaf->value[leaf->keys - 1] += count;
[25bf215]1725 return 1;
1726 } else {
1727 /*
1728 * The interval doesn't adjoin with the left interval.
1729 * It must be added individually.
[da1bafb]1730 *
[25bf215]1731 */
[da1bafb]1732 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1733 leaf);
[25bf215]1734 return 1;
1735 }
1736 }
1737
1738 /*
[6f4495f5]1739 * Note that if the algorithm made it thus far, the interval can fit
1740 * only between two other intervals of the leaf. The two border cases
1741 * were already resolved.
[da1bafb]1742 *
[25bf215]1743 */
[da1bafb]1744 btree_key_t i;
[25bf215]1745 for (i = 1; i < leaf->keys; i++) {
1746 if (page < leaf->key[i]) {
[6f4495f5]1747 uintptr_t left_pg = leaf->key[i - 1];
1748 uintptr_t right_pg = leaf->key[i];
[98000fb]1749 size_t left_cnt = (size_t) leaf->value[i - 1];
1750 size_t right_cnt = (size_t) leaf->value[i];
[da1bafb]1751
[25bf215]1752 /*
1753 * The interval fits between left_pg and right_pg.
[da1bafb]1754 *
[25bf215]1755 */
[da1bafb]1756
[6f4495f5]1757 if (overlaps(page, count * PAGE_SIZE, left_pg,
1758 left_cnt * PAGE_SIZE)) {
1759 /*
1760 * The interval intersects with the left
1761 * interval.
[da1bafb]1762 *
[6f4495f5]1763 */
[25bf215]1764 return 0;
[6f4495f5]1765 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1766 right_cnt * PAGE_SIZE)) {
1767 /*
1768 * The interval intersects with the right
1769 * interval.
[da1bafb]1770 *
[6f4495f5]1771 */
[da1bafb]1772 return 0;
[6f4495f5]1773 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1774 (page + count * PAGE_SIZE == right_pg)) {
1775 /*
1776 * The interval can be added by merging the two
1777 * already present intervals.
[da1bafb]1778 *
[6f4495f5]1779 */
[56789125]1780 leaf->value[i - 1] += count + right_cnt;
[da1bafb]1781 btree_remove(&area->used_space, right_pg, leaf);
1782 return 1;
[6f4495f5]1783 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1784 /*
1785 * The interval can be added by simply growing
1786 * the left interval.
[da1bafb]1787 *
[6f4495f5]1788 */
[56789125]1789 leaf->value[i - 1] += count;
[25bf215]1790 return 1;
[6f4495f5]1791 } else if (page + count * PAGE_SIZE == right_pg) {
[25bf215]1792 /*
[da1bafb]1793 * The interval can be addded by simply moving
[6f4495f5]1794 * base of the right interval down and
1795 * increasing its size accordingly.
[da1bafb]1796 *
1797 */
[56789125]1798 leaf->value[i] += count;
[25bf215]1799 leaf->key[i] = page;
1800 return 1;
1801 } else {
1802 /*
[6f4495f5]1803 * The interval is between both neigbouring
1804 * intervals, but cannot be merged with any of
1805 * them.
[da1bafb]1806 *
[25bf215]1807 */
[da1bafb]1808 btree_insert(&area->used_space, page,
[6f4495f5]1809 (void *) count, leaf);
[25bf215]1810 return 1;
1811 }
1812 }
1813 }
[da1bafb]1814
[98000fb]1815 panic("Inconsistency detected while adding %" PRIs " pages of used "
[f651e80]1816 "space at %p.", count, page);
[25bf215]1817}
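/*
 * Worked example (editorial sketch; the addresses are made up and
 * PAGE_SIZE == 0x1000 is assumed): if area->used_space already maps
 * key 0x1000 to a count of 2, i.e. pages [0x1000, 0x3000) are used,
 * then:
 *
 *   used_space_insert(area, 0x3000, 2);  - adjoins from the right, the
 *                                          count at key 0x1000 grows to 4
 *   used_space_insert(area, 0x8000, 1);  - no neighbour to merge with, a
 *                                          new key 0x8000 with count 1
 *   used_space_insert(area, 0x2000, 1);  - overlaps [0x1000, 0x5000),
 *                                          returns 0
 */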
1818
1819/** Mark portion of address space area as unused.
1820 *
1821 * The address space area must be already locked.
1822 *
[da1bafb]1823 * @param area Address space area.
1824 * @param page First page to be marked.
1825 * @param count Number of pages to be marked.
1826 *
1827 * @return Zero on failure and non-zero on success.
[25bf215]1828 *
1829 */
[da1bafb]1830int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
[25bf215]1831{
[1d432f9]1832 ASSERT(mutex_locked(&area->lock));
[25bf215]1833 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1834 ASSERT(count);
[da1bafb]1835
1836 btree_node_t *leaf;
1837 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]1838 if (pages) {
1839 /*
1840 * We are lucky, page is the beginning of some interval.
[da1bafb]1841 *
[25bf215]1842 */
1843 if (count > pages) {
1844 return 0;
1845 } else if (count == pages) {
[da1bafb]1846 btree_remove(&area->used_space, page, leaf);
[56789125]1847 return 1;
[25bf215]1848 } else {
1849 /*
1850 * Find the respective interval.
1851 * Decrease its size and relocate its start address.
[da1bafb]1852 *
[25bf215]1853 */
[da1bafb]1854 btree_key_t i;
[25bf215]1855 for (i = 0; i < leaf->keys; i++) {
1856 if (leaf->key[i] == page) {
[6f4495f5]1857 leaf->key[i] += count * PAGE_SIZE;
[56789125]1858 leaf->value[i] -= count;
[25bf215]1859 return 1;
1860 }
1861 }
1862 goto error;
1863 }
1864 }
[da1bafb]1865
1866 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
1867 if ((node) && (page < leaf->key[0])) {
[7f1c620]1868 uintptr_t left_pg = node->key[node->keys - 1];
[98000fb]1869 size_t left_cnt = (size_t) node->value[node->keys - 1];
[da1bafb]1870
[6f4495f5]1871 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1872 count * PAGE_SIZE)) {
1873 if (page + count * PAGE_SIZE ==
1874 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1875 /*
[6f4495f5]1876 * The interval is contained in the rightmost
1877 * interval of the left neighbour and can be
1878 * removed by updating the size of the bigger
1879 * interval.
[da1bafb]1880 *
[25bf215]1881 */
[56789125]1882 node->value[node->keys - 1] -= count;
[25bf215]1883 return 1;
[6f4495f5]1884 } else if (page + count * PAGE_SIZE <
1885			    left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1886 /*
[6f4495f5]1887 * The interval is contained in the rightmost
1888 * interval of the left neighbour but its
1889 * removal requires both updating the size of
1890 * the original interval and also inserting a
1891 * new interval.
[da1bafb]1892 *
[25bf215]1893 */
[da1bafb]1894 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
[6f4495f5]1895			    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
[56789125]1896 node->value[node->keys - 1] -= count + new_cnt;
[da1bafb]1897 btree_insert(&area->used_space, page +
[6f4495f5]1898 count * PAGE_SIZE, (void *) new_cnt, leaf);
[25bf215]1899 return 1;
1900 }
1901 }
1902 return 0;
[da1bafb]1903 } else if (page < leaf->key[0])
[25bf215]1904 return 0;
1905
1906 if (page > leaf->key[leaf->keys - 1]) {
[7f1c620]1907 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]1908 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]1909
[6f4495f5]1910 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1911 count * PAGE_SIZE)) {
[da1bafb]1912 if (page + count * PAGE_SIZE ==
[6f4495f5]1913 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1914 /*
[6f4495f5]1915 * The interval is contained in the rightmost
1916 * interval of the leaf and can be removed by
1917 * updating the size of the bigger interval.
[da1bafb]1918 *
[25bf215]1919 */
[56789125]1920 leaf->value[leaf->keys - 1] -= count;
[25bf215]1921 return 1;
[6f4495f5]1922 } else if (page + count * PAGE_SIZE < left_pg +
1923 left_cnt * PAGE_SIZE) {
[25bf215]1924 /*
[6f4495f5]1925 * The interval is contained in the rightmost
1926 * interval of the leaf but its removal
1927 * requires both updating the size of the
1928 * original interval and also inserting a new
1929 * interval.
[da1bafb]1930 *
[25bf215]1931 */
[da1bafb]1932 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
[6f4495f5]1933 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
[56789125]1934 leaf->value[leaf->keys - 1] -= count + new_cnt;
[da1bafb]1935 btree_insert(&area->used_space, page +
[6f4495f5]1936 count * PAGE_SIZE, (void *) new_cnt, leaf);
[25bf215]1937 return 1;
1938 }
1939 }
1940 return 0;
[da1bafb]1941 }
[25bf215]1942
1943 /*
1944	 * The border cases have already been resolved.
1945	 * Now the interval can only be between intervals of the leaf.
1946 */
[da1bafb]1947 btree_key_t i;
[25bf215]1948 for (i = 1; i < leaf->keys - 1; i++) {
1949 if (page < leaf->key[i]) {
[7f1c620]1950 uintptr_t left_pg = leaf->key[i - 1];
[98000fb]1951 size_t left_cnt = (size_t) leaf->value[i - 1];
[da1bafb]1952
[25bf215]1953 /*
[6f4495f5]1954 * Now the interval is between intervals corresponding
1955 * to (i - 1) and i.
[25bf215]1956 */
[6f4495f5]1957 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1958 count * PAGE_SIZE)) {
1959 if (page + count * PAGE_SIZE ==
1960				    left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1961 /*
[6f4495f5]1962 * The interval is contained in the
1963 * interval (i - 1) of the leaf and can
1964 * be removed by updating the size of
1965 * the bigger interval.
[da1bafb]1966 *
[25bf215]1967 */
[56789125]1968 leaf->value[i - 1] -= count;
[25bf215]1969 return 1;
[6f4495f5]1970 } else if (page + count * PAGE_SIZE <
1971 left_pg + left_cnt * PAGE_SIZE) {
[25bf215]1972 /*
[6f4495f5]1973 * The interval is contained in the
1974 * interval (i - 1) of the leaf but its
1975 * removal requires both updating the
1976 * size of the original interval and
[25bf215]1977 * also inserting a new interval.
1978 */
[da1bafb]1979 size_t new_cnt = ((left_pg +
[6f4495f5]1980 left_cnt * PAGE_SIZE) -
1981 (page + count * PAGE_SIZE)) >>
1982 PAGE_WIDTH;
[56789125]1983 leaf->value[i - 1] -= count + new_cnt;
[da1bafb]1984 btree_insert(&area->used_space, page +
[6f4495f5]1985 count * PAGE_SIZE, (void *) new_cnt,
1986 leaf);
[25bf215]1987 return 1;
1988 }
1989 }
1990 return 0;
1991 }
1992 }
[da1bafb]1993
[25bf215]1994error:
[98000fb]1995 panic("Inconsistency detected while removing %" PRIs " pages of used "
[f651e80]1996 "space from %p.", count, page);
[25bf215]1997}
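/*
 * Worked example (editorial sketch; the addresses are made up,
 * PAGE_SIZE == 0x1000 is assumed and these are the only intervals
 * tracked): starting from a single interval of key 0x1000 with count 4,
 * i.e. pages [0x1000, 0x5000):
 *
 *   used_space_remove(area, 0x1000, 1);  - page starts the interval, so
 *                                          the key moves to 0x2000 and the
 *                                          count drops to 3
 *   used_space_remove(area, 0x3000, 1);  - punches a hole: the count at
 *                                          0x2000 shrinks to 1 and a new
 *                                          key 0x4000 with count 1 appears
 *   used_space_remove(area, 0x6000, 1);  - nothing is tracked there, so
 *                                          the call returns 0
 */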
1998
[df0103f7]1999/*
2000 * Address space related syscalls.
2001 */
2002
2003/** Wrapper for as_area_create(). */
[da1bafb]2004unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2005{
[6f4495f5]2006 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
2007 AS_AREA_ATTR_NONE, &anon_backend, NULL))
[7f1c620]2008 return (unative_t) address;
[df0103f7]2009 else
[7f1c620]2010 return (unative_t) -1;
[df0103f7]2011}
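/*
 * Descriptive note (editorial): every area created through this syscall
 * is forced to be cacheable and is backed by the anonymous backend; on
 * failure userspace receives (unative_t) -1 instead of the area base
 * address.
 */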
2012
[c6e314a]2013/** Wrapper for as_area_resize(). */
[da1bafb]2014unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2015{
[7f1c620]2016 return (unative_t) as_area_resize(AS, address, size, 0);
[7242a78e]2017}
2018
[c98e6ee]2019/** Wrapper for as_area_change_flags(). */
[da1bafb]2020unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
[c98e6ee]2021{
2022 return (unative_t) as_area_change_flags(AS, flags, address);
2023}
2024
[c6e314a]2025/** Wrapper for as_area_destroy(). */
[7f1c620]2026unative_t sys_as_area_destroy(uintptr_t address)
[7242a78e]2027{
[7f1c620]2028 return (unative_t) as_area_destroy(AS, address);
[df0103f7]2029}
[b45c443]2030
[336db295]2031/** Get list of address space areas.
2032 *
[da1bafb]2033 * @param as Address space.
2034 * @param obuf Place to save pointer to returned buffer.
2035 * @param osize Place to save size of returned buffer.
2036 *
[336db295]2037 */
2038void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2039{
2040 mutex_lock(&as->lock);
[da1bafb]2041
[336db295]2042 /* First pass, count number of areas. */
[da1bafb]2043
2044 size_t area_cnt = 0;
2045 link_t *cur;
2046
[336db295]2047 for (cur = as->as_area_btree.leaf_head.next;
2048 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
[da1bafb]2049 btree_node_t *node =
2050 list_get_instance(cur, btree_node_t, leaf_link);
[336db295]2051 area_cnt += node->keys;
2052 }
[da1bafb]2053
2054 size_t isize = area_cnt * sizeof(as_area_info_t);
2055 as_area_info_t *info = malloc(isize, 0);
2056
[336db295]2057 /* Second pass, record data. */
[da1bafb]2058
2059 size_t area_idx = 0;
2060
[336db295]2061 for (cur = as->as_area_btree.leaf_head.next;
2062 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
[da1bafb]2063 btree_node_t *node =
2064 list_get_instance(cur, btree_node_t, leaf_link);
2065 btree_key_t i;
2066
[336db295]2067 for (i = 0; i < node->keys; i++) {
2068 as_area_t *area = node->value[i];
[da1bafb]2069
[336db295]2070 ASSERT(area_idx < area_cnt);
2071 mutex_lock(&area->lock);
[da1bafb]2072
[336db295]2073 info[area_idx].start_addr = area->base;
2074 info[area_idx].size = FRAMES2SIZE(area->pages);
2075 info[area_idx].flags = area->flags;
2076 ++area_idx;
[da1bafb]2077
[336db295]2078 mutex_unlock(&area->lock);
2079 }
2080 }
[da1bafb]2081
[336db295]2082 mutex_unlock(&as->lock);
[da1bafb]2083
[336db295]2084 *obuf = info;
2085 *osize = isize;
2086}
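/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * one possible consumer of as_get_area_info(). The helper name is made
 * up; the field names follow the assignments above and free() is assumed
 * to be the counterpart of the malloc() used there.
 */
static void as_area_info_dump(as_t *as)
{
	as_area_info_t *info;
	size_t isize;
	
	/* Obtain a snapshot of all areas in the address space. */
	as_get_area_info(as, &info, &isize);
	
	size_t count = isize / sizeof(as_area_info_t);
	size_t i;
	for (i = 0; i < count; i++)
		printf("area: base=%p, size=%" PRIs ", flags=%x\n",
		    info[i].start_addr, info[i].size, info[i].flags);
	
	/* The buffer was allocated by as_get_area_info(); release it. */
	free(info);
}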
2087
[64c2ad5]2088/** Print out information about address space.
2089 *
[da1bafb]2090 * @param as Address space.
2091 *
[64c2ad5]2092 */
2093void as_print(as_t *as)
2094{
2095 mutex_lock(&as->lock);
2096
2097	/* Print out info about address space areas. */
2098 link_t *cur;
[6f4495f5]2099 for (cur = as->as_area_btree.leaf_head.next;
2100 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
[da1bafb]2101 btree_node_t *node
2102 = list_get_instance(cur, btree_node_t, leaf_link);
2103 btree_key_t i;
[64c2ad5]2104
2105 for (i = 0; i < node->keys; i++) {
[7ba7c6d]2106 as_area_t *area = node->value[i];
[da1bafb]2107
[64c2ad5]2108 mutex_lock(&area->lock);
[98000fb]2109 printf("as_area: %p, base=%p, pages=%" PRIs
[6745592]2110 " (%p - %p)\n", area, area->base, area->pages,
2111 area->base, area->base + FRAMES2SIZE(area->pages));
[64c2ad5]2112 mutex_unlock(&area->lock);
2113 }
2114 }
2115
2116 mutex_unlock(&as->lock);
2117}
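/*
 * Descriptive note (editorial): like as_get_area_info() above, the walk
 * iterates over the B+tree leaf list while holding the address space
 * lock, taking each area lock only for the duration of its printout.
 */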
2118
[cc73a8a1]2119/** @}
[b45c443]2120 */