source: mainline/kernel/generic/src/mm/as.c@ 83b6ba9f

Last change on this file was 83b6ba9f, checked in by Jakub Jermar <jakub@…>, 11 years ago

Support for AS area backend shared data.

  • share_info_t is now created for every new address space area.
  • share_info_t was extended to point to AS area backend shared data.
  • AS area backend may decide to define create/destroy_shared_data() methods.
  • Anonymous backend_phys frames are now freed from phys_destroy_shared_data().
  • Fixed one case of a forgotten locked mutex.
File size: 56.6 KB
[20d50a1]1/*
[0321109]2 * Copyright (c) 2010 Jakub Jermar
[20d50a1]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[cc73a8a1]29/** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Address space related functions.
[9179d0a]36 *
[20d50a1]37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * Virtual Address Translation (VAT) subsystem.
[9179d0a]40 *
41 * Functionality provided by this file allows one to
[cc73a8a1]42 * create address spaces and create, resize and share
[9179d0a]43 * address space areas.
44 *
45 * @see page.c
46 *
[20d50a1]47 */
48
49#include <mm/as.h>
[ef67bab]50#include <arch/mm/as.h>
[20d50a1]51#include <mm/page.h>
52#include <mm/frame.h>
[085d973]53#include <mm/slab.h>
[20d50a1]54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
[2802767]57#include <genarch/mm/page_ht.h>
[4512d7e]58#include <mm/asid.h>
[20d50a1]59#include <arch/mm/asid.h>
[31d8e10]60#include <preemption.h>
[20d50a1]61#include <synch/spinlock.h>
[1068f6a]62#include <synch/mutex.h>
[5c9a08b]63#include <adt/list.h>
[252127e]64#include <adt/btree.h>
[df0103f7]65#include <proc/task.h>
[e3c762cd]66#include <proc/thread.h>
[20d50a1]67#include <arch/asm.h>
[df0103f7]68#include <panic.h>
[20d50a1]69#include <debug.h>
[df0103f7]70#include <print.h>
[20d50a1]71#include <memstr.h>
[5a7d9d1]72#include <macros.h>
[0b37882]73#include <bitops.h>
[20d50a1]74#include <arch.h>
[df0103f7]75#include <errno.h>
76#include <config.h>
[25bf215]77#include <align.h>
[d99c1d2]78#include <typedefs.h>
[e3c762cd]79#include <syscall/copy.h>
80#include <arch/interrupt.h>
[1dbc43f]81#include <interrupt.h>
[20d50a1]82
[cc73a8a1]83/**
84 * Each architecture decides what functions will be used to carry out
85 * address space operations such as creating or locking page tables.
86 */
[ef67bab]87as_operations_t *as_operations = NULL;
[20d50a1]88
[fc47885]89/** Slab for as_t objects.
[da1bafb]90 *
[57da95c]91 */
92static slab_cache_t *as_slab;
93
[fc47885]94/** ASID subsystem lock.
95 *
96 * This lock protects:
[55b77d9]97 * - inactive_as_with_asid_list
[879585a3]98 * - as->asid for each as of the as_t type
99 * - asids_allocated counter
[da1bafb]100 *
[6f4495f5]101 */
[879585a3]102SPINLOCK_INITIALIZE(asidlock);
[7e4e532]103
104/**
[fc47885]105 * Inactive address spaces (on all processors)
106 * that have valid ASID.
[7e4e532]107 */
[55b77d9]108LIST_INITIALIZE(inactive_as_with_asid_list);
[7e4e532]109
[071a8ae6]110/** Kernel address space. */
111as_t *AS_KERNEL = NULL;
112
[7a0359b]113NO_TRACE static int as_constructor(void *obj, unsigned int flags)
[29b2bbf]114{
115 as_t *as = (as_t *) obj;
[da1bafb]116
[29b2bbf]117 link_initialize(&as->inactive_as_with_asid_link);
[7f341820]118 mutex_initialize(&as->lock, MUTEX_PASSIVE);
[29b2bbf]119
[fc47885]120 return as_constructor_arch(as, flags);
[29b2bbf]121}
122
[7a0359b]123NO_TRACE static size_t as_destructor(void *obj)
[29b2bbf]124{
[fc47885]125 return as_destructor_arch((as_t *) obj);
[29b2bbf]126}
127
[ef67bab]128/** Initialize address space subsystem. */
129void as_init(void)
130{
131 as_arch_init();
[da1bafb]132
[f97f1e51]133 as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
[6f4495f5]134 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[57da95c]135
[8e1ea655]136 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]137 if (!AS_KERNEL)
[f651e80]138 panic("Cannot create kernel address space.");
[125e944]139
[fc47885]140 /*
141 * Make sure the kernel address space
[76fca31]142 * reference count never drops to zero.
143 */
[6193351]144 as_hold(AS_KERNEL);
[ef67bab]145}
146
[071a8ae6]147/** Create address space.
148 *
[da1bafb]149 * @param flags Flags that influence the way in which the address
150 * space is created.
151 *
[071a8ae6]152 */
[da1bafb]153as_t *as_create(unsigned int flags)
[20d50a1]154{
[da1bafb]155 as_t *as = (as_t *) slab_alloc(as_slab, 0);
[29b2bbf]156 (void) as_create_arch(as, 0);
157
[252127e]158 btree_create(&as->as_area_btree);
[bb68433]159
160 if (flags & FLAG_AS_KERNEL)
161 as->asid = ASID_KERNEL;
162 else
163 as->asid = ASID_INVALID;
164
[31d8e10]165 atomic_set(&as->refcount, 0);
[47800e0]166 as->cpu_refcount = 0;
[da1bafb]167
[b3f8fb7]168#ifdef AS_PAGE_TABLE
[80bcaed]169 as->genarch.page_table = page_table_create(flags);
[b3f8fb7]170#else
171 page_table_create(flags);
172#endif
[76fca31]173
[20d50a1]174 return as;
175}
176
[482826d]177/** Destroy address space.
178 *
[6f4495f5]179 * When there are no tasks referencing this address space (i.e. its refcount is
180 * zero), the address space can be destroyed.
[31d8e10]181 *
182 * We know that we don't hold any spinlock.
[6745592]183 *
[da1bafb]184 * @param as Address space to be destroyed.
185 *
[482826d]186 */
187void as_destroy(as_t *as)
[5be1923]188{
[31d8e10]189 DEADLOCK_PROBE_INIT(p_asidlock);
[fc47885]190
[1624aae]191 ASSERT(as != AS);
[31d8e10]192 ASSERT(atomic_get(&as->refcount) == 0);
[482826d]193
194 /*
[663bb537]195 * Since there is no reference to this address space, it is safe not to
196 * lock its mutex.
[482826d]197 */
[fc47885]198
[31d8e10]199 /*
200 * We need to avoid deadlock between TLB shootdown and asidlock.
201 * We therefore try to take asidlock conditionally and if we don't succeed,
202 * we enable interrupts and try again. This is done while preemption is
203 * disabled to prevent nested context switches. We also depend on the
204 * fact that so far no spinlocks are held.
205 */
206 preemption_disable();
[da1bafb]207 ipl_t ipl = interrupts_read();
208
[31d8e10]209retry:
210 interrupts_disable();
211 if (!spinlock_trylock(&asidlock)) {
212 interrupts_enable();
213 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
214 goto retry;
215 }
[da1bafb]216
217 /* Interrupts disabled, enable preemption */
218 preemption_enable();
219
220 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
[1624aae]221 if (as->cpu_refcount == 0)
[31e8ddd]222 list_remove(&as->inactive_as_with_asid_link);
[da1bafb]223
[482826d]224 asid_put(as->asid);
225 }
[da1bafb]226
[879585a3]227 spinlock_unlock(&asidlock);
[fdaad75d]228 interrupts_restore(ipl);
[fc47885]229
[da1bafb]230
[482826d]231 /*
232 * Destroy address space areas of the address space.
[8440473]233 * The B+tree must be walked carefully because it is
[6f9a9bc]234 * also being destroyed.
[da1bafb]235 */
236 bool cond = true;
237 while (cond) {
[55b77d9]238 ASSERT(!list_empty(&as->as_area_btree.leaf_list));
[da1bafb]239
240 btree_node_t *node =
[55b77d9]241 list_get_instance(list_first(&as->as_area_btree.leaf_list),
[6f4495f5]242 btree_node_t, leaf_link);
[da1bafb]243
244 if ((cond = node->keys))
[6f9a9bc]245 as_area_destroy(as, node->key[0]);
[482826d]246 }
[da1bafb]247
[152b2b0]248 btree_destroy(&as->as_area_btree);
[da1bafb]249
[b3f8fb7]250#ifdef AS_PAGE_TABLE
[80bcaed]251 page_table_destroy(as->genarch.page_table);
[b3f8fb7]252#else
253 page_table_destroy(NULL);
254#endif
[da1bafb]255
[57da95c]256 slab_free(as_slab, as);
[5be1923]257}
258
[0321109]259/** Hold a reference to an address space.
260 *
[fc47885]261 * Holding a reference to an address space prevents destruction
262 * of that address space.
[0321109]263 *
[da1bafb]264 * @param as Address space to be held.
265 *
[0321109]266 */
[7a0359b]267NO_TRACE void as_hold(as_t *as)
[0321109]268{
269 atomic_inc(&as->refcount);
270}
271
272/** Release a reference to an address space.
273 *
[fc47885]274 * The last one to release a reference to an address space
275 * destroys the address space.
[0321109]276 *
[da1bafb]277 * @param as Address space to be released.
278 *
[0321109]279 */
[7a0359b]280NO_TRACE void as_release(as_t *as)
[0321109]281{
282 if (atomic_predec(&as->refcount) == 0)
283 as_destroy(as);
284}
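/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the reference-counting protocol implemented by as_hold() and
 * as_release(). A freshly created address space starts with refcount
 * zero; each holder pairs as_hold() with exactly one as_release(), and
 * the release that takes the count back to zero runs as_destroy().
 */
#if 0
static void example_as_lifecycle(void)
{
	as_t *as = as_create(0);	/* refcount starts at zero */

	as_hold(as);	/* first holder: refcount == 1 */
	as_release(as);	/* drops to zero, as_destroy(as) is called */
}
#endif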
285
[e3ee9b9]286/** Check area conflicts with other areas.
287 *
[35a3d950]288 * @param as Address space.
289 * @param addr Starting virtual address of the area being tested.
290 * @param count Number of pages in the area being tested.
291 * @param guarded True if the area being tested is protected by guard pages.
292 * @param avoid Do not touch this area.
[e3ee9b9]293 *
294 * @return True if there is no conflict, false otherwise.
295 *
296 */
[0b37882]297NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
[35a3d950]298 size_t count, bool guarded, as_area_t *avoid)
[e3ee9b9]299{
[0b37882]300 ASSERT((addr % PAGE_SIZE) == 0);
[e3ee9b9]301 ASSERT(mutex_locked(&as->lock));
[94795812]302
303 /*
304 * If the addition of the supposed area address and size overflows,
305 * report conflict.
306 */
307 if (overflows_into_positive(addr, P2SZ(count)))
308 return false;
[e3ee9b9]309
310 /*
311 * We don't want any area to have conflicts with NULL page.
312 */
[b6f3e7e]313 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
[e3ee9b9]314 return false;
[35a3d950]315
[e3ee9b9]316 /*
317 * The leaf node is found in O(log n), where n is proportional to
318 * the number of address space areas belonging to as.
319 * The check for conflicts is then attempted on the rightmost
320 * record in the left neighbour, the leftmost record in the right
321 * neighbour and all records in the leaf node itself.
322 */
323 btree_node_t *leaf;
324 as_area_t *area =
[0b37882]325 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
[e3ee9b9]326 if (area) {
[0b37882]327 if (area != avoid)
[e3ee9b9]328 return false;
329 }
330
331 /* First, check the two border cases. */
332 btree_node_t *node =
333 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
334 if (node) {
335 area = (as_area_t *) node->value[node->keys - 1];
336
[0b37882]337 if (area != avoid) {
338 mutex_lock(&area->lock);
[35a3d950]339
[94795812]340 /*
341 * If at least one of the two areas is protected
[35a3d950]342 * by the AS_AREA_GUARD flag then we must be sure
343 * that they are separated by at least one unmapped
344 * page.
345 */
346 int const gp = (guarded ||
347 (area->flags & AS_AREA_GUARD)) ? 1 : 0;
[0b37882]348
[94795812]349 /*
350 * The area comes from the left neighbour node, which
351 * means that there already are some areas in the leaf
352 * node, which in turn means that adding gp is safe and
353 * will not cause an integer overflow.
354 */
[b6f3e7e]355 if (overlaps(addr, P2SZ(count), area->base,
[35a3d950]356 P2SZ(area->pages + gp))) {
[0b37882]357 mutex_unlock(&area->lock);
358 return false;
359 }
360
[e3ee9b9]361 mutex_unlock(&area->lock);
362 }
363 }
364
365 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
366 if (node) {
367 area = (as_area_t *) node->value[0];
368
[0b37882]369 if (area != avoid) {
[94795812]370 int gp;
371
[0b37882]372 mutex_lock(&area->lock);
[35a3d950]373
[94795812]374 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
375 if (gp && overflows(addr, P2SZ(count))) {
376 /*
377 * Guard page not needed if the supposed area
378 * is adjacent to the end of the address space.
379 * We already know that the following test is
380 * going to fail...
381 */
382 gp--;
383 }
[0b37882]384
[35a3d950]385 if (overlaps(addr, P2SZ(count + gp), area->base,
[b6f3e7e]386 P2SZ(area->pages))) {
[0b37882]387 mutex_unlock(&area->lock);
388 return false;
389 }
390
[e3ee9b9]391 mutex_unlock(&area->lock);
392 }
393 }
394
395 /* Second, check the leaf node. */
396 btree_key_t i;
397 for (i = 0; i < leaf->keys; i++) {
398 area = (as_area_t *) leaf->value[i];
[94795812]399 int agp;
400 int gp;
[e3ee9b9]401
[0b37882]402 if (area == avoid)
[e3ee9b9]403 continue;
404
405 mutex_lock(&area->lock);
[35a3d950]406
[94795812]407 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
408 agp = gp;
409
410 /*
411 * Sanitize the two possible unsigned integer overflows.
412 */
413 if (gp && overflows(addr, P2SZ(count)))
414 gp--;
415 if (agp && overflows(area->base, P2SZ(area->pages)))
416 agp--;
[35a3d950]417
418 if (overlaps(addr, P2SZ(count + gp), area->base,
[94795812]419 P2SZ(area->pages + agp))) {
[e3ee9b9]420 mutex_unlock(&area->lock);
421 return false;
422 }
423
424 mutex_unlock(&area->lock);
425 }
426
427 /*
428 * So far, the area does not conflict with other areas.
[57355a40]429 * Check if it is contained in the user address space.
[e3ee9b9]430 */
431 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
[57355a40]432 return iswithin(USER_ADDRESS_SPACE_START,
433 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
434 addr, P2SZ(count));
[e3ee9b9]435 }
436
437 return true;
438}
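/*
 * Worked example (illustrative, assuming 4 KiB pages): let a guarded
 * area occupy [0x10000, 0x12000) (base 0x10000, pages == 2). Testing a
 * new one-page area at addr == 0x12000 yields gp == 1, so the existing
 * area is treated as if it spanned [0x10000, 0x13000) and the overlap
 * test fails, forcing at least one unmapped guard page between the two.
 */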
439
[fbcdeb8]440/** Return address of an unmapped address space area
441 *
442 * The address space must already be locked when calling
443 * this function.
444 *
[35a3d950]445 * @param as Address space.
446 * @param bound Lowest address bound.
447 * @param size Requested size of the allocation.
448 * @param guarded True if the allocation must be protected by guard pages.
[fbcdeb8]449 *
450 * @return Address of the beginning of unmapped address space area.
451 * @return -1 if no suitable address space area was found.
452 *
453 */
454NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
[35a3d950]455 size_t size, bool guarded)
[fbcdeb8]456{
457 ASSERT(mutex_locked(&as->lock));
458
459 if (size == 0)
460 return (uintptr_t) -1;
461
462 /*
463 * Make sure we allocate from page-aligned
464 * address. Check for possible overflow in
465 * each step.
466 */
467
468 size_t pages = SIZE2FRAMES(size);
469
470 /*
471 * Find the lowest unmapped address aligned on the size
472 * boundary, not smaller than bound and of the required size.
473 */
474
475 /* First check the bound address itself */
476 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
[35a3d950]477 if (addr >= bound) {
478 if (guarded) {
479 /* Leave an unmapped page between the lower
480 * bound and the area's start address.
481 */
482 addr += P2SZ(1);
483 }
484
485 if (check_area_conflicts(as, addr, pages, guarded, NULL))
486 return addr;
487 }
[fbcdeb8]488
489 /* Eventually check the addresses behind each area */
[feeac0d]490 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
[fbcdeb8]491
492 for (btree_key_t i = 0; i < node->keys; i++) {
493 as_area_t *area = (as_area_t *) node->value[i];
494
495 mutex_lock(&area->lock);
496
497 addr =
498 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
[35a3d950]499
500 if (guarded || area->flags & AS_AREA_GUARD) {
501 /* We must leave an unmapped page
502 * between the two areas.
503 */
504 addr += P2SZ(1);
505 }
506
[fbcdeb8]507 bool avail =
508 ((addr >= bound) && (addr >= area->base) &&
[35a3d950]509 (check_area_conflicts(as, addr, pages, guarded, area)));
[fbcdeb8]510
511 mutex_unlock(&area->lock);
512
513 if (avail)
514 return addr;
515 }
516 }
517
518 /* No suitable address space area found */
519 return (uintptr_t) -1;
520}
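/*
 * Illustrative walk-through: a request with bound == 0x1234 first probes
 * ALIGN_UP(0x1234, PAGE_SIZE), i.e. 0x2000 with 4 KiB pages (plus one
 * extra page when the new area is guarded). If that conflicts, the leaf
 * list is walked and the first page-aligned address behind each existing
 * area is probed until check_area_conflicts() succeeds.
 */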
521
[83b6ba9f]522/** Remove reference to address space area share info.
523 *
524 * If the reference count drops to 0, the sh_info is deallocated.
525 *
526 * @param sh_info Pointer to address space area share info.
527 *
528 */
529NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
530{
531 bool dealloc = false;
532
533 mutex_lock(&sh_info->lock);
534 ASSERT(sh_info->refcount);
535
536 if (--sh_info->refcount == 0) {
537 dealloc = true;
538
539 /*
540 * Now walk carefully the pagemap B+tree and free/remove
541 * reference from all frames found there.
542 */
543 list_foreach(sh_info->pagemap.leaf_list, leaf_link,
544 btree_node_t, node) {
545 btree_key_t i;
546
547 for (i = 0; i < node->keys; i++)
548 frame_free((uintptr_t) node->value[i], 1);
549 }
550
551 }
552 mutex_unlock(&sh_info->lock);
553
554 if (dealloc) {
555 if (sh_info->backend && sh_info->backend->destroy_shared_data) {
556 sh_info->backend->destroy_shared_data(
557 sh_info->backend_shared_data);
558 }
559 btree_destroy(&sh_info->pagemap);
560 free(sh_info);
561 }
562}
563
564
[20d50a1]565/** Create address space area of common attributes.
566 *
567 * The created address space area is added to the target address space.
568 *
[da1bafb]569 * @param as Target address space.
570 * @param flags Flags of the area memory.
571 * @param size Size of area.
572 * @param attrs Attributes of the area.
573 * @param backend Address space area backend. NULL if no backend is used.
574 * @param backend_data NULL or a pointer to an array holding two void *.
[fbcdeb8]575 * @param base Starting virtual address of the area.
576 * If set to -1, a suitable mappable area is found.
577 * @param bound Lowest address bound if base is set to -1.
578 * Otherwise ignored.
[da1bafb]579 *
580 * @return Address space area on success or NULL on failure.
[20d50a1]581 *
582 */
[da1bafb]583as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
[fbcdeb8]584 unsigned int attrs, mem_backend_t *backend,
585 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
[20d50a1]586{
[59fb782]587 if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
[37e7d2b9]588 return NULL;
[da1bafb]589
[0b37882]590 if (size == 0)
[dbbeb26]591 return NULL;
[0941e9ae]592
[0b37882]593 size_t pages = SIZE2FRAMES(size);
594
[37e7d2b9]595 /* Writeable executable areas are not supported. */
596 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
597 return NULL;
[35a3d950]598
599 bool const guarded = flags & AS_AREA_GUARD;
[20d50a1]600
[1068f6a]601 mutex_lock(&as->lock);
[20d50a1]602
[fbcdeb8]603 if (*base == (uintptr_t) -1) {
[35a3d950]604 *base = as_get_unmapped_area(as, bound, size, guarded);
[fbcdeb8]605 if (*base == (uintptr_t) -1) {
606 mutex_unlock(&as->lock);
607 return NULL;
608 }
609 }
[35a3d950]610
[83b6ba9f]611 if (overflows_into_positive(*base, size)) {
612 mutex_unlock(&as->lock);
[0941e9ae]613 return NULL;
[83b6ba9f]614 }
[0941e9ae]615
[35a3d950]616 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
[1068f6a]617 mutex_unlock(&as->lock);
[37e7d2b9]618 return NULL;
619 }
[20d50a1]620
[da1bafb]621 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
622
623 mutex_initialize(&area->lock, MUTEX_PASSIVE);
624
625 area->as = as;
626 area->flags = flags;
627 area->attributes = attrs;
[0b37882]628 area->pages = pages;
[fc47885]629 area->resident = 0;
[fbcdeb8]630 area->base = *base;
[da1bafb]631 area->backend = backend;
[83b6ba9f]632 area->sh_info = NULL;
[da1bafb]633
[0ee077ee]634 if (backend_data)
[da1bafb]635 area->backend_data = *backend_data;
[0ee077ee]636 else
[da1bafb]637 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
[83b6ba9f]638
639 share_info_t *si = NULL;
640
641 /*
642 * Create the sharing info structure.
643 * We do this in advance for every new area, even if it is not going
644 * to be shared.
645 */
646 if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
647 si = (share_info_t *) malloc(sizeof(share_info_t), 0);
648 mutex_initialize(&si->lock, MUTEX_PASSIVE);
649 si->refcount = 1;
650 si->shared = false;
651 si->backend_shared_data = NULL;
652 si->backend = backend;
653 btree_create(&si->pagemap);
654
655 area->sh_info = si;
656
657 if (area->backend && area->backend->create_shared_data) {
658 if (!area->backend->create_shared_data(area)) {
659 free(area);
660 mutex_unlock(&as->lock);
661 sh_info_remove_reference(si);
662 return NULL;
663 }
664 }
665 }
666
[e394b736]667 if (area->backend && area->backend->create) {
668 if (!area->backend->create(area)) {
669 free(area);
670 mutex_unlock(&as->lock);
[83b6ba9f]671 if (!(attrs & AS_AREA_ATTR_PARTIAL))
672 sh_info_remove_reference(si);
[e394b736]673 return NULL;
674 }
675 }
[83b6ba9f]676
[da1bafb]677 btree_create(&area->used_space);
[fbcdeb8]678 btree_insert(&as->as_area_btree, *base, (void *) area,
679 NULL);
[bb68433]680
[1068f6a]681 mutex_unlock(&as->lock);
[da1bafb]682
683 return area;
[20d50a1]684}
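/*
 * Usage sketch (illustrative only; it assumes the anonymous backend and
 * the AS_AREA_ATTR_NONE attribute constant from the kernel headers):
 * create a four-page, read-write, cacheable area at a kernel-chosen
 * base address.
 */
#if 0
static as_area_t *example_create_anon_area(as_t *as)
{
	uintptr_t base = (uintptr_t) -1;	/* let the kernel pick the base */

	return as_area_create(as, AS_AREA_READ | AS_AREA_WRITE |
	    AS_AREA_CACHEABLE, 4 * PAGE_SIZE, AS_AREA_ATTR_NONE,
	    &anon_backend, NULL, &base, 0);
}
#endif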
685
[e3ee9b9]686/** Find address space area and lock it.
687 *
688 * @param as Address space.
689 * @param va Virtual address.
690 *
691 * @return Locked address space area containing va on success or
692 * NULL on failure.
693 *
694 */
[7a0359b]695NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
[e3ee9b9]696{
697 ASSERT(mutex_locked(&as->lock));
698
699 btree_node_t *leaf;
[b6f3e7e]700 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
701 &leaf);
[e3ee9b9]702 if (area) {
703 /* va is the base address of an address space area */
704 mutex_lock(&area->lock);
705 return area;
706 }
707
708 /*
[326bf65]709 * Search the leaf node and the rightmost record of its left neighbour
[e3ee9b9]710 * to find out whether this is a miss or va belongs to an address
711 * space area found there.
712 */
713
714 /* First, search the leaf node itself. */
715 btree_key_t i;
716
717 for (i = 0; i < leaf->keys; i++) {
718 area = (as_area_t *) leaf->value[i];
719
720 mutex_lock(&area->lock);
[326bf65]721
[b6f3e7e]722 if ((area->base <= va) &&
723 (va <= area->base + (P2SZ(area->pages) - 1)))
[e3ee9b9]724 return area;
725
726 mutex_unlock(&area->lock);
727 }
728
729 /*
730 * Second, locate the left neighbour and test its last record.
731 * Because of its position in the B+tree, it must have base < va.
732 */
[b6f3e7e]733 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
734 leaf);
[e3ee9b9]735 if (lnode) {
736 area = (as_area_t *) lnode->value[lnode->keys - 1];
737
738 mutex_lock(&area->lock);
739
[b6f3e7e]740 if (va <= area->base + (P2SZ(area->pages) - 1))
[e3ee9b9]741 return area;
742
743 mutex_unlock(&area->lock);
744 }
745
746 return NULL;
747}
748
[df0103f7]749/** Find address space area and change it.
750 *
[da1bafb]751 * @param as Address space.
752 * @param address Virtual address belonging to the area to be changed.
753 * Must be page-aligned.
754 * @param size New size of the virtual memory block starting at
755 * address.
756 * @param flags Flags influencing the remap operation. Currently unused.
757 *
758 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]759 *
[da1bafb]760 */
761int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
[df0103f7]762{
[59fb782]763 if (!IS_ALIGNED(address, PAGE_SIZE))
764 return EINVAL;
765
[1068f6a]766 mutex_lock(&as->lock);
[df0103f7]767
768 /*
769 * Locate the area.
770 */
[da1bafb]771 as_area_t *area = find_area_and_lock(as, address);
[df0103f7]772 if (!area) {
[1068f6a]773 mutex_unlock(&as->lock);
[7242a78e]774 return ENOENT;
[df0103f7]775 }
[01029fc]776
777 if (!area->backend->is_resizable(area)) {
[df0103f7]778 /*
[01029fc]779 * The backend does not support resizing for this area.
[df0103f7]780 */
[1068f6a]781 mutex_unlock(&area->lock);
782 mutex_unlock(&as->lock);
[7242a78e]783 return ENOTSUP;
[df0103f7]784 }
[da1bafb]785
[83b6ba9f]786 mutex_lock(&area->sh_info->lock);
787 if (area->sh_info->shared) {
[8182031]788 /*
[da1bafb]789 * Remapping of shared address space areas
[8182031]790 * is not supported.
791 */
[83b6ba9f]792 mutex_unlock(&area->sh_info->lock);
[8182031]793 mutex_unlock(&area->lock);
794 mutex_unlock(&as->lock);
795 return ENOTSUP;
796 }
[83b6ba9f]797 mutex_unlock(&area->sh_info->lock);
[da1bafb]798
799 size_t pages = SIZE2FRAMES((address - area->base) + size);
[df0103f7]800 if (!pages) {
801 /*
802 * Zero size address space areas are not allowed.
803 */
[1068f6a]804 mutex_unlock(&area->lock);
805 mutex_unlock(&as->lock);
[7242a78e]806 return EPERM;
[df0103f7]807 }
808
809 if (pages < area->pages) {
[b6f3e7e]810 uintptr_t start_free = area->base + P2SZ(pages);
[da1bafb]811
[df0103f7]812 /*
813 * Shrinking the area.
814 * No need to check for overlaps.
815 */
[da1bafb]816
[c964521]817 page_table_lock(as, false);
[da1bafb]818
[56789125]819 /*
820 * Remove frames belonging to used space starting from
821 * the highest addresses downwards until an overlap with
822 * the resized address space area is found. Note that this
823 * is also the right way to remove part of the used_space
824 * B+tree leaf list.
[da1bafb]825 */
826 bool cond = true;
827 while (cond) {
[55b77d9]828 ASSERT(!list_empty(&area->used_space.leaf_list));
[da1bafb]829
830 btree_node_t *node =
[55b77d9]831 list_get_instance(list_last(&area->used_space.leaf_list),
[6f4495f5]832 btree_node_t, leaf_link);
[da1bafb]833
[56789125]834 if ((cond = (bool) node->keys)) {
[da1bafb]835 uintptr_t ptr = node->key[node->keys - 1];
836 size_t size =
[98000fb]837 (size_t) node->value[node->keys - 1];
[da1bafb]838 size_t i = 0;
839
[b6f3e7e]840 if (overlaps(ptr, P2SZ(size), area->base,
841 P2SZ(pages))) {
[56789125]842
[b6f3e7e]843 if (ptr + P2SZ(size) <= start_free) {
[56789125]844 /*
[6f4495f5]845 * The whole interval fits
846 * completely in the resized
847 * address space area.
[56789125]848 */
849 break;
850 }
[da1bafb]851
[56789125]852 /*
[6f4495f5]853 * Part of the interval overlaps with
854 * the resized address space area.
[56789125]856 */
[da1bafb]857
858 /* We are almost done */
859 cond = false;
860 i = (start_free - ptr) >> PAGE_WIDTH;
[6745592]861 if (!used_space_remove(area, start_free,
[da1bafb]862 size - i))
863 panic("Cannot remove used space.");
[56789125]864 } else {
865 /*
[6f4495f5]866 * The interval of used space can be
867 * completely removed.
[56789125]868 */
[da1bafb]869 if (!used_space_remove(area, ptr, size))
870 panic("Cannot remove used space.");
[56789125]871 }
[da1bafb]872
[d67dfdc]873 /*
874 * Start TLB shootdown sequence.
875 *
876 * The sequence is rather short and can be
877 * repeated multiple times. The reason is that
878 * we don't want to have used_space_remove()
879 * inside the sequence as it may use a blocking
880 * memory allocation for its B+tree. Blocking
881 * while holding the tlblock spinlock is
882 * forbidden and would hit a kernel assertion.
883 */
884
885 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
886 as->asid, area->base + P2SZ(pages),
887 area->pages - pages);
888
[da1bafb]889 for (; i < size; i++) {
[b6f3e7e]890 pte_t *pte = page_mapping_find(as,
[0ff03f3]891 ptr + P2SZ(i), false);
[da1bafb]892
893 ASSERT(pte);
894 ASSERT(PTE_VALID(pte));
895 ASSERT(PTE_PRESENT(pte));
896
897 if ((area->backend) &&
898 (area->backend->frame_free)) {
[0ee077ee]899 area->backend->frame_free(area,
[b6f3e7e]900 ptr + P2SZ(i),
[6f4495f5]901 PTE_GET_FRAME(pte));
[8182031]902 }
[da1bafb]903
[b6f3e7e]904 page_mapping_remove(as, ptr + P2SZ(i));
[56789125]905 }
[da1bafb]906
[d67dfdc]907 /*
908 * Finish TLB shootdown sequence.
909 */
[da1bafb]910
[d67dfdc]911 tlb_invalidate_pages(as->asid,
912 area->base + P2SZ(pages),
913 area->pages - pages);
[31d8e10]914
[d67dfdc]915 /*
916 * Invalidate software translation caches
917 * (e.g. TSB on sparc64, PHT on ppc32).
918 */
919 as_invalidate_translation_cache(as,
920 area->base + P2SZ(pages),
921 area->pages - pages);
922 tlb_shootdown_finalize(ipl);
923 }
924 }
[da1bafb]925 page_table_unlock(as, false);
[df0103f7]926 } else {
927 /*
928 * Growing the area.
[0941e9ae]929 */
930
[94795812]931 if (overflows_into_positive(address, P2SZ(pages)))
[0941e9ae]932 return EINVAL;
933
934 /*
[df0103f7]935 * Check for overlaps with other address space areas.
936 */
[35a3d950]937 bool const guarded = area->flags & AS_AREA_GUARD;
938 if (!check_area_conflicts(as, address, pages, guarded, area)) {
[1068f6a]939 mutex_unlock(&area->lock);
[da1bafb]940 mutex_unlock(&as->lock);
[7242a78e]941 return EADDRNOTAVAIL;
[df0103f7]942 }
[da1bafb]943 }
944
[e394b736]945 if (area->backend && area->backend->resize) {
946 if (!area->backend->resize(area, pages)) {
947 mutex_unlock(&area->lock);
948 mutex_unlock(&as->lock);
949 return ENOMEM;
950 }
951 }
952
[df0103f7]953 area->pages = pages;
954
[1068f6a]955 mutex_unlock(&area->lock);
956 mutex_unlock(&as->lock);
[da1bafb]957
[7242a78e]958 return 0;
959}
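/*
 * Illustrative sketch (assumes 'base' is the base address of an existing
 * resizable area in 'as', e.g. one backed by the anonymous backend):
 * grow the area to eight pages; shrinking works the same way, with the
 * surplus pages being unmapped and their frames freed.
 */
#if 0
static int example_resize_area(as_t *as, uintptr_t base)
{
	return as_area_resize(as, base, 8 * PAGE_SIZE, 0);
}
#endif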
960
961/** Destroy address space area.
962 *
[da1bafb]963 * @param as Address space.
964 * @param address Address within the area to be deleted.
965 *
966 * @return Zero on success or a value from @ref errno.h on failure.
[7242a78e]967 *
968 */
[7f1c620]969int as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]970{
[1068f6a]971 mutex_lock(&as->lock);
[da1bafb]972
973 as_area_t *area = find_area_and_lock(as, address);
[7242a78e]974 if (!area) {
[1068f6a]975 mutex_unlock(&as->lock);
[7242a78e]976 return ENOENT;
977 }
[e394b736]978
979 if (area->backend && area->backend->destroy)
980 area->backend->destroy(area);
[da1bafb]981
982 uintptr_t base = area->base;
983
[c964521]984 page_table_lock(as, false);
[da1bafb]985
[5552d60]986 /*
987 * Start TLB shootdown sequence.
988 */
[402eda5]989 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
990 area->pages);
[da1bafb]991
[567807b1]992 /*
993 * Visit only the pages mapped by used_space B+tree.
994 */
[feeac0d]995 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
996 node) {
[da1bafb]997 btree_key_t i;
[56789125]998
[f8d069e8]999 for (i = 0; i < node->keys; i++) {
[da1bafb]1000 uintptr_t ptr = node->key[i];
1001 size_t size;
[56789125]1002
[da1bafb]1003 for (size = 0; size < (size_t) node->value[i]; size++) {
[b6f3e7e]1004 pte_t *pte = page_mapping_find(as,
[0ff03f3]1005 ptr + P2SZ(size), false);
[da1bafb]1006
1007 ASSERT(pte);
1008 ASSERT(PTE_VALID(pte));
1009 ASSERT(PTE_PRESENT(pte));
1010
1011 if ((area->backend) &&
1012 (area->backend->frame_free)) {
1013 area->backend->frame_free(area,
[b6f3e7e]1014 ptr + P2SZ(size),
1015 PTE_GET_FRAME(pte));
[56789125]1016 }
[da1bafb]1017
[b6f3e7e]1018 page_mapping_remove(as, ptr + P2SZ(size));
[7242a78e]1019 }
1020 }
1021 }
[da1bafb]1022
[7242a78e]1023 /*
[5552d60]1024 * Finish TLB shootdown sequence.
[7242a78e]1025 */
[da1bafb]1026
[f1d1f5d3]1027 tlb_invalidate_pages(as->asid, area->base, area->pages);
[da1bafb]1028
[f1d1f5d3]1029 /*
[eef1b031]1030 * Invalidate potential software translation caches
1031 * (e.g. TSB on sparc64, PHT on ppc32).
[f1d1f5d3]1032 */
1033 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1034 tlb_shootdown_finalize(ipl);
[da1bafb]1035
[c964521]1036 page_table_unlock(as, false);
[f1d1f5d3]1037
[5552d60]1038 btree_destroy(&area->used_space);
[da1bafb]1039
[8d4f2ae]1040 area->attributes |= AS_AREA_ATTR_PARTIAL;
[8182031]1041
[83b6ba9f]1042 sh_info_remove_reference(area->sh_info);
[da1bafb]1043
[1068f6a]1044 mutex_unlock(&area->lock);
[da1bafb]1045
[7242a78e]1046 /*
1047 * Remove the empty area from address space.
1048 */
[f1d1f5d3]1049 btree_remove(&as->as_area_btree, base, NULL);
[7242a78e]1050
[8d4f2ae]1051 free(area);
1052
[f1d1f5d3]1053 mutex_unlock(&as->lock);
[7242a78e]1054 return 0;
[df0103f7]1055}
1056
[8d6bc2d5]1057/** Share address space area with another or the same address space.
[df0103f7]1058 *
[0ee077ee]1059 * Address space area mapping is shared with a new address space area.
1060 * If the source address space area has not been shared so far,
1061 * a new sh_info is created. The new address space area simply gets the
1062 * sh_info of the source area. The process of duplicating the
1063 * mapping is done through the backend share function.
[da1bafb]1064 *
1065 * @param src_as Pointer to source address space.
1066 * @param src_base Base address of the source address space area.
1067 * @param acc_size Expected size of the source area.
1068 * @param dst_as Pointer to destination address space.
[fd4d8c0]1069 * @param dst_flags_mask Destination address space area flags mask.
[fbcdeb8]1070 * @param dst_base Target base address. If set to -1,
1071 * a suitable mappable area is found.
1072 * @param bound Lowest address bound if dst_base is set to -1.
1073 * Otherwise ignored.
[df0103f7]1074 *
[da1bafb]1075 * @return Zero on success.
1076 * @return ENOENT if there is no such task or such address space.
1077 * @return EPERM if there was a problem in accepting the area.
1078 * @return ENOMEM if there was a problem in allocating destination
1079 * address space area.
1080 * @return ENOTSUP if the address space area backend does not support
1081 * sharing.
1082 *
[df0103f7]1083 */
[7f1c620]1084int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
[fbcdeb8]1085 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
1086 uintptr_t bound)
[df0103f7]1087{
[1068f6a]1088 mutex_lock(&src_as->lock);
[da1bafb]1089 as_area_t *src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]1090 if (!src_area) {
[6fa476f7]1091 /*
1092 * Could not find the source address space area.
1093 */
[1068f6a]1094 mutex_unlock(&src_as->lock);
[6fa476f7]1095 return ENOENT;
1096 }
[da1bafb]1097
[01029fc]1098 if (!src_area->backend->is_shareable(src_area)) {
[8d6bc2d5]1099 /*
[01029fc]1100 * The backend does not permit sharing of this area.
[8d6bc2d5]1101 */
1102 mutex_unlock(&src_area->lock);
1103 mutex_unlock(&src_as->lock);
1104 return ENOTSUP;
1105 }
1106
[b6f3e7e]1107 size_t src_size = P2SZ(src_area->pages);
[da1bafb]1108 unsigned int src_flags = src_area->flags;
1109 mem_backend_t *src_backend = src_area->backend;
1110 mem_backend_data_t src_backend_data = src_area->backend_data;
1111
[1ec1fd8]1112 /* Share the cacheable flag from the original mapping */
1113 if (src_flags & AS_AREA_CACHEABLE)
1114 dst_flags_mask |= AS_AREA_CACHEABLE;
[da1bafb]1115
1116 if ((src_size != acc_size) ||
1117 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
[8d6bc2d5]1118 mutex_unlock(&src_area->lock);
1119 mutex_unlock(&src_as->lock);
[df0103f7]1120 return EPERM;
1121 }
[da1bafb]1122
[8d6bc2d5]1123 /*
1124 * Now we are committed to sharing the area.
[8440473]1125 * First, prepare the area for sharing.
[8d6bc2d5]1126 * Then it will be safe to unlock it.
1127 */
[da1bafb]1128 share_info_t *sh_info = src_area->sh_info;
[83b6ba9f]1129
1130 mutex_lock(&sh_info->lock);
1131 sh_info->refcount++;
1132 bool shared = sh_info->shared;
1133 sh_info->shared = true;
1134 mutex_unlock(&sh_info->lock);
1135
1136 if (!shared) {
[c0697c4c]1137 /*
1138 * Call the backend to setup sharing.
[83b6ba9f]1139 * This only happens once for each sh_info.
[c0697c4c]1140 */
1141 src_area->backend->share(src_area);
[8d6bc2d5]1142 }
[da1bafb]1143
[8d6bc2d5]1144 mutex_unlock(&src_area->lock);
1145 mutex_unlock(&src_as->lock);
[da1bafb]1146
[df0103f7]1147 /*
[a9e8b39]1148 * Create copy of the source address space area.
1149 * The destination area is created with AS_AREA_ATTR_PARTIAL
1150 * attribute set which prevents race condition with
1151 * preliminary as_page_fault() calls.
[fd4d8c0]1152 * The flags of the source area are masked against dst_flags_mask
1153 * to support sharing in less privileged mode.
[df0103f7]1154 */
[fbcdeb8]1155 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
1156 src_size, AS_AREA_ATTR_PARTIAL, src_backend,
1157 &src_backend_data, dst_base, bound);
[a9e8b39]1158 if (!dst_area) {
[df0103f7]1159 /*
1160 * Destination address space area could not be created.
1161 */
[8d6bc2d5]1162 sh_info_remove_reference(sh_info);
1163
[df0103f7]1164 return ENOMEM;
1165 }
[da1bafb]1166
[a9e8b39]1167 /*
1168 * Now the destination address space area has been
1169 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]1170 * attribute and set the sh_info.
[da1bafb]1171 */
1172 mutex_lock(&dst_as->lock);
[1068f6a]1173 mutex_lock(&dst_area->lock);
[a9e8b39]1174 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]1175 dst_area->sh_info = sh_info;
[1068f6a]1176 mutex_unlock(&dst_area->lock);
[da1bafb]1177 mutex_unlock(&dst_as->lock);
1178
[df0103f7]1179 return 0;
1180}
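/*
 * Illustrative sketch: share an existing area read-only into another
 * address space at a kernel-chosen address. All parameter names are
 * placeholders of this sketch rather than kernel fixtures.
 */
#if 0
static int example_share_area(as_t *src_as, uintptr_t src_base,
    size_t size, as_t *dst_as)
{
	uintptr_t dst_base = (uintptr_t) -1;

	return as_area_share(src_as, src_base, size, dst_as, AS_AREA_READ,
	    &dst_base, 0);
}
#endif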
1181
[fb84455]1182/** Check access mode for address space area.
1183 *
[da1bafb]1184 * @param area Address space area.
1185 * @param access Access mode.
1186 *
1187 * @return False if access violates area's permissions, true
1188 * otherwise.
[fb84455]1189 *
1190 */
[97bdb4a]1191NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
[fb84455]1192{
[fc47885]1193 ASSERT(mutex_locked(&area->lock));
1194
[fb84455]1195 int flagmap[] = {
1196 [PF_ACCESS_READ] = AS_AREA_READ,
1197 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
1198 [PF_ACCESS_EXEC] = AS_AREA_EXEC
1199 };
[da1bafb]1200
[fb84455]1201 if (!(area->flags & flagmap[access]))
1202 return false;
1203
1204 return true;
1205}
1206
[e3ee9b9]1207/** Convert address space area flags to page flags.
1208 *
1209 * @param aflags Flags of some address space area.
1210 *
1211 * @return Flags to be passed to page_mapping_insert().
1212 *
1213 */
[7a0359b]1214NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
[e3ee9b9]1215{
1216 unsigned int flags = PAGE_USER | PAGE_PRESENT;
1217
1218 if (aflags & AS_AREA_READ)
1219 flags |= PAGE_READ;
1220
1221 if (aflags & AS_AREA_WRITE)
1222 flags |= PAGE_WRITE;
1223
1224 if (aflags & AS_AREA_EXEC)
1225 flags |= PAGE_EXEC;
1226
1227 if (aflags & AS_AREA_CACHEABLE)
1228 flags |= PAGE_CACHEABLE;
1229
1230 return flags;
1231}
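/*
 * Worked example (illustrative): area_flags_to_page_flags(AS_AREA_READ |
 * AS_AREA_CACHEABLE) yields PAGE_USER | PAGE_PRESENT | PAGE_READ |
 * PAGE_CACHEABLE; the write and exec page bits stay clear.
 */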
1232
[6745592]1233/** Change address space area flags.
[c98e6ee]1234 *
1235 * The idea is to have the same data, but with a different access mode.
1236 * This is needed e.g. for writing code into memory and then executing it.
1237 * In order for this to work properly, this may copy the data
1238 * into private anonymous memory (unless it's already there).
1239 *
[76fca31]1240 * @param as Address space.
1241 * @param flags Flags of the area memory.
1242 * @param address Address within the area to be changed.
1243 *
1244 * @return Zero on success or a value from @ref errno.h on failure.
[c98e6ee]1245 *
1246 */
[da1bafb]1247int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
[c98e6ee]1248{
1249 /* Flags for the new memory mapping */
[da1bafb]1250 unsigned int page_flags = area_flags_to_page_flags(flags);
1251
[c98e6ee]1252 mutex_lock(&as->lock);
[da1bafb]1253
1254 as_area_t *area = find_area_and_lock(as, address);
[c98e6ee]1255 if (!area) {
1256 mutex_unlock(&as->lock);
1257 return ENOENT;
1258 }
[da1bafb]1259
[83b6ba9f]1260 if (area->backend != &anon_backend) {
[c98e6ee]1261 /* Copying non-anonymous memory not supported yet */
1262 mutex_unlock(&area->lock);
1263 mutex_unlock(&as->lock);
1264 return ENOTSUP;
1265 }
[83b6ba9f]1266
1267 mutex_lock(&area->sh_info->lock);
1268 if (area->sh_info->shared) {
1269 /* Copying shared areas not supported yet */
1270 mutex_unlock(&area->sh_info->lock);
1271 mutex_unlock(&area->lock);
1272 mutex_unlock(&as->lock);
1273 return ENOTSUP;
1274 }
1275 mutex_unlock(&area->sh_info->lock);
[da1bafb]1276
[c98e6ee]1277 /*
1278 * Compute total number of used pages in the used_space B+tree
1279 */
[da1bafb]1280 size_t used_pages = 0;
1281
[feeac0d]1282 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1283 node) {
[da1bafb]1284 btree_key_t i;
[c98e6ee]1285
[da1bafb]1286 for (i = 0; i < node->keys; i++)
[98000fb]1287 used_pages += (size_t) node->value[i];
[c98e6ee]1288 }
[da1bafb]1289
[c98e6ee]1290 /* An array for storing frame numbers */
[da1bafb]1291 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
1292
[c964521]1293 page_table_lock(as, false);
[da1bafb]1294
[c98e6ee]1295 /*
1296 * Start TLB shootdown sequence.
1297 */
[402eda5]1298 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1299 area->pages);
[da1bafb]1300
[c98e6ee]1301 /*
1302 * Remove used pages from page tables and remember their frame
1303 * numbers.
1304 */
[da1bafb]1305 size_t frame_idx = 0;
1306
[feeac0d]1307 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1308 node) {
[da1bafb]1309 btree_key_t i;
[c98e6ee]1310
1311 for (i = 0; i < node->keys; i++) {
[da1bafb]1312 uintptr_t ptr = node->key[i];
1313 size_t size;
[c98e6ee]1314
[da1bafb]1315 for (size = 0; size < (size_t) node->value[i]; size++) {
[b6f3e7e]1316 pte_t *pte = page_mapping_find(as,
[0ff03f3]1317 ptr + P2SZ(size), false);
[da1bafb]1318
1319 ASSERT(pte);
1320 ASSERT(PTE_VALID(pte));
1321 ASSERT(PTE_PRESENT(pte));
1322
[c98e6ee]1323 old_frame[frame_idx++] = PTE_GET_FRAME(pte);
[da1bafb]1324
[c98e6ee]1325 /* Remove old mapping */
[b6f3e7e]1326 page_mapping_remove(as, ptr + P2SZ(size));
[c98e6ee]1327 }
1328 }
1329 }
[da1bafb]1330
[c98e6ee]1331 /*
1332 * Finish TLB shootdown sequence.
1333 */
[da1bafb]1334
[c98e6ee]1335 tlb_invalidate_pages(as->asid, area->base, area->pages);
[76fca31]1336
[c98e6ee]1337 /*
[eef1b031]1338 * Invalidate potential software translation caches
1339 * (e.g. TSB on sparc64, PHT on ppc32).
[c98e6ee]1340 */
1341 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1342 tlb_shootdown_finalize(ipl);
[da1bafb]1343
[c964521]1344 page_table_unlock(as, false);
[da1bafb]1345
[ae7f6fb]1346 /*
1347 * Set the new flags.
1348 */
1349 area->flags = flags;
[da1bafb]1350
[c98e6ee]1351 /*
1352 * Map pages back in with new flags. This step is kept separate
[6745592]1353 * so that the memory area cannot be accessed with both the old and
1354 * the new flags at once.
[c98e6ee]1355 */
1356 frame_idx = 0;
[da1bafb]1357
[feeac0d]1358 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1359 node) {
[da1bafb]1360 btree_key_t i;
[c98e6ee]1361
1362 for (i = 0; i < node->keys; i++) {
[da1bafb]1363 uintptr_t ptr = node->key[i];
1364 size_t size;
[c98e6ee]1365
[da1bafb]1366 for (size = 0; size < (size_t) node->value[i]; size++) {
[c98e6ee]1367 page_table_lock(as, false);
[da1bafb]1368
[c98e6ee]1369 /* Insert the new mapping */
[b6f3e7e]1370 page_mapping_insert(as, ptr + P2SZ(size),
[c98e6ee]1371 old_frame[frame_idx++], page_flags);
[da1bafb]1372
[c98e6ee]1373 page_table_unlock(as, false);
1374 }
1375 }
1376 }
[da1bafb]1377
[c98e6ee]1378 free(old_frame);
[da1bafb]1379
[c98e6ee]1380 mutex_unlock(&area->lock);
1381 mutex_unlock(&as->lock);
[da1bafb]1382
[c98e6ee]1383 return 0;
1384}
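/*
 * Illustrative sketch, mirroring the scenario from the comment above
 * (writing code into memory and then executing it): flip a private
 * anonymous area from read-write to read-execute.
 */
#if 0
static int example_make_executable(as_t *as, uintptr_t base)
{
	return as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC |
	    AS_AREA_CACHEABLE, base);
}
#endif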
1385
[20d50a1]1386/** Handle page fault within the current address space.
1387 *
[6745592]1388 * This is the high-level page fault handler. It decides whether the page fault
1389 * can be resolved by any backend and if so, it invokes the backend to resolve
1390 * the page fault.
[8182031]1391 *
[20d50a1]1392 * Interrupts are assumed disabled.
1393 *
[59fb782]1394 * @param address Faulting address.
1395 * @param access Access mode that caused the page fault (i.e.
1396 * read/write/exec).
1397 * @param istate Pointer to the interrupted state.
[da1bafb]1398 *
1399 * @return AS_PF_FAULT on page fault.
1400 * @return AS_PF_OK on success.
1401 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1402 * or copy_from_uspace().
[20d50a1]1403 *
1404 */
[59fb782]1405int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
[20d50a1]1406{
[59fb782]1407 uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
[908bb96]1408 int rc = AS_PF_FAULT;
1409
[1068f6a]1410 if (!THREAD)
[1dbc43f]1411 goto page_fault;
[7af8c0e]1412
1413 if (!AS)
[1dbc43f]1414 goto page_fault;
[7af8c0e]1415
[1068f6a]1416 mutex_lock(&AS->lock);
[da1bafb]1417 as_area_t *area = find_area_and_lock(AS, page);
[20d50a1]1418 if (!area) {
1419 /*
1420 * No area contained mapping for 'page'.
1421 * Signal page fault to low-level handler.
1422 */
[1068f6a]1423 mutex_unlock(&AS->lock);
[e3c762cd]1424 goto page_fault;
[20d50a1]1425 }
[da1bafb]1426
[a9e8b39]1427 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1428 /*
1429 * The address space area is not fully initialized.
1430 * Avoid possible race by returning error.
1431 */
[1068f6a]1432 mutex_unlock(&area->lock);
1433 mutex_unlock(&AS->lock);
[da1bafb]1434 goto page_fault;
[a9e8b39]1435 }
[da1bafb]1436
1437 if ((!area->backend) || (!area->backend->page_fault)) {
[8182031]1438 /*
1439 * The address space area is not backed by any backend
1440 * or the backend cannot handle page faults.
1441 */
1442 mutex_unlock(&area->lock);
1443 mutex_unlock(&AS->lock);
[da1bafb]1444 goto page_fault;
[8182031]1445 }
[da1bafb]1446
[2299914]1447 page_table_lock(AS, false);
1448
1449 /*
[6745592]1450 * To avoid a race condition between two page faults on the same address,
1451 * we need to make sure the mapping has not already been inserted.
[2299914]1452 */
[da1bafb]1453 pte_t *pte;
[0ff03f3]1454 if ((pte = page_mapping_find(AS, page, false))) {
[2299914]1455 if (PTE_PRESENT(pte)) {
[fb84455]1456 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
[6f4495f5]1457 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
1458 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
[fb84455]1459 page_table_unlock(AS, false);
1460 mutex_unlock(&area->lock);
1461 mutex_unlock(&AS->lock);
1462 return AS_PF_OK;
1463 }
[2299914]1464 }
1465 }
[20d50a1]1466
1467 /*
[8182031]1468 * Resort to the backend page fault handler.
[20d50a1]1469 */
[908bb96]1470 rc = area->backend->page_fault(area, page, access);
1471 if (rc != AS_PF_OK) {
[8182031]1472 page_table_unlock(AS, false);
1473 mutex_unlock(&area->lock);
1474 mutex_unlock(&AS->lock);
1475 goto page_fault;
1476 }
[20d50a1]1477
[8182031]1478 page_table_unlock(AS, false);
[1068f6a]1479 mutex_unlock(&area->lock);
1480 mutex_unlock(&AS->lock);
[e3c762cd]1481 return AS_PF_OK;
[da1bafb]1482
[e3c762cd]1483page_fault:
1484 if (THREAD->in_copy_from_uspace) {
1485 THREAD->in_copy_from_uspace = false;
[6f4495f5]1486 istate_set_retaddr(istate,
1487 (uintptr_t) &memcpy_from_uspace_failover_address);
[e3c762cd]1488 } else if (THREAD->in_copy_to_uspace) {
1489 THREAD->in_copy_to_uspace = false;
[6f4495f5]1490 istate_set_retaddr(istate,
1491 (uintptr_t) &memcpy_to_uspace_failover_address);
[908bb96]1492 } else if (rc == AS_PF_SILENT) {
1493 printf("Killing task %" PRIu64 " due to a "
1494 "failed late reservation request.\n", TASK->taskid);
1495 task_kill_self(true);
[e3c762cd]1496 } else {
[59fb782]1497 fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
1498 panic_memtrap(istate, access, address, NULL);
[e3c762cd]1499 }
[da1bafb]1500
[e3c762cd]1501 return AS_PF_DEFER;
[20d50a1]1502}
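/*
 * Illustrative trace (added for exposition): a user-space store into a
 * not-yet-mapped page of an anonymous area arrives here as (address,
 * PF_ACCESS_WRITE, istate). The area is found and its backend's
 * page_fault() allocates a frame and inserts the mapping, so the
 * faulting instruction restarts transparently with AS_PF_OK.
 */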
1503
[7e4e532]1504/** Switch address spaces.
[1068f6a]1505 *
1506 * Note that this function cannot sleep as it is essentially a part of
[879585a3]1507 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1508 * thing which is forbidden in this context is locking the address space.
[20d50a1]1509 *
[7250d2c]1510 * When this function is entered, no spinlocks may be held.
[31d8e10]1511 *
[da1bafb]1512 * @param old Old address space or NULL.
1513 * @param new New address space.
1514 *
[20d50a1]1515 */
[80bcaed]1516void as_switch(as_t *old_as, as_t *new_as)
[20d50a1]1517{
[31d8e10]1518 DEADLOCK_PROBE_INIT(p_asidlock);
1519 preemption_disable();
[da1bafb]1520
[31d8e10]1521retry:
1522 (void) interrupts_disable();
1523 if (!spinlock_trylock(&asidlock)) {
[da1bafb]1524 /*
[31d8e10]1525 * Avoid deadlock with TLB shootdown.
1526 * We can enable interrupts here because
1527 * preemption is disabled. We should not be
1528 * holding any other lock.
1529 */
1530 (void) interrupts_enable();
1531 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1532 goto retry;
1533 }
1534 preemption_enable();
[da1bafb]1535
[7e4e532]1536 /*
1537 * First, take care of the old address space.
[da1bafb]1538 */
[80bcaed]1539 if (old_as) {
1540 ASSERT(old_as->cpu_refcount);
[da1bafb]1541
1542 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
[7e4e532]1543 /*
1544 * The old address space is no longer active on
1545 * any processor. It can be appended to the
1546 * list of inactive address spaces with assigned
1547 * ASID.
1548 */
[2057572]1549 ASSERT(old_as->asid != ASID_INVALID);
[da1bafb]1550
[2057572]1551 list_append(&old_as->inactive_as_with_asid_link,
[55b77d9]1552 &inactive_as_with_asid_list);
[7e4e532]1553 }
[da1bafb]1554
[57da95c]1555 /*
1556 * Perform architecture-specific tasks when the address space
1557 * is being removed from the CPU.
1558 */
[80bcaed]1559 as_deinstall_arch(old_as);
[7e4e532]1560 }
[da1bafb]1561
[7e4e532]1562 /*
1563 * Second, prepare the new address space.
1564 */
[80bcaed]1565 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
[879585a3]1566 if (new_as->asid != ASID_INVALID)
[80bcaed]1567 list_remove(&new_as->inactive_as_with_asid_link);
[879585a3]1568 else
1569 new_as->asid = asid_get();
[7e4e532]1570 }
[da1bafb]1571
[80bcaed]1572#ifdef AS_PAGE_TABLE
1573 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1574#endif
[7e4e532]1575
[20d50a1]1576 /*
1577 * Perform architecture-specific steps.
[4512d7e]1578 * (e.g. write ASID to hardware register etc.)
[20d50a1]1579 */
[80bcaed]1580 as_install_arch(new_as);
[da1bafb]1581
[879585a3]1582 spinlock_unlock(&asidlock);
[20d50a1]1583
[80bcaed]1584 AS = new_as;
[20d50a1]1585}
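/*
 * Illustrative trace (added for exposition): when the scheduler switches
 * from a thread of task T1 to a thread of task T2 it ends up calling
 * as_switch(T1->as, T2->as). If T1's space is no longer active on any
 * CPU, it is parked on inactive_as_with_asid_list while keeping its
 * ASID; T2's space reuses its ASID or obtains one via asid_get() before
 * being installed in the MMU by as_install_arch().
 */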
[6a3c9a7]1586
[df0103f7]1587/** Compute flags for virtual address translation subsystem.
1588 *
[da1bafb]1589 * @param area Address space area.
1590 *
1591 * @return Flags to be used in page_mapping_insert().
[df0103f7]1592 *
1593 */
[97bdb4a]1594NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
[df0103f7]1595{
[1d432f9]1596 ASSERT(mutex_locked(&area->lock));
[fc47885]1597
[da1bafb]1598 return area_flags_to_page_flags(area->flags);
[df0103f7]1599}
1600
[ef67bab]1601/** Create page table.
1602 *
[6745592]1603 * Depending on the architecture, create either an address-space-private
1604 * or a global page table.
[ef67bab]1605 *
[da1bafb]1606 * @param flags Flags saying whether the page table is for the kernel
1607 * address space.
1608 *
1609 * @return First entry of the page table.
[ef67bab]1610 *
1611 */
[97bdb4a]1612NO_TRACE pte_t *page_table_create(unsigned int flags)
[ef67bab]1613{
[bd1deed]1614 ASSERT(as_operations);
1615 ASSERT(as_operations->page_table_create);
1616
1617 return as_operations->page_table_create(flags);
[ef67bab]1618}
[d3e7ff4]1619
[482826d]1620/** Destroy page table.
1621 *
1622 * Destroy page table in architecture specific way.
1623 *
[da1bafb]1624 * @param page_table Physical address of PTL0.
1625 *
[482826d]1626 */
[97bdb4a]1627NO_TRACE void page_table_destroy(pte_t *page_table)
[482826d]1628{
[bd1deed]1629 ASSERT(as_operations);
1630 ASSERT(as_operations->page_table_destroy);
1631
1632 as_operations->page_table_destroy(page_table);
[482826d]1633}
1634
[2299914]1635/** Lock page table.
1636 *
1637 * This function should be called before any page_mapping_insert(),
1638 * page_mapping_remove() and page_mapping_find().
[da1bafb]1639 *
[2299914]1640 * Locking order is such that address space areas must be locked
1641 * prior to this call. Address space can be locked prior to this
1642 * call in which case the lock argument is false.
1643 *
[da1bafb]1644 * @param as Address space.
1645 * @param lock If false, do not attempt to lock as->lock.
1646 *
[2299914]1647 */
[97bdb4a]1648NO_TRACE void page_table_lock(as_t *as, bool lock)
[2299914]1649{
1650 ASSERT(as_operations);
1651 ASSERT(as_operations->page_table_lock);
[bd1deed]1652
[2299914]1653 as_operations->page_table_lock(as, lock);
1654}
1655
1656/** Unlock page table.
1657 *
[da1bafb]1658 * @param as Address space.
1659 * @param unlock If false, do not attempt to unlock as->lock.
1660 *
[2299914]1661 */
[97bdb4a]1662NO_TRACE void page_table_unlock(as_t *as, bool unlock)
[2299914]1663{
1664 ASSERT(as_operations);
1665 ASSERT(as_operations->page_table_unlock);
[bd1deed]1666
[2299914]1667 as_operations->page_table_unlock(as, unlock);
1668}
1669
[ada559c]1670/** Test whether page tables are locked.
1671 *
[e3ee9b9]1672 * @param as Address space where the page tables belong.
[ada559c]1673 *
[e3ee9b9]1674 * @return True if the page tables belonging to the address space
1675 * are locked, otherwise false.
[ada559c]1676 */
[97bdb4a]1677NO_TRACE bool page_table_locked(as_t *as)
[ada559c]1678{
1679 ASSERT(as_operations);
1680 ASSERT(as_operations->page_table_locked);
1681
1682 return as_operations->page_table_locked(as);
1683}
1684
[b878df3]1685/** Return size of the address space area with given base.
1686 *
[1d432f9]1687 * @param base Arbitrary address inside the address space area.
[da1bafb]1688 *
1689 * @return Size of the address space area in bytes or zero if it
1690 * does not exist.
[b878df3]1691 *
1692 */
1693size_t as_area_get_size(uintptr_t base)
[7c23af9]1694{
1695 size_t size;
[da1bafb]1696
[1d432f9]1697 page_table_lock(AS, true);
[da1bafb]1698 as_area_t *src_area = find_area_and_lock(AS, base);
1699
[6745592]1700 if (src_area) {
[b6f3e7e]1701 size = P2SZ(src_area->pages);
[1068f6a]1702 mutex_unlock(&src_area->lock);
[da1bafb]1703 } else
[7c23af9]1704 size = 0;
[da1bafb]1705
[1d432f9]1706 page_table_unlock(AS, true);
[7c23af9]1707 return size;
1708}
1709
[25bf215]1710/** Mark portion of address space area as used.
1711 *
1712 * The address space area must be already locked.
1713 *
[da1bafb]1714 * @param area Address space area.
1715 * @param page First page to be marked.
1716 * @param count Number of page to be marked.
1717 *
[fc47885]1718 * @return False on failure or true on success.
[25bf215]1719 *
1720 */
[fc47885]1721bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
[25bf215]1722{
[1d432f9]1723 ASSERT(mutex_locked(&area->lock));
[59fb782]1724 ASSERT(IS_ALIGNED(page, PAGE_SIZE));
[25bf215]1725 ASSERT(count);
[da1bafb]1726
1727 btree_node_t *leaf;
1728 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]1729 if (pages) {
1730 /*
1731 * We hit the beginning of some used space.
1732 */
[fc47885]1733 return false;
[25bf215]1734 }
[da1bafb]1735
[a6cb8cb]1736 if (!leaf->keys) {
[da1bafb]1737 btree_insert(&area->used_space, page, (void *) count, leaf);
[fc47885]1738 goto success;
[a6cb8cb]1739 }
[da1bafb]1740
1741 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
[25bf215]1742 if (node) {
[6f4495f5]1743 uintptr_t left_pg = node->key[node->keys - 1];
1744 uintptr_t right_pg = leaf->key[0];
[98000fb]1745 size_t left_cnt = (size_t) node->value[node->keys - 1];
1746 size_t right_cnt = (size_t) leaf->value[0];
[25bf215]1747
1748 /*
1749 * Examine the possibility that the interval fits
1750 * somewhere between the rightmost interval of
1751 * the left neighbour and the first interval of the leaf.
1752 */
[da1bafb]1753
[25bf215]1754 if (page >= right_pg) {
1755 /* Do nothing. */
[b6f3e7e]1756 } else if (overlaps(page, P2SZ(count), left_pg,
1757 P2SZ(left_cnt))) {
[25bf215]1758 /* The interval intersects with the left interval. */
[fc47885]1759 return false;
[b6f3e7e]1760 } else if (overlaps(page, P2SZ(count), right_pg,
1761 P2SZ(right_cnt))) {
[25bf215]1762 /* The interval intersects with the right interval. */
[fc47885]1763 return false;
[b6f3e7e]1764 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1765 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1766 /*
1767 * The interval can be added by merging the two already
1768 * present intervals.
1769 */
[56789125]1770 node->value[node->keys - 1] += count + right_cnt;
[da1bafb]1771 btree_remove(&area->used_space, right_pg, leaf);
[fc47885]1772 goto success;
[b6f3e7e]1773 } else if (page == left_pg + P2SZ(left_cnt)) {
[da1bafb]1774 /*
[6f4495f5]1775 * The interval can be added by simply growing the left
1776 * interval.
1777 */
[56789125]1778 node->value[node->keys - 1] += count;
[fc47885]1779 goto success;
[b6f3e7e]1780 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1781 /*
[6f4495f5]1782 * The interval can be added by simply moving the base of
1783 * the right interval down and increasing its size
1784 * accordingly.
[25bf215]1785 */
[56789125]1786 leaf->value[0] += count;
[25bf215]1787 leaf->key[0] = page;
[fc47885]1788 goto success;
[25bf215]1789 } else {
1790 /*
 1791 * The interval is between both neighbouring intervals,
1792 * but cannot be merged with any of them.
1793 */
[da1bafb]1794 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1795 leaf);
[fc47885]1796 goto success;
[25bf215]1797 }
1798 } else if (page < leaf->key[0]) {
[7f1c620]1799 uintptr_t right_pg = leaf->key[0];
[98000fb]1800 size_t right_cnt = (size_t) leaf->value[0];
[da1bafb]1801
[25bf215]1802 /*
[6f4495f5]1803 * Investigate the border case in which the left neighbour does
1804 * not exist but the interval fits from the left.
[25bf215]1805 */
[da1bafb]1806
[b6f3e7e]1807 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
[25bf215]1808 /* The interval intersects with the right interval. */
[fc47885]1809 return false;
[b6f3e7e]1810 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1811 /*
[6f4495f5]1812 * The interval can be added by moving the base of the
1813 * right interval down and increasing its size
1814 * accordingly.
[25bf215]1815 */
1816 leaf->key[0] = page;
[56789125]1817 leaf->value[0] += count;
[fc47885]1818 goto success;
[25bf215]1819 } else {
1820 /*
 1821 * The interval doesn't adjoin the right interval.
1822 * It must be added individually.
1823 */
[da1bafb]1824 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1825 leaf);
[fc47885]1826 goto success;
[25bf215]1827 }
1828 }
[da1bafb]1829
1830 node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
[25bf215]1831 if (node) {
[6f4495f5]1832 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1833 uintptr_t right_pg = node->key[0];
[98000fb]1834 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1835 size_t right_cnt = (size_t) node->value[0];
[25bf215]1836
1837 /*
1838 * Examine the possibility that the interval fits
1839 * somewhere between the leftmost interval of
 1840 * the right neighbour and the last interval of the leaf.
1841 */
[da1bafb]1842
[25bf215]1843 if (page < left_pg) {
1844 /* Do nothing. */
[b6f3e7e]1845 } else if (overlaps(page, P2SZ(count), left_pg,
1846 P2SZ(left_cnt))) {
[25bf215]1847 /* The interval intersects with the left interval. */
[fc47885]1848 return false;
[b6f3e7e]1849 } else if (overlaps(page, P2SZ(count), right_pg,
1850 P2SZ(right_cnt))) {
[25bf215]1851 /* The interval intersects with the right interval. */
[fc47885]1852 return false;
[b6f3e7e]1853 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1854 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1855 /*
1856 * The interval can be added by merging the two already
1857 * present intervals.
[da1bafb]1858 */
[56789125]1859 leaf->value[leaf->keys - 1] += count + right_cnt;
[da1bafb]1860 btree_remove(&area->used_space, right_pg, node);
[fc47885]1861 goto success;
[b6f3e7e]1862 } else if (page == left_pg + P2SZ(left_cnt)) {
[6f4495f5]1863 /*
1864 * The interval can be added by simply growing the left
1865 * interval.
[da1bafb]1866 */
[fc47885]1867 leaf->value[leaf->keys - 1] += count;
1868 goto success;
[b6f3e7e]1869 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1870 /*
[6f4495f5]1871 * The interval can be added by simply moving the base of
1872 * the right interval down and increasing its size
1873 * accordingly.
[25bf215]1874 */
[56789125]1875 node->value[0] += count;
[25bf215]1876 node->key[0] = page;
[fc47885]1877 goto success;
[25bf215]1878 } else {
1879 /*
 1880 * The interval is between both neighbouring intervals,
1881 * but cannot be merged with any of them.
1882 */
[da1bafb]1883 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1884 leaf);
[fc47885]1885 goto success;
[25bf215]1886 }
1887 } else if (page >= leaf->key[leaf->keys - 1]) {
[7f1c620]1888 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]1889 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]1890
[25bf215]1891 /*
[6f4495f5]1892 * Investigate the border case in which the right neighbour
1893 * does not exist but the interval fits from the right.
[25bf215]1894 */
[da1bafb]1895
[b6f3e7e]1896 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
[56789125]1897 /* The interval intersects with the left interval. */
[fc47885]1898 return false;
[b6f3e7e]1899 } else if (left_pg + P2SZ(left_cnt) == page) {
[6f4495f5]1900 /*
1901 * The interval can be added by growing the left
1902 * interval.
1903 */
[56789125]1904 leaf->value[leaf->keys - 1] += count;
[fc47885]1905 goto success;
[25bf215]1906 } else {
1907 /*
 1908 * The interval doesn't adjoin the left interval.
1909 * It must be added individually.
1910 */
[da1bafb]1911 btree_insert(&area->used_space, page, (void *) count,
[6f4495f5]1912 leaf);
[fc47885]1913 goto success;
[25bf215]1914 }
1915 }
1916
1917 /*
[6f4495f5]1918 * Note that if the algorithm made it thus far, the interval can fit
1919 * only between two other intervals of the leaf. The two border cases
1920 * were already resolved.
[25bf215]1921 */
[da1bafb]1922 btree_key_t i;
[25bf215]1923 for (i = 1; i < leaf->keys; i++) {
1924 if (page < leaf->key[i]) {
[6f4495f5]1925 uintptr_t left_pg = leaf->key[i - 1];
1926 uintptr_t right_pg = leaf->key[i];
[98000fb]1927 size_t left_cnt = (size_t) leaf->value[i - 1];
1928 size_t right_cnt = (size_t) leaf->value[i];
[da1bafb]1929
[25bf215]1930 /*
1931 * The interval fits between left_pg and right_pg.
1932 */
[da1bafb]1933
[b6f3e7e]1934 if (overlaps(page, P2SZ(count), left_pg,
1935 P2SZ(left_cnt))) {
[6f4495f5]1936 /*
1937 * The interval intersects with the left
1938 * interval.
1939 */
[fc47885]1940 return false;
[b6f3e7e]1941 } else if (overlaps(page, P2SZ(count), right_pg,
1942 P2SZ(right_cnt))) {
[6f4495f5]1943 /*
1944 * The interval intersects with the right
1945 * interval.
1946 */
[fc47885]1947 return false;
[b6f3e7e]1948 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1949 (page + P2SZ(count) == right_pg)) {
[6f4495f5]1950 /*
1951 * The interval can be added by merging the two
1952 * already present intervals.
1953 */
[56789125]1954 leaf->value[i - 1] += count + right_cnt;
[da1bafb]1955 btree_remove(&area->used_space, right_pg, leaf);
[fc47885]1956 goto success;
[b6f3e7e]1957 } else if (page == left_pg + P2SZ(left_cnt)) {
[6f4495f5]1958 /*
1959 * The interval can be added by simply growing
1960 * the left interval.
1961 */
[56789125]1962 leaf->value[i - 1] += count;
[fc47885]1963 goto success;
[b6f3e7e]1964 } else if (page + P2SZ(count) == right_pg) {
[25bf215]1965 /*
[da1bafb]1966 * The interval can be addded by simply moving
[6f4495f5]1967 * base of the right interval down and
1968 * increasing its size accordingly.
[da1bafb]1969 */
[56789125]1970 leaf->value[i] += count;
[25bf215]1971 leaf->key[i] = page;
[fc47885]1972 goto success;
[25bf215]1973 } else {
1974 /*
[6f4495f5]1975 * The interval is between both neigbouring
1976 * intervals, but cannot be merged with any of
1977 * them.
[25bf215]1978 */
[da1bafb]1979 btree_insert(&area->used_space, page,
[6f4495f5]1980 (void *) count, leaf);
[fc47885]1981 goto success;
[25bf215]1982 }
1983 }
1984 }
[da1bafb]1985
[7e752b2]1986 panic("Inconsistency detected while adding %zu pages of used "
1987 "space at %p.", count, (void *) page);
[fc47885]1988
1989success:
1990 area->resident += count;
1991 return true;
[25bf215]1992}
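/*
 * Worked example for the merge cases above (hypothetical numbers, guarded
 * out of the build). Intervals are (base, count) pairs and P2SZ(n) is
 * n << PAGE_WIDTH. Assuming 4 KiB pages, inserting (0x3000, 2) between a
 * left interval (0x1000, 2), which ends at 0x3000, and a right interval
 * (0x5000, 1), which starts exactly at 0x5000, exercises the full-merge
 * branch: the three intervals collapse into the single pair (0x1000, 5).
 */
#if 0
static void example_insert_merge(void)
{
	uintptr_t left_pg = 0x1000;
	size_t left_cnt = 2;            /* covers 0x1000 - 0x3000 */
	uintptr_t right_pg = 0x5000;
	size_t right_cnt = 1;           /* covers 0x5000 - 0x6000 */

	uintptr_t page = 0x3000;        /* == left_pg + P2SZ(left_cnt) */
	size_t count = 2;               /* page + P2SZ(count) == right_pg */

	if ((page == left_pg + P2SZ(left_cnt)) &&
	    (page + P2SZ(count) == right_pg)) {
		/*
		 * Same arithmetic as the btree branch: grow the left
		 * interval and drop the right one.
		 */
		left_cnt += count + right_cnt;  /* now (0x1000, 5) */
	}
}
#endif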
1993
1994/** Mark portion of address space area as unused.
1995 *
1996 * The address space area must be already locked.
1997 *
[da1bafb]1998 * @param area Address space area.
1999 * @param page First page to be marked.
 2000 * @param count Number of pages to be marked.
2001 *
[fc47885]2002 * @return True on success, false on failure.
[25bf215]2003 *
2004 */
[fc47885]2005bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
[25bf215]2006{
[1d432f9]2007 ASSERT(mutex_locked(&area->lock));
[59fb782]2008 ASSERT(IS_ALIGNED(page, PAGE_SIZE));
[25bf215]2009 ASSERT(count);
[da1bafb]2010
2011 btree_node_t *leaf;
2012 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
[25bf215]2013 if (pages) {
2014 /*
2015 * We are lucky, page is the beginning of some interval.
2016 */
2017 if (count > pages) {
[fc47885]2018 return false;
[25bf215]2019 } else if (count == pages) {
[da1bafb]2020 btree_remove(&area->used_space, page, leaf);
[fc47885]2021 goto success;
[25bf215]2022 } else {
2023 /*
2024 * Find the respective interval.
2025 * Decrease its size and relocate its start address.
2026 */
[da1bafb]2027 btree_key_t i;
[25bf215]2028 for (i = 0; i < leaf->keys; i++) {
2029 if (leaf->key[i] == page) {
[b6f3e7e]2030 leaf->key[i] += P2SZ(count);
[56789125]2031 leaf->value[i] -= count;
[fc47885]2032 goto success;
[25bf215]2033 }
2034 }
[fc47885]2035
[25bf215]2036 goto error;
2037 }
2038 }
[da1bafb]2039
[b6f3e7e]2040 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
2041 leaf);
[da1bafb]2042 if ((node) && (page < leaf->key[0])) {
[7f1c620]2043 uintptr_t left_pg = node->key[node->keys - 1];
[98000fb]2044 size_t left_cnt = (size_t) node->value[node->keys - 1];
[da1bafb]2045
[b6f3e7e]2046 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
2047 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
[25bf215]2048 /*
[6f4495f5]2049 * The interval is contained in the rightmost
2050 * interval of the left neighbour and can be
2051 * removed by updating the size of the bigger
2052 * interval.
[25bf215]2053 */
[56789125]2054 node->value[node->keys - 1] -= count;
[fc47885]2055 goto success;
[b6f3e7e]2056 } else if (page + P2SZ(count) <
2057 left_pg + P2SZ(left_cnt)) {
2058 size_t new_cnt;
2059
[25bf215]2060 /*
[6f4495f5]2061 * The interval is contained in the rightmost
2062 * interval of the left neighbour but its
2063 * removal requires both updating the size of
2064 * the original interval and also inserting a
2065 * new interval.
[25bf215]2066 */
[b6f3e7e]2067 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2068 (page + P2SZ(count))) >> PAGE_WIDTH;
[56789125]2069 node->value[node->keys - 1] -= count + new_cnt;
[da1bafb]2070 btree_insert(&area->used_space, page +
[b6f3e7e]2071 P2SZ(count), (void *) new_cnt, leaf);
[fc47885]2072 goto success;
[25bf215]2073 }
2074 }
[fc47885]2075
2076 return false;
[da1bafb]2077 } else if (page < leaf->key[0])
[fc47885]2078 return false;
[25bf215]2079
2080 if (page > leaf->key[leaf->keys - 1]) {
[7f1c620]2081 uintptr_t left_pg = leaf->key[leaf->keys - 1];
[98000fb]2082 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
[da1bafb]2083
[b6f3e7e]2084 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
2085 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
[25bf215]2086 /*
[6f4495f5]2087 * The interval is contained in the rightmost
2088 * interval of the leaf and can be removed by
2089 * updating the size of the bigger interval.
[25bf215]2090 */
[56789125]2091 leaf->value[leaf->keys - 1] -= count;
[fc47885]2092 goto success;
[b6f3e7e]2093 } else if (page + P2SZ(count) < left_pg +
2094 P2SZ(left_cnt)) {
2095 size_t new_cnt;
2096
[25bf215]2097 /*
[6f4495f5]2098 * The interval is contained in the rightmost
2099 * interval of the leaf but its removal
2100 * requires both updating the size of the
2101 * original interval and also inserting a new
2102 * interval.
[25bf215]2103 */
[b6f3e7e]2104 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2105 (page + P2SZ(count))) >> PAGE_WIDTH;
[56789125]2106 leaf->value[leaf->keys - 1] -= count + new_cnt;
[da1bafb]2107 btree_insert(&area->used_space, page +
[b6f3e7e]2108 P2SZ(count), (void *) new_cnt, leaf);
[fc47885]2109 goto success;
[25bf215]2110 }
2111 }
[fc47885]2112
2113 return false;
[da1bafb]2114 }
[25bf215]2115
2116 /*
2117 * The border cases have been already resolved.
[fc47885]2118 * Now the interval can be only between intervals of the leaf.
[25bf215]2119 */
[da1bafb]2120 btree_key_t i;
[25bf215]2121 for (i = 1; i < leaf->keys; i++) {
2122 if (page < leaf->key[i]) {
[7f1c620]2123 uintptr_t left_pg = leaf->key[i - 1];
[98000fb]2124 size_t left_cnt = (size_t) leaf->value[i - 1];
[da1bafb]2125
[25bf215]2126 /*
[6f4495f5]2127 * Now the interval is between intervals corresponding
2128 * to (i - 1) and i.
[25bf215]2129 */
[b6f3e7e]2130 if (overlaps(left_pg, P2SZ(left_cnt), page,
2131 P2SZ(count))) {
2132 if (page + P2SZ(count) ==
2133 left_pg + P2SZ(left_cnt)) {
[25bf215]2134 /*
[6f4495f5]2135 * The interval is contained in the
2136 * interval (i - 1) of the leaf and can
2137 * be removed by updating the size of
2138 * the bigger interval.
[25bf215]2139 */
[56789125]2140 leaf->value[i - 1] -= count;
[fc47885]2141 goto success;
[b6f3e7e]2142 } else if (page + P2SZ(count) <
2143 left_pg + P2SZ(left_cnt)) {
2144 size_t new_cnt;
2145
[25bf215]2146 /*
[6f4495f5]2147 * The interval is contained in the
2148 * interval (i - 1) of the leaf but its
2149 * removal requires both updating the
2150 * size of the original interval and
[25bf215]2151 * also inserting a new interval.
2152 */
[b6f3e7e]2153 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2154 (page + P2SZ(count))) >>
[6f4495f5]2155 PAGE_WIDTH;
[56789125]2156 leaf->value[i - 1] -= count + new_cnt;
[da1bafb]2157 btree_insert(&area->used_space, page +
[b6f3e7e]2158 P2SZ(count), (void *) new_cnt,
[6f4495f5]2159 leaf);
[fc47885]2160 goto success;
[25bf215]2161 }
2162 }
[fc47885]2163
2164 return false;
[25bf215]2165 }
2166 }
[da1bafb]2167
[25bf215]2168error:
[7e752b2]2169 panic("Inconsistency detected while removing %zu pages of used "
2170 "space from %p.", count, (void *) page);
[fc47885]2171
2172success:
2173 area->resident -= count;
2174 return true;
[25bf215]2175}
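/*
 * Worked example for the split case above (hypothetical numbers, guarded
 * out of the build). Removing (0x2000, 2) from the middle of an interval
 * (0x1000, 5) shrinks the original to its leading pages and creates a new
 * interval for the trailing ones, matching the new_cnt arithmetic in
 * used_space_remove(); assumes 4 KiB pages (PAGE_WIDTH == 12).
 */
#if 0
static void example_remove_split(void)
{
	uintptr_t left_pg = 0x1000;
	size_t left_cnt = 5;            /* covers 0x1000 - 0x6000 */

	uintptr_t page = 0x2000;        /* removal starts here ... */
	size_t count = 2;               /* ... and ends at 0x4000 */

	/* Pages left after the removed range: (0x6000 - 0x4000) >> 12 */
	size_t new_cnt = ((left_pg + P2SZ(left_cnt)) -
	    (page + P2SZ(count))) >> PAGE_WIDTH;    /* == 2 */

	/* The original interval keeps only its leading page(s) ... */
	left_cnt -= count + new_cnt;                /* now (0x1000, 1) */

	/*
	 * ... and (page + P2SZ(count), new_cnt), i.e. (0x4000, 2), would
	 * be inserted as a fresh interval.
	 */
	(void) new_cnt;
}
#endif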
2176
[df0103f7]2177/*
2178 * Address space related syscalls.
2179 */
2180
[fbcdeb8]2181sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
2182 uintptr_t bound)
[df0103f7]2183{
[fbcdeb8]2184 uintptr_t virt = base;
[c4c2406]2185 as_area_t *area = as_area_create(AS, flags, size,
[fbcdeb8]2186 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
2187 if (area == NULL)
[96b02eb9]2188 return (sysarg_t) -1;
[fbcdeb8]2189
2190 return (sysarg_t) virt;
[df0103f7]2191}
2192
[96b02eb9]2193sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2194{
[96b02eb9]2195 return (sysarg_t) as_area_resize(AS, address, size, 0);
[7242a78e]2196}
2197
[96b02eb9]2198sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
[c98e6ee]2199{
[96b02eb9]2200 return (sysarg_t) as_area_change_flags(AS, flags, address);
[c98e6ee]2201}
2202
[96b02eb9]2203sysarg_t sys_as_area_destroy(uintptr_t address)
[7242a78e]2204{
[96b02eb9]2205 return (sysarg_t) as_area_destroy(AS, address);
[df0103f7]2206}
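/*
 * Illustrative sketch of the syscall sequence (guarded out of the build).
 * The base address, sizes and flag combination below are made up for the
 * example; AS_AREA_READ, AS_AREA_WRITE and AS_AREA_CACHEABLE are assumed
 * to come from the usual address space area flag macros.
 */
#if 0
static void example_area_syscalls(void)
{
	/* Create an anonymous, read-write area of four pages. */
	sysarg_t virt = sys_as_area_create(0x40000000, 4 * PAGE_SIZE,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 0);
	if (virt == (sysarg_t) -1)
		return;                 /* creation failed */

	/* Grow the area to six pages, then tear it down again. */
	(void) sys_as_area_resize((uintptr_t) virt, 6 * PAGE_SIZE, 0);
	(void) sys_as_area_destroy((uintptr_t) virt);
}
#endif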
[b45c443]2207
[336db295]2208/** Get list of address space areas.
2209 *
[da1bafb]2210 * @param as Address space.
2211 * @param obuf Place to save pointer to returned buffer.
2212 * @param osize Place to save size of returned buffer.
2213 *
[336db295]2214 */
2215void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2216{
2217 mutex_lock(&as->lock);
[da1bafb]2218
[336db295]2219 /* First pass, count number of areas. */
[da1bafb]2220
2221 size_t area_cnt = 0;
2222
[feeac0d]2223 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2224 node) {
[336db295]2225 area_cnt += node->keys;
2226 }
[da1bafb]2227
2228 size_t isize = area_cnt * sizeof(as_area_info_t);
2229 as_area_info_t *info = malloc(isize, 0);
2230
[336db295]2231 /* Second pass, record data. */
[da1bafb]2232
2233 size_t area_idx = 0;
2234
[feeac0d]2235 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2236 node) {
[da1bafb]2237 btree_key_t i;
2238
[336db295]2239 for (i = 0; i < node->keys; i++) {
2240 as_area_t *area = node->value[i];
[da1bafb]2241
[336db295]2242 ASSERT(area_idx < area_cnt);
2243 mutex_lock(&area->lock);
[da1bafb]2244
[336db295]2245 info[area_idx].start_addr = area->base;
[b6f3e7e]2246 info[area_idx].size = P2SZ(area->pages);
[336db295]2247 info[area_idx].flags = area->flags;
2248 ++area_idx;
[da1bafb]2249
[336db295]2250 mutex_unlock(&area->lock);
2251 }
2252 }
[da1bafb]2253
[336db295]2254 mutex_unlock(&as->lock);
[da1bafb]2255
[336db295]2256 *obuf = info;
2257 *osize = isize;
2258}
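/*
 * Illustrative consumer (guarded out of the build): the buffer returned
 * by as_get_area_info() is owned by the caller and must be freed. The
 * helper name and printf() output format are hypothetical.
 */
#if 0
static void example_dump_areas(as_t *as)
{
	as_area_info_t *info;
	size_t isize;

	as_get_area_info(as, &info, &isize);

	size_t count = isize / sizeof(as_area_info_t);
	for (size_t i = 0; i < count; i++)
		printf("area %zu: base=%p, size=%zu, flags=0x%x\n", i,
		    (void *) info[i].start_addr, info[i].size,
		    (unsigned int) info[i].flags);

	free(info);
}
#endif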
2259
[64c2ad5]2260/** Print out information about address space.
2261 *
[da1bafb]2262 * @param as Address space.
2263 *
[64c2ad5]2264 */
2265void as_print(as_t *as)
2266{
2267 mutex_lock(&as->lock);
2268
[0b37882]2269 /* Print out info about address space areas */
[feeac0d]2270 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2271 node) {
[da1bafb]2272 btree_key_t i;
[64c2ad5]2273
2274 for (i = 0; i < node->keys; i++) {
[7ba7c6d]2275 as_area_t *area = node->value[i];
[da1bafb]2276
[64c2ad5]2277 mutex_lock(&area->lock);
[7e752b2]2278 printf("as_area: %p, base=%p, pages=%zu"
2279 " (%p - %p)\n", area, (void *) area->base,
2280 area->pages, (void *) area->base,
[b6f3e7e]2281 (void *) (area->base + P2SZ(area->pages)));
[64c2ad5]2282 mutex_unlock(&area->lock);
2283 }
2284 }
2285
2286 mutex_unlock(&as->lock);
2287}
2288
[cc73a8a1]2289/** @}
[b45c443]2290 */