source: mainline/kernel/generic/src/mm/as.c@ c477c80

Last change on this file since c477c80 was c477c80, checked in by Jiri Svoboda <jiri@…>, 6 years ago

Fix some common misspellings

[20d50a1]1/*
[0321109]2 * Copyright (c) 2010 Jakub Jermar
[88cc71c0]3 * Copyright (c) 2018 Jiri Svoboda
[20d50a1]4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
[174156fd]30/** @addtogroup kernel_generic_mm
[b45c443]31 * @{
32 */
33
[9179d0a]34/**
[b45c443]35 * @file
[da1bafb]36 * @brief Address space related functions.
[9179d0a]37 *
[20d50a1]38 * This file contains address space manipulation functions.
39 * Roughly speaking, this is a higher-level client of the
40 * Virtual Address Translation (VAT) subsystem.
[9179d0a]41 *
42 * Functionality provided by this file allows one to
[cc73a8a1]43 * create address spaces and create, resize and share
[9179d0a]44 * address space areas.
45 *
46 * @see page.c
47 *
[20d50a1]48 */
49
50#include <mm/as.h>
[ef67bab]51#include <arch/mm/as.h>
[20d50a1]52#include <mm/page.h>
53#include <mm/frame.h>
[085d973]54#include <mm/slab.h>
[20d50a1]55#include <mm/tlb.h>
56#include <arch/mm/page.h>
57#include <genarch/mm/page_pt.h>
[2802767]58#include <genarch/mm/page_ht.h>
[4512d7e]59#include <mm/asid.h>
[20d50a1]60#include <arch/mm/asid.h>
[31d8e10]61#include <preemption.h>
[20d50a1]62#include <synch/spinlock.h>
[1068f6a]63#include <synch/mutex.h>
[5c9a08b]64#include <adt/list.h>
[df0103f7]65#include <proc/task.h>
[e3c762cd]66#include <proc/thread.h>
[20d50a1]67#include <arch/asm.h>
[df0103f7]68#include <panic.h>
[63e27ef]69#include <assert.h>
[bab75df6]70#include <stdio.h>
[44a7ee5]71#include <mem.h>
[5a7d9d1]72#include <macros.h>
[0b37882]73#include <bitops.h>
[20d50a1]74#include <arch.h>
[df0103f7]75#include <errno.h>
76#include <config.h>
[25bf215]77#include <align.h>
[d99c1d2]78#include <typedefs.h>
[e3c762cd]79#include <syscall/copy.h>
80#include <arch/interrupt.h>
[1dbc43f]81#include <interrupt.h>
[aafed15]82#include <stdlib.h>
[20d50a1]83
[cc73a8a1]84/**
85 * Each architecture decides what functions will be used to carry out
86 * address space operations such as creating or locking page tables.
87 */
[ef67bab]88as_operations_t *as_operations = NULL;
[20d50a1]89
[de0af3a]90/** Cache for as_t objects */
[82d515e9]91static slab_cache_t *as_cache;
[57da95c]92
[de0af3a]93/** Cache for as_page_mapping_t objects */
94static slab_cache_t *as_page_mapping_cache;
95
[2fc3b2d]96/** Cache for used_space_ival_t objects */
97static slab_cache_t *used_space_ival_cache;
98
[fc47885]99/** ASID subsystem lock.
100 *
101 * This lock protects:
[55b77d9]102 * - inactive_as_with_asid_list
[879585a3]103 * - as->asid for each as of the as_t type
104 * - asids_allocated counter
[da1bafb]105 *
[6f4495f5]106 */
[879585a3]107SPINLOCK_INITIALIZE(asidlock);
[7e4e532]108
109/**
[fc47885]110 * Inactive address spaces (on all processors)
111 * that have valid ASID.
[7e4e532]112 */
[55b77d9]113LIST_INITIALIZE(inactive_as_with_asid_list);
[7e4e532]114
[071a8ae6]115/** Kernel address space. */
116as_t *AS_KERNEL = NULL;
117
[88cc71c0]118static void *as_areas_getkey(odlink_t *);
119static int as_areas_cmp(void *, void *);
120
[2fc3b2d]121static void used_space_initialize(used_space_t *);
122static void used_space_finalize(used_space_t *);
123static void *used_space_getkey(odlink_t *);
124static int used_space_cmp(void *, void *);
125static used_space_ival_t *used_space_last(used_space_t *);
126static void used_space_remove_ival(used_space_ival_t *);
127static void used_space_shorten_ival(used_space_ival_t *, size_t);
128
[b7fd2a0]129NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags)
[29b2bbf]130{
131 as_t *as = (as_t *) obj;
[a35b458]132
[29b2bbf]133 link_initialize(&as->inactive_as_with_asid_link);
[7f341820]134 mutex_initialize(&as->lock, MUTEX_PASSIVE);
[a35b458]135
[fc47885]136 return as_constructor_arch(as, flags);
[29b2bbf]137}
138
[7a0359b]139NO_TRACE static size_t as_destructor(void *obj)
[29b2bbf]140{
[fc47885]141 return as_destructor_arch((as_t *) obj);
[29b2bbf]142}
143
[ef67bab]144/** Initialize address space subsystem. */
145void as_init(void)
146{
147 as_arch_init();
[a35b458]148
[82d515e9]149 as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
[6f4495f5]150 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[a35b458]151
[de0af3a]152 as_page_mapping_cache = slab_cache_create("as_page_mapping_t",
153 sizeof(as_page_mapping_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
154
[2fc3b2d]155 used_space_ival_cache = slab_cache_create("used_space_ival_t",
156 sizeof(used_space_ival_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
157
[8e1ea655]158 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]159 if (!AS_KERNEL)
[f651e80]160 panic("Cannot create kernel address space.");
[ef67bab]161}
162
[071a8ae6]163/** Create address space.
164 *
[da1bafb]165 * @param flags Flags that influence the way in which the address
166 * space is created.
167 *
[071a8ae6]168 */
[da1bafb]169as_t *as_create(unsigned int flags)
[20d50a1]170{
[abf6c01]171 as_t *as = (as_t *) slab_alloc(as_cache, FRAME_ATOMIC);
172 if (!as)
173 return NULL;
174
[29b2bbf]175 (void) as_create_arch(as, 0);
[a35b458]176
[88cc71c0]177 odict_initialize(&as->as_areas, as_areas_getkey, as_areas_cmp);
[a35b458]178
[bb68433]179 if (flags & FLAG_AS_KERNEL)
180 as->asid = ASID_KERNEL;
181 else
182 as->asid = ASID_INVALID;
[a35b458]183
[78de83de]184 refcount_init(&as->refcount);
[47800e0]185 as->cpu_refcount = 0;
[a35b458]186
[b3f8fb7]187#ifdef AS_PAGE_TABLE
[80bcaed]188 as->genarch.page_table = page_table_create(flags);
[b3f8fb7]189#else
190 page_table_create(flags);
191#endif
[a35b458]192
[20d50a1]193 return as;
194}
195
[c477c80]196/** Destroy address space.
[482826d]197 *
[6f4495f5]198 * When there are no tasks referencing this address space (i.e. its refcount is
199 * zero), the address space can be destroyed.
[31d8e10]200 *
201 * We know that we don't hold any spinlock.
[6745592]202 *
[da1bafb]203 * @param as Address space to be destroyed.
204 *
[482826d]205 */
[ca21f1e2]206static void as_destroy(as_t *as)
[5be1923]207{
[31d8e10]208 DEADLOCK_PROBE_INIT(p_asidlock);
[a35b458]209
[63e27ef]210 assert(as != AS);
[78de83de]211 assert(refcount_unique(&as->refcount));
[a35b458]212
[482826d]213 /*
[663bb537]214 * Since there is no reference to this address space, it is safe not to
215 * lock its mutex.
[482826d]216 */
[a35b458]217
[31d8e10]218 /*
219 * We need to avoid deadlock between TLB shootdown and asidlock.
220 * We therefore try to take the asidlock conditionally and if we don't succeed,
221 * we enable interrupts and try again. This is done while preemption is
222 * disabled to prevent nested context switches. We also depend on the
223 * fact that so far no spinlocks are held.
224 */
225 preemption_disable();
[da1bafb]226 ipl_t ipl = interrupts_read();
[a35b458]227
[31d8e10]228retry:
229 interrupts_disable();
230 if (!spinlock_trylock(&asidlock)) {
231 interrupts_enable();
232 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
233 goto retry;
234 }
[a35b458]235
[da1bafb]236 /* Interrupts disabled, enable preemption */
237 preemption_enable();
[a35b458]238
[da1bafb]239 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
[1624aae]240 if (as->cpu_refcount == 0)
[31e8ddd]241 list_remove(&as->inactive_as_with_asid_link);
[a35b458]242
[482826d]243 asid_put(as->asid);
244 }
[a35b458]245
[879585a3]246 spinlock_unlock(&asidlock);
[fdaad75d]247 interrupts_restore(ipl);
[a35b458]248
[482826d]249 /*
250 * Destroy address space areas of the address space.
[88cc71c0]251 * Need to start from the beginning each time since we are destroying
252 * the areas.
[da1bafb]253 */
[88cc71c0]254 as_area_t *area = as_area_first(as);
255 while (area != NULL) {
256 /*
257 * XXX We already have as_area_t, but as_area_destroy will
258 * have to search for it. This could be made faster.
259 */
260 as_area_destroy(as, area->base);
261 area = as_area_first(as);
[482826d]262 }
[a35b458]263
[88cc71c0]264 odict_finalize(&as->as_areas);
[a35b458]265
[b3f8fb7]266#ifdef AS_PAGE_TABLE
[80bcaed]267 page_table_destroy(as->genarch.page_table);
[b3f8fb7]268#else
269 page_table_destroy(NULL);
270#endif
[a35b458]271
[82d515e9]272 slab_free(as_cache, as);
[5be1923]273}
274
[0321109]275/** Hold a reference to an address space.
276 *
[fc47885]277 * Holding a reference to an address space prevents destruction
278 * of that address space.
[0321109]279 *
[da1bafb]280 * @param as Address space to be held.
281 *
[0321109]282 */
[7a0359b]283NO_TRACE void as_hold(as_t *as)
[0321109]284{
[78de83de]285 refcount_up(&as->refcount);
[0321109]286}
287
288/** Release a reference to an address space.
289 *
[fc47885]290 * The last one to release a reference to an address space
291 * destroys the address space.
[0321109]292 *
[78de83de]293 * @param as Address space to be released.
[da1bafb]294 *
[0321109]295 */
[7a0359b]296NO_TRACE void as_release(as_t *as)
[0321109]297{
[78de83de]298 if (refcount_down(&as->refcount))
[0321109]299 as_destroy(as);
300}
301
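As a usage illustration (a hedged sketch, not code from this file; it assumes the reference returned by as_create() is dropped with as_release() once the creator is done):

/* Illustrative sketch only: create a user address space, pin it with an
 * extra reference while some other structure points at it, then let go. */
static void example_as_lifecycle(void)
{
        as_t *as = as_create(0);
        if (as == NULL)
                return;

        as_hold(as);     /* extra reference held by, e.g., another structure */
        /* ... create areas, attach the address space to a task, ... */
        as_release(as);  /* drop the extra reference */

        as_release(as);  /* drop the creator's reference; the last release destroys it */
}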
[88cc71c0]302/** Return first address space area.
303 *
304 * @param as Address space
305 * @return First area in @a as (i.e. area with the lowest base address)
306 * or @c NULL if there is none
307 */
308as_area_t *as_area_first(as_t *as)
309{
310 odlink_t *odlink = odict_first(&as->as_areas);
311 if (odlink == NULL)
312 return NULL;
313
314 return odict_get_instance(odlink, as_area_t, las_areas);
315}
316
317/** Return next address space area.
318 *
319 * @param cur Current area
320 * @return Next area in the same address space or @c NULL if @a cur is the
321 * last area.
322 */
323as_area_t *as_area_next(as_area_t *cur)
324{
325 odlink_t *odlink = odict_next(&cur->las_areas, &cur->as->as_areas);
326 if (odlink == NULL)
327 return NULL;
328
329 return odict_get_instance(odlink, as_area_t, las_areas);
330}
331
332/** Determine if area with specified parameters would conflict with
333 * a specific existing address space area.
334 *
335 * @param addr Starting virtual address of the area being tested.
336 * @param count Number of pages in the area being tested.
337 * @param guarded True if the area being tested is protected by guard pages.
338 * @param area Area against which we are testing.
339 *
340 * @return True if the two areas conflict, false otherwise.
341 */
342NO_TRACE static bool area_is_conflicting(uintptr_t addr,
343 size_t count, bool guarded, as_area_t *area)
344{
345 assert((addr % PAGE_SIZE) == 0);
346
347 size_t gsize = P2SZ(count);
[6785b88b]348 size_t agsize = P2SZ(area->pages);
[cd1ecf11]349
350 /*
351 * A guarded area has one guard page before, one page after.
352 * What we do here is: if either area is guarded, we add
353 * PAGE_SIZE to the size of both areas. That guarantees
354 * they will be spaced at least one page apart.
355 */
356 if (guarded || (area->flags & AS_AREA_GUARD) != 0) {
357 /* Add guard page size unless area is at the end of VA domain */
358 if (!overflows(addr, P2SZ(count)))
359 gsize += PAGE_SIZE;
360
361 /* Add guard page size unless area is at the end of VA domain */
362 if (!overflows(area->base, P2SZ(area->pages)))
363 agsize += PAGE_SIZE;
364 }
[88cc71c0]365
366 return overlaps(addr, gsize, area->base, agsize);
367
368}
369
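To make the guard-page arithmetic above concrete, a small hypothetical example (addresses invented for illustration, assuming PAGE_SIZE is 4 KiB):

/*
 * Existing area:  base = 0x10000, 4 pages -> occupies [0x10000, 0x14000)
 * Candidate area: addr = 0x14000, 2 pages, guarded
 *
 * Since the candidate is guarded, both sizes grow by one page:
 *     gsize  = P2SZ(2) + PAGE_SIZE = 0x3000
 *     agsize = P2SZ(4) + PAGE_SIZE = 0x5000
 *
 * overlaps(0x14000, 0x3000, 0x10000, 0x5000) is true, so the candidate is
 * rejected even though the raw ranges merely touch; starting it one page
 * higher, at 0x15000, leaves the required guard gap and passes the check.
 */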
[e3ee9b9]370/** Check area conflicts with other areas.
371 *
[35a3d950]372 * @param as Address space.
373 * @param addr Starting virtual address of the area being tested.
374 * @param count Number of pages in the area being tested.
375 * @param guarded True if the area being tested is protected by guard pages.
[88cc71c0]376 * @param avoid Do not touch this area. I.e. this area is not considered
377 * as presenting a conflict.
[e3ee9b9]378 *
379 * @return True if there is no conflict, false otherwise.
380 *
381 */
[0b37882]382NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
[35a3d950]383 size_t count, bool guarded, as_area_t *avoid)
[e3ee9b9]384{
[63e27ef]385 assert((addr % PAGE_SIZE) == 0);
386 assert(mutex_locked(&as->lock));
[94795812]387
388 /*
389 * If the addition of the supposed area address and size overflows,
390 * report conflict.
391 */
392 if (overflows_into_positive(addr, P2SZ(count)))
393 return false;
[a35b458]394
[e3ee9b9]395 /*
396 * We don't want any area to have conflicts with NULL page.
397 */
[b6f3e7e]398 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
[e3ee9b9]399 return false;
[35a3d950]400
[e3ee9b9]401 /*
[88cc71c0]402 * To determine if we overlap with another area, we just need
403 * to check for overlap with the last area whose base address is <=
404 * ours and with the first area whose base address is > ours.
405 *
406 * First find the last area with base address <= ours.
[e3ee9b9]407 */
[88cc71c0]408 odlink_t *odlink = odict_find_leq(&as->as_areas, &addr, NULL);
409 if (odlink != NULL) {
410 as_area_t *area = odict_get_instance(odlink, as_area_t,
411 las_areas);
[a35b458]412
[0b37882]413 if (area != avoid) {
414 mutex_lock(&area->lock);
[88cc71c0]415 if (area_is_conflicting(addr, count, guarded, area)) {
[0b37882]416 mutex_unlock(&area->lock);
417 return false;
418 }
[a35b458]419
[e3ee9b9]420 mutex_unlock(&area->lock);
421 }
[88cc71c0]422
423 /* Next area */
424 odlink = odict_next(odlink, &as->as_areas);
[e3ee9b9]425 }
[a35b458]426
[d9d0088]427 /*
428 * The next area, if any, is the first one with base address greater than ours.
429 * If there was no area with <= base, we need to look at the first area.
430 */
431 if (odlink == NULL)
432 odlink = odict_first(&as->as_areas);
433
[88cc71c0]434 if (odlink != NULL) {
435 as_area_t *area = odict_get_instance(odlink, as_area_t,
436 las_areas);
[a35b458]437
[0b37882]438 if (area != avoid) {
439 mutex_lock(&area->lock);
[88cc71c0]440 if (area_is_conflicting(addr, count, guarded, area)) {
[0b37882]441 mutex_unlock(&area->lock);
442 return false;
443 }
[a35b458]444
[e3ee9b9]445 mutex_unlock(&area->lock);
446 }
447 }
[a35b458]448
[e3ee9b9]449 /*
450 * So far, the area does not conflict with other areas.
[57355a40]451 * Check if it is contained in the user address space.
[e3ee9b9]452 */
453 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
[57355a40]454 return iswithin(USER_ADDRESS_SPACE_START,
455 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
456 addr, P2SZ(count));
[e3ee9b9]457 }
[a35b458]458
[e3ee9b9]459 return true;
460}
461
[fbcdeb8]462/** Return pointer to unmapped address space area
463 *
464 * The address space must be already locked when calling
465 * this function.
466 *
[35a3d950]467 * @param as Address space.
468 * @param bound Lowest address bound.
469 * @param size Requested size of the allocation.
470 * @param guarded True if the allocation must be protected by guard pages.
[fbcdeb8]471 *
472 * @return Address of the beginning of unmapped address space area.
473 * @return -1 if no suitable address space area was found.
474 *
475 */
476NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
[35a3d950]477 size_t size, bool guarded)
[fbcdeb8]478{
[63e27ef]479 assert(mutex_locked(&as->lock));
[a35b458]480
[fbcdeb8]481 if (size == 0)
482 return (uintptr_t) -1;
[a35b458]483
[fbcdeb8]484 /*
485 * Make sure we allocate from page-aligned
486 * address. Check for possible overflow in
487 * each step.
488 */
[a35b458]489
[fbcdeb8]490 size_t pages = SIZE2FRAMES(size);
[a35b458]491
[fbcdeb8]492 /*
493 * Find the lowest unmapped address aligned on the size
494 * boundary, not smaller than bound and of the required size.
495 */
[a35b458]496
[fbcdeb8]497 /* First check the bound address itself */
498 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
[35a3d950]499 if (addr >= bound) {
500 if (guarded) {
[7c3fb9b]501 /*
502 * Leave an unmapped page between the lower
[35a3d950]503 * bound and the area's start address.
504 */
505 addr += P2SZ(1);
506 }
507
508 if (check_area_conflicts(as, addr, pages, guarded, NULL))
509 return addr;
510 }
[a35b458]511
[fbcdeb8]512 /* Then check the addresses behind each area */
[88cc71c0]513 as_area_t *area = as_area_first(as);
514 while (area != NULL) {
515 mutex_lock(&area->lock);
[a35b458]516
[d9d0088]517 addr = area->base + P2SZ(area->pages);
[a35b458]518
[88cc71c0]519 if (guarded || area->flags & AS_AREA_GUARD) {
520 /*
521 * We must leave an unmapped page
522 * between the two areas.
523 */
524 addr += P2SZ(1);
525 }
[35a3d950]526
[88cc71c0]527 bool avail =
528 ((addr >= bound) && (addr >= area->base) &&
529 (check_area_conflicts(as, addr, pages, guarded, area)));
[35a3d950]530
[88cc71c0]531 mutex_unlock(&area->lock);
[a35b458]532
[88cc71c0]533 if (avail)
534 return addr;
[a35b458]535
[88cc71c0]536 area = as_area_next(area);
[fbcdeb8]537 }
[a35b458]538
[fbcdeb8]539 /* No suitable address space area found */
540 return (uintptr_t) -1;
541}
542
[de0af3a]543/** Get key function for pagemap ordered dictionary.
544 *
545 * The key is the virtual address of the page (as_page_mapping_t.vaddr)
546 *
547 * @param odlink Link to as_pagemap_t.map ordered dictionary
548 * @return Pointer to virtual address cast as @c void *
549 */
550static void *as_pagemap_getkey(odlink_t *odlink)
551{
552 as_page_mapping_t *mapping;
553
554 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap);
555 return (void *) &mapping->vaddr;
556}
557
558/** Comparison function for pagemap ordered dictionary.
559 *
560 * @param a Pointer to virtual address cast as @c void *
561 * @param b Pointer to virtual address cast as @c void *
562 * @return <0, =0, >0 if virtual address a is less than, equal to, or
[2fc3b2d]563 * greater than b, respectively.
[de0af3a]564 */
565static int as_pagemap_cmp(void *a, void *b)
566{
567 uintptr_t va = *(uintptr_t *)a;
568 uintptr_t vb = *(uintptr_t *)b;
569
[5a2e0dd5]570 if (va < vb)
571 return -1;
572 else if (va == vb)
573 return 0;
574 else
575 return +1;
[de0af3a]576}
577
578/** Initialize pagemap.
579 *
580 * @param pagemap Pagemap
581 */
582NO_TRACE void as_pagemap_initialize(as_pagemap_t *pagemap)
583{
584 odict_initialize(&pagemap->map, as_pagemap_getkey, as_pagemap_cmp);
585}
586
587/** Finalize pagemap.
588 *
589 * Destroy any entries in the pagemap.
590 *
591 * @param pagemap Pagemap
592 */
593NO_TRACE void as_pagemap_finalize(as_pagemap_t *pagemap)
594{
595 as_page_mapping_t *mapping = as_pagemap_first(pagemap);
596 while (mapping != NULL) {
597 as_pagemap_remove(mapping);
598 mapping = as_pagemap_first(pagemap);
599 }
600 odict_finalize(&pagemap->map);
601}
602
603/** Get first page mapping.
604 *
605 * @param pagemap Pagemap
606 * @return First mapping or @c NULL if there is none
607 */
608NO_TRACE as_page_mapping_t *as_pagemap_first(as_pagemap_t *pagemap)
609{
610 odlink_t *odlink;
611
612 odlink = odict_first(&pagemap->map);
613 if (odlink == NULL)
614 return NULL;
615
616 return odict_get_instance(odlink, as_page_mapping_t, lpagemap);
617}
618
619/** Get next page mapping.
620 *
621 * @param cur Current mapping
622 * @return Next mapping or @c NULL if @a cur is the last one
623 */
624NO_TRACE as_page_mapping_t *as_pagemap_next(as_page_mapping_t *cur)
625{
626 odlink_t *odlink;
627
628 odlink = odict_next(&cur->lpagemap, &cur->pagemap->map);
629 if (odlink == NULL)
630 return NULL;
631
632 return odict_get_instance(odlink, as_page_mapping_t, lpagemap);
633}
634
635/** Find frame by virtual address.
636 *
637 * @param pagemap Pagemap
638 * @param vaddr Virtual address of page
639 * @param rframe Place to store physical frame address
640 * @return EOK on success or ENOENT if no mapping was found
641 */
642NO_TRACE errno_t as_pagemap_find(as_pagemap_t *pagemap, uintptr_t vaddr,
643 uintptr_t *rframe)
644{
645 odlink_t *odlink;
646 as_page_mapping_t *mapping;
647
648 odlink = odict_find_eq(&pagemap->map, &vaddr, NULL);
649 if (odlink == NULL)
650 return ENOENT;
651
652 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap);
653 *rframe = mapping->frame;
654 return EOK;
655}
656
657/** Insert new page mapping.
658 *
659 * This function can block to allocate kernel memory.
660 *
661 * @param pagemap Pagemap
662 * @param vaddr Virtual page address
663 * @param frame Physical frame address
664 */
665NO_TRACE void as_pagemap_insert(as_pagemap_t *pagemap, uintptr_t vaddr,
666 uintptr_t frame)
667{
668 as_page_mapping_t *mapping;
669
670 mapping = slab_alloc(as_page_mapping_cache, 0);
671 mapping->pagemap = pagemap;
672 odlink_initialize(&mapping->lpagemap);
673 mapping->vaddr = vaddr;
674 mapping->frame = frame;
675 odict_insert(&mapping->lpagemap, &pagemap->map, NULL);
676}
677
678/** Remove page mapping.
679 *
680 * @param mapping Mapping
681 */
682NO_TRACE void as_pagemap_remove(as_page_mapping_t *mapping)
683{
684 odict_remove(&mapping->lpagemap);
685 slab_free(as_page_mapping_cache, mapping);
686}
687
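A minimal usage sketch of the pagemap API above (illustrative only; the pagemap_example() helper and its arguments are hypothetical and not part of this file):

/* Remember a page -> frame translation and look it up again later. */
static void pagemap_example(uintptr_t vaddr, uintptr_t frame)
{
        as_pagemap_t pagemap;
        uintptr_t found;

        as_pagemap_initialize(&pagemap);

        /* May block while allocating the as_page_mapping_t. */
        as_pagemap_insert(&pagemap, vaddr, frame);

        if (as_pagemap_find(&pagemap, vaddr, &found) == EOK) {
                /* found == frame: vaddr is backed by this physical frame */
        }

        /* Remove any remaining mappings and destroy the dictionary. */
        as_pagemap_finalize(&pagemap);
}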
[83b6ba9f]688/** Remove reference to address space area share info.
689 *
690 * If the reference count drops to 0, the sh_info is deallocated.
691 *
692 * @param sh_info Pointer to address space area share info.
693 *
694 */
695NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
696{
697 bool dealloc = false;
[a35b458]698
[83b6ba9f]699 mutex_lock(&sh_info->lock);
[63e27ef]700 assert(sh_info->refcount);
[a35b458]701
[83b6ba9f]702 if (--sh_info->refcount == 0) {
703 dealloc = true;
[a35b458]704
[de0af3a]705 as_page_mapping_t *mapping = as_pagemap_first(&sh_info->pagemap);
706 while (mapping != NULL) {
707 frame_free(mapping->frame, 1);
708 mapping = as_pagemap_next(mapping);
[83b6ba9f]709 }
[a35b458]710
[83b6ba9f]711 }
712 mutex_unlock(&sh_info->lock);
[a35b458]713
[83b6ba9f]714 if (dealloc) {
715 if (sh_info->backend && sh_info->backend->destroy_shared_data) {
716 sh_info->backend->destroy_shared_data(
717 sh_info->backend_shared_data);
718 }
[de0af3a]719 as_pagemap_finalize(&sh_info->pagemap);
[83b6ba9f]720 free(sh_info);
721 }
722}
723
[20d50a1]724/** Create address space area of common attributes.
725 *
726 * The created address space area is added to the target address space.
727 *
[da1bafb]728 * @param as Target address space.
729 * @param flags Flags of the area memory.
730 * @param size Size of area.
731 * @param attrs Attributes of the area.
732 * @param backend Address space area backend. NULL if no backend is used.
[826599a2]733 * @param backend_data NULL or a pointer to custom backend data.
[fbcdeb8]734 * @param base Starting virtual address of the area.
[f2c3fed]735 * If set to AS_AREA_ANY, a suitable mappable area is
736 * found.
737 * @param bound Lowest address bound if base is set to AS_AREA_ANY.
[fbcdeb8]738 * Otherwise ignored.
[da1bafb]739 *
740 * @return Address space area on success or NULL on failure.
[20d50a1]741 *
742 */
[da1bafb]743as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
[fbcdeb8]744 unsigned int attrs, mem_backend_t *backend,
745 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
[20d50a1]746{
[f2c3fed]747 if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
[37e7d2b9]748 return NULL;
[a35b458]749
[0b37882]750 if (size == 0)
[dbbeb26]751 return NULL;
[0941e9ae]752
[0b37882]753 size_t pages = SIZE2FRAMES(size);
[a35b458]754
[37e7d2b9]755 /* Writeable executable areas are not supported. */
756 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
757 return NULL;
[35a3d950]758
759 bool const guarded = flags & AS_AREA_GUARD;
[a35b458]760
[1068f6a]761 mutex_lock(&as->lock);
[a35b458]762
[f2c3fed]763 if (*base == (uintptr_t) AS_AREA_ANY) {
[35a3d950]764 *base = as_get_unmapped_area(as, bound, size, guarded);
[fbcdeb8]765 if (*base == (uintptr_t) -1) {
766 mutex_unlock(&as->lock);
767 return NULL;
768 }
769 }
[35a3d950]770
[83b6ba9f]771 if (overflows_into_positive(*base, size)) {
772 mutex_unlock(&as->lock);
[0941e9ae]773 return NULL;
[83b6ba9f]774 }
[0941e9ae]775
[35a3d950]776 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
[1068f6a]777 mutex_unlock(&as->lock);
[37e7d2b9]778 return NULL;
779 }
[a35b458]780
[11b285d]781 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t));
[7473807]782 if (!area) {
783 mutex_unlock(&as->lock);
784 return NULL;
785 }
[a35b458]786
[da1bafb]787 mutex_initialize(&area->lock, MUTEX_PASSIVE);
[a35b458]788
[da1bafb]789 area->as = as;
[88cc71c0]790 odlink_initialize(&area->las_areas);
[da1bafb]791 area->flags = flags;
792 area->attributes = attrs;
[0b37882]793 area->pages = pages;
[fbcdeb8]794 area->base = *base;
[da1bafb]795 area->backend = backend;
[83b6ba9f]796 area->sh_info = NULL;
[a35b458]797
[0ee077ee]798 if (backend_data)
[da1bafb]799 area->backend_data = *backend_data;
[0ee077ee]800 else
[da1bafb]801 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
[83b6ba9f]802
803 share_info_t *si = NULL;
804
805 /*
[ae7d03c]806 * Create the sharing info structure.
807 * We do this in advance for every new area, even if it is not going
808 * to be shared.
809 */
[83b6ba9f]810 if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
[11b285d]811 si = (share_info_t *) malloc(sizeof(share_info_t));
[7473807]812 if (!si) {
813 free(area);
814 mutex_unlock(&as->lock);
815 return NULL;
816 }
[83b6ba9f]817 mutex_initialize(&si->lock, MUTEX_PASSIVE);
818 si->refcount = 1;
819 si->shared = false;
820 si->backend_shared_data = NULL;
821 si->backend = backend;
[de0af3a]822 as_pagemap_initialize(&si->pagemap);
[83b6ba9f]823
824 area->sh_info = si;
[a35b458]825
[83b6ba9f]826 if (area->backend && area->backend->create_shared_data) {
827 if (!area->backend->create_shared_data(area)) {
828 free(area);
829 mutex_unlock(&as->lock);
830 sh_info_remove_reference(si);
831 return NULL;
832 }
833 }
834 }
835
[e394b736]836 if (area->backend && area->backend->create) {
837 if (!area->backend->create(area)) {
838 free(area);
839 mutex_unlock(&as->lock);
[83b6ba9f]840 if (!(attrs & AS_AREA_ATTR_PARTIAL))
841 sh_info_remove_reference(si);
[e394b736]842 return NULL;
843 }
844 }
[83b6ba9f]845
[2fc3b2d]846 used_space_initialize(&area->used_space);
[88cc71c0]847 odict_insert(&area->las_areas, &as->as_areas, NULL);
[a35b458]848
[1068f6a]849 mutex_unlock(&as->lock);
[a35b458]850
[da1bafb]851 return area;
[20d50a1]852}
853
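A hedged usage sketch of as_area_create() (not part of this file; it assumes the anonymous backend anon_backend referenced elsewhere in this file suits the caller, and passes 0 for the attributes to request none):

/* Illustrative sketch only: one anonymous, cacheable read/write page
 * placed at any suitable address chosen by the kernel. */
static as_area_t *example_create_rw_area(as_t *as)
{
        uintptr_t base = (uintptr_t) AS_AREA_ANY;  /* let the kernel pick */

        as_area_t *area = as_area_create(as,
            AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
            0 /* attrs */, &anon_backend, NULL /* backend data */,
            &base, 0 /* bound: no lower address bound */);

        /* On success, 'base' now holds the chosen starting address. */
        return area;
}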
[e3ee9b9]854/** Find address space area and lock it.
855 *
856 * @param as Address space.
857 * @param va Virtual address.
858 *
859 * @return Locked address space area containing va on success or
860 * NULL on failure.
861 *
862 */
[7a0359b]863NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
[e3ee9b9]864{
[63e27ef]865 assert(mutex_locked(&as->lock));
[a35b458]866
[88cc71c0]867 odlink_t *odlink = odict_find_leq(&as->as_areas, &va, NULL);
868 if (odlink == NULL)
869 return NULL;
[a35b458]870
[88cc71c0]871 as_area_t *area = odict_get_instance(odlink, as_area_t, las_areas);
872 mutex_lock(&area->lock);
[a35b458]873
[88cc71c0]874 assert(area->base <= va);
[a35b458]875
[88cc71c0]876 if (va <= area->base + (P2SZ(area->pages) - 1))
877 return area;
[a35b458]878
[88cc71c0]879 mutex_unlock(&area->lock);
[e3ee9b9]880 return NULL;
881}
882
[df0103f7]883/** Find address space area and change it.
884 *
[da1bafb]885 * @param as Address space.
886 * @param address Virtual address belonging to the area to be changed.
887 * Must be page-aligned.
888 * @param size New size of the virtual memory block starting at
889 * address.
890 * @param flags Flags influencing the remap operation. Currently unused.
891 *
892 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]893 *
[da1bafb]894 */
[b7fd2a0]895errno_t as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
[df0103f7]896{
[59fb782]897 if (!IS_ALIGNED(address, PAGE_SIZE))
898 return EINVAL;
899
[1068f6a]900 mutex_lock(&as->lock);
[a35b458]901
[df0103f7]902 /*
903 * Locate the area.
904 */
[da1bafb]905 as_area_t *area = find_area_and_lock(as, address);
[df0103f7]906 if (!area) {
[1068f6a]907 mutex_unlock(&as->lock);
[7242a78e]908 return ENOENT;
[df0103f7]909 }
[01029fc]910
911 if (!area->backend->is_resizable(area)) {
[df0103f7]912 /*
[01029fc]913 * The backend does not support resizing for this area.
[df0103f7]914 */
[1068f6a]915 mutex_unlock(&area->lock);
916 mutex_unlock(&as->lock);
[7242a78e]917 return ENOTSUP;
[df0103f7]918 }
[a35b458]919
[83b6ba9f]920 mutex_lock(&area->sh_info->lock);
921 if (area->sh_info->shared) {
[8182031]922 /*
[da1bafb]923 * Remapping of shared address space areas
[8182031]924 * is not supported.
925 */
[83b6ba9f]926 mutex_unlock(&area->sh_info->lock);
[8182031]927 mutex_unlock(&area->lock);
928 mutex_unlock(&as->lock);
929 return ENOTSUP;
930 }
[83b6ba9f]931 mutex_unlock(&area->sh_info->lock);
[a35b458]932
[da1bafb]933 size_t pages = SIZE2FRAMES((address - area->base) + size);
[df0103f7]934 if (!pages) {
935 /*
936 * Zero size address space areas are not allowed.
937 */
[1068f6a]938 mutex_unlock(&area->lock);
939 mutex_unlock(&as->lock);
[7242a78e]940 return EPERM;
[df0103f7]941 }
[a35b458]942
[df0103f7]943 if (pages < area->pages) {
[b6f3e7e]944 uintptr_t start_free = area->base + P2SZ(pages);
[a35b458]945
[df0103f7]946 /*
947 * Shrinking the area.
948 * No need to check for overlaps.
949 */
[a35b458]950
[c964521]951 page_table_lock(as, false);
[a35b458]952
[2fc3b2d]953 /*
954 * Start TLB shootdown sequence.
955 */
956
957 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
958 as->asid, area->base + P2SZ(pages),
959 area->pages - pages);
960
[56789125]961 /*
962 * Remove frames belonging to used space starting from
963 * the highest addresses downwards until an overlap with
[2fc3b2d]964 * the resized address space area is found.
[da1bafb]965 */
966 bool cond = true;
967 while (cond) {
[2fc3b2d]968 used_space_ival_t *ival =
969 used_space_last(&area->used_space);
970 assert(ival != NULL);
[a35b458]971
[2fc3b2d]972 uintptr_t ptr = ival->page;
973 size_t pcount = ival->count;
974 size_t i = 0;
975
976 if (overlaps(ptr, P2SZ(pcount), area->base,
977 P2SZ(pages))) {
[a35b458]978
[2fc3b2d]979 if (ptr + P2SZ(pcount) <= start_free) {
[56789125]980 /*
[2fc3b2d]981 * The whole interval fits completely
982 * in the resized address space area.
[56789125]983 */
[2fc3b2d]984 break;
[56789125]985 }
[a35b458]986
[d67dfdc]987 /*
[2fc3b2d]988 * Part of the interval overlaps with the resized
989 * address space area.
[d67dfdc]991 */
992
[2fc3b2d]993 /* We are almost done */
994 cond = false;
995 i = (start_free - ptr) >> PAGE_WIDTH;
[a35b458]996
[2fc3b2d]997 /* Shorten the interval to @c i pages */
998 used_space_shorten_ival(ival, i);
999 } else {
1000 /*
1001 * The interval of used space can be completely
1002 * removed.
1003 */
1004 used_space_remove_ival(ival);
1005 }
[a35b458]1006
[2fc3b2d]1007 for (; i < pcount; i++) {
1008 pte_t pte;
1009 bool found = page_mapping_find(as,
1010 ptr + P2SZ(i), false, &pte);
[a35b458]1011
[2fc3b2d]1012 (void) found;
1013 assert(found);
1014 assert(PTE_VALID(&pte));
1015 assert(PTE_PRESENT(&pte));
[a35b458]1016
[2fc3b2d]1017 if ((area->backend) &&
1018 (area->backend->frame_free)) {
1019 area->backend->frame_free(area,
1020 ptr + P2SZ(i),
1021 PTE_GET_FRAME(&pte));
[56789125]1022 }
[a35b458]1023
[2fc3b2d]1024 page_mapping_remove(as, ptr + P2SZ(i));
[d67dfdc]1025 }
[2fc3b2d]1026
[d67dfdc]1027 }
[2fc3b2d]1028
1029 /*
1030 * Finish TLB shootdown sequence.
1031 */
1032
1033 tlb_invalidate_pages(as->asid,
1034 area->base + P2SZ(pages),
1035 area->pages - pages);
1036
1037 /*
1038 * Invalidate software translation caches
1039 * (e.g. TSB on sparc64, PHT on ppc32).
1040 */
1041 as_invalidate_translation_cache(as,
1042 area->base + P2SZ(pages),
1043 area->pages - pages);
1044 tlb_shootdown_finalize(ipl);
1045
[da1bafb]1046 page_table_unlock(as, false);
[df0103f7]1047 } else {
1048 /*
1049 * Growing the area.
[0941e9ae]1050 */
1051
[94795812]1052 if (overflows_into_positive(address, P2SZ(pages)))
[0941e9ae]1053 return EINVAL;
1054
1055 /*
[df0103f7]1056 * Check for overlaps with other address space areas.
1057 */
[35a3d950]1058 bool const guarded = area->flags & AS_AREA_GUARD;
1059 if (!check_area_conflicts(as, address, pages, guarded, area)) {
[1068f6a]1060 mutex_unlock(&area->lock);
[da1bafb]1061 mutex_unlock(&as->lock);
[7242a78e]1062 return EADDRNOTAVAIL;
[df0103f7]1063 }
[da1bafb]1064 }
[a35b458]1065
[e394b736]1066 if (area->backend && area->backend->resize) {
1067 if (!area->backend->resize(area, pages)) {
1068 mutex_unlock(&area->lock);
1069 mutex_unlock(&as->lock);
1070 return ENOMEM;
1071 }
1072 }
[a35b458]1073
[df0103f7]1074 area->pages = pages;
[a35b458]1075
[1068f6a]1076 mutex_unlock(&area->lock);
1077 mutex_unlock(&as->lock);
[a35b458]1078
[7242a78e]1079 return 0;
1080}
1081
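A brief, hedged example of resizing (the example_grow_area() helper is hypothetical): growing succeeds only if the new pages do not collide with neighbouring areas or their guard pages.

/* Illustrative sketch only: grow the area starting at 'base' to four
 * pages in total; the flags argument is currently unused, so pass 0. */
static errno_t example_grow_area(as_t *as, uintptr_t base)
{
        return as_area_resize(as, base, 4 * PAGE_SIZE, 0);
}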
1082/** Destroy address space area.
1083 *
[da1bafb]1084 * @param as Address space.
1085 * @param address Address within the area to be deleted.
1086 *
1087 * @return Zero on success or a value from @ref errno.h on failure.
[7242a78e]1088 *
1089 */
[b7fd2a0]1090errno_t as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]1091{
[1068f6a]1092 mutex_lock(&as->lock);
[a35b458]1093
[da1bafb]1094 as_area_t *area = find_area_and_lock(as, address);
[7242a78e]1095 if (!area) {
[1068f6a]1096 mutex_unlock(&as->lock);
[7242a78e]1097 return ENOENT;
1098 }
[e394b736]1099
1100 if (area->backend && area->backend->destroy)
1101 area->backend->destroy(area);
[a35b458]1102
[c964521]1103 page_table_lock(as, false);
[5552d60]1104 /*
1105 * Start TLB shootdown sequence.
1106 */
[402eda5]1107 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1108 area->pages);
[a35b458]1109
[567807b1]1110 /*
[2fc3b2d]1111 * Visit only the pages mapped by used_space.
[567807b1]1112 */
[2fc3b2d]1113 used_space_ival_t *ival = used_space_first(&area->used_space);
1114 while (ival != NULL) {
1115 uintptr_t ptr = ival->page;
1116
1117 for (size_t size = 0; size < ival->count; size++) {
1118 pte_t pte;
1119 bool found = page_mapping_find(as,
1120 ptr + P2SZ(size), false, &pte);
1121
1122 (void) found;
1123 assert(found);
1124 assert(PTE_VALID(&pte));
1125 assert(PTE_PRESENT(&pte));
1126
1127 if ((area->backend) &&
1128 (area->backend->frame_free)) {
1129 area->backend->frame_free(area,
1130 ptr + P2SZ(size),
1131 PTE_GET_FRAME(&pte));
[7242a78e]1132 }
[2fc3b2d]1133
1134 page_mapping_remove(as, ptr + P2SZ(size));
[7242a78e]1135 }
[2fc3b2d]1136
1137 used_space_remove_ival(ival);
1138 ival = used_space_first(&area->used_space);
[7242a78e]1139 }
[a35b458]1140
[7242a78e]1141 /*
[5552d60]1142 * Finish TLB shootdown sequence.
[7242a78e]1143 */
[a35b458]1144
[f1d1f5d3]1145 tlb_invalidate_pages(as->asid, area->base, area->pages);
[a35b458]1146
[f1d1f5d3]1147 /*
[eef1b031]1148 * Invalidate potential software translation caches
1149 * (e.g. TSB on sparc64, PHT on ppc32).
[f1d1f5d3]1150 */
1151 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1152 tlb_shootdown_finalize(ipl);
[a35b458]1153
[c964521]1154 page_table_unlock(as, false);
[a35b458]1155
[2fc3b2d]1156 used_space_finalize(&area->used_space);
[8d4f2ae]1157 area->attributes |= AS_AREA_ATTR_PARTIAL;
[83b6ba9f]1158 sh_info_remove_reference(area->sh_info);
[a35b458]1159
[1068f6a]1160 mutex_unlock(&area->lock);
[a35b458]1161
[7242a78e]1162 /*
1163 * Remove the empty area from address space.
1164 */
[88cc71c0]1165 odict_remove(&area->las_areas);
[a35b458]1166
[8d4f2ae]1167 free(area);
[a35b458]1168
[f1d1f5d3]1169 mutex_unlock(&as->lock);
[7242a78e]1170 return 0;
[df0103f7]1171}
1172
[8d6bc2d5]1173/** Share address space area with another or the same address space.
[df0103f7]1174 *
[0ee077ee]1175 * Address space area mapping is shared with a new address space area.
1176 * If the source address space area has not been shared so far,
1177 * a new sh_info is created. The new address space area simply gets the
1178 * sh_info of the source area. The process of duplicating the
1179 * mapping is done through the backend share function.
[da1bafb]1180 *
1181 * @param src_as Pointer to source address space.
1182 * @param src_base Base address of the source address space area.
1183 * @param acc_size Expected size of the source area.
1184 * @param dst_as Pointer to destination address space.
[fd4d8c0]1185 * @param dst_flags_mask Destination address space area flags mask.
[fbcdeb8]1186 * @param dst_base Target base address. If set to -1,
1187 * a suitable mappable area is found.
1188 * @param bound Lowest address bound if dst_base is set to -1.
1189 * Otherwise ignored.
[df0103f7]1190 *
[da1bafb]1191 * @return Zero on success.
1192 * @return ENOENT if there is no such task or such address space.
1193 * @return EPERM if there was a problem in accepting the area.
1194 * @return ENOMEM if there was a problem in allocating destination
1195 * address space area.
1196 * @return ENOTSUP if the address space area backend does not support
1197 * sharing.
1198 *
[df0103f7]1199 */
[b7fd2a0]1200errno_t as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
[fbcdeb8]1201 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
1202 uintptr_t bound)
[df0103f7]1203{
[1068f6a]1204 mutex_lock(&src_as->lock);
[da1bafb]1205 as_area_t *src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]1206 if (!src_area) {
[6fa476f7]1207 /*
1208 * Could not find the source address space area.
1209 */
[1068f6a]1210 mutex_unlock(&src_as->lock);
[6fa476f7]1211 return ENOENT;
1212 }
[a35b458]1213
[01029fc]1214 if (!src_area->backend->is_shareable(src_area)) {
[8d6bc2d5]1215 /*
[01029fc]1216 * The backend does not permit sharing of this area.
[8d6bc2d5]1217 */
1218 mutex_unlock(&src_area->lock);
1219 mutex_unlock(&src_as->lock);
1220 return ENOTSUP;
1221 }
[a35b458]1222
[b6f3e7e]1223 size_t src_size = P2SZ(src_area->pages);
[da1bafb]1224 unsigned int src_flags = src_area->flags;
1225 mem_backend_t *src_backend = src_area->backend;
1226 mem_backend_data_t src_backend_data = src_area->backend_data;
[a35b458]1227
[1ec1fd8]1228 /* Share the cacheable flag from the original mapping */
1229 if (src_flags & AS_AREA_CACHEABLE)
1230 dst_flags_mask |= AS_AREA_CACHEABLE;
[a35b458]1231
[da1bafb]1232 if ((src_size != acc_size) ||
1233 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
[8d6bc2d5]1234 mutex_unlock(&src_area->lock);
1235 mutex_unlock(&src_as->lock);
[df0103f7]1236 return EPERM;
1237 }
[a35b458]1238
[8d6bc2d5]1239 /*
1240 * Now we are committed to sharing the area.
[8440473]1241 * First, prepare the area for sharing.
[8d6bc2d5]1242 * Then it will be safe to unlock it.
1243 */
[da1bafb]1244 share_info_t *sh_info = src_area->sh_info;
[a35b458]1245
[83b6ba9f]1246 mutex_lock(&sh_info->lock);
1247 sh_info->refcount++;
1248 bool shared = sh_info->shared;
1249 sh_info->shared = true;
1250 mutex_unlock(&sh_info->lock);
1251
1252 if (!shared) {
[c0697c4c]1253 /*
1254 * Call the backend to setup sharing.
[83b6ba9f]1255 * This only happens once for each sh_info.
[c0697c4c]1256 */
1257 src_area->backend->share(src_area);
[8d6bc2d5]1258 }
[a35b458]1259
[8d6bc2d5]1260 mutex_unlock(&src_area->lock);
1261 mutex_unlock(&src_as->lock);
[a35b458]1262
[df0103f7]1263 /*
[a9e8b39]1264 * Create copy of the source address space area.
1265 * The destination area is created with AS_AREA_ATTR_PARTIAL
1266 * attribute set which prevents race condition with
1267 * preliminary as_page_fault() calls.
[fd4d8c0]1268 * The flags of the source area are masked against dst_flags_mask
1269 * to support sharing in less privileged mode.
[df0103f7]1270 */
[fbcdeb8]1271 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
1272 src_size, AS_AREA_ATTR_PARTIAL, src_backend,
1273 &src_backend_data, dst_base, bound);
[a9e8b39]1274 if (!dst_area) {
[df0103f7]1275 /*
1276 * Destination address space area could not be created.
1277 */
[8d6bc2d5]1278 sh_info_remove_reference(sh_info);
[a35b458]1279
[df0103f7]1280 return ENOMEM;
1281 }
[a35b458]1282
[a9e8b39]1283 /*
1284 * Now the destination address space area has been
1285 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]1286 * attribute and set the sh_info.
[da1bafb]1287 */
1288 mutex_lock(&dst_as->lock);
[1068f6a]1289 mutex_lock(&dst_area->lock);
[a9e8b39]1290 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]1291 dst_area->sh_info = sh_info;
[1068f6a]1292 mutex_unlock(&dst_area->lock);
[da1bafb]1293 mutex_unlock(&dst_as->lock);
[a35b458]1294
[df0103f7]1295 return 0;
1296}
1297
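A hedged sketch of sharing (hypothetical helper, not part of this file; it assumes the source area is readable): the destination flags act as a mask, so the receiving side can be granted fewer rights than the source, here read-only. The cacheable flag is propagated automatically, as noted above.

/* Illustrative sketch only: map the source area read-only into another
 * address space at any free address. */
static errno_t example_share_readonly(as_t *src_as, uintptr_t src_base,
    size_t size, as_t *dst_as, uintptr_t *dst_base)
{
        *dst_base = (uintptr_t) -1;  /* let the kernel pick the address */

        return as_area_share(src_as, src_base, size, dst_as,
            AS_AREA_READ, dst_base, 0);
}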
[fb84455]1298/** Check access mode for address space area.
1299 *
[da1bafb]1300 * @param area Address space area.
1301 * @param access Access mode.
1302 *
1303 * @return False if access violates area's permissions, true
1304 * otherwise.
[fb84455]1305 *
1306 */
[97bdb4a]1307NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
[fb84455]1308{
[63e27ef]1309 assert(mutex_locked(&area->lock));
[a35b458]1310
[fb84455]1311 int flagmap[] = {
1312 [PF_ACCESS_READ] = AS_AREA_READ,
1313 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
1314 [PF_ACCESS_EXEC] = AS_AREA_EXEC
1315 };
[a35b458]1316
[fb84455]1317 if (!(area->flags & flagmap[access]))
1318 return false;
[a35b458]1319
[fb84455]1320 return true;
1321}
1322
[e3ee9b9]1323/** Convert address space area flags to page flags.
1324 *
1325 * @param aflags Flags of some address space area.
1326 *
1327 * @return Flags to be passed to page_mapping_insert().
1328 *
1329 */
[7a0359b]1330NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
[e3ee9b9]1331{
1332 unsigned int flags = PAGE_USER | PAGE_PRESENT;
[a35b458]1333
[e3ee9b9]1334 if (aflags & AS_AREA_READ)
1335 flags |= PAGE_READ;
[a35b458]1336
[e3ee9b9]1337 if (aflags & AS_AREA_WRITE)
1338 flags |= PAGE_WRITE;
[a35b458]1339
[e3ee9b9]1340 if (aflags & AS_AREA_EXEC)
1341 flags |= PAGE_EXEC;
[a35b458]1342
[e3ee9b9]1343 if (aflags & AS_AREA_CACHEABLE)
1344 flags |= PAGE_CACHEABLE;
[a35b458]1345
[e3ee9b9]1346 return flags;
1347}
1348
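As a concrete illustration of the conversion above:

/*
 * An area created with AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE
 * translates to
 *     PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * i.e. a present, user-accessible, cacheable read/write mapping with no
 * execute permission, which is what page_mapping_insert() then installs.
 */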
[c477c80]1349/** Change address space area flags.
[c98e6ee]1350 *
1351 * The idea is to have the same data, but with a different access mode.
1352 * This is needed e.g. for writing code into memory and then executing it.
1353 * In order for this to work properly, this may copy the data
1354 * into private anonymous memory (unless it's already there).
1355 *
[76fca31]1356 * @param as Address space.
1357 * @param flags Flags of the area memory.
1358 * @param address Address within the area to be changed.
1359 *
1360 * @return Zero on success or a value from @ref errno.h on failure.
[c98e6ee]1361 *
1362 */
[b7fd2a0]1363errno_t as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
[c98e6ee]1364{
1365 /* Flags for the new memory mapping */
[da1bafb]1366 unsigned int page_flags = area_flags_to_page_flags(flags);
[a35b458]1367
[c98e6ee]1368 mutex_lock(&as->lock);
[a35b458]1369
[da1bafb]1370 as_area_t *area = find_area_and_lock(as, address);
[c98e6ee]1371 if (!area) {
1372 mutex_unlock(&as->lock);
1373 return ENOENT;
1374 }
[a35b458]1375
[83b6ba9f]1376 if (area->backend != &anon_backend) {
[c98e6ee]1377 /* Copying non-anonymous memory not supported yet */
1378 mutex_unlock(&area->lock);
1379 mutex_unlock(&as->lock);
1380 return ENOTSUP;
1381 }
[83b6ba9f]1382
1383 mutex_lock(&area->sh_info->lock);
1384 if (area->sh_info->shared) {
1385 /* Copying shared areas not supported yet */
1386 mutex_unlock(&area->sh_info->lock);
1387 mutex_unlock(&area->lock);
1388 mutex_unlock(&as->lock);
1389 return ENOTSUP;
1390 }
1391 mutex_unlock(&area->sh_info->lock);
[a35b458]1392
[c98e6ee]1393 /* An array for storing frame numbers */
[433d52f]1394 uintptr_t *old_frame = malloc(area->used_space.pages *
1395 sizeof(uintptr_t));
[7473807]1396 if (!old_frame) {
1397 mutex_unlock(&area->lock);
1398 mutex_unlock(&as->lock);
1399 return ENOMEM;
1400 }
[a35b458]1401
[c964521]1402 page_table_lock(as, false);
[a35b458]1403
[c98e6ee]1404 /*
1405 * Start TLB shootdown sequence.
1406 */
[402eda5]1407 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1408 area->pages);
[a35b458]1409
[c98e6ee]1410 /*
1411 * Remove used pages from page tables and remember their frame
1412 * numbers.
1413 */
[da1bafb]1414 size_t frame_idx = 0;
[a35b458]1415
[433d52f]1416 used_space_ival_t *ival = used_space_first(&area->used_space);
[2fc3b2d]1417 while (ival != NULL) {
1418 uintptr_t ptr = ival->page;
1419 size_t size;
[a35b458]1420
[2fc3b2d]1421 for (size = 0; size < ival->count; size++) {
1422 pte_t pte;
1423 bool found = page_mapping_find(as, ptr + P2SZ(size),
1424 false, &pte);
[a35b458]1425
[2fc3b2d]1426 (void) found;
1427 assert(found);
1428 assert(PTE_VALID(&pte));
1429 assert(PTE_PRESENT(&pte));
[a35b458]1430
[2fc3b2d]1431 old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
[a35b458]1432
[2fc3b2d]1433 /* Remove old mapping */
1434 page_mapping_remove(as, ptr + P2SZ(size));
[c98e6ee]1435 }
[2fc3b2d]1436
1437 ival = used_space_next(ival);
[c98e6ee]1438 }
[a35b458]1439
[c98e6ee]1440 /*
1441 * Finish TLB shootdown sequence.
1442 */
[a35b458]1443
[c98e6ee]1444 tlb_invalidate_pages(as->asid, area->base, area->pages);
[a35b458]1445
[c98e6ee]1446 /*
[eef1b031]1447 * Invalidate potential software translation caches
1448 * (e.g. TSB on sparc64, PHT on ppc32).
[c98e6ee]1449 */
1450 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1451 tlb_shootdown_finalize(ipl);
[a35b458]1452
[c964521]1453 page_table_unlock(as, false);
[a35b458]1454
[ae7f6fb]1455 /*
1456 * Set the new flags.
1457 */
1458 area->flags = flags;
[a35b458]1459
[c98e6ee]1460 /*
1461 * Map pages back in with new flags. This step is kept separate
1462 * so that the memory area cannot be accessed with both the old and
1463 * the new flags at once.
[c98e6ee]1464 */
1465 frame_idx = 0;
[a35b458]1466
[2fc3b2d]1467 ival = used_space_first(&area->used_space);
1468 while (ival != NULL) {
1469 uintptr_t ptr = ival->page;
1470 size_t size;
[a35b458]1471
[2fc3b2d]1472 for (size = 0; size < ival->count; size++) {
1473 page_table_lock(as, false);
[a35b458]1474
[2fc3b2d]1475 /* Insert the new mapping */
1476 page_mapping_insert(as, ptr + P2SZ(size),
1477 old_frame[frame_idx++], page_flags);
[a35b458]1478
[2fc3b2d]1479 page_table_unlock(as, false);
[c98e6ee]1480 }
[2fc3b2d]1481
1482 ival = used_space_next(ival);
[c98e6ee]1483 }
[a35b458]1484
[c98e6ee]1485 free(old_frame);
[a35b458]1486
[c98e6ee]1487 mutex_unlock(&area->lock);
1488 mutex_unlock(&as->lock);
[a35b458]1489
[c98e6ee]1490 return 0;
1491}
1492
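A hedged sketch of the typical use mentioned above, writing code into memory and then executing it (hypothetical helper; the area is assumed to be private anonymous memory):

/* Illustrative sketch only: after generating code into a read/write
 * anonymous area at 'base', drop write permission and allow execution. */
static errno_t example_make_executable(as_t *as, uintptr_t base)
{
        return as_area_change_flags(as,
            AS_AREA_READ | AS_AREA_EXEC | AS_AREA_CACHEABLE, base);
}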
[20d50a1]1493/** Handle page fault within the current address space.
1494 *
[6745592]1495 * This is the high-level page fault handler. It decides whether the page fault
1496 * can be resolved by any backend and if so, it invokes the backend to resolve
1497 * the page fault.
[8182031]1498 *
[20d50a1]1499 * Interrupts are assumed disabled.
1500 *
[59fb782]1501 * @param address Faulting address.
1502 * @param access Access mode that caused the page fault (i.e.
1503 * read/write/exec).
1504 * @param istate Pointer to the interrupted state.
[da1bafb]1505 *
1506 * @return AS_PF_FAULT on page fault.
1507 * @return AS_PF_OK on success.
1508 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1509 * or copy_from_uspace().
[20d50a1]1510 *
1511 */
[59fb782]1512int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
[20d50a1]1513{
[59fb782]1514 uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
[908bb96]1515 int rc = AS_PF_FAULT;
1516
[1068f6a]1517 if (!THREAD)
[1dbc43f]1518 goto page_fault;
[a35b458]1519
[7af8c0e]1520 if (!AS)
[1dbc43f]1521 goto page_fault;
[a35b458]1522
[1068f6a]1523 mutex_lock(&AS->lock);
[da1bafb]1524 as_area_t *area = find_area_and_lock(AS, page);
[20d50a1]1525 if (!area) {
1526 /*
1527 * No area contained mapping for 'page'.
1528 * Signal page fault to low-level handler.
1529 */
[1068f6a]1530 mutex_unlock(&AS->lock);
[e3c762cd]1531 goto page_fault;
[20d50a1]1532 }
[a35b458]1533
[a9e8b39]1534 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1535 /*
1536 * The address space area is not fully initialized.
1537 * Avoid possible race by returning error.
1538 */
[1068f6a]1539 mutex_unlock(&area->lock);
1540 mutex_unlock(&AS->lock);
[da1bafb]1541 goto page_fault;
[a9e8b39]1542 }
[a35b458]1543
[da1bafb]1544 if ((!area->backend) || (!area->backend->page_fault)) {
[8182031]1545 /*
1546 * The address space area is not backed by any backend
1547 * or the backend cannot handle page faults.
1548 */
1549 mutex_unlock(&area->lock);
1550 mutex_unlock(&AS->lock);
[da1bafb]1551 goto page_fault;
[8182031]1552 }
[a35b458]1553
[2299914]1554 page_table_lock(AS, false);
[a35b458]1555
[2299914]1556 /*
[6745592]1557 * To avoid a race condition between two page faults on the same address,
1558 * we need to make sure the mapping has not already been inserted.
[2299914]1559 */
[38dc82d]1560 pte_t pte;
1561 bool found = page_mapping_find(AS, page, false, &pte);
[560b81c]1562 if (found && PTE_PRESENT(&pte)) {
1563 if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
1564 (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
1565 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
1566 page_table_unlock(AS, false);
1567 mutex_unlock(&area->lock);
1568 mutex_unlock(&AS->lock);
1569 return AS_PF_OK;
[2299914]1570 }
1571 }
[a35b458]1572
[20d50a1]1573 /*
[8182031]1574 * Resort to the backend page fault handler.
[20d50a1]1575 */
[908bb96]1576 rc = area->backend->page_fault(area, page, access);
1577 if (rc != AS_PF_OK) {
[8182031]1578 page_table_unlock(AS, false);
1579 mutex_unlock(&area->lock);
1580 mutex_unlock(&AS->lock);
1581 goto page_fault;
1582 }
[a35b458]1583
[8182031]1584 page_table_unlock(AS, false);
[1068f6a]1585 mutex_unlock(&area->lock);
1586 mutex_unlock(&AS->lock);
[e3c762cd]1587 return AS_PF_OK;
[a35b458]1588
[e3c762cd]1589page_fault:
[5071f8a]1590 if (THREAD && THREAD->in_copy_from_uspace) {
[e3c762cd]1591 THREAD->in_copy_from_uspace = false;
[6f4495f5]1592 istate_set_retaddr(istate,
1593 (uintptr_t) &memcpy_from_uspace_failover_address);
[5071f8a]1594 } else if (THREAD && THREAD->in_copy_to_uspace) {
[e3c762cd]1595 THREAD->in_copy_to_uspace = false;
[6f4495f5]1596 istate_set_retaddr(istate,
1597 (uintptr_t) &memcpy_to_uspace_failover_address);
[908bb96]1598 } else if (rc == AS_PF_SILENT) {
1599 printf("Killing task %" PRIu64 " due to a "
1600 "failed late reservation request.\n", TASK->taskid);
1601 task_kill_self(true);
[e3c762cd]1602 } else {
[59fb782]1603 fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
1604 panic_memtrap(istate, access, address, NULL);
[e3c762cd]1605 }
[a35b458]1606
[e3c762cd]1607 return AS_PF_DEFER;
[20d50a1]1608}
1609
[7e4e532]1610/** Switch address spaces.
[1068f6a]1611 *
1612 * Note that this function cannot sleep as it is essentially a part of
[879585a3]1613 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1614 * thing which is forbidden in this context is locking the address space.
[20d50a1]1615 *
[7250d2c]1616 * When this function is entered, no spinlocks may be held.
[31d8e10]1617 *
[da1bafb]1618 * @param old Old address space or NULL.
1619 * @param new New address space.
1620 *
[20d50a1]1621 */
[80bcaed]1622void as_switch(as_t *old_as, as_t *new_as)
[20d50a1]1623{
[31d8e10]1624 DEADLOCK_PROBE_INIT(p_asidlock);
1625 preemption_disable();
[a35b458]1626
[31d8e10]1627retry:
1628 (void) interrupts_disable();
1629 if (!spinlock_trylock(&asidlock)) {
[da1bafb]1630 /*
[31d8e10]1631 * Avoid deadlock with TLB shootdown.
1632 * We can enable interrupts here because
1633 * preemption is disabled. We should not be
1634 * holding any other lock.
1635 */
1636 (void) interrupts_enable();
1637 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1638 goto retry;
1639 }
1640 preemption_enable();
[a35b458]1641
[7e4e532]1642 /*
1643 * First, take care of the old address space.
[da1bafb]1644 */
[80bcaed]1645 if (old_as) {
[63e27ef]1646 assert(old_as->cpu_refcount);
[a35b458]1647
[da1bafb]1648 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
[7e4e532]1649 /*
1650 * The old address space is no longer active on
1651 * any processor. It can be appended to the
1652 * list of inactive address spaces with assigned
1653 * ASID.
1654 */
[63e27ef]1655 assert(old_as->asid != ASID_INVALID);
[a35b458]1656
[2057572]1657 list_append(&old_as->inactive_as_with_asid_link,
[55b77d9]1658 &inactive_as_with_asid_list);
[7e4e532]1659 }
[a35b458]1660
[57da95c]1661 /*
1662 * Perform architecture-specific tasks when the address space
1663 * is being removed from the CPU.
1664 */
[80bcaed]1665 as_deinstall_arch(old_as);
[7e4e532]1666 }
[a35b458]1667
[7e4e532]1668 /*
1669 * Second, prepare the new address space.
1670 */
[80bcaed]1671 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
[879585a3]1672 if (new_as->asid != ASID_INVALID)
[80bcaed]1673 list_remove(&new_as->inactive_as_with_asid_link);
[879585a3]1674 else
1675 new_as->asid = asid_get();
[7e4e532]1676 }
[a35b458]1677
[80bcaed]1678#ifdef AS_PAGE_TABLE
1679 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1680#endif
[a35b458]1681
[20d50a1]1682 /*
1683 * Perform architecture-specific steps.
[4512d7e]1684 * (e.g. write ASID to hardware register etc.)
[20d50a1]1685 */
[80bcaed]1686 as_install_arch(new_as);
[a35b458]1687
[879585a3]1688 spinlock_unlock(&asidlock);
[a35b458]1689
[80bcaed]1690 AS = new_as;
[20d50a1]1691}
[6a3c9a7]1692
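For context, a hedged sketch of the caller's side (the real call site is the scheduler, outside this file; the thread->task->as chain is assumed to be available as it is elsewhere in the kernel):

/* Illustrative sketch only: switch to the address space of the thread
 * about to run, but only if it differs from the one currently installed. */
static void example_before_running(thread_t *thread)
{
        as_t *new_as = thread->task->as;

        if (new_as != AS)
                as_switch(AS, new_as);
}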
[df0103f7]1693/** Compute flags for virtual address translation subsystem.
1694 *
[da1bafb]1695 * @param area Address space area.
1696 *
1697 * @return Flags to be used in page_mapping_insert().
[df0103f7]1698 *
1699 */
[97bdb4a]1700NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
[df0103f7]1701{
[63e27ef]1702 assert(mutex_locked(&area->lock));
[a35b458]1703
[da1bafb]1704 return area_flags_to_page_flags(area->flags);
[df0103f7]1705}
1706
[88cc71c0]1707/** Get key function for the @c as_t.as_areas ordered dictionary.
1708 *
1709 * @param odlink Link to as_t.as_areas ordered dictionary
1710 * @return Pointer to the area base address cast as 'void *'
1711 */
1712static void *as_areas_getkey(odlink_t *odlink)
1713{
1714 as_area_t *area = odict_get_instance(odlink, as_area_t, las_areas);
1715 return (void *) &area->base;
1716}
1717
1718/** Key comparison function for the @c as_t.as_areas ordered dictionary.
1719 *
1720 * @param a Pointer to area A base
1721 * @param b Pointer to area B base
1722 * @return -1, 0, +1 if base of A is less than, equal to, or greater than base of B
1723 */
1724static int as_areas_cmp(void *a, void *b)
1725{
1726 uintptr_t base_a = *(uintptr_t *)a;
1727 uintptr_t base_b = *(uintptr_t *)b;
1728
1729 if (base_a < base_b)
1730 return -1;
1731 else if (base_a == base_b)
1732 return 0;
1733 else
1734 return +1;
1735}
1736
[ef67bab]1737/** Create page table.
1738 *
[6745592]1739 * Depending on the architecture, create either an address space private
1740 * or a global page table.
[ef67bab]1741 *
[da1bafb]1742 * @param flags Flags saying whether the page table is for the kernel
1743 * address space.
1744 *
1745 * @return First entry of the page table.
[ef67bab]1746 *
1747 */
[97bdb4a]1748NO_TRACE pte_t *page_table_create(unsigned int flags)
[ef67bab]1749{
[63e27ef]1750 assert(as_operations);
1751 assert(as_operations->page_table_create);
[a35b458]1752
[bd1deed]1753 return as_operations->page_table_create(flags);
[ef67bab]1754}
[d3e7ff4]1755
[482826d]1756/** Destroy page table.
1757 *
1758 * Destroy page table in architecture specific way.
1759 *
[da1bafb]1760 * @param page_table Physical address of PTL0.
1761 *
[482826d]1762 */
[97bdb4a]1763NO_TRACE void page_table_destroy(pte_t *page_table)
[482826d]1764{
[63e27ef]1765 assert(as_operations);
1766 assert(as_operations->page_table_destroy);
[a35b458]1767
[bd1deed]1768 as_operations->page_table_destroy(page_table);
[482826d]1769}
1770
[2299914]1771/** Lock page table.
1772 *
1773 * This function should be called before any page_mapping_insert(),
1774 * page_mapping_remove() and page_mapping_find().
[da1bafb]1775 *
[2299914]1776 * Locking order is such that address space areas must be locked
 1777 * prior to this call. The address space can be locked prior to this
 1778 * call, in which case the lock argument is false.
1779 *
[da1bafb]1780 * @param as Address space.
1781 * @param lock If false, do not attempt to lock as->lock.
1782 *
[2299914]1783 */
[97bdb4a]1784NO_TRACE void page_table_lock(as_t *as, bool lock)
[2299914]1785{
[63e27ef]1786 assert(as_operations);
1787 assert(as_operations->page_table_lock);
[a35b458]1788
[2299914]1789 as_operations->page_table_lock(as, lock);
1790}
1791
1792/** Unlock page table.
1793 *
[da1bafb]1794 * @param as Address space.
1795 * @param unlock If false, do not attempt to unlock as->lock.
1796 *
[2299914]1797 */
[97bdb4a]1798NO_TRACE void page_table_unlock(as_t *as, bool unlock)
[2299914]1799{
[63e27ef]1800 assert(as_operations);
1801 assert(as_operations->page_table_unlock);
[a35b458]1802
[2299914]1803 as_operations->page_table_unlock(as, unlock);
1804}
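/*
 * Usage sketch (illustrative, not part of the original file): the locking
 * protocol described above. The mapping operation in the middle is only
 * indicated by a comment, so no particular call signature is assumed.
 */
#if 0
static void example_with_page_tables_locked(as_t *as)
{
	page_table_lock(as, true);
	/* ... page_mapping_insert() / page_mapping_find() / page_mapping_remove() ... */
	page_table_unlock(as, true);
}
#endif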
1805
[ada559c]1806/** Test whether page tables are locked.
1807 *
[e3ee9b9]1808 * @param as Address space where the page tables belong.
[ada559c]1809 *
[e3ee9b9]1810 * @return True if the page tables belonging to the address space
1811 * are locked, otherwise false.
[ada559c]1812 */
[97bdb4a]1813NO_TRACE bool page_table_locked(as_t *as)
[ada559c]1814{
[63e27ef]1815 assert(as_operations);
1816 assert(as_operations->page_table_locked);
[ada559c]1817
1818 return as_operations->page_table_locked(as);
1819}
1820
[b878df3]1821/** Return size of the address space area with given base.
1822 *
[1d432f9]1823 * @param base Arbitrary address inside the address space area.
[da1bafb]1824 *
1825 * @return Size of the address space area in bytes or zero if it
1826 * does not exist.
[b878df3]1827 *
1828 */
1829size_t as_area_get_size(uintptr_t base)
[7c23af9]1830{
1831 size_t size;
[a35b458]1832
[1d432f9]1833 page_table_lock(AS, true);
[da1bafb]1834 as_area_t *src_area = find_area_and_lock(AS, base);
[a35b458]1835
[6745592]1836 if (src_area) {
[b6f3e7e]1837 size = P2SZ(src_area->pages);
[1068f6a]1838 mutex_unlock(&src_area->lock);
[da1bafb]1839 } else
[7c23af9]1840 size = 0;
[a35b458]1841
[1d432f9]1842 page_table_unlock(AS, true);
[7c23af9]1843 return size;
1844}
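/*
 * Usage sketch (illustrative, not part of the original file): a zero return
 * value is how a caller distinguishes "no area at this address" from a real
 * area size. example_area_exists() is a hypothetical helper.
 */
#if 0
static bool example_area_exists(uintptr_t base)
{
	return as_area_get_size(base) != 0;
}
#endif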
1845
[2fc3b2d]1846/** Initialize used space map.
[25bf215]1847 *
[2fc3b2d]1848 * @param used_space Used space map
1849 */
1850static void used_space_initialize(used_space_t *used_space)
1851{
1852 odict_initialize(&used_space->ivals, used_space_getkey, used_space_cmp);
1853 used_space->pages = 0;
1854}
1855
1856/** Finalize used space map.
[da1bafb]1857 *
[2fc3b2d]1858 * @param used_space Used space map
1859 */
1860static void used_space_finalize(used_space_t *used_space)
1861{
1862 assert(odict_empty(&used_space->ivals));
1863 odict_finalize(&used_space->ivals);
1864}
1865
1866/** Get first interval of used space.
[25bf215]1867 *
[2fc3b2d]1868 * @param used_space Used space map
1869 * @return First interval or @c NULL if there are none
[25bf215]1870 */
[2fc3b2d]1871used_space_ival_t *used_space_first(used_space_t *used_space)
[25bf215]1872{
[2fc3b2d]1873 odlink_t *odlink = odict_first(&used_space->ivals);
[a35b458]1874
[2fc3b2d]1875 if (odlink == NULL)
1876 return NULL;
[566da7f8]1877
[2fc3b2d]1878 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1879}
[a35b458]1880
[2fc3b2d]1881/** Get next interval of used space.
1882 *
1883 * @param cur Current interval
1884 * @return Next interval or @c NULL if there are none
1885 */
1886used_space_ival_t *used_space_next(used_space_ival_t *cur)
1887{
1888 odlink_t *odlink = odict_next(&cur->lused_space,
1889 &cur->used_space->ivals);
[a35b458]1890
[2fc3b2d]1891 if (odlink == NULL)
1892 return NULL;
[a35b458]1893
[2fc3b2d]1894 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1895}
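/*
 * Iteration sketch (illustrative, not part of the original file): walking
 * the whole map with used_space_first()/used_space_next(). The per-interval
 * counts add up to used_space->pages.
 */
#if 0
static size_t example_count_used_pages(used_space_t *used_space)
{
	size_t pages = 0;
	used_space_ival_t *ival = used_space_first(used_space);

	while (ival != NULL) {
		pages += ival->count;
		ival = used_space_next(ival);
	}

	return pages;
}
#endif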
[a35b458]1896
[2fc3b2d]1897/** Get last interval of used space.
1898 *
1899 * @param used_space Used space map
 1900 * @return Last interval or @c NULL if there are none
1901 */
1902static used_space_ival_t *used_space_last(used_space_t *used_space)
1903{
1904 odlink_t *odlink = odict_last(&used_space->ivals);
[a35b458]1905
[2fc3b2d]1906 if (odlink == NULL)
1907 return NULL;
[a35b458]1908
[2fc3b2d]1909 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1910}
[a35b458]1911
[2fc3b2d]1912/** Find the first interval that contains addresses greater than or equal to
1913 * @a ptr.
1914 *
1915 * @param used_space Used space map
1916 * @param ptr Virtual address
1917 *
1918 * @return Used space interval or @c NULL if none matches
1919 */
1920used_space_ival_t *used_space_find_gteq(used_space_t *used_space, uintptr_t ptr)
1921{
1922 odlink_t *odlink;
1923 used_space_ival_t *ival;
[a35b458]1924
[2fc3b2d]1925	/* Find the last interval starting at an address less than @a ptr */
1926 odlink = odict_find_lt(&used_space->ivals, &ptr, NULL);
1927 if (odlink != NULL) {
1928 ival = odict_get_instance(odlink, used_space_ival_t,
1929 lused_space);
[a35b458]1930
[2fc3b2d]1931 /* If the interval extends above @a ptr, return it */
1932 if (ival->page + P2SZ(ival->count) > ptr)
1933 return ival;
[a35b458]1934
[25bf215]1935 /*
[2fc3b2d]1936 * Otherwise, if a next interval exists, it must match
1937 * the criteria.
1938 */
1939 odlink = odict_next(&ival->lused_space, &used_space->ivals);
1940 } else {
1941 /*
1942 * No interval with lower base address, so if there is any
1943 * interval at all, it must match the criteria
[25bf215]1944 */
[2fc3b2d]1945 odlink = odict_first(&used_space->ivals);
1946 }
[a35b458]1947
[2fc3b2d]1948 if (odlink != NULL) {
1949 ival = odict_get_instance(odlink, used_space_ival_t,
1950 lused_space);
1951 return ival;
[25bf215]1952 }
[a35b458]1953
[2fc3b2d]1954 return NULL;
1955}
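/*
 * Usage sketch (illustrative, not part of the original file): testing
 * whether a single page-aligned address lies in some used interval. The
 * interval returned by used_space_find_gteq() is known to end above the
 * queried page, so only its base needs to be checked.
 */
#if 0
static bool example_page_is_used(used_space_t *used_space, uintptr_t page)
{
	used_space_ival_t *ival = used_space_find_gteq(used_space, page);

	return (ival != NULL) && (ival->page <= page);
}
#endif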
[a35b458]1956
[2fc3b2d]1957/** Get key function for used space ordered dictionary.
1958 *
 1959 * The key is the virtual address of the first page.
1960 *
1961 * @param odlink Ordered dictionary link (used_space_ival_t.lused_space)
1962 * @return Pointer to virtual address of first page cast as @c void *.
1963 */
1964static void *used_space_getkey(odlink_t *odlink)
1965{
1966 used_space_ival_t *ival = odict_get_instance(odlink, used_space_ival_t,
1967 lused_space);
1968 return (void *) &ival->page;
1969}
[a35b458]1970
[2fc3b2d]1971/** Compare function for used space ordered dictionary.
1972 *
1973 * @param a Pointer to virtual address of first page cast as @c void *
1974 * @param b Pointer to virtual address of first page cast as @c void *
1975 * @return Less than zero, zero, greater than zero if virtual address @a a
1976 * is less than, equal to, greater than virtual address b, respectively.
1977 */
1978static int used_space_cmp(void *a, void *b)
1979{
1980 uintptr_t va = *(uintptr_t *) a;
1981 uintptr_t vb = *(uintptr_t *) b;
1982
1983 if (va < vb)
1984 return -1;
1985 else if (va == vb)
1986 return 0;
1987 else
1988 return +1;
1989}
1990
1991/** Remove used space interval.
1992 *
1993 * @param ival Used space interval
1994 */
1995static void used_space_remove_ival(used_space_ival_t *ival)
1996{
1997 ival->used_space->pages -= ival->count;
1998 odict_remove(&ival->lused_space);
1999 slab_free(used_space_ival_cache, ival);
2000}
[a35b458]2001
[2fc3b2d]2002/** Shorten used space interval.
2003 *
2004 * @param ival Used space interval
2005 * @param count New number of pages in the interval
2006 */
2007static void used_space_shorten_ival(used_space_ival_t *ival, size_t count)
2008{
2009 assert(count > 0);
2010 assert(count < ival->count);
[a35b458]2011
[2fc3b2d]2012 ival->used_space->pages -= ival->count - count;
2013 ival->count = count;
[25bf215]2014}
2015
[2fc3b2d]2016/** Mark portion of address space area as used.
[25bf215]2017 *
2018 * The address space area must be already locked.
2019 *
[2fc3b2d]2020 * @param used_space Used space map
2021 * @param page First page to be marked.
[da1bafb]2022 * @param count Number of pages to be marked.
2023 *
[fc47885]2024 * @return False on failure or true on success.
[25bf215]2025 *
2026 */
[2fc3b2d]2027bool used_space_insert(used_space_t *used_space, uintptr_t page, size_t count)
[25bf215]2028{
[2fc3b2d]2029 used_space_ival_t *a;
2030 used_space_ival_t *b;
2031 bool adj_a;
2032 bool adj_b;
2033 odlink_t *odlink;
2034 used_space_ival_t *ival;
2035
[63e27ef]2036 assert(IS_ALIGNED(page, PAGE_SIZE));
2037 assert(count);
[a35b458]2038
[2fc3b2d]2039 /* Interval to the left */
2040 odlink = odict_find_lt(&used_space->ivals, &page, NULL);
2041 a = (odlink != NULL) ?
2042 odict_get_instance(odlink, used_space_ival_t, lused_space) :
2043 NULL;
[a35b458]2044
[2fc3b2d]2045 /* Interval to the right */
2046 b = (a != NULL) ? used_space_next(a) :
2047 used_space_first(used_space);
[a35b458]2048
[2fc3b2d]2049 /* Check for conflict with left interval */
2050 if (a != NULL && overlaps(a->page, P2SZ(a->count), page, P2SZ(count)))
[fc47885]2051 return false;
[a35b458]2052
[2fc3b2d]2053 /* Check for conflict with right interval */
2054 if (b != NULL && overlaps(page, P2SZ(count), b->page, P2SZ(b->count)))
[fc47885]2055 return false;
[a35b458]2056
[2fc3b2d]2057 /* Check if A is adjacent to the new interval */
2058 adj_a = (a != NULL) && (a->page + P2SZ(a->count) == page);
 2059	/* Check if the new interval is adjacent to B */
2060 adj_b = (b != NULL) && page + P2SZ(count) == b->page;
2061
2062 if (adj_a && adj_b) {
2063 /* Fuse into a single interval */
2064 a->count += count + b->count;
2065 used_space_remove_ival(b);
2066 } else if (adj_a) {
2067 /* Append to A */
2068 a->count += count;
2069 } else if (adj_b) {
2070 /* Prepend to B */
2071 b->page = page;
2072 b->count += count;
2073 } else {
2074 /* Create new interval */
2075 ival = slab_alloc(used_space_ival_cache, 0);
2076 ival->used_space = used_space;
2077 odlink_initialize(&ival->lused_space);
2078 ival->page = page;
2079 ival->count = count;
2080
2081 odict_insert(&ival->lused_space, &used_space->ivals,
2082 NULL);
[25bf215]2083 }
[a35b458]2084
[2fc3b2d]2085 used_space->pages += count;
[fc47885]2086 return true;
[25bf215]2087}
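/*
 * Behavior sketch (illustrative, not part of the original file): marking
 * adjacent page runs coalesces them into one interval, while an overlapping
 * insert is refused via the false return value. The area owning @a us is
 * assumed to be locked, as required above.
 */
#if 0
static void example_insert_coalescing(used_space_t *us, uintptr_t base)
{
	bool ok;

	ok = used_space_insert(us, base, 1);            /* New interval. */
	ok = used_space_insert(us, base + P2SZ(1), 2);  /* Appended to it. */
	ok = used_space_insert(us, base, 1);            /* Overlaps: false. */
	(void) ok;
}
#endif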
2088
[df0103f7]2089/*
2090 * Address space related syscalls.
2091 */
2092
[fbcdeb8]2093sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
[ae6021d]2094 uintptr_t bound, as_area_pager_info_t *pager_info)
[df0103f7]2095{
[fbcdeb8]2096 uintptr_t virt = base;
[75b139f]2097 mem_backend_t *backend;
2098 mem_backend_data_t backend_data;
2099
[ae6021d]2100 if (pager_info == AS_AREA_UNPAGED)
[75b139f]2101 backend = &anon_backend;
2102 else {
2103 backend = &user_backend;
[ae6021d]2104 if (copy_from_uspace(&backend_data.pager_info, pager_info,
[3bacee1]2105 sizeof(as_area_pager_info_t)) != EOK) {
[ae6021d]2106 return (sysarg_t) AS_MAP_FAILED;
2107 }
[75b139f]2108 }
[c4c2406]2109 as_area_t *area = as_area_create(AS, flags, size,
[75b139f]2110 AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
[fbcdeb8]2111 if (area == NULL)
[f2c3fed]2112 return (sysarg_t) AS_MAP_FAILED;
[a35b458]2113
[fbcdeb8]2114 return (sysarg_t) virt;
[df0103f7]2115}
2116
[b7fd2a0]2117sys_errno_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2118{
[b7fd2a0]2119 return (sys_errno_t) as_area_resize(AS, address, size, 0);
[7242a78e]2120}
2121
[b7fd2a0]2122sys_errno_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
[c98e6ee]2123{
[b7fd2a0]2124 return (sys_errno_t) as_area_change_flags(AS, flags, address);
[c98e6ee]2125}
2126
[3b3fcf36]2127sys_errno_t sys_as_area_get_info(uintptr_t address, as_area_info_t *dest)
2128{
2129 as_area_t *area;
2130
2131 mutex_lock(&AS->lock);
2132 area = find_area_and_lock(AS, address);
2133 if (area == NULL) {
2134 mutex_unlock(&AS->lock);
2135 return ENOENT;
2136 }
2137
2138 dest->start_addr = area->base;
2139 dest->size = P2SZ(area->pages);
2140 dest->flags = area->flags;
2141
2142 mutex_unlock(&area->lock);
2143 mutex_unlock(&AS->lock);
2144 return EOK;
2145}
2146
[b7fd2a0]2147sys_errno_t sys_as_area_destroy(uintptr_t address)
[7242a78e]2148{
[b7fd2a0]2149 return (sys_errno_t) as_area_destroy(AS, address);
[df0103f7]2150}
[b45c443]2151
[c477c80]2152/** Get list of address space areas.
[336db295]2153 *
[da1bafb]2154 * @param as Address space.
 2155 * @param osize Place to save size of returned buffer.
 2156 *
 2157 * @return Newly allocated area info buffer or @c NULL if out of memory.
[336db295]2158 */
[b389f95]2159as_area_info_t *as_get_area_info(as_t *as, size_t *osize)
[336db295]2160{
2161 mutex_lock(&as->lock);
[a35b458]2162
[88cc71c0]2163 /* Count number of areas. */
2164 size_t area_cnt = odict_count(&as->as_areas);
[a35b458]2165
[da1bafb]2166 size_t isize = area_cnt * sizeof(as_area_info_t);
[b389f95]2167 as_area_info_t *info = malloc(isize);
2168 if (!info) {
2169 mutex_unlock(&as->lock);
2170 return NULL;
2171 }
[a35b458]2172
[88cc71c0]2173 /* Record area data. */
[a35b458]2174
[da1bafb]2175 size_t area_idx = 0;
[a35b458]2176
[88cc71c0]2177 as_area_t *area = as_area_first(as);
2178 while (area != NULL) {
2179 assert(area_idx < area_cnt);
2180 mutex_lock(&area->lock);
[a35b458]2181
[88cc71c0]2182 info[area_idx].start_addr = area->base;
2183 info[area_idx].size = P2SZ(area->pages);
2184 info[area_idx].flags = area->flags;
2185 ++area_idx;
[a35b458]2186
[88cc71c0]2187 mutex_unlock(&area->lock);
2188 area = as_area_next(area);
[336db295]2189 }
[a35b458]2190
[336db295]2191 mutex_unlock(&as->lock);
[a35b458]2192
[336db295]2193 *osize = isize;
[b389f95]2194 return info;
[336db295]2195}
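/*
 * Usage sketch (illustrative, not part of the original file): the buffer
 * returned by as_get_area_info() is allocated with malloc() above, so the
 * caller releases it with free() when done.
 */
#if 0
static void example_dump_areas(as_t *as)
{
	size_t size;
	as_area_info_t *info = as_get_area_info(as, &size);

	if (info != NULL) {
		size_t cnt = size / sizeof(as_area_info_t);
		for (size_t i = 0; i < cnt; i++) {
			printf("area at %p, %zu bytes\n",
			    (void *) info[i].start_addr, info[i].size);
		}
		free(info);
	}
}
#endif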
2196
[64c2ad5]2197/** Print out information about address space.
2198 *
[da1bafb]2199 * @param as Address space.
2200 *
[64c2ad5]2201 */
2202void as_print(as_t *as)
2203{
2204 mutex_lock(&as->lock);
[a35b458]2205
[0b37882]2206 /* Print out info about address space areas */
[88cc71c0]2207 as_area_t *area = as_area_first(as);
2208 while (area != NULL) {
2209 mutex_lock(&area->lock);
2210 printf("as_area: %p, base=%p, pages=%zu"
2211 " (%p - %p)\n", area, (void *) area->base,
2212 area->pages, (void *) area->base,
2213 (void *) (area->base + P2SZ(area->pages)));
2214 mutex_unlock(&area->lock);
[a35b458]2215
[88cc71c0]2216 area = as_area_next(area);
[64c2ad5]2217 }
[a35b458]2218
[64c2ad5]2219 mutex_unlock(&as->lock);
2220}
2221
[cc73a8a1]2222/** @}
[b45c443]2223 */