source: mainline/kernel/generic/src/mm/as.c@ 7be8d4d

Last change on this file since 7be8d4d was 7be8d4d, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Replace B+tree with ordered dict. for used space

Replace the use of B+tree with ordered dictionary for used space,
adding a little bit more abstraction around used space tracking.
This allows performing TLB shootdown while shrinking an area
in a single sequence. A generic used_space_remove() is no longer
needed.

[20d50a1]1/*
[0321109]2 * Copyright (c) 2010 Jakub Jermar
[88cc71c0]3 * Copyright (c) 2018 Jiri Svoboda
[20d50a1]4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
[174156fd]30/** @addtogroup kernel_generic_mm
[b45c443]31 * @{
32 */
33
[9179d0a]34/**
[b45c443]35 * @file
[da1bafb]36 * @brief Address space related functions.
[9179d0a]37 *
[20d50a1]38 * This file contains address space manipulation functions.
39 * Roughly speaking, this is a higher-level client of
40 * Virtual Address Translation (VAT) subsystem.
[9179d0a]41 *
42 * Functionality provided by this file allows one to
[cc73a8a1]43 * create address spaces and create, resize and share
[9179d0a]44 * address space areas.
45 *
46 * @see page.c
47 *
[20d50a1]48 */
49
50#include <mm/as.h>
[ef67bab]51#include <arch/mm/as.h>
[20d50a1]52#include <mm/page.h>
53#include <mm/frame.h>
[085d973]54#include <mm/slab.h>
[20d50a1]55#include <mm/tlb.h>
56#include <arch/mm/page.h>
57#include <genarch/mm/page_pt.h>
[2802767]58#include <genarch/mm/page_ht.h>
[4512d7e]59#include <mm/asid.h>
[20d50a1]60#include <arch/mm/asid.h>
[31d8e10]61#include <preemption.h>
[20d50a1]62#include <synch/spinlock.h>
[1068f6a]63#include <synch/mutex.h>
[5c9a08b]64#include <adt/list.h>
[df0103f7]65#include <proc/task.h>
[e3c762cd]66#include <proc/thread.h>
[20d50a1]67#include <arch/asm.h>
[df0103f7]68#include <panic.h>
[63e27ef]69#include <assert.h>
[bab75df6]70#include <stdio.h>
[44a7ee5]71#include <mem.h>
[5a7d9d1]72#include <macros.h>
[0b37882]73#include <bitops.h>
[20d50a1]74#include <arch.h>
[df0103f7]75#include <errno.h>
76#include <config.h>
[25bf215]77#include <align.h>
[d99c1d2]78#include <typedefs.h>
[e3c762cd]79#include <syscall/copy.h>
80#include <arch/interrupt.h>
[1dbc43f]81#include <interrupt.h>
[aafed15]82#include <stdlib.h>
[20d50a1]83
[cc73a8a1]84/**
85 * Each architecture decides what functions will be used to carry out
86 * address space operations such as creating or locking page tables.
87 */
[ef67bab]88as_operations_t *as_operations = NULL;
[20d50a1]89
[de0af3a]90/** Cache for as_t objects */
[82d515e9]91static slab_cache_t *as_cache;
[57da95c]92
[de0af3a]93/** Cache for as_page_mapping_t objects */
94static slab_cache_t *as_page_mapping_cache;
95
[7be8d4d]96/** Cache for used_space_ival_t objects */
97static slab_cache_t *used_space_ival_cache;
98
[fc47885]99/** ASID subsystem lock.
100 *
101 * This lock protects:
[55b77d9]102 * - inactive_as_with_asid_list
[879585a3]103 * - as->asid for each as of the as_t type
104 * - asids_allocated counter
[da1bafb]105 *
[6f4495f5]106 */
[879585a3]107SPINLOCK_INITIALIZE(asidlock);
[7e4e532]108
109/**
[fc47885]110 * Inactive address spaces (on all processors)
111 * that have valid ASID.
[7e4e532]112 */
[55b77d9]113LIST_INITIALIZE(inactive_as_with_asid_list);
[7e4e532]114
[071a8ae6]115/** Kernel address space. */
116as_t *AS_KERNEL = NULL;
117
[88cc71c0]118static void *as_areas_getkey(odlink_t *);
119static int as_areas_cmp(void *, void *);
120
[7be8d4d]121static void used_space_initialize(used_space_t *);
122static void used_space_finalize(used_space_t *);
123static void *used_space_getkey(odlink_t *);
124static int used_space_cmp(void *, void *);
125static used_space_ival_t *used_space_last(used_space_t *);
126static void used_space_remove_ival(used_space_ival_t *);
127static void used_space_shorten_ival(used_space_ival_t *, size_t);
128
[b7fd2a0]129NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags)
[29b2bbf]130{
131 as_t *as = (as_t *) obj;
[a35b458]132
[29b2bbf]133 link_initialize(&as->inactive_as_with_asid_link);
[7f341820]134 mutex_initialize(&as->lock, MUTEX_PASSIVE);
[a35b458]135
[fc47885]136 return as_constructor_arch(as, flags);
[29b2bbf]137}
138
[7a0359b]139NO_TRACE static size_t as_destructor(void *obj)
[29b2bbf]140{
[fc47885]141 return as_destructor_arch((as_t *) obj);
[29b2bbf]142}
143
[ef67bab]144/** Initialize address space subsystem. */
145void as_init(void)
146{
147 as_arch_init();
[a35b458]148
[82d515e9]149 as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
[6f4495f5]150 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
[a35b458]151
[de0af3a]152 as_page_mapping_cache = slab_cache_create("as_page_mapping_t",
153 sizeof(as_page_mapping_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
154
[7be8d4d]155 used_space_ival_cache = slab_cache_create("used_space_ival_t",
156 sizeof(used_space_ival_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
157
[8e1ea655]158 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]159 if (!AS_KERNEL)
[f651e80]160 panic("Cannot create kernel address space.");
[ef67bab]161}
162
[071a8ae6]163/** Create address space.
164 *
[da1bafb]165 * @param flags Flags that influence the way in which the address
166 * space is created.
167 *
[071a8ae6]168 */
[da1bafb]169as_t *as_create(unsigned int flags)
[20d50a1]170{
[abf6c01]171 as_t *as = (as_t *) slab_alloc(as_cache, FRAME_ATOMIC);
172 if (!as)
173 return NULL;
174
[29b2bbf]175 (void) as_create_arch(as, 0);
[a35b458]176
[88cc71c0]177 odict_initialize(&as->as_areas, as_areas_getkey, as_areas_cmp);
[a35b458]178
[bb68433]179 if (flags & FLAG_AS_KERNEL)
180 as->asid = ASID_KERNEL;
181 else
182 as->asid = ASID_INVALID;
[a35b458]183
[78de83de]184 refcount_init(&as->refcount);
[47800e0]185 as->cpu_refcount = 0;
[a35b458]186
[b3f8fb7]187#ifdef AS_PAGE_TABLE
[80bcaed]188 as->genarch.page_table = page_table_create(flags);
[b3f8fb7]189#else
190 page_table_create(flags);
191#endif
[a35b458]192
[20d50a1]193 return as;
194}
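/*
 * Illustrative sketch (not part of the original source): a caller setting up
 * a new user task would typically create the address space and later drop
 * its reference, assuming no CPU has the address space installed in between:
 *
 *	as_t *as = as_create(0);
 *	if (as == NULL)
 *		return ENOMEM;
 *	// ... attach 'as' to the new task ...
 *	as_release(as);	// the last reference destroys the address space
 */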
195
[482826d]196/** Destroy address space.
197 *
[6f4495f5]198 * When there are no tasks referencing this address space (i.e. its refcount is
199 * zero), the address space can be destroyed.
[31d8e10]200 *
201 * We know that we don't hold any spinlock.
[6745592]202 *
[da1bafb]203 * @param as Address space to be destroyed.
204 *
[482826d]205 */
[ca21f1e2]206static void as_destroy(as_t *as)
[5be1923]207{
[31d8e10]208 DEADLOCK_PROBE_INIT(p_asidlock);
[a35b458]209
[63e27ef]210 assert(as != AS);
[78de83de]211 assert(refcount_unique(&as->refcount));
[a35b458]212
[482826d]213 /*
[663bb537]214 * Since there is no reference to this address space, it is safe not to
215 * lock its mutex.
[482826d]216 */
[a35b458]217
[31d8e10]218 /*
219 * We need to avoid deadlock between TLB shootdown and asidlock.
220 * We therefore try to take asid conditionally and if we don't succeed,
221 * we enable interrupts and try again. This is done while preemption is
222 * disabled to prevent nested context switches. We also depend on the
223 * fact that so far no spinlocks are held.
224 */
225 preemption_disable();
[da1bafb]226 ipl_t ipl = interrupts_read();
[a35b458]227
[31d8e10]228retry:
229 interrupts_disable();
230 if (!spinlock_trylock(&asidlock)) {
231 interrupts_enable();
232 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
233 goto retry;
234 }
[a35b458]235
[da1bafb]236 /* Interrupts disabled, enable preemption */
237 preemption_enable();
[a35b458]238
[da1bafb]239 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
[1624aae]240 if (as->cpu_refcount == 0)
[31e8ddd]241 list_remove(&as->inactive_as_with_asid_link);
[a35b458]242
[482826d]243 asid_put(as->asid);
244 }
[a35b458]245
[879585a3]246 spinlock_unlock(&asidlock);
[fdaad75d]247 interrupts_restore(ipl);
[a35b458]248
[482826d]249 /*
250 * Destroy address space areas of the address space.
[88cc71c0]251 * Need to start from the beginning each time since we are destroying
252 * the areas.
[da1bafb]253 */
[88cc71c0]254 as_area_t *area = as_area_first(as);
255 while (area != NULL) {
256 /*
257 * XXX We already have as_area_t, but as_area_destroy will
258 * have to search for it. This could be made faster.
259 */
260 as_area_destroy(as, area->base);
261 area = as_area_first(as);
[482826d]262 }
[a35b458]263
[88cc71c0]264 odict_finalize(&as->as_areas);
[a35b458]265
[b3f8fb7]266#ifdef AS_PAGE_TABLE
[80bcaed]267 page_table_destroy(as->genarch.page_table);
[b3f8fb7]268#else
269 page_table_destroy(NULL);
270#endif
[a35b458]271
[82d515e9]272 slab_free(as_cache, as);
[5be1923]273}
274
[0321109]275/** Hold a reference to an address space.
276 *
[fc47885]277 * Holding a reference to an address space prevents destruction
278 * of that address space.
[0321109]279 *
[da1bafb]280 * @param as Address space to be held.
281 *
[0321109]282 */
[7a0359b]283NO_TRACE void as_hold(as_t *as)
[0321109]284{
[78de83de]285 refcount_up(&as->refcount);
[0321109]286}
287
288/** Release a reference to an address space.
289 *
[fc47885]290 * The last one to release a reference to an address space
291 * destroys the address space.
[0321109]292 *
[78de83de]293 * @param as Address space to be released.
[da1bafb]294 *
[0321109]295 */
[7a0359b]296NO_TRACE void as_release(as_t *as)
[0321109]297{
[78de83de]298 if (refcount_down(&as->refcount))
[0321109]299 as_destroy(as);
300}
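/*
 * Usage sketch (illustrative only): code that keeps a pointer to an address
 * space beyond the lifetime guarantees of its owner should bracket the use
 * with a reference:
 *
 *	as_hold(as);
 *	// ... safely use 'as' even if the owning task exits meanwhile ...
 *	as_release(as);	// may call as_destroy() on the last reference
 */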
301
[88cc71c0]302/** Return first address space area.
303 *
304 * @param as Address space
305 * @return First area in @a as (i.e. area with the lowest base address)
306 * or @c NULL if there is none
307 */
308as_area_t *as_area_first(as_t *as)
309{
310 odlink_t *odlink = odict_first(&as->as_areas);
311 if (odlink == NULL)
312 return NULL;
313
314 return odict_get_instance(odlink, as_area_t, las_areas);
315}
316
317/** Return next address space area.
318 *
319 * @param cur Current area
320 * @return Next area in the same address space or @c NULL if @a cur is the
321 * last area.
322 */
323as_area_t *as_area_next(as_area_t *cur)
324{
325 odlink_t *odlink = odict_next(&cur->las_areas, &cur->as->as_areas);
326 if (odlink == NULL)
327 return NULL;
328
329 return odict_get_instance(odlink, as_area_t, las_areas);
330}
331
332/** Determine if area with specified parameters would conflict with
333 * a specific existing address space area.
334 *
335 * @param addr Starting virtual address of the area being tested.
336 * @param count Number of pages in the area being tested.
337 * @param guarded True if the area being tested is protected by guard pages.
338 * @param area Area against which we are testing.
339 *
340 * @return True if the two areas conflict, false otherwise.
341 */
342NO_TRACE static bool area_is_conflicting(uintptr_t addr,
343 size_t count, bool guarded, as_area_t *area)
344{
345 assert((addr % PAGE_SIZE) == 0);
346
347 size_t gsize = P2SZ(count);
[6785b88b]348 size_t agsize = P2SZ(area->pages);
[cd1ecf11]349
350 /*
351 * A guarded area has one guard page before, one page after.
352 * What we do here is: if either area is guarded, we add
353 * PAGE_SIZE to the size of both areas. That guarantees
354 * they will be spaced at least one page apart.
355 */
356 if (guarded || (area->flags & AS_AREA_GUARD) != 0) {
357 /* Add guard page size unless area is at the end of VA domain */
358 if (!overflows(addr, P2SZ(count)))
359 gsize += PAGE_SIZE;
360
361 /* Add guard page size unless area is at the end of VA domain */
362 if (!overflows(area->base, P2SZ(area->pages)))
363 agsize += PAGE_SIZE;
364 }
[88cc71c0]365
366 return overlaps(addr, gsize, area->base, agsize);
367
368}
369
[e3ee9b9]370/** Check area conflicts with other areas.
371 *
[35a3d950]372 * @param as Address space.
373 * @param addr Starting virtual address of the area being tested.
374 * @param count Number of pages in the area being tested.
375 * @param guarded True if the area being tested is protected by guard pages.
[88cc71c0]376 * @param avoid Do not touch this area. I.e. this area is not considered
377 * as presenting a conflict.
[e3ee9b9]378 *
379 * @return True if there is no conflict, false otherwise.
380 *
381 */
[0b37882]382NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
[35a3d950]383 size_t count, bool guarded, as_area_t *avoid)
[e3ee9b9]384{
[63e27ef]385 assert((addr % PAGE_SIZE) == 0);
386 assert(mutex_locked(&as->lock));
[94795812]387
388 /*
389 * If the addition of the supposed area address and size overflows,
390 * report conflict.
391 */
392 if (overflows_into_positive(addr, P2SZ(count)))
393 return false;
[a35b458]394
[e3ee9b9]395 /*
396 * We don't want any area to have conflicts with NULL page.
397 */
[b6f3e7e]398 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
[e3ee9b9]399 return false;
[35a3d950]400
[e3ee9b9]401 /*
[88cc71c0]402 * To determine if we overlap with another area, we just need
 403 * to check for overlap with the last area with base address <=
 404 * ours and with the first area with base address > ours.
405 *
406 * First find last area with <= base address.
[e3ee9b9]407 */
[88cc71c0]408 odlink_t *odlink = odict_find_leq(&as->as_areas, &addr, NULL);
409 if (odlink != NULL) {
410 as_area_t *area = odict_get_instance(odlink, as_area_t,
411 las_areas);
[a35b458]412
[0b37882]413 if (area != avoid) {
414 mutex_lock(&area->lock);
[88cc71c0]415 if (area_is_conflicting(addr, count, guarded, area)) {
[0b37882]416 mutex_unlock(&area->lock);
417 return false;
418 }
[a35b458]419
[e3ee9b9]420 mutex_unlock(&area->lock);
421 }
[88cc71c0]422
423 /* Next area */
424 odlink = odict_next(odlink, &as->as_areas);
[e3ee9b9]425 }
[a35b458]426
[d9d0088]427 /*
428 * Next area, if any, is the first with base > than our base address.
429 * If there was no area with <= base, we need to look at the first area.
430 */
431 if (odlink == NULL)
432 odlink = odict_first(&as->as_areas);
433
[88cc71c0]434 if (odlink != NULL) {
435 as_area_t *area = odict_get_instance(odlink, as_area_t,
436 las_areas);
[a35b458]437
[0b37882]438 if (area != avoid) {
439 mutex_lock(&area->lock);
[88cc71c0]440 if (area_is_conflicting(addr, count, guarded, area)) {
[0b37882]441 mutex_unlock(&area->lock);
442 return false;
443 }
[a35b458]444
[e3ee9b9]445 mutex_unlock(&area->lock);
446 }
447 }
[a35b458]448
[e3ee9b9]449 /*
450 * So far, the area does not conflict with other areas.
[57355a40]451 * Check if it is contained in the user address space.
[e3ee9b9]452 */
453 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
[57355a40]454 return iswithin(USER_ADDRESS_SPACE_START,
455 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
456 addr, P2SZ(count));
[e3ee9b9]457 }
[a35b458]458
[e3ee9b9]459 return true;
460}
461
[fbcdeb8]462/** Return pointer to unmapped address space area
463 *
464 * The address space must be already locked when calling
465 * this function.
466 *
[35a3d950]467 * @param as Address space.
468 * @param bound Lowest address bound.
469 * @param size Requested size of the allocation.
470 * @param guarded True if the allocation must be protected by guard pages.
[fbcdeb8]471 *
472 * @return Address of the beginning of unmapped address space area.
473 * @return -1 if no suitable address space area was found.
474 *
475 */
476NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
[35a3d950]477 size_t size, bool guarded)
[fbcdeb8]478{
[63e27ef]479 assert(mutex_locked(&as->lock));
[a35b458]480
[fbcdeb8]481 if (size == 0)
482 return (uintptr_t) -1;
[a35b458]483
[fbcdeb8]484 /*
485 * Make sure we allocate from page-aligned
486 * address. Check for possible overflow in
487 * each step.
488 */
[a35b458]489
[fbcdeb8]490 size_t pages = SIZE2FRAMES(size);
[a35b458]491
[fbcdeb8]492 /*
493 * Find the lowest unmapped address aligned on the size
494 * boundary, not smaller than bound and of the required size.
495 */
[a35b458]496
[fbcdeb8]497 /* First check the bound address itself */
498 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
[35a3d950]499 if (addr >= bound) {
500 if (guarded) {
[7c3fb9b]501 /*
502 * Leave an unmapped page between the lower
[35a3d950]503 * bound and the area's start address.
504 */
505 addr += P2SZ(1);
506 }
507
508 if (check_area_conflicts(as, addr, pages, guarded, NULL))
509 return addr;
510 }
[a35b458]511
[fbcdeb8]512 /* Then check the addresses behind each area */
[88cc71c0]513 as_area_t *area = as_area_first(as);
514 while (area != NULL) {
515 mutex_lock(&area->lock);
[a35b458]516
[d9d0088]517 addr = area->base + P2SZ(area->pages);
[a35b458]518
[88cc71c0]519 if (guarded || area->flags & AS_AREA_GUARD) {
520 /*
521 * We must leave an unmapped page
522 * between the two areas.
523 */
524 addr += P2SZ(1);
525 }
[35a3d950]526
[88cc71c0]527 bool avail =
528 ((addr >= bound) && (addr >= area->base) &&
529 (check_area_conflicts(as, addr, pages, guarded, area)));
[35a3d950]530
[88cc71c0]531 mutex_unlock(&area->lock);
[a35b458]532
[88cc71c0]533 if (avail)
534 return addr;
[a35b458]535
[88cc71c0]536 area = as_area_next(area);
[fbcdeb8]537 }
[a35b458]538
[fbcdeb8]539 /* No suitable address space area found */
540 return (uintptr_t) -1;
541}
542
[de0af3a]543/** Get key function for pagemap ordered dictionary.
544 *
545 * The key is the virtual address of the page (as_page_mapping_t.vaddr)
546 *
547 * @param odlink Link to as_pagemap_t.map ordered dictionary
548 * @return Pointer to virtual address cast as @c void *
549 */
550static void *as_pagemap_getkey(odlink_t *odlink)
551{
552 as_page_mapping_t *mapping;
553
554 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap);
555 return (void *) &mapping->vaddr;
556}
557
558/** Comparison function for pagemap ordered dictionary.
559 *
560 * @param a Pointer to virtual address cast as @c void *
561 * @param b Pointer to virtual address cast as @c void *
562 * @return <0, =0, >0 if virtual address a is less than, equal to, or
[7be8d4d]563 * greater than b, respectively.
[de0af3a]564 */
565static int as_pagemap_cmp(void *a, void *b)
566{
567 uintptr_t va = *(uintptr_t *)a;
568 uintptr_t vb = *(uintptr_t *)b;
569
 570 return (va < vb) ? -1 : ((va > vb) ? 1 : 0);
571}
572
573/** Initialize pagemap.
574 *
575 * @param pagemap Pagemap
576 */
577NO_TRACE void as_pagemap_initialize(as_pagemap_t *pagemap)
578{
579 odict_initialize(&pagemap->map, as_pagemap_getkey, as_pagemap_cmp);
580}
581
582/** Finalize pagemap.
583 *
584 * Destroy any entries in the pagemap.
585 *
586 * @param pagemap Pagemap
587 */
588NO_TRACE void as_pagemap_finalize(as_pagemap_t *pagemap)
589{
590 as_page_mapping_t *mapping = as_pagemap_first(pagemap);
591 while (mapping != NULL) {
592 as_pagemap_remove(mapping);
593 mapping = as_pagemap_first(pagemap);
594 }
595 odict_finalize(&pagemap->map);
596}
597
598/** Get first page mapping.
599 *
600 * @param pagemap Pagemap
601 * @return First mapping or @c NULL if there is none
602 */
603NO_TRACE as_page_mapping_t *as_pagemap_first(as_pagemap_t *pagemap)
604{
605 odlink_t *odlink;
606
607 odlink = odict_first(&pagemap->map);
608 if (odlink == NULL)
609 return NULL;
610
611 return odict_get_instance(odlink, as_page_mapping_t, lpagemap);
612}
613
614/** Get next page mapping.
615 *
616 * @param cur Current mapping
617 * @return Next mapping or @c NULL if @a cur is the last one
618 */
619NO_TRACE as_page_mapping_t *as_pagemap_next(as_page_mapping_t *cur)
620{
621 odlink_t *odlink;
622
623 odlink = odict_next(&cur->lpagemap, &cur->pagemap->map);
624 if (odlink == NULL)
625 return NULL;
626
627 return odict_get_instance(odlink, as_page_mapping_t, lpagemap);
628}
629
630/** Find frame by virtual address.
631 *
632 * @param pagemap Pagemap
633 * @param vaddr Virtual address of page
634 * @param rframe Place to store physical frame address
635 * @return EOK on succcess or ENOENT if no mapping found
636 */
637NO_TRACE errno_t as_pagemap_find(as_pagemap_t *pagemap, uintptr_t vaddr,
638 uintptr_t *rframe)
639{
640 odlink_t *odlink;
641 as_page_mapping_t *mapping;
642
643 odlink = odict_find_eq(&pagemap->map, &vaddr, NULL);
644 if (odlink == NULL)
645 return ENOENT;
646
647 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap);
648 *rframe = mapping->frame;
649 return EOK;
650}
651
652/** Insert new page mapping.
653 *
654 * This function can block to allocate kernel memory.
655 *
656 * @param pagemap Pagemap
657 * @param vaddr Virtual page address
658 * @param frame Physical frame address
659 */
660NO_TRACE void as_pagemap_insert(as_pagemap_t *pagemap, uintptr_t vaddr,
661 uintptr_t frame)
662{
663 as_page_mapping_t *mapping;
664
665 mapping = slab_alloc(as_page_mapping_cache, 0);
666 mapping->pagemap = pagemap;
667 odlink_initialize(&mapping->lpagemap);
668 mapping->vaddr = vaddr;
669 mapping->frame = frame;
670 odict_insert(&mapping->lpagemap, &pagemap->map, NULL);
671}
672
673/** Remove page mapping.
674 *
675 * @param mapping Mapping
676 */
677NO_TRACE void as_pagemap_remove(as_page_mapping_t *mapping)
678{
679 odict_remove(&mapping->lpagemap);
680 slab_free(as_page_mapping_cache, mapping);
681}
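/*
 * Illustrative sketch (not part of the original source): a sharing backend
 * can record the frame backing a shared page and look it up on a later
 * page fault, using the sh_info pagemap as elsewhere in this file:
 *
 *	as_pagemap_insert(&sh_info->pagemap, page, frame);
 *	...
 *	uintptr_t found_frame;
 *	if (as_pagemap_find(&sh_info->pagemap, page, &found_frame) == EOK) {
 *		// reuse the previously allocated frame
 *	}
 */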
682
[83b6ba9f]683/** Remove reference to address space area share info.
684 *
685 * If the reference count drops to 0, the sh_info is deallocated.
686 *
687 * @param sh_info Pointer to address space area share info.
688 *
689 */
690NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
691{
692 bool dealloc = false;
[a35b458]693
[83b6ba9f]694 mutex_lock(&sh_info->lock);
[63e27ef]695 assert(sh_info->refcount);
[a35b458]696
[83b6ba9f]697 if (--sh_info->refcount == 0) {
698 dealloc = true;
[a35b458]699
[83b6ba9f]700 /*
701 * Now walk carefully the pagemap B+tree and free/remove
702 * reference from all frames found there.
703 */
[de0af3a]704 as_page_mapping_t *mapping = as_pagemap_first(&sh_info->pagemap);
705 while (mapping != NULL) {
706 frame_free(mapping->frame, 1);
707 mapping = as_pagemap_next(mapping);
[83b6ba9f]708 }
[a35b458]709
[83b6ba9f]710 }
711 mutex_unlock(&sh_info->lock);
[a35b458]712
[83b6ba9f]713 if (dealloc) {
714 if (sh_info->backend && sh_info->backend->destroy_shared_data) {
715 sh_info->backend->destroy_shared_data(
716 sh_info->backend_shared_data);
717 }
[de0af3a]718 as_pagemap_finalize(&sh_info->pagemap);
[83b6ba9f]719 free(sh_info);
720 }
721}
722
[20d50a1]723/** Create address space area of common attributes.
724 *
725 * The created address space area is added to the target address space.
726 *
[da1bafb]727 * @param as Target address space.
728 * @param flags Flags of the area memory.
729 * @param size Size of area.
730 * @param attrs Attributes of the area.
731 * @param backend Address space area backend. NULL if no backend is used.
[826599a2]732 * @param backend_data NULL or a pointer to custom backend data.
[fbcdeb8]733 * @param base Starting virtual address of the area.
[f2c3fed]734 * If set to AS_AREA_ANY, a suitable mappable area is
735 * found.
736 * @param bound Lowest address bound if base is set to AS_AREA_ANY.
[fbcdeb8]737 * Otherwise ignored.
[da1bafb]738 *
739 * @return Address space area on success or NULL on failure.
[20d50a1]740 *
741 */
[da1bafb]742as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
[fbcdeb8]743 unsigned int attrs, mem_backend_t *backend,
744 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
[20d50a1]745{
[f2c3fed]746 if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
[37e7d2b9]747 return NULL;
[a35b458]748
[0b37882]749 if (size == 0)
[dbbeb26]750 return NULL;
[0941e9ae]751
[0b37882]752 size_t pages = SIZE2FRAMES(size);
[a35b458]753
[37e7d2b9]754 /* Writeable executable areas are not supported. */
755 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
756 return NULL;
[35a3d950]757
758 bool const guarded = flags & AS_AREA_GUARD;
[a35b458]759
[1068f6a]760 mutex_lock(&as->lock);
[a35b458]761
[f2c3fed]762 if (*base == (uintptr_t) AS_AREA_ANY) {
[35a3d950]763 *base = as_get_unmapped_area(as, bound, size, guarded);
[fbcdeb8]764 if (*base == (uintptr_t) -1) {
765 mutex_unlock(&as->lock);
766 return NULL;
767 }
768 }
[35a3d950]769
[83b6ba9f]770 if (overflows_into_positive(*base, size)) {
771 mutex_unlock(&as->lock);
[0941e9ae]772 return NULL;
[83b6ba9f]773 }
[0941e9ae]774
[35a3d950]775 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
[1068f6a]776 mutex_unlock(&as->lock);
[37e7d2b9]777 return NULL;
778 }
[a35b458]779
[11b285d]780 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t));
[7473807]781 if (!area) {
782 mutex_unlock(&as->lock);
783 return NULL;
784 }
[a35b458]785
[da1bafb]786 mutex_initialize(&area->lock, MUTEX_PASSIVE);
[a35b458]787
[da1bafb]788 area->as = as;
[88cc71c0]789 odlink_initialize(&area->las_areas);
[da1bafb]790 area->flags = flags;
791 area->attributes = attrs;
[0b37882]792 area->pages = pages;
[fbcdeb8]793 area->base = *base;
[da1bafb]794 area->backend = backend;
[83b6ba9f]795 area->sh_info = NULL;
[a35b458]796
[0ee077ee]797 if (backend_data)
[da1bafb]798 area->backend_data = *backend_data;
[0ee077ee]799 else
[da1bafb]800 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
[83b6ba9f]801
802 share_info_t *si = NULL;
803
804 /*
[ae7d03c]805 * Create the sharing info structure.
806 * We do this in advance for every new area, even if it is not going
807 * to be shared.
808 */
[83b6ba9f]809 if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
[11b285d]810 si = (share_info_t *) malloc(sizeof(share_info_t));
[7473807]811 if (!si) {
812 free(area);
813 mutex_unlock(&as->lock);
814 return NULL;
815 }
[83b6ba9f]816 mutex_initialize(&si->lock, MUTEX_PASSIVE);
817 si->refcount = 1;
818 si->shared = false;
819 si->backend_shared_data = NULL;
820 si->backend = backend;
[de0af3a]821 as_pagemap_initialize(&si->pagemap);
[83b6ba9f]822
823 area->sh_info = si;
[a35b458]824
[83b6ba9f]825 if (area->backend && area->backend->create_shared_data) {
826 if (!area->backend->create_shared_data(area)) {
827 free(area);
828 mutex_unlock(&as->lock);
829 sh_info_remove_reference(si);
830 return NULL;
831 }
832 }
833 }
834
[e394b736]835 if (area->backend && area->backend->create) {
836 if (!area->backend->create(area)) {
837 free(area);
838 mutex_unlock(&as->lock);
[83b6ba9f]839 if (!(attrs & AS_AREA_ATTR_PARTIAL))
840 sh_info_remove_reference(si);
[e394b736]841 return NULL;
842 }
843 }
[83b6ba9f]844
[7be8d4d]845 used_space_initialize(&area->used_space);
[88cc71c0]846 odict_insert(&area->las_areas, &as->as_areas, NULL);
[a35b458]847
[1068f6a]848 mutex_unlock(&as->lock);
[a35b458]849
[da1bafb]850 return area;
[20d50a1]851}
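/*
 * Illustrative sketch (not part of the original source): creating a two-page
 * anonymous, readable and writable area at any suitable address, assuming
 * AS_AREA_ATTR_NONE and the anonymous backend:
 *
 *	uintptr_t base = (uintptr_t) AS_AREA_ANY;
 *	as_area_t *area = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE, 2 * PAGE_SIZE, AS_AREA_ATTR_NONE,
 *	    &anon_backend, NULL, &base, 0);
 *	if (area == NULL)
 *		return ENOMEM;	// no suitable address range or out of memory
 */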
852
[e3ee9b9]853/** Find address space area and lock it.
854 *
855 * @param as Address space.
856 * @param va Virtual address.
857 *
858 * @return Locked address space area containing va on success or
859 * NULL on failure.
860 *
861 */
[7a0359b]862NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
[e3ee9b9]863{
[63e27ef]864 assert(mutex_locked(&as->lock));
[a35b458]865
[88cc71c0]866 odlink_t *odlink = odict_find_leq(&as->as_areas, &va, NULL);
867 if (odlink == NULL)
868 return NULL;
[a35b458]869
[88cc71c0]870 as_area_t *area = odict_get_instance(odlink, as_area_t, las_areas);
871 mutex_lock(&area->lock);
[a35b458]872
[88cc71c0]873 assert(area->base <= va);
[a35b458]874
[88cc71c0]875 if (va <= area->base + (P2SZ(area->pages) - 1))
876 return area;
[a35b458]877
[88cc71c0]878 mutex_unlock(&area->lock);
[e3ee9b9]879 return NULL;
880}
881
[df0103f7]882/** Find address space area and change it.
883 *
[da1bafb]884 * @param as Address space.
885 * @param address Virtual address belonging to the area to be changed.
886 * Must be page-aligned.
887 * @param size New size of the virtual memory block starting at
888 * address.
889 * @param flags Flags influencing the remap operation. Currently unused.
890 *
891 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]892 *
[da1bafb]893 */
[b7fd2a0]894errno_t as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
[df0103f7]895{
[59fb782]896 if (!IS_ALIGNED(address, PAGE_SIZE))
897 return EINVAL;
898
[1068f6a]899 mutex_lock(&as->lock);
[a35b458]900
[df0103f7]901 /*
902 * Locate the area.
903 */
[da1bafb]904 as_area_t *area = find_area_and_lock(as, address);
[df0103f7]905 if (!area) {
[1068f6a]906 mutex_unlock(&as->lock);
[7242a78e]907 return ENOENT;
[df0103f7]908 }
[01029fc]909
910 if (!area->backend->is_resizable(area)) {
[df0103f7]911 /*
[01029fc]912 * The backend does not support resizing for this area.
[df0103f7]913 */
[1068f6a]914 mutex_unlock(&area->lock);
915 mutex_unlock(&as->lock);
[7242a78e]916 return ENOTSUP;
[df0103f7]917 }
[a35b458]918
[83b6ba9f]919 mutex_lock(&area->sh_info->lock);
920 if (area->sh_info->shared) {
[8182031]921 /*
[da1bafb]922 * Remapping of shared address space areas
[8182031]923 * is not supported.
924 */
[83b6ba9f]925 mutex_unlock(&area->sh_info->lock);
[8182031]926 mutex_unlock(&area->lock);
927 mutex_unlock(&as->lock);
928 return ENOTSUP;
929 }
[83b6ba9f]930 mutex_unlock(&area->sh_info->lock);
[a35b458]931
[da1bafb]932 size_t pages = SIZE2FRAMES((address - area->base) + size);
[df0103f7]933 if (!pages) {
934 /*
935 * Zero size address space areas are not allowed.
936 */
[1068f6a]937 mutex_unlock(&area->lock);
938 mutex_unlock(&as->lock);
[7242a78e]939 return EPERM;
[df0103f7]940 }
[a35b458]941
[df0103f7]942 if (pages < area->pages) {
[b6f3e7e]943 uintptr_t start_free = area->base + P2SZ(pages);
[a35b458]944
[df0103f7]945 /*
946 * Shrinking the area.
947 * No need to check for overlaps.
948 */
[a35b458]949
[c964521]950 page_table_lock(as, false);
[a35b458]951
[7be8d4d]952 /*
953 * Start TLB shootdown sequence.
954 */
955
956 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
957 as->asid, area->base + P2SZ(pages),
958 area->pages - pages);
959
[56789125]960 /*
961 * Remove frames belonging to used space starting from
962 * the highest addresses downwards until an overlap with
[7be8d4d]963 * the resized address space area is found.
[da1bafb]964 */
965 bool cond = true;
966 while (cond) {
[7be8d4d]967 used_space_ival_t *ival =
968 used_space_last(&area->used_space);
969 assert(ival != NULL);
[a35b458]970
[7be8d4d]971 uintptr_t ptr = ival->page;
972 size_t pcount = ival->count;
973 size_t i = 0;
974
975 if (overlaps(ptr, P2SZ(pcount), area->base,
976 P2SZ(pages))) {
[a35b458]977
[7be8d4d]978 if (ptr + P2SZ(pcount) <= start_free) {
[56789125]979 /*
[7be8d4d]980 * The whole interval fits completely
981 * in the resized address space area.
[56789125]982 */
[7be8d4d]983 break;
[56789125]984 }
[a35b458]985
[d67dfdc]986 /*
[7be8d4d]987 * Part of the interval overlaps with
 988 * the resized address space
 989 * area.
[d67dfdc]990 */
991
[7be8d4d]992 /* We are almost done */
993 cond = false;
994 i = (start_free - ptr) >> PAGE_WIDTH;
[a35b458]995
[7be8d4d]996 /* Shorten the interval to @c i pages */
997 used_space_shorten_ival(ival, i);
998 } else {
999 /*
1000 * The interval of used space can be completely
1001 * removed.
1002 */
1003 used_space_remove_ival(ival);
1004 }
[a35b458]1005
[7be8d4d]1006 for (; i < pcount; i++) {
1007 pte_t pte;
1008 bool found = page_mapping_find(as,
1009 ptr + P2SZ(i), false, &pte);
[a35b458]1010
[7be8d4d]1011 (void) found;
1012 assert(found);
1013 assert(PTE_VALID(&pte));
1014 assert(PTE_PRESENT(&pte));
[a35b458]1015
[7be8d4d]1016 if ((area->backend) &&
1017 (area->backend->frame_free)) {
1018 area->backend->frame_free(area,
1019 ptr + P2SZ(i),
1020 PTE_GET_FRAME(&pte));
[56789125]1021 }
[a35b458]1022
[7be8d4d]1023 page_mapping_remove(as, ptr + P2SZ(i));
[d67dfdc]1024 }
[7be8d4d]1025
[d67dfdc]1026 }
[7be8d4d]1027
1028 /*
1029 * Finish TLB shootdown sequence.
1030 */
1031
1032 tlb_invalidate_pages(as->asid,
1033 area->base + P2SZ(pages),
1034 area->pages - pages);
1035
1036 /*
1037 * Invalidate software translation caches
1038 * (e.g. TSB on sparc64, PHT on ppc32).
1039 */
1040 as_invalidate_translation_cache(as,
1041 area->base + P2SZ(pages),
1042 area->pages - pages);
1043 tlb_shootdown_finalize(ipl);
1044
[da1bafb]1045 page_table_unlock(as, false);
[df0103f7]1046 } else {
1047 /*
1048 * Growing the area.
[0941e9ae]1049 */
1050
[94795812]1051 if (overflows_into_positive(address, P2SZ(pages))) {
 mutex_unlock(&area->lock);
 mutex_unlock(&as->lock);
[0941e9ae]1052 return EINVAL;
 }
1053
1054 /*
[df0103f7]1055 * Check for overlaps with other address space areas.
1056 */
[35a3d950]1057 bool const guarded = area->flags & AS_AREA_GUARD;
1058 if (!check_area_conflicts(as, address, pages, guarded, area)) {
[1068f6a]1059 mutex_unlock(&area->lock);
[da1bafb]1060 mutex_unlock(&as->lock);
[7242a78e]1061 return EADDRNOTAVAIL;
[df0103f7]1062 }
[da1bafb]1063 }
[a35b458]1064
[e394b736]1065 if (area->backend && area->backend->resize) {
1066 if (!area->backend->resize(area, pages)) {
1067 mutex_unlock(&area->lock);
1068 mutex_unlock(&as->lock);
1069 return ENOMEM;
1070 }
1071 }
[a35b458]1072
[df0103f7]1073 area->pages = pages;
[a35b458]1074
[1068f6a]1075 mutex_unlock(&area->lock);
1076 mutex_unlock(&as->lock);
[a35b458]1077
[7242a78e]1078 return 0;
1079}
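/*
 * Illustrative sketch (not part of the original source): growing an
 * anonymous area at the page-aligned address 'base' from two to four pages;
 * the new size is measured from the given address:
 *
 *	errno_t rc = as_area_resize(AS, base, 4 * PAGE_SIZE, 0);
 *	if (rc != EOK)
 *		return rc;	// e.g. ENOTSUP for shared areas, EADDRNOTAVAIL on conflict
 */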
1080
1081/** Destroy address space area.
1082 *
[da1bafb]1083 * @param as Address space.
1084 * @param address Address within the area to be deleted.
1085 *
1086 * @return Zero on success or a value from @ref errno.h on failure.
[7242a78e]1087 *
1088 */
[b7fd2a0]1089errno_t as_area_destroy(as_t *as, uintptr_t address)
[7242a78e]1090{
[1068f6a]1091 mutex_lock(&as->lock);
[a35b458]1092
[da1bafb]1093 as_area_t *area = find_area_and_lock(as, address);
[7242a78e]1094 if (!area) {
[1068f6a]1095 mutex_unlock(&as->lock);
[7242a78e]1096 return ENOENT;
1097 }
[e394b736]1098
1099 if (area->backend && area->backend->destroy)
1100 area->backend->destroy(area);
[a35b458]1101
[c964521]1102 page_table_lock(as, false);
[5552d60]1103 /*
1104 * Start TLB shootdown sequence.
1105 */
[402eda5]1106 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1107 area->pages);
[a35b458]1108
[567807b1]1109 /*
[7be8d4d]1110 * Visit only the pages mapped by used_space.
[567807b1]1111 */
[7be8d4d]1112 used_space_ival_t *ival = used_space_first(&area->used_space);
1113 while (ival != NULL) {
1114 uintptr_t ptr = ival->page;
1115
1116 for (size_t size = 0; size < ival->count; size++) {
1117 pte_t pte;
1118 bool found = page_mapping_find(as,
1119 ptr + P2SZ(size), false, &pte);
1120
1121 (void) found;
1122 assert(found);
1123 assert(PTE_VALID(&pte));
1124 assert(PTE_PRESENT(&pte));
1125
1126 if ((area->backend) &&
1127 (area->backend->frame_free)) {
1128 area->backend->frame_free(area,
1129 ptr + P2SZ(size),
1130 PTE_GET_FRAME(&pte));
[7242a78e]1131 }
[7be8d4d]1132
1133 page_mapping_remove(as, ptr + P2SZ(size));
[7242a78e]1134 }
[7be8d4d]1135
1136 used_space_remove_ival(ival);
1137 ival = used_space_first(&area->used_space);
[7242a78e]1138 }
[a35b458]1139
[7242a78e]1140 /*
[5552d60]1141 * Finish TLB shootdown sequence.
[7242a78e]1142 */
[a35b458]1143
[f1d1f5d3]1144 tlb_invalidate_pages(as->asid, area->base, area->pages);
[a35b458]1145
[f1d1f5d3]1146 /*
[eef1b031]1147 * Invalidate potential software translation caches
1148 * (e.g. TSB on sparc64, PHT on ppc32).
[f1d1f5d3]1149 */
1150 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1151 tlb_shootdown_finalize(ipl);
[a35b458]1152
[c964521]1153 page_table_unlock(as, false);
[a35b458]1154
[7be8d4d]1155 used_space_finalize(&area->used_space);
[8d4f2ae]1156 area->attributes |= AS_AREA_ATTR_PARTIAL;
[83b6ba9f]1157 sh_info_remove_reference(area->sh_info);
[a35b458]1158
[1068f6a]1159 mutex_unlock(&area->lock);
[a35b458]1160
[7242a78e]1161 /*
1162 * Remove the empty area from address space.
1163 */
[88cc71c0]1164 odict_remove(&area->las_areas);
[a35b458]1165
[8d4f2ae]1166 free(area);
[a35b458]1167
[f1d1f5d3]1168 mutex_unlock(&as->lock);
[7242a78e]1169 return 0;
[df0103f7]1170}
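/*
 * Usage sketch (illustrative only): any address inside an area identifies it,
 * so tearing down the area created in the earlier sketch is simply:
 *
 *	errno_t rc = as_area_destroy(AS, base);	// 'base' from as_area_create()
 */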
1171
[8d6bc2d5]1172/** Share address space area with another or the same address space.
[df0103f7]1173 *
[0ee077ee]1174 * Address space area mapping is shared with a new address space area.
1175 * If the source address space area has not been shared so far,
1176 * a new sh_info is created. The new address space area simply gets the
1177 * sh_info of the source area. The process of duplicating the
1178 * mapping is done through the backend share function.
[da1bafb]1179 *
1180 * @param src_as Pointer to source address space.
1181 * @param src_base Base address of the source address space area.
1182 * @param acc_size Expected size of the source area.
1183 * @param dst_as Pointer to destination address space.
[fd4d8c0]1184 * @param dst_flags_mask Destination address space area flags mask.
[fbcdeb8]1185 * @param dst_base Target base address. If set to -1,
1186 * a suitable mappable area is found.
1187 * @param bound Lowest address bound if dst_base is set to -1.
1188 * Otherwise ignored.
[df0103f7]1189 *
[da1bafb]1190 * @return Zero on success.
1191 * @return ENOENT if there is no such task or such address space.
1192 * @return EPERM if there was a problem in accepting the area.
1193 * @return ENOMEM if there was a problem in allocating destination
1194 * address space area.
1195 * @return ENOTSUP if the address space area backend does not support
1196 * sharing.
1197 *
[df0103f7]1198 */
[b7fd2a0]1199errno_t as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
[fbcdeb8]1200 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
1201 uintptr_t bound)
[df0103f7]1202{
[1068f6a]1203 mutex_lock(&src_as->lock);
[da1bafb]1204 as_area_t *src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]1205 if (!src_area) {
[6fa476f7]1206 /*
1207 * Could not find the source address space area.
1208 */
[1068f6a]1209 mutex_unlock(&src_as->lock);
[6fa476f7]1210 return ENOENT;
1211 }
[a35b458]1212
[01029fc]1213 if (!src_area->backend->is_shareable(src_area)) {
[8d6bc2d5]1214 /*
[01029fc]1215 * The backend does not permit sharing of this area.
[8d6bc2d5]1216 */
1217 mutex_unlock(&src_area->lock);
1218 mutex_unlock(&src_as->lock);
1219 return ENOTSUP;
1220 }
[a35b458]1221
[b6f3e7e]1222 size_t src_size = P2SZ(src_area->pages);
[da1bafb]1223 unsigned int src_flags = src_area->flags;
1224 mem_backend_t *src_backend = src_area->backend;
1225 mem_backend_data_t src_backend_data = src_area->backend_data;
[a35b458]1226
[1ec1fd8]1227 /* Share the cacheable flag from the original mapping */
1228 if (src_flags & AS_AREA_CACHEABLE)
1229 dst_flags_mask |= AS_AREA_CACHEABLE;
[a35b458]1230
[da1bafb]1231 if ((src_size != acc_size) ||
1232 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
[8d6bc2d5]1233 mutex_unlock(&src_area->lock);
1234 mutex_unlock(&src_as->lock);
[df0103f7]1235 return EPERM;
1236 }
[a35b458]1237
[8d6bc2d5]1238 /*
1239 * Now we are committed to sharing the area.
[8440473]1240 * First, prepare the area for sharing.
[8d6bc2d5]1241 * Then it will be safe to unlock it.
1242 */
[da1bafb]1243 share_info_t *sh_info = src_area->sh_info;
[a35b458]1244
[83b6ba9f]1245 mutex_lock(&sh_info->lock);
1246 sh_info->refcount++;
1247 bool shared = sh_info->shared;
1248 sh_info->shared = true;
1249 mutex_unlock(&sh_info->lock);
1250
1251 if (!shared) {
[c0697c4c]1252 /*
1253 * Call the backend to setup sharing.
[83b6ba9f]1254 * This only happens once for each sh_info.
[c0697c4c]1255 */
1256 src_area->backend->share(src_area);
[8d6bc2d5]1257 }
[a35b458]1258
[8d6bc2d5]1259 mutex_unlock(&src_area->lock);
1260 mutex_unlock(&src_as->lock);
[a35b458]1261
[df0103f7]1262 /*
[a9e8b39]1263 * Create copy of the source address space area.
1264 * The destination area is created with AS_AREA_ATTR_PARTIAL
1265 * attribute set which prevents race condition with
1266 * preliminary as_page_fault() calls.
[fd4d8c0]1267 * The flags of the source area are masked against dst_flags_mask
1268 * to support sharing in less privileged mode.
[df0103f7]1269 */
[fbcdeb8]1270 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
1271 src_size, AS_AREA_ATTR_PARTIAL, src_backend,
1272 &src_backend_data, dst_base, bound);
[a9e8b39]1273 if (!dst_area) {
[df0103f7]1274 /*
1275 * Destination address space area could not be created.
1276 */
[8d6bc2d5]1277 sh_info_remove_reference(sh_info);
[a35b458]1278
[df0103f7]1279 return ENOMEM;
1280 }
[a35b458]1281
[a9e8b39]1282 /*
1283 * Now the destination address space area has been
1284 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]1285 * attribute and set the sh_info.
[da1bafb]1286 */
1287 mutex_lock(&dst_as->lock);
[1068f6a]1288 mutex_lock(&dst_area->lock);
[a9e8b39]1289 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]1290 dst_area->sh_info = sh_info;
[1068f6a]1291 mutex_unlock(&dst_area->lock);
[da1bafb]1292 mutex_unlock(&dst_as->lock);
[a35b458]1293
[df0103f7]1294 return 0;
1295}
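/*
 * Illustrative sketch (not part of the original source): granting another
 * address space read-only access to an existing area, with the destination
 * placed at any suitable address; src_as, src_base, src_size and dst_as are
 * assumed to describe an already created source area:
 *
 *	uintptr_t dst_base = (uintptr_t) AS_AREA_ANY;
 *	errno_t rc = as_area_share(src_as, src_base, src_size, dst_as,
 *	    AS_AREA_READ | AS_AREA_CACHEABLE, &dst_base, 0);
 *	if (rc == EOK) {
 *		// dst_base now holds the base of the new area in dst_as
 *	}
 */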
1296
[fb84455]1297/** Check access mode for address space area.
1298 *
[da1bafb]1299 * @param area Address space area.
1300 * @param access Access mode.
1301 *
1302 * @return False if access violates area's permissions, true
1303 * otherwise.
[fb84455]1304 *
1305 */
[97bdb4a]1306NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
[fb84455]1307{
[63e27ef]1308 assert(mutex_locked(&area->lock));
[a35b458]1309
[fb84455]1310 int flagmap[] = {
1311 [PF_ACCESS_READ] = AS_AREA_READ,
1312 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
1313 [PF_ACCESS_EXEC] = AS_AREA_EXEC
1314 };
[a35b458]1315
[fb84455]1316 if (!(area->flags & flagmap[access]))
1317 return false;
[a35b458]1318
[fb84455]1319 return true;
1320}
1321
[e3ee9b9]1322/** Convert address space area flags to page flags.
1323 *
1324 * @param aflags Flags of some address space area.
1325 *
1326 * @return Flags to be passed to page_mapping_insert().
1327 *
1328 */
[7a0359b]1329NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
[e3ee9b9]1330{
1331 unsigned int flags = PAGE_USER | PAGE_PRESENT;
[a35b458]1332
[e3ee9b9]1333 if (aflags & AS_AREA_READ)
1334 flags |= PAGE_READ;
[a35b458]1335
[e3ee9b9]1336 if (aflags & AS_AREA_WRITE)
1337 flags |= PAGE_WRITE;
[a35b458]1338
[e3ee9b9]1339 if (aflags & AS_AREA_EXEC)
1340 flags |= PAGE_EXEC;
[a35b458]1341
[e3ee9b9]1342 if (aflags & AS_AREA_CACHEABLE)
1343 flags |= PAGE_CACHEABLE;
[a35b458]1344
[e3ee9b9]1345 return flags;
1346}
1347
[6745592]1348/** Change address space area flags.
[c98e6ee]1349 *
1350 * The idea is to have the same data, but with a different access mode.
1351 * This is needed e.g. for writing code into memory and then executing it.
1352 * In order for this to work properly, this may copy the data
1353 * into private anonymous memory (unless it's already there).
1354 *
[76fca31]1355 * @param as Address space.
1356 * @param flags Flags of the area memory.
1357 * @param address Address within the area to be changed.
1358 *
1359 * @return Zero on success or a value from @ref errno.h on failure.
[c98e6ee]1360 *
1361 */
[b7fd2a0]1362errno_t as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
[c98e6ee]1363{
1364 /* Flags for the new memory mapping */
[da1bafb]1365 unsigned int page_flags = area_flags_to_page_flags(flags);
[a35b458]1366
[c98e6ee]1367 mutex_lock(&as->lock);
[a35b458]1368
[da1bafb]1369 as_area_t *area = find_area_and_lock(as, address);
[c98e6ee]1370 if (!area) {
1371 mutex_unlock(&as->lock);
1372 return ENOENT;
1373 }
[a35b458]1374
[83b6ba9f]1375 if (area->backend != &anon_backend) {
[c98e6ee]1376 /* Copying non-anonymous memory not supported yet */
1377 mutex_unlock(&area->lock);
1378 mutex_unlock(&as->lock);
1379 return ENOTSUP;
1380 }
[83b6ba9f]1381
1382 mutex_lock(&area->sh_info->lock);
1383 if (area->sh_info->shared) {
1384 /* Copying shared areas not supported yet */
1385 mutex_unlock(&area->sh_info->lock);
1386 mutex_unlock(&area->lock);
1387 mutex_unlock(&as->lock);
1388 return ENOTSUP;
1389 }
1390 mutex_unlock(&area->sh_info->lock);
[a35b458]1391
[c98e6ee]1392 /*
[7be8d4d]1393 * Compute total number of used pages
[c98e6ee]1394 */
[da1bafb]1395 size_t used_pages = 0;
[a35b458]1396
[7be8d4d]1397 used_space_ival_t *ival = used_space_first(&area->used_space);
1398 while (ival != NULL) {
1399 used_pages += ival->count;
1400 ival = used_space_next(ival);
[c98e6ee]1401 }
[a35b458]1402
[c98e6ee]1403 /* An array for storing frame numbers */
[11b285d]1404 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t));
[7473807]1405 if (!old_frame) {
1406 mutex_unlock(&area->lock);
1407 mutex_unlock(&as->lock);
1408 return ENOMEM;
1409 }
[a35b458]1410
[c964521]1411 page_table_lock(as, false);
[a35b458]1412
[c98e6ee]1413 /*
1414 * Start TLB shootdown sequence.
1415 */
[402eda5]1416 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1417 area->pages);
[a35b458]1418
[c98e6ee]1419 /*
1420 * Remove used pages from page tables and remember their frame
1421 * numbers.
1422 */
[da1bafb]1423 size_t frame_idx = 0;
[a35b458]1424
[7be8d4d]1425 ival = used_space_first(&area->used_space);
1426 while (ival != NULL) {
1427 uintptr_t ptr = ival->page;
1428 size_t size;
[a35b458]1429
[7be8d4d]1430 for (size = 0; size < ival->count; size++) {
1431 pte_t pte;
1432 bool found = page_mapping_find(as, ptr + P2SZ(size),
1433 false, &pte);
[a35b458]1434
[7be8d4d]1435 (void) found;
1436 assert(found);
1437 assert(PTE_VALID(&pte));
1438 assert(PTE_PRESENT(&pte));
[a35b458]1439
[7be8d4d]1440 old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
[a35b458]1441
[7be8d4d]1442 /* Remove old mapping */
1443 page_mapping_remove(as, ptr + P2SZ(size));
[c98e6ee]1444 }
[7be8d4d]1445
1446 ival = used_space_next(ival);
[c98e6ee]1447 }
[a35b458]1448
[c98e6ee]1449 /*
1450 * Finish TLB shootdown sequence.
1451 */
[a35b458]1452
[c98e6ee]1453 tlb_invalidate_pages(as->asid, area->base, area->pages);
[a35b458]1454
[c98e6ee]1455 /*
[eef1b031]1456 * Invalidate potential software translation caches
1457 * (e.g. TSB on sparc64, PHT on ppc32).
[c98e6ee]1458 */
1459 as_invalidate_translation_cache(as, area->base, area->pages);
[402eda5]1460 tlb_shootdown_finalize(ipl);
[a35b458]1461
[c964521]1462 page_table_unlock(as, false);
[a35b458]1463
[ae7f6fb]1464 /*
1465 * Set the new flags.
1466 */
1467 area->flags = flags;
[a35b458]1468
[c98e6ee]1469 /*
1470 * Map pages back in with new flags. This step is kept separate
[6745592]1471 * so that the memory area cannot be accessed with both the old and
1472 * the new flags at once.
[c98e6ee]1473 */
1474 frame_idx = 0;
[a35b458]1475
[7be8d4d]1476 ival = used_space_first(&area->used_space);
1477 while (ival != NULL) {
1478 uintptr_t ptr = ival->page;
1479 size_t size;
[a35b458]1480
[7be8d4d]1481 for (size = 0; size < ival->count; size++) {
1482 page_table_lock(as, false);
[a35b458]1483
[7be8d4d]1484 /* Insert the new mapping */
1485 page_mapping_insert(as, ptr + P2SZ(size),
1486 old_frame[frame_idx++], page_flags);
[a35b458]1487
[7be8d4d]1488 page_table_unlock(as, false);
[c98e6ee]1489 }
[7be8d4d]1490
1491 ival = used_space_next(ival);
[c98e6ee]1492 }
[a35b458]1493
[c98e6ee]1494 free(old_frame);
[a35b458]1495
[c98e6ee]1496 mutex_unlock(&area->lock);
1497 mutex_unlock(&as->lock);
[a35b458]1498
[c98e6ee]1499 return 0;
1500}
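/*
 * Illustrative sketch (not part of the original source): the use case named
 * above, generating code into an anonymous writable area and then remapping
 * it read-only and executable (writable executable areas are not allowed):
 *
 *	// the area at 'base' was created with AS_AREA_READ | AS_AREA_WRITE
 *	// ... write the instructions into the area ...
 *	errno_t rc = as_area_change_flags(AS,
 *	    AS_AREA_READ | AS_AREA_EXEC | AS_AREA_CACHEABLE, base);
 */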
1501
[20d50a1]1502/** Handle page fault within the current address space.
1503 *
[6745592]1504 * This is the high-level page fault handler. It decides whether the page fault
1505 * can be resolved by any backend and if so, it invokes the backend to resolve
1506 * the page fault.
[8182031]1507 *
[20d50a1]1508 * Interrupts are assumed disabled.
1509 *
[59fb782]1510 * @param address Faulting address.
1511 * @param access Access mode that caused the page fault (i.e.
1512 * read/write/exec).
1513 * @param istate Pointer to the interrupted state.
[da1bafb]1514 *
1515 * @return AS_PF_FAULT on page fault.
1516 * @return AS_PF_OK on success.
1517 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1518 * or copy_from_uspace().
[20d50a1]1519 *
1520 */
[59fb782]1521int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
[20d50a1]1522{
[59fb782]1523 uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
[908bb96]1524 int rc = AS_PF_FAULT;
1525
[1068f6a]1526 if (!THREAD)
[1dbc43f]1527 goto page_fault;
[a35b458]1528
[7af8c0e]1529 if (!AS)
[1dbc43f]1530 goto page_fault;
[a35b458]1531
[1068f6a]1532 mutex_lock(&AS->lock);
[da1bafb]1533 as_area_t *area = find_area_and_lock(AS, page);
[20d50a1]1534 if (!area) {
1535 /*
1536 * No area contained mapping for 'page'.
1537 * Signal page fault to low-level handler.
1538 */
[1068f6a]1539 mutex_unlock(&AS->lock);
[e3c762cd]1540 goto page_fault;
[20d50a1]1541 }
[a35b458]1542
[a9e8b39]1543 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1544 /*
1545 * The address space area is not fully initialized.
1546 * Avoid possible race by returning error.
1547 */
[1068f6a]1548 mutex_unlock(&area->lock);
1549 mutex_unlock(&AS->lock);
[da1bafb]1550 goto page_fault;
[a9e8b39]1551 }
[a35b458]1552
[da1bafb]1553 if ((!area->backend) || (!area->backend->page_fault)) {
[8182031]1554 /*
1555 * The address space area is not backed by any backend
1556 * or the backend cannot handle page faults.
1557 */
1558 mutex_unlock(&area->lock);
1559 mutex_unlock(&AS->lock);
[da1bafb]1560 goto page_fault;
[8182031]1561 }
[a35b458]1562
[2299914]1563 page_table_lock(AS, false);
[a35b458]1564
[2299914]1565 /*
[6745592]1566 * To avoid race condition between two page faults on the same address,
1567 * we need to make sure the mapping has not been already inserted.
[2299914]1568 */
[38dc82d]1569 pte_t pte;
1570 bool found = page_mapping_find(AS, page, false, &pte);
[560b81c]1571 if (found && PTE_PRESENT(&pte)) {
1572 if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
1573 (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
1574 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
1575 page_table_unlock(AS, false);
1576 mutex_unlock(&area->lock);
1577 mutex_unlock(&AS->lock);
1578 return AS_PF_OK;
[2299914]1579 }
1580 }
[a35b458]1581
[20d50a1]1582 /*
[8182031]1583 * Resort to the backend page fault handler.
[20d50a1]1584 */
[908bb96]1585 rc = area->backend->page_fault(area, page, access);
1586 if (rc != AS_PF_OK) {
[8182031]1587 page_table_unlock(AS, false);
1588 mutex_unlock(&area->lock);
1589 mutex_unlock(&AS->lock);
1590 goto page_fault;
1591 }
[a35b458]1592
[8182031]1593 page_table_unlock(AS, false);
[1068f6a]1594 mutex_unlock(&area->lock);
1595 mutex_unlock(&AS->lock);
[e3c762cd]1596 return AS_PF_OK;
[a35b458]1597
[e3c762cd]1598page_fault:
[5071f8a]1599 if (THREAD && THREAD->in_copy_from_uspace) {
[e3c762cd]1600 THREAD->in_copy_from_uspace = false;
[6f4495f5]1601 istate_set_retaddr(istate,
1602 (uintptr_t) &memcpy_from_uspace_failover_address);
[5071f8a]1603 } else if (THREAD && THREAD->in_copy_to_uspace) {
[e3c762cd]1604 THREAD->in_copy_to_uspace = false;
[6f4495f5]1605 istate_set_retaddr(istate,
1606 (uintptr_t) &memcpy_to_uspace_failover_address);
[908bb96]1607 } else if (rc == AS_PF_SILENT) {
1608 printf("Killing task %" PRIu64 " due to a "
1609 "failed late reservation request.\n", TASK->taskid);
1610 task_kill_self(true);
[e3c762cd]1611 } else {
[59fb782]1612 fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
1613 panic_memtrap(istate, access, address, NULL);
[e3c762cd]1614 }
[a35b458]1615
[e3c762cd]1616 return AS_PF_DEFER;
[20d50a1]1617}
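/*
 * Illustrative sketch (not part of the original source): an architecture
 * level fault handler would extract the faulting address and access type
 * from its exception state and defer to this function; 'badvaddr' and
 * 'istate' stand for the handler's own values:
 *
 *	int rc = as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
 *	if (rc == AS_PF_OK)
 *		return;	// mapping established, restart the faulting instruction
 *	// AS_PF_DEFER: a failed copy_*_uspace(), return address already patched
 */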
1618
[7e4e532]1619/** Switch address spaces.
[1068f6a]1620 *
1621 * Note that this function cannot sleep as it is essentially a part of
[879585a3]1622 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1623 * thing which is forbidden in this context is locking the address space.
[20d50a1]1624 *
[7250d2c]1625 * When this function is entered, no spinlocks may be held.
[31d8e10]1626 *
[da1bafb]1627 * @param old Old address space or NULL.
1628 * @param new New address space.
1629 *
[20d50a1]1630 */
[80bcaed]1631void as_switch(as_t *old_as, as_t *new_as)
[20d50a1]1632{
[31d8e10]1633 DEADLOCK_PROBE_INIT(p_asidlock);
1634 preemption_disable();
[a35b458]1635
[31d8e10]1636retry:
1637 (void) interrupts_disable();
1638 if (!spinlock_trylock(&asidlock)) {
[da1bafb]1639 /*
[31d8e10]1640 * Avoid deadlock with TLB shootdown.
1641 * We can enable interrupts here because
1642 * preemption is disabled. We should not be
1643 * holding any other lock.
1644 */
1645 (void) interrupts_enable();
1646 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1647 goto retry;
1648 }
1649 preemption_enable();
[a35b458]1650
[7e4e532]1651 /*
1652 * First, take care of the old address space.
[da1bafb]1653 */
[80bcaed]1654 if (old_as) {
[63e27ef]1655 assert(old_as->cpu_refcount);
[a35b458]1656
[da1bafb]1657 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
[7e4e532]1658 /*
1659 * The old address space is no longer active on
1660 * any processor. It can be appended to the
1661 * list of inactive address spaces with assigned
1662 * ASID.
1663 */
[63e27ef]1664 assert(old_as->asid != ASID_INVALID);
[a35b458]1665
[2057572]1666 list_append(&old_as->inactive_as_with_asid_link,
[55b77d9]1667 &inactive_as_with_asid_list);
[7e4e532]1668 }
[a35b458]1669
[57da95c]1670 /*
1671 * Perform architecture-specific tasks when the address space
1672 * is being removed from the CPU.
1673 */
[80bcaed]1674 as_deinstall_arch(old_as);
[7e4e532]1675 }
[a35b458]1676
[7e4e532]1677 /*
1678 * Second, prepare the new address space.
1679 */
[80bcaed]1680 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
[879585a3]1681 if (new_as->asid != ASID_INVALID)
[80bcaed]1682 list_remove(&new_as->inactive_as_with_asid_link);
[879585a3]1683 else
1684 new_as->asid = asid_get();
[7e4e532]1685 }
[a35b458]1686
[80bcaed]1687#ifdef AS_PAGE_TABLE
1688 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1689#endif
[a35b458]1690
[20d50a1]1691 /*
1692 * Perform architecture-specific steps.
[4512d7e]1693 * (e.g. write ASID to hardware register etc.)
[20d50a1]1694 */
[80bcaed]1695 as_install_arch(new_as);
[a35b458]1696
[879585a3]1697 spinlock_unlock(&asidlock);
[a35b458]1698
[80bcaed]1699 AS = new_as;
[20d50a1]1700}
[6a3c9a7]1701
[df0103f7]1702/** Compute flags for virtual address translation subsystem.
1703 *
[da1bafb]1704 * @param area Address space area.
1705 *
1706 * @return Flags to be used in page_mapping_insert().
[df0103f7]1707 *
1708 */
[97bdb4a]1709NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
[df0103f7]1710{
[63e27ef]1711 assert(mutex_locked(&area->lock));
[a35b458]1712
[da1bafb]1713 return area_flags_to_page_flags(area->flags);
[df0103f7]1714}
1715
[88cc71c0]1716/** Get key function for the @c as_t.as_areas ordered dictionary.
1717 *
1718 * @param odlink Link
 1719 * @return Pointer to area base address cast as 'void *'
1720 */
1721static void *as_areas_getkey(odlink_t *odlink)
1722{
1723 as_area_t *area = odict_get_instance(odlink, as_area_t, las_areas);
1724 return (void *) &area->base;
1725}
1726
1727/** Key comparison function for the @c as_t.as_areas ordered dictionary.
1728 *
1729 * @param a Pointer to area A base
1730 * @param b Pointer to area B base
 1731 * @return -1, 0, 1 if base of A is lower than, equal to, or higher than B, respectively
1732 */
1733static int as_areas_cmp(void *a, void *b)
1734{
1735 uintptr_t base_a = *(uintptr_t *)a;
1736 uintptr_t base_b = *(uintptr_t *)b;
1737
1738 if (base_a < base_b)
1739 return -1;
1740 else if (base_a == base_b)
1741 return 0;
1742 else
1743 return +1;
1744}
1745
[ef67bab]1746/** Create page table.
1747 *
[6745592]1748 * Depending on the architecture, create either an address space private or a
 1749 * global page table.
[ef67bab]1750 *
[da1bafb]1751 * @param flags Flags saying whether the page table is for the kernel
1752 * address space.
1753 *
1754 * @return First entry of the page table.
[ef67bab]1755 *
1756 */
[97bdb4a]1757NO_TRACE pte_t *page_table_create(unsigned int flags)
[ef67bab]1758{
[63e27ef]1759 assert(as_operations);
1760 assert(as_operations->page_table_create);
[a35b458]1761
[bd1deed]1762 return as_operations->page_table_create(flags);
[ef67bab]1763}
[d3e7ff4]1764
[482826d]1765/** Destroy page table.
1766 *
1767 * Destroy page table in architecture specific way.
1768 *
[da1bafb]1769 * @param page_table Physical address of PTL0.
1770 *
[482826d]1771 */
[97bdb4a]1772NO_TRACE void page_table_destroy(pte_t *page_table)
[482826d]1773{
[63e27ef]1774 assert(as_operations);
1775 assert(as_operations->page_table_destroy);
[a35b458]1776
[bd1deed]1777 as_operations->page_table_destroy(page_table);
[482826d]1778}
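
/*
 * Example (a minimal sketch, not part of the original file): page tables are
 * typically created when an address space is set up and destroyed together
 * with it. FLAG_AS_KERNEL is assumed to be the flag distinguishing the kernel
 * address space; the surrounding call sites are illustrative only.
 *
 *	pte_t *ptl0 = page_table_create(FLAG_AS_KERNEL);
 *	...
 *	page_table_destroy(ptl0);
 */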
1779
[2299914]1780/** Lock page table.
1781 *
1782 * This function should be called before any page_mapping_insert(),
1783 * page_mapping_remove() and page_mapping_find().
[da1bafb]1784 *
[2299914]1785 * Locking order is such that address space areas must be locked
1786 * prior to this call. Address space can be locked prior to this
1787 * call in which case the lock argument is false.
1788 *
[da1bafb]1789 * @param as Address space.
1790 * @param lock If false, do not attempt to lock as->lock.
1791 *
[2299914]1792 */
[97bdb4a]1793NO_TRACE void page_table_lock(as_t *as, bool lock)
[2299914]1794{
[63e27ef]1795 assert(as_operations);
1796 assert(as_operations->page_table_lock);
[a35b458]1797
[2299914]1798 as_operations->page_table_lock(as, lock);
1799}
1800
1801/** Unlock page table.
1802 *
[da1bafb]1803 * @param as Address space.
1804 * @param unlock If false, do not attempt to unlock as->lock.
1805 *
[2299914]1806 */
[97bdb4a]1807NO_TRACE void page_table_unlock(as_t *as, bool unlock)
[2299914]1808{
[63e27ef]1809 assert(as_operations);
1810 assert(as_operations->page_table_unlock);
[a35b458]1811
[2299914]1812 as_operations->page_table_unlock(as, unlock);
1813}
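
/*
 * Example (a minimal sketch, not part of the original file): the locking
 * protocol described above, wrapped around a mapping insertion. The
 * page_mapping_insert() signature is assumed here and 'area' stands for a
 * hypothetical, already locked address space area belonging to 'as'.
 *
 *	page_table_lock(as, true);
 *	page_mapping_insert(as, page, frame, as_area_get_flags(area));
 *	page_table_unlock(as, true);
 */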
1814
[ada559c]1815/** Test whether page tables are locked.
1816 *
[e3ee9b9]1817 * @param as Address space where the page tables belong.
[ada559c]1818 *
[e3ee9b9]1819 * @return True if the page tables belonging to the address space
1820 * are locked, otherwise false.
[ada559c]1821 */
[97bdb4a]1822NO_TRACE bool page_table_locked(as_t *as)
[ada559c]1823{
[63e27ef]1824 assert(as_operations);
1825 assert(as_operations->page_table_locked);
[ada559c]1826
1827 return as_operations->page_table_locked(as);
1828}
1829
[b878df3]1830/** Return size of the address space area containing the given address.
1831 *
[1d432f9]1832 * @param base Arbitrary address inside the address space area.
[da1bafb]1833 *
1834 * @return Size of the address space area in bytes or zero if it
1835 * does not exist.
[b878df3]1836 *
1837 */
1838size_t as_area_get_size(uintptr_t base)
[7c23af9]1839{
1840 size_t size;
[a35b458]1841
[1d432f9]1842 page_table_lock(AS, true);
[da1bafb]1843 as_area_t *src_area = find_area_and_lock(AS, base);
[a35b458]1844
[6745592]1845 if (src_area) {
[b6f3e7e]1846 size = P2SZ(src_area->pages);
[1068f6a]1847 mutex_unlock(&src_area->lock);
[da1bafb]1848 } else
[7c23af9]1849 size = 0;
[a35b458]1850
[1d432f9]1851 page_table_unlock(AS, true);
[7c23af9]1852 return size;
1853}
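
/*
 * Example (a minimal sketch, not part of the original file): querying the
 * size of the area covering a given address in the current address space;
 * a zero result means that no such area exists. The variable names are
 * illustrative.
 *
 *	size_t sz = as_area_get_size(addr);
 *	if (sz == 0)
 *		return ENOENT;
 */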
1854
[7be8d4d]1855/** Initialize used space map.
[25bf215]1856 *
[7be8d4d]1857 * @param used_space Used space map
1858 */
1859static void used_space_initialize(used_space_t *used_space)
1860{
1861 odict_initialize(&used_space->ivals, used_space_getkey, used_space_cmp);
1862 used_space->pages = 0;
1863}
1864
1865/** Finalize used space map.
[da1bafb]1866 *
[7be8d4d]1867 * @param used_space Used space map
1868 */
1869static void used_space_finalize(used_space_t *used_space)
1870{
1871 assert(odict_empty(&used_space->ivals));
1872 odict_finalize(&used_space->ivals);
1873}
1874
1875/** Get first interval of used space.
[25bf215]1876 *
[7be8d4d]1877 * @param used_space Used space map
1878 * @return First interval or @c NULL if there are none
[25bf215]1879 */
[7be8d4d]1880used_space_ival_t *used_space_first(used_space_t *used_space)
[25bf215]1881{
[7be8d4d]1882 odlink_t *odlink = odict_first(&used_space->ivals);
[a35b458]1883
[7be8d4d]1884 if (odlink == NULL)
1885 return NULL;
[566da7f8]1886
[7be8d4d]1887 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1888}
[a35b458]1889
[7be8d4d]1890/** Get next interval of used space.
1891 *
1892 * @param cur Current interval
1893 * @return Next interval or @c NULL if there are none
1894 */
1895used_space_ival_t *used_space_next(used_space_ival_t *cur)
1896{
1897 odlink_t *odlink = odict_next(&cur->lused_space,
1898 &cur->used_space->ivals);
[a35b458]1899
[7be8d4d]1900 if (odlink == NULL)
1901 return NULL;
[a35b458]1902
[7be8d4d]1903 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1904}
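
/*
 * Example (a minimal sketch, not part of the original file): walking all
 * intervals of a used space map, e.g. to count resident pages. It assumes
 * the map is the used_space member of an already locked address space area.
 *
 *	size_t pages = 0;
 *	used_space_ival_t *ival = used_space_first(&area->used_space);
 *	while (ival != NULL) {
 *		pages += ival->count;
 *		ival = used_space_next(ival);
 *	}
 */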
[a35b458]1905
[7be8d4d]1906/** Get last interval of used space.
1907 *
1908 * @param used_space Used space map
 1909 * @return Last interval or @c NULL if there are none
1910 */
1911static used_space_ival_t *used_space_last(used_space_t *used_space)
1912{
1913 odlink_t *odlink = odict_last(&used_space->ivals);
[a35b458]1914
[7be8d4d]1915 if (odlink == NULL)
1916 return NULL;
[a35b458]1917
[7be8d4d]1918 return odict_get_instance(odlink, used_space_ival_t, lused_space);
1919}
[a35b458]1920
[7be8d4d]1921/** Find the first interval that contains addresses greater than or equal to
1922 * @a ptr.
1923 *
1924 * @param used_space Used space map
1925 * @param ptr Virtual address
1926 *
1927 * @return Used space interval or @c NULL if none matches
1928 */
1929used_space_ival_t *used_space_find_gteq(used_space_t *used_space, uintptr_t ptr)
1930{
1931 odlink_t *odlink;
1932 used_space_ival_t *ival;
[a35b458]1933
[7be8d4d]1934 /* Find last interval to start at address less than @a ptr */
1935 odlink = odict_find_lt(&used_space->ivals, &ptr, NULL);
1936 if (odlink != NULL) {
1937 ival = odict_get_instance(odlink, used_space_ival_t,
1938 lused_space);
[a35b458]1939
[7be8d4d]1940 /* If the interval extends above @a ptr, return it */
1941 if (ival->page + P2SZ(ival->count) > ptr)
1942 return ival;
[a35b458]1943
[25bf215]1944 /*
[7be8d4d]1945 * Otherwise, if a next interval exists, it must match
1946 * the criteria.
[25bf215]1947 */
[7be8d4d]1948 odlink = odict_next(&ival->lused_space, &used_space->ivals);
1949 } else {
1950 /*
1951 * No interval with lower base address, so if there is any
1952 * interval at all, it must match the criteria
1953 */
1954 odlink = odict_first(&used_space->ivals);
1955 }
[a35b458]1956
[7be8d4d]1957 if (odlink != NULL) {
1958 ival = odict_get_instance(odlink, used_space_ival_t,
1959 lused_space);
1960 return ival;
[25bf215]1961 }
[a35b458]1962
[7be8d4d]1963 return NULL;
1964}
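
/*
 * Example (a minimal sketch, not part of the original file): enumerating the
 * used space intervals that intersect the page range [start, start +
 * P2SZ(cnt)), as a caller shrinking an area might do. The variable names,
 * the used_space member of the area and the process_interval() helper are
 * assumptions for illustration only.
 *
 *	used_space_ival_t *ival = used_space_find_gteq(&area->used_space, start);
 *	while (ival != NULL && ival->page < start + P2SZ(cnt)) {
 *		process_interval(ival->page, ival->count);
 *		ival = used_space_next(ival);
 *	}
 */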
[a35b458]1965
[7be8d4d]1966/** Get key function for used space ordered dictionary.
1967 *
1968 * The key is the virtual address of the first page
1969 *
1970 * @param odlink Ordered dictionary link (used_space_ival_t.lused_space)
1971 * @return Pointer to virtual address of first page cast as @c void *.
1972 */
1973static void *used_space_getkey(odlink_t *odlink)
1974{
1975 used_space_ival_t *ival = odict_get_instance(odlink, used_space_ival_t,
1976 lused_space);
1977 return (void *) &ival->page;
1978}
[a35b458]1979
[7be8d4d]1980/** Compare function for used space ordered dictionary.
1981 *
1982 * @param a Pointer to virtual address of first page cast as @c void *
1983 * @param b Pointer to virtual address of first page cast as @c void *
1984 * @return Less than zero, zero, greater than zero if virtual address @a a
 1985 * is less than, equal to, greater than virtual address @a b, respectively.
1986 */
1987static int used_space_cmp(void *a, void *b)
1988{
1989 uintptr_t va = *(uintptr_t *) a;
1990 uintptr_t vb = *(uintptr_t *) b;
[a35b458]1991
[7be8d4d]1992 if (va < vb)
1993 return -1;
1994 else if (va == vb)
1995 return 0;
1996 else
1997 return +1;
1998}
[a35b458]1999
[7be8d4d]2000/** Remove used space interval.
2001 *
2002 * @param ival Used space interval
2003 */
2004static void used_space_remove_ival(used_space_ival_t *ival)
2005{
2006 ival->used_space->pages -= ival->count;
2007 odict_remove(&ival->lused_space);
2008 slab_free(used_space_ival_cache, ival);
[25bf215]2009}
2010
[7be8d4d]2011/** Shorten used space interval.
2012 *
2013 * @param ival Used space interval
2014 * @param count New number of pages in the interval
2015 */
2016static void used_space_shorten_ival(used_space_ival_t *ival, size_t count)
2017{
2018 assert(count > 0);
2019 assert(count < ival->count);
2020
2021 ival->used_space->pages -= ival->count - count;
2022 ival->count = count;
2023}
2024
2025/** Mark portion of address space area as used.
[25bf215]2026 *
2027 * The address space area must be already locked.
2028 *
[7be8d4d]2029 * @param used_space Used space map
2030 * @param page First page to be marked.
[da1bafb]2031 * @param count Number of pages to be marked.
2032 *
[fc47885]2033 * @return False on failure or true on success.
[25bf215]2034 *
2035 */
[7be8d4d]2036bool used_space_insert(used_space_t *used_space, uintptr_t page, size_t count)
[25bf215]2037{
[7be8d4d]2038 used_space_ival_t *a;
2039 used_space_ival_t *b;
2040 bool adj_a;
2041 bool adj_b;
2042 odlink_t *odlink;
2043 used_space_ival_t *ival;
2044
[63e27ef]2045 assert(IS_ALIGNED(page, PAGE_SIZE));
2046 assert(count);
[a35b458]2047
[7be8d4d]2048 /* Interval to the left */
2049 odlink = odict_find_lt(&used_space->ivals, &page, NULL);
2050 a = (odlink != NULL) ?
2051 odict_get_instance(odlink, used_space_ival_t, lused_space) :
2052 NULL;
[b6f3e7e]2053
[7be8d4d]2054 /* Interval to the right */
2055 b = (a != NULL) ? used_space_next(a) :
2056 used_space_first(used_space);
[a35b458]2057
[7be8d4d]2058 /* Check for conflict with left interval */
2059 if (a != NULL && overlaps(a->page, P2SZ(a->count), page, P2SZ(count)))
[fc47885]2060 return false;
[a35b458]2061
[7be8d4d]2062 /* Check for conflict with right interval */
2063 if (b != NULL && overlaps(page, P2SZ(count), b->page, P2SZ(b->count)))
[fc47885]2064 return false;
[a35b458]2065
[7be8d4d]2066 /* Check if A is adjacent to the new interval */
2067 adj_a = (a != NULL) && (a->page + P2SZ(a->count) == page);
 2068 	/* Check if the new interval is adjacent to B */
 2069 	adj_b = (b != NULL) && (page + P2SZ(count) == b->page);
2070
2071 if (adj_a && adj_b) {
2072 /* Fuse into a single interval */
2073 a->count += count + b->count;
2074 used_space_remove_ival(b);
2075 } else if (adj_a) {
2076 /* Append to A */
2077 a->count += count;
2078 } else if (adj_b) {
2079 /* Prepend to B */
2080 b->page = page;
2081 b->count += count;
2082 } else {
2083 /* Create new interval */
2084 ival = slab_alloc(used_space_ival_cache, 0);
2085 ival->used_space = used_space;
2086 odlink_initialize(&ival->lused_space);
2087 ival->page = page;
2088 ival->count = count;
2089
2090 odict_insert(&ival->lused_space, &used_space->ivals,
2091 NULL);
[25bf215]2092 }
[a35b458]2093
[7be8d4d]2094 used_space->pages += count;
[fc47885]2095 return true;
[25bf215]2096}
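
/*
 * Example (a minimal sketch, not part of the original file): two insertions
 * of adjacent page runs into an (initially empty) used space map. The second
 * call is fused with the first interval, so the map ends up holding a single
 * three-page interval and used_space.pages equals 3. The addresses are
 * illustrative.
 *
 *	used_space_insert(&area->used_space, area->base, 2);
 *	used_space_insert(&area->used_space, area->base + P2SZ(2), 1);
 */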
2097
[df0103f7]2098/*
2099 * Address space related syscalls.
2100 */
2101
[fbcdeb8]2102sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
[ae6021d]2103 uintptr_t bound, as_area_pager_info_t *pager_info)
[df0103f7]2104{
[fbcdeb8]2105 uintptr_t virt = base;
[75b139f]2106 mem_backend_t *backend;
2107 mem_backend_data_t backend_data;
2108
[ae6021d]2109 if (pager_info == AS_AREA_UNPAGED)
[75b139f]2110 backend = &anon_backend;
2111 else {
2112 backend = &user_backend;
[ae6021d]2113 if (copy_from_uspace(&backend_data.pager_info, pager_info,
[3bacee1]2114 sizeof(as_area_pager_info_t)) != EOK) {
[ae6021d]2115 return (sysarg_t) AS_MAP_FAILED;
2116 }
[75b139f]2117 }
[c4c2406]2118 as_area_t *area = as_area_create(AS, flags, size,
[75b139f]2119 AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
[fbcdeb8]2120 if (area == NULL)
[f2c3fed]2121 return (sysarg_t) AS_MAP_FAILED;
[a35b458]2122
[fbcdeb8]2123 return (sysarg_t) virt;
[df0103f7]2124}
2125
[b7fd2a0]2126sys_errno_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
[df0103f7]2127{
[b7fd2a0]2128 return (sys_errno_t) as_area_resize(AS, address, size, 0);
[7242a78e]2129}
2130
[b7fd2a0]2131sys_errno_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
[c98e6ee]2132{
[b7fd2a0]2133 return (sys_errno_t) as_area_change_flags(AS, flags, address);
[c98e6ee]2134}
2135
[3b3fcf36]2136sys_errno_t sys_as_area_get_info(uintptr_t address, as_area_info_t *dest)
2137{
2138 as_area_t *area;
2139
2140 mutex_lock(&AS->lock);
2141 area = find_area_and_lock(AS, address);
2142 if (area == NULL) {
2143 mutex_unlock(&AS->lock);
2144 return ENOENT;
2145 }
2146
2147 dest->start_addr = area->base;
2148 dest->size = P2SZ(area->pages);
2149 dest->flags = area->flags;
2150
2151 mutex_unlock(&area->lock);
2152 mutex_unlock(&AS->lock);
2153 return EOK;
2154}
2155
[b7fd2a0]2156sys_errno_t sys_as_area_destroy(uintptr_t address)
[7242a78e]2157{
[b7fd2a0]2158 return (sys_errno_t) as_area_destroy(AS, address);
[df0103f7]2159}
[b45c443]2160
[336db295]2161/** Get list of address space areas.
2162 *
[da1bafb]2163 * @param as Address space.
 2164 * @param osize Place to save size of returned buffer.
 2165 * @return Newly allocated buffer with info on address space areas.
2166 *
[336db295]2167 */
[b389f95]2168as_area_info_t *as_get_area_info(as_t *as, size_t *osize)
[336db295]2169{
2170 mutex_lock(&as->lock);
[a35b458]2171
[88cc71c0]2172 /* Count number of areas. */
2173 size_t area_cnt = odict_count(&as->as_areas);
[a35b458]2174
[da1bafb]2175 size_t isize = area_cnt * sizeof(as_area_info_t);
[b389f95]2176 as_area_info_t *info = malloc(isize);
2177 if (!info) {
2178 mutex_unlock(&as->lock);
2179 return NULL;
2180 }
[a35b458]2181
[88cc71c0]2182 /* Record area data. */
[a35b458]2183
[da1bafb]2184 size_t area_idx = 0;
[a35b458]2185
[88cc71c0]2186 as_area_t *area = as_area_first(as);
2187 while (area != NULL) {
2188 assert(area_idx < area_cnt);
2189 mutex_lock(&area->lock);
[a35b458]2190
[88cc71c0]2191 info[area_idx].start_addr = area->base;
2192 info[area_idx].size = P2SZ(area->pages);
2193 info[area_idx].flags = area->flags;
2194 ++area_idx;
[a35b458]2195
[88cc71c0]2196 mutex_unlock(&area->lock);
2197 area = as_area_next(area);
[336db295]2198 }
[a35b458]2199
[336db295]2200 mutex_unlock(&as->lock);
[a35b458]2201
[336db295]2202 *osize = isize;
[b389f95]2203 return info;
[336db295]2204}
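
/*
 * Example (a minimal sketch, not part of the original file): a typical
 * consumer of as_get_area_info(). The buffer is allocated with malloc(),
 * so the caller is expected to release it with free() when done.
 *
 *	size_t size;
 *	as_area_info_t *info = as_get_area_info(AS, &size);
 *	if (info != NULL) {
 *		size_t cnt = size / sizeof(as_area_info_t);
 *		for (size_t i = 0; i < cnt; i++)
 *			printf("area %p, %zu bytes\n",
 *			    (void *) info[i].start_addr, info[i].size);
 *		free(info);
 *	}
 */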
2205
[64c2ad5]2206/** Print out information about address space.
2207 *
[da1bafb]2208 * @param as Address space.
2209 *
[64c2ad5]2210 */
2211void as_print(as_t *as)
2212{
2213 mutex_lock(&as->lock);
[a35b458]2214
[0b37882]2215 /* Print out info about address space areas */
[88cc71c0]2216 as_area_t *area = as_area_first(as);
2217 while (area != NULL) {
2218 mutex_lock(&area->lock);
2219 printf("as_area: %p, base=%p, pages=%zu"
2220 " (%p - %p)\n", area, (void *) area->base,
2221 area->pages, (void *) area->base,
2222 (void *) (area->base + P2SZ(area->pages)));
2223 mutex_unlock(&area->lock);
[a35b458]2224
[88cc71c0]2225 area = as_area_next(area);
[64c2ad5]2226 }
[a35b458]2227
[64c2ad5]2228 mutex_unlock(&as->lock);
2229}
2230
[cc73a8a1]2231/** @}
[b45c443]2232 */