source: mainline/kernel/generic/src/mm/as.c@ae7d03c

Last change on this file since ae7d03c was ae7d03c, checked in by Jiri Svoboda <jiri@…>, 7 years ago

Selected ccheck-proposed comment fixes.

1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <preemption.h>
61#include <synch/spinlock.h>
62#include <synch/mutex.h>
63#include <adt/list.h>
64#include <adt/btree.h>
65#include <proc/task.h>
66#include <proc/thread.h>
67#include <arch/asm.h>
68#include <panic.h>
69#include <assert.h>
70#include <print.h>
71#include <mem.h>
72#include <macros.h>
73#include <bitops.h>
74#include <arch.h>
75#include <errno.h>
76#include <config.h>
77#include <align.h>
78#include <typedefs.h>
79#include <syscall/copy.h>
80#include <arch/interrupt.h>
81#include <interrupt.h>
82
83/**
84 * Each architecture decides what functions will be used to carry out
85 * address space operations such as creating or locking page tables.
86 */
87as_operations_t *as_operations = NULL;
88
89/** Slab for as_t objects.
90 *
91 */
92static slab_cache_t *as_cache;
93
94/** ASID subsystem lock.
95 *
96 * This lock protects:
97 * - inactive_as_with_asid_list
98 * - as->asid for each address space (as_t)
99 * - asids_allocated counter
100 *
101 */
102SPINLOCK_INITIALIZE(asidlock);
103
104/**
105 * Inactive address spaces (on all processors)
106 * that have a valid ASID.
107 */
108LIST_INITIALIZE(inactive_as_with_asid_list);
109
110/** Kernel address space. */
111as_t *AS_KERNEL = NULL;
112
113NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags)
114{
115 as_t *as = (as_t *) obj;
116
117 link_initialize(&as->inactive_as_with_asid_link);
118 mutex_initialize(&as->lock, MUTEX_PASSIVE);
119
120 return as_constructor_arch(as, flags);
121}
122
123NO_TRACE static size_t as_destructor(void *obj)
124{
125 return as_destructor_arch((as_t *) obj);
126}
127
128/** Initialize address space subsystem. */
129void as_init(void)
130{
131 as_arch_init();
132
133 as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
134 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
135
136 AS_KERNEL = as_create(FLAG_AS_KERNEL);
137 if (!AS_KERNEL)
138 panic("Cannot create kernel address space.");
139
140 /*
141 * Make sure the kernel address space
142 * reference count never drops to zero.
143 */
144 as_hold(AS_KERNEL);
145}
146
147/** Create address space.
148 *
149 * @param flags Flags that influence the way in which the address
150 * space is created.
151 *
152 */
153as_t *as_create(unsigned int flags)
154{
155 as_t *as = (as_t *) slab_alloc(as_cache, 0);
156 (void) as_create_arch(as, 0);
157
158 btree_create(&as->as_area_btree);
159
160 if (flags & FLAG_AS_KERNEL)
161 as->asid = ASID_KERNEL;
162 else
163 as->asid = ASID_INVALID;
164
165 atomic_set(&as->refcount, 0);
166 as->cpu_refcount = 0;
167
168#ifdef AS_PAGE_TABLE
169 as->genarch.page_table = page_table_create(flags);
170#else
171 page_table_create(flags);
172#endif
173
174 return as;
175}
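/*
 * Illustrative sketch (not part of the original file): the intended
 * reference-counting lifecycle of an address space. The function name
 * is hypothetical and error handling is omitted.
 */
static void example_as_lifecycle(void)
{
	/* Create a user address space; its ASID is assigned lazily. */
	as_t *as = as_create(0);

	/* A task holding the address space takes a reference... */
	as_hold(as);

	/* ... and the last as_release() call triggers as_destroy(). */
	as_release(as);
}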
176
177/** Destroy address space.
178 *
179 * When there are no tasks referencing this address space (i.e. its refcount is
180 * zero), the address space can be destroyed.
181 *
182 * We know that we don't hold any spinlock.
183 *
184 * @param as Address space to be destroyed.
185 *
186 */
187void as_destroy(as_t *as)
188{
189 DEADLOCK_PROBE_INIT(p_asidlock);
190
191 assert(as != AS);
192 assert(atomic_get(&as->refcount) == 0);
193
194 /*
195 * Since there is no reference to this address space, it is safe not to
196 * lock its mutex.
197 */
198
199 /*
200 * We need to avoid deadlock between TLB shootdown and asidlock.
201 * We therefore try to take the asidlock conditionally and if we don't succeed,
202 * we enable interrupts and try again. This is done while preemption is
203 * disabled to prevent nested context switches. We also depend on the
204 * fact that so far no spinlocks are held.
205 */
206 preemption_disable();
207 ipl_t ipl = interrupts_read();
208
209retry:
210 interrupts_disable();
211 if (!spinlock_trylock(&asidlock)) {
212 interrupts_enable();
213 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
214 goto retry;
215 }
216
217 /* Interrupts disabled, enable preemption */
218 preemption_enable();
219
220 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
221 if (as->cpu_refcount == 0)
222 list_remove(&as->inactive_as_with_asid_link);
223
224 asid_put(as->asid);
225 }
226
227 spinlock_unlock(&asidlock);
228 interrupts_restore(ipl);
229
230
231 /*
232 * Destroy address space areas of the address space.
233 * The B+tree must be walked carefully because it is
234 * also being destroyed.
235 */
236 bool cond = true;
237 while (cond) {
238 assert(!list_empty(&as->as_area_btree.leaf_list));
239
240 btree_node_t *node =
241 list_get_instance(list_first(&as->as_area_btree.leaf_list),
242 btree_node_t, leaf_link);
243
244 if ((cond = node->keys))
245 as_area_destroy(as, node->key[0]);
246 }
247
248 btree_destroy(&as->as_area_btree);
249
250#ifdef AS_PAGE_TABLE
251 page_table_destroy(as->genarch.page_table);
252#else
253 page_table_destroy(NULL);
254#endif
255
256 slab_free(as_cache, as);
257}
258
259/** Hold a reference to an address space.
260 *
261 * Holding a reference to an address space prevents destruction
262 * of that address space.
263 *
264 * @param as Address space to be held.
265 *
266 */
267NO_TRACE void as_hold(as_t *as)
268{
269 atomic_inc(&as->refcount);
270}
271
272/** Release a reference to an address space.
273 *
274 * The last one to release a reference to an address space
275 * destroys the address space.
276 *
277 * @param as Address space to be released.
278 *
279 */
280NO_TRACE void as_release(as_t *as)
281{
282 if (atomic_predec(&as->refcount) == 0)
283 as_destroy(as);
284}
285
286/** Check area for conflicts with other areas.
287 *
288 * @param as Address space.
289 * @param addr Starting virtual address of the area being tested.
290 * @param count Number of pages in the area being tested.
291 * @param guarded True if the area being tested is protected by guard pages.
292 * @param avoid Do not touch this area.
293 *
294 * @return True if there is no conflict, false otherwise.
295 *
296 */
297NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
298 size_t count, bool guarded, as_area_t *avoid)
299{
300 assert((addr % PAGE_SIZE) == 0);
301 assert(mutex_locked(&as->lock));
302
303 /*
304 * If the addition of the supposed area address and size overflows,
305 * report conflict.
306 */
307 if (overflows_into_positive(addr, P2SZ(count)))
308 return false;
309
310 /*
311 * We don't want any area to have conflicts with NULL page.
312 */
313 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
314 return false;
315
316 /*
317 * The leaf node is found in O(log n), where n is proportional to
318 * the number of address space areas belonging to as.
319 * The check for conflicts is then attempted on the rightmost
320 * record in the left neighbour, the leftmost record in the right
321 * neighbour and all records in the leaf node itself.
322 */
323 btree_node_t *leaf;
324 as_area_t *area =
325 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
326 if (area) {
327 if (area != avoid)
328 return false;
329 }
330
331 /* First, check the two border cases. */
332 btree_node_t *node =
333 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
334 if (node) {
335 area = (as_area_t *) node->value[node->keys - 1];
336
337 if (area != avoid) {
338 mutex_lock(&area->lock);
339
340 /*
341 * If at least one of the two areas is protected
342 * by the AS_AREA_GUARD flag then we must be sure
343 * that they are separated by at least one unmapped
344 * page.
345 */
346 int const gp = (guarded ||
347 (area->flags & AS_AREA_GUARD)) ? 1 : 0;
348
349 /*
350 * The area comes from the left neighbour node, which
351 * means that there already are some areas in the leaf
352 * node, which in turn means that adding gp is safe and
353 * will not cause an integer overflow.
354 */
355 if (overlaps(addr, P2SZ(count), area->base,
356 P2SZ(area->pages + gp))) {
357 mutex_unlock(&area->lock);
358 return false;
359 }
360
361 mutex_unlock(&area->lock);
362 }
363 }
364
365 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
366 if (node) {
367 area = (as_area_t *) node->value[0];
368
369 if (area != avoid) {
370 int gp;
371
372 mutex_lock(&area->lock);
373
374 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
375 if (gp && overflows(addr, P2SZ(count))) {
376 /*
377 * Guard page not needed if the supposed area
378 * is adjacent to the end of the address space.
379 * We already know that the following test is
380 * going to fail...
381 */
382 gp--;
383 }
384
385 if (overlaps(addr, P2SZ(count + gp), area->base,
386 P2SZ(area->pages))) {
387 mutex_unlock(&area->lock);
388 return false;
389 }
390
391 mutex_unlock(&area->lock);
392 }
393 }
394
395 /* Second, check the leaf node. */
396 btree_key_t i;
397 for (i = 0; i < leaf->keys; i++) {
398 area = (as_area_t *) leaf->value[i];
399 int agp;
400 int gp;
401
402 if (area == avoid)
403 continue;
404
405 mutex_lock(&area->lock);
406
407 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
408 agp = gp;
409
410 /*
411 * Sanitize the two possible unsigned integer overflows.
412 */
413 if (gp && overflows(addr, P2SZ(count)))
414 gp--;
415 if (agp && overflows(area->base, P2SZ(area->pages)))
416 agp--;
417
418 if (overlaps(addr, P2SZ(count + gp), area->base,
419 P2SZ(area->pages + agp))) {
420 mutex_unlock(&area->lock);
421 return false;
422 }
423
424 mutex_unlock(&area->lock);
425 }
426
427 /*
428 * So far, the area does not conflict with other areas.
429 * Check if it is contained in the user address space.
430 */
431 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
432 return iswithin(USER_ADDRESS_SPACE_START,
433 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
434 addr, P2SZ(count));
435 }
436
437 return true;
438}
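/*
 * A minimal model (a sketch, not part of the original file) of the
 * guard-page arithmetic used above, assuming the usual semantics of
 * the overlaps() macro: two intervals intersect iff each one starts
 * below the other's end. Enlarging the neighbour by gp pages makes a
 * candidate that would touch the guard page count as a conflict.
 * The helper name is hypothetical.
 */
static bool example_guarded_conflict(uintptr_t addr, size_t count,
    uintptr_t base, size_t pages, bool any_guard)
{
	size_t gp = any_guard ? 1 : 0;

	return overlaps(addr, P2SZ(count), base, P2SZ(pages + gp));
}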
439
440/** Return address of an unmapped address space area.
441 *
442 * The address space must be already locked when calling
443 * this function.
444 *
445 * @param as Address space.
446 * @param bound Lowest address bound.
447 * @param size Requested size of the allocation.
448 * @param guarded True if the allocation must be protected by guard pages.
449 *
450 * @return Address of the beginning of an unmapped address space area.
451 * @return -1 if no suitable address space area was found.
452 *
453 */
454NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
455 size_t size, bool guarded)
456{
457 assert(mutex_locked(&as->lock));
458
459 if (size == 0)
460 return (uintptr_t) -1;
461
462 /*
463 * Make sure we allocate from page-aligned
464 * address. Check for possible overflow in
465 * each step.
466 */
467
468 size_t pages = SIZE2FRAMES(size);
469
470 /*
471 * Find the lowest unmapped address aligned on the size
472 * boundary, not smaller than bound and of the required size.
473 */
474
475 /* First check the bound address itself */
476 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
477 if (addr >= bound) {
478 if (guarded) {
479 /* Leave an unmapped page between the lower
480 * bound and the area's start address.
481 */
482 addr += P2SZ(1);
483 }
484
485 if (check_area_conflicts(as, addr, pages, guarded, NULL))
486 return addr;
487 }
488
489 /* Otherwise, check the addresses behind each area */
490 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
491
492 for (btree_key_t i = 0; i < node->keys; i++) {
493 as_area_t *area = (as_area_t *) node->value[i];
494
495 mutex_lock(&area->lock);
496
497 addr =
498 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
499
500 if (guarded || area->flags & AS_AREA_GUARD) {
501 /* We must leave an unmapped page
502 * between the two areas.
503 */
504 addr += P2SZ(1);
505 }
506
507 bool avail =
508 ((addr >= bound) && (addr >= area->base) &&
509 (check_area_conflicts(as, addr, pages, guarded, area)));
510
511 mutex_unlock(&area->lock);
512
513 if (avail)
514 return addr;
515 }
516 }
517
518 /* No suitable address space area found */
519 return (uintptr_t) -1;
520}
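/*
 * Worked example of the scan above (a sketch assuming 4 KiB pages):
 * for an existing guarded area at base 0x10000 spanning 4 pages, the
 * candidate address tried behind it is
 *
 *   ALIGN_UP(0x10000 + P2SZ(4), PAGE_SIZE) + P2SZ(1) = 0x15000
 *
 * i.e. one unmapped guard page is left between the two areas before
 * check_area_conflicts() is consulted.
 */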
521
522/** Remove reference to address space area share info.
523 *
524 * If the reference count drops to 0, the sh_info is deallocated.
525 *
526 * @param sh_info Pointer to address space area share info.
527 *
528 */
529NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
530{
531 bool dealloc = false;
532
533 mutex_lock(&sh_info->lock);
534 assert(sh_info->refcount);
535
536 if (--sh_info->refcount == 0) {
537 dealloc = true;
538
539 /*
540 * Now walk carefully the pagemap B+tree and free/remove
541 * reference from all frames found there.
542 */
543 list_foreach(sh_info->pagemap.leaf_list, leaf_link,
544 btree_node_t, node) {
545 btree_key_t i;
546
547 for (i = 0; i < node->keys; i++)
548 frame_free((uintptr_t) node->value[i], 1);
549 }
550
551 }
552 mutex_unlock(&sh_info->lock);
553
554 if (dealloc) {
555 if (sh_info->backend && sh_info->backend->destroy_shared_data) {
556 sh_info->backend->destroy_shared_data(
557 sh_info->backend_shared_data);
558 }
559 btree_destroy(&sh_info->pagemap);
560 free(sh_info);
561 }
562}
563
564
565/** Create address space area of common attributes.
566 *
567 * The created address space area is added to the target address space.
568 *
569 * @param as Target address space.
570 * @param flags Flags of the area memory.
571 * @param size Size of area.
572 * @param attrs Attributes of the area.
573 * @param backend Address space area backend. NULL if no backend is used.
574 * @param backend_data NULL or a pointer to custom backend data.
575 * @param base Starting virtual address of the area.
576 * If set to AS_AREA_ANY, a suitable mappable area is
577 * found.
578 * @param bound Lowest address bound if base is set to AS_AREA_ANY.
579 * Otherwise ignored.
580 *
581 * @return Address space area on success or NULL on failure.
582 *
583 */
584as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
585 unsigned int attrs, mem_backend_t *backend,
586 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
587{
588 if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
589 return NULL;
590
591 if (size == 0)
592 return NULL;
593
594 size_t pages = SIZE2FRAMES(size);
595
596 /* Writeable executable areas are not supported. */
597 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
598 return NULL;
599
600 bool const guarded = flags & AS_AREA_GUARD;
601
602 mutex_lock(&as->lock);
603
604 if (*base == (uintptr_t) AS_AREA_ANY) {
605 *base = as_get_unmapped_area(as, bound, size, guarded);
606 if (*base == (uintptr_t) -1) {
607 mutex_unlock(&as->lock);
608 return NULL;
609 }
610 }
611
612 if (overflows_into_positive(*base, size)) {
613 mutex_unlock(&as->lock);
614 return NULL;
615 }
616
617 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
618 mutex_unlock(&as->lock);
619 return NULL;
620 }
621
622 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
623
624 mutex_initialize(&area->lock, MUTEX_PASSIVE);
625
626 area->as = as;
627 area->flags = flags;
628 area->attributes = attrs;
629 area->pages = pages;
630 area->resident = 0;
631 area->base = *base;
632 area->backend = backend;
633 area->sh_info = NULL;
634
635 if (backend_data)
636 area->backend_data = *backend_data;
637 else
638 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
639
640 share_info_t *si = NULL;
641
642 /*
643 * Create the sharing info structure.
644 * We do this in advance for every new area, even if it is not going
645 * to be shared.
646 */
647 if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
648 si = (share_info_t *) malloc(sizeof(share_info_t), 0);
649 mutex_initialize(&si->lock, MUTEX_PASSIVE);
650 si->refcount = 1;
651 si->shared = false;
652 si->backend_shared_data = NULL;
653 si->backend = backend;
654 btree_create(&si->pagemap);
655
656 area->sh_info = si;
657
658 if (area->backend && area->backend->create_shared_data) {
659 if (!area->backend->create_shared_data(area)) {
660 free(area);
661 mutex_unlock(&as->lock);
662 sh_info_remove_reference(si);
663 return NULL;
664 }
665 }
666 }
667
668 if (area->backend && area->backend->create) {
669 if (!area->backend->create(area)) {
670 free(area);
671 mutex_unlock(&as->lock);
672 if (!(attrs & AS_AREA_ATTR_PARTIAL))
673 sh_info_remove_reference(si);
674 return NULL;
675 }
676 }
677
678 btree_create(&area->used_space);
679 btree_insert(&as->as_area_btree, *base, (void *) area,
680 NULL);
681
682 mutex_unlock(&as->lock);
683
684 return area;
685}
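/*
 * Illustrative sketch (not part of the original file): creating a
 * one-page anonymous, read/write, cacheable area at a kernel-chosen
 * address. The helper name is hypothetical.
 */
static as_area_t *example_create_anon_area(as_t *as)
{
	uintptr_t virt = (uintptr_t) AS_AREA_ANY;

	return as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
}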
686
687/** Find address space area and lock it.
688 *
689 * @param as Address space.
690 * @param va Virtual address.
691 *
692 * @return Locked address space area containing va on success or
693 * NULL on failure.
694 *
695 */
696NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
697{
698 assert(mutex_locked(&as->lock));
699
700 btree_node_t *leaf;
701 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
702 &leaf);
703 if (area) {
704 /* va is the base address of an address space area */
705 mutex_lock(&area->lock);
706 return area;
707 }
708
709 /*
710 * Search the leaf node and the rightmost record of its left neighbour
711 * to find out whether this is a miss or va belongs to an address
712 * space area found there.
713 */
714
715 /* First, search the leaf node itself. */
716 btree_key_t i;
717
718 for (i = 0; i < leaf->keys; i++) {
719 area = (as_area_t *) leaf->value[i];
720
721 mutex_lock(&area->lock);
722
723 if ((area->base <= va) &&
724 (va <= area->base + (P2SZ(area->pages) - 1)))
725 return area;
726
727 mutex_unlock(&area->lock);
728 }
729
730 /*
731 * Second, locate the left neighbour and test its last record.
732 * Because of its position in the B+tree, it must have base < va.
733 */
734 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
735 leaf);
736 if (lnode) {
737 area = (as_area_t *) lnode->value[lnode->keys - 1];
738
739 mutex_lock(&area->lock);
740
741 if (va <= area->base + (P2SZ(area->pages) - 1))
742 return area;
743
744 mutex_unlock(&area->lock);
745 }
746
747 return NULL;
748}
749
750/** Find address space area and change it.
751 *
752 * @param as Address space.
753 * @param address Virtual address belonging to the area to be changed.
754 * Must be page-aligned.
755 * @param size New size of the virtual memory block starting at
756 * address.
757 * @param flags Flags influencing the remap operation. Currently unused.
758 *
759 * @return Zero on success or a value from @ref errno.h otherwise.
760 *
761 */
762errno_t as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
763{
764 if (!IS_ALIGNED(address, PAGE_SIZE))
765 return EINVAL;
766
767 mutex_lock(&as->lock);
768
769 /*
770 * Locate the area.
771 */
772 as_area_t *area = find_area_and_lock(as, address);
773 if (!area) {
774 mutex_unlock(&as->lock);
775 return ENOENT;
776 }
777
778 if (!area->backend->is_resizable(area)) {
779 /*
780 * The backend does not support resizing for this area.
781 */
782 mutex_unlock(&area->lock);
783 mutex_unlock(&as->lock);
784 return ENOTSUP;
785 }
786
787 mutex_lock(&area->sh_info->lock);
788 if (area->sh_info->shared) {
789 /*
790 * Remapping of shared address space areas
791 * is not supported.
792 */
793 mutex_unlock(&area->sh_info->lock);
794 mutex_unlock(&area->lock);
795 mutex_unlock(&as->lock);
796 return ENOTSUP;
797 }
798 mutex_unlock(&area->sh_info->lock);
799
800 size_t pages = SIZE2FRAMES((address - area->base) + size);
801 if (!pages) {
802 /*
803 * Zero size address space areas are not allowed.
804 */
805 mutex_unlock(&area->lock);
806 mutex_unlock(&as->lock);
807 return EPERM;
808 }
809
810 if (pages < area->pages) {
811 uintptr_t start_free = area->base + P2SZ(pages);
812
813 /*
814 * Shrinking the area.
815 * No need to check for overlaps.
816 */
817
818 page_table_lock(as, false);
819
820 /*
821 * Remove frames belonging to used space starting from
822 * the highest addresses downwards until an overlap with
823 * the resized address space area is found. Note that this
824 * is also the right way to remove part of the used_space
825 * B+tree leaf list.
826 */
827 bool cond = true;
828 while (cond) {
829 assert(!list_empty(&area->used_space.leaf_list));
830
831 btree_node_t *node =
832 list_get_instance(list_last(&area->used_space.leaf_list),
833 btree_node_t, leaf_link);
834
835 if ((cond = (node->keys != 0))) {
836 uintptr_t ptr = node->key[node->keys - 1];
837 size_t node_size =
838 (size_t) node->value[node->keys - 1];
839 size_t i = 0;
840
841 if (overlaps(ptr, P2SZ(node_size), area->base,
842 P2SZ(pages))) {
843
844 if (ptr + P2SZ(node_size) <= start_free) {
845 /*
846 * The whole interval fits
847 * completely in the resized
848 * address space area.
849 */
850 break;
851 }
852
853 /*
854 * Part of the interval overlaps with the
855 * resized address space area; only the
856 * overlapping tail is removed below.
857 */
858
859 /* We are almost done */
860 cond = false;
861 i = (start_free - ptr) >> PAGE_WIDTH;
862 if (!used_space_remove(area, start_free,
863 node_size - i))
864 panic("Cannot remove used space.");
865 } else {
866 /*
867 * The interval of used space can be
868 * completely removed.
869 */
870 if (!used_space_remove(area, ptr, node_size))
871 panic("Cannot remove used space.");
872 }
873
874 /*
875 * Start TLB shootdown sequence.
876 *
877 * The sequence is rather short and can be
878 * repeated multiple times. The reason is that
879 * we don't want to have used_space_remove()
880 * inside the sequence as it may use a blocking
881 * memory allocation for its B+tree. Blocking
882 * while holding the tlblock spinlock is
883 * forbidden and would hit a kernel assertion.
884 */
885
886 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
887 as->asid, area->base + P2SZ(pages),
888 area->pages - pages);
889
890 for (; i < node_size; i++) {
891 pte_t pte;
892 bool found = page_mapping_find(as,
893 ptr + P2SZ(i), false, &pte);
894
895 assert(found);
896 assert(PTE_VALID(&pte));
897 assert(PTE_PRESENT(&pte));
898
899 if ((area->backend) &&
900 (area->backend->frame_free)) {
901 area->backend->frame_free(area,
902 ptr + P2SZ(i),
903 PTE_GET_FRAME(&pte));
904 }
905
906 page_mapping_remove(as, ptr + P2SZ(i));
907 }
908
909 /*
910 * Finish TLB shootdown sequence.
911 */
912
913 tlb_invalidate_pages(as->asid,
914 area->base + P2SZ(pages),
915 area->pages - pages);
916
917 /*
918 * Invalidate software translation caches
919 * (e.g. TSB on sparc64, PHT on ppc32).
920 */
921 as_invalidate_translation_cache(as,
922 area->base + P2SZ(pages),
923 area->pages - pages);
924 tlb_shootdown_finalize(ipl);
925 }
926 }
927 page_table_unlock(as, false);
928 } else {
929 /*
930 * Growing the area.
931 */
932
933 if (overflows_into_positive(address, P2SZ(pages)))
934 return EINVAL;
935
936 /*
937 * Check for overlaps with other address space areas.
938 */
939 bool const guarded = area->flags & AS_AREA_GUARD;
940 if (!check_area_conflicts(as, address, pages, guarded, area)) {
941 mutex_unlock(&area->lock);
942 mutex_unlock(&as->lock);
943 return EADDRNOTAVAIL;
944 }
945 }
946
947 if (area->backend && area->backend->resize) {
948 if (!area->backend->resize(area, pages)) {
949 mutex_unlock(&area->lock);
950 mutex_unlock(&as->lock);
951 return ENOMEM;
952 }
953 }
954
955 area->pages = pages;
956
957 mutex_unlock(&area->lock);
958 mutex_unlock(&as->lock);
959
960 return 0;
961}
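/*
 * Worked example of the page count computation above (a sketch
 * assuming 4 KiB pages): with area->base = 0x1000, a call with
 * address = 0x3000 and size = 0x2800 yields
 *
 *   pages = SIZE2FRAMES((0x3000 - 0x1000) + 0x2800)
 *         = SIZE2FRAMES(0x4800) = 5
 *
 * so the area is resized to end at 0x1000 + P2SZ(5) = 0x6000.
 */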
962
963/** Destroy address space area.
964 *
965 * @param as Address space.
966 * @param address Address within the area to be deleted.
967 *
968 * @return Zero on success or a value from @ref errno.h on failure.
969 *
970 */
971errno_t as_area_destroy(as_t *as, uintptr_t address)
972{
973 mutex_lock(&as->lock);
974
975 as_area_t *area = find_area_and_lock(as, address);
976 if (!area) {
977 mutex_unlock(&as->lock);
978 return ENOENT;
979 }
980
981 if (area->backend && area->backend->destroy)
982 area->backend->destroy(area);
983
984 uintptr_t base = area->base;
985
986 page_table_lock(as, false);
987
988 /*
989 * Start TLB shootdown sequence.
990 */
991 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
992 area->pages);
993
994 /*
995 * Visit only the pages mapped by used_space B+tree.
996 */
997 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
998 node) {
999 btree_key_t i;
1000
1001 for (i = 0; i < node->keys; i++) {
1002 uintptr_t ptr = node->key[i];
1003 size_t size;
1004
1005 for (size = 0; size < (size_t) node->value[i]; size++) {
1006 pte_t pte;
1007 bool found = page_mapping_find(as,
1008 ptr + P2SZ(size), false, &pte);
1009
1010 assert(found);
1011 assert(PTE_VALID(&pte));
1012 assert(PTE_PRESENT(&pte));
1013
1014 if ((area->backend) &&
1015 (area->backend->frame_free)) {
1016 area->backend->frame_free(area,
1017 ptr + P2SZ(size),
1018 PTE_GET_FRAME(&pte));
1019 }
1020
1021 page_mapping_remove(as, ptr + P2SZ(size));
1022 }
1023 }
1024 }
1025
1026 /*
1027 * Finish TLB shootdown sequence.
1028 */
1029
1030 tlb_invalidate_pages(as->asid, area->base, area->pages);
1031
1032 /*
1033 * Invalidate potential software translation caches
1034 * (e.g. TSB on sparc64, PHT on ppc32).
1035 */
1036 as_invalidate_translation_cache(as, area->base, area->pages);
1037 tlb_shootdown_finalize(ipl);
1038
1039 page_table_unlock(as, false);
1040
1041 btree_destroy(&area->used_space);
1042
1043 area->attributes |= AS_AREA_ATTR_PARTIAL;
1044
1045 sh_info_remove_reference(area->sh_info);
1046
1047 mutex_unlock(&area->lock);
1048
1049 /*
1050 * Remove the empty area from address space.
1051 */
1052 btree_remove(&as->as_area_btree, base, NULL);
1053
1054 free(area);
1055
1056 mutex_unlock(&as->lock);
1057 return 0;
1058}
1059
1060/** Share address space area with another or the same address space.
1061 *
1062 * Address space area mapping is shared with a new address space area.
1063 * If the source address space area has not been shared so far,
1064 * a new sh_info is created. The new address space area simply gets the
1065 * sh_info of the source area. The process of duplicating the
1066 * mapping is done through the backend share function.
1067 *
1068 * @param src_as Pointer to source address space.
1069 * @param src_base Base address of the source address space area.
1070 * @param acc_size Expected size of the source area.
1071 * @param dst_as Pointer to destination address space.
1072 * @param dst_flags_mask Destination address space area flags mask.
1073 * @param dst_base Target base address. If set to -1,
1074 * a suitable mappable area is found.
1075 * @param bound Lowest address bound if dst_base is set to -1.
1076 * Otherwise ignored.
1077 *
1078 * @return Zero on success.
1079 * @return ENOENT if there is no such address space area.
1080 * @return EPERM if there was a problem in accepting the area.
1081 * @return ENOMEM if there was a problem in allocating destination
1082 * address space area.
1083 * @return ENOTSUP if the address space area backend does not support
1084 * sharing.
1085 *
1086 */
1087errno_t as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
1088 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
1089 uintptr_t bound)
1090{
1091 mutex_lock(&src_as->lock);
1092 as_area_t *src_area = find_area_and_lock(src_as, src_base);
1093 if (!src_area) {
1094 /*
1095 * Could not find the source address space area.
1096 */
1097 mutex_unlock(&src_as->lock);
1098 return ENOENT;
1099 }
1100
1101 if (!src_area->backend->is_shareable(src_area)) {
1102 /*
1103 * The backend does not permit sharing of this area.
1104 */
1105 mutex_unlock(&src_area->lock);
1106 mutex_unlock(&src_as->lock);
1107 return ENOTSUP;
1108 }
1109
1110 size_t src_size = P2SZ(src_area->pages);
1111 unsigned int src_flags = src_area->flags;
1112 mem_backend_t *src_backend = src_area->backend;
1113 mem_backend_data_t src_backend_data = src_area->backend_data;
1114
1115 /* Share the cacheable flag from the original mapping */
1116 if (src_flags & AS_AREA_CACHEABLE)
1117 dst_flags_mask |= AS_AREA_CACHEABLE;
1118
1119 if ((src_size != acc_size) ||
1120 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
1121 mutex_unlock(&src_area->lock);
1122 mutex_unlock(&src_as->lock);
1123 return EPERM;
1124 }
1125
1126 /*
1127 * Now we are committed to sharing the area.
1128 * First, prepare the area for sharing.
1129 * Then it will be safe to unlock it.
1130 */
1131 share_info_t *sh_info = src_area->sh_info;
1132
1133 mutex_lock(&sh_info->lock);
1134 sh_info->refcount++;
1135 bool shared = sh_info->shared;
1136 sh_info->shared = true;
1137 mutex_unlock(&sh_info->lock);
1138
1139 if (!shared) {
1140 /*
1141 * Call the backend to setup sharing.
1142 * This only happens once for each sh_info.
1143 */
1144 src_area->backend->share(src_area);
1145 }
1146
1147 mutex_unlock(&src_area->lock);
1148 mutex_unlock(&src_as->lock);
1149
1150 /*
1151 * Create copy of the source address space area.
1152 * The destination area is created with AS_AREA_ATTR_PARTIAL
1153 * attribute set which prevents race condition with
1154 * preliminary as_page_fault() calls.
1155 * The flags of the source area are masked against dst_flags_mask
1156 * to support sharing in less privileged mode.
1157 */
1158 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
1159 src_size, AS_AREA_ATTR_PARTIAL, src_backend,
1160 &src_backend_data, dst_base, bound);
1161 if (!dst_area) {
1162 /*
1163 * Destination address space area could not be created.
1164 */
1165 sh_info_remove_reference(sh_info);
1166
1167 return ENOMEM;
1168 }
1169
1170 /*
1171 * Now the destination address space area has been
1172 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1173 * attribute and set the sh_info.
1174 */
1175 mutex_lock(&dst_as->lock);
1176 mutex_lock(&dst_area->lock);
1177 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1178 dst_area->sh_info = sh_info;
1179 mutex_unlock(&dst_area->lock);
1180 mutex_unlock(&dst_as->lock);
1181
1182 return 0;
1183}
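/*
 * Illustrative sketch (not part of the original file): sharing a whole
 * source area read-only into another address space at a kernel-chosen
 * address. Note that acc_size must match the source area size exactly.
 * The helper name is hypothetical.
 */
static errno_t example_share_readonly(as_t *src_as, uintptr_t src_base,
    size_t src_size, as_t *dst_as, uintptr_t *dst_base)
{
	/* Per the doc comment above, -1 asks for a suitable mappable address. */
	*dst_base = (uintptr_t) -1;

	return as_area_share(src_as, src_base, src_size, dst_as,
	    AS_AREA_READ | AS_AREA_CACHEABLE, dst_base, 0);
}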
1184
1185/** Check access mode for address space area.
1186 *
1187 * @param area Address space area.
1188 * @param access Access mode.
1189 *
1190 * @return False if access violates area's permissions, true
1191 * otherwise.
1192 *
1193 */
1194NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
1195{
1196 assert(mutex_locked(&area->lock));
1197
1198 int flagmap[] = {
1199 [PF_ACCESS_READ] = AS_AREA_READ,
1200 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
1201 [PF_ACCESS_EXEC] = AS_AREA_EXEC
1202 };
1203
1204 if (!(area->flags & flagmap[access]))
1205 return false;
1206
1207 return true;
1208}
1209
1210/** Convert address space area flags to page flags.
1211 *
1212 * @param aflags Flags of some address space area.
1213 *
1214 * @return Flags to be passed to page_mapping_insert().
1215 *
1216 */
1217NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
1218{
1219 unsigned int flags = PAGE_USER | PAGE_PRESENT;
1220
1221 if (aflags & AS_AREA_READ)
1222 flags |= PAGE_READ;
1223
1224 if (aflags & AS_AREA_WRITE)
1225 flags |= PAGE_WRITE;
1226
1227 if (aflags & AS_AREA_EXEC)
1228 flags |= PAGE_EXEC;
1229
1230 if (aflags & AS_AREA_CACHEABLE)
1231 flags |= PAGE_CACHEABLE;
1232
1233 return flags;
1234}
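/*
 * Example translation (a sketch, not part of the original file): a
 * read/execute, cacheable area maps to page flags suitable for text
 * pages. The helper name is hypothetical.
 */
static unsigned int example_text_page_flags(void)
{
	/* PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC | PAGE_CACHEABLE */
	return area_flags_to_page_flags(AS_AREA_READ | AS_AREA_EXEC |
	    AS_AREA_CACHEABLE);
}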
1235
1236/** Change address space area flags.
1237 *
1238 * The idea is to have the same data, but with a different access mode.
1239 * This is needed e.g. for writing code into memory and then executing it.
1240 * In order for this to work properly, this may copy the data
1241 * into private anonymous memory (unless it's already there).
1242 *
1243 * @param as Address space.
1244 * @param flags Flags of the area memory.
1245 * @param address Address within the area to be changed.
1246 *
1247 * @return Zero on success or a value from @ref errno.h on failure.
1248 *
1249 */
1250errno_t as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
1251{
1252 /* Flags for the new memory mapping */
1253 unsigned int page_flags = area_flags_to_page_flags(flags);
1254
1255 mutex_lock(&as->lock);
1256
1257 as_area_t *area = find_area_and_lock(as, address);
1258 if (!area) {
1259 mutex_unlock(&as->lock);
1260 return ENOENT;
1261 }
1262
1263 if (area->backend != &anon_backend) {
1264 /* Copying non-anonymous memory not supported yet */
1265 mutex_unlock(&area->lock);
1266 mutex_unlock(&as->lock);
1267 return ENOTSUP;
1268 }
1269
1270 mutex_lock(&area->sh_info->lock);
1271 if (area->sh_info->shared) {
1272 /* Copying shared areas not supported yet */
1273 mutex_unlock(&area->sh_info->lock);
1274 mutex_unlock(&area->lock);
1275 mutex_unlock(&as->lock);
1276 return ENOTSUP;
1277 }
1278 mutex_unlock(&area->sh_info->lock);
1279
1280 /*
1281 * Compute total number of used pages in the used_space B+tree
1282 */
1283 size_t used_pages = 0;
1284
1285 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1286 node) {
1287 btree_key_t i;
1288
1289 for (i = 0; i < node->keys; i++)
1290 used_pages += (size_t) node->value[i];
1291 }
1292
1293 /* An array for storing frame numbers */
1294 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
1295
1296 page_table_lock(as, false);
1297
1298 /*
1299 * Start TLB shootdown sequence.
1300 */
1301 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1302 area->pages);
1303
1304 /*
1305 * Remove used pages from page tables and remember their frame
1306 * numbers.
1307 */
1308 size_t frame_idx = 0;
1309
1310 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1311 node) {
1312 btree_key_t i;
1313
1314 for (i = 0; i < node->keys; i++) {
1315 uintptr_t ptr = node->key[i];
1316 size_t size;
1317
1318 for (size = 0; size < (size_t) node->value[i]; size++) {
1319 pte_t pte;
1320 bool found = page_mapping_find(as,
1321 ptr + P2SZ(size), false, &pte);
1322
1323 assert(found);
1324 assert(PTE_VALID(&pte));
1325 assert(PTE_PRESENT(&pte));
1326
1327 old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
1328
1329 /* Remove old mapping */
1330 page_mapping_remove(as, ptr + P2SZ(size));
1331 }
1332 }
1333 }
1334
1335 /*
1336 * Finish TLB shootdown sequence.
1337 */
1338
1339 tlb_invalidate_pages(as->asid, area->base, area->pages);
1340
1341 /*
1342 * Invalidate potential software translation caches
1343 * (e.g. TSB on sparc64, PHT on ppc32).
1344 */
1345 as_invalidate_translation_cache(as, area->base, area->pages);
1346 tlb_shootdown_finalize(ipl);
1347
1348 page_table_unlock(as, false);
1349
1350 /*
1351 * Set the new flags.
1352 */
1353 area->flags = flags;
1354
1355 /*
1356 * Map pages back in with new flags. This step is kept separate
1357 * so that the memory area cannot be accessed with both the old and
1358 * the new flags at once.
1359 */
1360 frame_idx = 0;
1361
1362 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
1363 node) {
1364 btree_key_t i;
1365
1366 for (i = 0; i < node->keys; i++) {
1367 uintptr_t ptr = node->key[i];
1368 size_t size;
1369
1370 for (size = 0; size < (size_t) node->value[i]; size++) {
1371 page_table_lock(as, false);
1372
1373 /* Insert the new mapping */
1374 page_mapping_insert(as, ptr + P2SZ(size),
1375 old_frame[frame_idx++], page_flags);
1376
1377 page_table_unlock(as, false);
1378 }
1379 }
1380 }
1381
1382 free(old_frame);
1383
1384 mutex_unlock(&area->lock);
1385 mutex_unlock(&as->lock);
1386
1387 return 0;
1388}
1389
1390/** Handle page fault within the current address space.
1391 *
1392 * This is the high-level page fault handler. It decides whether the page fault
1393 * can be resolved by any backend and if so, it invokes the backend to resolve
1394 * the page fault.
1395 *
1396 * Interrupts are assumed disabled.
1397 *
1398 * @param address Faulting address.
1399 * @param access Access mode that caused the page fault (i.e.
1400 * read/write/exec).
1401 * @param istate Pointer to the interrupted state.
1402 *
1403 * @return AS_PF_FAULT on page fault.
1404 * @return AS_PF_OK on success.
1405 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1406 * or copy_from_uspace().
1407 *
1408 */
1409int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
1410{
1411 uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
1412 int rc = AS_PF_FAULT;
1413
1414 if (!THREAD)
1415 goto page_fault;
1416
1417 if (!AS)
1418 goto page_fault;
1419
1420 mutex_lock(&AS->lock);
1421 as_area_t *area = find_area_and_lock(AS, page);
1422 if (!area) {
1423 /*
1424 * No area contained mapping for 'page'.
1425 * Signal page fault to low-level handler.
1426 */
1427 mutex_unlock(&AS->lock);
1428 goto page_fault;
1429 }
1430
1431 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1432 /*
1433 * The address space area is not fully initialized.
1434 * Avoid possible race by returning error.
1435 */
1436 mutex_unlock(&area->lock);
1437 mutex_unlock(&AS->lock);
1438 goto page_fault;
1439 }
1440
1441 if ((!area->backend) || (!area->backend->page_fault)) {
1442 /*
1443 * The address space area is not backed by any backend
1444 * or the backend cannot handle page faults.
1445 */
1446 mutex_unlock(&area->lock);
1447 mutex_unlock(&AS->lock);
1448 goto page_fault;
1449 }
1450
1451 page_table_lock(AS, false);
1452
1453 /*
1454 * To avoid race condition between two page faults on the same address,
1455 * we need to make sure the mapping has not been already inserted.
1456 */
1457 pte_t pte;
1458 bool found = page_mapping_find(AS, page, false, &pte);
1459 if (found && PTE_PRESENT(&pte)) {
1460 if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
1461 (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
1462 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
1463 page_table_unlock(AS, false);
1464 mutex_unlock(&area->lock);
1465 mutex_unlock(&AS->lock);
1466 return AS_PF_OK;
1467 }
1468 }
1469
1470 /*
1471 * Resort to the backend page fault handler.
1472 */
1473 rc = area->backend->page_fault(area, page, access);
1474 if (rc != AS_PF_OK) {
1475 page_table_unlock(AS, false);
1476 mutex_unlock(&area->lock);
1477 mutex_unlock(&AS->lock);
1478 goto page_fault;
1479 }
1480
1481 page_table_unlock(AS, false);
1482 mutex_unlock(&area->lock);
1483 mutex_unlock(&AS->lock);
1484 return AS_PF_OK;
1485
1486page_fault:
1487 if (THREAD->in_copy_from_uspace) {
1488 THREAD->in_copy_from_uspace = false;
1489 istate_set_retaddr(istate,
1490 (uintptr_t) &memcpy_from_uspace_failover_address);
1491 } else if (THREAD->in_copy_to_uspace) {
1492 THREAD->in_copy_to_uspace = false;
1493 istate_set_retaddr(istate,
1494 (uintptr_t) &memcpy_to_uspace_failover_address);
1495 } else if (rc == AS_PF_SILENT) {
1496 printf("Killing task %" PRIu64 " due to a "
1497 "failed late reservation request.\n", TASK->taskid);
1498 task_kill_self(true);
1499 } else {
1500 fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
1501 panic_memtrap(istate, access, address, NULL);
1502 }
1503
1504 return AS_PF_DEFER;
1505}
1506
1507/** Switch address spaces.
1508 *
1509 * Note that this function cannot sleep as it is essentially a part of
1510 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1511 * thing which is forbidden in this context is locking the address space.
1512 *
1513 * When this function is entered, no spinlocks may be held.
1514 *
1515 * @param old_as Old address space or NULL.
1516 * @param new_as New address space.
1517 *
1518 */
1519void as_switch(as_t *old_as, as_t *new_as)
1520{
1521 DEADLOCK_PROBE_INIT(p_asidlock);
1522 preemption_disable();
1523
1524retry:
1525 (void) interrupts_disable();
1526 if (!spinlock_trylock(&asidlock)) {
1527 /*
1528 * Avoid deadlock with TLB shootdown.
1529 * We can enable interrupts here because
1530 * preemption is disabled. We should not be
1531 * holding any other lock.
1532 */
1533 (void) interrupts_enable();
1534 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1535 goto retry;
1536 }
1537 preemption_enable();
1538
1539 /*
1540 * First, take care of the old address space.
1541 */
1542 if (old_as) {
1543 assert(old_as->cpu_refcount);
1544
1545 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
1546 /*
1547 * The old address space is no longer active on
1548 * any processor. It can be appended to the
1549 * list of inactive address spaces with assigned
1550 * ASID.
1551 */
1552 assert(old_as->asid != ASID_INVALID);
1553
1554 list_append(&old_as->inactive_as_with_asid_link,
1555 &inactive_as_with_asid_list);
1556 }
1557
1558 /*
1559 * Perform architecture-specific tasks when the address space
1560 * is being removed from the CPU.
1561 */
1562 as_deinstall_arch(old_as);
1563 }
1564
1565 /*
1566 * Second, prepare the new address space.
1567 */
1568 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
1569 if (new_as->asid != ASID_INVALID)
1570 list_remove(&new_as->inactive_as_with_asid_link);
1571 else
1572 new_as->asid = asid_get();
1573 }
1574
1575#ifdef AS_PAGE_TABLE
1576 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1577#endif
1578
1579 /*
1580 * Perform architecture-specific steps.
1581 * (e.g. write ASID to hardware register etc.)
1582 */
1583 as_install_arch(new_as);
1584
1585 spinlock_unlock(&asidlock);
1586
1587 AS = new_as;
1588}
1589
1590/** Compute flags for the virtual address translation subsystem.
1591 *
1592 * @param area Address space area.
1593 *
1594 * @return Flags to be used in page_mapping_insert().
1595 *
1596 */
1597NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
1598{
1599 assert(mutex_locked(&area->lock));
1600
1601 return area_flags_to_page_flags(area->flags);
1602}
1603
1604/** Create page table.
1605 *
1606 * Depending on architecture, create either an address space private
1607 * or a global page table.
1608 *
1609 * @param flags Flags saying whether the page table is for the kernel
1610 * address space.
1611 *
1612 * @return First entry of the page table.
1613 *
1614 */
1615NO_TRACE pte_t *page_table_create(unsigned int flags)
1616{
1617 assert(as_operations);
1618 assert(as_operations->page_table_create);
1619
1620 return as_operations->page_table_create(flags);
1621}
1622
1623/** Destroy page table.
1624 *
1625 * Destroy page table in architecture specific way.
1626 *
1627 * @param page_table Physical address of PTL0.
1628 *
1629 */
1630NO_TRACE void page_table_destroy(pte_t *page_table)
1631{
1632 assert(as_operations);
1633 assert(as_operations->page_table_destroy);
1634
1635 as_operations->page_table_destroy(page_table);
1636}
1637
1638/** Lock page table.
1639 *
1640 * This function should be called before any page_mapping_insert(),
1641 * page_mapping_remove() and page_mapping_find().
1642 *
1643 * Locking order is such that address space areas must be locked
1644 * prior to this call. Address space can be locked prior to this
1645 * call in which case the lock argument is false.
1646 *
1647 * @param as Address space.
1648 * @param lock If false, do not attempt to lock as->lock.
1649 *
1650 */
1651NO_TRACE void page_table_lock(as_t *as, bool lock)
1652{
1653 assert(as_operations);
1654 assert(as_operations->page_table_lock);
1655
1656 as_operations->page_table_lock(as, lock);
1657}
1658
1659/** Unlock page table.
1660 *
1661 * @param as Address space.
1662 * @param unlock If false, do not attempt to unlock as->lock.
1663 *
1664 */
1665NO_TRACE void page_table_unlock(as_t *as, bool unlock)
1666{
1667 assert(as_operations);
1668 assert(as_operations->page_table_unlock);
1669
1670 as_operations->page_table_unlock(as, unlock);
1671}
1672
1673/** Test whether page tables are locked.
1674 *
1675 * @param as Address space where the page tables belong.
1676 *
1677 * @return True if the page tables belonging to the address space
1678 * are locked, otherwise false.
1679 */
1680NO_TRACE bool page_table_locked(as_t *as)
1681{
1682 assert(as_operations);
1683 assert(as_operations->page_table_locked);
1684
1685 return as_operations->page_table_locked(as);
1686}
1687
1688/** Return size of the address space area containing a given address.
1689 *
1690 * @param base Arbitrary address inside the address space area.
1691 *
1692 * @return Size of the address space area in bytes or zero if it
1693 * does not exist.
1694 *
1695 */
1696size_t as_area_get_size(uintptr_t base)
1697{
1698 size_t size;
1699
1700 page_table_lock(AS, true);
1701 as_area_t *src_area = find_area_and_lock(AS, base);
1702
1703 if (src_area) {
1704 size = P2SZ(src_area->pages);
1705 mutex_unlock(&src_area->lock);
1706 } else
1707 size = 0;
1708
1709 page_table_unlock(AS, true);
1710 return size;
1711}
1712
1713/** Mark portion of address space area as used.
1714 *
1715 * The address space area must be already locked.
1716 *
1717 * @param area Address space area.
1718 * @param page First page to be marked.
1719 * @param count Number of pages to be marked.
1720 *
1721 * @return False on failure or true on success.
1722 *
1723 */
1724bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
1725{
1726 assert(mutex_locked(&area->lock));
1727 assert(IS_ALIGNED(page, PAGE_SIZE));
1728 assert(count);
1729
1730 btree_node_t *leaf = NULL;
1731 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
1732 if (pages) {
1733 /*
1734 * We hit the beginning of some used space.
1735 */
1736 return false;
1737 }
1738
1739 assert(leaf != NULL);
1740
1741 if (!leaf->keys) {
1742 btree_insert(&area->used_space, page, (void *) count, leaf);
1743 goto success;
1744 }
1745
1746 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
1747 if (node) {
1748 uintptr_t left_pg = node->key[node->keys - 1];
1749 uintptr_t right_pg = leaf->key[0];
1750 size_t left_cnt = (size_t) node->value[node->keys - 1];
1751 size_t right_cnt = (size_t) leaf->value[0];
1752
1753 /*
1754 * Examine the possibility that the interval fits
1755 * somewhere between the rightmost interval of
1756 * the left neighbour and the first interval of the leaf.
1757 */
1758
1759 if (page >= right_pg) {
1760 /* Do nothing. */
1761 } else if (overlaps(page, P2SZ(count), left_pg,
1762 P2SZ(left_cnt))) {
1763 /* The interval intersects with the left interval. */
1764 return false;
1765 } else if (overlaps(page, P2SZ(count), right_pg,
1766 P2SZ(right_cnt))) {
1767 /* The interval intersects with the right interval. */
1768 return false;
1769 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1770 (page + P2SZ(count) == right_pg)) {
1771 /*
1772 * The interval can be added by merging the two already
1773 * present intervals.
1774 */
1775 node->value[node->keys - 1] += count + right_cnt;
1776 btree_remove(&area->used_space, right_pg, leaf);
1777 goto success;
1778 } else if (page == left_pg + P2SZ(left_cnt)) {
1779 /*
1780 * The interval can be added by simply growing the left
1781 * interval.
1782 */
1783 node->value[node->keys - 1] += count;
1784 goto success;
1785 } else if (page + P2SZ(count) == right_pg) {
1786 /*
1787 * The interval can be added by simply moving the base of
1788 * the right interval down and increasing its size
1789 * accordingly.
1790 */
1791 leaf->value[0] += count;
1792 leaf->key[0] = page;
1793 goto success;
1794 } else {
1795 /*
1796 * The interval is between both neighbouring intervals,
1797 * but cannot be merged with any of them.
1798 */
1799 btree_insert(&area->used_space, page, (void *) count,
1800 leaf);
1801 goto success;
1802 }
1803 } else if (page < leaf->key[0]) {
1804 uintptr_t right_pg = leaf->key[0];
1805 size_t right_cnt = (size_t) leaf->value[0];
1806
1807 /*
1808 * Investigate the border case in which the left neighbour does
1809 * not exist but the interval fits from the left.
1810 */
1811
1812 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
1813 /* The interval intersects with the right interval. */
1814 return false;
1815 } else if (page + P2SZ(count) == right_pg) {
1816 /*
1817 * The interval can be added by moving the base of the
1818 * right interval down and increasing its size
1819 * accordingly.
1820 */
1821 leaf->key[0] = page;
1822 leaf->value[0] += count;
1823 goto success;
1824 } else {
1825 /*
1826 * The interval doesn't adjoin with the right interval.
1827 * It must be added individually.
1828 */
1829 btree_insert(&area->used_space, page, (void *) count,
1830 leaf);
1831 goto success;
1832 }
1833 }
1834
1835 node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
1836 if (node) {
1837 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1838 uintptr_t right_pg = node->key[0];
1839 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1840 size_t right_cnt = (size_t) node->value[0];
1841
1842 /*
1843 * Examine the possibility that the interval fits
1844 * somewhere between the leftmost interval of
1845 * the right neighbour and the last interval of the leaf.
1846 */
1847
1848 if (page < left_pg) {
1849 /* Do nothing. */
1850 } else if (overlaps(page, P2SZ(count), left_pg,
1851 P2SZ(left_cnt))) {
1852 /* The interval intersects with the left interval. */
1853 return false;
1854 } else if (overlaps(page, P2SZ(count), right_pg,
1855 P2SZ(right_cnt))) {
1856 /* The interval intersects with the right interval. */
1857 return false;
1858 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1859 (page + P2SZ(count) == right_pg)) {
1860 /*
1861 * The interval can be added by merging the two already
1862 * present intervals.
1863 */
1864 leaf->value[leaf->keys - 1] += count + right_cnt;
1865 btree_remove(&area->used_space, right_pg, node);
1866 goto success;
1867 } else if (page == left_pg + P2SZ(left_cnt)) {
1868 /*
1869 * The interval can be added by simply growing the left
1870 * interval.
1871 */
1872 leaf->value[leaf->keys - 1] += count;
1873 goto success;
1874 } else if (page + P2SZ(count) == right_pg) {
1875 /*
1876 * The interval can be added by simply moving the base of
1877 * the right interval down and increasing its size
1878 * accordingly.
1879 */
1880 node->value[0] += count;
1881 node->key[0] = page;
1882 goto success;
1883 } else {
1884 /*
1885 * The interval is between both neighbouring intervals,
1886 * but cannot be merged with any of them.
1887 */
1888 btree_insert(&area->used_space, page, (void *) count,
1889 leaf);
1890 goto success;
1891 }
1892 } else if (page >= leaf->key[leaf->keys - 1]) {
1893 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1894 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1895
1896 /*
1897 * Investigate the border case in which the right neighbour
1898 * does not exist but the interval fits from the right.
1899 */
1900
1901 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
1902 /* The interval intersects with the left interval. */
1903 return false;
1904 } else if (left_pg + P2SZ(left_cnt) == page) {
1905 /*
1906 * The interval can be added by growing the left
1907 * interval.
1908 */
1909 leaf->value[leaf->keys - 1] += count;
1910 goto success;
1911 } else {
1912 /*
1913 * The interval doesn't adjoin with the left interval.
1914 * It must be added individually.
1915 */
1916 btree_insert(&area->used_space, page, (void *) count,
1917 leaf);
1918 goto success;
1919 }
1920 }
1921
1922 /*
1923 * Note that if the algorithm made it thus far, the interval can fit
1924 * only between two other intervals of the leaf. The two border cases
1925 * were already resolved.
1926 */
1927 btree_key_t i;
1928 for (i = 1; i < leaf->keys; i++) {
1929 if (page < leaf->key[i]) {
1930 uintptr_t left_pg = leaf->key[i - 1];
1931 uintptr_t right_pg = leaf->key[i];
1932 size_t left_cnt = (size_t) leaf->value[i - 1];
1933 size_t right_cnt = (size_t) leaf->value[i];
1934
1935 /*
1936 * The interval fits between left_pg and right_pg.
1937 */
1938
1939 if (overlaps(page, P2SZ(count), left_pg,
1940 P2SZ(left_cnt))) {
1941 /*
1942 * The interval intersects with the left
1943 * interval.
1944 */
1945 return false;
1946 } else if (overlaps(page, P2SZ(count), right_pg,
1947 P2SZ(right_cnt))) {
1948 /*
1949 * The interval intersects with the right
1950 * interval.
1951 */
1952 return false;
1953 } else if ((page == left_pg + P2SZ(left_cnt)) &&
1954 (page + P2SZ(count) == right_pg)) {
1955 /*
1956 * The interval can be added by merging the two
1957 * already present intervals.
1958 */
1959 leaf->value[i - 1] += count + right_cnt;
1960 btree_remove(&area->used_space, right_pg, leaf);
1961 goto success;
1962 } else if (page == left_pg + P2SZ(left_cnt)) {
1963 /*
1964 * The interval can be added by simply growing
1965 * the left interval.
1966 */
1967 leaf->value[i - 1] += count;
1968 goto success;
1969 } else if (page + P2SZ(count) == right_pg) {
1970 /*
1971 * The interval can be added by simply moving
1972 * base of the right interval down and
1973 * increasing its size accordingly.
1974 */
1975 leaf->value[i] += count;
1976 leaf->key[i] = page;
1977 goto success;
1978 } else {
1979 /*
1980 * The interval is between both neighbouring
1981 * intervals, but cannot be merged with any of
1982 * them.
1983 */
1984 btree_insert(&area->used_space, page,
1985 (void *) count, leaf);
1986 goto success;
1987 }
1988 }
1989 }
1990
1991 panic("Inconsistency detected while adding %zu pages of used "
1992 "space at %p.", count, (void *) page);
1993
1994success:
1995 area->resident += count;
1996 return true;
1997}
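/*
 * Worked example of the merging rules above (a sketch assuming 4 KiB
 * pages): with existing intervals [0x1000, 2 pages] and
 * [0x4000, 1 page], inserting page = 0x3000, count = 1 adjoins both
 * neighbours (0x1000 + P2SZ(2) == 0x3000 and 0x3000 + P2SZ(1) ==
 * 0x4000), so all three collapse into a single [0x1000, 4 pages]
 * record and the 0x4000 key is removed from the B+tree.
 */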
1998
1999/** Mark portion of address space area as unused.
2000 *
2001 * The address space area must be already locked.
2002 *
2003 * @param area Address space area.
2004 * @param page First page to be marked.
2005 * @param count Number of pages to be marked.
2006 *
2007 * @return False on failure or true on success.
2008 *
2009 */
2010bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
2011{
2012 assert(mutex_locked(&area->lock));
2013 assert(IS_ALIGNED(page, PAGE_SIZE));
2014 assert(count);
2015
2016 btree_node_t *leaf;
2017 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
2018 if (pages) {
2019 /*
2020 * We are lucky, page is the beginning of some interval.
2021 */
2022 if (count > pages) {
2023 return false;
2024 } else if (count == pages) {
2025 btree_remove(&area->used_space, page, leaf);
2026 goto success;
2027 } else {
2028 /*
2029 * Find the respective interval.
2030 * Decrease its size and relocate its start address.
2031 */
2032 btree_key_t i;
2033 for (i = 0; i < leaf->keys; i++) {
2034 if (leaf->key[i] == page) {
2035 leaf->key[i] += P2SZ(count);
2036 leaf->value[i] -= count;
2037 goto success;
2038 }
2039 }
2040
2041 goto error;
2042 }
2043 }
2044
2045 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
2046 leaf);
2047 if ((node) && (page < leaf->key[0])) {
2048 uintptr_t left_pg = node->key[node->keys - 1];
2049 size_t left_cnt = (size_t) node->value[node->keys - 1];
2050
2051 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
2052 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
2053 /*
2054 * The interval is contained in the rightmost
2055 * interval of the left neighbour and can be
2056 * removed by updating the size of the bigger
2057 * interval.
2058 */
2059 node->value[node->keys - 1] -= count;
2060 goto success;
2061 } else if (page + P2SZ(count) <
2062 left_pg + P2SZ(left_cnt)) {
2063 size_t new_cnt;
2064
2065 /*
2066 * The interval is contained in the rightmost
2067 * interval of the left neighbour but its
2068 * removal requires both updating the size of
2069 * the original interval and also inserting a
2070 * new interval.
2071 */
2072 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2073 (page + P2SZ(count))) >> PAGE_WIDTH;
2074 node->value[node->keys - 1] -= count + new_cnt;
2075 btree_insert(&area->used_space, page +
2076 P2SZ(count), (void *) new_cnt, leaf);
2077 goto success;
2078 }
2079 }
2080
2081 return false;
2082 } else if (page < leaf->key[0])
2083 return false;
2084
2085 if (page > leaf->key[leaf->keys - 1]) {
2086 uintptr_t left_pg = leaf->key[leaf->keys - 1];
2087 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
2088
2089 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
2090 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
2091 /*
2092 * The interval is contained in the rightmost
2093 * interval of the leaf and can be removed by
2094 * updating the size of the bigger interval.
2095 */
2096 leaf->value[leaf->keys - 1] -= count;
2097 goto success;
2098 } else if (page + P2SZ(count) < left_pg +
2099 P2SZ(left_cnt)) {
2100 size_t new_cnt;
2101
2102 /*
2103 * The interval is contained in the rightmost
2104 * interval of the leaf but its removal
2105 * requires both updating the size of the
2106 * original interval and also inserting a new
2107 * interval.
2108 */
2109 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2110 (page + P2SZ(count))) >> PAGE_WIDTH;
2111 leaf->value[leaf->keys - 1] -= count + new_cnt;
2112 btree_insert(&area->used_space, page +
2113 P2SZ(count), (void *) new_cnt, leaf);
2114 goto success;
2115 }
2116 }
2117
2118 return false;
2119 }
2120
2121 /*
2122 * The border cases have been already resolved.
2123 * Now the interval can be only between intervals of the leaf.
2124 */
2125 btree_key_t i;
2126 for (i = 1; i < leaf->keys - 1; i++) {
2127 if (page < leaf->key[i]) {
2128 uintptr_t left_pg = leaf->key[i - 1];
2129 size_t left_cnt = (size_t) leaf->value[i - 1];
2130
2131 /*
2132 * Now the interval is between intervals corresponding
2133 * to (i - 1) and i.
2134 */
2135 if (overlaps(left_pg, P2SZ(left_cnt), page,
2136 P2SZ(count))) {
2137 if (page + P2SZ(count) ==
2138 left_pg + P2SZ(left_cnt)) {
2139 /*
2140 * The interval is contained in the
2141 * interval (i - 1) of the leaf and can
2142 * be removed by updating the size of
2143 * the bigger interval.
2144 */
2145 leaf->value[i - 1] -= count;
2146 goto success;
2147 } else if (page + P2SZ(count) <
2148 left_pg + P2SZ(left_cnt)) {
2149 size_t new_cnt;
2150
2151 /*
2152 * The interval is contained in the
2153 * interval (i - 1) of the leaf but its
2154 * removal requires both updating the
2155 * size of the original interval and
2156 * also inserting a new interval.
2157 */
2158 new_cnt = ((left_pg + P2SZ(left_cnt)) -
2159 (page + P2SZ(count))) >>
2160 PAGE_WIDTH;
2161 leaf->value[i - 1] -= count + new_cnt;
2162 btree_insert(&area->used_space, page +
2163 P2SZ(count), (void *) new_cnt,
2164 leaf);
2165 goto success;
2166 }
2167 }
2168
2169 return false;
2170 }
2171 }
2172
2173error:
2174 panic("Inconsistency detected while removing %zu pages of used "
2175 "space from %p.", count, (void *) page);
2176
2177success:
2178 area->resident -= count;
2179 return true;
2180}
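/*
 * Worked example of the splitting case above (a sketch assuming 4 KiB
 * pages): removing [0x3000, 1 page] from the interval
 * [0x1000, 4 pages] leaves [0x1000, 2 pages] plus a new record
 * [0x4000, 1 page]:
 *
 *   new_cnt = ((0x1000 + P2SZ(4)) - (0x3000 + P2SZ(1))) >> PAGE_WIDTH
 *           = 1
 *   left interval size: 4 - (count + new_cnt) = 4 - 2 = 2 pages
 */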
2181
2182/*
2183 * Address space related syscalls.
2184 */
2185
2186sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
2187 uintptr_t bound, as_area_pager_info_t *pager_info)
2188{
2189 uintptr_t virt = base;
2190 mem_backend_t *backend;
2191 mem_backend_data_t backend_data;
2192
2193 if (pager_info == AS_AREA_UNPAGED)
2194 backend = &anon_backend;
2195 else {
2196 backend = &user_backend;
2197 if (copy_from_uspace(&backend_data.pager_info, pager_info,
2198 sizeof(as_area_pager_info_t)) != EOK) {
2199 return (sysarg_t) AS_MAP_FAILED;
2200 }
2201 }
2202 as_area_t *area = as_area_create(AS, flags, size,
2203 AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
2204 if (area == NULL)
2205 return (sysarg_t) AS_MAP_FAILED;
2206
2207 return (sysarg_t) virt;
2208}
2209
2210sys_errno_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
2211{
2212 return (sys_errno_t) as_area_resize(AS, address, size, 0);
2213}
2214
2215sys_errno_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
2216{
2217 return (sys_errno_t) as_area_change_flags(AS, flags, address);
2218}
2219
2220sys_errno_t sys_as_area_destroy(uintptr_t address)
2221{
2222 return (sys_errno_t) as_area_destroy(AS, address);
2223}
2224
2225/** Get list of address space areas.
2226 *
2227 * @param as Address space.
2228 * @param obuf Place to save pointer to returned buffer.
2229 * @param osize Place to save size of returned buffer.
2230 *
2231 */
2232void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2233{
2234 mutex_lock(&as->lock);
2235
2236 /* First pass, count number of areas. */
2237
2238 size_t area_cnt = 0;
2239
2240 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2241 node) {
2242 area_cnt += node->keys;
2243 }
2244
2245 size_t isize = area_cnt * sizeof(as_area_info_t);
2246 as_area_info_t *info = malloc(isize, 0);
2247
2248 /* Second pass, record data. */
2249
2250 size_t area_idx = 0;
2251
2252 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2253 node) {
2254 btree_key_t i;
2255
2256 for (i = 0; i < node->keys; i++) {
2257 as_area_t *area = node->value[i];
2258
2259 assert(area_idx < area_cnt);
2260 mutex_lock(&area->lock);
2261
2262 info[area_idx].start_addr = area->base;
2263 info[area_idx].size = P2SZ(area->pages);
2264 info[area_idx].flags = area->flags;
2265 ++area_idx;
2266
2267 mutex_unlock(&area->lock);
2268 }
2269 }
2270
2271 mutex_unlock(&as->lock);
2272
2273 *obuf = info;
2274 *osize = isize;
2275}
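/*
 * Illustrative sketch (not part of the original file): consuming the
 * snapshot returned by as_get_area_info(). The caller owns the buffer
 * and must free it. The helper name is hypothetical.
 */
static void example_dump_area_info(as_t *as)
{
	as_area_info_t *info;
	size_t isize;

	as_get_area_info(as, &info, &isize);

	for (size_t i = 0; i < isize / sizeof(as_area_info_t); i++)
		printf("area %p: %zu bytes, flags %#x\n",
		    (void *) info[i].start_addr, info[i].size,
		    info[i].flags);

	free(info);
}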
2276
2277/** Print out information about address space.
2278 *
2279 * @param as Address space.
2280 *
2281 */
2282void as_print(as_t *as)
2283{
2284 mutex_lock(&as->lock);
2285
2286 /* Print out info about address space areas */
2287 list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
2288 node) {
2289 btree_key_t i;
2290
2291 for (i = 0; i < node->keys; i++) {
2292 as_area_t *area = node->value[i];
2293
2294 mutex_lock(&area->lock);
2295 printf("as_area: %p, base=%p, pages=%zu"
2296 " (%p - %p)\n", area, (void *) area->base,
2297 area->pages, (void *) area->base,
2298 (void *) (area->base + P2SZ(area->pages)));
2299 mutex_unlock(&area->lock);
2300 }
2301 }
2302
2303 mutex_unlock(&as->lock);
2304}
2305
2306/** @}
2307 */