source: mainline/kernel/generic/src/mm/as.c@ 2c0b348

Last change on this file since 2c0b348 was 2c0b348, checked in by Jan Vesely <jano.vesely@…>, 12 years ago

kernel: free memory in dmamem_unmap

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <bitops.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>
#include <interrupt.h>

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/** Slab for as_t objects.
 *
 */
static slab_cache_t *as_slab;

/** ASID subsystem lock.
 *
 * This lock protects:
 * - inactive_as_with_asid_list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 *
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * Inactive address spaces (on all processors)
 * that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_list);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

NO_TRACE static int as_constructor(void *obj, unsigned int flags)
{
    as_t *as = (as_t *) obj;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock, MUTEX_PASSIVE);

    return as_constructor_arch(as, flags);
}

NO_TRACE static size_t as_destructor(void *obj)
{
    return as_destructor_arch((as_t *) obj);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("Cannot create kernel address space.");

    /*
     * Make sure the kernel address space
     * reference count never drops to zero.
     */
    as_hold(AS_KERNEL);
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address
 *              space is created.
 *
 */
as_t *as_create(unsigned int flags)
{
    as_t *as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    atomic_set(&as->refcount, 0);
    as->cpu_refcount = 0;

#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 *
 * @param as Address space to be destroyed.
 *
 */
void as_destroy(as_t *as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);

    ASSERT(as != AS);
    ASSERT(atomic_get(&as->refcount) == 0);

    /*
     * Since there is no reference to this address space, it is safe not to
     * lock its mutex.
     */

    /*
     * We need to avoid deadlock between TLB shootdown and asidlock.
     * We therefore try to take asidlock conditionally and if we don't
     * succeed, we enable interrupts and try again. This is done while
     * preemption is disabled to prevent nested context switches. We also
     * depend on the fact that so far no spinlocks are held.
     */
    preemption_disable();
    ipl_t ipl = interrupts_read();

retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }

    /* Interrupts disabled, enable preemption */
    preemption_enable();

    if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
        if (as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);

        asid_put(as->asid);
    }

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    bool cond = true;
    while (cond) {
        ASSERT(!list_empty(&as->as_area_btree.leaf_list));

        btree_node_t *node =
            list_get_instance(list_first(&as->as_area_btree.leaf_list),
            btree_node_t, leaf_link);

        if ((cond = node->keys))
            as_area_destroy(as, node->key[0]);
    }

    btree_destroy(&as->as_area_btree);

#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    slab_free(as_slab, as);
}

/** Hold a reference to an address space.
 *
 * Holding a reference to an address space prevents destruction
 * of that address space.
 *
 * @param as Address space to be held.
 *
 */
NO_TRACE void as_hold(as_t *as)
{
    atomic_inc(&as->refcount);
}

/** Release a reference to an address space.
 *
 * The last one to release a reference to an address space
 * destroys the address space.
 *
 * @param as Address space to be released.
 *
 */
NO_TRACE void as_release(as_t *as)
{
    if (atomic_predec(&as->refcount) == 0)
        as_destroy(as);
}

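/*
 * Illustrative sketch (not part of the original source): the typical
 * pairing of the two reference-counting primitives above. A subsystem
 * that starts using an address space pins it first; the matching
 * as_release() may end up calling as_destroy() when the last reference
 * is dropped.
 *
 *	as_hold(as);
 *	// ... use the address space ...
 *	as_release(as);  // may call as_destroy(as)
 */
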
/** Check area conflicts with other areas.
 *
 * @param as      Address space.
 * @param addr    Starting virtual address of the area being tested.
 * @param count   Number of pages in the area being tested.
 * @param guarded True if the area being tested is protected by guard pages.
 * @param avoid   Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 *
 */
NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
    size_t count, bool guarded, as_area_t *avoid)
{
    ASSERT((addr % PAGE_SIZE) == 0);
    ASSERT(mutex_locked(&as->lock));

    /*
     * If the addition of the supposed area address and size overflows,
     * report conflict.
     */
    if (overflows_into_positive(addr, P2SZ(count)))
        return false;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */
    btree_node_t *leaf;
    as_area_t *area =
        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
    if (area) {
        if (area != avoid)
            return false;
    }

    /* First, check the two border cases. */
    btree_node_t *node =
        btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[node->keys - 1];

        if (area != avoid) {
            mutex_lock(&area->lock);

            /*
             * If at least one of the two areas is protected
             * by the AS_AREA_GUARD flag then we must be sure
             * that they are separated by at least one unmapped
             * page.
             */
            int const gp = (guarded ||
                (area->flags & AS_AREA_GUARD)) ? 1 : 0;

            /*
             * The area comes from the left neighbour node, which
             * means that there already are some areas in the leaf
             * node, which in turn means that adding gp is safe and
             * will not cause an integer overflow.
             */
            if (overlaps(addr, P2SZ(count), area->base,
                P2SZ(area->pages + gp))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        area = (as_area_t *) node->value[0];

        if (area != avoid) {
            int gp;

            mutex_lock(&area->lock);

            gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
            if (gp && overflows(addr, P2SZ(count))) {
                /*
                 * Guard page not needed if the supposed area
                 * is adjacent to the end of the address space.
                 * We already know that the following test is
                 * going to fail...
                 */
                gp--;
            }

            if (overlaps(addr, P2SZ(count + gp), area->base,
                P2SZ(area->pages))) {
                mutex_unlock(&area->lock);
                return false;
            }

            mutex_unlock(&area->lock);
        }
    }

    /* Second, check the leaf node. */
    btree_key_t i;
    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];
        int agp;
        int gp;

        if (area == avoid)
            continue;

        mutex_lock(&area->lock);

        gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0;
        agp = gp;

        /*
         * Sanitize the two possible unsigned integer overflows.
         */
        if (gp && overflows(addr, P2SZ(count)))
            gp--;
        if (agp && overflows(area->base, P2SZ(area->pages)))
            agp--;

        if (overlaps(addr, P2SZ(count + gp), area->base,
            P2SZ(area->pages + agp))) {
            mutex_unlock(&area->lock);
            return false;
        }

        mutex_unlock(&area->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it is contained in the user address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return iswithin(USER_ADDRESS_SPACE_START,
            (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1,
            addr, P2SZ(count));
    }

    return true;
}

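/*
 * Worked example (illustrative only, assuming 4 KiB pages and no other
 * areas): a guarded area at 0x10000 spanning 2 pages occupies
 * [0x10000, 0x12000) plus one guard page up to 0x13000. A candidate
 * area with addr = 0x12000 and count = 1 therefore conflicts, because
 * overlaps(0x12000, 0x1000, 0x10000, P2SZ(2 + 1)) holds; the first
 * page-aligned base that passes the check is 0x13000.
 */
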
/** Return the base address of an unmapped address space area.
 *
 * The address space must be already locked when calling
 * this function.
 *
 * @param as      Address space.
 * @param bound   Lowest address bound.
 * @param size    Requested size of the allocation.
 * @param guarded True if the allocation must be protected by guard pages.
 *
 * @return Address of the beginning of unmapped address space area.
 * @return -1 if no suitable address space area was found.
 *
 */
NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
    size_t size, bool guarded)
{
    ASSERT(mutex_locked(&as->lock));

    if (size == 0)
        return (uintptr_t) -1;

    /*
     * Make sure we allocate from a page-aligned
     * address. Check for possible overflow in
     * each step.
     */

    size_t pages = SIZE2FRAMES(size);

    /*
     * Find the lowest unmapped address aligned on the size
     * boundary, not smaller than bound and of the required size.
     */

    /* First check the bound address itself */
    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
    if (addr >= bound) {
        if (guarded) {
            /*
             * Leave an unmapped page between the lower
             * bound and the area's start address.
             */
            addr += P2SZ(1);
        }

        if (check_area_conflicts(as, addr, pages, guarded, NULL))
            return addr;
    }

    /* Next, check the addresses behind each area */
    list_foreach(as->as_area_btree.leaf_list, cur) {
        btree_node_t *node =
            list_get_instance(cur, btree_node_t, leaf_link);

        for (btree_key_t i = 0; i < node->keys; i++) {
            as_area_t *area = (as_area_t *) node->value[i];

            mutex_lock(&area->lock);

            addr =
                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);

            if (guarded || area->flags & AS_AREA_GUARD) {
                /*
                 * We must leave an unmapped page
                 * between the two areas.
                 */
                addr += P2SZ(1);
            }

            bool avail =
                ((addr >= bound) && (addr >= area->base) &&
                (check_area_conflicts(as, addr, pages, guarded, area)));

            mutex_unlock(&area->lock);

            if (avail)
                return addr;
        }
    }

    /* No suitable address space area found */
    return (uintptr_t) -1;
}

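/*
 * Illustrative usage (hypothetical call site; the caller must already
 * hold as->lock, as the assertion above demands):
 *
 *	uintptr_t va = as_get_unmapped_area(as, bound, size, false);
 *	if (va != (uintptr_t) -1) {
 *		// va is the lowest suitable page-aligned base >= bound
 *	}
 */
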
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as           Target address space.
 * @param flags        Flags of the area memory.
 * @param size         Size of area.
 * @param attrs        Attributes of the area.
 * @param backend      Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 * @param base         Starting virtual address of the area.
 *                     If set to -1, a suitable mappable area is found.
 * @param bound        Lowest address bound if base is set to -1.
 *                     Otherwise ignored.
 *
 * @return Address space area on success or NULL on failure.
 *
 */
as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
    unsigned int attrs, mem_backend_t *backend,
    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
{
    if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
        return NULL;

    if (size == 0)
        return NULL;

    size_t pages = SIZE2FRAMES(size);

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    bool const guarded = flags & AS_AREA_GUARD;

    mutex_lock(&as->lock);

    if (*base == (uintptr_t) -1) {
        *base = as_get_unmapped_area(as, bound, size, guarded);
        if (*base == (uintptr_t) -1) {
            mutex_unlock(&as->lock);
            return NULL;
        }
    }

    if (overflows_into_positive(*base, size)) {
        mutex_unlock(&as->lock);
        return NULL;
    }

    if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
        mutex_unlock(&as->lock);
        return NULL;
    }

    as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&area->lock, MUTEX_PASSIVE);

    area->as = as;
    area->flags = flags;
    area->attributes = attrs;
    area->pages = pages;
    area->resident = 0;
    area->base = *base;
    area->sh_info = NULL;
    area->backend = backend;

    if (backend_data)
        area->backend_data = *backend_data;
    else
        memsetb(&area->backend_data, sizeof(area->backend_data), 0);

    if (area->backend && area->backend->create) {
        if (!area->backend->create(area)) {
            free(area);
            mutex_unlock(&as->lock);
            return NULL;
        }
    }

    btree_create(&area->used_space);
    btree_insert(&as->as_area_btree, *base, (void *) area, NULL);

    mutex_unlock(&as->lock);

    return area;
}

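/*
 * Illustrative sketch (mirrors sys_as_area_create() below; variable
 * names are hypothetical): creating an anonymous read/write area at a
 * kernel-chosen base address by passing -1 as the base.
 *
 *	uintptr_t virt = (uintptr_t) -1;
 *	as_area_t *area = as_area_create(AS,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
 *	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
 *	if (area != NULL) {
 *		// virt now holds the actual base of the new area
 *	}
 */
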
/** Find address space area and lock it.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or
 *         NULL on failure.
 *
 */
NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    ASSERT(mutex_locked(&as->lock));

    btree_node_t *leaf;
    as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
        &leaf);
    if (area) {
        /* va is the base address of an address space area */
        mutex_lock(&area->lock);
        return area;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    btree_key_t i;

    for (i = 0; i < leaf->keys; i++) {
        area = (as_area_t *) leaf->value[i];

        mutex_lock(&area->lock);

        if ((area->base <= va) &&
            (va <= area->base + (P2SZ(area->pages) - 1)))
            return area;

        mutex_unlock(&area->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
        leaf);
    if (lnode) {
        area = (as_area_t *) lnode->value[lnode->keys - 1];

        mutex_lock(&area->lock);

        if (va <= area->base + (P2SZ(area->pages) - 1))
            return area;

        mutex_unlock(&area->lock);
    }

    return NULL;
}

/** UGLY! UGLY! UGLY! */
// TODO: REMOVE ASAP!
as_area_t *find_locked_area(as_t *as, uintptr_t va)
{
    return find_area_and_lock(as, va);
}

/** Find address space area and change it.
 *
 * @param as      Address space.
 * @param address Virtual address belonging to the area to be changed.
 *                Must be page-aligned.
 * @param size    New size of the virtual memory block starting at
 *                address.
 * @param flags   Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 *
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
{
    if (!IS_ALIGNED(address, PAGE_SIZE))
        return EINVAL;

    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (!area->backend->is_resizable(area)) {
        /*
         * The backend does not support resizing for this area.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    size_t pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return EPERM;
    }

    if (pages < area->pages) {
        uintptr_t start_free = area->base + P2SZ(pages);

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        page_table_lock(as, false);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        bool cond = true;
        while (cond) {
            ASSERT(!list_empty(&area->used_space.leaf_list));

            btree_node_t *node =
                list_get_instance(list_last(&area->used_space.leaf_list),
                btree_node_t, leaf_link);

            if ((cond = (bool) node->keys)) {
                uintptr_t ptr = node->key[node->keys - 1];
                size_t size =
                    (size_t) node->value[node->keys - 1];
                size_t i = 0;

                if (overlaps(ptr, P2SZ(size), area->base,
                    P2SZ(pages))) {

                    if (ptr + P2SZ(size) <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval starting at
                     * ptr overlaps with the resized
                     * address space area.
                     */

                    /* We are almost done */
                    cond = false;
                    i = (start_free - ptr) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        size - i))
                        panic("Cannot remove used space.");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, ptr, size))
                        panic("Cannot remove used space.");
                }

                /*
                 * Start TLB shootdown sequence.
                 *
                 * The sequence is rather short and can be
                 * repeated multiple times. The reason is that
                 * we don't want to have used_space_remove()
                 * inside the sequence as it may use a blocking
                 * memory allocation for its B+tree. Blocking
                 * while holding the tlblock spinlock is
                 * forbidden and would hit a kernel assertion.
                 */

                ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES,
                    as->asid, area->base + P2SZ(pages),
                    area->pages - pages);

                for (; i < size; i++) {
                    pte_t *pte = page_mapping_find(as,
                        ptr + P2SZ(i), false);

                    ASSERT(pte);
                    ASSERT(PTE_VALID(pte));
                    ASSERT(PTE_PRESENT(pte));

                    if ((area->backend) &&
                        (area->backend->frame_free)) {
                        area->backend->frame_free(area,
                            ptr + P2SZ(i),
                            PTE_GET_FRAME(pte));
                    }

                    page_mapping_remove(as, ptr + P2SZ(i));
                }

                /*
                 * Finish TLB shootdown sequence.
                 */

                tlb_invalidate_pages(as->asid,
                    area->base + P2SZ(pages),
                    area->pages - pages);

                /*
                 * Invalidate software translation caches
                 * (e.g. TSB on sparc64, PHT on ppc32).
                 */
                as_invalidate_translation_cache(as,
                    area->base + P2SZ(pages),
                    area->pages - pages);
                tlb_shootdown_finalize(ipl);
            }
        }
        page_table_unlock(as, false);
    } else {
        /*
         * Growing the area.
         */

        if (overflows_into_positive(address, P2SZ(pages))) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return EINVAL;
        }

        /*
         * Check for overlaps with other address space areas.
         */
        bool const guarded = area->flags & AS_AREA_GUARD;
        if (!check_area_conflicts(as, address, pages, guarded, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return EADDRNOTAVAIL;
        }
    }

    if (area->backend && area->backend->resize) {
        if (!area->backend->resize(area, pages)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            return ENOMEM;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}

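/*
 * Illustrative usage (hypothetical): growing an area by one page. Note
 * that 'size' is measured from 'address', which must be page-aligned
 * and belong to the area.
 *
 *	int rc = as_area_resize(AS, base, old_size + PAGE_SIZE, 0);
 *	// rc is 0, or ENOENT/ENOTSUP/EPERM/EINVAL/EADDRNOTAVAIL/ENOMEM
 */
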
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 *
 */
NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);

    if (--sh_info->refcount == 0) {
        dealloc = true;

        /*
         * Now walk carefully the pagemap B+tree and free/remove
         * reference from all frames found there.
         */
        list_foreach(sh_info->pagemap.leaf_list, cur) {
            btree_node_t *node
                = list_get_instance(cur, btree_node_t, leaf_link);
            btree_key_t i;

            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i]);
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}

/** Destroy address space area.
 *
 * @param as      Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if (area->backend && area->backend->destroy)
        area->backend->destroy(area);

    uintptr_t base = area->base;

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Visit only the pages mapped by used_space B+tree.
     */
    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node;
        btree_key_t i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                if ((area->backend) &&
                    (area->backend->frame_free)) {
                    area->backend->frame_free(area,
                        ptr + P2SZ(size),
                        PTE_GET_FRAME(pte));
                }

                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as         Pointer to source address space.
 * @param src_base       Base address of the source address space area.
 * @param acc_size       Expected size of the source area.
 * @param dst_as         Pointer to destination address space.
 * @param dst_flags_mask Destination address space area flags mask.
 * @param dst_base       Target base address. If set to -1,
 *                       a suitable mappable area is found.
 * @param bound          Lowest address bound if dst_base is set to -1.
 *                       Otherwise ignored.
 *
 * @return Zero on success.
 * @return ENOENT if there is no such task or such address space.
 * @return EPERM if there was a problem in accepting the area.
 * @return ENOMEM if there was a problem in allocating destination
 *         address space area.
 * @return ENOTSUP if the address space area backend does not support
 *         sharing.
 *
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
    uintptr_t bound)
{
    mutex_lock(&src_as->lock);
    as_area_t *src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        return ENOENT;
    }

    if (!src_area->backend->is_shareable(src_area)) {
        /*
         * The backend does not permit sharing of this area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return ENOTSUP;
    }

    size_t src_size = P2SZ(src_area->pages);
    unsigned int src_flags = src_area->flags;
    mem_backend_t *src_backend = src_area->backend;
    mem_backend_data_t src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if ((src_size != acc_size) ||
        ((src_flags & dst_flags_mask) != dst_flags_mask)) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    share_info_t *sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;

        /*
         * Call the backend to setup sharing.
         */
        src_area->backend->share(src_area);
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
        src_size, AS_AREA_ATTR_PARTIAL, src_backend,
        &src_backend_data, dst_base, bound);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    return 0;
}

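/*
 * Illustrative sketch of a sharing call (hypothetical names; the real
 * call sites live in the IPC code): the receiving side lets the kernel
 * choose the destination base by passing -1.
 *
 *	uintptr_t dst = (uintptr_t) -1;
 *	int rc = as_area_share(src_as, src_base, size, AS,
 *	    AS_AREA_READ | AS_AREA_CACHEABLE, &dst, 0);
 *	// on success, the shared mapping is visible at dst in AS
 */
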
/** Check access mode for address space area.
 *
 * @param area   Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true
 *         otherwise.
 *
 */
NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    ASSERT(mutex_locked(&area->lock));

    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 *
 */
NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
{
    unsigned int flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

/** Change address space area flags.
 *
 * The idea is to have the same data, but with a different access mode.
 * This is needed e.g. for writing code into memory and then executing it.
 * In order for this to work properly, this may copy the data
 * into private anonymous memory (unless it's already there).
 *
 * @param as      Address space.
 * @param flags   Flags of the area memory.
 * @param address Address within the area to be changed.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 *
 */
int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
{
    /* Flags for the new memory mapping */
    unsigned int page_flags = area_flags_to_page_flags(flags);

    mutex_lock(&as->lock);

    as_area_t *area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        return ENOENT;
    }

    if ((area->sh_info) || (area->backend != &anon_backend)) {
        /* Copying shared areas not supported yet */
        /* Copying non-anonymous memory not supported yet */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        return ENOTSUP;
    }

    /*
     * Compute total number of used pages in the used_space B+tree.
     */
    size_t used_pages = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node
            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++)
            used_pages += (size_t) node->value[i];
    }

    /* An array for storing frame numbers */
    uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);

    page_table_lock(as, false);

    /*
     * Start TLB shootdown sequence.
     */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
        area->pages);

    /*
     * Remove used pages from page tables and remember their frame
     * numbers.
     */
    size_t frame_idx = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node = list_get_instance(cur, btree_node_t,
            leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                pte_t *pte = page_mapping_find(as,
                    ptr + P2SZ(size), false);

                ASSERT(pte);
                ASSERT(PTE_VALID(pte));
                ASSERT(PTE_PRESENT(pte));

                old_frame[frame_idx++] = PTE_GET_FRAME(pte);

                /* Remove old mapping */
                page_mapping_remove(as, ptr + P2SZ(size));
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */

    tlb_invalidate_pages(as->asid, area->base, area->pages);

    /*
     * Invalidate potential software translation caches
     * (e.g. TSB on sparc64, PHT on ppc32).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);
    tlb_shootdown_finalize(ipl);

    page_table_unlock(as, false);

    /*
     * Set the new flags.
     */
    area->flags = flags;

    /*
     * Map pages back in with new flags. This step is kept separate
     * so that the memory area could not be accessed with both the old and
     * the new flags at once.
     */
    frame_idx = 0;

    list_foreach(area->used_space.leaf_list, cur) {
        btree_node_t *node
            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            uintptr_t ptr = node->key[i];
            size_t size;

            for (size = 0; size < (size_t) node->value[i]; size++) {
                page_table_lock(as, false);

                /* Insert the new mapping */
                page_mapping_insert(as, ptr + P2SZ(size),
                    old_frame[frame_idx++], page_flags);

                page_table_unlock(as, false);
            }
        }
    }

    free(old_frame);

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);

    return 0;
}

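/*
 * Illustrative usage (hypothetical): the JIT-style pattern this
 * function exists for. Since as_area_create() refuses areas that are
 * both writable and executable, code is first written into a writable
 * anonymous area and the flags are flipped afterwards.
 *
 *	// ... generate code into the writable area at 'base' ...
 *	int rc = as_area_change_flags(AS,
 *	    AS_AREA_READ | AS_AREA_EXEC | AS_AREA_CACHEABLE, base);
 */
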
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides whether the page fault
 * can be resolved by any backend and if so, it invokes the backend to resolve
 * the page fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param address Faulting address.
 * @param access  Access mode that caused the page fault (i.e.
 *                read/write/exec).
 * @param istate  Pointer to the interrupted state.
 *
 * @return AS_PF_FAULT on page fault.
 * @return AS_PF_OK on success.
 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
 *         or copy_from_uspace().
 *
 */
int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
{
    uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
    int rc = AS_PF_FAULT;

    if (!THREAD)
        goto page_fault;

    if (!AS)
        goto page_fault;

    mutex_lock(&AS->lock);
    as_area_t *area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if ((!area->backend) || (!area->backend->page_fault)) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults on the same address,
     * we need to make sure the mapping has not been already inserted.
     */
    pte_t *pte;
    if ((pte = page_mapping_find(AS, page, false))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    rc = area->backend->page_fault(area, page, access);
    if (rc != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD && THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD && THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else if (rc == AS_PF_SILENT) {
        printf("Killing task %" PRIu64 " due to a "
            "failed late reservation request.\n", TASK->taskid);
        task_kill_self(true);
    } else {
        fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
        panic_memtrap(istate, access, address, NULL);
    }

    return AS_PF_DEFER;
}

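/*
 * Illustrative call site (hypothetical; the real architecture-specific
 * trap handlers differ in how they decode the fault): a low-level
 * handler passes the faulting address and access type here. AS_PF_DEFER
 * means control will resume at a failover address prepared by
 * copy_from_uspace()/copy_to_uspace().
 *
 *	int rc = as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
 *	if (rc == AS_PF_OK) {
 *		// mapping established, return and retry the instruction
 *	}
 */
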
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 *
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    DEADLOCK_PROBE_INIT(p_asidlock);
    preemption_disable();

retry:
    (void) interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * Avoid deadlock with TLB shootdown.
         * We can enable interrupts here because
         * preemption is disabled. We should not be
         * holding any other lock.
         */
        (void) interrupts_enable();
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
        goto retry;
    }
    preemption_enable();

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        ASSERT(old_as->cpu_refcount);

        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);

            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_list);
        }

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        else
            new_as->asid = asid_get();
    }

#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    spinlock_unlock(&asidlock);

    AS = new_as;
}

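/*
 * Illustrative call site (hypothetical): the scheduler switches address
 * spaces only when the incoming thread belongs to a different one.
 *
 *	if (new_task->as != old_as)
 *		as_switch(old_as, new_task->as);
 */
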
/** Compute flags for virtual address translation subsystem.
 *
 * @param area Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 *
 */
NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
{
    ASSERT(mutex_locked(&area->lock));

    return area_flags_to_page_flags(area->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space private or global
 * page table.
 *
 * @param flags Flags saying whether the page table is for the kernel
 *              address space.
 *
 * @return First entry of the page table.
 *
 */
NO_TRACE pte_t *page_table_create(unsigned int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 *
 */
NO_TRACE void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as   Address space.
 * @param lock If false, do not attempt to lock as->lock.
 *
 */
NO_TRACE void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as     Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 *
 */
NO_TRACE void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Test whether page tables are locked.
 *
 * @param as Address space where the page tables belong.
 *
 * @return True if the page tables belonging to the address space
 *         are locked, otherwise false.
 */
NO_TRACE bool page_table_locked(as_t *as)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_locked);

    return as_operations->page_table_locked(as);
}

/** Return size of the address space area with given base.
 *
 * @param base Arbitrary address inside the address space area.
 *
 * @return Size of the address space area in bytes or zero if it
 *         does not exist.
 *
 */
size_t as_area_get_size(uintptr_t base)
{
    size_t size;

    page_table_lock(AS, true);
    as_area_t *src_area = find_area_and_lock(AS, base);

    if (src_area) {
        size = P2SZ(src_area->pages);
        mutex_unlock(&src_area->lock);
    } else
        size = 0;

    page_table_unlock(AS, true);
    return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param area  Address space area.
 * @param page  First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return False on failure or true on success.
 *
 */
bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
{
    ASSERT(mutex_locked(&area->lock));
    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    ASSERT(count);

    btree_node_t *leaf;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return false;
    }

    if (!leaf->keys) {
        btree_insert(&area->used_space, page, (void *) count, leaf);
        goto success;
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
    if (node) {
        uintptr_t left_pg = node->key[node->keys - 1];
        uintptr_t right_pg = leaf->key[0];
        size_t left_cnt = (size_t) node->value[node->keys - 1];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, P2SZ(count), left_pg,
            P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (overlaps(page, P2SZ(count), right_pg,
            P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if ((page == left_pg + P2SZ(left_cnt)) &&
            (page + P2SZ(count) == right_pg)) {
            /*
             * The interval can be added by merging the two already
             * present intervals.
             */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, leaf);
            goto success;
        } else if (page == left_pg + P2SZ(left_cnt)) {
            /*
             * The interval can be added by simply growing the left
             * interval.
             */
            node->value[node->keys - 1] += count;
            goto success;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by simply moving base of
             * the right interval down and increasing its size
             * accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            goto success;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    } else if (page < leaf->key[0]) {
        uintptr_t right_pg = leaf->key[0];
        size_t right_cnt = (size_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does
         * not exist but the interval fits from the left.
         */

        if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by moving the base of the
             * right interval down and increasing its size
             * accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            goto success;
        } else {
            /*
             * The interval doesn't adjoin with the right interval.
             * It must be added individually.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    }

    node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
    if (node) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        uintptr_t right_pg = node->key[0];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
        size_t right_cnt = (size_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, P2SZ(count), left_pg,
            P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (overlaps(page, P2SZ(count), right_pg,
            P2SZ(right_cnt))) {
            /* The interval intersects with the right interval. */
            return false;
        } else if ((page == left_pg + P2SZ(left_cnt)) &&
            (page + P2SZ(count) == right_pg)) {
            /*
             * The interval can be added by merging the two already
             * present intervals.
             */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, node);
            goto success;
        } else if (page == left_pg + P2SZ(left_cnt)) {
            /*
             * The interval can be added by simply growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            goto success;
        } else if (page + P2SZ(count) == right_pg) {
            /*
             * The interval can be added by simply moving base of
             * the right interval down and increasing its size
             * accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            goto success;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour
         * does not exist but the interval fits from the right.
         */

        if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
            /* The interval intersects with the left interval. */
            return false;
        } else if (left_pg + P2SZ(left_cnt) == page) {
            /*
             * The interval can be added by growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            goto success;
        } else {
            /*
             * The interval doesn't adjoin with the left interval.
             * It must be added individually.
             */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
            goto success;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit
     * only between two other intervals of the leaf. The two border cases
     * were already resolved.
     */
    btree_key_t i;
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            uintptr_t right_pg = leaf->key[i];
            size_t left_cnt = (size_t) leaf->value[i - 1];
            size_t right_cnt = (size_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, P2SZ(count), left_pg,
                P2SZ(left_cnt))) {
                /*
                 * The interval intersects with the left
                 * interval.
                 */
                return false;
            } else if (overlaps(page, P2SZ(count), right_pg,
                P2SZ(right_cnt))) {
                /*
                 * The interval intersects with the right
                 * interval.
                 */
                return false;
            } else if ((page == left_pg + P2SZ(left_cnt)) &&
                (page + P2SZ(count) == right_pg)) {
                /*
                 * The interval can be added by merging the two
                 * already present intervals.
                 */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&area->used_space, right_pg, leaf);
                goto success;
            } else if (page == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval can be added by simply growing
                 * the left interval.
                 */
                leaf->value[i - 1] += count;
                goto success;
            } else if (page + P2SZ(count) == right_pg) {
                /*
                 * The interval can be added by simply moving
                 * base of the right interval down and
                 * increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                goto success;
            } else {
                /*
                 * The interval is between both neighbouring
                 * intervals, but cannot be merged with any of
                 * them.
                 */
                btree_insert(&area->used_space, page,
                    (void *) count, leaf);
                goto success;
            }
        }
    }

    panic("Inconsistency detected while adding %zu pages of used "
        "space at %p.", count, (void *) page);

success:
    area->resident += count;
    return true;
}

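/*
 * Worked example (illustrative only, 4 KiB pages): with intervals
 * [0x10000, 2 pages] and [0x13000, 1 page] already recorded,
 * used_space_insert(area, 0x12000, 1) hits the "merge the two already
 * present intervals" case: the left interval grows to 2 + 1 + 1 = 4
 * pages and the right interval's key is removed, leaving the single
 * record [0x10000, 4 pages].
 */
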
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param area  Address space area.
 * @param page  First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return False on failure or true on success.
 *
 */
bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
{
    ASSERT(mutex_locked(&area->lock));
    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
    ASSERT(count);

    btree_node_t *leaf;
    size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return false;
        } else if (count == pages) {
            btree_remove(&area->used_space, page, leaf);
            goto success;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            btree_key_t i;
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += P2SZ(count);
                    leaf->value[i] -= count;
                    goto success;
                }
            }

            goto error;
        }
    }

    btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
        leaf);
    if ((node) && (page < leaf->key[0])) {
        uintptr_t left_pg = node->key[node->keys - 1];
        size_t left_cnt = (size_t) node->value[node->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour and can be
                 * removed by updating the size of the bigger
                 * interval.
                 */
                node->value[node->keys - 1] -= count;
                goto success;
            } else if (page + P2SZ(count) <
                left_pg + P2SZ(left_cnt)) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour but its
                 * removal requires both updating the size of
                 * the original interval and also inserting a
                 * new interval.
                 */
                new_cnt = ((left_pg + P2SZ(left_cnt)) -
                    (page + P2SZ(count))) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&area->used_space, page +
                    P2SZ(count), (void *) new_cnt, leaf);
                goto success;
            }
        }

        return false;
    } else if (page < leaf->key[0])
        return false;

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
            if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf and can be removed by
                 * updating the size of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                goto success;
            } else if (page + P2SZ(count) < left_pg +
                P2SZ(left_cnt)) {
                size_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf but its removal
                 * requires both updating the size of the
                 * original interval and also inserting a new
                 * interval.
                 */
                new_cnt = ((left_pg + P2SZ(left_cnt)) -
                    (page + P2SZ(count))) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&area->used_space, page +
                    P2SZ(count), (void *) new_cnt, leaf);
                goto success;
            }
        }

        return false;
    }

    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
    btree_key_t i;
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            size_t left_cnt = (size_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding
             * to (i - 1) and i.
             */
            if (overlaps(left_pg, P2SZ(left_cnt), page,
                P2SZ(count))) {
                if (page + P2SZ(count) ==
                    left_pg + P2SZ(left_cnt)) {
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf and can
                     * be removed by updating the size of
                     * the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    goto success;
                } else if (page + P2SZ(count) <
                    left_pg + P2SZ(left_cnt)) {
                    size_t new_cnt;

                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf but its
                     * removal requires both updating the
                     * size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + P2SZ(left_cnt)) -
                        (page + P2SZ(count))) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&area->used_space, page +
                        P2SZ(count), (void *) new_cnt,
                        leaf);
                    goto success;
                }
            }

            return false;
        }
    }

error:
    panic("Inconsistency detected while removing %zu pages of used "
        "space from %p.", count, (void *) page);

success:
    area->resident -= count;
    return true;
}

/*
 * Address space related syscalls.
 */

sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
    uintptr_t bound)
{
    uintptr_t virt = base;
    as_area_t *area = as_area_create(AS, flags, size,
        AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
    if (area == NULL)
        return (sysarg_t) -1;

    return (sysarg_t) virt;
}

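/*
 * Illustrative sketch of the matching userspace side (hypothetical
 * wrapper; the actual libc interface may differ): passing -1 as the
 * base asks the kernel to pick a suitable address, which is returned
 * as the syscall result, with (sysarg_t) -1 signalling failure.
 *
 *	void *virt = (void *) __SYSCALL4(SYS_AS_AREA_CREATE,
 *	    (sysarg_t) -1, size, AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE, 0);
 */
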
sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
{
    return (sysarg_t) as_area_resize(AS, address, size, 0);
}

sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
{
    return (sysarg_t) as_area_change_flags(AS, flags, address);
}

sysarg_t sys_as_area_destroy(uintptr_t address)
{
    return (sysarg_t) as_area_destroy(AS, address);
}

/** Get list of address space areas.
 *
 * @param as    Address space.
 * @param obuf  Place to save pointer to returned buffer.
 * @param osize Place to save size of returned buffer.
 *
 */
void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
{
    mutex_lock(&as->lock);

    /* First pass, count number of areas. */

    size_t area_cnt = 0;

    list_foreach(as->as_area_btree.leaf_list, cur) {
        btree_node_t *node =
            list_get_instance(cur, btree_node_t, leaf_link);
        area_cnt += node->keys;
    }

    size_t isize = area_cnt * sizeof(as_area_info_t);
    as_area_info_t *info = malloc(isize, 0);

    /* Second pass, record data. */

    size_t area_idx = 0;

    list_foreach(as->as_area_btree.leaf_list, cur) {
        btree_node_t *node =
            list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            ASSERT(area_idx < area_cnt);
            mutex_lock(&area->lock);

            info[area_idx].start_addr = area->base;
            info[area_idx].size = P2SZ(area->pages);
            info[area_idx].flags = area->flags;
            ++area_idx;

            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);

    *obuf = info;
    *osize = isize;
}

/** Print out information about address space.
 *
 * @param as Address space.
 *
 */
void as_print(as_t *as)
{
    mutex_lock(&as->lock);

    /* Print out info about address space areas */
    list_foreach(as->as_area_btree.leaf_list, cur) {
        btree_node_t *node
            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;

        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%zu"
                " (%p - %p)\n", area, (void *) area->base,
                area->pages, (void *) area->base,
                (void *) (area->base + P2SZ(area->pages)));
            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);
}

/** @}
 */