source: mainline/kernel/generic/src/mm/as.c@c7fbb90

Last change on this file since c7fbb90 was 30718cc2, checked in by Jan Vesely <jano.vesely@…>, 14 years ago

Move cacheable flag from syscall to malloc

1/*
2 * Copyright (c) 2010 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of the
39 * Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <preemption.h>
61#include <synch/spinlock.h>
62#include <synch/mutex.h>
63#include <adt/list.h>
64#include <adt/btree.h>
65#include <proc/task.h>
66#include <proc/thread.h>
67#include <arch/asm.h>
68#include <panic.h>
69#include <debug.h>
70#include <print.h>
71#include <memstr.h>
72#include <macros.h>
73#include <bitops.h>
74#include <arch.h>
75#include <errno.h>
76#include <config.h>
77#include <align.h>
78#include <typedefs.h>
79#include <syscall/copy.h>
80#include <arch/interrupt.h>
81
82#ifdef CONFIG_VIRT_IDX_DCACHE
83#include <arch/mm/cache.h>
84#endif /* CONFIG_VIRT_IDX_DCACHE */
85
86/**
87 * Each architecture decides what functions will be used to carry out
88 * address space operations such as creating or locking page tables.
89 */
90as_operations_t *as_operations = NULL;
91
92/** Slab for as_t objects.
93 *
94 */
95static slab_cache_t *as_slab;
96
97/** ASID subsystem lock.
98 *
99 * This lock protects:
100 * - inactive_as_with_asid_head list
101 * - as->asid for each as of the as_t type
102 * - asids_allocated counter
103 *
104 */
105SPINLOCK_INITIALIZE(asidlock);
106
107/**
108 * Inactive address spaces (on all processors)
109 * that have valid ASID.
110 */
111LIST_INITIALIZE(inactive_as_with_asid_head);
112
113/** Kernel address space. */
114as_t *AS_KERNEL = NULL;
115
116NO_TRACE static int as_constructor(void *obj, unsigned int flags)
117{
118 as_t *as = (as_t *) obj;
119
120 link_initialize(&as->inactive_as_with_asid_link);
121 mutex_initialize(&as->lock, MUTEX_PASSIVE);
122
123 return as_constructor_arch(as, flags);
124}
125
126NO_TRACE static size_t as_destructor(void *obj)
127{
128 return as_destructor_arch((as_t *) obj);
129}
130
131/** Initialize address space subsystem. */
132void as_init(void)
133{
134 as_arch_init();
135
136 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
137 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
138
139 AS_KERNEL = as_create(FLAG_AS_KERNEL);
140 if (!AS_KERNEL)
141 panic("Cannot create kernel address space.");
142
143 /*
144 * Make sure the kernel address space
145 * reference count never drops to zero.
146 */
147 as_hold(AS_KERNEL);
148}
149
150/** Create address space.
151 *
152 * @param flags Flags that influence the way in which the address
153 * space is created.
154 *
155 */
156as_t *as_create(unsigned int flags)
157{
158 as_t *as = (as_t *) slab_alloc(as_slab, 0);
159 (void) as_create_arch(as, 0);
160
161 btree_create(&as->as_area_btree);
162
163 if (flags & FLAG_AS_KERNEL)
164 as->asid = ASID_KERNEL;
165 else
166 as->asid = ASID_INVALID;
167
168 atomic_set(&as->refcount, 0);
169 as->cpu_refcount = 0;
170
171#ifdef AS_PAGE_TABLE
172 as->genarch.page_table = page_table_create(flags);
173#else
174 page_table_create(flags);
175#endif
176
177 return as;
178}
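
/*
 * Illustrative sketch (not part of the original file): the typical
 * create/hold pairing for a new userspace address space, mirroring
 * what as_init() does for AS_KERNEL above. example_new_task_as() is
 * a hypothetical helper.
 */
#if 0
static as_t *example_new_task_as(void)
{
	/* No FLAG_AS_KERNEL: the ASID stays invalid until first use. */
	as_t *as = as_create(0);

	/* Keep the address space alive while a task references it. */
	as_hold(as);

	/* The matching as_release() eventually destroys it. */
	return as;
}
#endif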
179
180/** Destroy address space.
181 *
182 * When there are no tasks referencing this address space (i.e. its refcount is
183 * zero), the address space can be destroyed.
184 *
185 * We know that we don't hold any spinlock.
186 *
187 * @param as Address space to be destroyed.
188 *
189 */
190void as_destroy(as_t *as)
191{
192 DEADLOCK_PROBE_INIT(p_asidlock);
193
194 ASSERT(as != AS);
195 ASSERT(atomic_get(&as->refcount) == 0);
196
197 /*
198 * Since there is no reference to this address space, it is safe not to
199 * lock its mutex.
200 */
201
202 /*
203 * We need to avoid deadlock between TLB shootdown and asidlock.
204 * We therefore try to take asidlock conditionally and if we don't succeed,
205 * we enable interrupts and try again. This is done while preemption is
206 * disabled to prevent nested context switches. We also depend on the
207 * fact that so far no spinlocks are held.
208 */
209 preemption_disable();
210 ipl_t ipl = interrupts_read();
211
212retry:
213 interrupts_disable();
214 if (!spinlock_trylock(&asidlock)) {
215 interrupts_enable();
216 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
217 goto retry;
218 }
219
220 /* Interrupts disabled, enable preemption */
221 preemption_enable();
222
223 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) {
224 if (as->cpu_refcount == 0)
225 list_remove(&as->inactive_as_with_asid_link);
226
227 asid_put(as->asid);
228 }
229
230 spinlock_unlock(&asidlock);
231 interrupts_restore(ipl);
232
233
234 /*
235 * Destroy address space areas of the address space.
236 * The B+tree must be walked carefully because it is
237 * also being destroyed.
238 */
239 bool cond = true;
240 while (cond) {
241 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
242
243 btree_node_t *node =
244 list_get_instance(as->as_area_btree.leaf_head.next,
245 btree_node_t, leaf_link);
246
247 if ((cond = node->keys))
248 as_area_destroy(as, node->key[0]);
249 }
250
251 btree_destroy(&as->as_area_btree);
252
253#ifdef AS_PAGE_TABLE
254 page_table_destroy(as->genarch.page_table);
255#else
256 page_table_destroy(NULL);
257#endif
258
259 slab_free(as_slab, as);
260}
261
262/** Hold a reference to an address space.
263 *
264 * Holding a reference to an address space prevents destruction
265 * of that address space.
266 *
267 * @param as Address space to be held.
268 *
269 */
270NO_TRACE void as_hold(as_t *as)
271{
272 atomic_inc(&as->refcount);
273}
274
275/** Release a reference to an address space.
276 *
277 * The last one to release a reference to an address space
278 * destroys the address space.
279 *
280 * @param as Address space to be released.
281 *
282 */
283NO_TRACE void as_release(as_t *as)
284{
285 if (atomic_predec(&as->refcount) == 0)
286 as_destroy(as);
287}
288
289/** Check the area for conflicts with other areas.
290 *
291 * @param as Address space.
292 * @param addr Starting virtual address of the area being tested.
293 * @param count Number of pages in the area being tested.
294 * @param avoid Do not touch this area.
295 *
296 * @return True if there is no conflict, false otherwise.
297 *
298 */
299NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
300 size_t count, as_area_t *avoid)
301{
302 ASSERT((addr % PAGE_SIZE) == 0);
303 ASSERT(mutex_locked(&as->lock));
304
305 /*
306 * We don't want any area to have conflicts with NULL page.
307 */
308 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
309 return false;
310
311 /*
312 * The leaf node is found in O(log n), where n is proportional to
313 * the number of address space areas belonging to as.
314 * The check for conflicts is then attempted on the rightmost
315 * record in the left neighbour, the leftmost record in the right
316 * neighbour and all records in the leaf node itself.
317 */
318 btree_node_t *leaf;
319 as_area_t *area =
320 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
321 if (area) {
322 if (area != avoid)
323 return false;
324 }
325
326 /* First, check the two border cases. */
327 btree_node_t *node =
328 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
329 if (node) {
330 area = (as_area_t *) node->value[node->keys - 1];
331
332 if (area != avoid) {
333 mutex_lock(&area->lock);
334
335 if (overlaps(addr, count << PAGE_WIDTH,
336 area->base, area->pages << PAGE_WIDTH)) {
337 mutex_unlock(&area->lock);
338 return false;
339 }
340
341 mutex_unlock(&area->lock);
342 }
343 }
344
345 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
346 if (node) {
347 area = (as_area_t *) node->value[0];
348
349 if (area != avoid) {
350 mutex_lock(&area->lock);
351
352 if (overlaps(addr, count << PAGE_WIDTH,
353 area->base, area->pages << PAGE_WIDTH)) {
354 mutex_unlock(&area->lock);
355 return false;
356 }
357
358 mutex_unlock(&area->lock);
359 }
360 }
361
362 /* Second, check the leaf node. */
363 btree_key_t i;
364 for (i = 0; i < leaf->keys; i++) {
365 area = (as_area_t *) leaf->value[i];
366
367 if (area == avoid)
368 continue;
369
370 mutex_lock(&area->lock);
371
372 if (overlaps(addr, count << PAGE_WIDTH,
373 area->base, area->pages << PAGE_WIDTH)) {
374 mutex_unlock(&area->lock);
375 return false;
376 }
377
378 mutex_unlock(&area->lock);
379 }
380
381 /*
382 * So far, the area does not conflict with other areas.
383 * Check that it does not conflict with the kernel address space.
384 */
385 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
386 return !overlaps(addr, count << PAGE_WIDTH,
387 KERNEL_ADDRESS_SPACE_START,
388 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
389 }
390
391 return true;
392}
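
/*
 * A minimal sketch of the overlaps() semantics relied on above
 * (assumption: the real macro is provided by <macros.h>): two
 * half-open intervals [s1, s1 + sz1) and [s2, s2 + sz2) conflict
 * iff each one starts below the other's end.
 */
#if 0
static bool example_overlaps(uintptr_t s1, size_t sz1, uintptr_t s2,
    size_t sz2)
{
	return ((s1 < s2 + sz2) && (s2 < s1 + sz1));
}
#endif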
393
394/** Create address space area of common attributes.
395 *
396 * The created address space area is added to the target address space.
397 *
398 * @param as Target address space.
399 * @param flags Flags of the area memory.
400 * @param size Size of area.
401 * @param base Base address of area.
402 * @param attrs Attributes of the area.
403 * @param backend Address space area backend. NULL if no backend is used.
404 * @param backend_data NULL or a pointer to an array holding two void *.
405 *
406 * @return Address space area on success or NULL on failure.
407 *
408 */
409as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
410 uintptr_t base, unsigned int attrs, mem_backend_t *backend,
411 mem_backend_data_t *backend_data)
412{
413 if ((base % PAGE_SIZE) != 0)
414 return NULL;
415
416 if (size == 0)
417 return NULL;
418
419 size_t pages = SIZE2FRAMES(size);
420
421 /* Writeable executable areas are not supported. */
422 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
423 return NULL;
424
425 mutex_lock(&as->lock);
426
427 if (!check_area_conflicts(as, base, pages, NULL)) {
428 mutex_unlock(&as->lock);
429 return NULL;
430 }
431
432 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0);
433
434 mutex_initialize(&area->lock, MUTEX_PASSIVE);
435
436 area->as = as;
437 area->flags = flags;
438 area->attributes = attrs;
439 area->pages = pages;
440 area->resident = 0;
441 area->base = base;
442 area->sh_info = NULL;
443 area->backend = backend;
444
445 if (backend_data)
446 area->backend_data = *backend_data;
447 else
448 memsetb(&area->backend_data, sizeof(area->backend_data), 0);
449
450 btree_create(&area->used_space);
451 btree_insert(&as->as_area_btree, base, (void *) area, NULL);
452
453 mutex_unlock(&as->lock);
454
455 return area;
456}
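
/*
 * Illustrative sketch (not part of the original file): creating a
 * one-page anonymous read/write area. The base address 0x1000000 is
 * an arbitrary page-aligned example value.
 */
#if 0
static as_area_t *example_map_anon_page(as_t *as)
{
	return as_area_create(as, AS_AREA_READ | AS_AREA_WRITE |
	    AS_AREA_CACHEABLE, PAGE_SIZE, 0x1000000, AS_AREA_ATTR_NONE,
	    &anon_backend, NULL);
}
#endif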
457
458/** Find address space area and lock it.
459 *
460 * @param as Address space.
461 * @param va Virtual address.
462 *
463 * @return Locked address space area containing va on success or
464 * NULL on failure.
465 *
466 */
467NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
468{
469 ASSERT(mutex_locked(&as->lock));
470
471 btree_node_t *leaf;
472 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
473 if (area) {
474 /* va is the base address of an address space area */
475 mutex_lock(&area->lock);
476 return area;
477 }
478
479 /*
480 * Search the leaf node and the rightmost record of its left neighbour
481 * to find out whether this is a miss or va belongs to an address
482 * space area found there.
483 */
484
485 /* First, search the leaf node itself. */
486 btree_key_t i;
487
488 for (i = 0; i < leaf->keys; i++) {
489 area = (as_area_t *) leaf->value[i];
490
491 mutex_lock(&area->lock);
492
493 if ((area->base <= va) &&
494 (va < area->base + (area->pages << PAGE_WIDTH)))
495 return area;
496
497 mutex_unlock(&area->lock);
498 }
499
500 /*
501 * Second, locate the left neighbour and test its last record.
502 * Because of its position in the B+tree, it must have base < va.
503 */
504 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
505 if (lnode) {
506 area = (as_area_t *) lnode->value[lnode->keys - 1];
507
508 mutex_lock(&area->lock);
509
510 if (va < area->base + (area->pages << PAGE_WIDTH))
511 return area;
512
513 mutex_unlock(&area->lock);
514 }
515
516 return NULL;
517}
518
519/** Find address space area and change it.
520 *
521 * @param as Address space.
522 * @param address Virtual address belonging to the area to be changed.
523 * Must be page-aligned.
524 * @param size New size of the virtual memory block starting at
525 * address.
526 * @param flags Flags influencing the remap operation. Currently unused.
527 *
528 * @return Zero on success or a value from @ref errno.h otherwise.
529 *
530 */
531int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
532{
533 mutex_lock(&as->lock);
534
535 /*
536 * Locate the area.
537 */
538 as_area_t *area = find_area_and_lock(as, address);
539 if (!area) {
540 mutex_unlock(&as->lock);
541 return ENOENT;
542 }
543
544 if (area->backend == &phys_backend) {
545 /*
546 * Remapping of address space areas associated
547 * with memory mapped devices is not supported.
548 */
549 mutex_unlock(&area->lock);
550 mutex_unlock(&as->lock);
551 return ENOTSUP;
552 }
553
554 if (area->sh_info) {
555 /*
556 * Remapping of shared address space areas
557 * is not supported.
558 */
559 mutex_unlock(&area->lock);
560 mutex_unlock(&as->lock);
561 return ENOTSUP;
562 }
563
564 size_t pages = SIZE2FRAMES((address - area->base) + size);
565 if (!pages) {
566 /*
567 * Zero size address space areas are not allowed.
568 */
569 mutex_unlock(&area->lock);
570 mutex_unlock(&as->lock);
571 return EPERM;
572 }
573
574 if (pages < area->pages) {
575 uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
576
577 /*
578 * Shrinking the area.
579 * No need to check for overlaps.
580 */
581
582 page_table_lock(as, false);
583
584 /*
585 * Start TLB shootdown sequence.
586 */
587 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
588 area->base + (pages << PAGE_WIDTH), area->pages - pages);
589
590 /*
591 * Remove frames belonging to used space starting from
592 * the highest addresses downwards until an overlap with
593 * the resized address space area is found. Note that this
594 * is also the right way to remove part of the used_space
595 * B+tree leaf list.
596 */
597 bool cond = true;
598 while (cond) {
599 ASSERT(!list_empty(&area->used_space.leaf_head));
600
601 btree_node_t *node =
602 list_get_instance(area->used_space.leaf_head.prev,
603 btree_node_t, leaf_link);
604
605 if ((cond = (bool) node->keys)) {
606 uintptr_t ptr = node->key[node->keys - 1];
607 size_t size =
608 (size_t) node->value[node->keys - 1];
609 size_t i = 0;
610
611 if (overlaps(ptr, size << PAGE_WIDTH, area->base,
612 pages << PAGE_WIDTH)) {
613
614 if (ptr + (size << PAGE_WIDTH) <= start_free) {
615 /*
616 * The whole interval fits
617 * completely in the resized
618 * address space area.
619 */
620 break;
621 }
622
623 /*
624 * Only part of the interval extends beyond
625 * the resized address space area; remove
626 * just that overhanging tail.
627 */
628
629 /* We are almost done */
630 cond = false;
631 i = (start_free - ptr) >> PAGE_WIDTH;
632 if (!used_space_remove(area, start_free,
633 size - i))
634 panic("Cannot remove used space.");
635 } else {
636 /*
637 * The interval of used space can be
638 * completely removed.
639 */
640 if (!used_space_remove(area, ptr, size))
641 panic("Cannot remove used space.");
642 }
643
644 for (; i < size; i++) {
645 pte_t *pte = page_mapping_find(as, ptr +
646 (i << PAGE_WIDTH));
647
648 ASSERT(pte);
649 ASSERT(PTE_VALID(pte));
650 ASSERT(PTE_PRESENT(pte));
651
652 if ((area->backend) &&
653 (area->backend->frame_free)) {
654 area->backend->frame_free(area,
655 ptr + (i << PAGE_WIDTH),
656 PTE_GET_FRAME(pte));
657 }
658
659 page_mapping_remove(as, ptr +
660 (i << PAGE_WIDTH));
661 }
662 }
663 }
664
665 /*
666 * Finish TLB shootdown sequence.
667 */
668
669 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
670 area->pages - pages);
671
672 /*
673 * Invalidate software translation caches (e.g. TSB on sparc64).
674 */
675 as_invalidate_translation_cache(as, area->base +
676 (pages << PAGE_WIDTH), area->pages - pages);
677 tlb_shootdown_finalize(ipl);
678
679 page_table_unlock(as, false);
680 } else {
681 /*
682 * Growing the area.
683 * Check for overlaps with other address space areas.
684 */
685 if (!check_area_conflicts(as, address, pages, area)) {
686 mutex_unlock(&area->lock);
687 mutex_unlock(&as->lock);
688 return EADDRNOTAVAIL;
689 }
690 }
691
692 area->pages = pages;
693
694 mutex_unlock(&area->lock);
695 mutex_unlock(&as->lock);
696
697 return 0;
698}
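
/*
 * Illustrative sketch (not part of the original file): growing an
 * area by one page. as_area_resize() interprets the size relative to
 * the passed address, so passing the area base and the old size plus
 * one page adds exactly one page.
 */
#if 0
static int example_grow_by_page(as_t *as, uintptr_t base, size_t old_size)
{
	return as_area_resize(as, base, old_size + PAGE_SIZE, 0);
}
#endif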
699
700/** Remove reference to address space area share info.
701 *
702 * If the reference count drops to 0, the sh_info is deallocated.
703 *
704 * @param sh_info Pointer to address space area share info.
705 *
706 */
707NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
708{
709 bool dealloc = false;
710
711 mutex_lock(&sh_info->lock);
712 ASSERT(sh_info->refcount);
713
714 if (--sh_info->refcount == 0) {
715 dealloc = true;
716 link_t *cur;
717
718 /*
719 * Now walk carefully the pagemap B+tree and free/remove
720 * reference from all frames found there.
721 */
722 for (cur = sh_info->pagemap.leaf_head.next;
723 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
724 btree_node_t *node
725 = list_get_instance(cur, btree_node_t, leaf_link);
726 btree_key_t i;
727
728 for (i = 0; i < node->keys; i++)
729 frame_free((uintptr_t) node->value[i]);
730 }
731
732 }
733 mutex_unlock(&sh_info->lock);
734
735 if (dealloc) {
736 btree_destroy(&sh_info->pagemap);
737 free(sh_info);
738 }
739}
740
741/** Destroy address space area.
742 *
743 * @param as Address space.
744 * @param address Address within the area to be deleted.
745 *
746 * @return Zero on success or a value from @ref errno.h on failure.
747 *
748 */
749int as_area_destroy(as_t *as, uintptr_t address)
750{
751 mutex_lock(&as->lock);
752
753 as_area_t *area = find_area_and_lock(as, address);
754 if (!area) {
755 mutex_unlock(&as->lock);
756 return ENOENT;
757 }
758
759 uintptr_t base = area->base;
760
761 page_table_lock(as, false);
762
763 /*
764 * Start TLB shootdown sequence.
765 */
766 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
767 area->pages);
768
769 /*
770 * Visit only the pages mapped by used_space B+tree.
771 */
772 link_t *cur;
773 for (cur = area->used_space.leaf_head.next;
774 cur != &area->used_space.leaf_head; cur = cur->next) {
775 btree_node_t *node;
776 btree_key_t i;
777
778 node = list_get_instance(cur, btree_node_t, leaf_link);
779 for (i = 0; i < node->keys; i++) {
780 uintptr_t ptr = node->key[i];
781 size_t size;
782
783 for (size = 0; size < (size_t) node->value[i]; size++) {
784 pte_t *pte =
785 page_mapping_find(as, ptr + (size << PAGE_WIDTH));
786
787 ASSERT(pte);
788 ASSERT(PTE_VALID(pte));
789 ASSERT(PTE_PRESENT(pte));
790
791 if ((area->backend) &&
792 (area->backend->frame_free)) {
793 area->backend->frame_free(area,
794 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
795 }
796
797 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
798 }
799 }
800 }
801
802 /*
803 * Finish TLB shootdown sequence.
804 */
805
806 tlb_invalidate_pages(as->asid, area->base, area->pages);
807
808 /*
809 * Invalidate potential software translation caches (e.g. TSB on
810 * sparc64).
811 */
812 as_invalidate_translation_cache(as, area->base, area->pages);
813 tlb_shootdown_finalize(ipl);
814
815 page_table_unlock(as, false);
816
817 btree_destroy(&area->used_space);
818
819 area->attributes |= AS_AREA_ATTR_PARTIAL;
820
821 if (area->sh_info)
822 sh_info_remove_reference(area->sh_info);
823
824 mutex_unlock(&area->lock);
825
826 /*
827 * Remove the empty area from address space.
828 */
829 btree_remove(&as->as_area_btree, base, NULL);
830
831 free(area);
832
833 mutex_unlock(&as->lock);
834 return 0;
835}
836
837/** Share address space area with another or the same address space.
838 *
839 * Address space area mapping is shared with a new address space area.
840 * If the source address space area has not been shared so far,
841 * a new sh_info is created. The new address space area simply gets the
842 * sh_info of the source area. The process of duplicating the
843 * mapping is done through the backend share function.
844 *
845 * @param src_as Pointer to source address space.
846 * @param src_base Base address of the source address space area.
847 * @param acc_size Expected size of the source area.
848 * @param dst_as Pointer to destination address space.
849 * @param dst_base Target base address.
850 * @param dst_flags_mask Destination address space area flags mask.
851 *
852 * @return Zero on success.
853 * @return ENOENT if there is no such task or such address space.
854 * @return EPERM if there was a problem in accepting the area.
855 * @return ENOMEM if there was a problem in allocating destination
856 * address space area.
857 * @return ENOTSUP if the address space area backend does not support
858 * sharing.
859 *
860 */
861int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
862 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
863{
864 mutex_lock(&src_as->lock);
865 as_area_t *src_area = find_area_and_lock(src_as, src_base);
866 if (!src_area) {
867 /*
868 * Could not find the source address space area.
869 */
870 mutex_unlock(&src_as->lock);
871 return ENOENT;
872 }
873
874 if ((!src_area->backend) || (!src_area->backend->share)) {
875 /*
876 * There is no backend or the backend does not
877 * know how to share the area.
878 */
879 mutex_unlock(&src_area->lock);
880 mutex_unlock(&src_as->lock);
881 return ENOTSUP;
882 }
883
884 size_t src_size = src_area->pages << PAGE_WIDTH;
885 unsigned int src_flags = src_area->flags;
886 mem_backend_t *src_backend = src_area->backend;
887 mem_backend_data_t src_backend_data = src_area->backend_data;
888
889 /* Share the cacheable flag from the original mapping */
890 if (src_flags & AS_AREA_CACHEABLE)
891 dst_flags_mask |= AS_AREA_CACHEABLE;
892
893 if ((src_size != acc_size) ||
894 ((src_flags & dst_flags_mask) != dst_flags_mask)) {
895 mutex_unlock(&src_area->lock);
896 mutex_unlock(&src_as->lock);
897 return EPERM;
898 }
899
900 /*
901 * Now we are committed to sharing the area.
902 * First, prepare the area for sharing.
903 * Then it will be safe to unlock it.
904 */
905 share_info_t *sh_info = src_area->sh_info;
906 if (!sh_info) {
907 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
908 mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
909 sh_info->refcount = 2;
910 btree_create(&sh_info->pagemap);
911 src_area->sh_info = sh_info;
912
913 /*
914 * Call the backend to setup sharing.
915 */
916 src_area->backend->share(src_area);
917 } else {
918 mutex_lock(&sh_info->lock);
919 sh_info->refcount++;
920 mutex_unlock(&sh_info->lock);
921 }
922
923 mutex_unlock(&src_area->lock);
924 mutex_unlock(&src_as->lock);
925
926 /*
927 * Create copy of the source address space area.
928 * The destination area is created with AS_AREA_ATTR_PARTIAL
929 * attribute set which prevents race condition with
930 * preliminary as_page_fault() calls.
931 * The flags of the source area are masked against dst_flags_mask
932 * to support sharing in less privileged mode.
933 */
934 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
935 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
936 if (!dst_area) {
937 /*
938 * Destination address space area could not be created.
939 */
940 sh_info_remove_reference(sh_info);
941
942 return ENOMEM;
943 }
944
945 /*
946 * Now the destination address space area has been
947 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
948 * attribute and set the sh_info.
949 */
950 mutex_lock(&dst_as->lock);
951 mutex_lock(&dst_area->lock);
952 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
953 dst_area->sh_info = sh_info;
954 mutex_unlock(&dst_area->lock);
955 mutex_unlock(&dst_as->lock);
956
957 return 0;
958}
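
/*
 * Illustrative sketch (not part of the original file): sharing an
 * existing area read-only into a second address space. acc_size must
 * match the source area size exactly, otherwise as_area_share()
 * returns EPERM (see the check above).
 */
#if 0
static int example_share_readonly(as_t *src_as, uintptr_t src_base,
    size_t size, as_t *dst_as, uintptr_t dst_base)
{
	return as_area_share(src_as, src_base, size, dst_as, dst_base,
	    AS_AREA_READ);
}
#endif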
959
960/** Check access mode for address space area.
961 *
962 * @param area Address space area.
963 * @param access Access mode.
964 *
965 * @return False if access violates area's permissions, true
966 * otherwise.
967 *
968 */
969NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
970{
971 ASSERT(mutex_locked(&area->lock));
972
973 int flagmap[] = {
974 [PF_ACCESS_READ] = AS_AREA_READ,
975 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
976 [PF_ACCESS_EXEC] = AS_AREA_EXEC
977 };
978
979 if (!(area->flags & flagmap[access]))
980 return false;
981
982 return true;
983}
984
985/** Convert address space area flags to page flags.
986 *
987 * @param aflags Flags of some address space area.
988 *
989 * @return Flags to be passed to page_mapping_insert().
990 *
991 */
992NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)
993{
994 unsigned int flags = PAGE_USER | PAGE_PRESENT;
995
996 if (aflags & AS_AREA_READ)
997 flags |= PAGE_READ;
998
999 if (aflags & AS_AREA_WRITE)
1000 flags |= PAGE_WRITE;
1001
1002 if (aflags & AS_AREA_EXEC)
1003 flags |= PAGE_EXEC;
1004
1005 if (aflags & AS_AREA_CACHEABLE)
1006 flags |= PAGE_CACHEABLE;
1007
1008 return flags;
1009}
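
/*
 * Worked example of the translation above: an area created with
 * AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE maps to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * i.e. each area flag gains its page-level counterpart on top of the
 * unconditional PAGE_USER | PAGE_PRESENT.
 */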
1010
1011/** Change address space area flags.
1012 *
1013 * The idea is to have the same data, but with a different access mode.
1014 * This is needed e.g. for writing code into memory and then executing it.
1015 * In order for this to work properly, this may copy the data
1016 * into private anonymous memory (unless it's already there).
1017 *
1018 * @param as Address space.
1019 * @param flags Flags of the area memory.
1020 * @param address Address within the area to be changed.
1021 *
1022 * @return Zero on success or a value from @ref errno.h on failure.
1023 *
1024 */
1025int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address)
1026{
1027 /* Flags for the new memory mapping */
1028 unsigned int page_flags = area_flags_to_page_flags(flags);
1029
1030 mutex_lock(&as->lock);
1031
1032 as_area_t *area = find_area_and_lock(as, address);
1033 if (!area) {
1034 mutex_unlock(&as->lock);
1035 return ENOENT;
1036 }
1037
1038 if ((area->sh_info) || (area->backend != &anon_backend)) {
1039 /* Copying shared areas not supported yet */
1040 /* Copying non-anonymous memory not supported yet */
1041 mutex_unlock(&area->lock);
1042 mutex_unlock(&as->lock);
1043 return ENOTSUP;
1044 }
1045
1046 /*
1047 * Compute total number of used pages in the used_space B+tree
1048 */
1049 size_t used_pages = 0;
1050 link_t *cur;
1051
1052 for (cur = area->used_space.leaf_head.next;
1053 cur != &area->used_space.leaf_head; cur = cur->next) {
1054 btree_node_t *node
1055 = list_get_instance(cur, btree_node_t, leaf_link);
1056 btree_key_t i;
1057
1058 for (i = 0; i < node->keys; i++)
1059 used_pages += (size_t) node->value[i];
1060 }
1061
1062 /* An array for storing frame numbers */
1063 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
1064
1065 page_table_lock(as, false);
1066
1067 /*
1068 * Start TLB shootdown sequence.
1069 */
1070 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
1071 area->pages);
1072
1073 /*
1074 * Remove used pages from page tables and remember their frame
1075 * numbers.
1076 */
1077 size_t frame_idx = 0;
1078
1079 for (cur = area->used_space.leaf_head.next;
1080 cur != &area->used_space.leaf_head; cur = cur->next) {
1081 btree_node_t *node
1082 = list_get_instance(cur, btree_node_t, leaf_link);
1083 btree_key_t i;
1084
1085 for (i = 0; i < node->keys; i++) {
1086 uintptr_t ptr = node->key[i];
1087 size_t size;
1088
1089 for (size = 0; size < (size_t) node->value[i]; size++) {
1090 pte_t *pte =
1091 page_mapping_find(as, ptr + (size << PAGE_WIDTH));
1092
1093 ASSERT(pte);
1094 ASSERT(PTE_VALID(pte));
1095 ASSERT(PTE_PRESENT(pte));
1096
1097 old_frame[frame_idx++] = PTE_GET_FRAME(pte);
1098
1099 /* Remove old mapping */
1100 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
1101 }
1102 }
1103 }
1104
1105 /*
1106 * Finish TLB shootdown sequence.
1107 */
1108
1109 tlb_invalidate_pages(as->asid, area->base, area->pages);
1110
1111 /*
1112 * Invalidate potential software translation caches (e.g. TSB on
1113 * sparc64).
1114 */
1115 as_invalidate_translation_cache(as, area->base, area->pages);
1116 tlb_shootdown_finalize(ipl);
1117
1118 page_table_unlock(as, false);
1119
1120 /*
1121 * Set the new flags.
1122 */
1123 area->flags = flags;
1124
1125 /*
1126 * Map pages back in with new flags. This step is kept separate
1127 * so that the memory area cannot be accessed with both the old and
1128 * the new flags at once.
1129 */
1130 frame_idx = 0;
1131
1132 for (cur = area->used_space.leaf_head.next;
1133 cur != &area->used_space.leaf_head; cur = cur->next) {
1134 btree_node_t *node
1135 = list_get_instance(cur, btree_node_t, leaf_link);
1136 btree_key_t i;
1137
1138 for (i = 0; i < node->keys; i++) {
1139 uintptr_t ptr = node->key[i];
1140 size_t size;
1141
1142 for (size = 0; size < (size_t) node->value[i]; size++) {
1143 page_table_lock(as, false);
1144
1145 /* Insert the new mapping */
1146 page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
1147 old_frame[frame_idx++], page_flags);
1148
1149 page_table_unlock(as, false);
1150 }
1151 }
1152 }
1153
1154 free(old_frame);
1155
1156 mutex_unlock(&area->lock);
1157 mutex_unlock(&as->lock);
1158
1159 return 0;
1160}
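
/*
 * Illustrative sketch (not part of the original file): the
 * write-then-execute transition described in the doc comment of
 * as_area_change_flags() — an anonymous area that was filled with
 * code while writable is flipped to read+execute.
 */
#if 0
static int example_make_executable(as_t *as, uintptr_t address)
{
	return as_area_change_flags(as, AS_AREA_READ | AS_AREA_EXEC |
	    AS_AREA_CACHEABLE, address);
}
#endif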
1161
1162/** Handle page fault within the current address space.
1163 *
1164 * This is the high-level page fault handler. It decides whether the page fault
1165 * can be resolved by any backend and if so, it invokes the backend to resolve
1166 * the page fault.
1167 *
1168 * Interrupts are assumed disabled.
1169 *
1170 * @param page Faulting page.
1171 * @param access Access mode that caused the page fault (i.e.
1172 * read/write/exec).
1173 * @param istate Pointer to the interrupted state.
1174 *
1175 * @return AS_PF_FAULT on page fault.
1176 * @return AS_PF_OK on success.
1177 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace()
1178 * or copy_from_uspace().
1179 *
1180 */
1181int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
1182{
1183 if (!THREAD)
1184 return AS_PF_FAULT;
1185
1186 if (!AS)
1187 return AS_PF_FAULT;
1188
1189 mutex_lock(&AS->lock);
1190 as_area_t *area = find_area_and_lock(AS, page);
1191 if (!area) {
1192 /*
1193 * No area contained mapping for 'page'.
1194 * Signal page fault to low-level handler.
1195 */
1196 mutex_unlock(&AS->lock);
1197 goto page_fault;
1198 }
1199
1200 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
1201 /*
1202 * The address space area is not fully initialized.
1203 * Avoid possible race by returning error.
1204 */
1205 mutex_unlock(&area->lock);
1206 mutex_unlock(&AS->lock);
1207 goto page_fault;
1208 }
1209
1210 if ((!area->backend) || (!area->backend->page_fault)) {
1211 /*
1212 * The address space area is not backed by any backend
1213 * or the backend cannot handle page faults.
1214 */
1215 mutex_unlock(&area->lock);
1216 mutex_unlock(&AS->lock);
1217 goto page_fault;
1218 }
1219
1220 page_table_lock(AS, false);
1221
1222 /*
1223 * To avoid race condition between two page faults on the same address,
1224 * we need to make sure the mapping has not been already inserted.
1225 */
1226 pte_t *pte;
1227 if ((pte = page_mapping_find(AS, page))) {
1228 if (PTE_PRESENT(pte)) {
1229 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
1230 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
1231 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
1232 page_table_unlock(AS, false);
1233 mutex_unlock(&area->lock);
1234 mutex_unlock(&AS->lock);
1235 return AS_PF_OK;
1236 }
1237 }
1238 }
1239
1240 /*
1241 * Resort to the backend page fault handler.
1242 */
1243 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
1244 page_table_unlock(AS, false);
1245 mutex_unlock(&area->lock);
1246 mutex_unlock(&AS->lock);
1247 goto page_fault;
1248 }
1249
1250 page_table_unlock(AS, false);
1251 mutex_unlock(&area->lock);
1252 mutex_unlock(&AS->lock);
1253 return AS_PF_OK;
1254
1255page_fault:
1256 if (THREAD->in_copy_from_uspace) {
1257 THREAD->in_copy_from_uspace = false;
1258 istate_set_retaddr(istate,
1259 (uintptr_t) &memcpy_from_uspace_failover_address);
1260 } else if (THREAD->in_copy_to_uspace) {
1261 THREAD->in_copy_to_uspace = false;
1262 istate_set_retaddr(istate,
1263 (uintptr_t) &memcpy_to_uspace_failover_address);
1264 } else {
1265 return AS_PF_FAULT;
1266 }
1267
1268 return AS_PF_DEFER;
1269}
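
/*
 * Illustrative sketch (not part of the original file): the shape of
 * an architecture-level fault handler built on top of as_page_fault().
 * The panic() is a stand-in for the arch-specific error path.
 */
#if 0
static void example_arch_page_fault(uintptr_t badvaddr, pf_access_t access,
    istate_t *istate)
{
	if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), access,
	    istate) == AS_PF_FAULT)
		panic("Unhandled page fault at %p.", (void *) badvaddr);

	/* AS_PF_OK and AS_PF_DEFER need no further action here. */
}
#endif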
1270
1271/** Switch address spaces.
1272 *
1273 * Note that this function cannot sleep as it is essentially a part of
1274 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1275 * thing which is forbidden in this context is locking the address space.
1276 *
1277 * When this function is entered, no spinlocks may be held.
1278 *
1279 * @param old_as Old address space or NULL.
1280 * @param new_as New address space.
1281 *
1282 */
1283void as_switch(as_t *old_as, as_t *new_as)
1284{
1285 DEADLOCK_PROBE_INIT(p_asidlock);
1286 preemption_disable();
1287
1288retry:
1289 (void) interrupts_disable();
1290 if (!spinlock_trylock(&asidlock)) {
1291 /*
1292 * Avoid deadlock with TLB shootdown.
1293 * We can enable interrupts here because
1294 * preemption is disabled. We should not be
1295 * holding any other lock.
1296 */
1297 (void) interrupts_enable();
1298 DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1299 goto retry;
1300 }
1301 preemption_enable();
1302
1303 /*
1304 * First, take care of the old address space.
1305 */
1306 if (old_as) {
1307 ASSERT(old_as->cpu_refcount);
1308
1309 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
1310 /*
1311 * The old address space is no longer active on
1312 * any processor. It can be appended to the
1313 * list of inactive address spaces with assigned
1314 * ASID.
1315 */
1316 ASSERT(old_as->asid != ASID_INVALID);
1317
1318 list_append(&old_as->inactive_as_with_asid_link,
1319 &inactive_as_with_asid_head);
1320 }
1321
1322 /*
1323 * Perform architecture-specific tasks when the address space
1324 * is being removed from the CPU.
1325 */
1326 as_deinstall_arch(old_as);
1327 }
1328
1329 /*
1330 * Second, prepare the new address space.
1331 */
1332 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
1333 if (new_as->asid != ASID_INVALID)
1334 list_remove(&new_as->inactive_as_with_asid_link);
1335 else
1336 new_as->asid = asid_get();
1337 }
1338
1339#ifdef AS_PAGE_TABLE
1340 SET_PTL0_ADDRESS(new_as->genarch.page_table);
1341#endif
1342
1343 /*
1344 * Perform architecture-specific steps.
1345 * (e.g. write ASID to hardware register etc.)
1346 */
1347 as_install_arch(new_as);
1348
1349 spinlock_unlock(&asidlock);
1350
1351 AS = new_as;
1352}
1353
1354/** Compute flags for virtual address translation subsystem.
1355 *
1356 * @param area Address space area.
1357 *
1358 * @return Flags to be used in page_mapping_insert().
1359 *
1360 */
1361NO_TRACE unsigned int as_area_get_flags(as_area_t *area)
1362{
1363 ASSERT(mutex_locked(&area->lock));
1364
1365 return area_flags_to_page_flags(area->flags);
1366}
1367
1368/** Create page table.
1369 *
1370 * Depending on the architecture, create either an address-space-private
1371 * or a global page table.
1372 *
1373 * @param flags Flags saying whether the page table is for the kernel
1374 * address space.
1375 *
1376 * @return First entry of the page table.
1377 *
1378 */
1379NO_TRACE pte_t *page_table_create(unsigned int flags)
1380{
1381 ASSERT(as_operations);
1382 ASSERT(as_operations->page_table_create);
1383
1384 return as_operations->page_table_create(flags);
1385}
1386
1387/** Destroy page table.
1388 *
1389 * Destroy page table in architecture specific way.
1390 *
1391 * @param page_table Physical address of PTL0.
1392 *
1393 */
1394NO_TRACE void page_table_destroy(pte_t *page_table)
1395{
1396 ASSERT(as_operations);
1397 ASSERT(as_operations->page_table_destroy);
1398
1399 as_operations->page_table_destroy(page_table);
1400}
1401
1402/** Lock page table.
1403 *
1404 * This function should be called before any page_mapping_insert(),
1405 * page_mapping_remove() and page_mapping_find().
1406 *
1407 * Locking order is such that address space areas must be locked
1408 * prior to this call. Address space can be locked prior to this
1409 * call in which case the lock argument is false.
1410 *
1411 * @param as Address space.
1412 * @param lock If false, do not attempt to lock as->lock.
1413 *
1414 */
1415NO_TRACE void page_table_lock(as_t *as, bool lock)
1416{
1417 ASSERT(as_operations);
1418 ASSERT(as_operations->page_table_lock);
1419
1420 as_operations->page_table_lock(as, lock);
1421}
1422
1423/** Unlock page table.
1424 *
1425 * @param as Address space.
1426 * @param unlock If false, do not attempt to unlock as->lock.
1427 *
1428 */
1429NO_TRACE void page_table_unlock(as_t *as, bool unlock)
1430{
1431 ASSERT(as_operations);
1432 ASSERT(as_operations->page_table_unlock);
1433
1434 as_operations->page_table_unlock(as, unlock);
1435}
1436
1437/** Test whether page tables are locked.
1438 *
1439 * @param as Address space where the page tables belong.
1440 *
1441 * @return True if the page tables belonging to the address space
1442 * are locked, otherwise false.
1443 */
1444NO_TRACE bool page_table_locked(as_t *as)
1445{
1446 ASSERT(as_operations);
1447 ASSERT(as_operations->page_table_locked);
1448
1449 return as_operations->page_table_locked(as);
1450}
1451
1452/** Return size of the address space area containing the given address.
1453 *
1454 * @param base Arbitrary address inside the address space area.
1455 *
1456 * @return Size of the address space area in bytes or zero if it
1457 * does not exist.
1458 *
1459 */
1460size_t as_area_get_size(uintptr_t base)
1461{
1462 size_t size;
1463
1464 page_table_lock(AS, true);
1465 as_area_t *src_area = find_area_and_lock(AS, base);
1466
1467 if (src_area) {
1468 size = src_area->pages << PAGE_WIDTH;
1469 mutex_unlock(&src_area->lock);
1470 } else
1471 size = 0;
1472
1473 page_table_unlock(AS, true);
1474 return size;
1475}
1476
1477/** Mark portion of address space area as used.
1478 *
1479 * The address space area must be already locked.
1480 *
1481 * @param area Address space area.
1482 * @param page First page to be marked.
1483 * @param count Number of pages to be marked.
1484 *
1485 * @return False on failure or true on success.
1486 *
1487 */
1488bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
1489{
1490 ASSERT(mutex_locked(&area->lock));
1491 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1492 ASSERT(count);
1493
1494 btree_node_t *leaf;
1495 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
1496 if (pages) {
1497 /*
1498 * We hit the beginning of some used space.
1499 */
1500 return false;
1501 }
1502
1503 if (!leaf->keys) {
1504 btree_insert(&area->used_space, page, (void *) count, leaf);
1505 goto success;
1506 }
1507
1508 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
1509 if (node) {
1510 uintptr_t left_pg = node->key[node->keys - 1];
1511 uintptr_t right_pg = leaf->key[0];
1512 size_t left_cnt = (size_t) node->value[node->keys - 1];
1513 size_t right_cnt = (size_t) leaf->value[0];
1514
1515 /*
1516 * Examine the possibility that the interval fits
1517 * somewhere between the rightmost interval of
1518 * the left neighbour and the first interval of the leaf.
1519 */
1520
1521 if (page >= right_pg) {
1522 /* Do nothing. */
1523 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
1524 left_cnt << PAGE_WIDTH)) {
1525 /* The interval intersects with the left interval. */
1526 return false;
1527 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
1528 right_cnt << PAGE_WIDTH)) {
1529 /* The interval intersects with the right interval. */
1530 return false;
1531 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
1532 (page + (count << PAGE_WIDTH) == right_pg)) {
1533 /*
1534 * The interval can be added by merging the two already
1535 * present intervals.
1536 */
1537 node->value[node->keys - 1] += count + right_cnt;
1538 btree_remove(&area->used_space, right_pg, leaf);
1539 goto success;
1540 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
1541 /*
1542 * The interval can be added by simply growing the left
1543 * interval.
1544 */
1545 node->value[node->keys - 1] += count;
1546 goto success;
1547 } else if (page + (count << PAGE_WIDTH) == right_pg) {
1548 /*
1549 * The interval can be added by simply moving the base of
1550 * the right interval down and increasing its size
1551 * accordingly.
1552 */
1553 leaf->value[0] += count;
1554 leaf->key[0] = page;
1555 goto success;
1556 } else {
1557 /*
1558 * The interval is between both neighbouring intervals,
1559 * but cannot be merged with any of them.
1560 */
1561 btree_insert(&area->used_space, page, (void *) count,
1562 leaf);
1563 goto success;
1564 }
1565 } else if (page < leaf->key[0]) {
1566 uintptr_t right_pg = leaf->key[0];
1567 size_t right_cnt = (size_t) leaf->value[0];
1568
1569 /*
1570 * Investigate the border case in which the left neighbour does
1571 * not exist but the interval fits from the left.
1572 */
1573
1574 if (overlaps(page, count << PAGE_WIDTH, right_pg,
1575 right_cnt << PAGE_WIDTH)) {
1576 /* The interval intersects with the right interval. */
1577 return false;
1578 } else if (page + (count << PAGE_WIDTH) == right_pg) {
1579 /*
1580 * The interval can be added by moving the base of the
1581 * right interval down and increasing its size
1582 * accordingly.
1583 */
1584 leaf->key[0] = page;
1585 leaf->value[0] += count;
1586 goto success;
1587 } else {
1588 /*
1589 * The interval does not adjoin the right interval.
1590 * It must be added individually.
1591 */
1592 btree_insert(&area->used_space, page, (void *) count,
1593 leaf);
1594 goto success;
1595 }
1596 }
1597
1598 node = btree_leaf_node_right_neighbour(&area->used_space, leaf);
1599 if (node) {
1600 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1601 uintptr_t right_pg = node->key[0];
1602 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1603 size_t right_cnt = (size_t) node->value[0];
1604
1605 /*
1606 * Examine the possibility that the interval fits
1607 * somewhere between the leftmost interval of
1608 * the right neighbour and the last interval of the leaf.
1609 */
1610
1611 if (page < left_pg) {
1612 /* Do nothing. */
1613 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
1614 left_cnt << PAGE_WIDTH)) {
1615 /* The interval intersects with the left interval. */
1616 return false;
1617 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
1618 right_cnt << PAGE_WIDTH)) {
1619 /* The interval intersects with the right interval. */
1620 return false;
1621 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
1622 (page + (count << PAGE_WIDTH) == right_pg)) {
1623 /*
1624 * The interval can be added by merging the two already
1625 * present intervals.
1626 */
1627 leaf->value[leaf->keys - 1] += count + right_cnt;
1628 btree_remove(&area->used_space, right_pg, node);
1629 goto success;
1630 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
1631 /*
1632 * The interval can be added by simply growing the left
1633 * interval.
1634 */
1635 leaf->value[leaf->keys - 1] += count;
1636 goto success;
1637 } else if (page + (count << PAGE_WIDTH) == right_pg) {
1638 /*
1639 * The interval can be added by simply moving the base of
1640 * the right interval down and increasing its size
1641 * accordingly.
1642 */
1643 node->value[0] += count;
1644 node->key[0] = page;
1645 goto success;
1646 } else {
1647 /*
1648 * The interval is between both neighbouring intervals,
1649 * but cannot be merged with any of them.
1650 */
1651 btree_insert(&area->used_space, page, (void *) count,
1652 leaf);
1653 goto success;
1654 }
1655 } else if (page >= leaf->key[leaf->keys - 1]) {
1656 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1657 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1658
1659 /*
1660 * Investigate the border case in which the right neighbour
1661 * does not exist but the interval fits from the right.
1662 */
1663
1664 if (overlaps(page, count << PAGE_WIDTH, left_pg,
1665 left_cnt << PAGE_WIDTH)) {
1666 /* The interval intersects with the left interval. */
1667 return false;
1668 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
1669 /*
1670 * The interval can be added by growing the left
1671 * interval.
1672 */
1673 leaf->value[leaf->keys - 1] += count;
1674 goto success;
1675 } else {
1676 /*
1677 * The interval does not adjoin the left interval.
1678 * It must be added individually.
1679 */
1680 btree_insert(&area->used_space, page, (void *) count,
1681 leaf);
1682 goto success;
1683 }
1684 }
1685
1686 /*
1687 * Note that if the algorithm made it thus far, the interval can fit
1688 * only between two other intervals of the leaf. The two border cases
1689 * were already resolved.
1690 */
1691 btree_key_t i;
1692 for (i = 1; i < leaf->keys; i++) {
1693 if (page < leaf->key[i]) {
1694 uintptr_t left_pg = leaf->key[i - 1];
1695 uintptr_t right_pg = leaf->key[i];
1696 size_t left_cnt = (size_t) leaf->value[i - 1];
1697 size_t right_cnt = (size_t) leaf->value[i];
1698
1699 /*
1700 * The interval fits between left_pg and right_pg.
1701 */
1702
1703 if (overlaps(page, count << PAGE_WIDTH, left_pg,
1704 left_cnt << PAGE_WIDTH)) {
1705 /*
1706 * The interval intersects with the left
1707 * interval.
1708 */
1709 return false;
1710 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
1711 right_cnt << PAGE_WIDTH)) {
1712 /*
1713 * The interval intersects with the right
1714 * interval.
1715 */
1716 return false;
1717 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
1718 (page + (count << PAGE_WIDTH) == right_pg)) {
1719 /*
1720 * The interval can be added by merging the two
1721 * already present intervals.
1722 */
1723 leaf->value[i - 1] += count + right_cnt;
1724 btree_remove(&area->used_space, right_pg, leaf);
1725 goto success;
1726 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
1727 /*
1728 * The interval can be added by simply growing
1729 * the left interval.
1730 */
1731 leaf->value[i - 1] += count;
1732 goto success;
1733 } else if (page + (count << PAGE_WIDTH) == right_pg) {
1734 /*
1735 * The interval can be added by simply moving
1736 * the base of the right interval down and
1737 * increasing its size accordingly.
1738 */
1739 leaf->value[i] += count;
1740 leaf->key[i] = page;
1741 goto success;
1742 } else {
1743 /*
1744 * The interval is between both neighbouring
1745 * intervals, but cannot be merged with any of
1746 * them.
1747 */
1748 btree_insert(&area->used_space, page,
1749 (void *) count, leaf);
1750 goto success;
1751 }
1752 }
1753 }
1754
1755 panic("Inconsistency detected while adding %zu pages of used "
1756 "space at %p.", count, (void *) page);
1757
1758success:
1759 area->resident += count;
1760 return true;
1761}
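
/*
 * Worked example of the merging logic above (assuming 4 KiB pages):
 * with intervals [0x1000, 2 pages] and [0x4000, 1 page] already
 * recorded, inserting page = 0x3000, count = 1 adjoins both
 * neighbours, so the left interval grows to [0x1000, 4 pages] and
 * the right interval is removed from the B+tree.
 */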
1762
1763/** Mark portion of address space area as unused.
1764 *
1765 * The address space area must be already locked.
1766 *
1767 * @param area Address space area.
1768 * @param page First page to be marked.
1769 * @param count Number of pages to be marked.
1770 *
1771 * @return False on failure or true on success.
1772 *
1773 */
1774bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
1775{
1776 ASSERT(mutex_locked(&area->lock));
1777 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1778 ASSERT(count);
1779
1780 btree_node_t *leaf;
1781 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
1782 if (pages) {
1783 /*
1784 * We are lucky, page is the beginning of some interval.
1785 */
1786 if (count > pages) {
1787 return false;
1788 } else if (count == pages) {
1789 btree_remove(&area->used_space, page, leaf);
1790 goto success;
1791 } else {
1792 /*
1793 * Find the respective interval.
1794 * Decrease its size and relocate its start address.
1795 */
1796 btree_key_t i;
1797 for (i = 0; i < leaf->keys; i++) {
1798 if (leaf->key[i] == page) {
1799 leaf->key[i] += count << PAGE_WIDTH;
1800 leaf->value[i] -= count;
1801 goto success;
1802 }
1803 }
1804
1805 goto error;
1806 }
1807 }
1808
1809 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
1810 if ((node) && (page < leaf->key[0])) {
1811 uintptr_t left_pg = node->key[node->keys - 1];
1812 size_t left_cnt = (size_t) node->value[node->keys - 1];
1813
1814 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
1815 count << PAGE_WIDTH)) {
1816 if (page + (count << PAGE_WIDTH) ==
1817 left_pg + (left_cnt << PAGE_WIDTH)) {
1818 /*
1819 * The interval is contained in the rightmost
1820 * interval of the left neighbour and can be
1821 * removed by updating the size of the bigger
1822 * interval.
1823 */
1824 node->value[node->keys - 1] -= count;
1825 goto success;
1826 } else if (page + (count << PAGE_WIDTH) <
1827 left_pg + (left_cnt << PAGE_WIDTH)) {
1828 /*
1829 * The interval is contained in the rightmost
1830 * interval of the left neighbour but its
1831 * removal requires both updating the size of
1832 * the original interval and also inserting a
1833 * new interval.
1834 */
1835 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
1836 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
1837 node->value[node->keys - 1] -= count + new_cnt;
1838 btree_insert(&area->used_space, page +
1839 (count << PAGE_WIDTH), (void *) new_cnt, leaf);
1840 goto success;
1841 }
1842 }
1843
1844 return false;
1845 } else if (page < leaf->key[0])
1846 return false;
1847
1848 if (page > leaf->key[leaf->keys - 1]) {
1849 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1850 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
1851
1852 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
1853 count << PAGE_WIDTH)) {
1854 if (page + (count << PAGE_WIDTH) ==
1855 left_pg + (left_cnt << PAGE_WIDTH)) {
1856 /*
1857 * The interval is contained in the rightmost
1858 * interval of the leaf and can be removed by
1859 * updating the size of the bigger interval.
1860 */
1861 leaf->value[leaf->keys - 1] -= count;
1862 goto success;
1863 } else if (page + (count << PAGE_WIDTH) < left_pg +
1864 (left_cnt << PAGE_WIDTH)) {
1865 /*
1866 * The interval is contained in the rightmost
1867 * interval of the leaf but its removal
1868 * requires both updating the size of the
1869 * original interval and also inserting a new
1870 * interval.
1871 */
1872 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
1873 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
1874 leaf->value[leaf->keys - 1] -= count + new_cnt;
1875 btree_insert(&area->used_space, page +
1876 (count << PAGE_WIDTH), (void *) new_cnt, leaf);
1877 goto success;
1878 }
1879 }
1880
1881 return false;
1882 }
1883
1884 /*
1885 * The border cases have been already resolved.
1886 * Now the interval can be only between intervals of the leaf.
1887 */
1888 btree_key_t i;
1889 for (i = 1; i < leaf->keys - 1; i++) {
1890 if (page < leaf->key[i]) {
1891 uintptr_t left_pg = leaf->key[i - 1];
1892 size_t left_cnt = (size_t) leaf->value[i - 1];
1893
1894 /*
1895 * Now the interval is between intervals corresponding
1896 * to (i - 1) and i.
1897 */
1898 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
1899 count << PAGE_WIDTH)) {
1900 if (page + (count << PAGE_WIDTH) ==
1901 left_pg + (left_cnt << PAGE_WIDTH)) {
1902 /*
1903 * The interval is contained in the
1904 * interval (i - 1) of the leaf and can
1905 * be removed by updating the size of
1906 * the bigger interval.
1907 */
1908 leaf->value[i - 1] -= count;
1909 goto success;
1910 } else if (page + (count << PAGE_WIDTH) <
1911 left_pg + (left_cnt << PAGE_WIDTH)) {
1912 /*
1913 * The interval is contained in the
1914 * interval (i - 1) of the leaf but its
1915 * removal requires both updating the
1916 * size of the original interval and
1917 * also inserting a new interval.
1918 */
1919 size_t new_cnt = ((left_pg +
1920 (left_cnt << PAGE_WIDTH)) -
1921 (page + (count << PAGE_WIDTH))) >>
1922 PAGE_WIDTH;
1923 leaf->value[i - 1] -= count + new_cnt;
1924 btree_insert(&area->used_space, page +
1925 (count << PAGE_WIDTH), (void *) new_cnt,
1926 leaf);
1927 goto success;
1928 }
1929 }
1930
1931 return false;
1932 }
1933 }
1934
1935error:
1936 panic("Inconsistency detected while removing %zu pages of used "
1937 "space from %p.", count, (void *) page);
1938
1939success:
1940 area->resident -= count;
1941 return true;
1942}
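
/*
 * Worked example of the splitting case above (assuming 4 KiB pages):
 * removing page = 0x2000, count = 1 from a recorded interval
 * [0x1000, 4 pages] punches a hole in its middle, so the interval
 * shrinks to [0x1000, 1 page] and a new interval [0x3000, 2 pages]
 * is inserted for the remainder.
 */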
1943
1944/*
1945 * Address space related syscalls.
1946 */
1947
1948/** Wrapper for as_area_create(). */
1949sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
1950{
1951 if (as_area_create(AS, flags, size, address,
1952 AS_AREA_ATTR_NONE, &anon_backend, NULL))
1953 return (sysarg_t) address;
1954 else
1955 return (sysarg_t) -1;
1956}
1957
1958/** Wrapper for as_area_resize(). */
1959sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
1960{
1961 return (sysarg_t) as_area_resize(AS, address, size, 0);
1962}
1963
1964/** Wrapper for as_area_change_flags(). */
1965sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
1966{
1967 return (sysarg_t) as_area_change_flags(AS, flags, address);
1968}
1969
1970/** Wrapper for as_area_destroy(). */
1971sysarg_t sys_as_area_destroy(uintptr_t address)
1972{
1973 return (sysarg_t) as_area_destroy(AS, address);
1974}
1975
1976/** Return pointer to an unmapped address space area.
1977 *
1978 * @param base Lowest address bound.
1979 * @param size Requested size of the allocation.
1980 *
1981 * @return Pointer to the beginning of unmapped address space area.
1982 *
1983 */
1984sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
1985{
1986 if (size == 0)
1987 return 0;
1988
1989 /*
1990 * Make sure we allocate from page-aligned
1991 * address. Check for possible overflow in
1992 * each step.
1993 */
1994
1995 size_t pages = SIZE2FRAMES(size);
1996 uintptr_t ret = 0;
1997
1998 /*
1999 * Find the lowest unmapped address aligned on a page
2000 * boundary, not smaller than base and of the required size.
2001 */
2002
2003 mutex_lock(&AS->lock);
2004
2005 /* First check the base address itself */
2006 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
2007 if ((addr >= base) &&
2008 (check_area_conflicts(AS, addr, pages, NULL)))
2009 ret = addr;
2010
2011 /* Then check the addresses behind each area */
2012 link_t *cur;
2013 for (cur = AS->as_area_btree.leaf_head.next;
2014 (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
2015 cur = cur->next) {
2016 btree_node_t *node =
2017 list_get_instance(cur, btree_node_t, leaf_link);
2018
2019 btree_key_t i;
2020 for (i = 0; (ret == 0) && (i < node->keys); i++) {
2021 as_area_t *area = (as_area_t *) node->value[i];
2022
2023 mutex_lock(&area->lock);
2024
2025 uintptr_t addr =
2026 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
2027 PAGE_SIZE);
2028
2029 if ((addr >= base) && (addr >= area->base) &&
2030 (check_area_conflicts(AS, addr, pages, area)))
2031 ret = addr;
2032
2033 mutex_unlock(&area->lock);
2034 }
2035 }
2036
2037 mutex_unlock(&AS->lock);
2038
2039 return (sysarg_t) ret;
2040}
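
/*
 * Illustrative sketch (not part of the original file): composing the
 * syscall wrappers in this section — find a free page-aligned range
 * and claim it with an anonymous area. example_reserve() is a
 * hypothetical helper; sys_as_area_create() returns (sysarg_t) -1 on
 * failure.
 */
#if 0
static sysarg_t example_reserve(size_t size)
{
	sysarg_t addr = sys_as_get_unmapped_area(0, size);
	if (addr == 0)
		return (sysarg_t) -1;

	return sys_as_area_create((uintptr_t) addr, size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE);
}
#endif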
2041
2042/** Get list of address space areas.
2043 *
2044 * @param as Address space.
2045 * @param obuf Place to save pointer to returned buffer.
2046 * @param osize Place to save size of returned buffer.
2047 *
2048 */
2049void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
2050{
2051 mutex_lock(&as->lock);
2052
2053 /* First pass, count number of areas. */
2054
2055 size_t area_cnt = 0;
2056 link_t *cur;
2057
2058 for (cur = as->as_area_btree.leaf_head.next;
2059 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2060 btree_node_t *node =
2061 list_get_instance(cur, btree_node_t, leaf_link);
2062 area_cnt += node->keys;
2063 }
2064
2065 size_t isize = area_cnt * sizeof(as_area_info_t);
2066 as_area_info_t *info = malloc(isize, 0);
2067
2068 /* Second pass, record data. */
2069
2070 size_t area_idx = 0;
2071
2072 for (cur = as->as_area_btree.leaf_head.next;
2073 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2074 btree_node_t *node =
2075 list_get_instance(cur, btree_node_t, leaf_link);
2076 btree_key_t i;
2077
2078 for (i = 0; i < node->keys; i++) {
2079 as_area_t *area = node->value[i];
2080
2081 ASSERT(area_idx < area_cnt);
2082 mutex_lock(&area->lock);
2083
2084 info[area_idx].start_addr = area->base;
2085 info[area_idx].size = FRAMES2SIZE(area->pages);
2086 info[area_idx].flags = area->flags;
2087 ++area_idx;
2088
2089 mutex_unlock(&area->lock);
2090 }
2091 }
2092
2093 mutex_unlock(&as->lock);
2094
2095 *obuf = info;
2096 *osize = isize;
2097}
2098
2099/** Print out information about address space.
2100 *
2101 * @param as Address space.
2102 *
2103 */
2104void as_print(as_t *as)
2105{
2106 mutex_lock(&as->lock);
2107
2108 /* Print out info about address space areas */
2109 link_t *cur;
2110 for (cur = as->as_area_btree.leaf_head.next;
2111 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2112 btree_node_t *node
2113 = list_get_instance(cur, btree_node_t, leaf_link);
2114 btree_key_t i;
2115
2116 for (i = 0; i < node->keys; i++) {
2117 as_area_t *area = node->value[i];
2118
2119 mutex_lock(&area->lock);
2120 printf("as_area: %p, base=%p, pages=%zu"
2121 " (%p - %p)\n", area, (void *) area->base,
2122 area->pages, (void *) area->base,
2123 (void *) (area->base + FRAMES2SIZE(area->pages)));
2124 mutex_unlock(&area->lock);
2125 }
2126 }
2127
2128 mutex_unlock(&as->lock);
2129}
2130
2131/** @}
2132 */