source: mainline/kernel/generic/src/mm/as.c @ bd1deed

Last change on this file since bd1deed was bd1deed, checked in by Martin Decky <martin@…>, 19 years ago:
experimental support for Objective C (disabled by default)
/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;
#endif

/**
 * Slab cache for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) slab_alloc(as_slab, 0);
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
	as->genarch.page_table = page_table_create(flags);
#else
	page_table_create(flags);
#endif

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
	page_table_destroy(as->genarch.page_table);
#else
	page_table_destroy(NULL);
#endif

	interrupts_restore(ipl);

	slab_free(as_slab, as);
}

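/*
 * Editor's illustrative sketch (not part of the original file, kept out
 * of the build with #if 0): the intended as_create()/as_destroy()
 * lifecycle as implemented above. In the real kernel, refcount is
 * raised and dropped by the task code; here it stays zero throughout.
 */
#if 0
static void as_lifecycle_sketch(void)
{
	/* Allocate a fresh userspace address space (no ASID yet). */
	as_t *as = as_create(0);

	/* ... create areas, attach the as to a task, run it ... */

	/*
	 * Once nothing references the address space (refcount == 0),
	 * it may be torn down; this also destroys all of its areas.
	 */
	as_destroy(as);
}
#endif
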
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
		    0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

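/*
 * Editor's illustrative sketch (not compiled in): creating one page of
 * anonymous, cacheable, read/write memory the same way the syscall
 * wrapper at the bottom of this file does. 'va' must be page-aligned.
 */
#if 0
static as_area_t *anon_area_sketch(as_t *as, uintptr_t va)
{
	as_area_t *area = as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, PAGE_SIZE,
	    va, AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/* NULL means: misaligned base, zero size, a writeable executable
	 * combination or an overlap with an existing area. */
	return area;
}
#endif
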
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		tlb_shootdown_finalize();

		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}

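/*
 * Editor's illustrative sketch (not compiled in): resizing the area that
 * starts at 'base'. Failure codes mirror the checks above: ENOENT (no
 * such area), ENOTSUP (physical or shared area), EPERM (zero new size)
 * and EADDRNOTAVAIL (growth would overlap a neighbouring area).
 */
#if 0
static int resize_sketch(as_t *as, uintptr_t base, size_t new_size)
{
	int rc = as_area_resize(as, base, new_size, 0);
	if (rc != 0) {
		/* The area keeps its original size. */
	}
	return rc;
}
#endif
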
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
	as_area_t *area;
	uintptr_t base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next;
	    cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			uintptr_t b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				if (area->backend &&
				    area->backend->frame_free) {
					area->backend->frame_free(area, b +
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j * PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	/*
	 * Invalidate potential software translation caches (e.g. TSB on
	 * sparc64).
	 */
	as_invalidate_translation_cache(as, area->base, area->pages);

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success, ENOENT if there is no such address space area,
 * EPERM if there was a problem in accepting the area or ENOMEM if there was a
 * problem in allocating destination address space area. ENOTSUP is returned
 * if the address space area backend does not support sharing or if the kernel
 * detects an attempt to create an illegal address alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size ||
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

#ifdef CONFIG_VIRT_IDX_DCACHE
	if (!(dst_flags_mask & AS_AREA_EXEC)) {
		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
			/*
			 * Refuse to create an illegal address alias.
			 */
			mutex_unlock(&src_area->lock);
			mutex_unlock(&src_as->lock);
			interrupts_restore(ipl);
			return ENOTSUP;
		}
	}
#endif /* CONFIG_VIRT_IDX_DCACHE */

	/*
	 * Now we are committed to sharing the area.
	 * First, prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_as->lock);
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	mutex_unlock(&dst_as->lock);

	interrupts_restore(ipl);

	return 0;
}

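/*
 * Editor's illustrative sketch (not compiled in): sharing 'size' bytes
 * of src_as at src_base into dst_as at dst_base, granting the
 * destination read-only access. 'size' must equal the source area size,
 * otherwise EPERM is returned.
 */
#if 0
static int share_sketch(as_t *src_as, uintptr_t src_base, size_t size,
    as_t *dst_as, uintptr_t dst_base)
{
	return as_area_share(src_as, src_base, size, dst_as, dst_base,
	    AS_AREA_READ | AS_AREA_CACHEABLE);
}
#endif
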
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

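/*
 * Editor's illustrative sketch (not compiled in): the caller must hold
 * area->lock, exactly as as_page_fault() does before consulting the
 * backend.
 */
#if 0
	mutex_lock(&area->lock);
	if (!as_area_check_access(area, PF_ACCESS_WRITE)) {
		/* A write would violate the area's flags. */
	}
	mutex_unlock(&area->lock);
#endif
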
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

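/*
 * Editor's illustrative sketch (not compiled in, details are an
 * assumption beyond this file): how an architecture's low-level fault
 * handler is expected to drive as_page_fault(), with interrupts already
 * disabled. The panic message is a placeholder.
 */
#if 0
static void arch_fault_sketch(uintptr_t badvaddr, pf_access_t access,
    istate_t *istate)
{
	int rc = as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), access,
	    istate);

	if (rc == AS_PF_FAULT)
		panic("unresolved page fault at %p\n", badvaddr);
	/*
	 * AS_PF_OK: a mapping was installed and the instruction can be
	 * restarted. AS_PF_DEFER: the return address in istate was
	 * redirected to the copy_*_uspace() failover address.
	 */
}
#endif
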
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old_as) {
		mutex_lock_active(&old_as->lock);
		ASSERT(old_as->cpu_refcount);
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old_as->asid != ASID_INVALID);
			list_append(&old_as->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}
		mutex_unlock(&old_as->lock);

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old_as);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new_as->lock);
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID) {
			list_remove(&new_as->inactive_as_with_asid_link);
		} else {
			/*
			 * Defer call to asid_get() until new_as->lock is
			 * released.
			 */
			needs_asid = true;
		}
	}
#ifdef AS_PAGE_TABLE
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
	mutex_unlock(&new_as->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new_as->lock);
		new_as->asid = asid;
		mutex_unlock(&new_as->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new_as);

	AS = new_as;
}

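/*
 * Editor's illustrative sketch (not compiled in), simplified from what
 * the scheduler does: install the incoming thread's address space only
 * when it differs from the one currently active on this CPU.
 */
#if 0
	as_t *old_as = AS;
	as_t *new_as = THREAD->task->as;

	if (old_as != new_as)
		as_switch(old_as, new_as);	/* also updates AS */
#endif
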
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}

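/*
 * Editor's worked example: an area created with AS_AREA_READ |
 * AS_AREA_WRITE | AS_AREA_CACHEABLE translates to PAGE_USER |
 * PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE.
 */
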
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel
 *     address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
	return [as_t page_table_create: flags];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
	return [as_t page_table_destroy: page_table];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
	[as page_table_lock: lock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
	[as page_table_unlock: unlock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
#endif
}

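/*
 * Editor's illustrative sketch (not compiled in): the typical pairing
 * used throughout this file. The area (and possibly the address space)
 * is locked first; 'false' is passed because as->lock is already held.
 */
#if 0
	page_table_lock(as, false);
	pte_t *pte = page_mapping_find(as, page);
	/* ... inspect or modify the mapping ... */
	page_table_unlock(as, false);
#endif
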
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 * failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}

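/*
 * Editor's worked example (not compiled in): with 'area' locked, two
 * adjacent insertions merge into a single interval. The second call
 * takes the "grow the left interval" branch above, leaving one record
 * covering pages [0, 5) of the area.
 */
#if 0
	used_space_insert(area, area->base, 2);
	used_space_insert(area, area->base + 2 * PAGE_SIZE, 3);
#endif
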
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space "
	    "from %p.\n", count, page);
}

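/*
 * Editor's worked example (not compiled in), continuing the insertion
 * example above: punching a one-page hole in the middle of the [0, 5)
 * interval takes the "shrink and insert a new interval" branch, leaving
 * two records, [0, 2) and [3, 5).
 */
#if 0
	used_space_remove(area, area->base + 2 * PAGE_SIZE, 1);
#endif
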
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */