source: mainline/kernel/generic/src/mm/as.c @ d5bd8d7

Last change on this file since d5bd8d7 was c993e45, checked in by Martin Decky <martin@…>:

small cleanup, more work coming
/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

#ifndef __OBJC__
	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
	as_t *as;

#ifdef __OBJC__
	as = [as_t new];
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	(void) as_constructor_arch(as, flags);
#else
	as = (as_t *) slab_alloc(as_slab, 0);
#endif
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
	as->genarch.page_table = page_table_create(flags);
#else
	page_table_create(flags);
#endif

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
	page_table_destroy(as->genarch.page_table);
#else
	page_table_destroy(NULL);
#endif

	interrupts_restore(ipl);

#ifdef __OBJC__
	[as free];
#else
	slab_free(as_slab, as);
#endif
}
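
/*
 * Example (editor's sketch, not part of the original file): the expected
 * lifecycle of a userspace address space. The refcount manipulation shown
 * here is an assumption for illustration; in the kernel proper it is done
 * by the task management code, not by callers of this file directly.
 *
 *	as_t *as = as_create(0);	// userspace AS; ASID assigned lazily
 *	as->refcount++;			// the owning task references it
 *	...
 *	if (--as->refcount == 0)
 *		as_destroy(as);		// areas, page table and slab freed
 */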
249
250/** Create address space area of common attributes.
251 *
252 * The created address space area is added to the target address space.
253 *
254 * @param as Target address space.
255 * @param flags Flags of the area memory.
256 * @param size Size of area.
257 * @param base Base address of area.
258 * @param attrs Attributes of the area.
259 * @param backend Address space area backend. NULL if no backend is used.
260 * @param backend_data NULL or a pointer to an array holding two void *.
261 *
262 * @return Address space area on success or NULL on failure.
263 */
264as_area_t *
265as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
266 mem_backend_t *backend, mem_backend_data_t *backend_data)
267{
268 ipl_t ipl;
269 as_area_t *a;
270
271 if (base % PAGE_SIZE)
272 return NULL;
273
274 if (!size)
275 return NULL;
276
277 /* Writeable executable areas are not supported. */
278 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
279 return NULL;
280
281 ipl = interrupts_disable();
282 mutex_lock(&as->lock);
283
284 if (!check_area_conflicts(as, base, size, NULL)) {
285 mutex_unlock(&as->lock);
286 interrupts_restore(ipl);
287 return NULL;
288 }
289
290 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
291
292 mutex_initialize(&a->lock);
293
294 a->as = as;
295 a->flags = flags;
296 a->attributes = attrs;
297 a->pages = SIZE2FRAMES(size);
298 a->base = base;
299 a->sh_info = NULL;
300 a->backend = backend;
301 if (backend_data)
302 a->backend_data = *backend_data;
303 else
304 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
305 0);
306
307 btree_create(&a->used_space);
308
309 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
310
311 mutex_unlock(&as->lock);
312 interrupts_restore(ipl);
313
314 return a;
315}
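
/*
 * Example (editor's sketch, hypothetical values): creating an anonymous,
 * read/write, cacheable area of four pages in the current address space.
 * USER_AREA_BASE is an assumed, page-aligned constant, not a symbol from
 * this file; AS_AREA_ATTR_NONE and anon_backend are used the same way by
 * sys_as_area_create() below.
 *
 *	as_area_t *a = as_area_create(AS,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *	    4 * PAGE_SIZE, USER_AREA_BASE, AS_AREA_ATTR_NONE,
 *	    &anon_backend, NULL);
 *	if (!a) {
 *		// base not page-aligned, conflict with another area, or
 *		// an attempt to create a writeable executable area
 *	}
 */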

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		tlb_shootdown_finalize();

		/*
		 * Invalidate software translation caches (e.g. TSB on
		 * sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
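
/*
 * Example (editor's sketch, hypothetical values): shrinking the area that
 * starts at the assumed USER_AREA_BASE from four pages to two, then growing
 * it back. Growing fails with EADDRNOTAVAIL if another area is in the way.
 *
 *	int rc;
 *
 *	rc = as_area_resize(AS, USER_AREA_BASE, 2 * PAGE_SIZE, 0);
 *	ASSERT(rc == 0);	// shrinking never conflicts
 *	rc = as_area_resize(AS, USER_AREA_BASE, 4 * PAGE_SIZE, 0);
 *	if (rc == EADDRNOTAVAIL) {
 *		// the grown range would overlap a neighbouring area
 *	}
 */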

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
	as_area_t *area;
	uintptr_t base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by the used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next;
	    cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			uintptr_t b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				if (area->backend &&
				    area->backend->frame_free) {
					area->backend->frame_free(area, b +
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j * PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	/*
	 * Invalidate potential software translation caches (e.g. TSB on
	 * sparc64).
	 */
	as_invalidate_translation_cache(as, area->base, area->pages);

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success. ENOENT if there is no such task or no such address
 * space area. EPERM if there was a problem in accepting the area. ENOMEM if
 * there was a problem in allocating the destination address space area.
 * ENOTSUP if the address space area backend does not support sharing or if
 * the kernel detects an attempt to create an illegal address alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping. */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size ||
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

#ifdef CONFIG_VIRT_IDX_DCACHE
	if (!(dst_flags_mask & AS_AREA_EXEC)) {
		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
			/*
			 * Refuse to create an illegal address alias.
			 */
			mutex_unlock(&src_area->lock);
			mutex_unlock(&src_as->lock);
			interrupts_restore(ipl);
			return ENOTSUP;
		}
	}
#endif /* CONFIG_VIRT_IDX_DCACHE */

	/*
	 * Now we are committed to sharing the area.
	 * First, prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with the AS_AREA_ATTR_PARTIAL
	 * attribute set, which prevents a race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_as->lock);
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	mutex_unlock(&dst_as->lock);

	interrupts_restore(ipl);

	return 0;
}
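
/*
 * Example (editor's sketch, hypothetical values): sharing the four-page
 * read/write area from the earlier example into another address space
 * dst_as, with the destination downgraded to read-only via the flags mask.
 * USER_AREA_BASE and dst_as are assumed names; acc_size must match the
 * source area size exactly or EPERM is returned.
 *
 *	int rc = as_area_share(AS, USER_AREA_BASE, 4 * PAGE_SIZE,
 *	    dst_as, USER_AREA_BASE, AS_AREA_READ | AS_AREA_CACHEABLE);
 *	if (rc != 0) {
 *		// ENOENT, EPERM, ENOMEM or ENOTSUP as documented above
 *	}
 */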

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}
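
/*
 * Example (editor's sketch): a backend page fault handler can use
 * as_area_check_access() to reject faults that violate the area's
 * permissions. The area is already locked here because the caller is
 * as_page_fault() below.
 *
 *	if (!as_area_check_access(area, PF_ACCESS_WRITE))
 *		return AS_PF_FAULT;	// write to a read-only area
 */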

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
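
/*
 * Example (editor's sketch, hypothetical architecture code): a low-level
 * data fault handler extracting the faulting address and deferring to
 * as_page_fault(). read_faulting_address() and fault_was_write() are
 * assumed helpers that an architecture would provide; they are not part
 * of this file.
 *
 *	void data_abort(int n, istate_t *istate)
 *	{
 *		uintptr_t va = read_faulting_address();
 *		pf_access_t access = fault_was_write() ? PF_ACCESS_WRITE :
 *		    PF_ACCESS_READ;
 *
 *		if (as_page_fault(ALIGN_DOWN(va, PAGE_SIZE), access,
 *		    istate) == AS_PF_FAULT) {
 *			// unresolvable fault: kill the task or panic
 *		}
 *	}
 */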

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old_as) {
		mutex_lock_active(&old_as->lock);
		ASSERT(old_as->cpu_refcount);
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old_as->asid != ASID_INVALID);
			list_append(&old_as->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}
		mutex_unlock(&old_as->lock);

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old_as);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new_as->lock);
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID) {
			list_remove(&new_as->inactive_as_with_asid_link);
		} else {
			/*
			 * Defer call to asid_get() until new_as->lock is
			 * released.
			 */
			needs_asid = true;
		}
	}
#ifdef AS_PAGE_TABLE
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
	mutex_unlock(&new_as->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new_as->lock);
		new_as->asid = asid;
		mutex_unlock(&new_as->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new_as);

	AS = new_as;
}
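
/*
 * Example (editor's sketch): the scheduler is the expected caller of
 * as_switch(), switching when the newly picked thread belongs to a task
 * with a different address space. The snippet below is illustrative and
 * not copied from the scheduler; TASK and THREAD are the per-CPU macros
 * from <arch.h>.
 *
 *	as_t *old_as = TASK ? TASK->as : NULL;
 *	as_t *new_as = THREAD->task->as;
 *
 *	if (old_as != new_as)
 *		as_switch(old_as, new_as);
 */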

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
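
/*
 * Example (editor's note): for a readable, writable, cacheable area,
 *
 *	area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE)
 *
 * returns PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * PAGE_CACHEABLE, which is what the backends then hand to
 * page_mapping_insert().
 */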

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel
 *     address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
	return [as_t page_table_create: flags];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
	return [as_t page_table_destroy: page_table];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
	[as page_table_lock: lock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
	[as page_table_unlock: unlock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
#endif
}
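
/*
 * Example (editor's sketch): the canonical locking pattern around a
 * mapping lookup, as already used by as_page_fault() and as_area_destroy()
 * above. The second argument is false in both of those paths because the
 * address space is already locked by the caller; pte, as and page stand
 * for the caller's own locals.
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, page);
 *	if (pte && PTE_PRESENT(pte)) {
 *		// inspect or remove the mapping
 *	}
 *	page_table_unlock(as, false);
 */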

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 * failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base
			 * of the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base
			 * of the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * the base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}
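
/*
 * Example (editor's note, assuming 4 KiB pages): suppose used_space already
 * records the intervals [0x10000, 2 pages) and [0x14000, 3 pages). Then
 *
 *	used_space_insert(a, 0x12000, 2);
 *
 * merges all three into a single interval [0x10000, 7 pages), because the
 * new interval exactly fills the gap between its two neighbours. By
 * contrast, used_space_insert(a, 0x11000, 1) returns 0, because the page
 * at 0x11000 overlaps the first interval.
 */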

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space "
	    "from %p.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */