source: mainline/kernel/generic/src/mm/as.c@ 563c2dd

Last change on this file since 563c2dd was 2057572, checked in by Jakub Jermar <jakub@…>, 18 years ago

The Ultimate Solution To Illegal Virtual Aliases.
It is better to avoid them completely than to fight them.
Switch the sparc64 port to 16K pages. The TLBs and TSBs
continue to operate with 8K pages only. Page tables and
other generic parts operate with 16K pages.

Because the MMU doesn't support 16K pages directly, each 16K
page is emulated by a pair of 8K pages. With 16K pages,
illegal aliases cannot be created in the 16K D-cache.
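(Rough illustration, not from the changeset: with 8K pages and a 16K virtually indexed D-cache, two virtual mappings of the same physical frame can differ in virtual address bit 13 and therefore index different cache lines, which is exactly an illegal alias; with 16K pages, bit 13 lies inside the page offset, so every mapping of a page indexes the cache identically.)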

  • Property mode set to 100644
File size: 44.7 KB
1/*
2 * Copyright (c) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
61#include <synch/mutex.h>
62#include <adt/list.h>
63#include <adt/btree.h>
64#include <proc/task.h>
65#include <proc/thread.h>
66#include <arch/asm.h>
67#include <panic.h>
68#include <debug.h>
69#include <print.h>
70#include <memstr.h>
71#include <macros.h>
72#include <arch.h>
73#include <errno.h>
74#include <config.h>
75#include <align.h>
76#include <arch/types.h>
77#include <syscall/copy.h>
78#include <arch/interrupt.h>
79
80#ifdef CONFIG_VIRT_IDX_DCACHE
81#include <arch/mm/cache.h>
82#endif /* CONFIG_VIRT_IDX_DCACHE */
83
84#ifndef __OBJC__
85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
88 */
89as_operations_t *as_operations = NULL;
90
91/**
92 * Slab for as_t objects.
93 */
94static slab_cache_t *as_slab;
95#endif
96
97/**
98 * This lock protects inactive_as_with_asid_head list. It must be acquired
99 * before as_t mutex.
100 */
101SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
102
103/**
104 * This list contains address spaces that are not active on any
105 * processor and that have valid ASID.
106 */
107LIST_INITIALIZE(inactive_as_with_asid_head);
108
109/** Kernel address space. */
110as_t *AS_KERNEL = NULL;
111
112static int area_flags_to_page_flags(int aflags);
113static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
114static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
115 as_area_t *avoid_area);
116static void sh_info_remove_reference(share_info_t *sh_info);
117
118#ifndef __OBJC__
119static int as_constructor(void *obj, int flags)
120{
121 as_t *as = (as_t *) obj;
122 int rc;
123
124 link_initialize(&as->inactive_as_with_asid_link);
125 mutex_initialize(&as->lock);
126
127 rc = as_constructor_arch(as, flags);
128
129 return rc;
130}
131
132static int as_destructor(void *obj)
133{
134 as_t *as = (as_t *) obj;
135
136 return as_destructor_arch(as);
137}
138#endif
139
140/** Initialize address space subsystem. */
141void as_init(void)
142{
143 as_arch_init();
144
145#ifndef __OBJC__
146 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
147 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
148#endif
149
150 AS_KERNEL = as_create(FLAG_AS_KERNEL);
151 if (!AS_KERNEL)
152 panic("can't create kernel address space\n");
153
154}
155
156/** Create address space.
157 *
158 * @param flags Flags that influence the way in which the address space is created.
159 */
160as_t *as_create(int flags)
161{
162 as_t *as;
163
164#ifdef __OBJC__
165 as = [as_t new];
166 link_initialize(&as->inactive_as_with_asid_link);
167 mutex_initialize(&as->lock);
168 (void) as_constructor_arch(as, flags);
169#else
170 as = (as_t *) slab_alloc(as_slab, 0);
171#endif
172 (void) as_create_arch(as, 0);
173
174 btree_create(&as->as_area_btree);
175
176 if (flags & FLAG_AS_KERNEL)
177 as->asid = ASID_KERNEL;
178 else
179 as->asid = ASID_INVALID;
180
181 as->refcount = 0;
182 as->cpu_refcount = 0;
183#ifdef AS_PAGE_TABLE
184 as->genarch.page_table = page_table_create(flags);
185#else
186 page_table_create(flags);
187#endif
188
189 return as;
190}
191
192/** Destroy address space.
193 *
194 * When there are no tasks referencing this address space (i.e. its refcount is
195 * zero), the address space can be destroyed.
196 */
197void as_destroy(as_t *as)
198{
199 ipl_t ipl;
200 bool cond;
201
202 ASSERT(as->refcount == 0);
203
204 /*
205 * Since there is no reference to this address space,
206 * it is safe not to lock its mutex.
207 */
208 ipl = interrupts_disable();
209 spinlock_lock(&inactive_as_with_asid_lock);
210 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
211 if (as != AS && as->cpu_refcount == 0)
212 list_remove(&as->inactive_as_with_asid_link);
213 asid_put(as->asid);
214 }
215 spinlock_unlock(&inactive_as_with_asid_lock);
216
217 /*
218 * Destroy address space areas of the address space.
219 * The B+tree must be walked carefully because it is
220 * also being destroyed.
221 */
222 for (cond = true; cond; ) {
223 btree_node_t *node;
224
225 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
226 node = list_get_instance(as->as_area_btree.leaf_head.next,
227 btree_node_t, leaf_link);
228
229 if ((cond = node->keys)) {
230 as_area_destroy(as, node->key[0]);
231 }
232 }
233
234 btree_destroy(&as->as_area_btree);
235#ifdef AS_PAGE_TABLE
236 page_table_destroy(as->genarch.page_table);
237#else
238 page_table_destroy(NULL);
239#endif
240
241 interrupts_restore(ipl);
242
243#ifdef __OBJC__
244 [as free];
245#else
246 slab_free(as_slab, as);
247#endif
248}
249
250/** Create address space area of common attributes.
251 *
252 * The created address space area is added to the target address space.
253 *
254 * @param as Target address space.
255 * @param flags Flags of the area memory.
256 * @param size Size of area.
257 * @param base Base address of area.
258 * @param attrs Attributes of the area.
259 * @param backend Address space area backend. NULL if no backend is used.
260 * @param backend_data NULL or a pointer to an array holding two void *.
261 *
262 * @return Address space area on success or NULL on failure.
263 */
264as_area_t *
265as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
266 mem_backend_t *backend, mem_backend_data_t *backend_data)
267{
268 ipl_t ipl;
269 as_area_t *a;
270
271 if (base % PAGE_SIZE)
272 return NULL;
273
274 if (!size)
275 return NULL;
276
277 /* Writeable executable areas are not supported. */
278 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
279 return NULL;
280
281 ipl = interrupts_disable();
282 mutex_lock(&as->lock);
283
284 if (!check_area_conflicts(as, base, size, NULL)) {
285 mutex_unlock(&as->lock);
286 interrupts_restore(ipl);
287 return NULL;
288 }
289
290 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
291
292 mutex_initialize(&a->lock);
293
294 a->as = as;
295 a->flags = flags;
296 a->attributes = attrs;
297 a->pages = SIZE2FRAMES(size);
298 a->base = base;
299 a->sh_info = NULL;
300 a->backend = backend;
301 if (backend_data)
302 a->backend_data = *backend_data;
303 else
304 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
305 0);
306
307 btree_create(&a->used_space);
308
309 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
310
311 mutex_unlock(&as->lock);
312 interrupts_restore(ipl);
313
314 return a;
315}
316
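/*
 * Illustrative sketch, not part of the original file: creating an
 * anonymous, cacheable, read/write area in the current address space,
 * much like the sys_as_area_create() wrapper below does. The base address
 * and size are assumed to come from the caller.
 */
static as_area_t *example_create_anon_area(uintptr_t base, size_t size)
{
	return as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
	    AS_AREA_CACHEABLE, size, base, AS_AREA_ATTR_NONE,
	    &anon_backend, NULL);
}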
317/** Find address space area and change it.
318 *
319 * @param as Address space.
320 * @param address Virtual address belonging to the area to be changed. Must be
321 * page-aligned.
322 * @param size New size of the virtual memory block starting at address.
323 * @param flags Flags influencing the remap operation. Currently unused.
324 *
325 * @return Zero on success or a value from @ref errno.h otherwise.
326 */
327int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
328{
329 as_area_t *area;
330 ipl_t ipl;
331 size_t pages;
332
333 ipl = interrupts_disable();
334 mutex_lock(&as->lock);
335
336 /*
337 * Locate the area.
338 */
339 area = find_area_and_lock(as, address);
340 if (!area) {
341 mutex_unlock(&as->lock);
342 interrupts_restore(ipl);
343 return ENOENT;
344 }
345
346 if (area->backend == &phys_backend) {
347 /*
348 * Remapping of address space areas associated
349 * with memory mapped devices is not supported.
350 */
351 mutex_unlock(&area->lock);
352 mutex_unlock(&as->lock);
353 interrupts_restore(ipl);
354 return ENOTSUP;
355 }
356 if (area->sh_info) {
357 /*
358 * Remapping of shared address space areas
359 * is not supported.
360 */
361 mutex_unlock(&area->lock);
362 mutex_unlock(&as->lock);
363 interrupts_restore(ipl);
364 return ENOTSUP;
365 }
366
367 pages = SIZE2FRAMES((address - area->base) + size);
368 if (!pages) {
369 /*
370 * Zero size address space areas are not allowed.
371 */
372 mutex_unlock(&area->lock);
373 mutex_unlock(&as->lock);
374 interrupts_restore(ipl);
375 return EPERM;
376 }
377
378 if (pages < area->pages) {
379 bool cond;
380 uintptr_t start_free = area->base + pages*PAGE_SIZE;
381
382 /*
383 * Shrinking the area.
384 * No need to check for overlaps.
385 */
386
387 /*
388 * Start TLB shootdown sequence.
389 */
390 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
391 pages * PAGE_SIZE, area->pages - pages);
392
393 /*
394 * Remove frames belonging to used space starting from
395 * the highest addresses downwards until an overlap with
396 * the resized address space area is found. Note that this
397 * is also the right way to remove part of the used_space
398 * B+tree leaf list.
399 */
400 for (cond = true; cond;) {
401 btree_node_t *node;
402
403 ASSERT(!list_empty(&area->used_space.leaf_head));
404 node =
405 list_get_instance(area->used_space.leaf_head.prev,
406 btree_node_t, leaf_link);
407 if ((cond = (bool) node->keys)) {
408 uintptr_t b = node->key[node->keys - 1];
409 count_t c =
410 (count_t) node->value[node->keys - 1];
411 int i = 0;
412
413 if (overlaps(b, c * PAGE_SIZE, area->base,
414 pages * PAGE_SIZE)) {
415
416 if (b + c * PAGE_SIZE <= start_free) {
417 /*
418 * The whole interval fits
419 * completely in the resized
420 * address space area.
421 */
422 break;
423 }
424
425 /*
426 * Part of the interval corresponding
427 * to b and c overlaps with the resized
428 * address space area.
429 */
430
431 cond = false; /* we are almost done */
432 i = (start_free - b) >> PAGE_WIDTH;
433 if (!used_space_remove(area, start_free,
434 c - i))
435 panic("Could not remove used "
436 "space.\n");
437 } else {
438 /*
439 * The interval of used space can be
440 * completely removed.
441 */
442 if (!used_space_remove(area, b, c))
443 panic("Could not remove used "
444 "space.\n");
445 }
446
447 for (; i < c; i++) {
448 pte_t *pte;
449
450 page_table_lock(as, false);
451 pte = page_mapping_find(as, b +
452 i * PAGE_SIZE);
453 ASSERT(pte && PTE_VALID(pte) &&
454 PTE_PRESENT(pte));
455 if (area->backend &&
456 area->backend->frame_free) {
457 area->backend->frame_free(area,
458 b + i * PAGE_SIZE,
459 PTE_GET_FRAME(pte));
460 }
461 page_mapping_remove(as, b +
462 i * PAGE_SIZE);
463 page_table_unlock(as, false);
464 }
465 }
466 }
467
468 /*
469 * Finish TLB shootdown sequence.
470 */
471 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
472 area->pages - pages);
473 tlb_shootdown_finalize();
474
475 /*
476 * Invalidate software translation caches (e.g. TSB on sparc64).
477 */
478 as_invalidate_translation_cache(as, area->base +
479 pages * PAGE_SIZE, area->pages - pages);
480 } else {
481 /*
482 * Growing the area.
483 * Check for overlaps with other address space areas.
484 */
485 if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
486 area)) {
487 mutex_unlock(&area->lock);
488 mutex_unlock(&as->lock);
489 interrupts_restore(ipl);
490 return EADDRNOTAVAIL;
491 }
492 }
493
494 area->pages = pages;
495
496 mutex_unlock(&area->lock);
497 mutex_unlock(&as->lock);
498 interrupts_restore(ipl);
499
500 return 0;
501}
502
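/*
 * Illustrative sketch, not part of the original file: resizing an existing
 * area of the current address space, as the sys_as_area_resize() wrapper
 * below does. A return value of 0 means success; ENOENT, ENOTSUP, EPERM
 * and EADDRNOTAVAIL are the failure modes described above.
 */
static int example_resize_area(uintptr_t address, size_t new_size)
{
	/* The flags argument is currently unused by as_area_resize(). */
	return as_area_resize(AS, address, new_size, 0);
}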
503/** Destroy address space area.
504 *
505 * @param as Address space.
506 * @param address Address within the area to be deleted.
507 *
508 * @return Zero on success or a value from @ref errno.h on failure.
509 */
510int as_area_destroy(as_t *as, uintptr_t address)
511{
512 as_area_t *area;
513 uintptr_t base;
514 link_t *cur;
515 ipl_t ipl;
516
517 ipl = interrupts_disable();
518 mutex_lock(&as->lock);
519
520 area = find_area_and_lock(as, address);
521 if (!area) {
522 mutex_unlock(&as->lock);
523 interrupts_restore(ipl);
524 return ENOENT;
525 }
526
527 base = area->base;
528
529 /*
530 * Start TLB shootdown sequence.
531 */
532 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
533
534 /*
535 * Visit only the pages mapped by used_space B+tree.
536 */
537 for (cur = area->used_space.leaf_head.next;
538 cur != &area->used_space.leaf_head; cur = cur->next) {
539 btree_node_t *node;
540 int i;
541
542 node = list_get_instance(cur, btree_node_t, leaf_link);
543 for (i = 0; i < node->keys; i++) {
544 uintptr_t b = node->key[i];
545 count_t j;
546 pte_t *pte;
547
548 for (j = 0; j < (count_t) node->value[i]; j++) {
549 page_table_lock(as, false);
550 pte = page_mapping_find(as, b + j * PAGE_SIZE);
551 ASSERT(pte && PTE_VALID(pte) &&
552 PTE_PRESENT(pte));
553 if (area->backend &&
554 area->backend->frame_free) {
555 area->backend->frame_free(area, b +
556 j * PAGE_SIZE, PTE_GET_FRAME(pte));
557 }
558 page_mapping_remove(as, b + j * PAGE_SIZE);
559 page_table_unlock(as, false);
560 }
561 }
562 }
563
564 /*
565 * Finish TLB shootdown sequence.
566 */
567 tlb_invalidate_pages(as->asid, area->base, area->pages);
568 tlb_shootdown_finalize();
569
570 /*
571 * Invalidate potential software translation caches (e.g. TSB on
572 * sparc64).
573 */
574 as_invalidate_translation_cache(as, area->base, area->pages);
575
576 btree_destroy(&area->used_space);
577
578 area->attributes |= AS_AREA_ATTR_PARTIAL;
579
580 if (area->sh_info)
581 sh_info_remove_reference(area->sh_info);
582
583 mutex_unlock(&area->lock);
584
585 /*
586 * Remove the empty area from address space.
587 */
588 btree_remove(&as->as_area_btree, base, NULL);
589
590 free(area);
591
592 mutex_unlock(&as->lock);
593 interrupts_restore(ipl);
594 return 0;
595}
596
597/** Share address space area with another or the same address space.
598 *
599 * Address space area mapping is shared with a new address space area.
600 * If the source address space area has not been shared so far,
601 * a new sh_info is created. The new address space area simply gets the
602 * sh_info of the source area. The process of duplicating the
603 * mapping is done through the backend share function.
604 *
605 * @param src_as Pointer to source address space.
606 * @param src_base Base address of the source address space area.
607 * @param acc_size Expected size of the source area.
608 * @param dst_as Pointer to destination address space.
609 * @param dst_base Target base address.
610 * @param dst_flags_mask Destination address space area flags mask.
611 *
612 * @return Zero on success or ENOENT if there is no such task or if there is no
613 * such address space area, EPERM if there was a problem in accepting the area
614 * or ENOMEM if there was a problem in allocating destination address space
615 * area. ENOTSUP is returned if the address space area backend does not support
616 * sharing.
617 */
618int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
619 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
620{
621 ipl_t ipl;
622 int src_flags;
623 size_t src_size;
624 as_area_t *src_area, *dst_area;
625 share_info_t *sh_info;
626 mem_backend_t *src_backend;
627 mem_backend_data_t src_backend_data;
628
629 ipl = interrupts_disable();
630 mutex_lock(&src_as->lock);
631 src_area = find_area_and_lock(src_as, src_base);
632 if (!src_area) {
633 /*
634 * Could not find the source address space area.
635 */
636 mutex_unlock(&src_as->lock);
637 interrupts_restore(ipl);
638 return ENOENT;
639 }
640
641 if (!src_area->backend || !src_area->backend->share) {
642 /*
643 * There is no backend or the backend does not
644 * know how to share the area.
645 */
646 mutex_unlock(&src_area->lock);
647 mutex_unlock(&src_as->lock);
648 interrupts_restore(ipl);
649 return ENOTSUP;
650 }
651
652 src_size = src_area->pages * PAGE_SIZE;
653 src_flags = src_area->flags;
654 src_backend = src_area->backend;
655 src_backend_data = src_area->backend_data;
656
657 /* Share the cacheable flag from the original mapping */
658 if (src_flags & AS_AREA_CACHEABLE)
659 dst_flags_mask |= AS_AREA_CACHEABLE;
660
661 if (src_size != acc_size ||
662 (src_flags & dst_flags_mask) != dst_flags_mask) {
663 mutex_unlock(&src_area->lock);
664 mutex_unlock(&src_as->lock);
665 interrupts_restore(ipl);
666 return EPERM;
667 }
668
669 /*
670 * Now we are committed to sharing the area.
671 * First, prepare the area for sharing.
672 * Then it will be safe to unlock it.
673 */
674 sh_info = src_area->sh_info;
675 if (!sh_info) {
676 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
677 mutex_initialize(&sh_info->lock);
678 sh_info->refcount = 2;
679 btree_create(&sh_info->pagemap);
680 src_area->sh_info = sh_info;
681 } else {
682 mutex_lock(&sh_info->lock);
683 sh_info->refcount++;
684 mutex_unlock(&sh_info->lock);
685 }
686
687 src_area->backend->share(src_area);
688
689 mutex_unlock(&src_area->lock);
690 mutex_unlock(&src_as->lock);
691
692 /*
693 * Create copy of the source address space area.
694 * The destination area is created with AS_AREA_ATTR_PARTIAL
695 * attribute set which prevents race condition with
696 * preliminary as_page_fault() calls.
697 * The flags of the source area are masked against dst_flags_mask
698 * to support sharing in less privileged mode.
699 */
700 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
701 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
702 if (!dst_area) {
703 /*
704 * Destination address space area could not be created.
705 */
706 sh_info_remove_reference(sh_info);
707
708 interrupts_restore(ipl);
709 return ENOMEM;
710 }
711
712 /*
713 * Now the destination address space area has been
714 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
715 * attribute and set the sh_info.
716 */
717 mutex_lock(&dst_as->lock);
718 mutex_lock(&dst_area->lock);
719 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
720 dst_area->sh_info = sh_info;
721 mutex_unlock(&dst_area->lock);
722 mutex_unlock(&dst_as->lock);
723
724 interrupts_restore(ipl);
725
726 return 0;
727}
728
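/*
 * Illustrative sketch, not part of the original file: sharing an area
 * read-only into another address space. The source and destination
 * addresses are assumptions; in practice the IPC code supplies them.
 */
static int example_share_read_only(as_t *src_as, uintptr_t src_base,
    size_t size, as_t *dst_as, uintptr_t dst_base)
{
	return as_area_share(src_as, src_base, size, dst_as, dst_base,
	    AS_AREA_READ);
}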
729/** Check access mode for address space area.
730 *
731 * The address space area must be locked prior to this call.
732 *
733 * @param area Address space area.
734 * @param access Access mode.
735 *
736 * @return False if access violates area's permissions, true otherwise.
737 */
738bool as_area_check_access(as_area_t *area, pf_access_t access)
739{
740 int flagmap[] = {
741 [PF_ACCESS_READ] = AS_AREA_READ,
742 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
743 [PF_ACCESS_EXEC] = AS_AREA_EXEC
744 };
745
746 if (!(area->flags & flagmap[access]))
747 return false;
748
749 return true;
750}
751
752/** Handle page fault within the current address space.
753 *
754 * This is the high-level page fault handler. It decides
755 * whether the page fault can be resolved by any backend
756 * and if so, it invokes the backend to resolve the page
757 * fault.
758 *
759 * Interrupts are assumed disabled.
760 *
761 * @param page Faulting page.
762 * @param access Access mode that caused the fault (i.e. read/write/exec).
763 * @param istate Pointer to interrupted state.
764 *
765 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
766 * fault was caused by copy_to_uspace() or copy_from_uspace().
767 */
768int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
769{
770 pte_t *pte;
771 as_area_t *area;
772
773 if (!THREAD)
774 return AS_PF_FAULT;
775
776 ASSERT(AS);
777
778 mutex_lock(&AS->lock);
779 area = find_area_and_lock(AS, page);
780 if (!area) {
781 /*
782 * No area contained mapping for 'page'.
783 * Signal page fault to low-level handler.
784 */
785 mutex_unlock(&AS->lock);
786 goto page_fault;
787 }
788
789 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
790 /*
791 * The address space area is not fully initialized.
792 * Avoid possible race by returning error.
793 */
794 mutex_unlock(&area->lock);
795 mutex_unlock(&AS->lock);
796 goto page_fault;
797 }
798
799 if (!area->backend || !area->backend->page_fault) {
800 /*
801 * The address space area is not backed by any backend
802 * or the backend cannot handle page faults.
803 */
804 mutex_unlock(&area->lock);
805 mutex_unlock(&AS->lock);
806 goto page_fault;
807 }
808
809 page_table_lock(AS, false);
810
811 /*
812 * To avoid a race condition between two page faults
813 * on the same address, we need to make sure
814 * the mapping has not already been inserted.
815 */
816 if ((pte = page_mapping_find(AS, page))) {
817 if (PTE_PRESENT(pte)) {
818 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
819 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
820 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
821 page_table_unlock(AS, false);
822 mutex_unlock(&area->lock);
823 mutex_unlock(&AS->lock);
824 return AS_PF_OK;
825 }
826 }
827 }
828
829 /*
830 * Resort to the backend page fault handler.
831 */
832 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
833 page_table_unlock(AS, false);
834 mutex_unlock(&area->lock);
835 mutex_unlock(&AS->lock);
836 goto page_fault;
837 }
838
839 page_table_unlock(AS, false);
840 mutex_unlock(&area->lock);
841 mutex_unlock(&AS->lock);
842 return AS_PF_OK;
843
844page_fault:
845 if (THREAD->in_copy_from_uspace) {
846 THREAD->in_copy_from_uspace = false;
847 istate_set_retaddr(istate,
848 (uintptr_t) &memcpy_from_uspace_failover_address);
849 } else if (THREAD->in_copy_to_uspace) {
850 THREAD->in_copy_to_uspace = false;
851 istate_set_retaddr(istate,
852 (uintptr_t) &memcpy_to_uspace_failover_address);
853 } else {
854 return AS_PF_FAULT;
855 }
856
857 return AS_PF_DEFER;
858}
859
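/*
 * Illustrative sketch, not part of the original file: how an
 * architecture-specific low-level handler might consume the return value
 * of as_page_fault(). The handler name and the way the faulting address is
 * obtained are assumptions.
 */
static void example_data_fault_handler(uintptr_t badvaddr, istate_t *istate)
{
	int rc;

	/* Interrupts are assumed to be disabled, as in any low-level handler. */
	rc = as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
	    istate);
	if (rc == AS_PF_FAULT) {
		/* Neither a backend nor copy_*_uspace() could resolve it. */
		panic("unresolved page fault at %p\n", badvaddr);
	}
	/* AS_PF_OK and AS_PF_DEFER require no further action here. */
}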
860/** Switch address spaces.
861 *
862 * Note that this function cannot sleep as it is essentially a part of
863 * scheduling. Sleeping here would lead to deadlock on wakeup.
864 *
865 * @param old_as Old address space or NULL.
866 * @param new_as New address space.
867 */
868void as_switch(as_t *old_as, as_t *new_as)
869{
870 ipl_t ipl;
871 bool needs_asid = false;
872
873 ipl = interrupts_disable();
874 spinlock_lock(&inactive_as_with_asid_lock);
875
876 /*
877 * First, take care of the old address space.
878 */
879 if (old_as) {
880 mutex_lock_active(&old_as->lock);
881 ASSERT(old_as->cpu_refcount);
882 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
883 /*
884 * The old address space is no longer active on
885 * any processor. It can be appended to the
886 * list of inactive address spaces with assigned
887 * ASID.
888 */
889 ASSERT(old_as->asid != ASID_INVALID);
890 list_append(&old_as->inactive_as_with_asid_link,
891 &inactive_as_with_asid_head);
892 }
893 mutex_unlock(&old_as->lock);
894
895 /*
896 * Perform architecture-specific tasks when the address space
897 * is being removed from the CPU.
898 */
899 as_deinstall_arch(old_as);
900 }
901
902 /*
903 * Second, prepare the new address space.
904 */
905 mutex_lock_active(&new_as->lock);
906 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
907 if (new_as->asid != ASID_INVALID) {
908 list_remove(&new_as->inactive_as_with_asid_link);
909 } else {
910 /*
911 * Defer call to asid_get() until new_as->lock is released.
912 */
913 needs_asid = true;
914 }
915 }
916#ifdef AS_PAGE_TABLE
917 SET_PTL0_ADDRESS(new_as->genarch.page_table);
918#endif
919 mutex_unlock(&new_as->lock);
920
921 if (needs_asid) {
922 /*
923 * Allocation of new ASID was deferred
924 * until now in order to avoid deadlock.
925 */
926 asid_t asid;
927
928 asid = asid_get();
929 mutex_lock_active(&new_as->lock);
930 new_as->asid = asid;
931 mutex_unlock(&new_as->lock);
932 }
933 spinlock_unlock(&inactive_as_with_asid_lock);
934 interrupts_restore(ipl);
935
936 /*
937 * Perform architecture-specific steps.
938 * (e.g. write ASID to hardware register etc.)
939 */
940 as_install_arch(new_as);
941
942 AS = new_as;
943}
944
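/*
 * Illustrative sketch, not part of the original file: the kind of call the
 * scheduler is expected to make when the incoming thread belongs to a
 * different task. The way the new address space is obtained is an
 * assumption.
 */
static void example_switch_address_space(as_t *new_task_as)
{
	if (AS != new_task_as)
		as_switch(AS, new_task_as);
}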
945/** Convert address space area flags to page flags.
946 *
947 * @param aflags Flags of some address space area.
948 *
949 * @return Flags to be passed to page_mapping_insert().
950 */
951int area_flags_to_page_flags(int aflags)
952{
953 int flags;
954
955 flags = PAGE_USER | PAGE_PRESENT;
956
957 if (aflags & AS_AREA_READ)
958 flags |= PAGE_READ;
959
960 if (aflags & AS_AREA_WRITE)
961 flags |= PAGE_WRITE;
962
963 if (aflags & AS_AREA_EXEC)
964 flags |= PAGE_EXEC;
965
966 if (aflags & AS_AREA_CACHEABLE)
967 flags |= PAGE_CACHEABLE;
968
969 return flags;
970}
971
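/*
 * Worked example (informal): for a cacheable read/write area, i.e.
 * aflags == AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, the result
 * is PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which the backends then pass to page_mapping_insert().
 */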
972/** Compute flags for virtual address translation subsystem.
973 *
974 * The address space area must be locked.
975 * Interrupts must be disabled.
976 *
977 * @param a Address space area.
978 *
979 * @return Flags to be used in page_mapping_insert().
980 */
981int as_area_get_flags(as_area_t *a)
982{
983 return area_flags_to_page_flags(a->flags);
984}
985
986/** Create page table.
987 *
988 * Depending on architecture, create either address space
989 * private or global page table.
990 *
991 * @param flags Flags saying whether the page table is for kernel address space.
992 *
993 * @return First entry of the page table.
994 */
995pte_t *page_table_create(int flags)
996{
997#ifdef __OBJC__
998 return [as_t page_table_create: flags];
999#else
1000 ASSERT(as_operations);
1001 ASSERT(as_operations->page_table_create);
1002
1003 return as_operations->page_table_create(flags);
1004#endif
1005}
1006
1007/** Destroy page table.
1008 *
1009 * Destroy page table in architecture specific way.
1010 *
1011 * @param page_table Physical address of PTL0.
1012 */
1013void page_table_destroy(pte_t *page_table)
1014{
1015#ifdef __OBJC__
1016 return [as_t page_table_destroy: page_table];
1017#else
1018 ASSERT(as_operations);
1019 ASSERT(as_operations->page_table_destroy);
1020
1021 as_operations->page_table_destroy(page_table);
1022#endif
1023}
1024
1025/** Lock page table.
1026 *
1027 * This function should be called before any page_mapping_insert(),
1028 * page_mapping_remove() and page_mapping_find().
1029 *
1030 * Locking order is such that address space areas must be locked
1031 * prior to this call. Address space can be locked prior to this
1032 * call in which case the lock argument is false.
1033 *
1034 * @param as Address space.
1035 * @param lock If false, do not attempt to lock as->lock.
1036 */
1037void page_table_lock(as_t *as, bool lock)
1038{
1039#ifdef __OBJC__
1040 [as page_table_lock: lock];
1041#else
1042 ASSERT(as_operations);
1043 ASSERT(as_operations->page_table_lock);
1044
1045 as_operations->page_table_lock(as, lock);
1046#endif
1047}
1048
1049/** Unlock page table.
1050 *
1051 * @param as Address space.
1052 * @param unlock If false, do not attempt to unlock as->lock.
1053 */
1054void page_table_unlock(as_t *as, bool unlock)
1055{
1056#ifdef __OBJC__
1057 [as page_table_unlock: unlock];
1058#else
1059 ASSERT(as_operations);
1060 ASSERT(as_operations->page_table_unlock);
1061
1062 as_operations->page_table_unlock(as, unlock);
1063#endif
1064}
1065
1066
1067/** Find address space area and lock it.
1068 *
1069 * The address space must be locked and interrupts must be disabled.
1070 *
1071 * @param as Address space.
1072 * @param va Virtual address.
1073 *
1074 * @return Locked address space area containing va on success or NULL on
1075 * failure.
1076 */
1077as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1078{
1079 as_area_t *a;
1080 btree_node_t *leaf, *lnode;
1081 int i;
1082
1083 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1084 if (a) {
1085 /* va is the base address of an address space area */
1086 mutex_lock(&a->lock);
1087 return a;
1088 }
1089
1090 /*
1091 * Search the leaf node and the rightmost record of its left neighbour
1092 * to find out whether this is a miss or va belongs to an address
1093 * space area found there.
1094 */
1095
1096 /* First, search the leaf node itself. */
1097 for (i = 0; i < leaf->keys; i++) {
1098 a = (as_area_t *) leaf->value[i];
1099 mutex_lock(&a->lock);
1100 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1101 return a;
1102 }
1103 mutex_unlock(&a->lock);
1104 }
1105
1106 /*
1107 * Second, locate the left neighbour and test its last record.
1108 * Because of its position in the B+tree, it must have base < va.
1109 */
1110 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1111 if (lnode) {
1112 a = (as_area_t *) lnode->value[lnode->keys - 1];
1113 mutex_lock(&a->lock);
1114 if (va < a->base + a->pages * PAGE_SIZE) {
1115 return a;
1116 }
1117 mutex_unlock(&a->lock);
1118 }
1119
1120 return NULL;
1121}
1122
1123/** Check area conflicts with other areas.
1124 *
1125 * The address space must be locked and interrupts must be disabled.
1126 *
1127 * @param as Address space.
1128 * @param va Starting virtual address of the area being tested.
1129 * @param size Size of the area being tested.
1130 * @param avoid_area Do not touch this area.
1131 *
1132 * @return True if there is no conflict, false otherwise.
1133 */
1134bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1135 as_area_t *avoid_area)
1136{
1137 as_area_t *a;
1138 btree_node_t *leaf, *node;
1139 int i;
1140
1141 /*
1142 * We don't want any area to have conflicts with NULL page.
1143 */
1144 if (overlaps(va, size, NULL, PAGE_SIZE))
1145 return false;
1146
1147 /*
1148 * The leaf node is found in O(log n), where n is proportional to
1149 * the number of address space areas belonging to as.
1150 * The check for conflicts is then attempted on the rightmost
1151 * record in the left neighbour, the leftmost record in the right
1152 * neighbour and all records in the leaf node itself.
1153 */
1154
1155 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1156 if (a != avoid_area)
1157 return false;
1158 }
1159
1160 /* First, check the two border cases. */
1161 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1162 a = (as_area_t *) node->value[node->keys - 1];
1163 mutex_lock(&a->lock);
1164 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1165 mutex_unlock(&a->lock);
1166 return false;
1167 }
1168 mutex_unlock(&a->lock);
1169 }
1170 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1171 if (node) {
1172 a = (as_area_t *) node->value[0];
1173 mutex_lock(&a->lock);
1174 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1175 mutex_unlock(&a->lock);
1176 return false;
1177 }
1178 mutex_unlock(&a->lock);
1179 }
1180
1181 /* Second, check the leaf node. */
1182 for (i = 0; i < leaf->keys; i++) {
1183 a = (as_area_t *) leaf->value[i];
1184
1185 if (a == avoid_area)
1186 continue;
1187
1188 mutex_lock(&a->lock);
1189 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1190 mutex_unlock(&a->lock);
1191 return false;
1192 }
1193 mutex_unlock(&a->lock);
1194 }
1195
1196 /*
1197 * So far, the area does not conflict with other areas.
1198 * Check that it does not conflict with the kernel address space.
1199 */
1200 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1201 return !overlaps(va, size,
1202 KERNEL_ADDRESS_SPACE_START,
1203 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1204 }
1205
1206 return true;
1207}
1208
1209/** Return size of the address space area with given base. */
1210size_t as_get_size(uintptr_t base)
1211{
1212 ipl_t ipl;
1213 as_area_t *src_area;
1214 size_t size;
1215
1216 ipl = interrupts_disable();
1217 src_area = find_area_and_lock(AS, base);
1218 if (src_area){
1219 size = src_area->pages * PAGE_SIZE;
1220 mutex_unlock(&src_area->lock);
1221 } else {
1222 size = 0;
1223 }
1224 interrupts_restore(ipl);
1225 return size;
1226}
1227
1228/** Mark portion of address space area as used.
1229 *
1230 * The address space area must be already locked.
1231 *
1232 * @param a Address space area.
1233 * @param page First page to be marked.
1234 * @param count Number of pages to be marked.
1235 *
1236 * @return 0 on failure and 1 on success.
1237 */
1238int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1239{
1240 btree_node_t *leaf, *node;
1241 count_t pages;
1242 int i;
1243
1244 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1245 ASSERT(count);
1246
1247 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1248 if (pages) {
1249 /*
1250 * We hit the beginning of some used space.
1251 */
1252 return 0;
1253 }
1254
1255 if (!leaf->keys) {
1256 btree_insert(&a->used_space, page, (void *) count, leaf);
1257 return 1;
1258 }
1259
1260 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1261 if (node) {
1262 uintptr_t left_pg = node->key[node->keys - 1];
1263 uintptr_t right_pg = leaf->key[0];
1264 count_t left_cnt = (count_t) node->value[node->keys - 1];
1265 count_t right_cnt = (count_t) leaf->value[0];
1266
1267 /*
1268 * Examine the possibility that the interval fits
1269 * somewhere between the rightmost interval of
1270 * the left neighbour and the first interval of the leaf.
1271 */
1272
1273 if (page >= right_pg) {
1274 /* Do nothing. */
1275 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1276 left_cnt * PAGE_SIZE)) {
1277 /* The interval intersects with the left interval. */
1278 return 0;
1279 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1280 right_cnt * PAGE_SIZE)) {
1281 /* The interval intersects with the right interval. */
1282 return 0;
1283 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1284 (page + count * PAGE_SIZE == right_pg)) {
1285 /*
1286 * The interval can be added by merging the two already
1287 * present intervals.
1288 */
1289 node->value[node->keys - 1] += count + right_cnt;
1290 btree_remove(&a->used_space, right_pg, leaf);
1291 return 1;
1292 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1293 /*
1294 * The interval can be added by simply growing the left
1295 * interval.
1296 */
1297 node->value[node->keys - 1] += count;
1298 return 1;
1299 } else if (page + count * PAGE_SIZE == right_pg) {
1300 /*
1301 * The interval can be added by simply moving the base of
1302 * the right interval down and increasing its size
1303 * accordingly.
1304 */
1305 leaf->value[0] += count;
1306 leaf->key[0] = page;
1307 return 1;
1308 } else {
1309 /*
1310 * The interval is between both neighbouring intervals,
1311 * but cannot be merged with any of them.
1312 */
1313 btree_insert(&a->used_space, page, (void *) count,
1314 leaf);
1315 return 1;
1316 }
1317 } else if (page < leaf->key[0]) {
1318 uintptr_t right_pg = leaf->key[0];
1319 count_t right_cnt = (count_t) leaf->value[0];
1320
1321 /*
1322 * Investigate the border case in which the left neighbour does
1323 * not exist but the interval fits from the left.
1324 */
1325
1326 if (overlaps(page, count * PAGE_SIZE, right_pg,
1327 right_cnt * PAGE_SIZE)) {
1328 /* The interval intersects with the right interval. */
1329 return 0;
1330 } else if (page + count * PAGE_SIZE == right_pg) {
1331 /*
1332 * The interval can be added by moving the base of the
1333 * right interval down and increasing its size
1334 * accordingly.
1335 */
1336 leaf->key[0] = page;
1337 leaf->value[0] += count;
1338 return 1;
1339 } else {
1340 /*
1341 * The interval doesn't adjoin with the right interval.
1342 * It must be added individually.
1343 */
1344 btree_insert(&a->used_space, page, (void *) count,
1345 leaf);
1346 return 1;
1347 }
1348 }
1349
1350 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1351 if (node) {
1352 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1353 uintptr_t right_pg = node->key[0];
1354 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1355 count_t right_cnt = (count_t) node->value[0];
1356
1357 /*
1358 * Examine the possibility that the interval fits
1359 * somewhere between the leftmost interval of
1360 * the right neighbour and the last interval of the leaf.
1361 */
1362
1363 if (page < left_pg) {
1364 /* Do nothing. */
1365 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1366 left_cnt * PAGE_SIZE)) {
1367 /* The interval intersects with the left interval. */
1368 return 0;
1369 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1370 right_cnt * PAGE_SIZE)) {
1371 /* The interval intersects with the right interval. */
1372 return 0;
1373 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1374 (page + count * PAGE_SIZE == right_pg)) {
1375 /*
1376 * The interval can be added by merging the two already
1377 * present intervals.
1378 */
1379 leaf->value[leaf->keys - 1] += count + right_cnt;
1380 btree_remove(&a->used_space, right_pg, node);
1381 return 1;
1382 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1383 /*
1384 * The interval can be added by simply growing the left
1385 * interval.
1386 */
1387 leaf->value[leaf->keys - 1] += count;
1388 return 1;
1389 } else if (page + count * PAGE_SIZE == right_pg) {
1390 /*
1391 * The interval can be added by simply moving the base of
1392 * the right interval down and increasing its size
1393 * accordingly.
1394 */
1395 node->value[0] += count;
1396 node->key[0] = page;
1397 return 1;
1398 } else {
1399 /*
1400 * The interval is between both neighbouring intervals,
1401 * but cannot be merged with any of them.
1402 */
1403 btree_insert(&a->used_space, page, (void *) count,
1404 leaf);
1405 return 1;
1406 }
1407 } else if (page >= leaf->key[leaf->keys - 1]) {
1408 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1409 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1410
1411 /*
1412 * Investigate the border case in which the right neighbour
1413 * does not exist but the interval fits from the right.
1414 */
1415
1416 if (overlaps(page, count * PAGE_SIZE, left_pg,
1417 left_cnt * PAGE_SIZE)) {
1418 /* The interval intersects with the left interval. */
1419 return 0;
1420 } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1421 /*
1422 * The interval can be added by growing the left
1423 * interval.
1424 */
1425 leaf->value[leaf->keys - 1] += count;
1426 return 1;
1427 } else {
1428 /*
1429 * The interval doesn't adjoin with the left interval.
1430 * It must be added individually.
1431 */
1432 btree_insert(&a->used_space, page, (void *) count,
1433 leaf);
1434 return 1;
1435 }
1436 }
1437
1438 /*
1439 * Note that if the algorithm made it thus far, the interval can fit
1440 * only between two other intervals of the leaf. The two border cases
1441 * were already resolved.
1442 */
1443 for (i = 1; i < leaf->keys; i++) {
1444 if (page < leaf->key[i]) {
1445 uintptr_t left_pg = leaf->key[i - 1];
1446 uintptr_t right_pg = leaf->key[i];
1447 count_t left_cnt = (count_t) leaf->value[i - 1];
1448 count_t right_cnt = (count_t) leaf->value[i];
1449
1450 /*
1451 * The interval fits between left_pg and right_pg.
1452 */
1453
1454 if (overlaps(page, count * PAGE_SIZE, left_pg,
1455 left_cnt * PAGE_SIZE)) {
1456 /*
1457 * The interval intersects with the left
1458 * interval.
1459 */
1460 return 0;
1461 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1462 right_cnt * PAGE_SIZE)) {
1463 /*
1464 * The interval intersects with the right
1465 * interval.
1466 */
1467 return 0;
1468 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1469 (page + count * PAGE_SIZE == right_pg)) {
1470 /*
1471 * The interval can be added by merging the two
1472 * already present intervals.
1473 */
1474 leaf->value[i - 1] += count + right_cnt;
1475 btree_remove(&a->used_space, right_pg, leaf);
1476 return 1;
1477 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1478 /*
1479 * The interval can be added by simply growing
1480 * the left interval.
1481 */
1482 leaf->value[i - 1] += count;
1483 return 1;
1484 } else if (page + count * PAGE_SIZE == right_pg) {
1485 /*
1486 * The interval can be added by simply moving the
1487 * base of the right interval down and
1488 * increasing its size accordingly.
1489 */
1490 leaf->value[i] += count;
1491 leaf->key[i] = page;
1492 return 1;
1493 } else {
1494 /*
1495 * The interval is between both neighbouring
1496 * intervals, but cannot be merged with any of
1497 * them.
1498 */
1499 btree_insert(&a->used_space, page,
1500 (void *) count, leaf);
1501 return 1;
1502 }
1503 }
1504 }
1505
1506 panic("Inconsistency detected while adding %d pages of used space at "
1507 "%p.\n", count, page);
1508}
1509
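/*
 * Worked example (informal, assuming 4K pages): if used_space already
 * holds (0x1000, 2 pages) and (0x4000, 1 page), then inserting
 * (0x3000, 1 page) touches both neighbours: 0x1000 + 2 * PAGE_SIZE ==
 * 0x3000 and 0x3000 + PAGE_SIZE == 0x4000, so the three intervals are
 * merged into a single (0x1000, 4 pages) record and the 0x4000 key is
 * removed from the B+tree.
 */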
1510/** Mark portion of address space area as unused.
1511 *
1512 * The address space area must be already locked.
1513 *
1514 * @param a Address space area.
1515 * @param page First page to be marked.
1516 * @param count Number of pages to be marked.
1517 *
1518 * @return 0 on failure and 1 on success.
1519 */
1520int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1521{
1522 btree_node_t *leaf, *node;
1523 count_t pages;
1524 int i;
1525
1526 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1527 ASSERT(count);
1528
1529 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1530 if (pages) {
1531 /*
1532 * We are lucky, page is the beginning of some interval.
1533 */
1534 if (count > pages) {
1535 return 0;
1536 } else if (count == pages) {
1537 btree_remove(&a->used_space, page, leaf);
1538 return 1;
1539 } else {
1540 /*
1541 * Find the respective interval.
1542 * Decrease its size and relocate its start address.
1543 */
1544 for (i = 0; i < leaf->keys; i++) {
1545 if (leaf->key[i] == page) {
1546 leaf->key[i] += count * PAGE_SIZE;
1547 leaf->value[i] -= count;
1548 return 1;
1549 }
1550 }
1551 goto error;
1552 }
1553 }
1554
1555 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1556 if (node && page < leaf->key[0]) {
1557 uintptr_t left_pg = node->key[node->keys - 1];
1558 count_t left_cnt = (count_t) node->value[node->keys - 1];
1559
1560 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1561 count * PAGE_SIZE)) {
1562 if (page + count * PAGE_SIZE ==
1563 left_pg + left_cnt * PAGE_SIZE) {
1564 /*
1565 * The interval is contained in the rightmost
1566 * interval of the left neighbour and can be
1567 * removed by updating the size of the bigger
1568 * interval.
1569 */
1570 node->value[node->keys - 1] -= count;
1571 return 1;
1572 } else if (page + count * PAGE_SIZE <
1573 left_pg + left_cnt*PAGE_SIZE) {
1574 count_t new_cnt;
1575
1576 /*
1577 * The interval is contained in the rightmost
1578 * interval of the left neighbour but its
1579 * removal requires both updating the size of
1580 * the original interval and also inserting a
1581 * new interval.
1582 */
1583 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1584 (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1585 node->value[node->keys - 1] -= count + new_cnt;
1586 btree_insert(&a->used_space, page +
1587 count * PAGE_SIZE, (void *) new_cnt, leaf);
1588 return 1;
1589 }
1590 }
1591 return 0;
1592 } else if (page < leaf->key[0]) {
1593 return 0;
1594 }
1595
1596 if (page > leaf->key[leaf->keys - 1]) {
1597 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1598 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1599
1600 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1601 count * PAGE_SIZE)) {
1602 if (page + count * PAGE_SIZE ==
1603 left_pg + left_cnt * PAGE_SIZE) {
1604 /*
1605 * The interval is contained in the rightmost
1606 * interval of the leaf and can be removed by
1607 * updating the size of the bigger interval.
1608 */
1609 leaf->value[leaf->keys - 1] -= count;
1610 return 1;
1611 } else if (page + count * PAGE_SIZE < left_pg +
1612 left_cnt * PAGE_SIZE) {
1613 count_t new_cnt;
1614
1615 /*
1616 * The interval is contained in the rightmost
1617 * interval of the leaf but its removal
1618 * requires both updating the size of the
1619 * original interval and also inserting a new
1620 * interval.
1621 */
1622 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1623 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1624 leaf->value[leaf->keys - 1] -= count + new_cnt;
1625 btree_insert(&a->used_space, page +
1626 count * PAGE_SIZE, (void *) new_cnt, leaf);
1627 return 1;
1628 }
1629 }
1630 return 0;
1631 }
1632
1633 /*
1634 * The border cases have been already resolved.
1635 * Now the interval can be only between intervals of the leaf.
1636 */
1637 for (i = 1; i < leaf->keys - 1; i++) {
1638 if (page < leaf->key[i]) {
1639 uintptr_t left_pg = leaf->key[i - 1];
1640 count_t left_cnt = (count_t) leaf->value[i - 1];
1641
1642 /*
1643 * Now the interval is between intervals corresponding
1644 * to (i - 1) and i.
1645 */
1646 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1647 count * PAGE_SIZE)) {
1648 if (page + count * PAGE_SIZE ==
1649 left_pg + left_cnt*PAGE_SIZE) {
1650 /*
1651 * The interval is contained in the
1652 * interval (i - 1) of the leaf and can
1653 * be removed by updating the size of
1654 * the bigger interval.
1655 */
1656 leaf->value[i - 1] -= count;
1657 return 1;
1658 } else if (page + count * PAGE_SIZE <
1659 left_pg + left_cnt * PAGE_SIZE) {
1660 count_t new_cnt;
1661
1662 /*
1663 * The interval is contained in the
1664 * interval (i - 1) of the leaf but its
1665 * removal requires both updating the
1666 * size of the original interval and
1667 * also inserting a new interval.
1668 */
1669 new_cnt = ((left_pg +
1670 left_cnt * PAGE_SIZE) -
1671 (page + count * PAGE_SIZE)) >>
1672 PAGE_WIDTH;
1673 leaf->value[i - 1] -= count + new_cnt;
1674 btree_insert(&a->used_space, page +
1675 count * PAGE_SIZE, (void *) new_cnt,
1676 leaf);
1677 return 1;
1678 }
1679 }
1680 return 0;
1681 }
1682 }
1683
1684error:
1685 panic("Inconsistency detected while removing %d pages of used space "
1686 "from %p.\n", count, page);
1687}
1688
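/*
 * Worked example (informal, assuming 4K pages and PAGE_WIDTH == 12):
 * removing (0x3000, 1 page) from a single interval (0x1000, 4 pages)
 * punches a hole in its middle. new_cnt = ((0x1000 + 4 * 0x1000) -
 * (0x3000 + 0x1000)) >> PAGE_WIDTH == 1, so the original record shrinks
 * to (0x1000, 2 pages) and a new record (0x4000, 1 page) is inserted for
 * the part beyond the hole.
 */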
1689/** Remove reference to address space area share info.
1690 *
1691 * If the reference count drops to 0, the sh_info is deallocated.
1692 *
1693 * @param sh_info Pointer to address space area share info.
1694 */
1695void sh_info_remove_reference(share_info_t *sh_info)
1696{
1697 bool dealloc = false;
1698
1699 mutex_lock(&sh_info->lock);
1700 ASSERT(sh_info->refcount);
1701 if (--sh_info->refcount == 0) {
1702 dealloc = true;
1703 link_t *cur;
1704
1705 /*
1706 * Now walk carefully the pagemap B+tree and free/remove
1707 * reference from all frames found there.
1708 */
1709 for (cur = sh_info->pagemap.leaf_head.next;
1710 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1711 btree_node_t *node;
1712 int i;
1713
1714 node = list_get_instance(cur, btree_node_t, leaf_link);
1715 for (i = 0; i < node->keys; i++)
1716 frame_free((uintptr_t) node->value[i]);
1717 }
1718
1719 }
1720 mutex_unlock(&sh_info->lock);
1721
1722 if (dealloc) {
1723 btree_destroy(&sh_info->pagemap);
1724 free(sh_info);
1725 }
1726}
1727
1728/*
1729 * Address space related syscalls.
1730 */
1731
1732/** Wrapper for as_area_create(). */
1733unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1734{
1735 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1736 AS_AREA_ATTR_NONE, &anon_backend, NULL))
1737 return (unative_t) address;
1738 else
1739 return (unative_t) -1;
1740}
1741
1742/** Wrapper for as_area_resize(). */
1743unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1744{
1745 return (unative_t) as_area_resize(AS, address, size, 0);
1746}
1747
1748/** Wrapper for as_area_destroy(). */
1749unative_t sys_as_area_destroy(uintptr_t address)
1750{
1751 return (unative_t) as_area_destroy(AS, address);
1752}
1753
1754/** Print out information about address space.
1755 *
1756 * @param as Address space.
1757 */
1758void as_print(as_t *as)
1759{
1760 ipl_t ipl;
1761
1762 ipl = interrupts_disable();
1763 mutex_lock(&as->lock);
1764
1765 /* print out info about address space areas */
1766 link_t *cur;
1767 for (cur = as->as_area_btree.leaf_head.next;
1768 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1769 btree_node_t *node;
1770
1771 node = list_get_instance(cur, btree_node_t, leaf_link);
1772
1773 int i;
1774 for (i = 0; i < node->keys; i++) {
1775 as_area_t *area = node->value[i];
1776
1777 mutex_lock(&area->lock);
1778 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1779 area, area->base, area->pages, area->base,
1780 area->base + area->pages*PAGE_SIZE);
1781 mutex_unlock(&area->lock);
1782 }
1783 }
1784
1785 mutex_unlock(&as->lock);
1786 interrupts_restore(ipl);
1787}
1788
1789/** @}
1790 */