source: mainline/kernel/generic/src/mm/as.c@ 92778f2

Last change on this file since 92778f2 was 92778f2, checked in by Jakub Jermar <jakub@…>, 19 years ago

Initial support for handling illegal virtual aliases on sparc64.

File size: 44.3 KB
1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
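/*
 * Illustrative sketch of the interface described above: create an address
 * space, populate it with an anonymous area, grow the area and tear
 * everything down again. This is a minimal, hypothetical example; the base
 * address and sizes are arbitrary and the snippet is compiled out.
 */
#if 0
static void as_usage_sketch(void)
{
	as_t *as = as_create(0);	/* ordinary (non-kernel) address space */
	as_area_t *area;

	area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    4 * PAGE_SIZE, 0x40000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
	if (area) {
		/* Grow the area from 4 to 8 pages, then destroy it. */
		(void) as_area_resize(as, 0x40000000, 8 * PAGE_SIZE, 0);
		(void) as_area_destroy(as, 0x40000000);
	}
	as_destroy(as);
}
#endif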
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
61#include <synch/mutex.h>
62#include <adt/list.h>
63#include <adt/btree.h>
64#include <proc/task.h>
65#include <proc/thread.h>
66#include <arch/asm.h>
67#include <panic.h>
68#include <debug.h>
69#include <print.h>
70#include <memstr.h>
71#include <macros.h>
72#include <arch.h>
73#include <errno.h>
74#include <config.h>
75#include <align.h>
76#include <arch/types.h>
77#include <typedefs.h>
78#include <syscall/copy.h>
79#include <arch/interrupt.h>
80
81#ifdef CONFIG_VIRT_IDX_DCACHE
82#include <arch/mm/cache.h>
83#endif /* CONFIG_VIRT_IDX_DCACHE */
84
85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
88 */
89as_operations_t *as_operations = NULL;
90
91/**
92 * Slab for as_t objects.
93 */
94static slab_cache_t *as_slab;
95
96/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
97SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
98
99/**
100 * This list contains address spaces that are not active on any
101 * processor and that have a valid ASID.
102 */
103LIST_INITIALIZE(inactive_as_with_asid_head);
104
105/** Kernel address space. */
106as_t *AS_KERNEL = NULL;
107
108static int area_flags_to_page_flags(int aflags);
109static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
110static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
111static void sh_info_remove_reference(share_info_t *sh_info);
112
113static int as_constructor(void *obj, int flags)
114{
115 as_t *as = (as_t *) obj;
116 int rc;
117
118 link_initialize(&as->inactive_as_with_asid_link);
119 mutex_initialize(&as->lock);
120
121 rc = as_constructor_arch(as, flags);
122
123 return rc;
124}
125
126static int as_destructor(void *obj)
127{
128 as_t *as = (as_t *) obj;
129
130 return as_destructor_arch(as);
131}
132
133/** Initialize address space subsystem. */
134void as_init(void)
135{
136 as_arch_init();
137
138 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
139 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
140
141 AS_KERNEL = as_create(FLAG_AS_KERNEL);
142 if (!AS_KERNEL)
143 panic("can't create kernel address space\n");
144
145}
146
147/** Create address space.
148 *
149 * @param flags Flags that influence the way in which the address space is created.
150 */
151as_t *as_create(int flags)
152{
153 as_t *as;
154
155 as = (as_t *) slab_alloc(as_slab, 0);
156 (void) as_create_arch(as, 0);
157
158 btree_create(&as->as_area_btree);
159
160 if (flags & FLAG_AS_KERNEL)
161 as->asid = ASID_KERNEL;
162 else
163 as->asid = ASID_INVALID;
164
165 as->refcount = 0;
166 as->cpu_refcount = 0;
167 as->page_table = page_table_create(flags);
168
169#ifdef CONFIG_VIRT_IDX_DCACHE
170 as->dcache_flush_on_install = false;
171 as->dcache_flush_on_deinstall = false;
172#endif /* CONFIG_VIRT_IDX_DCACHE */
173
174 return as;
175}
176
177/** Destroy address space.
178 *
179 * When there are no tasks referencing this address space (i.e. its refcount is zero),
180 * the address space can be destroyed.
181 */
182void as_destroy(as_t *as)
183{
184 ipl_t ipl;
185 bool cond;
186
187 ASSERT(as->refcount == 0);
188
189 /*
190 * Since there is no reference to this address space,
191 * it is safe not to lock its mutex.
192 */
193 ipl = interrupts_disable();
194 spinlock_lock(&inactive_as_with_asid_lock);
195 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
196 if (as != AS && as->cpu_refcount == 0)
197 list_remove(&as->inactive_as_with_asid_link);
198 asid_put(as->asid);
199 }
200 spinlock_unlock(&inactive_as_with_asid_lock);
201
202 /*
203 * Destroy address space areas of the address space.
204 * The B+tree must be walked carefully because it is
205 * also being destroyed.
206 */
207 for (cond = true; cond; ) {
208 btree_node_t *node;
209
210 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
211 node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
212
213 if ((cond = node->keys)) {
214 as_area_destroy(as, node->key[0]);
215 }
216 }
217
218 btree_destroy(&as->as_area_btree);
219 page_table_destroy(as->page_table);
220
221 interrupts_restore(ipl);
222
223 slab_free(as_slab, as);
224}
225
226/** Create an address space area with common attributes.
227 *
228 * The created address space area is added to the target address space.
229 *
230 * @param as Target address space.
231 * @param flags Flags of the area memory.
232 * @param size Size of area.
233 * @param base Base address of area.
234 * @param attrs Attributes of the area.
235 * @param backend Address space area backend. NULL if no backend is used.
236 * @param backend_data NULL or a pointer to an array holding two void *.
237 *
238 * @return Address space area on success or NULL on failure.
239 */
240as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
241 mem_backend_t *backend, mem_backend_data_t *backend_data)
242{
243 ipl_t ipl;
244 as_area_t *a;
245
246 if (base % PAGE_SIZE)
247 return NULL;
248
249 if (!size)
250 return NULL;
251
252 /* Writeable executable areas are not supported. */
253 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
254 return NULL;
255
256 ipl = interrupts_disable();
257 mutex_lock(&as->lock);
258
259 if (!check_area_conflicts(as, base, size, NULL)) {
260 mutex_unlock(&as->lock);
261 interrupts_restore(ipl);
262 return NULL;
263 }
264
265 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
266
267 mutex_initialize(&a->lock);
268
269 a->as = as;
270 a->flags = flags;
271 a->attributes = attrs;
272 a->pages = SIZE2FRAMES(size);
273 a->base = base;
274 a->sh_info = NULL;
275 a->backend = backend;
276 if (backend_data)
277 a->backend_data = *backend_data;
278 else
279 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
280
281#ifdef CONFIG_VIRT_IDX_DCACHE
282 /*
283 * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
284 * orig_color is probably wrong until the flag is reset. In other words, it is
285 * initialized with the color of the area being created and not with the color
286 * of the original address space area at the beginning of the share chain. Of
287 * course, the correct color is set by as_area_share() before the flag is
288 * reset.
289 */
290 a->orig_color = PAGE_COLOR(base);
291#endif /* CONFIG_VIRT_IDX_DCACHE */
292
293 btree_create(&a->used_space);
294
295 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
296
297 mutex_unlock(&as->lock);
298 interrupts_restore(ipl);
299
300 return a;
301}
302
303/** Find address space area and change it.
304 *
305 * @param as Address space.
306 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
307 * @param size New size of the virtual memory block starting at address.
308 * @param flags Flags influencing the remap operation. Currently unused.
309 *
310 * @return Zero on success or a value from @ref errno.h otherwise.
311 */
312int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
313{
314 as_area_t *area;
315 ipl_t ipl;
316 size_t pages;
317
318 ipl = interrupts_disable();
319 mutex_lock(&as->lock);
320
321 /*
322 * Locate the area.
323 */
324 area = find_area_and_lock(as, address);
325 if (!area) {
326 mutex_unlock(&as->lock);
327 interrupts_restore(ipl);
328 return ENOENT;
329 }
330
331 if (area->backend == &phys_backend) {
332 /*
333 * Remapping of address space areas associated
334 * with memory mapped devices is not supported.
335 */
336 mutex_unlock(&area->lock);
337 mutex_unlock(&as->lock);
338 interrupts_restore(ipl);
339 return ENOTSUP;
340 }
341 if (area->sh_info) {
342 /*
343 * Remapping of shared address space areas
344 * is not supported.
345 */
346 mutex_unlock(&area->lock);
347 mutex_unlock(&as->lock);
348 interrupts_restore(ipl);
349 return ENOTSUP;
350 }
351
352 pages = SIZE2FRAMES((address - area->base) + size);
353 if (!pages) {
354 /*
355 * Zero size address space areas are not allowed.
356 */
357 mutex_unlock(&area->lock);
358 mutex_unlock(&as->lock);
359 interrupts_restore(ipl);
360 return EPERM;
361 }
362
363 if (pages < area->pages) {
364 bool cond;
365 uintptr_t start_free = area->base + pages*PAGE_SIZE;
366
367 /*
368 * Shrinking the area.
369 * No need to check for overlaps.
370 */
371
372 /*
373 * Start TLB shootdown sequence.
374 */
375 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
376
377 /*
378 * Remove frames belonging to used space starting from
379 * the highest addresses downwards until an overlap with
380 * the resized address space area is found. Note that this
381 * is also the right way to remove part of the used_space
382 * B+tree leaf list.
383 */
384 for (cond = true; cond;) {
385 btree_node_t *node;
386
387 ASSERT(!list_empty(&area->used_space.leaf_head));
388 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
389 if ((cond = (bool) node->keys)) {
390 uintptr_t b = node->key[node->keys - 1];
391 count_t c = (count_t) node->value[node->keys - 1];
392 int i = 0;
393
394 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
395
396 if (b + c*PAGE_SIZE <= start_free) {
397 /*
398 * The whole interval fits completely
399 * in the resized address space area.
400 */
401 break;
402 }
403
404 /*
405 * Part of the interval corresponding to b and c
406 * overlaps with the resized address space area.
407 */
408
409 cond = false; /* we are almost done */
410 i = (start_free - b) >> PAGE_WIDTH;
411 if (!used_space_remove(area, start_free, c - i))
412 panic("Could not remove used space.\n");
413 } else {
414 /*
415 * The interval of used space can be completely removed.
416 */
417 if (!used_space_remove(area, b, c))
418 panic("Could not remove used space.\n");
419 }
420
421 for (; i < c; i++) {
422 pte_t *pte;
423
424 page_table_lock(as, false);
425 pte = page_mapping_find(as, b + i*PAGE_SIZE);
426 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
427 if (area->backend && area->backend->frame_free) {
428 area->backend->frame_free(area,
429 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
430 }
431 page_mapping_remove(as, b + i*PAGE_SIZE);
432 page_table_unlock(as, false);
433 }
434 }
435 }
436
437 /*
438 * Finish TLB shootdown sequence.
439 */
440 tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
441 tlb_shootdown_finalize();
442
443 /*
444 * Invalidate software translation caches (e.g. TSB on sparc64).
445 */
446 as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
447 } else {
448 /*
449 * Growing the area.
450 * Check for overlaps with other address space areas.
451 */
452 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
453 mutex_unlock(&area->lock);
454 mutex_unlock(&as->lock);
455 interrupts_restore(ipl);
456 return EADDRNOTAVAIL;
457 }
458 }
459
460 area->pages = pages;
461
462 mutex_unlock(&area->lock);
463 mutex_unlock(&as->lock);
464 interrupts_restore(ipl);
465
466 return 0;
467}
468
469/** Destroy address space area.
470 *
471 * @param as Address space.
472 * @param address Address within the area to be deleted.
473 *
474 * @return Zero on success or a value from @ref errno.h on failure.
475 */
476int as_area_destroy(as_t *as, uintptr_t address)
477{
478 as_area_t *area;
479 uintptr_t base;
480 link_t *cur;
481 ipl_t ipl;
482
483 ipl = interrupts_disable();
484 mutex_lock(&as->lock);
485
486 area = find_area_and_lock(as, address);
487 if (!area) {
488 mutex_unlock(&as->lock);
489 interrupts_restore(ipl);
490 return ENOENT;
491 }
492
493 base = area->base;
494
495 /*
496 * Start TLB shootdown sequence.
497 */
498 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
499
500 /*
501 * Visit only the pages mapped by used_space B+tree.
502 */
503 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
504 btree_node_t *node;
505 int i;
506
507 node = list_get_instance(cur, btree_node_t, leaf_link);
508 for (i = 0; i < node->keys; i++) {
509 uintptr_t b = node->key[i];
510 count_t j;
511 pte_t *pte;
512
513 for (j = 0; j < (count_t) node->value[i]; j++) {
514 page_table_lock(as, false);
515 pte = page_mapping_find(as, b + j*PAGE_SIZE);
516 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
517 if (area->backend && area->backend->frame_free) {
518 area->backend->frame_free(area,
519 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
520 }
521 page_mapping_remove(as, b + j*PAGE_SIZE);
522 page_table_unlock(as, false);
523 }
524 }
525 }
526
527 /*
528 * Finish TLB shootdown sequence.
529 */
530 tlb_invalidate_pages(as->asid, area->base, area->pages);
531 tlb_shootdown_finalize();
532
533 /*
534 * Invalidate potential software translation caches (e.g. TSB on sparc64).
535 */
536 as_invalidate_translation_cache(as, area->base, area->pages);
537
538 btree_destroy(&area->used_space);
539
540 area->attributes |= AS_AREA_ATTR_PARTIAL;
541
542 if (area->sh_info)
543 sh_info_remove_reference(area->sh_info);
544
545 mutex_unlock(&area->lock);
546
547 /*
548 * Remove the empty area from address space.
549 */
550 btree_remove(&as->as_area_btree, base, NULL);
551
552 free(area);
553
554 mutex_unlock(&as->lock);
555 interrupts_restore(ipl);
556 return 0;
557}
558
559/** Share address space area with another or the same address space.
560 *
561 * Address space area mapping is shared with a new address space area.
562 * If the source address space area has not been shared so far,
563 * a new sh_info is created. The new address space area simply gets the
564 * sh_info of the source area. The process of duplicating the
565 * mapping is done through the backend share function.
566 *
567 * @param src_as Pointer to source address space.
568 * @param src_base Base address of the source address space area.
569 * @param acc_size Expected size of the source area.
570 * @param dst_as Pointer to destination address space.
571 * @param dst_base Target base address.
572 * @param dst_flags_mask Destination address space area flags mask.
573 *
574 * @return Zero on success, ENOENT if there is no such source address space
575 * area, EPERM if there was a problem in accepting the area, ENOMEM if
576 * there was a problem in allocating the destination address space area,
577 * or ENOTSUP if the address space area backend does not support
578 * sharing.
579 */
580int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
581 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
582{
583 ipl_t ipl;
584 int src_flags;
585 size_t src_size;
586 int src_orig_color;
587 as_area_t *src_area, *dst_area;
588 share_info_t *sh_info;
589 mem_backend_t *src_backend;
590 mem_backend_data_t src_backend_data;
591
592 ipl = interrupts_disable();
593 mutex_lock(&src_as->lock);
594 src_area = find_area_and_lock(src_as, src_base);
595 if (!src_area) {
596 /*
597 * Could not find the source address space area.
598 */
599 mutex_unlock(&src_as->lock);
600 interrupts_restore(ipl);
601 return ENOENT;
602 }
603
604
605 if (!src_area->backend || !src_area->backend->share) {
606 /*
607 * There is no backend or the backend does not
608 * know how to share the area.
609 */
610 mutex_unlock(&src_area->lock);
611 mutex_unlock(&src_as->lock);
612 interrupts_restore(ipl);
613 return ENOTSUP;
614 }
615
616 src_size = src_area->pages * PAGE_SIZE;
617 src_flags = src_area->flags;
618 src_backend = src_area->backend;
619 src_backend_data = src_area->backend_data;
620 src_orig_color = src_area->orig_color;
621
622 /* Share the cacheable flag from the original mapping */
623 if (src_flags & AS_AREA_CACHEABLE)
624 dst_flags_mask |= AS_AREA_CACHEABLE;
625
626 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
627 mutex_unlock(&src_area->lock);
628 mutex_unlock(&src_as->lock);
629 interrupts_restore(ipl);
630 return EPERM;
631 }
632
633 /*
634 * Now we are committed to sharing the area.
635 * First, prepare the area for sharing.
636 * Then it will be safe to unlock it.
637 */
638 sh_info = src_area->sh_info;
639 if (!sh_info) {
640 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
641 mutex_initialize(&sh_info->lock);
642 sh_info->refcount = 2;
643 btree_create(&sh_info->pagemap);
644 src_area->sh_info = sh_info;
645 } else {
646 mutex_lock(&sh_info->lock);
647 sh_info->refcount++;
648 mutex_unlock(&sh_info->lock);
649 }
650
651 src_area->backend->share(src_area);
652
653 mutex_unlock(&src_area->lock);
654 mutex_unlock(&src_as->lock);
655
656 /*
657 * Create copy of the source address space area.
658 * The destination area is created with AS_AREA_ATTR_PARTIAL
659 * attribute set which prevents race condition with
660 * preliminary as_page_fault() calls.
661 * The flags of the source area are masked against dst_flags_mask
662 * to support sharing in less privileged mode.
663 */
664 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
665 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
666 if (!dst_area) {
667 /*
668 * Destination address space area could not be created.
669 */
670 sh_info_remove_reference(sh_info);
671
672 interrupts_restore(ipl);
673 return ENOMEM;
674 }
675
676 /*
677 * Now the destination address space area has been
678 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
679 * attribute and set the sh_info.
680 */
681 mutex_lock(&dst_as->lock);
682 mutex_lock(&dst_area->lock);
683 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
684 dst_area->sh_info = sh_info;
685 dst_area->orig_color = src_orig_color;
686#ifdef CONFIG_VIRT_IDX_DCACHE
687 if (src_orig_color != PAGE_COLOR(dst_base)) {
688 /*
689 * We have just detected an attempt to create an invalid address
690 * alias. We allow this and set a special flag that tells the
691 * architecture specific code to flush the D-cache when the
692 * offending address space is installed and deinstalled
693 * (cleanup).
694 *
695 * In order for the flags to take effect immediately, we also
696 * perform a global D-cache shootdown.
697 */
698 dcache_shootdown_start();
699 dst_as->dcache_flush_on_install = true;
700 dst_as->dcache_flush_on_deinstall = true;
701 dcache_flush();
702 dcache_shootdown_finalize();
703 }
704#endif /* CONFIG_VIRT_IDX_DCACHE */
705 mutex_unlock(&dst_area->lock);
706 mutex_unlock(&dst_as->lock);
707
708 interrupts_restore(ipl);
709
710 return 0;
711}
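/*
 * Illustrative sketch of the sharing interface above: duplicate a four-page
 * area from src_as into dst_as, allowing only read access in the
 * destination. The addresses are hypothetical and src_as is assumed to
 * already contain a shareable, readable area of that size at the source base.
 */
#if 0
static int share_sketch(as_t *src_as, as_t *dst_as)
{
	return as_area_share(src_as, 0x20000000, 4 * PAGE_SIZE,
	    dst_as, 0x30000000, AS_AREA_READ);
}
#endif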
712
713/** Check access mode for address space area.
714 *
715 * The address space area must be locked prior to this call.
716 *
717 * @param area Address space area.
718 * @param access Access mode.
719 *
720 * @return False if access violates area's permissions, true otherwise.
721 */
722bool as_area_check_access(as_area_t *area, pf_access_t access)
723{
724 int flagmap[] = {
725 [PF_ACCESS_READ] = AS_AREA_READ,
726 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
727 [PF_ACCESS_EXEC] = AS_AREA_EXEC
728 };
729
730 if (!(area->flags & flagmap[access]))
731 return false;
732
733 return true;
734}
735
736/** Handle page fault within the current address space.
737 *
738 * This is the high-level page fault handler. It decides
739 * whether the page fault can be resolved by any backend
740 * and if so, it invokes the backend to resolve the page
741 * fault.
742 *
743 * Interrupts are assumed disabled.
744 *
745 * @param page Faulting page.
746 * @param access Access mode that caused the fault (i.e. read/write/exec).
747 * @param istate Pointer to interrupted state.
748 *
749 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
750 * fault was caused by copy_to_uspace() or copy_from_uspace().
751 */
752int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
753{
754 pte_t *pte;
755 as_area_t *area;
756
757 if (!THREAD)
758 return AS_PF_FAULT;
759
760 ASSERT(AS);
761
762 mutex_lock(&AS->lock);
763 area = find_area_and_lock(AS, page);
764 if (!area) {
765 /*
766 * No area contained mapping for 'page'.
767 * Signal page fault to low-level handler.
768 */
769 mutex_unlock(&AS->lock);
770 goto page_fault;
771 }
772
773 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
774 /*
775 * The address space area is not fully initialized.
776 * Avoid possible race by returning error.
777 */
778 mutex_unlock(&area->lock);
779 mutex_unlock(&AS->lock);
780 goto page_fault;
781 }
782
783 if (!area->backend || !area->backend->page_fault) {
784 /*
785 * The address space area is not backed by any backend
786 * or the backend cannot handle page faults.
787 */
788 mutex_unlock(&area->lock);
789 mutex_unlock(&AS->lock);
790 goto page_fault;
791 }
792
793 page_table_lock(AS, false);
794
795 /*
796 * To avoid a race condition between two page faults
797 * on the same address, we need to make sure
798 * the mapping has not already been inserted.
799 */
800 if ((pte = page_mapping_find(AS, page))) {
801 if (PTE_PRESENT(pte)) {
802 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
803 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
804 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
805 page_table_unlock(AS, false);
806 mutex_unlock(&area->lock);
807 mutex_unlock(&AS->lock);
808 return AS_PF_OK;
809 }
810 }
811 }
812
813 /*
814 * Resort to the backend page fault handler.
815 */
816 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
817 page_table_unlock(AS, false);
818 mutex_unlock(&area->lock);
819 mutex_unlock(&AS->lock);
820 goto page_fault;
821 }
822
823 page_table_unlock(AS, false);
824 mutex_unlock(&area->lock);
825 mutex_unlock(&AS->lock);
826 return AS_PF_OK;
827
828page_fault:
829 if (THREAD->in_copy_from_uspace) {
830 THREAD->in_copy_from_uspace = false;
831 istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
832 } else if (THREAD->in_copy_to_uspace) {
833 THREAD->in_copy_to_uspace = false;
834 istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
835 } else {
836 return AS_PF_FAULT;
837 }
838
839 return AS_PF_DEFER;
840}
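/*
 * Illustrative sketch of how an architecture-level trap handler might hand
 * a faulting data access over to as_page_fault(). The fault_address and
 * istate parameters are hypothetical values supplied by the low-level
 * exception entry code; the panic is only a placeholder for the
 * architecture's unresolved-fault path.
 */
#if 0
static void data_fault_sketch(uintptr_t fault_address, istate_t *istate)
{
	if (as_page_fault(fault_address, PF_ACCESS_WRITE, istate) == AS_PF_FAULT)
		panic("page fault at %p could not be resolved\n", fault_address);
}
#endif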
841
842/** Switch address spaces.
843 *
844 * Note that this function cannot sleep as it is essentially a part of
845 * scheduling. Sleeping here would lead to deadlock on wakeup.
846 *
847 * @param old Old address space or NULL.
848 * @param new New address space.
849 */
850void as_switch(as_t *old, as_t *new)
851{
852 ipl_t ipl;
853 bool needs_asid = false;
854
855 ipl = interrupts_disable();
856 spinlock_lock(&inactive_as_with_asid_lock);
857
858 /*
859 * First, take care of the old address space.
860 */
861 if (old) {
862 mutex_lock_active(&old->lock);
863 ASSERT(old->cpu_refcount);
864 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
865 /*
866 * The old address space is no longer active on
867 * any processor. It can be appended to the
868 * list of inactive address spaces with assigned
869 * ASID.
870 */
871 ASSERT(old->asid != ASID_INVALID);
872 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
873 }
874 mutex_unlock(&old->lock);
875
876 /*
877 * Perform architecture-specific tasks when the address space
878 * is being removed from the CPU.
879 */
880 as_deinstall_arch(old);
881 }
882
883 /*
884 * Second, prepare the new address space.
885 */
886 mutex_lock_active(&new->lock);
887 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
888 if (new->asid != ASID_INVALID)
889 list_remove(&new->inactive_as_with_asid_link);
890 else
891 needs_asid = true; /* defer call to asid_get() until new->lock is released */
892 }
893 SET_PTL0_ADDRESS(new->page_table);
894 mutex_unlock(&new->lock);
895
896 if (needs_asid) {
897 /*
898 * Allocation of new ASID was deferred
899 * until now in order to avoid deadlock.
900 */
901 asid_t asid;
902
903 asid = asid_get();
904 mutex_lock_active(&new->lock);
905 new->asid = asid;
906 mutex_unlock(&new->lock);
907 }
908 spinlock_unlock(&inactive_as_with_asid_lock);
909 interrupts_restore(ipl);
910
911 /*
912 * Perform architecture-specific steps.
913 * (e.g. write ASID to hardware register etc.)
914 */
915 as_install_arch(new);
916
917 AS = new;
918}
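/*
 * Note: as_switch() is typically invoked from the scheduler during a
 * context switch between threads that belong to different tasks, e.g.
 * as_switch(old_task->as, new_task->as); the task names here are only
 * illustrative.
 */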
919
920/** Convert address space area flags to page flags.
921 *
922 * @param aflags Flags of some address space area.
923 *
924 * @return Flags to be passed to page_mapping_insert().
925 */
926int area_flags_to_page_flags(int aflags)
927{
928 int flags;
929
930 flags = PAGE_USER | PAGE_PRESENT;
931
932 if (aflags & AS_AREA_READ)
933 flags |= PAGE_READ;
934
935 if (aflags & AS_AREA_WRITE)
936 flags |= PAGE_WRITE;
937
938 if (aflags & AS_AREA_EXEC)
939 flags |= PAGE_EXEC;
940
941 if (aflags & AS_AREA_CACHEABLE)
942 flags |= PAGE_CACHEABLE;
943
944 return flags;
945}
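/*
 * Example of the conversion above: an area created with AS_AREA_READ |
 * AS_AREA_WRITE | AS_AREA_CACHEABLE yields PAGE_USER | PAGE_PRESENT |
 * PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE for page_mapping_insert().
 */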
946
947/** Compute flags for the virtual address translation subsystem.
948 *
949 * The address space area must be locked.
950 * Interrupts must be disabled.
951 *
952 * @param a Address space area.
953 *
954 * @return Flags to be used in page_mapping_insert().
955 */
956int as_area_get_flags(as_area_t *a)
957{
958 return area_flags_to_page_flags(a->flags);
959}
960
961/** Create page table.
962 *
963 * Depending on architecture, create either address space
964 * private or global page table.
965 *
966 * @param flags Flags saying whether the page table is for the kernel address space.
967 *
968 * @return First entry of the page table.
969 */
970pte_t *page_table_create(int flags)
971{
972 ASSERT(as_operations);
973 ASSERT(as_operations->page_table_create);
974
975 return as_operations->page_table_create(flags);
976}
977
978/** Destroy page table.
979 *
980 * Destroy page table in architecture specific way.
981 *
982 * @param page_table Physical address of PTL0.
983 */
984void page_table_destroy(pte_t *page_table)
985{
986 ASSERT(as_operations);
987 ASSERT(as_operations->page_table_destroy);
988
989 as_operations->page_table_destroy(page_table);
990}
991
992/** Lock page table.
993 *
994 * This function should be called before any page_mapping_insert(),
995 * page_mapping_remove() and page_mapping_find().
996 *
997 * Locking order is such that address space areas must be locked
998 * prior to this call. The address space can be locked prior to this
999 * call, in which case the lock argument is false.
1000 *
1001 * @param as Address space.
1002 * @param lock If false, do not attempt to lock as->lock.
1003 */
1004void page_table_lock(as_t *as, bool lock)
1005{
1006 ASSERT(as_operations);
1007 ASSERT(as_operations->page_table_lock);
1008
1009 as_operations->page_table_lock(as, lock);
1010}
1011
1012/** Unlock page table.
1013 *
1014 * @param as Address space.
1015 * @param unlock If false, do not attempt to unlock as->lock.
1016 */
1017void page_table_unlock(as_t *as, bool unlock)
1018{
1019 ASSERT(as_operations);
1020 ASSERT(as_operations->page_table_unlock);
1021
1022 as_operations->page_table_unlock(as, unlock);
1023}
1024
1025
1026/** Find address space area and lock it.
1027 *
1028 * The address space must be locked and interrupts must be disabled.
1029 *
1030 * @param as Address space.
1031 * @param va Virtual address.
1032 *
1033 * @return Locked address space area containing va on success or NULL on failure.
1034 */
1035as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1036{
1037 as_area_t *a;
1038 btree_node_t *leaf, *lnode;
1039 int i;
1040
1041 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1042 if (a) {
1043 /* va is the base address of an address space area */
1044 mutex_lock(&a->lock);
1045 return a;
1046 }
1047
1048 /*
1049 * Search the leaf node and the rightmost record of its left neighbour
1050 * to find out whether this is a miss or va belongs to an address
1051 * space area found there.
1052 */
1053
1054 /* First, search the leaf node itself. */
1055 for (i = 0; i < leaf->keys; i++) {
1056 a = (as_area_t *) leaf->value[i];
1057 mutex_lock(&a->lock);
1058 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1059 return a;
1060 }
1061 mutex_unlock(&a->lock);
1062 }
1063
1064 /*
1065 * Second, locate the left neighbour and test its last record.
1066 * Because of its position in the B+tree, it must have base < va.
1067 */
1068 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1069 a = (as_area_t *) lnode->value[lnode->keys - 1];
1070 mutex_lock(&a->lock);
1071 if (va < a->base + a->pages * PAGE_SIZE) {
1072 return a;
1073 }
1074 mutex_unlock(&a->lock);
1075 }
1076
1077 return NULL;
1078}
1079
1080/** Check area conflicts with other areas.
1081 *
1082 * The address space must be locked and interrupts must be disabled.
1083 *
1084 * @param as Address space.
1085 * @param va Starting virtual address of the area being tested.
1086 * @param size Size of the area being tested.
1087 * @param avoid_area Do not touch this area.
1088 *
1089 * @return True if there is no conflict, false otherwise.
1090 */
1091bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1092{
1093 as_area_t *a;
1094 btree_node_t *leaf, *node;
1095 int i;
1096
1097 /*
1098 * We don't want any area to have conflicts with NULL page.
1099 */
1100 if (overlaps(va, size, NULL, PAGE_SIZE))
1101 return false;
1102
1103 /*
1104 * The leaf node is found in O(log n), where n is proportional to
1105 * the number of address space areas belonging to as.
1106 * The check for conflicts is then attempted on the rightmost
1107 * record in the left neighbour, the leftmost record in the right
1108 * neighbour and all records in the leaf node itself.
1109 */
1110
1111 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1112 if (a != avoid_area)
1113 return false;
1114 }
1115
1116 /* First, check the two border cases. */
1117 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1118 a = (as_area_t *) node->value[node->keys - 1];
1119 mutex_lock(&a->lock);
1120 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1121 mutex_unlock(&a->lock);
1122 return false;
1123 }
1124 mutex_unlock(&a->lock);
1125 }
1126 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1127 a = (as_area_t *) node->value[0];
1128 mutex_lock(&a->lock);
1129 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1130 mutex_unlock(&a->lock);
1131 return false;
1132 }
1133 mutex_unlock(&a->lock);
1134 }
1135
1136 /* Second, check the leaf node. */
1137 for (i = 0; i < leaf->keys; i++) {
1138 a = (as_area_t *) leaf->value[i];
1139
1140 if (a == avoid_area)
1141 continue;
1142
1143 mutex_lock(&a->lock);
1144 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1145 mutex_unlock(&a->lock);
1146 return false;
1147 }
1148 mutex_unlock(&a->lock);
1149 }
1150
1151 /*
1152 * So far, the area does not conflict with other areas.
1153 * Check that it does not conflict with the kernel address space.
1154 */
1155 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1156 return !overlaps(va, size,
1157 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1158 }
1159
1160 return true;
1161}
1162
1163/** Return size of the address space area with given base. */
1164size_t as_get_size(uintptr_t base)
1165{
1166 ipl_t ipl;
1167 as_area_t *src_area;
1168 size_t size;
1169
1170 ipl = interrupts_disable();
1171 src_area = find_area_and_lock(AS, base);
1172 if (src_area){
1173 size = src_area->pages * PAGE_SIZE;
1174 mutex_unlock(&src_area->lock);
1175 } else {
1176 size = 0;
1177 }
1178 interrupts_restore(ipl);
1179 return size;
1180}
1181
1182/** Mark portion of address space area as used.
1183 *
1184 * The address space area must be already locked.
1185 *
1186 * @param a Address space area.
1187 * @param page First page to be marked.
1188 * @param count Number of pages to be marked.
1189 *
1190 * @return 0 on failure and 1 on success.
1191 */
1192int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1193{
1194 btree_node_t *leaf, *node;
1195 count_t pages;
1196 int i;
1197
1198 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1199 ASSERT(count);
1200
1201 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1202 if (pages) {
1203 /*
1204 * We hit the beginning of some used space.
1205 */
1206 return 0;
1207 }
1208
1209 if (!leaf->keys) {
1210 btree_insert(&a->used_space, page, (void *) count, leaf);
1211 return 1;
1212 }
1213
1214 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1215 if (node) {
1216 uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1217 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1218
1219 /*
1220 * Examine the possibility that the interval fits
1221 * somewhere between the rightmost interval of
1222 * the left neighbour and the first interval of the leaf.
1223 */
1224
1225 if (page >= right_pg) {
1226 /* Do nothing. */
1227 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1228 /* The interval intersects with the left interval. */
1229 return 0;
1230 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1231 /* The interval intersects with the right interval. */
1232 return 0;
1233 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1234 /* The interval can be added by merging the two already present intervals. */
1235 node->value[node->keys - 1] += count + right_cnt;
1236 btree_remove(&a->used_space, right_pg, leaf);
1237 return 1;
1238 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1239 /* The interval can be added by simply growing the left interval. */
1240 node->value[node->keys - 1] += count;
1241 return 1;
1242 } else if (page + count*PAGE_SIZE == right_pg) {
1243 /*
1244 * The interval can be added by simply moving the base of the right
1245 * interval down and increasing its size accordingly.
1246 */
1247 leaf->value[0] += count;
1248 leaf->key[0] = page;
1249 return 1;
1250 } else {
1251 /*
1252 * The interval is between both neighbouring intervals,
1253 * but cannot be merged with any of them.
1254 */
1255 btree_insert(&a->used_space, page, (void *) count, leaf);
1256 return 1;
1257 }
1258 } else if (page < leaf->key[0]) {
1259 uintptr_t right_pg = leaf->key[0];
1260 count_t right_cnt = (count_t) leaf->value[0];
1261
1262 /*
1263 * Investigate the border case in which the left neighbour does not
1264 * exist but the interval fits from the left.
1265 */
1266
1267 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1268 /* The interval intersects with the right interval. */
1269 return 0;
1270 } else if (page + count*PAGE_SIZE == right_pg) {
1271 /*
1272 * The interval can be added by moving the base of the right interval down
1273 * and increasing its size accordingly.
1274 */
1275 leaf->key[0] = page;
1276 leaf->value[0] += count;
1277 return 1;
1278 } else {
1279 /*
1280 * The interval doesn't adjoin with the right interval.
1281 * It must be added individually.
1282 */
1283 btree_insert(&a->used_space, page, (void *) count, leaf);
1284 return 1;
1285 }
1286 }
1287
1288 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1289 if (node) {
1290 uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1291 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1292
1293 /*
1294 * Examine the possibility that the interval fits
1295 * somewhere between the leftmost interval of
1296 * the right neighbour and the last interval of the leaf.
1297 */
1298
1299 if (page < left_pg) {
1300 /* Do nothing. */
1301 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1302 /* The interval intersects with the left interval. */
1303 return 0;
1304 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1305 /* The interval intersects with the right interval. */
1306 return 0;
1307 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1308 /* The interval can be added by merging the two already present intervals. */
1309 leaf->value[leaf->keys - 1] += count + right_cnt;
1310 btree_remove(&a->used_space, right_pg, node);
1311 return 1;
1312 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1313 /* The interval can be added by simply growing the left interval. */
1314 leaf->value[leaf->keys - 1] += count;
1315 return 1;
1316 } else if (page + count*PAGE_SIZE == right_pg) {
1317 /*
1318 * The interval can be added by simply moving the base of the right
1319 * interval down and increasing its size accordingly.
1320 */
1321 node->value[0] += count;
1322 node->key[0] = page;
1323 return 1;
1324 } else {
1325 /*
1326 * The interval is between both neighbouring intervals,
1327 * but cannot be merged with any of them.
1328 */
1329 btree_insert(&a->used_space, page, (void *) count, leaf);
1330 return 1;
1331 }
1332 } else if (page >= leaf->key[leaf->keys - 1]) {
1333 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1334 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1335
1336 /*
1337 * Investigate the border case in which the right neighbour does not
1338 * exist but the interval fits from the right.
1339 */
1340
1341 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1342 /* The interval intersects with the left interval. */
1343 return 0;
1344 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1345 /* The interval can be added by growing the left interval. */
1346 leaf->value[leaf->keys - 1] += count;
1347 return 1;
1348 } else {
1349 /*
1350 * The interval doesn't adjoin with the left interval.
1351 * It must be added individually.
1352 */
1353 btree_insert(&a->used_space, page, (void *) count, leaf);
1354 return 1;
1355 }
1356 }
1357
1358 /*
1359 * Note that if the algorithm made it thus far, the interval can fit only
1360 * between two other intervals of the leaf. The two border cases were already
1361 * resolved.
1362 */
1363 for (i = 1; i < leaf->keys; i++) {
1364 if (page < leaf->key[i]) {
1365 uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1366 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1367
1368 /*
1369 * The interval fits between left_pg and right_pg.
1370 */
1371
1372 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1373 /* The interval intersects with the left interval. */
1374 return 0;
1375 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1376 /* The interval intersects with the right interval. */
1377 return 0;
1378 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1379 /* The interval can be added by merging the two already present intervals. */
1380 leaf->value[i - 1] += count + right_cnt;
1381 btree_remove(&a->used_space, right_pg, leaf);
1382 return 1;
1383 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1384 /* The interval can be added by simply growing the left interval. */
1385 leaf->value[i - 1] += count;
1386 return 1;
1387 } else if (page + count*PAGE_SIZE == right_pg) {
1388 /*
1389 * The interval can be added by simply moving the base of the right
1390 * interval down and increasing its size accordingly.
1391 */
1392 leaf->value[i] += count;
1393 leaf->key[i] = page;
1394 return 1;
1395 } else {
1396 /*
1397 * The interval is between both neighbouring intervals,
1398 * but cannot be merged with any of them.
1399 */
1400 btree_insert(&a->used_space, page, (void *) count, leaf);
1401 return 1;
1402 }
1403 }
1404 }
1405
1406 panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
1407}
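/*
 * Worked example of the merging logic above: with intervals of 2 pages at
 * base P and 1 page at P + 3 * PAGE_SIZE already recorded, inserting 1 page
 * at P + 2 * PAGE_SIZE merges everything into a single 4-page interval
 * starting at P, whereas inserting a page that adjoins neither neighbour is
 * recorded as a new standalone interval.
 */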
1408
1409/** Mark portion of address space area as unused.
1410 *
1411 * The address space area must be already locked.
1412 *
1413 * @param a Address space area.
1414 * @param page First page to be marked.
1415 * @param count Number of pages to be marked.
1416 *
1417 * @return 0 on failure and 1 on success.
1418 */
1419int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1420{
1421 btree_node_t *leaf, *node;
1422 count_t pages;
1423 int i;
1424
1425 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1426 ASSERT(count);
1427
1428 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1429 if (pages) {
1430 /*
1431 * We are lucky, page is the beginning of some interval.
1432 */
1433 if (count > pages) {
1434 return 0;
1435 } else if (count == pages) {
1436 btree_remove(&a->used_space, page, leaf);
1437 return 1;
1438 } else {
1439 /*
1440 * Find the respective interval.
1441 * Decrease its size and relocate its start address.
1442 */
1443 for (i = 0; i < leaf->keys; i++) {
1444 if (leaf->key[i] == page) {
1445 leaf->key[i] += count*PAGE_SIZE;
1446 leaf->value[i] -= count;
1447 return 1;
1448 }
1449 }
1450 goto error;
1451 }
1452 }
1453
1454 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1455 if (node && page < leaf->key[0]) {
1456 uintptr_t left_pg = node->key[node->keys - 1];
1457 count_t left_cnt = (count_t) node->value[node->keys - 1];
1458
1459 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1460 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1461 /*
1462 * The interval is contained in the rightmost interval
1463 * of the left neighbour and can be removed by
1464 * updating the size of the bigger interval.
1465 */
1466 node->value[node->keys - 1] -= count;
1467 return 1;
1468 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1469 count_t new_cnt;
1470
1471 /*
1472 * The interval is contained in the rightmost interval
1473 * of the left neighbour but its removal requires
1474 * both updating the size of the original interval and
1475 * also inserting a new interval.
1476 */
1477 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1478 node->value[node->keys - 1] -= count + new_cnt;
1479 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1480 return 1;
1481 }
1482 }
1483 return 0;
1484 } else if (page < leaf->key[0]) {
1485 return 0;
1486 }
1487
1488 if (page > leaf->key[leaf->keys - 1]) {
1489 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1490 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1491
1492 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1493 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1494 /*
1495 * The interval is contained in the rightmost interval
1496 * of the leaf and can be removed by updating the size
1497 * of the bigger interval.
1498 */
1499 leaf->value[leaf->keys - 1] -= count;
1500 return 1;
1501 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1502 count_t new_cnt;
1503
1504 /*
1505 * The interval is contained in the rightmost interval
1506 * of the leaf but its removal requires both updating
1507 * the size of the original interval and
1508 * also inserting a new interval.
1509 */
1510 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1511 leaf->value[leaf->keys - 1] -= count + new_cnt;
1512 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1513 return 1;
1514 }
1515 }
1516 return 0;
1517 }
1518
1519 /*
1520 * The border cases have already been resolved.
1521 * Now the interval can be only between intervals of the leaf.
1522 */
1523 for (i = 1; i < leaf->keys - 1; i++) {
1524 if (page < leaf->key[i]) {
1525 uintptr_t left_pg = leaf->key[i - 1];
1526 count_t left_cnt = (count_t) leaf->value[i - 1];
1527
1528 /*
1529 * Now the interval is between intervals corresponding to (i - 1) and i.
1530 */
1531 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1532 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1533 /*
1534 * The interval is contained in the interval (i - 1)
1535 * of the leaf and can be removed by updating the size
1536 * of the bigger interval.
1537 */
1538 leaf->value[i - 1] -= count;
1539 return 1;
1540 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1541 count_t new_cnt;
1542
1543 /*
1544 * The interval is contained in the interval (i - 1)
1545 * of the leaf but its removal requires both updating
1546 * the size of the original interval and
1547 * also inserting a new interval.
1548 */
1549 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1550 leaf->value[i - 1] -= count + new_cnt;
1551 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1552 return 1;
1553 }
1554 }
1555 return 0;
1556 }
1557 }
1558
1559error:
1560 panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
1561}
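/*
 * Worked example of the removal logic above: removing pages from the tail
 * of a recorded interval merely shrinks it, whereas removing pages from its
 * middle both shrinks the original interval and inserts a new interval for
 * the still-used pages above the removed range.
 */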
1562
1563/** Remove reference to address space area share info.
1564 *
1565 * If the reference count drops to 0, the sh_info is deallocated.
1566 *
1567 * @param sh_info Pointer to address space area share info.
1568 */
1569void sh_info_remove_reference(share_info_t *sh_info)
1570{
1571 bool dealloc = false;
1572
1573 mutex_lock(&sh_info->lock);
1574 ASSERT(sh_info->refcount);
1575 if (--sh_info->refcount == 0) {
1576 dealloc = true;
1577 link_t *cur;
1578
1579 /*
1580 * Now carefully walk the pagemap B+tree and free/remove the
1581 * reference from all frames found there.
1582 */
1583 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1584 btree_node_t *node;
1585 int i;
1586
1587 node = list_get_instance(cur, btree_node_t, leaf_link);
1588 for (i = 0; i < node->keys; i++)
1589 frame_free((uintptr_t) node->value[i]);
1590 }
1591
1592 }
1593 mutex_unlock(&sh_info->lock);
1594
1595 if (dealloc) {
1596 btree_destroy(&sh_info->pagemap);
1597 free(sh_info);
1598 }
1599}
1600
1601/*
1602 * Address space related syscalls.
1603 */
1604
1605/** Wrapper for as_area_create(). */
1606unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1607{
1608 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1609 return (unative_t) address;
1610 else
1611 return (unative_t) -1;
1612}
1613
1614/** Wrapper for as_area_resize(). */
1615unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1616{
1617 return (unative_t) as_area_resize(AS, address, size, 0);
1618}
1619
1620/** Wrapper for as_area_destroy(). */
1621unative_t sys_as_area_destroy(uintptr_t address)
1622{
1623 return (unative_t) as_area_destroy(AS, address);
1624}
1625
1626/** Print out information about address space.
1627 *
1628 * @param as Address space.
1629 */
1630void as_print(as_t *as)
1631{
1632 ipl_t ipl;
1633
1634 ipl = interrupts_disable();
1635 mutex_lock(&as->lock);
1636
1637 /* print out info about address space areas */
1638 link_t *cur;
1639 for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1640 btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1641
1642 int i;
1643 for (i = 0; i < node->keys; i++) {
1644 as_area_t *area = node->value[i];
1645
1646 mutex_lock(&area->lock);
1647 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1648 area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1649 mutex_unlock(&area->lock);
1650 }
1651 }
1652
1653 mutex_unlock(&as->lock);
1654 interrupts_restore(ipl);
1655}
1656
1657/** @}
1658 */