source: mainline/kernel/generic/src/mm/as.c@ df496c5

Last change on this file since df496c5 was f8ddd17, checked in by Jakub Jermar <jakub@…>, 19 years ago

Rework support for virtually indexed cache.
Instead of repeatedly flushing the data cache, which was a huge overkill, refuse to create an illegal address alias
in the kernel (again) and allocate an appropriate page color in userspace. Extend the detection to the
SYS_PHYSMEM_MAP syscall as well.

Add support for tracking physical memory areas mappable by SYS_PHYSMEM_MAP.

Lots of coding style changes.
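
A condensed illustration of the alias check as it appears in as_area_share() below (illustrative excerpt; the unlock calls on the error path are omitted here). PAGE_COLOR extracts the cache color bits of a virtual address:

    if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
        /* Refuse to create an illegal address alias. */
        return ENOTSUP;
    }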

  • Property mode set to 100644
File size: 43.3 KB
1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
61#include <synch/mutex.h>
62#include <adt/list.h>
63#include <adt/btree.h>
64#include <proc/task.h>
65#include <proc/thread.h>
66#include <arch/asm.h>
67#include <panic.h>
68#include <debug.h>
69#include <print.h>
70#include <memstr.h>
71#include <macros.h>
72#include <arch.h>
73#include <errno.h>
74#include <config.h>
75#include <align.h>
76#include <arch/types.h>
77#include <typedefs.h>
78#include <syscall/copy.h>
79#include <arch/interrupt.h>
80
81#ifdef CONFIG_VIRT_IDX_DCACHE
82#include <arch/mm/cache.h>
83#endif /* CONFIG_VIRT_IDX_DCACHE */
84
85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
88 */
89as_operations_t *as_operations = NULL;
90
91/**
92 * Slab for as_t objects.
93 */
94static slab_cache_t *as_slab;
95
96/** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */
97SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
98
99/**
100 * This list contains address spaces that are not active on any
101 * processor and that have valid ASID.
102 */
103LIST_INITIALIZE(inactive_as_with_asid_head);
104
105/** Kernel address space. */
106as_t *AS_KERNEL = NULL;
107
108static int area_flags_to_page_flags(int aflags);
109static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
110static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
111static void sh_info_remove_reference(share_info_t *sh_info);
112
113static int as_constructor(void *obj, int flags)
114{
115 as_t *as = (as_t *) obj;
116 int rc;
117
118 link_initialize(&as->inactive_as_with_asid_link);
119 mutex_initialize(&as->lock);
120
121 rc = as_constructor_arch(as, flags);
122
123 return rc;
124}
125
126static int as_destructor(void *obj)
127{
128 as_t *as = (as_t *) obj;
129
130 return as_destructor_arch(as);
131}
132
133/** Initialize address space subsystem. */
134void as_init(void)
135{
136 as_arch_init();
137
138 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
139 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
140
141 AS_KERNEL = as_create(FLAG_AS_KERNEL);
142 if (!AS_KERNEL)
143 panic("can't create kernel address space\n");
144
145}
146
147/** Create address space.
148 *
150 * @param flags Flags that influence the way in which the address space is created.
150 */
151as_t *as_create(int flags)
152{
153 as_t *as;
154
155 as = (as_t *) slab_alloc(as_slab, 0);
156 (void) as_create_arch(as, 0);
157
158 btree_create(&as->as_area_btree);
159
160 if (flags & FLAG_AS_KERNEL)
161 as->asid = ASID_KERNEL;
162 else
163 as->asid = ASID_INVALID;
164
165 as->refcount = 0;
166 as->cpu_refcount = 0;
167 as->page_table = page_table_create(flags);
168
169 return as;
170}
171
172/** Destroy address space.
173 *
174 * When there are no tasks referencing this address space (i.e. its refcount is zero),
175 * the address space can be destroyed.
176 */
177void as_destroy(as_t *as)
178{
179 ipl_t ipl;
180 bool cond;
181
182 ASSERT(as->refcount == 0);
183
184 /*
185 * Since there is no reference to this area,
186 * it is safe not to lock its mutex.
187 */
188 ipl = interrupts_disable();
189 spinlock_lock(&inactive_as_with_asid_lock);
190 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
191 if (as != AS && as->cpu_refcount == 0)
192 list_remove(&as->inactive_as_with_asid_link);
193 asid_put(as->asid);
194 }
195 spinlock_unlock(&inactive_as_with_asid_lock);
196
197 /*
198 * Destroy address space areas of the address space.
199 * The B+tree must be walked carefully because it is
200 * also being destroyed.
201 */
202 for (cond = true; cond; ) {
203 btree_node_t *node;
204
205 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
206 node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
207
208 if ((cond = node->keys)) {
209 as_area_destroy(as, node->key[0]);
210 }
211 }
212
213 btree_destroy(&as->as_area_btree);
214 page_table_destroy(as->page_table);
215
216 interrupts_restore(ipl);
217
218 slab_free(as_slab, as);
219}
220
221/** Create address space area of common attributes.
222 *
223 * The created address space area is added to the target address space.
224 *
225 * @param as Target address space.
226 * @param flags Flags of the area memory.
227 * @param size Size of area.
228 * @param base Base address of area.
229 * @param attrs Attributes of the area.
230 * @param backend Address space area backend. NULL if no backend is used.
231 * @param backend_data NULL or a pointer to an array holding two void *.
232 *
233 * @return Address space area on success or NULL on failure.
234 */
235as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
236 mem_backend_t *backend, mem_backend_data_t *backend_data)
237{
238 ipl_t ipl;
239 as_area_t *a;
240
241 if (base % PAGE_SIZE)
242 return NULL;
243
244 if (!size)
245 return NULL;
246
247 /* Writeable executable areas are not supported. */
248 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
249 return NULL;
250
251 ipl = interrupts_disable();
252 mutex_lock(&as->lock);
253
254 if (!check_area_conflicts(as, base, size, NULL)) {
255 mutex_unlock(&as->lock);
256 interrupts_restore(ipl);
257 return NULL;
258 }
259
260 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
261
262 mutex_initialize(&a->lock);
263
264 a->as = as;
265 a->flags = flags;
266 a->attributes = attrs;
267 a->pages = SIZE2FRAMES(size);
268 a->base = base;
269 a->sh_info = NULL;
270 a->backend = backend;
271 if (backend_data)
272 a->backend_data = *backend_data;
273 else
274 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
275
276 btree_create(&a->used_space);
277
278 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
279
280 mutex_unlock(&as->lock);
281 interrupts_restore(ipl);
282
283 return a;
284}
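
/*
 * Illustrative usage of as_area_create() (a sketch, not part of the original
 * file; the base address below is hypothetical). NULL is returned when the
 * base is not page-aligned, the size is zero, the flags request a writeable
 * executable area, or the new area would conflict with an existing one:
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *	    4 * PAGE_SIZE, 0x40000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	if (!a)
 *		return ENOMEM;
 */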
285
286/** Find address space area and change it.
287 *
288 * @param as Address space.
289 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
290 * @param size New size of the virtual memory block starting at address.
291 * @param flags Flags influencing the remap operation. Currently unused.
292 *
293 * @return Zero on success or a value from @ref errno.h otherwise.
294 */
295int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
296{
297 as_area_t *area;
298 ipl_t ipl;
299 size_t pages;
300
301 ipl = interrupts_disable();
302 mutex_lock(&as->lock);
303
304 /*
305 * Locate the area.
306 */
307 area = find_area_and_lock(as, address);
308 if (!area) {
309 mutex_unlock(&as->lock);
310 interrupts_restore(ipl);
311 return ENOENT;
312 }
313
314 if (area->backend == &phys_backend) {
315 /*
316 * Remapping of address space areas associated
317 * with memory mapped devices is not supported.
318 */
319 mutex_unlock(&area->lock);
320 mutex_unlock(&as->lock);
321 interrupts_restore(ipl);
322 return ENOTSUP;
323 }
324 if (area->sh_info) {
325 /*
326 * Remapping of shared address space areas
327 * is not supported.
328 */
329 mutex_unlock(&area->lock);
330 mutex_unlock(&as->lock);
331 interrupts_restore(ipl);
332 return ENOTSUP;
333 }
334
335 pages = SIZE2FRAMES((address - area->base) + size);
336 if (!pages) {
337 /*
338 * Zero size address space areas are not allowed.
339 */
340 mutex_unlock(&area->lock);
341 mutex_unlock(&as->lock);
342 interrupts_restore(ipl);
343 return EPERM;
344 }
345
346 if (pages < area->pages) {
347 bool cond;
348 uintptr_t start_free = area->base + pages*PAGE_SIZE;
349
350 /*
351 * Shrinking the area.
352 * No need to check for overlaps.
353 */
354
355 /*
356 * Start TLB shootdown sequence.
357 */
358 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
359
360 /*
361 * Remove frames belonging to used space starting from
362 * the highest addresses downwards until an overlap with
363 * the resized address space area is found. Note that this
364 * is also the right way to remove part of the used_space
365 * B+tree leaf list.
366 */
367 for (cond = true; cond;) {
368 btree_node_t *node;
369
370 ASSERT(!list_empty(&area->used_space.leaf_head));
371 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
372 if ((cond = (bool) node->keys)) {
373 uintptr_t b = node->key[node->keys - 1];
374 count_t c = (count_t) node->value[node->keys - 1];
375 int i = 0;
376
377 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
378
379 if (b + c*PAGE_SIZE <= start_free) {
380 /*
381 * The whole interval fits completely
382 * in the resized address space area.
383 */
384 break;
385 }
386
387 /*
388 * Part of the interval corresponding to b and c
389 * overlaps with the resized address space area.
390 */
391
392 cond = false; /* we are almost done */
393 i = (start_free - b) >> PAGE_WIDTH;
394 if (!used_space_remove(area, start_free, c - i))
395 panic("Could not remove used space.\n");
396 } else {
397 /*
398 * The interval of used space can be completely removed.
399 */
400 if (!used_space_remove(area, b, c))
401 panic("Could not remove used space.\n");
402 }
403
404 for (; i < c; i++) {
405 pte_t *pte;
406
407 page_table_lock(as, false);
408 pte = page_mapping_find(as, b + i*PAGE_SIZE);
409 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
410 if (area->backend && area->backend->frame_free) {
411 area->backend->frame_free(area,
412 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
413 }
414 page_mapping_remove(as, b + i*PAGE_SIZE);
415 page_table_unlock(as, false);
416 }
417 }
418 }
419
420 /*
421 * Finish TLB shootdown sequence.
422 */
423 tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
424 tlb_shootdown_finalize();
425
426 /*
427 * Invalidate software translation caches (e.g. TSB on sparc64).
428 */
429 as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
430 } else {
431 /*
432 * Growing the area.
433 * Check for overlaps with other address space areas.
434 */
435 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
436 mutex_unlock(&area->lock);
437 mutex_unlock(&as->lock);
438 interrupts_restore(ipl);
439 return EADDRNOTAVAIL;
440 }
441 }
442
443 area->pages = pages;
444
445 mutex_unlock(&area->lock);
446 mutex_unlock(&as->lock);
447 interrupts_restore(ipl);
448
449 return 0;
450}
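
/*
 * Illustrative call (a sketch, not part of the original file; the address is
 * hypothetical): shrink the area created in the previous example to a single
 * page. Non-zero return values come from errno.h (ENOENT, ENOTSUP, EPERM,
 * EADDRNOTAVAIL).
 *
 *	int rc;
 *
 *	rc = as_area_resize(AS, 0x40000000, PAGE_SIZE, 0);
 *	if (rc != 0)
 *		return rc;
 */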
451
452/** Destroy address space area.
453 *
454 * @param as Address space.
455 * @param address Address within the area to be deleted.
456 *
457 * @return Zero on success or a value from @ref errno.h on failure.
458 */
459int as_area_destroy(as_t *as, uintptr_t address)
460{
461 as_area_t *area;
462 uintptr_t base;
463 link_t *cur;
464 ipl_t ipl;
465
466 ipl = interrupts_disable();
467 mutex_lock(&as->lock);
468
469 area = find_area_and_lock(as, address);
470 if (!area) {
471 mutex_unlock(&as->lock);
472 interrupts_restore(ipl);
473 return ENOENT;
474 }
475
476 base = area->base;
477
478 /*
479 * Start TLB shootdown sequence.
480 */
481 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
482
483 /*
484 * Visit only the pages mapped by used_space B+tree.
485 */
486 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
487 btree_node_t *node;
488 int i;
489
490 node = list_get_instance(cur, btree_node_t, leaf_link);
491 for (i = 0; i < node->keys; i++) {
492 uintptr_t b = node->key[i];
493 count_t j;
494 pte_t *pte;
495
496 for (j = 0; j < (count_t) node->value[i]; j++) {
497 page_table_lock(as, false);
498 pte = page_mapping_find(as, b + j*PAGE_SIZE);
499 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
500 if (area->backend && area->backend->frame_free) {
501 area->backend->frame_free(area,
502 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
503 }
504 page_mapping_remove(as, b + j*PAGE_SIZE);
505 page_table_unlock(as, false);
506 }
507 }
508 }
509
510 /*
511 * Finish TLB shootdown sequence.
512 */
513 tlb_invalidate_pages(as->asid, area->base, area->pages);
514 tlb_shootdown_finalize();
515
516 /*
517 * Invalidate potential software translation caches (e.g. TSB on sparc64).
518 */
519 as_invalidate_translation_cache(as, area->base, area->pages);
520
521 btree_destroy(&area->used_space);
522
523 area->attributes |= AS_AREA_ATTR_PARTIAL;
524
525 if (area->sh_info)
526 sh_info_remove_reference(area->sh_info);
527
528 mutex_unlock(&area->lock);
529
530 /*
531 * Remove the empty area from address space.
532 */
533 btree_remove(&as->as_area_btree, base, NULL);
534
535 free(area);
536
537 mutex_unlock(&as->lock);
538 interrupts_restore(ipl);
539 return 0;
540}
541
542/** Share address space area with another or the same address space.
543 *
544 * Address space area mapping is shared with a new address space area.
545 * If the source address space area has not been shared so far,
546 * a new sh_info is created. The new address space area simply gets the
547 * sh_info of the source area. The process of duplicating the
548 * mapping is done through the backend share function.
549 *
550 * @param src_as Pointer to source address space.
551 * @param src_base Base address of the source address space area.
552 * @param acc_size Expected size of the source area.
553 * @param dst_as Pointer to destination address space.
554 * @param dst_base Target base address.
555 * @param dst_flags_mask Destination address space area flags mask.
556 *
557 * @return Zero on success or ENOENT if there is no such task or if there is no
558 * such address space area, EPERM if there was a problem in accepting the area
559 * or ENOMEM if there was a problem in allocating destination address space
560 * area. ENOTSUP is returned if the address space area backend does not support
561 * sharing or if the kernel detects an attempt to create an illegal address
562 * alias.
563 */
564int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
565 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
566{
567 ipl_t ipl;
568 int src_flags;
569 size_t src_size;
570 as_area_t *src_area, *dst_area;
571 share_info_t *sh_info;
572 mem_backend_t *src_backend;
573 mem_backend_data_t src_backend_data;
574
575 ipl = interrupts_disable();
576 mutex_lock(&src_as->lock);
577 src_area = find_area_and_lock(src_as, src_base);
578 if (!src_area) {
579 /*
580 * Could not find the source address space area.
581 */
582 mutex_unlock(&src_as->lock);
583 interrupts_restore(ipl);
584 return ENOENT;
585 }
586
587 if (!src_area->backend || !src_area->backend->share) {
588 /*
589 * There is no backend or the backend does not
590 * know how to share the area.
591 */
592 mutex_unlock(&src_area->lock);
593 mutex_unlock(&src_as->lock);
594 interrupts_restore(ipl);
595 return ENOTSUP;
596 }
597
598 src_size = src_area->pages * PAGE_SIZE;
599 src_flags = src_area->flags;
600 src_backend = src_area->backend;
601 src_backend_data = src_area->backend_data;
602
603 /* Share the cacheable flag from the original mapping */
604 if (src_flags & AS_AREA_CACHEABLE)
605 dst_flags_mask |= AS_AREA_CACHEABLE;
606
607 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
608 mutex_unlock(&src_area->lock);
609 mutex_unlock(&src_as->lock);
610 interrupts_restore(ipl);
611 return EPERM;
612 }
613
614#ifdef CONFIG_VIRT_IDX_DCACHE
615 if (!(dst_flags_mask & AS_AREA_EXEC)) {
616 if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
617 /*
618 * Refuse to create an illegal address alias.
619 */
620 mutex_unlock(&src_area->lock);
621 mutex_unlock(&src_as->lock);
622 interrupts_restore(ipl);
623 return ENOTSUP;
624 }
625 }
626#endif /* CONFIG_VIRT_IDX_DCACHE */
627
628 /*
629 * Now we are committed to sharing the area.
630 * First, prepare the area for sharing.
631 * Then it will be safe to unlock it.
632 */
633 sh_info = src_area->sh_info;
634 if (!sh_info) {
635 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
636 mutex_initialize(&sh_info->lock);
637 sh_info->refcount = 2;
638 btree_create(&sh_info->pagemap);
639 src_area->sh_info = sh_info;
640 } else {
641 mutex_lock(&sh_info->lock);
642 sh_info->refcount++;
643 mutex_unlock(&sh_info->lock);
644 }
645
646 src_area->backend->share(src_area);
647
648 mutex_unlock(&src_area->lock);
649 mutex_unlock(&src_as->lock);
650
651 /*
652 * Create copy of the source address space area.
653 * The destination area is created with AS_AREA_ATTR_PARTIAL
654 * attribute set which prevents race condition with
655 * preliminary as_page_fault() calls.
656 * The flags of the source area are masked against dst_flags_mask
657 * to support sharing in less privileged mode.
658 */
659 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
660 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
661 if (!dst_area) {
662 /*
663 * Destination address space area could not be created.
664 */
665 sh_info_remove_reference(sh_info);
666
667 interrupts_restore(ipl);
668 return ENOMEM;
669 }
670
671 /*
672 * Now the destination address space area has been
673 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
674 * attribute and set the sh_info.
675 */
676 mutex_lock(&dst_as->lock);
677 mutex_lock(&dst_area->lock);
678 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
679 dst_area->sh_info = sh_info;
680 mutex_unlock(&dst_area->lock);
681 mutex_unlock(&dst_as->lock);
682
683 interrupts_restore(ipl);
684
685 return 0;
686}
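
/*
 * Illustrative call (a sketch, not part of the original file; both base
 * addresses are hypothetical and src_task/dst_task stand for task_t
 * pointers): share a read-only view of a source area with another task.
 *
 *	int rc;
 *
 *	rc = as_area_share(src_task->as, 0x40000000, 4 * PAGE_SIZE,
 *	    dst_task->as, 0x50000000, AS_AREA_READ | AS_AREA_CACHEABLE);
 *	if (rc != 0)
 *		return rc;
 */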
687
688/** Check access mode for address space area.
689 *
690 * The address space area must be locked prior to this call.
691 *
692 * @param area Address space area.
693 * @param access Access mode.
694 *
695 * @return False if access violates area's permissions, true otherwise.
696 */
697bool as_area_check_access(as_area_t *area, pf_access_t access)
698{
699 int flagmap[] = {
700 [PF_ACCESS_READ] = AS_AREA_READ,
701 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
702 [PF_ACCESS_EXEC] = AS_AREA_EXEC
703 };
704
705 if (!(area->flags & flagmap[access]))
706 return false;
707
708 return true;
709}
710
711/** Handle page fault within the current address space.
712 *
713 * This is the high-level page fault handler. It decides
714 * whether the page fault can be resolved by any backend
715 * and if so, it invokes the backend to resolve the page
716 * fault.
717 *
718 * Interrupts are assumed disabled.
719 *
720 * @param page Faulting page.
721 * @param access Access mode that caused the fault (i.e. read/write/exec).
722 * @param istate Pointer to interrupted state.
723 *
724 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
725 * fault was caused by copy_to_uspace() or copy_from_uspace().
726 */
727int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
728{
729 pte_t *pte;
730 as_area_t *area;
731
732 if (!THREAD)
733 return AS_PF_FAULT;
734
735 ASSERT(AS);
736
737 mutex_lock(&AS->lock);
738 area = find_area_and_lock(AS, page);
739 if (!area) {
740 /*
741 * No area contained a mapping for 'page'.
742 * Signal page fault to low-level handler.
743 */
744 mutex_unlock(&AS->lock);
745 goto page_fault;
746 }
747
748 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
749 /*
750 * The address space area is not fully initialized.
751 * Avoid possible race by returning error.
752 */
753 mutex_unlock(&area->lock);
754 mutex_unlock(&AS->lock);
755 goto page_fault;
756 }
757
758 if (!area->backend || !area->backend->page_fault) {
759 /*
760 * The address space area is not backed by any backend
761 * or the backend cannot handle page faults.
762 */
763 mutex_unlock(&area->lock);
764 mutex_unlock(&AS->lock);
765 goto page_fault;
766 }
767
768 page_table_lock(AS, false);
769
770 /*
771 * To avoid a race condition between two page faults
772 * on the same address, we need to make sure
773 * the mapping has not already been inserted.
774 */
775 if ((pte = page_mapping_find(AS, page))) {
776 if (PTE_PRESENT(pte)) {
777 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
778 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
779 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
780 page_table_unlock(AS, false);
781 mutex_unlock(&area->lock);
782 mutex_unlock(&AS->lock);
783 return AS_PF_OK;
784 }
785 }
786 }
787
788 /*
789 * Resort to the backend page fault handler.
790 */
791 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
792 page_table_unlock(AS, false);
793 mutex_unlock(&area->lock);
794 mutex_unlock(&AS->lock);
795 goto page_fault;
796 }
797
798 page_table_unlock(AS, false);
799 mutex_unlock(&area->lock);
800 mutex_unlock(&AS->lock);
801 return AS_PF_OK;
802
803page_fault:
804 if (THREAD->in_copy_from_uspace) {
805 THREAD->in_copy_from_uspace = false;
806 istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
807 } else if (THREAD->in_copy_to_uspace) {
808 THREAD->in_copy_to_uspace = false;
809 istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
810 } else {
811 return AS_PF_FAULT;
812 }
813
814 return AS_PF_DEFER;
815}
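
/*
 * Sketch of how an architecture's low-level fault handler is expected to
 * call as_page_fault() (illustrative only; fault_address is a placeholder
 * for the faulting virtual address supplied by the hardware):
 *
 *	if (as_page_fault(ALIGN_DOWN(fault_address, PAGE_SIZE), PF_ACCESS_READ,
 *	    istate) == AS_PF_FAULT)
 *		panic("Unresolved page fault at %p.\n", fault_address);
 */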
816
817/** Switch address spaces.
818 *
819 * Note that this function cannot sleep as it is essentially a part of
820 * scheduling. Sleeping here would lead to deadlock on wakeup.
821 *
822 * @param old Old address space or NULL.
823 * @param new New address space.
824 */
825void as_switch(as_t *old, as_t *new)
826{
827 ipl_t ipl;
828 bool needs_asid = false;
829
830 ipl = interrupts_disable();
831 spinlock_lock(&inactive_as_with_asid_lock);
832
833 /*
834 * First, take care of the old address space.
835 */
836 if (old) {
837 mutex_lock_active(&old->lock);
838 ASSERT(old->cpu_refcount);
839 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
840 /*
841 * The old address space is no longer active on
842 * any processor. It can be appended to the
843 * list of inactive address spaces with assigned
844 * ASID.
845 */
846 ASSERT(old->asid != ASID_INVALID);
847 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
848 }
849 mutex_unlock(&old->lock);
850
851 /*
852 * Perform architecture-specific tasks when the address space
853 * is being removed from the CPU.
854 */
855 as_deinstall_arch(old);
856 }
857
858 /*
859 * Second, prepare the new address space.
860 */
861 mutex_lock_active(&new->lock);
862 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
863 if (new->asid != ASID_INVALID)
864 list_remove(&new->inactive_as_with_asid_link);
865 else
866 needs_asid = true; /* defer call to asid_get() until new->lock is released */
867 }
868 SET_PTL0_ADDRESS(new->page_table);
869 mutex_unlock(&new->lock);
870
871 if (needs_asid) {
872 /*
873 * Allocation of new ASID was deferred
874 * until now in order to avoid deadlock.
875 */
876 asid_t asid;
877
878 asid = asid_get();
879 mutex_lock_active(&new->lock);
880 new->asid = asid;
881 mutex_unlock(&new->lock);
882 }
883 spinlock_unlock(&inactive_as_with_asid_lock);
884 interrupts_restore(ipl);
885
886 /*
887 * Perform architecture-specific steps.
888 * (e.g. write ASID to hardware register etc.)
889 */
890 as_install_arch(new);
891
892 AS = new;
893}
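
/*
 * Sketch of a call site (illustrative only, simplified): the scheduler
 * switches address spaces before dispatching a thread that belongs to a
 * task with a different address space.
 *
 *	as_t *new_as = THREAD->task->as;
 *
 *	if (AS != new_as)
 *		as_switch(AS, new_as);
 */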
894
895/** Convert address space area flags to page flags.
896 *
897 * @param aflags Flags of some address space area.
898 *
899 * @return Flags to be passed to page_mapping_insert().
900 */
901int area_flags_to_page_flags(int aflags)
902{
903 int flags;
904
905 flags = PAGE_USER | PAGE_PRESENT;
906
907 if (aflags & AS_AREA_READ)
908 flags |= PAGE_READ;
909
910 if (aflags & AS_AREA_WRITE)
911 flags |= PAGE_WRITE;
912
913 if (aflags & AS_AREA_EXEC)
914 flags |= PAGE_EXEC;
915
916 if (aflags & AS_AREA_CACHEABLE)
917 flags |= PAGE_CACHEABLE;
918
919 return flags;
920}
921
922/** Compute flags for virtual address translation subsystem.
923 *
924 * The address space area must be locked.
925 * Interrupts must be disabled.
926 *
927 * @param a Address space area.
928 *
929 * @return Flags to be used in page_mapping_insert().
930 */
931int as_area_get_flags(as_area_t *a)
932{
933 return area_flags_to_page_flags(a->flags);
934}
935
936/** Create page table.
937 *
938 * Depending on architecture, create either address space
939 * private or global page table.
940 *
941 * @param flags Flags saying whether the page table is for the kernel address space.
942 *
943 * @return First entry of the page table.
944 */
945pte_t *page_table_create(int flags)
946{
947 ASSERT(as_operations);
948 ASSERT(as_operations->page_table_create);
949
950 return as_operations->page_table_create(flags);
951}
952
953/** Destroy page table.
954 *
955 * Destroy the page table in an architecture-specific way.
956 *
957 * @param page_table Physical address of PTL0.
958 */
959void page_table_destroy(pte_t *page_table)
960{
961 ASSERT(as_operations);
962 ASSERT(as_operations->page_table_destroy);
963
964 as_operations->page_table_destroy(page_table);
965}
966
967/** Lock page table.
968 *
969 * This function should be called before any page_mapping_insert(),
970 * page_mapping_remove() and page_mapping_find().
971 *
972 * Locking order is such that address space areas must be locked
973 * prior to this call. Address space can be locked prior to this
974 * call in which case the lock argument is false.
975 *
976 * @param as Address space.
977 * @param lock If false, do not attempt to lock as->lock.
978 */
979void page_table_lock(as_t *as, bool lock)
980{
981 ASSERT(as_operations);
982 ASSERT(as_operations->page_table_lock);
983
984 as_operations->page_table_lock(as, lock);
985}
986
987/** Unlock page table.
988 *
989 * @param as Address space.
990 * @param unlock If false, do not attempt to unlock as->lock.
991 */
992void page_table_unlock(as_t *as, bool unlock)
993{
994 ASSERT(as_operations);
995 ASSERT(as_operations->page_table_unlock);
996
997 as_operations->page_table_unlock(as, unlock);
998}
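
/*
 * The locking pattern expected around the low-level page table operations,
 * as used elsewhere in this file ('va' stands for some virtual address):
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, va);
 *	if (pte && PTE_VALID(pte) && PTE_PRESENT(pte))
 *		page_mapping_remove(as, va);
 *	page_table_unlock(as, false);
 */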
999
1000
1001/** Find address space area and lock it.
1002 *
1003 * The address space must be locked and interrupts must be disabled.
1004 *
1005 * @param as Address space.
1006 * @param va Virtual address.
1007 *
1008 * @return Locked address space area containing va on success or NULL on failure.
1009 */
1010as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1011{
1012 as_area_t *a;
1013 btree_node_t *leaf, *lnode;
1014 int i;
1015
1016 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1017 if (a) {
1018 /* va is the base address of an address space area */
1019 mutex_lock(&a->lock);
1020 return a;
1021 }
1022
1023 /*
1024 * Search the leaf node and the rightmost record of its left neighbour
1025 * to find out whether this is a miss or va belongs to an address
1026 * space area found there.
1027 */
1028
1029 /* First, search the leaf node itself. */
1030 for (i = 0; i < leaf->keys; i++) {
1031 a = (as_area_t *) leaf->value[i];
1032 mutex_lock(&a->lock);
1033 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1034 return a;
1035 }
1036 mutex_unlock(&a->lock);
1037 }
1038
1039 /*
1040 * Second, locate the left neighbour and test its last record.
1041 * Because of its position in the B+tree, it must have base < va.
1042 */
1043 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1044 a = (as_area_t *) lnode->value[lnode->keys - 1];
1045 mutex_lock(&a->lock);
1046 if (va < a->base + a->pages * PAGE_SIZE) {
1047 return a;
1048 }
1049 mutex_unlock(&a->lock);
1050 }
1051
1052 return NULL;
1053}
1054
1055/** Check area conflicts with other areas.
1056 *
1057 * The address space must be locked and interrupts must be disabled.
1058 *
1059 * @param as Address space.
1060 * @param va Starting virtual address of the area being tested.
1061 * @param size Size of the area being tested.
1062 * @param avoid_area Do not touch this area.
1063 *
1064 * @return True if there is no conflict, false otherwise.
1065 */
1066bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1067{
1068 as_area_t *a;
1069 btree_node_t *leaf, *node;
1070 int i;
1071
1072 /*
1073 * We don't want any area to have conflicts with the NULL page.
1074 */
1075 if (overlaps(va, size, NULL, PAGE_SIZE))
1076 return false;
1077
1078 /*
1079 * The leaf node is found in O(log n), where n is proportional to
1080 * the number of address space areas belonging to as.
1081 * The check for conflicts is then attempted on the rightmost
1082 * record in the left neighbour, the leftmost record in the right
1083 * neighbour and all records in the leaf node itself.
1084 */
1085
1086 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1087 if (a != avoid_area)
1088 return false;
1089 }
1090
1091 /* First, check the two border cases. */
1092 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1093 a = (as_area_t *) node->value[node->keys - 1];
1094 mutex_lock(&a->lock);
1095 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1096 mutex_unlock(&a->lock);
1097 return false;
1098 }
1099 mutex_unlock(&a->lock);
1100 }
1101 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1102 a = (as_area_t *) node->value[0];
1103 mutex_lock(&a->lock);
1104 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1105 mutex_unlock(&a->lock);
1106 return false;
1107 }
1108 mutex_unlock(&a->lock);
1109 }
1110
1111 /* Second, check the leaf node. */
1112 for (i = 0; i < leaf->keys; i++) {
1113 a = (as_area_t *) leaf->value[i];
1114
1115 if (a == avoid_area)
1116 continue;
1117
1118 mutex_lock(&a->lock);
1119 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1120 mutex_unlock(&a->lock);
1121 return false;
1122 }
1123 mutex_unlock(&a->lock);
1124 }
1125
1126 /*
1127 * So far, the area does not conflict with other areas.
1128 * Check that it does not conflict with the kernel address space.
1129 */
1130 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1131 return !overlaps(va, size,
1132 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1133 }
1134
1135 return true;
1136}
1137
1138/** Return size of the address space area with given base. */
1139size_t as_get_size(uintptr_t base)
1140{
1141 ipl_t ipl;
1142 as_area_t *src_area;
1143 size_t size;
1144
1145 ipl = interrupts_disable();
1146 src_area = find_area_and_lock(AS, base);
1147 if (src_area) {
1148 size = src_area->pages * PAGE_SIZE;
1149 mutex_unlock(&src_area->lock);
1150 } else {
1151 size = 0;
1152 }
1153 interrupts_restore(ipl);
1154 return size;
1155}
1156
1157/** Mark portion of address space area as used.
1158 *
1159 * The address space area must be already locked.
1160 *
1161 * @param a Address space area.
1162 * @param page First page to be marked.
1163 * @param count Number of pages to be marked.
1164 *
1165 * @return 0 on failure and 1 on success.
1166 */
1167int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1168{
1169 btree_node_t *leaf, *node;
1170 count_t pages;
1171 int i;
1172
1173 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1174 ASSERT(count);
1175
1176 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1177 if (pages) {
1178 /*
1179 * We hit the beginning of some used space.
1180 */
1181 return 0;
1182 }
1183
1184 if (!leaf->keys) {
1185 btree_insert(&a->used_space, page, (void *) count, leaf);
1186 return 1;
1187 }
1188
1189 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1190 if (node) {
1191 uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1192 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1193
1194 /*
1195 * Examine the possibility that the interval fits
1196 * somewhere between the rightmost interval of
1197 * the left neighbour and the first interval of the leaf.
1198 */
1199
1200 if (page >= right_pg) {
1201 /* Do nothing. */
1202 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1203 /* The interval intersects with the left interval. */
1204 return 0;
1205 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1206 /* The interval intersects with the right interval. */
1207 return 0;
1208 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1209 /* The interval can be added by merging the two already present intervals. */
1210 node->value[node->keys - 1] += count + right_cnt;
1211 btree_remove(&a->used_space, right_pg, leaf);
1212 return 1;
1213 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1214 /* The interval can be added by simply growing the left interval. */
1215 node->value[node->keys - 1] += count;
1216 return 1;
1217 } else if (page + count*PAGE_SIZE == right_pg) {
1218 /*
1219 * The interval can be added by simply moving the base of the right
1220 * interval down and increasing its size accordingly.
1221 */
1222 leaf->value[0] += count;
1223 leaf->key[0] = page;
1224 return 1;
1225 } else {
1226 /*
1227 * The interval is between both neighbouring intervals,
1228 * but cannot be merged with any of them.
1229 */
1230 btree_insert(&a->used_space, page, (void *) count, leaf);
1231 return 1;
1232 }
1233 } else if (page < leaf->key[0]) {
1234 uintptr_t right_pg = leaf->key[0];
1235 count_t right_cnt = (count_t) leaf->value[0];
1236
1237 /*
1238 * Investigate the border case in which the left neighbour does not
1239 * exist but the interval fits from the left.
1240 */
1241
1242 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1243 /* The interval intersects with the right interval. */
1244 return 0;
1245 } else if (page + count*PAGE_SIZE == right_pg) {
1246 /*
1247 * The interval can be added by moving the base of the right interval down
1248 * and increasing its size accordingly.
1249 */
1250 leaf->key[0] = page;
1251 leaf->value[0] += count;
1252 return 1;
1253 } else {
1254 /*
1255 * The interval doesn't adjoin the right interval.
1256 * It must be added individually.
1257 */
1258 btree_insert(&a->used_space, page, (void *) count, leaf);
1259 return 1;
1260 }
1261 }
1262
1263 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1264 if (node) {
1265 uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1266 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1267
1268 /*
1269 * Examine the possibility that the interval fits
1270 * somewhere between the leftmost interval of
1271 * the right neighbour and the last interval of the leaf.
1272 */
1273
1274 if (page < left_pg) {
1275 /* Do nothing. */
1276 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1277 /* The interval intersects with the left interval. */
1278 return 0;
1279 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1280 /* The interval intersects with the right interval. */
1281 return 0;
1282 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1283 /* The interval can be added by merging the two already present intervals. */
1284 leaf->value[leaf->keys - 1] += count + right_cnt;
1285 btree_remove(&a->used_space, right_pg, node);
1286 return 1;
1287 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1288 /* The interval can be added by simply growing the left interval. */
1289 leaf->value[leaf->keys - 1] += count;
1290 return 1;
1291 } else if (page + count*PAGE_SIZE == right_pg) {
1292 /*
1293 * The interval can be added by simply moving the base of the right
1294 * interval down and increasing its size accordingly.
1295 */
1296 node->value[0] += count;
1297 node->key[0] = page;
1298 return 1;
1299 } else {
1300 /*
1301 * The interval is between both neighbouring intervals,
1302 * but cannot be merged with any of them.
1303 */
1304 btree_insert(&a->used_space, page, (void *) count, leaf);
1305 return 1;
1306 }
1307 } else if (page >= leaf->key[leaf->keys - 1]) {
1308 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1309 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1310
1311 /*
1312 * Investigate the border case in which the right neighbour does not
1313 * exist but the interval fits from the right.
1314 */
1315
1316 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1317 /* The interval intersects with the left interval. */
1318 return 0;
1319 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1320 /* The interval can be added by growing the left interval. */
1321 leaf->value[leaf->keys - 1] += count;
1322 return 1;
1323 } else {
1324 /*
1325 * The interval doesn't adjoin the left interval.
1326 * It must be added individually.
1327 */
1328 btree_insert(&a->used_space, page, (void *) count, leaf);
1329 return 1;
1330 }
1331 }
1332
1333 /*
1334 * Note that if the algorithm made it thus far, the interval can fit only
1335 * between two other intervals of the leaf. The two border cases were already
1336 * resolved.
1337 */
1338 for (i = 1; i < leaf->keys; i++) {
1339 if (page < leaf->key[i]) {
1340 uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1341 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1342
1343 /*
1344 * The interval fits between left_pg and right_pg.
1345 */
1346
1347 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1348 /* The interval intersects with the left interval. */
1349 return 0;
1350 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1351 /* The interval intersects with the right interval. */
1352 return 0;
1353 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1354 /* The interval can be added by merging the two already present intervals. */
1355 leaf->value[i - 1] += count + right_cnt;
1356 btree_remove(&a->used_space, right_pg, leaf);
1357 return 1;
1358 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1359 /* The interval can be added by simply growing the left interval. */
1360 leaf->value[i - 1] += count;
1361 return 1;
1362 } else if (page + count*PAGE_SIZE == right_pg) {
1363 /*
1364 * The interval can be added by simply moving the base of the right
1365 * interval down and increasing its size accordingly.
1366 */
1367 leaf->value[i] += count;
1368 leaf->key[i] = page;
1369 return 1;
1370 } else {
1371 /*
1372 * The interval is between both neighbouring intervals,
1373 * but cannot be merged with any of them.
1374 */
1375 btree_insert(&a->used_space, page, (void *) count, leaf);
1376 return 1;
1377 }
1378 }
1379 }
1380
1381 panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
1382}
1383
1384/** Mark portion of address space area as unused.
1385 *
1386 * The address space area must be already locked.
1387 *
1388 * @param a Address space area.
1389 * @param page First page to be marked.
1390 * @param count Number of pages to be marked.
1391 *
1392 * @return 0 on failure and 1 on success.
1393 */
1394int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1395{
1396 btree_node_t *leaf, *node;
1397 count_t pages;
1398 int i;
1399
1400 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1401 ASSERT(count);
1402
1403 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1404 if (pages) {
1405 /*
1406 * We are lucky, page is the beginning of some interval.
1407 */
1408 if (count > pages) {
1409 return 0;
1410 } else if (count == pages) {
1411 btree_remove(&a->used_space, page, leaf);
1412 return 1;
1413 } else {
1414 /*
1415 * Find the respective interval.
1416 * Decrease its size and relocate its start address.
1417 */
1418 for (i = 0; i < leaf->keys; i++) {
1419 if (leaf->key[i] == page) {
1420 leaf->key[i] += count*PAGE_SIZE;
1421 leaf->value[i] -= count;
1422 return 1;
1423 }
1424 }
1425 goto error;
1426 }
1427 }
1428
1429 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1430 if (node && page < leaf->key[0]) {
1431 uintptr_t left_pg = node->key[node->keys - 1];
1432 count_t left_cnt = (count_t) node->value[node->keys - 1];
1433
1434 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1435 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1436 /*
1437 * The interval is contained in the rightmost interval
1438 * of the left neighbour and can be removed by
1439 * updating the size of the bigger interval.
1440 */
1441 node->value[node->keys - 1] -= count;
1442 return 1;
1443 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1444 count_t new_cnt;
1445
1446 /*
1447 * The interval is contained in the rightmost interval
1448 * of the left neighbour but its removal requires
1449 * both updating the size of the original interval and
1450 * also inserting a new interval.
1451 */
1452 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1453 node->value[node->keys - 1] -= count + new_cnt;
1454 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1455 return 1;
1456 }
1457 }
1458 return 0;
1459 } else if (page < leaf->key[0]) {
1460 return 0;
1461 }
1462
1463 if (page > leaf->key[leaf->keys - 1]) {
1464 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1465 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1466
1467 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1468 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1469 /*
1470 * The interval is contained in the rightmost interval
1471 * of the leaf and can be removed by updating the size
1472 * of the bigger interval.
1473 */
1474 leaf->value[leaf->keys - 1] -= count;
1475 return 1;
1476 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1477 count_t new_cnt;
1478
1479 /*
1480 * The interval is contained in the rightmost interval
1481 * of the leaf but its removal requires both updating
1482 * the size of the original interval and
1483 * also inserting a new interval.
1484 */
1485 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1486 leaf->value[leaf->keys - 1] -= count + new_cnt;
1487 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1488 return 1;
1489 }
1490 }
1491 return 0;
1492 }
1493
1494 /*
1495 * The border cases have been already resolved.
1496 * Now the interval can be only between intervals of the leaf.
1497 */
1498 for (i = 1; i < leaf->keys - 1; i++) {
1499 if (page < leaf->key[i]) {
1500 uintptr_t left_pg = leaf->key[i - 1];
1501 count_t left_cnt = (count_t) leaf->value[i - 1];
1502
1503 /*
1504 * Now the interval is between intervals corresponding to (i - 1) and i.
1505 */
1506 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1507 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1508 /*
1509 * The interval is contained in the interval (i - 1)
1510 * of the leaf and can be removed by updating the size
1511 * of the bigger interval.
1512 */
1513 leaf->value[i - 1] -= count;
1514 return 1;
1515 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1516 count_t new_cnt;
1517
1518 /*
1519 * The interval is contained in the interval (i - 1)
1520 * of the leaf but its removal requires both updating
1521 * the size of the original interval and
1522 * also inserting a new interval.
1523 */
1524 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1525 leaf->value[i - 1] -= count + new_cnt;
1526 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1527 return 1;
1528 }
1529 }
1530 return 0;
1531 }
1532 }
1533
1534error:
1535 panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
1536}
1537
1538/** Remove reference to address space area share info.
1539 *
1540 * If the reference count drops to 0, the sh_info is deallocated.
1541 *
1542 * @param sh_info Pointer to address space area share info.
1543 */
1544void sh_info_remove_reference(share_info_t *sh_info)
1545{
1546 bool dealloc = false;
1547
1548 mutex_lock(&sh_info->lock);
1549 ASSERT(sh_info->refcount);
1550 if (--sh_info->refcount == 0) {
1551 dealloc = true;
1552 link_t *cur;
1553
1554 /*
1555 * Now walk carefully the pagemap B+tree and free/remove
1556 * reference from all frames found there.
1557 */
1558 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1559 btree_node_t *node;
1560 int i;
1561
1562 node = list_get_instance(cur, btree_node_t, leaf_link);
1563 for (i = 0; i < node->keys; i++)
1564 frame_free((uintptr_t) node->value[i]);
1565 }
1566
1567 }
1568 mutex_unlock(&sh_info->lock);
1569
1570 if (dealloc) {
1571 btree_destroy(&sh_info->pagemap);
1572 free(sh_info);
1573 }
1574}
1575
1576/*
1577 * Address space related syscalls.
1578 */
1579
1580/** Wrapper for as_area_create(). */
1581unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1582{
1583 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1584 return (unative_t) address;
1585 else
1586 return (unative_t) -1;
1587}
1588
1589/** Wrapper for as_area_resize(). */
1590unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1591{
1592 return (unative_t) as_area_resize(AS, address, size, 0);
1593}
1594
1595/** Wrapper for as_area_destroy(). */
1596unative_t sys_as_area_destroy(uintptr_t address)
1597{
1598 return (unative_t) as_area_destroy(AS, address);
1599}
1600
1601/** Print out information about address space.
1602 *
1603 * @param as Address space.
1604 */
1605void as_print(as_t *as)
1606{
1607 ipl_t ipl;
1608
1609 ipl = interrupts_disable();
1610 mutex_lock(&as->lock);
1611
1612 /* print out info about address space areas */
1613 link_t *cur;
1614 for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1615 btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1616
1617 int i;
1618 for (i = 0; i < node->keys; i++) {
1619 as_area_t *area = node->value[i];
1620
1621 mutex_lock(&area->lock);
1622 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1623 area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1624 mutex_unlock(&area->lock);
1625 }
1626 }
1627
1628 mutex_unlock(&as->lock);
1629 interrupts_restore(ipl);
1630}
1631
1632/** @}
1633 */