source: mainline/kernel/generic/src/mm/as.c@ 879585a3

Last change on this file was 879585a3, checked in by Jakub Jermar <jakub@…>, 18 years ago

Simplify synchronization in as_switch().
The function was oversynchronized, which
was causing deadlocks on the address
space mutex.

Now, address spaces can only be switched
when the asidlock is held. This also protects
stealing of ASIDs. No other synchronization
is necessary.

1/*
2 * Copyright (c) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
61#include <synch/mutex.h>
62#include <adt/list.h>
63#include <adt/btree.h>
64#include <proc/task.h>
65#include <proc/thread.h>
66#include <arch/asm.h>
67#include <panic.h>
68#include <debug.h>
69#include <print.h>
70#include <memstr.h>
71#include <macros.h>
72#include <arch.h>
73#include <errno.h>
74#include <config.h>
75#include <align.h>
76#include <arch/types.h>
77#include <syscall/copy.h>
78#include <arch/interrupt.h>
79
80#ifdef CONFIG_VIRT_IDX_DCACHE
81#include <arch/mm/cache.h>
82#endif /* CONFIG_VIRT_IDX_DCACHE */
83
84#ifndef __OBJC__
85/**
86 * Each architecture decides what functions will be used to carry out
87 * address space operations such as creating or locking page tables.
88 */
89as_operations_t *as_operations = NULL;
90
91/**
92 * Slab for as_t objects.
93 */
94static slab_cache_t *as_slab;
95#endif
96
97/**
98 * This lock serializes access to the ASID subsystem.
99 * It protects:
100 * - inactive_as_with_asid_head list
101 * - as->asid for each address space (as_t)
102 * - asids_allocated counter
103 */
104SPINLOCK_INITIALIZE(asidlock);
105
106/**
107 * This list contains address spaces that are not active on any
108 * processor and that have a valid ASID.
109 */
110LIST_INITIALIZE(inactive_as_with_asid_head);
111
112/** Kernel address space. */
113as_t *AS_KERNEL = NULL;
114
115static int area_flags_to_page_flags(int aflags);
116static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
117static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
118 as_area_t *avoid_area);
119static void sh_info_remove_reference(share_info_t *sh_info);
120
121#ifndef __OBJC__
122static int as_constructor(void *obj, int flags)
123{
124 as_t *as = (as_t *) obj;
125 int rc;
126
127 link_initialize(&as->inactive_as_with_asid_link);
128 mutex_initialize(&as->lock);
129
130 rc = as_constructor_arch(as, flags);
131
132 return rc;
133}
134
135static int as_destructor(void *obj)
136{
137 as_t *as = (as_t *) obj;
138
139 return as_destructor_arch(as);
140}
141#endif
142
143/** Initialize address space subsystem. */
144void as_init(void)
145{
146 as_arch_init();
147
148#ifndef __OBJC__
149 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
150 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
151#endif
152
153 AS_KERNEL = as_create(FLAG_AS_KERNEL);
154 if (!AS_KERNEL)
155 panic("can't create kernel address space\n");
156
157}
158
159/** Create address space.
160 *
161 * @param flags Flags that influence the way in which the address space is created.
162 */
163as_t *as_create(int flags)
164{
165 as_t *as;
166
167#ifdef __OBJC__
168 as = [as_t new];
169 link_initialize(&as->inactive_as_with_asid_link);
170 mutex_initialize(&as->lock);
171 (void) as_constructor_arch(as, flags);
172#else
173 as = (as_t *) slab_alloc(as_slab, 0);
174#endif
175 (void) as_create_arch(as, 0);
176
177 btree_create(&as->as_area_btree);
178
179 if (flags & FLAG_AS_KERNEL)
180 as->asid = ASID_KERNEL;
181 else
182 as->asid = ASID_INVALID;
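	/*
	 * A real ASID will be allocated lazily in as_switch(), when the
	 * address space is first installed on a processor.
	 */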
183
184 as->refcount = 0;
185 as->cpu_refcount = 0;
186#ifdef AS_PAGE_TABLE
187 as->genarch.page_table = page_table_create(flags);
188#else
189 page_table_create(flags);
190#endif
191
192 return as;
193}
194
195/** Destroy address space.
196 *
197 * When there are no tasks referencing this address space (i.e. its refcount is
198 * zero), the address space can be destroyed.
199 */
200void as_destroy(as_t *as)
201{
202 ipl_t ipl;
203 bool cond;
204
205 ASSERT(as->refcount == 0);
206
207 /*
208 * Since there is no reference to this address space,
209 * it is safe not to lock its mutex.
210 */
211
212 ipl = interrupts_disable();
213 spinlock_lock(&asidlock);
214 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
215 if (as != AS && as->cpu_refcount == 0)
216 list_remove(&as->inactive_as_with_asid_link);
217 asid_put(as->asid);
218 }
219 spinlock_unlock(&asidlock);
220
221 /*
222 * Destroy address space areas of the address space.
223 * The B+tree must be walked carefully because it is
224 * also being destroyed.
225 */
226 for (cond = true; cond; ) {
227 btree_node_t *node;
228
229 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
230 node = list_get_instance(as->as_area_btree.leaf_head.next,
231 btree_node_t, leaf_link);
232
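	/*
	 * The assignment in the condition is intentional: keep destroying
	 * the first area found until the first leaf node has no keys left.
	 */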
233 if ((cond = node->keys)) {
234 as_area_destroy(as, node->key[0]);
235 }
236 }
237
238 btree_destroy(&as->as_area_btree);
239#ifdef AS_PAGE_TABLE
240 page_table_destroy(as->genarch.page_table);
241#else
242 page_table_destroy(NULL);
243#endif
244
245 interrupts_restore(ipl);
246
247#ifdef __OBJC__
248 [as free];
249#else
250 slab_free(as_slab, as);
251#endif
252}
253
254/** Create address space area of common attributes.
255 *
256 * The created address space area is added to the target address space.
257 *
258 * @param as Target address space.
259 * @param flags Flags of the area memory.
260 * @param size Size of area.
261 * @param base Base address of area.
262 * @param attrs Attributes of the area.
263 * @param backend Address space area backend. NULL if no backend is used.
264 * @param backend_data NULL or a pointer to an array holding two void *.
265 *
266 * @return Address space area on success or NULL on failure.
267 */
268as_area_t *
269as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
270 mem_backend_t *backend, mem_backend_data_t *backend_data)
271{
272 ipl_t ipl;
273 as_area_t *a;
274
275 if (base % PAGE_SIZE)
276 return NULL;
277
278 if (!size)
279 return NULL;
280
281 /* Writeable executable areas are not supported. */
282 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
283 return NULL;
284
285 ipl = interrupts_disable();
286 mutex_lock(&as->lock);
287
288 if (!check_area_conflicts(as, base, size, NULL)) {
289 mutex_unlock(&as->lock);
290 interrupts_restore(ipl);
291 return NULL;
292 }
293
294 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
295
296 mutex_initialize(&a->lock);
297
298 a->as = as;
299 a->flags = flags;
300 a->attributes = attrs;
301 a->pages = SIZE2FRAMES(size);
302 a->base = base;
303 a->sh_info = NULL;
304 a->backend = backend;
305 if (backend_data)
306 a->backend_data = *backend_data;
307 else
308 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
309 0);
310
311 btree_create(&a->used_space);
312
313 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
314
315 mutex_unlock(&as->lock);
316 interrupts_restore(ipl);
317
318 return a;
319}
320
321/** Find address space area and change it.
322 *
323 * @param as Address space.
324 * @param address Virtual address belonging to the area to be changed. Must be
325 * page-aligned.
326 * @param size New size of the virtual memory block starting at address.
327 * @param flags Flags influencing the remap operation. Currently unused.
328 *
329 * @return Zero on success or a value from @ref errno.h otherwise.
330 */
331int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
332{
333 as_area_t *area;
334 ipl_t ipl;
335 size_t pages;
336
337 ipl = interrupts_disable();
338 mutex_lock(&as->lock);
339
340 /*
341 * Locate the area.
342 */
343 area = find_area_and_lock(as, address);
344 if (!area) {
345 mutex_unlock(&as->lock);
346 interrupts_restore(ipl);
347 return ENOENT;
348 }
349
350 if (area->backend == &phys_backend) {
351 /*
352 * Remapping of address space areas associated
353 * with memory mapped devices is not supported.
354 */
355 mutex_unlock(&area->lock);
356 mutex_unlock(&as->lock);
357 interrupts_restore(ipl);
358 return ENOTSUP;
359 }
360 if (area->sh_info) {
361 /*
362 * Remapping of shared address space areas
363 * is not supported.
364 */
365 mutex_unlock(&area->lock);
366 mutex_unlock(&as->lock);
367 interrupts_restore(ipl);
368 return ENOTSUP;
369 }
370
371 pages = SIZE2FRAMES((address - area->base) + size);
372 if (!pages) {
373 /*
374 * Zero size address space areas are not allowed.
375 */
376 mutex_unlock(&area->lock);
377 mutex_unlock(&as->lock);
378 interrupts_restore(ipl);
379 return EPERM;
380 }
381
382 if (pages < area->pages) {
383 bool cond;
384 uintptr_t start_free = area->base + pages*PAGE_SIZE;
385
386 /*
387 * Shrinking the area.
388 * No need to check for overlaps.
389 */
390
391 /*
392 * Start TLB shootdown sequence.
393 */
394 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
395 pages * PAGE_SIZE, area->pages - pages);
396
397 /*
398 * Remove frames belonging to used space starting from
399 * the highest addresses downwards until an overlap with
400 * the resized address space area is found. Note that this
401 * is also the right way to remove part of the used_space
402 * B+tree leaf list.
403 */
404 for (cond = true; cond;) {
405 btree_node_t *node;
406
407 ASSERT(!list_empty(&area->used_space.leaf_head));
408 node =
409 list_get_instance(area->used_space.leaf_head.prev,
410 btree_node_t, leaf_link);
411 if ((cond = (bool) node->keys)) {
412 uintptr_t b = node->key[node->keys - 1];
413 count_t c =
414 (count_t) node->value[node->keys - 1];
415 int i = 0;
416
417 if (overlaps(b, c * PAGE_SIZE, area->base,
418 pages * PAGE_SIZE)) {
419
420 if (b + c * PAGE_SIZE <= start_free) {
421 /*
422 * The whole interval fits
423 * completely in the resized
424 * address space area.
425 */
426 break;
427 }
428
429 /*
430 * Part of the interval corresponding
431 * to b and c overlaps with the resized
432 * address space area.
433 */
434
435 cond = false; /* we are almost done */
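	/*
	 * i is the number of pages of this interval that lie below
	 * start_free and therefore remain in use; pages i through
	 * c - 1 are unmapped below.
	 */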
436 i = (start_free - b) >> PAGE_WIDTH;
437 if (!used_space_remove(area, start_free,
438 c - i))
439 panic("Could not remove used "
440 "space.\n");
441 } else {
442 /*
443 * The interval of used space can be
444 * completely removed.
445 */
446 if (!used_space_remove(area, b, c))
447 panic("Could not remove used "
448 "space.\n");
449 }
450
451 for (; i < c; i++) {
452 pte_t *pte;
453
454 page_table_lock(as, false);
455 pte = page_mapping_find(as, b +
456 i * PAGE_SIZE);
457 ASSERT(pte && PTE_VALID(pte) &&
458 PTE_PRESENT(pte));
459 if (area->backend &&
460 area->backend->frame_free) {
461 area->backend->frame_free(area,
462 b + i * PAGE_SIZE,
463 PTE_GET_FRAME(pte));
464 }
465 page_mapping_remove(as, b +
466 i * PAGE_SIZE);
467 page_table_unlock(as, false);
468 }
469 }
470 }
471
472 /*
473 * Finish TLB shootdown sequence.
474 */
475 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
476 area->pages - pages);
477 tlb_shootdown_finalize();
478
479 /*
480 * Invalidate software translation caches (e.g. TSB on sparc64).
481 */
482 as_invalidate_translation_cache(as, area->base +
483 pages * PAGE_SIZE, area->pages - pages);
484 } else {
485 /*
486 * Growing the area.
487 * Check for overlaps with other address space areas.
488 */
489 if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
490 area)) {
491 mutex_unlock(&area->lock);
492 mutex_unlock(&as->lock);
493 interrupts_restore(ipl);
494 return EADDRNOTAVAIL;
495 }
496 }
497
498 area->pages = pages;
499
500 mutex_unlock(&area->lock);
501 mutex_unlock(&as->lock);
502 interrupts_restore(ipl);
503
504 return 0;
505}
506
507/** Destroy address space area.
508 *
509 * @param as Address space.
510 * @param address Address within the area to be deleted.
511 *
512 * @return Zero on success or a value from @ref errno.h on failure.
513 */
514int as_area_destroy(as_t *as, uintptr_t address)
515{
516 as_area_t *area;
517 uintptr_t base;
518 link_t *cur;
519 ipl_t ipl;
520
521 ipl = interrupts_disable();
522 mutex_lock(&as->lock);
523
524 area = find_area_and_lock(as, address);
525 if (!area) {
526 mutex_unlock(&as->lock);
527 interrupts_restore(ipl);
528 return ENOENT;
529 }
530
531 base = area->base;
532
533 /*
534 * Start TLB shootdown sequence.
535 */
536 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
537
538 /*
539 * Visit only the pages mapped by used_space B+tree.
540 */
541 for (cur = area->used_space.leaf_head.next;
542 cur != &area->used_space.leaf_head; cur = cur->next) {
543 btree_node_t *node;
544 int i;
545
546 node = list_get_instance(cur, btree_node_t, leaf_link);
547 for (i = 0; i < node->keys; i++) {
548 uintptr_t b = node->key[i];
549 count_t j;
550 pte_t *pte;
551
552 for (j = 0; j < (count_t) node->value[i]; j++) {
553 page_table_lock(as, false);
554 pte = page_mapping_find(as, b + j * PAGE_SIZE);
555 ASSERT(pte && PTE_VALID(pte) &&
556 PTE_PRESENT(pte));
557 if (area->backend &&
558 area->backend->frame_free) {
559 area->backend->frame_free(area, b +
560 j * PAGE_SIZE, PTE_GET_FRAME(pte));
561 }
562 page_mapping_remove(as, b + j * PAGE_SIZE);
563 page_table_unlock(as, false);
564 }
565 }
566 }
567
568 /*
569 * Finish TLB shootdown sequence.
570 */
571 tlb_invalidate_pages(as->asid, area->base, area->pages);
572 tlb_shootdown_finalize();
573
574 /*
575 * Invalidate potential software translation caches (e.g. TSB on
576 * sparc64).
577 */
578 as_invalidate_translation_cache(as, area->base, area->pages);
579
580 btree_destroy(&area->used_space);
581
582 area->attributes |= AS_AREA_ATTR_PARTIAL;
583
584 if (area->sh_info)
585 sh_info_remove_reference(area->sh_info);
586
587 mutex_unlock(&area->lock);
588
589 /*
590 * Remove the empty area from address space.
591 */
592 btree_remove(&as->as_area_btree, base, NULL);
593
594 free(area);
595
596 mutex_unlock(&as->lock);
597 interrupts_restore(ipl);
598 return 0;
599}
600
601/** Share address space area with another or the same address space.
602 *
603 * Address space area mapping is shared with a new address space area.
604 * If the source address space area has not been shared so far,
605 * a new sh_info is created. The new address space area simply gets the
606 * sh_info of the source area. The process of duplicating the
607 * mapping is done through the backend share function.
608 *
609 * @param src_as Pointer to source address space.
610 * @param src_base Base address of the source address space area.
611 * @param acc_size Expected size of the source area.
612 * @param dst_as Pointer to destination address space.
613 * @param dst_base Target base address.
614 * @param dst_flags_mask Destination address space area flags mask.
615 *
616 * @return Zero on success or ENOENT if there is no such task or if there is no
617 * such address space area, EPERM if there was a problem in accepting the area
618 * or ENOMEM if there was a problem in allocating destination address space
619 * area. ENOTSUP is returned if the address space area backend does not support
620 * sharing.
621 */
622int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
623 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
624{
625 ipl_t ipl;
626 int src_flags;
627 size_t src_size;
628 as_area_t *src_area, *dst_area;
629 share_info_t *sh_info;
630 mem_backend_t *src_backend;
631 mem_backend_data_t src_backend_data;
632
633 ipl = interrupts_disable();
634 mutex_lock(&src_as->lock);
635 src_area = find_area_and_lock(src_as, src_base);
636 if (!src_area) {
637 /*
638 * Could not find the source address space area.
639 */
640 mutex_unlock(&src_as->lock);
641 interrupts_restore(ipl);
642 return ENOENT;
643 }
644
645 if (!src_area->backend || !src_area->backend->share) {
646 /*
647 * There is no backend or the backend does not
648 * know how to share the area.
649 */
650 mutex_unlock(&src_area->lock);
651 mutex_unlock(&src_as->lock);
652 interrupts_restore(ipl);
653 return ENOTSUP;
654 }
655
656 src_size = src_area->pages * PAGE_SIZE;
657 src_flags = src_area->flags;
658 src_backend = src_area->backend;
659 src_backend_data = src_area->backend_data;
660
661 /* Share the cacheable flag from the original mapping */
662 if (src_flags & AS_AREA_CACHEABLE)
663 dst_flags_mask |= AS_AREA_CACHEABLE;
664
665 if (src_size != acc_size ||
666 (src_flags & dst_flags_mask) != dst_flags_mask) {
667 mutex_unlock(&src_area->lock);
668 mutex_unlock(&src_as->lock);
669 interrupts_restore(ipl);
670 return EPERM;
671 }
672
673 /*
674 * Now we are committed to sharing the area.
675 * First, prepare the area for sharing.
676 * Then it will be safe to unlock it.
677 */
678 sh_info = src_area->sh_info;
679 if (!sh_info) {
680 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
681 mutex_initialize(&sh_info->lock);
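	/*
	 * The initial reference count of 2 accounts for the source area
	 * and for the destination area that will be created below.
	 */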
682 sh_info->refcount = 2;
683 btree_create(&sh_info->pagemap);
684 src_area->sh_info = sh_info;
685 } else {
686 mutex_lock(&sh_info->lock);
687 sh_info->refcount++;
688 mutex_unlock(&sh_info->lock);
689 }
690
691 src_area->backend->share(src_area);
692
693 mutex_unlock(&src_area->lock);
694 mutex_unlock(&src_as->lock);
695
696 /*
697 * Create copy of the source address space area.
698 * The destination area is created with AS_AREA_ATTR_PARTIAL
699 * attribute set which prevents race condition with
700 * preliminary as_page_fault() calls.
701 * The flags of the source area are masked against dst_flags_mask
702 * to support sharing in less privileged mode.
703 */
704 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
705 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
706 if (!dst_area) {
707 /*
708 * Destination address space area could not be created.
709 */
710 sh_info_remove_reference(sh_info);
711
712 interrupts_restore(ipl);
713 return ENOMEM;
714 }
715
716 /*
717 * Now the destination address space area has been
718 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
719 * attribute and set the sh_info.
720 */
721 mutex_lock(&dst_as->lock);
722 mutex_lock(&dst_area->lock);
723 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
724 dst_area->sh_info = sh_info;
725 mutex_unlock(&dst_area->lock);
726 mutex_unlock(&dst_as->lock);
727
728 interrupts_restore(ipl);
729
730 return 0;
731}
732
733/** Check access mode for address space area.
734 *
735 * The address space area must be locked prior to this call.
736 *
737 * @param area Address space area.
738 * @param access Access mode.
739 *
740 * @return False if access violates area's permissions, true otherwise.
741 */
742bool as_area_check_access(as_area_t *area, pf_access_t access)
743{
744 int flagmap[] = {
745 [PF_ACCESS_READ] = AS_AREA_READ,
746 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
747 [PF_ACCESS_EXEC] = AS_AREA_EXEC
748 };
749
750 if (!(area->flags & flagmap[access]))
751 return false;
752
753 return true;
754}
755
756/** Handle page fault within the current address space.
757 *
758 * This is the high-level page fault handler. It decides
759 * whether the page fault can be resolved by any backend
760 * and if so, it invokes the backend to resolve the page
761 * fault.
762 *
763 * Interrupts are assumed disabled.
764 *
765 * @param page Faulting page.
766 * @param access Access mode that caused the fault (i.e. read/write/exec).
767 * @param istate Pointer to interrupted state.
768 *
769 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
770 * fault was caused by copy_to_uspace() or copy_from_uspace().
771 */
772int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
773{
774 pte_t *pte;
775 as_area_t *area;
776
777 if (!THREAD)
778 return AS_PF_FAULT;
779
780 ASSERT(AS);
781
782 mutex_lock(&AS->lock);
783 area = find_area_and_lock(AS, page);
784 if (!area) {
785 /*
786 * No area contained mapping for 'page'.
787 * Signal page fault to low-level handler.
788 */
789 mutex_unlock(&AS->lock);
790 goto page_fault;
791 }
792
793 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
794 /*
795 * The address space area is not fully initialized.
796 * Avoid possible race by returning error.
797 */
798 mutex_unlock(&area->lock);
799 mutex_unlock(&AS->lock);
800 goto page_fault;
801 }
802
803 if (!area->backend || !area->backend->page_fault) {
804 /*
805 * The address space area is not backed by any backend
806 * or the backend cannot handle page faults.
807 */
808 mutex_unlock(&area->lock);
809 mutex_unlock(&AS->lock);
810 goto page_fault;
811 }
812
813 page_table_lock(AS, false);
814
815 /*
816 * To avoid a race condition between two page faults
817 * on the same address, we need to make sure
818 * the mapping has not already been inserted.
819 */
820 if ((pte = page_mapping_find(AS, page))) {
821 if (PTE_PRESENT(pte)) {
822 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
823 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
824 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
825 page_table_unlock(AS, false);
826 mutex_unlock(&area->lock);
827 mutex_unlock(&AS->lock);
828 return AS_PF_OK;
829 }
830 }
831 }
832
833 /*
834 * Resort to the backend page fault handler.
835 */
836 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
837 page_table_unlock(AS, false);
838 mutex_unlock(&area->lock);
839 mutex_unlock(&AS->lock);
840 goto page_fault;
841 }
842
843 page_table_unlock(AS, false);
844 mutex_unlock(&area->lock);
845 mutex_unlock(&AS->lock);
846 return AS_PF_OK;
847
848page_fault:
849 if (THREAD->in_copy_from_uspace) {
850 THREAD->in_copy_from_uspace = false;
851 istate_set_retaddr(istate,
852 (uintptr_t) &memcpy_from_uspace_failover_address);
853 } else if (THREAD->in_copy_to_uspace) {
854 THREAD->in_copy_to_uspace = false;
855 istate_set_retaddr(istate,
856 (uintptr_t) &memcpy_to_uspace_failover_address);
857 } else {
858 return AS_PF_FAULT;
859 }
860
861 return AS_PF_DEFER;
862}
863
864/** Switch address spaces.
865 *
866 * Note that this function cannot sleep as it is essentially a part of
867 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
868 * thing which is forbidden in this context is locking the address space.
869 *
870 * @param old_as Old address space or NULL.
871 * @param new_as New address space.
872 */
873void as_switch(as_t *old_as, as_t *new_as)
874{
875 spinlock_lock(&asidlock);
876
877 /*
878 * First, take care of the old address space.
879 */
880 if (old_as) {
881 ASSERT(old_as->cpu_refcount);
882 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
883 /*
884 * The old address space is no longer active on
885 * any processor. It can be appended to the
886 * list of inactive address spaces with assigned
887 * ASID.
888 */
889 ASSERT(old_as->asid != ASID_INVALID);
890 list_append(&old_as->inactive_as_with_asid_link,
891 &inactive_as_with_asid_head);
892 }
893
894 /*
895 * Perform architecture-specific tasks when the address space
896 * is being removed from the CPU.
897 */
898 as_deinstall_arch(old_as);
899 }
900
901 /*
902 * Second, prepare the new address space.
903 */
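	/*
	 * If the new address space was not active on any processor and has
	 * no valid ASID yet, one is allocated here. Note that asid_get()
	 * may steal an ASID from an inactive address space, which is why
	 * asidlock must already be held at this point.
	 */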
904 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
905 if (new_as->asid != ASID_INVALID)
906 list_remove(&new_as->inactive_as_with_asid_link);
907 else
908 new_as->asid = asid_get();
909 }
910#ifdef AS_PAGE_TABLE
911 SET_PTL0_ADDRESS(new_as->genarch.page_table);
912#endif
913
914 /*
915 * Perform architecture-specific steps.
916 * (e.g. write ASID to hardware register etc.)
917 */
918 as_install_arch(new_as);
919
920 spinlock_unlock(&asidlock);
921
922 AS = new_as;
923}
924
925/** Convert address space area flags to page flags.
926 *
927 * @param aflags Flags of some address space area.
928 *
929 * @return Flags to be passed to page_mapping_insert().
930 */
931int area_flags_to_page_flags(int aflags)
932{
933 int flags;
934
935 flags = PAGE_USER | PAGE_PRESENT;
936
937 if (aflags & AS_AREA_READ)
938 flags |= PAGE_READ;
939
940 if (aflags & AS_AREA_WRITE)
941 flags |= PAGE_WRITE;
942
943 if (aflags & AS_AREA_EXEC)
944 flags |= PAGE_EXEC;
945
946 if (aflags & AS_AREA_CACHEABLE)
947 flags |= PAGE_CACHEABLE;
948
949 return flags;
950}
951
952/** Compute flags for virtual address translation subsystem.
953 *
954 * The address space area must be locked.
955 * Interrupts must be disabled.
956 *
957 * @param a Address space area.
958 *
959 * @return Flags to be used in page_mapping_insert().
960 */
961int as_area_get_flags(as_area_t *a)
962{
963 return area_flags_to_page_flags(a->flags);
964}
965
966/** Create page table.
967 *
968 * Depending on architecture, create either address space
969 * private or global page table.
970 *
971 * @param flags Flags saying whether the page table is for the kernel address space.
972 *
973 * @return First entry of the page table.
974 */
975pte_t *page_table_create(int flags)
976{
977#ifdef __OBJC__
978 return [as_t page_table_create: flags];
979#else
980 ASSERT(as_operations);
981 ASSERT(as_operations->page_table_create);
982
983 return as_operations->page_table_create(flags);
984#endif
985}
986
987/** Destroy page table.
988 *
989 * Destroy page table in architecture specific way.
990 *
991 * @param page_table Physical address of PTL0.
992 */
993void page_table_destroy(pte_t *page_table)
994{
995#ifdef __OBJC__
996 return [as_t page_table_destroy: page_table];
997#else
998 ASSERT(as_operations);
999 ASSERT(as_operations->page_table_destroy);
1000
1001 as_operations->page_table_destroy(page_table);
1002#endif
1003}
1004
1005/** Lock page table.
1006 *
1007 * This function should be called before any page_mapping_insert(),
1008 * page_mapping_remove() and page_mapping_find().
1009 *
1010 * Locking order is such that address space areas must be locked
1011 * prior to this call. The address space can also be locked prior to this
1012 * call, in which case the lock argument is false.
1013 *
1014 * @param as Address space.
1015 * @param lock If false, do not attempt to lock as->lock.
1016 */
1017void page_table_lock(as_t *as, bool lock)
1018{
1019#ifdef __OBJC__
1020 [as page_table_lock: lock];
1021#else
1022 ASSERT(as_operations);
1023 ASSERT(as_operations->page_table_lock);
1024
1025 as_operations->page_table_lock(as, lock);
1026#endif
1027}
1028
1029/** Unlock page table.
1030 *
1031 * @param as Address space.
1032 * @param unlock If false, do not attempt to unlock as->lock.
1033 */
1034void page_table_unlock(as_t *as, bool unlock)
1035{
1036#ifdef __OBJC__
1037 [as page_table_unlock: unlock];
1038#else
1039 ASSERT(as_operations);
1040 ASSERT(as_operations->page_table_unlock);
1041
1042 as_operations->page_table_unlock(as, unlock);
1043#endif
1044}
1045
1046
1047/** Find address space area and lock it.
1048 *
1049 * The address space must be locked and interrupts must be disabled.
1050 *
1051 * @param as Address space.
1052 * @param va Virtual address.
1053 *
1054 * @return Locked address space area containing va on success or NULL on
1055 * failure.
1056 */
1057as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1058{
1059 as_area_t *a;
1060 btree_node_t *leaf, *lnode;
1061 int i;
1062
1063 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1064 if (a) {
1065 /* va is the base address of an address space area */
1066 mutex_lock(&a->lock);
1067 return a;
1068 }
1069
1070 /*
1071 * Search the leaf node and the rightmost record of its left neighbour
1072 * to find out whether this is a miss or va belongs to an address
1073 * space area found there.
1074 */
1075
1076 /* First, search the leaf node itself. */
1077 for (i = 0; i < leaf->keys; i++) {
1078 a = (as_area_t *) leaf->value[i];
1079 mutex_lock(&a->lock);
1080 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1081 return a;
1082 }
1083 mutex_unlock(&a->lock);
1084 }
1085
1086 /*
1087 * Second, locate the left neighbour and test its last record.
1088 * Because of its position in the B+tree, it must have base < va.
1089 */
1090 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1091 if (lnode) {
1092 a = (as_area_t *) lnode->value[lnode->keys - 1];
1093 mutex_lock(&a->lock);
1094 if (va < a->base + a->pages * PAGE_SIZE) {
1095 return a;
1096 }
1097 mutex_unlock(&a->lock);
1098 }
1099
1100 return NULL;
1101}
1102
1103/** Check area conflicts with other areas.
1104 *
1105 * The address space must be locked and interrupts must be disabled.
1106 *
1107 * @param as Address space.
1108 * @param va Starting virtual address of the area being tested.
1109 * @param size Size of the area being tested.
1110 * @param avoid_area Do not touch this area.
1111 *
1112 * @return True if there is no conflict, false otherwise.
1113 */
1114bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1115 as_area_t *avoid_area)
1116{
1117 as_area_t *a;
1118 btree_node_t *leaf, *node;
1119 int i;
1120
1121 /*
1122 * We don't want any area to have conflicts with NULL page.
1123 */
1124 if (overlaps(va, size, NULL, PAGE_SIZE))
1125 return false;
1126
1127 /*
1128 * The leaf node is found in O(log n), where n is proportional to
1129 * the number of address space areas belonging to as.
1130 * The check for conflicts is then attempted on the rightmost
1131 * record in the left neighbour, the leftmost record in the right
1132 * neighbour and all records in the leaf node itself.
1133 */
1134
1135 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1136 if (a != avoid_area)
1137 return false;
1138 }
1139
1140 /* First, check the two border cases. */
1141 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1142 a = (as_area_t *) node->value[node->keys - 1];
1143 mutex_lock(&a->lock);
1144 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1145 mutex_unlock(&a->lock);
1146 return false;
1147 }
1148 mutex_unlock(&a->lock);
1149 }
1150 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1151 if (node) {
1152 a = (as_area_t *) node->value[0];
1153 mutex_lock(&a->lock);
1154 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1155 mutex_unlock(&a->lock);
1156 return false;
1157 }
1158 mutex_unlock(&a->lock);
1159 }
1160
1161 /* Second, check the leaf node. */
1162 for (i = 0; i < leaf->keys; i++) {
1163 a = (as_area_t *) leaf->value[i];
1164
1165 if (a == avoid_area)
1166 continue;
1167
1168 mutex_lock(&a->lock);
1169 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1170 mutex_unlock(&a->lock);
1171 return false;
1172 }
1173 mutex_unlock(&a->lock);
1174 }
1175
1176 /*
1177 * So far, the area does not conflict with other areas.
1178 * Check that it does not conflict with the kernel address space.
1179 */
1180 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1181 return !overlaps(va, size,
1182 KERNEL_ADDRESS_SPACE_START,
1183 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1184 }
1185
1186 return true;
1187}
1188
1189/** Return size of the address space area with given base. */
1190size_t as_get_size(uintptr_t base)
1191{
1192 ipl_t ipl;
1193 as_area_t *src_area;
1194 size_t size;
1195
1196 ipl = interrupts_disable();
1197 src_area = find_area_and_lock(AS, base);
1198 if (src_area) {
1199 size = src_area->pages * PAGE_SIZE;
1200 mutex_unlock(&src_area->lock);
1201 } else {
1202 size = 0;
1203 }
1204 interrupts_restore(ipl);
1205 return size;
1206}
1207
1208/** Mark portion of address space area as used.
1209 *
1210 * The address space area must be already locked.
1211 *
1212 * @param a Address space area.
1213 * @param page First page to be marked.
1214 * @param count Number of pages to be marked.
1215 *
1216 * @return 0 on failure and 1 on success.
1217 */
1218int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1219{
1220 btree_node_t *leaf, *node;
1221 count_t pages;
1222 int i;
1223
1224 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1225 ASSERT(count);
1226
1227 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1228 if (pages) {
1229 /*
1230 * We hit the beginning of some used space.
1231 */
1232 return 0;
1233 }
1234
1235 if (!leaf->keys) {
1236 btree_insert(&a->used_space, page, (void *) count, leaf);
1237 return 1;
1238 }
1239
1240 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1241 if (node) {
1242 uintptr_t left_pg = node->key[node->keys - 1];
1243 uintptr_t right_pg = leaf->key[0];
1244 count_t left_cnt = (count_t) node->value[node->keys - 1];
1245 count_t right_cnt = (count_t) leaf->value[0];
1246
1247 /*
1248 * Examine the possibility that the interval fits
1249 * somewhere between the rightmost interval of
1250 * the left neighbour and the first interval of the leaf.
1251 */
1252
1253 if (page >= right_pg) {
1254 /* Do nothing. */
1255 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1256 left_cnt * PAGE_SIZE)) {
1257 /* The interval intersects with the left interval. */
1258 return 0;
1259 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1260 right_cnt * PAGE_SIZE)) {
1261 /* The interval intersects with the right interval. */
1262 return 0;
1263 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1264 (page + count * PAGE_SIZE == right_pg)) {
1265 /*
1266 * The interval can be added by merging the two already
1267 * present intervals.
1268 */
1269 node->value[node->keys - 1] += count + right_cnt;
1270 btree_remove(&a->used_space, right_pg, leaf);
1271 return 1;
1272 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1273 /*
1274 * The interval can be added by simply growing the left
1275 * interval.
1276 */
1277 node->value[node->keys - 1] += count;
1278 return 1;
1279 } else if (page + count * PAGE_SIZE == right_pg) {
1280 /*
1281 * The interval can be added by simply moving the base of
1282 * the right interval down and increasing its size
1283 * accordingly.
1284 */
1285 leaf->value[0] += count;
1286 leaf->key[0] = page;
1287 return 1;
1288 } else {
1289 /*
1290 * The interval is between both neighbouring intervals,
1291 * but cannot be merged with any of them.
1292 */
1293 btree_insert(&a->used_space, page, (void *) count,
1294 leaf);
1295 return 1;
1296 }
1297 } else if (page < leaf->key[0]) {
1298 uintptr_t right_pg = leaf->key[0];
1299 count_t right_cnt = (count_t) leaf->value[0];
1300
1301 /*
1302 * Investigate the border case in which the left neighbour does
1303 * not exist but the interval fits from the left.
1304 */
1305
1306 if (overlaps(page, count * PAGE_SIZE, right_pg,
1307 right_cnt * PAGE_SIZE)) {
1308 /* The interval intersects with the right interval. */
1309 return 0;
1310 } else if (page + count * PAGE_SIZE == right_pg) {
1311 /*
1312 * The interval can be added by moving the base of the
1313 * right interval down and increasing its size
1314 * accordingly.
1315 */
1316 leaf->key[0] = page;
1317 leaf->value[0] += count;
1318 return 1;
1319 } else {
1320 /*
1321 * The interval doesn't adjoin with the right interval.
1322 * It must be added individually.
1323 */
1324 btree_insert(&a->used_space, page, (void *) count,
1325 leaf);
1326 return 1;
1327 }
1328 }
1329
1330 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1331 if (node) {
1332 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1333 uintptr_t right_pg = node->key[0];
1334 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1335 count_t right_cnt = (count_t) node->value[0];
1336
1337 /*
1338 * Examine the possibility that the interval fits
1339 * somewhere between the leftmost interval of
1340 * the right neighbour and the last interval of the leaf.
1341 */
1342
1343 if (page < left_pg) {
1344 /* Do nothing. */
1345 } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1346 left_cnt * PAGE_SIZE)) {
1347 /* The interval intersects with the left interval. */
1348 return 0;
1349 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1350 right_cnt * PAGE_SIZE)) {
1351 /* The interval intersects with the right interval. */
1352 return 0;
1353 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1354 (page + count * PAGE_SIZE == right_pg)) {
1355 /*
1356 * The interval can be added by merging the two already
1357 * present intervals.
1358 */
1359 leaf->value[leaf->keys - 1] += count + right_cnt;
1360 btree_remove(&a->used_space, right_pg, node);
1361 return 1;
1362 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1363 /*
1364 * The interval can be added by simply growing the left
1365 * interval.
1366 */
1367 leaf->value[leaf->keys - 1] += count;
1368 return 1;
1369 } else if (page + count * PAGE_SIZE == right_pg) {
1370 /*
1371 * The interval can be added by simply moving the base of
1372 * the right interval down and increasing its size
1373 * accordingly.
1374 */
1375 node->value[0] += count;
1376 node->key[0] = page;
1377 return 1;
1378 } else {
1379 /*
1380 * The interval is between both neighbouring intervals,
1381 * but cannot be merged with any of them.
1382 */
1383 btree_insert(&a->used_space, page, (void *) count,
1384 leaf);
1385 return 1;
1386 }
1387 } else if (page >= leaf->key[leaf->keys - 1]) {
1388 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1389 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1390
1391 /*
1392 * Investigate the border case in which the right neighbour
1393 * does not exist but the interval fits from the right.
1394 */
1395
1396 if (overlaps(page, count * PAGE_SIZE, left_pg,
1397 left_cnt * PAGE_SIZE)) {
1398 /* The interval intersects with the left interval. */
1399 return 0;
1400 } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1401 /*
1402 * The interval can be added by growing the left
1403 * interval.
1404 */
1405 leaf->value[leaf->keys - 1] += count;
1406 return 1;
1407 } else {
1408 /*
1409 * The interval doesn't adjoin with the left interval.
1410 * It must be added individually.
1411 */
1412 btree_insert(&a->used_space, page, (void *) count,
1413 leaf);
1414 return 1;
1415 }
1416 }
1417
1418 /*
1419 * Note that if the algorithm made it thus far, the interval can fit
1420 * only between two other intervals of the leaf. The two border cases
1421 * were already resolved.
1422 */
1423 for (i = 1; i < leaf->keys; i++) {
1424 if (page < leaf->key[i]) {
1425 uintptr_t left_pg = leaf->key[i - 1];
1426 uintptr_t right_pg = leaf->key[i];
1427 count_t left_cnt = (count_t) leaf->value[i - 1];
1428 count_t right_cnt = (count_t) leaf->value[i];
1429
1430 /*
1431 * The interval fits between left_pg and right_pg.
1432 */
1433
1434 if (overlaps(page, count * PAGE_SIZE, left_pg,
1435 left_cnt * PAGE_SIZE)) {
1436 /*
1437 * The interval intersects with the left
1438 * interval.
1439 */
1440 return 0;
1441 } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1442 right_cnt * PAGE_SIZE)) {
1443 /*
1444 * The interval intersects with the right
1445 * interval.
1446 */
1447 return 0;
1448 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1449 (page + count * PAGE_SIZE == right_pg)) {
1450 /*
1451 * The interval can be added by merging the two
1452 * already present intervals.
1453 */
1454 leaf->value[i - 1] += count + right_cnt;
1455 btree_remove(&a->used_space, right_pg, leaf);
1456 return 1;
1457 } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1458 /*
1459 * The interval can be added by simply growing
1460 * the left interval.
1461 */
1462 leaf->value[i - 1] += count;
1463 return 1;
1464 } else if (page + count * PAGE_SIZE == right_pg) {
1465 /*
1466 * The interval can be added by simply moving
1467 * the base of the right interval down and
1468 * increasing its size accordingly.
1469 */
1470 leaf->value[i] += count;
1471 leaf->key[i] = page;
1472 return 1;
1473 } else {
1474 /*
1475 * The interval is between both neighbouring
1476 * intervals, but cannot be merged with any of
1477 * them.
1478 */
1479 btree_insert(&a->used_space, page,
1480 (void *) count, leaf);
1481 return 1;
1482 }
1483 }
1484 }
1485
1486 panic("Inconsistency detected while adding %d pages of used space at "
1487 "%p.\n", count, page);
1488}
1489
1490/** Mark portion of address space area as unused.
1491 *
1492 * The address space area must be already locked.
1493 *
1494 * @param a Address space area.
1495 * @param page First page to be marked.
1496 * @param count Number of pages to be marked.
1497 *
1498 * @return 0 on failure and 1 on success.
1499 */
1500int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1501{
1502 btree_node_t *leaf, *node;
1503 count_t pages;
1504 int i;
1505
1506 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1507 ASSERT(count);
1508
1509 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1510 if (pages) {
1511 /*
1512 * We are lucky, page is the beginning of some interval.
1513 */
1514 if (count > pages) {
1515 return 0;
1516 } else if (count == pages) {
1517 btree_remove(&a->used_space, page, leaf);
1518 return 1;
1519 } else {
1520 /*
1521 * Find the respective interval.
1522 * Decrease its size and relocate its start address.
1523 */
1524 for (i = 0; i < leaf->keys; i++) {
1525 if (leaf->key[i] == page) {
1526 leaf->key[i] += count * PAGE_SIZE;
1527 leaf->value[i] -= count;
1528 return 1;
1529 }
1530 }
1531 goto error;
1532 }
1533 }
1534
1535 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1536 if (node && page < leaf->key[0]) {
1537 uintptr_t left_pg = node->key[node->keys - 1];
1538 count_t left_cnt = (count_t) node->value[node->keys - 1];
1539
1540 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1541 count * PAGE_SIZE)) {
1542 if (page + count * PAGE_SIZE ==
1543 left_pg + left_cnt * PAGE_SIZE) {
1544 /*
1545 * The interval is contained in the rightmost
1546 * interval of the left neighbour and can be
1547 * removed by updating the size of the bigger
1548 * interval.
1549 */
1550 node->value[node->keys - 1] -= count;
1551 return 1;
1552 } else if (page + count * PAGE_SIZE <
1553 left_pg + left_cnt*PAGE_SIZE) {
1554 count_t new_cnt;
1555
1556 /*
1557 * The interval is contained in the rightmost
1558 * interval of the left neighbour but its
1559 * removal requires both updating the size of
1560 * the original interval and also inserting a
1561 * new interval.
1562 */
1563 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1564 (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1565 node->value[node->keys - 1] -= count + new_cnt;
1566 btree_insert(&a->used_space, page +
1567 count * PAGE_SIZE, (void *) new_cnt, leaf);
1568 return 1;
1569 }
1570 }
1571 return 0;
1572 } else if (page < leaf->key[0]) {
1573 return 0;
1574 }
1575
1576 if (page > leaf->key[leaf->keys - 1]) {
1577 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1578 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1579
1580 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1581 count * PAGE_SIZE)) {
1582 if (page + count * PAGE_SIZE ==
1583 left_pg + left_cnt * PAGE_SIZE) {
1584 /*
1585 * The interval is contained in the rightmost
1586 * interval of the leaf and can be removed by
1587 * updating the size of the bigger interval.
1588 */
1589 leaf->value[leaf->keys - 1] -= count;
1590 return 1;
1591 } else if (page + count * PAGE_SIZE < left_pg +
1592 left_cnt * PAGE_SIZE) {
1593 count_t new_cnt;
1594
1595 /*
1596 * The interval is contained in the rightmost
1597 * interval of the leaf but its removal
1598 * requires both updating the size of the
1599 * original interval and also inserting a new
1600 * interval.
1601 */
1602 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1603 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1604 leaf->value[leaf->keys - 1] -= count + new_cnt;
1605 btree_insert(&a->used_space, page +
1606 count * PAGE_SIZE, (void *) new_cnt, leaf);
1607 return 1;
1608 }
1609 }
1610 return 0;
1611 }
1612
1613 /*
1614 * The border cases have been already resolved.
1615 * Now the interval can be only between intervals of the leaf.
1616 */
1617 for (i = 1; i < leaf->keys - 1; i++) {
1618 if (page < leaf->key[i]) {
1619 uintptr_t left_pg = leaf->key[i - 1];
1620 count_t left_cnt = (count_t) leaf->value[i - 1];
1621
1622 /*
1623 * Now the interval is between intervals corresponding
1624 * to (i - 1) and i.
1625 */
1626 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1627 count * PAGE_SIZE)) {
1628 if (page + count * PAGE_SIZE ==
1629 left_pg + left_cnt*PAGE_SIZE) {
1630 /*
1631 * The interval is contained in the
1632 * interval (i - 1) of the leaf and can
1633 * be removed by updating the size of
1634 * the bigger interval.
1635 */
1636 leaf->value[i - 1] -= count;
1637 return 1;
1638 } else if (page + count * PAGE_SIZE <
1639 left_pg + left_cnt * PAGE_SIZE) {
1640 count_t new_cnt;
1641
1642 /*
1643 * The interval is contained in the
1644 * interval (i - 1) of the leaf but its
1645 * removal requires both updating the
1646 * size of the original interval and
1647 * also inserting a new interval.
1648 */
1649 new_cnt = ((left_pg +
1650 left_cnt * PAGE_SIZE) -
1651 (page + count * PAGE_SIZE)) >>
1652 PAGE_WIDTH;
1653 leaf->value[i - 1] -= count + new_cnt;
1654 btree_insert(&a->used_space, page +
1655 count * PAGE_SIZE, (void *) new_cnt,
1656 leaf);
1657 return 1;
1658 }
1659 }
1660 return 0;
1661 }
1662 }
1663
1664error:
1665 panic("Inconsistency detected while removing %d pages of used space "
1666 "from %p.\n", count, page);
1667}
1668
1669/** Remove reference to address space area share info.
1670 *
1671 * If the reference count drops to 0, the sh_info is deallocated.
1672 *
1673 * @param sh_info Pointer to address space area share info.
1674 */
1675void sh_info_remove_reference(share_info_t *sh_info)
1676{
1677 bool dealloc = false;
1678
1679 mutex_lock(&sh_info->lock);
1680 ASSERT(sh_info->refcount);
1681 if (--sh_info->refcount == 0) {
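		/*
		 * Mark sh_info for deallocation; the actual freeing is
		 * deferred until after its mutex is released below.
		 */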
1682 dealloc = true;
1683 link_t *cur;
1684
1685 /*
1686 * Now walk carefully the pagemap B+tree and free/remove
1687 * reference from all frames found there.
1688 */
1689 for (cur = sh_info->pagemap.leaf_head.next;
1690 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1691 btree_node_t *node;
1692 int i;
1693
1694 node = list_get_instance(cur, btree_node_t, leaf_link);
1695 for (i = 0; i < node->keys; i++)
1696 frame_free((uintptr_t) node->value[i]);
1697 }
1698
1699 }
1700 mutex_unlock(&sh_info->lock);
1701
1702 if (dealloc) {
1703 btree_destroy(&sh_info->pagemap);
1704 free(sh_info);
1705 }
1706}
1707
1708/*
1709 * Address space related syscalls.
1710 */
1711
1712/** Wrapper for as_area_create(). */
1713unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1714{
1715 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1716 AS_AREA_ATTR_NONE, &anon_backend, NULL))
1717 return (unative_t) address;
1718 else
1719 return (unative_t) -1;
1720}
1721
1722/** Wrapper for as_area_resize(). */
1723unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1724{
1725 return (unative_t) as_area_resize(AS, address, size, 0);
1726}
1727
1728/** Wrapper for as_area_destroy(). */
1729unative_t sys_as_area_destroy(uintptr_t address)
1730{
1731 return (unative_t) as_area_destroy(AS, address);
1732}
1733
1734/** Print out information about address space.
1735 *
1736 * @param as Address space.
1737 */
1738void as_print(as_t *as)
1739{
1740 ipl_t ipl;
1741
1742 ipl = interrupts_disable();
1743 mutex_lock(&as->lock);
1744
1745 /* print out info about address space areas */
1746 link_t *cur;
1747 for (cur = as->as_area_btree.leaf_head.next;
1748 cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1749 btree_node_t *node;
1750
1751 node = list_get_instance(cur, btree_node_t, leaf_link);
1752
1753 int i;
1754 for (i = 0; i < node->keys; i++) {
1755 as_area_t *area = node->value[i];
1756
1757 mutex_lock(&area->lock);
1758 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1759 area, area->base, area->pages, area->base,
1760 area->base + area->pages*PAGE_SIZE);
1761 mutex_unlock(&area->lock);
1762 }
1763 }
1764
1765 mutex_unlock(&as->lock);
1766 interrupts_restore(ipl);
1767}
1768
1769/** @}
1770 */