source: mainline/kernel/generic/src/mm/as.c@ 7ba7c6d

Last change on this file since 7ba7c6d was 7ba7c6d, checked in by Jakub Jermar <jakub@…>, 19 years ago

A quote from the SPARC V9 specification:

The Y register is deprecated; it is provided only for compatibility with previous versions
of the architecture. It should not be used in new SPARC-V9 software. It is
recommended that all instructions that reference the Y register (i.e., SMUL,
SMULcc, UMUL, UMULcc, MULScc, SDIV, SDIVcc, UDIV, UDIVcc, RDY, and
WRY) be avoided. See the appropriate pages in Appendix A, “Instruction Definitions,”
for suitable substitute instructions.

Still gcc is generating code which uses Y and some of the instructions above.
This change modifies the preemptible_handler() to preserve the Y register
across preemption.
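
A minimal sketch of the idea (the actual change lives in the sparc64 preemptible_handler() trap code; the local variable below is illustrative only):

    uint64_t saved_y;
    /* Save the Y register before the higher-level, preemptible part of the
       trap handler runs and possibly switches to another thread. */
    asm volatile ("rd %%y, %0" : "=r" (saved_y));
    /* ... the preemptible handler runs here ... */
    asm volatile ("wr %0, 0, %%y" : : "r" (saved_y));  /* restore Y */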

1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Address space related functions.
36 *
37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * Virtual Address Translation (VAT) subsystem.
40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
47 */
48
49#include <mm/as.h>
50#include <arch/mm/as.h>
51#include <mm/page.h>
52#include <mm/frame.h>
53#include <mm/slab.h>
54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
57#include <genarch/mm/page_ht.h>
58#include <mm/asid.h>
59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
61#include <synch/mutex.h>
62#include <adt/list.h>
63#include <adt/btree.h>
64#include <proc/task.h>
65#include <proc/thread.h>
66#include <arch/asm.h>
67#include <panic.h>
68#include <debug.h>
69#include <print.h>
70#include <memstr.h>
71#include <macros.h>
72#include <arch.h>
73#include <errno.h>
74#include <config.h>
75#include <align.h>
76#include <arch/types.h>
77#include <typedefs.h>
78#include <syscall/copy.h>
79#include <arch/interrupt.h>
80
81/**
82 * Each architecture decides what functions will be used to carry out
83 * address space operations such as creating or locking page tables.
84 */
85as_operations_t *as_operations = NULL;
86
87/**
88 * Slab for as_t objects.
89 */
90static slab_cache_t *as_slab;
91
 92/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
93SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
94
95/**
96 * This list contains address spaces that are not active on any
 97 * processor and that have a valid ASID.
98 */
99LIST_INITIALIZE(inactive_as_with_asid_head);
100
101/** Kernel address space. */
102as_t *AS_KERNEL = NULL;
103
104static int area_flags_to_page_flags(int aflags);
105static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
106static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
107static void sh_info_remove_reference(share_info_t *sh_info);
108
109static int as_constructor(void *obj, int flags)
110{
111 as_t *as = (as_t *) obj;
112 int rc;
113
114 link_initialize(&as->inactive_as_with_asid_link);
115 mutex_initialize(&as->lock);
116
117 rc = as_constructor_arch(as, flags);
118
119 return rc;
120}
121
122static int as_destructor(void *obj)
123{
124 as_t *as = (as_t *) obj;
125
126 return as_destructor_arch(as);
127}
128
129/** Initialize address space subsystem. */
130void as_init(void)
131{
132 as_arch_init();
133
134 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
135 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
136
137 AS_KERNEL = as_create(FLAG_AS_KERNEL);
138 if (!AS_KERNEL)
139 panic("can't create kernel address space\n");
140
141}
142
143/** Create address space.
144 *
 145 * @param flags Flags that influence the way in which the address space is created.
146 */
147as_t *as_create(int flags)
148{
149 as_t *as;
150
151 as = (as_t *) slab_alloc(as_slab, 0);
152 (void) as_create_arch(as, 0);
153
154 btree_create(&as->as_area_btree);
155
156 if (flags & FLAG_AS_KERNEL)
157 as->asid = ASID_KERNEL;
158 else
159 as->asid = ASID_INVALID;
160
161 as->refcount = 0;
162 as->cpu_refcount = 0;
163 as->page_table = page_table_create(flags);
164
165 return as;
166}
167
 168/** Destroy address space.
169 *
170 * When there are no tasks referencing this address space (i.e. its refcount is zero),
171 * the address space can be destroyed.
172 */
173void as_destroy(as_t *as)
174{
175 ipl_t ipl;
176 bool cond;
177
178 ASSERT(as->refcount == 0);
179
180 /*
 181 * Since there is no reference to this address space,
182 * it is safe not to lock its mutex.
183 */
184 ipl = interrupts_disable();
185 spinlock_lock(&inactive_as_with_asid_lock);
186 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
187 if (as != AS && as->cpu_refcount == 0)
188 list_remove(&as->inactive_as_with_asid_link);
189 asid_put(as->asid);
190 }
191 spinlock_unlock(&inactive_as_with_asid_lock);
192
193 /*
194 * Destroy address space areas of the address space.
 195 * The B+tree must be walked carefully because it is
196 * also being destroyed.
197 */
198 for (cond = true; cond; ) {
199 btree_node_t *node;
200
201 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
202 node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
203
204 if ((cond = node->keys)) {
205 as_area_destroy(as, node->key[0]);
206 }
207 }
208
209 btree_destroy(&as->as_area_btree);
210 page_table_destroy(as->page_table);
211
212 interrupts_restore(ipl);
213
214 slab_free(as_slab, as);
215}
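/*
 * Typical use (a sketch only; the reference counting is done by the task
 * code, and the exact locking there is an assumption): the address space
 * is destroyed after the last task using it drops its reference.
 *
 *	mutex_lock(&as->lock);
 *	destroy = (--as->refcount == 0);
 *	mutex_unlock(&as->lock);
 *	if (destroy)
 *		as_destroy(as);
 */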
216
217/** Create address space area of common attributes.
218 *
219 * The created address space area is added to the target address space.
220 *
221 * @param as Target address space.
222 * @param flags Flags of the area memory.
223 * @param size Size of area.
224 * @param base Base address of area.
225 * @param attrs Attributes of the area.
226 * @param backend Address space area backend. NULL if no backend is used.
227 * @param backend_data NULL or a pointer to an array holding two void *.
228 *
229 * @return Address space area on success or NULL on failure.
230 */
231as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
232 mem_backend_t *backend, mem_backend_data_t *backend_data)
233{
234 ipl_t ipl;
235 as_area_t *a;
236
237 if (base % PAGE_SIZE)
238 return NULL;
239
240 if (!size)
241 return NULL;
242
243 /* Writeable executable areas are not supported. */
244 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
245 return NULL;
246
247 ipl = interrupts_disable();
248 mutex_lock(&as->lock);
249
250 if (!check_area_conflicts(as, base, size, NULL)) {
251 mutex_unlock(&as->lock);
252 interrupts_restore(ipl);
253 return NULL;
254 }
255
256 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
257
258 mutex_initialize(&a->lock);
259
260 a->as = as;
261 a->flags = flags;
262 a->attributes = attrs;
263 a->pages = SIZE2FRAMES(size);
264 a->base = base;
265 a->sh_info = NULL;
266 a->backend = backend;
267 if (backend_data)
268 a->backend_data = *backend_data;
269 else
270 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
271
272 btree_create(&a->used_space);
273
274 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
275
276 mutex_unlock(&as->lock);
277 interrupts_restore(ipl);
278
279 return a;
280}
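/*
 * Example (a sketch, assuming the anonymous backend and an arbitrary
 * page-aligned base): create a 16 KiB cacheable read/write area in
 * address space 'as'.
 *
 *	as_area_t *a = as_area_create(as,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *	    4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	if (!a)
 *		(NULL means a misaligned base, zero size, a writable and
 *		executable flag combination or a conflict with another area)
 */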
281
282/** Find address space area and change it.
283 *
284 * @param as Address space.
285 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
286 * @param size New size of the virtual memory block starting at address.
287 * @param flags Flags influencing the remap operation. Currently unused.
288 *
289 * @return Zero on success or a value from @ref errno.h otherwise.
290 */
291int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
292{
293 as_area_t *area;
294 ipl_t ipl;
295 size_t pages;
296
297 ipl = interrupts_disable();
298 mutex_lock(&as->lock);
299
300 /*
301 * Locate the area.
302 */
303 area = find_area_and_lock(as, address);
304 if (!area) {
305 mutex_unlock(&as->lock);
306 interrupts_restore(ipl);
307 return ENOENT;
308 }
309
310 if (area->backend == &phys_backend) {
311 /*
312 * Remapping of address space areas associated
313 * with memory mapped devices is not supported.
314 */
315 mutex_unlock(&area->lock);
316 mutex_unlock(&as->lock);
317 interrupts_restore(ipl);
318 return ENOTSUP;
319 }
320 if (area->sh_info) {
321 /*
322 * Remapping of shared address space areas
323 * is not supported.
324 */
325 mutex_unlock(&area->lock);
326 mutex_unlock(&as->lock);
327 interrupts_restore(ipl);
328 return ENOTSUP;
329 }
330
331 pages = SIZE2FRAMES((address - area->base) + size);
332 if (!pages) {
333 /*
334 * Zero size address space areas are not allowed.
335 */
336 mutex_unlock(&area->lock);
337 mutex_unlock(&as->lock);
338 interrupts_restore(ipl);
339 return EPERM;
340 }
341
342 if (pages < area->pages) {
343 bool cond;
344 uintptr_t start_free = area->base + pages*PAGE_SIZE;
345
346 /*
347 * Shrinking the area.
348 * No need to check for overlaps.
349 */
350
351 /*
352 * Start TLB shootdown sequence.
353 */
354 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
355
356 /*
357 * Remove frames belonging to used space starting from
358 * the highest addresses downwards until an overlap with
359 * the resized address space area is found. Note that this
360 * is also the right way to remove part of the used_space
361 * B+tree leaf list.
362 */
363 for (cond = true; cond;) {
364 btree_node_t *node;
365
366 ASSERT(!list_empty(&area->used_space.leaf_head));
367 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
368 if ((cond = (bool) node->keys)) {
369 uintptr_t b = node->key[node->keys - 1];
370 count_t c = (count_t) node->value[node->keys - 1];
371 int i = 0;
372
373 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
374
375 if (b + c*PAGE_SIZE <= start_free) {
376 /*
377 * The whole interval fits completely
378 * in the resized address space area.
379 */
380 break;
381 }
382
383 /*
384 * Part of the interval corresponding to b and c
385 * overlaps with the resized address space area.
386 */
387
388 cond = false; /* we are almost done */
389 i = (start_free - b) >> PAGE_WIDTH;
390 if (!used_space_remove(area, start_free, c - i))
391 panic("Could not remove used space.\n");
392 } else {
393 /*
394 * The interval of used space can be completely removed.
395 */
396 if (!used_space_remove(area, b, c))
397 panic("Could not remove used space.\n");
398 }
399
400 for (; i < c; i++) {
401 pte_t *pte;
402
403 page_table_lock(as, false);
404 pte = page_mapping_find(as, b + i*PAGE_SIZE);
405 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
406 if (area->backend && area->backend->frame_free) {
407 area->backend->frame_free(area,
408 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
409 }
410 page_mapping_remove(as, b + i*PAGE_SIZE);
411 page_table_unlock(as, false);
412 }
413 }
414 }
415
416 /*
417 * Finish TLB shootdown sequence.
418 */
419 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
420 tlb_shootdown_finalize();
421
422 /*
423 * Invalidate software translation caches (e.g. TSB on sparc64).
424 */
425 as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
426 } else {
427 /*
428 * Growing the area.
429 * Check for overlaps with other address space areas.
430 */
431 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
432 mutex_unlock(&area->lock);
433 mutex_unlock(&as->lock);
434 interrupts_restore(ipl);
435 return EADDRNOTAVAIL;
436 }
437 }
438
439 area->pages = pages;
440
441 mutex_unlock(&area->lock);
442 mutex_unlock(&as->lock);
443 interrupts_restore(ipl);
444
445 return 0;
446}
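/*
 * Example (sketch): grow an existing anonymous area by one page; the
 * address passed in is the area base and the new size is measured from it.
 *
 *	int rc = as_area_resize(as, area_base, old_size + PAGE_SIZE, 0);
 *	(rc is ENOTSUP for physically backed or shared areas, EPERM for a
 *	zero new size and EADDRNOTAVAIL on a conflict with another area)
 */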
447
448/** Destroy address space area.
449 *
450 * @param as Address space.
 451 * @param address Address within the area to be deleted.
452 *
453 * @return Zero on success or a value from @ref errno.h on failure.
454 */
455int as_area_destroy(as_t *as, uintptr_t address)
456{
457 as_area_t *area;
458 uintptr_t base;
459 link_t *cur;
460 ipl_t ipl;
461
462 ipl = interrupts_disable();
463 mutex_lock(&as->lock);
464
465 area = find_area_and_lock(as, address);
466 if (!area) {
467 mutex_unlock(&as->lock);
468 interrupts_restore(ipl);
469 return ENOENT;
470 }
471
472 base = area->base;
473
474 /*
475 * Start TLB shootdown sequence.
476 */
477 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
478
479 /*
 480 * Visit only the pages mapped by the used_space B+tree.
481 */
482 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
483 btree_node_t *node;
484 int i;
485
486 node = list_get_instance(cur, btree_node_t, leaf_link);
487 for (i = 0; i < node->keys; i++) {
488 uintptr_t b = node->key[i];
489 count_t j;
490 pte_t *pte;
491
492 for (j = 0; j < (count_t) node->value[i]; j++) {
493 page_table_lock(as, false);
494 pte = page_mapping_find(as, b + j*PAGE_SIZE);
495 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
496 if (area->backend && area->backend->frame_free) {
497 area->backend->frame_free(area,
498 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
499 }
500 page_mapping_remove(as, b + j*PAGE_SIZE);
501 page_table_unlock(as, false);
502 }
503 }
504 }
505
506 /*
507 * Finish TLB shootdown sequence.
508 */
509 tlb_invalidate_pages(as->asid, area->base, area->pages);
510 tlb_shootdown_finalize();
511
512 /*
513 * Invalidate potential software translation caches (e.g. TSB on sparc64).
514 */
515 as_invalidate_translation_cache(as, area->base, area->pages);
516
517 btree_destroy(&area->used_space);
518
519 area->attributes |= AS_AREA_ATTR_PARTIAL;
520
521 if (area->sh_info)
522 sh_info_remove_reference(area->sh_info);
523
524 mutex_unlock(&area->lock);
525
526 /*
527 * Remove the empty area from address space.
528 */
529 btree_remove(&as->as_area_btree, base, NULL);
530
531 free(area);
532
533 mutex_unlock(&as->lock);
534 interrupts_restore(ipl);
535 return 0;
536}
537
538/** Share address space area with another or the same address space.
539 *
540 * Address space area mapping is shared with a new address space area.
541 * If the source address space area has not been shared so far,
542 * a new sh_info is created. The new address space area simply gets the
543 * sh_info of the source area. The process of duplicating the
544 * mapping is done through the backend share function.
545 *
546 * @param src_as Pointer to source address space.
547 * @param src_base Base address of the source address space area.
548 * @param acc_size Expected size of the source area.
549 * @param dst_as Pointer to destination address space.
550 * @param dst_base Target base address.
551 * @param dst_flags_mask Destination address space area flags mask.
552 *
 553 * @return Zero on success, ENOENT if there is no such task
 554 * or no such address space area,
 555 * EPERM if there was a problem in accepting the area,
 556 * ENOMEM if there was a problem in allocating the destination
 557 * address space area, or ENOTSUP if an attempt
 558 * to share a non-anonymous address space area is detected.
559 */
560int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
561 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
562{
563 ipl_t ipl;
564 int src_flags;
565 size_t src_size;
566 as_area_t *src_area, *dst_area;
567 share_info_t *sh_info;
568 mem_backend_t *src_backend;
569 mem_backend_data_t src_backend_data;
570
571 ipl = interrupts_disable();
572 mutex_lock(&src_as->lock);
573 src_area = find_area_and_lock(src_as, src_base);
574 if (!src_area) {
575 /*
576 * Could not find the source address space area.
577 */
578 mutex_unlock(&src_as->lock);
579 interrupts_restore(ipl);
580 return ENOENT;
581 }
582
583 if (!src_area->backend || !src_area->backend->share) {
584 /*
585 * There is no backend or the backend does not
586 * know how to share the area.
587 */
588 mutex_unlock(&src_area->lock);
589 mutex_unlock(&src_as->lock);
590 interrupts_restore(ipl);
591 return ENOTSUP;
592 }
593
594 src_size = src_area->pages * PAGE_SIZE;
595 src_flags = src_area->flags;
596 src_backend = src_area->backend;
597 src_backend_data = src_area->backend_data;
598
599 /* Share the cacheable flag from the original mapping */
600 if (src_flags & AS_AREA_CACHEABLE)
601 dst_flags_mask |= AS_AREA_CACHEABLE;
602
603 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
604 mutex_unlock(&src_area->lock);
605 mutex_unlock(&src_as->lock);
606 interrupts_restore(ipl);
607 return EPERM;
608 }
609
610 /*
611 * Now we are committed to sharing the area.
612 * First prepare the area for sharing.
613 * Then it will be safe to unlock it.
614 */
615 sh_info = src_area->sh_info;
616 if (!sh_info) {
617 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
618 mutex_initialize(&sh_info->lock);
619 sh_info->refcount = 2;
620 btree_create(&sh_info->pagemap);
621 src_area->sh_info = sh_info;
622 } else {
623 mutex_lock(&sh_info->lock);
624 sh_info->refcount++;
625 mutex_unlock(&sh_info->lock);
626 }
627
628 src_area->backend->share(src_area);
629
630 mutex_unlock(&src_area->lock);
631 mutex_unlock(&src_as->lock);
632
633 /*
634 * Create copy of the source address space area.
635 * The destination area is created with AS_AREA_ATTR_PARTIAL
636 * attribute set which prevents race condition with
637 * preliminary as_page_fault() calls.
638 * The flags of the source area are masked against dst_flags_mask
639 * to support sharing in less privileged mode.
640 */
641 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
642 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
643 if (!dst_area) {
644 /*
645 * Destination address space area could not be created.
646 */
647 sh_info_remove_reference(sh_info);
648
649 interrupts_restore(ipl);
650 return ENOMEM;
651 }
652
653 /*
654 * Now the destination address space area has been
655 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
656 * attribute and set the sh_info.
657 */
658 mutex_lock(&dst_area->lock);
659 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
660 dst_area->sh_info = sh_info;
661 mutex_unlock(&dst_area->lock);
662
663 interrupts_restore(ipl);
664
665 return 0;
666}
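/*
 * Example (sketch; the destination pointer and addresses are assumptions):
 * share an anonymous area from the current address space into another
 * task's address space, read-only.
 *
 *	int rc = as_area_share(AS, src_base, src_size, dst_task->as,
 *	    dst_base, AS_AREA_READ | AS_AREA_CACHEABLE);
 *	(ENOENT, ENOTSUP, EPERM or ENOMEM indicate which step failed,
 *	as described above)
 */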
667
668/** Check access mode for address space area.
669 *
670 * The address space area must be locked prior to this call.
671 *
672 * @param area Address space area.
673 * @param access Access mode.
674 *
675 * @return False if access violates area's permissions, true otherwise.
676 */
677bool as_area_check_access(as_area_t *area, pf_access_t access)
678{
679 int flagmap[] = {
680 [PF_ACCESS_READ] = AS_AREA_READ,
681 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
682 [PF_ACCESS_EXEC] = AS_AREA_EXEC
683 };
684
685 if (!(area->flags & flagmap[access]))
686 return false;
687
688 return true;
689}
690
691/** Handle page fault within the current address space.
692 *
693 * This is the high-level page fault handler. It decides
694 * whether the page fault can be resolved by any backend
695 * and if so, it invokes the backend to resolve the page
696 * fault.
697 *
698 * Interrupts are assumed disabled.
699 *
700 * @param page Faulting page.
701 * @param access Access mode that caused the fault (i.e. read/write/exec).
702 * @param istate Pointer to interrupted state.
703 *
704 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
705 * fault was caused by copy_to_uspace() or copy_from_uspace().
706 */
707int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
708{
709 pte_t *pte;
710 as_area_t *area;
711
712 if (!THREAD)
713 return AS_PF_FAULT;
714
715 ASSERT(AS);
716
717 mutex_lock(&AS->lock);
718 area = find_area_and_lock(AS, page);
719 if (!area) {
720 /*
721 * No area contained mapping for 'page'.
722 * Signal page fault to low-level handler.
723 */
724 mutex_unlock(&AS->lock);
725 goto page_fault;
726 }
727
728 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
729 /*
730 * The address space area is not fully initialized.
731 * Avoid possible race by returning error.
732 */
733 mutex_unlock(&area->lock);
734 mutex_unlock(&AS->lock);
735 goto page_fault;
736 }
737
738 if (!area->backend || !area->backend->page_fault) {
739 /*
740 * The address space area is not backed by any backend
741 * or the backend cannot handle page faults.
742 */
743 mutex_unlock(&area->lock);
744 mutex_unlock(&AS->lock);
745 goto page_fault;
746 }
747
748 page_table_lock(AS, false);
749
750 /*
 751 * To avoid a race condition between two page faults
 752 * on the same address, we need to make sure
 753 * the mapping has not already been inserted.
754 */
755 if ((pte = page_mapping_find(AS, page))) {
756 if (PTE_PRESENT(pte)) {
757 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
758 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
759 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
760 page_table_unlock(AS, false);
761 mutex_unlock(&area->lock);
762 mutex_unlock(&AS->lock);
763 return AS_PF_OK;
764 }
765 }
766 }
767
768 /*
769 * Resort to the backend page fault handler.
770 */
771 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
772 page_table_unlock(AS, false);
773 mutex_unlock(&area->lock);
774 mutex_unlock(&AS->lock);
775 goto page_fault;
776 }
777
778 page_table_unlock(AS, false);
779 mutex_unlock(&area->lock);
780 mutex_unlock(&AS->lock);
781 return AS_PF_OK;
782
783page_fault:
784 if (THREAD->in_copy_from_uspace) {
785 THREAD->in_copy_from_uspace = false;
786 istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
787 } else if (THREAD->in_copy_to_uspace) {
788 THREAD->in_copy_to_uspace = false;
789 istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
790 } else {
791 return AS_PF_FAULT;
792 }
793
794 return AS_PF_DEFER;
795}
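/*
 * Sketch of a typical caller: a low-level fault handler passes the
 * page-aligned faulting address and access type here and treats
 * AS_PF_FAULT as fatal (the local variable names are assumptions).
 *
 *	if (as_page_fault(ALIGN_DOWN(fault_va, PAGE_SIZE), PF_ACCESS_WRITE,
 *	    istate) == AS_PF_FAULT)
 *		panic("Unhandled page fault at %p.\n", fault_va);
 */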
796
797/** Switch address spaces.
798 *
799 * Note that this function cannot sleep as it is essentially a part of
800 * scheduling. Sleeping here would lead to deadlock on wakeup.
801 *
802 * @param old Old address space or NULL.
803 * @param new New address space.
804 */
805void as_switch(as_t *old, as_t *new)
806{
807 ipl_t ipl;
808 bool needs_asid = false;
809
810 ipl = interrupts_disable();
811 spinlock_lock(&inactive_as_with_asid_lock);
812
813 /*
814 * First, take care of the old address space.
815 */
816 if (old) {
817 mutex_lock_active(&old->lock);
818 ASSERT(old->cpu_refcount);
819 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
820 /*
821 * The old address space is no longer active on
822 * any processor. It can be appended to the
823 * list of inactive address spaces with assigned
824 * ASID.
825 */
826 ASSERT(old->asid != ASID_INVALID);
827 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
828 }
829 mutex_unlock(&old->lock);
830
831 /*
832 * Perform architecture-specific tasks when the address space
833 * is being removed from the CPU.
834 */
835 as_deinstall_arch(old);
836 }
837
838 /*
839 * Second, prepare the new address space.
840 */
841 mutex_lock_active(&new->lock);
842 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
843 if (new->asid != ASID_INVALID)
844 list_remove(&new->inactive_as_with_asid_link);
845 else
846 needs_asid = true; /* defer call to asid_get() until new->lock is released */
847 }
848 SET_PTL0_ADDRESS(new->page_table);
849 mutex_unlock(&new->lock);
850
851 if (needs_asid) {
852 /*
853 * Allocation of new ASID was deferred
854 * until now in order to avoid deadlock.
855 */
856 asid_t asid;
857
858 asid = asid_get();
859 mutex_lock_active(&new->lock);
860 new->asid = asid;
861 mutex_unlock(&new->lock);
862 }
863 spinlock_unlock(&inactive_as_with_asid_lock);
864 interrupts_restore(ipl);
865
866 /*
867 * Perform architecture-specific steps.
868 * (e.g. write ASID to hardware register etc.)
869 */
870 as_install_arch(new);
871
872 AS = new;
873}
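/*
 * Sketch of the intended caller (member names taken as an assumption):
 * the scheduler switches address spaces when the thread selected to run
 * belongs to a task with a different address space.
 *
 *	if (TASK->as != THREAD->task->as)
 *		as_switch(TASK->as, THREAD->task->as);
 */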
874
875/** Convert address space area flags to page flags.
876 *
877 * @param aflags Flags of some address space area.
878 *
879 * @return Flags to be passed to page_mapping_insert().
880 */
881int area_flags_to_page_flags(int aflags)
882{
883 int flags;
884
885 flags = PAGE_USER | PAGE_PRESENT;
886
887 if (aflags & AS_AREA_READ)
888 flags |= PAGE_READ;
889
890 if (aflags & AS_AREA_WRITE)
891 flags |= PAGE_WRITE;
892
893 if (aflags & AS_AREA_EXEC)
894 flags |= PAGE_EXEC;
895
896 if (aflags & AS_AREA_CACHEABLE)
897 flags |= PAGE_CACHEABLE;
898
899 return flags;
900}
901
 902/** Compute flags for virtual address translation subsystem.
903 *
904 * The address space area must be locked.
905 * Interrupts must be disabled.
906 *
907 * @param a Address space area.
908 *
909 * @return Flags to be used in page_mapping_insert().
910 */
911int as_area_get_flags(as_area_t *a)
912{
913 return area_flags_to_page_flags(a->flags);
914}
915
916/** Create page table.
917 *
918 * Depending on architecture, create either address space
919 * private or global page table.
920 *
921 * @param flags Flags saying whether the page table is for kernel address space.
922 *
923 * @return First entry of the page table.
924 */
925pte_t *page_table_create(int flags)
926{
927 ASSERT(as_operations);
928 ASSERT(as_operations->page_table_create);
929
930 return as_operations->page_table_create(flags);
931}
932
933/** Destroy page table.
934 *
935 * Destroy page table in architecture specific way.
936 *
937 * @param page_table Physical address of PTL0.
938 */
939void page_table_destroy(pte_t *page_table)
940{
941 ASSERT(as_operations);
942 ASSERT(as_operations->page_table_destroy);
943
944 as_operations->page_table_destroy(page_table);
945}
946
947/** Lock page table.
948 *
949 * This function should be called before any page_mapping_insert(),
950 * page_mapping_remove() and page_mapping_find().
951 *
952 * Locking order is such that address space areas must be locked
953 * prior to this call. Address space can be locked prior to this
954 * call in which case the lock argument is false.
955 *
956 * @param as Address space.
957 * @param lock If false, do not attempt to lock as->lock.
958 */
959void page_table_lock(as_t *as, bool lock)
960{
961 ASSERT(as_operations);
962 ASSERT(as_operations->page_table_lock);
963
964 as_operations->page_table_lock(as, lock);
965}
966
967/** Unlock page table.
968 *
969 * @param as Address space.
970 * @param unlock If false, do not attempt to unlock as->lock.
971 */
972void page_table_unlock(as_t *as, bool unlock)
973{
974 ASSERT(as_operations);
975 ASSERT(as_operations->page_table_unlock);
976
977 as_operations->page_table_unlock(as, unlock);
978}
979
980
981/** Find address space area and lock it.
982 *
983 * The address space must be locked and interrupts must be disabled.
984 *
985 * @param as Address space.
986 * @param va Virtual address.
987 *
988 * @return Locked address space area containing va on success or NULL on failure.
989 */
990as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
991{
992 as_area_t *a;
993 btree_node_t *leaf, *lnode;
994 int i;
995
996 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
997 if (a) {
998 /* va is the base address of an address space area */
999 mutex_lock(&a->lock);
1000 return a;
1001 }
1002
1003 /*
 1004 * Search the leaf node and the rightmost record of its left neighbour
1005 * to find out whether this is a miss or va belongs to an address
1006 * space area found there.
1007 */
1008
1009 /* First, search the leaf node itself. */
1010 for (i = 0; i < leaf->keys; i++) {
1011 a = (as_area_t *) leaf->value[i];
1012 mutex_lock(&a->lock);
1013 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1014 return a;
1015 }
1016 mutex_unlock(&a->lock);
1017 }
1018
1019 /*
1020 * Second, locate the left neighbour and test its last record.
1021 * Because of its position in the B+tree, it must have base < va.
1022 */
1023 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1024 a = (as_area_t *) lnode->value[lnode->keys - 1];
1025 mutex_lock(&a->lock);
1026 if (va < a->base + a->pages * PAGE_SIZE) {
1027 return a;
1028 }
1029 mutex_unlock(&a->lock);
1030 }
1031
1032 return NULL;
1033}
1034
1035/** Check area conflicts with other areas.
1036 *
1037 * The address space must be locked and interrupts must be disabled.
1038 *
1039 * @param as Address space.
1040 * @param va Starting virtual address of the area being tested.
1041 * @param size Size of the area being tested.
1042 * @param avoid_area Do not touch this area.
1043 *
1044 * @return True if there is no conflict, false otherwise.
1045 */
1046bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1047{
1048 as_area_t *a;
1049 btree_node_t *leaf, *node;
1050 int i;
1051
1052 /*
 1053 * We don't want any area to have conflicts with the NULL page.
1054 */
1055 if (overlaps(va, size, NULL, PAGE_SIZE))
1056 return false;
1057
1058 /*
1059 * The leaf node is found in O(log n), where n is proportional to
1060 * the number of address space areas belonging to as.
1061 * The check for conflicts is then attempted on the rightmost
1062 * record in the left neighbour, the leftmost record in the right
1063 * neighbour and all records in the leaf node itself.
1064 */
1065
1066 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1067 if (a != avoid_area)
1068 return false;
1069 }
1070
1071 /* First, check the two border cases. */
1072 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1073 a = (as_area_t *) node->value[node->keys - 1];
1074 mutex_lock(&a->lock);
1075 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1076 mutex_unlock(&a->lock);
1077 return false;
1078 }
1079 mutex_unlock(&a->lock);
1080 }
1081 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1082 a = (as_area_t *) node->value[0];
1083 mutex_lock(&a->lock);
1084 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1085 mutex_unlock(&a->lock);
1086 return false;
1087 }
1088 mutex_unlock(&a->lock);
1089 }
1090
1091 /* Second, check the leaf node. */
1092 for (i = 0; i < leaf->keys; i++) {
1093 a = (as_area_t *) leaf->value[i];
1094
1095 if (a == avoid_area)
1096 continue;
1097
1098 mutex_lock(&a->lock);
1099 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1100 mutex_unlock(&a->lock);
1101 return false;
1102 }
1103 mutex_unlock(&a->lock);
1104 }
1105
1106 /*
1107 * So far, the area does not conflict with other areas.
 1108 * Check that it does not conflict with the kernel address space either.
1109 */
1110 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1111 return !overlaps(va, size,
1112 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1113 }
1114
1115 return true;
1116}
1117
1118/** Return size of the address space area with given base. */
1119size_t as_get_size(uintptr_t base)
1120{
1121 ipl_t ipl;
1122 as_area_t *src_area;
1123 size_t size;
1124
1125 ipl = interrupts_disable();
1126 src_area = find_area_and_lock(AS, base);
1127 if (src_area){
1128 size = src_area->pages * PAGE_SIZE;
1129 mutex_unlock(&src_area->lock);
1130 } else {
1131 size = 0;
1132 }
1133 interrupts_restore(ipl);
1134 return size;
1135}
1136
1137/** Mark portion of address space area as used.
1138 *
1139 * The address space area must be already locked.
1140 *
1141 * @param a Address space area.
1142 * @param page First page to be marked.
 1143 * @param count Number of pages to be marked.
1144 *
1145 * @return 0 on failure and 1 on success.
1146 */
1147int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1148{
1149 btree_node_t *leaf, *node;
1150 count_t pages;
1151 int i;
1152
1153 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1154 ASSERT(count);
1155
1156 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1157 if (pages) {
1158 /*
1159 * We hit the beginning of some used space.
1160 */
1161 return 0;
1162 }
1163
1164 if (!leaf->keys) {
1165 btree_insert(&a->used_space, page, (void *) count, leaf);
1166 return 1;
1167 }
1168
1169 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1170 if (node) {
1171 uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1172 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1173
1174 /*
1175 * Examine the possibility that the interval fits
1176 * somewhere between the rightmost interval of
 1177 * the left neighbour and the first interval of the leaf.
1178 */
1179
1180 if (page >= right_pg) {
1181 /* Do nothing. */
1182 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1183 /* The interval intersects with the left interval. */
1184 return 0;
1185 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1186 /* The interval intersects with the right interval. */
1187 return 0;
1188 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1189 /* The interval can be added by merging the two already present intervals. */
1190 node->value[node->keys - 1] += count + right_cnt;
1191 btree_remove(&a->used_space, right_pg, leaf);
1192 return 1;
1193 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1194 /* The interval can be added by simply growing the left interval. */
1195 node->value[node->keys - 1] += count;
1196 return 1;
1197 } else if (page + count*PAGE_SIZE == right_pg) {
1198 /*
 1199 * The interval can be added by simply moving the base of the right
1200 * interval down and increasing its size accordingly.
1201 */
1202 leaf->value[0] += count;
1203 leaf->key[0] = page;
1204 return 1;
1205 } else {
1206 /*
 1207 * The interval is between both neighbouring intervals,
1208 * but cannot be merged with any of them.
1209 */
1210 btree_insert(&a->used_space, page, (void *) count, leaf);
1211 return 1;
1212 }
1213 } else if (page < leaf->key[0]) {
1214 uintptr_t right_pg = leaf->key[0];
1215 count_t right_cnt = (count_t) leaf->value[0];
1216
1217 /*
1218 * Investigate the border case in which the left neighbour does not
1219 * exist but the interval fits from the left.
1220 */
1221
1222 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1223 /* The interval intersects with the right interval. */
1224 return 0;
1225 } else if (page + count*PAGE_SIZE == right_pg) {
1226 /*
1227 * The interval can be added by moving the base of the right interval down
1228 * and increasing its size accordingly.
1229 */
1230 leaf->key[0] = page;
1231 leaf->value[0] += count;
1232 return 1;
1233 } else {
1234 /*
1235 * The interval doesn't adjoin with the right interval.
1236 * It must be added individually.
1237 */
1238 btree_insert(&a->used_space, page, (void *) count, leaf);
1239 return 1;
1240 }
1241 }
1242
1243 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1244 if (node) {
1245 uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1246 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1247
1248 /*
1249 * Examine the possibility that the interval fits
1250 * somewhere between the leftmost interval of
 1251 * the right neighbour and the last interval of the leaf.
1252 */
1253
1254 if (page < left_pg) {
1255 /* Do nothing. */
1256 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1257 /* The interval intersects with the left interval. */
1258 return 0;
1259 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1260 /* The interval intersects with the right interval. */
1261 return 0;
1262 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1263 /* The interval can be added by merging the two already present intervals. */
1264 leaf->value[leaf->keys - 1] += count + right_cnt;
1265 btree_remove(&a->used_space, right_pg, node);
1266 return 1;
1267 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1268 /* The interval can be added by simply growing the left interval. */
1269 leaf->value[leaf->keys - 1] += count;
1270 return 1;
1271 } else if (page + count*PAGE_SIZE == right_pg) {
1272 /*
 1273 * The interval can be added by simply moving the base of the right
1274 * interval down and increasing its size accordingly.
1275 */
1276 node->value[0] += count;
1277 node->key[0] = page;
1278 return 1;
1279 } else {
1280 /*
 1281 * The interval is between both neighbouring intervals,
1282 * but cannot be merged with any of them.
1283 */
1284 btree_insert(&a->used_space, page, (void *) count, leaf);
1285 return 1;
1286 }
1287 } else if (page >= leaf->key[leaf->keys - 1]) {
1288 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1289 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1290
1291 /*
1292 * Investigate the border case in which the right neighbour does not
1293 * exist but the interval fits from the right.
1294 */
1295
1296 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1297 /* The interval intersects with the left interval. */
1298 return 0;
1299 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1300 /* The interval can be added by growing the left interval. */
1301 leaf->value[leaf->keys - 1] += count;
1302 return 1;
1303 } else {
1304 /*
1305 * The interval doesn't adjoin with the left interval.
1306 * It must be added individually.
1307 */
1308 btree_insert(&a->used_space, page, (void *) count, leaf);
1309 return 1;
1310 }
1311 }
1312
1313 /*
1314 * Note that if the algorithm made it thus far, the interval can fit only
1315 * between two other intervals of the leaf. The two border cases were already
1316 * resolved.
1317 */
1318 for (i = 1; i < leaf->keys; i++) {
1319 if (page < leaf->key[i]) {
1320 uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1321 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1322
1323 /*
1324 * The interval fits between left_pg and right_pg.
1325 */
1326
1327 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1328 /* The interval intersects with the left interval. */
1329 return 0;
1330 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1331 /* The interval intersects with the right interval. */
1332 return 0;
1333 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1334 /* The interval can be added by merging the two already present intervals. */
1335 leaf->value[i - 1] += count + right_cnt;
1336 btree_remove(&a->used_space, right_pg, leaf);
1337 return 1;
1338 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1339 /* The interval can be added by simply growing the left interval. */
1340 leaf->value[i - 1] += count;
1341 return 1;
1342 } else if (page + count*PAGE_SIZE == right_pg) {
1343 /*
 1344 * The interval can be added by simply moving the base of the right
1345 * interval down and increasing its size accordingly.
1346 */
1347 leaf->value[i] += count;
1348 leaf->key[i] = page;
1349 return 1;
1350 } else {
1351 /*
 1352 * The interval is between both neighbouring intervals,
1353 * but cannot be merged with any of them.
1354 */
1355 btree_insert(&a->used_space, page, (void *) count, leaf);
1356 return 1;
1357 }
1358 }
1359 }
1360
1361 panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
1362}
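/*
 * Worked example for used_space_insert() (assuming 4 KiB pages): if the
 * area already tracks the intervals [0x1000, 3 pages] and [0x5000, 2 pages],
 * inserting page = 0x4000 with count = 1 exactly fills the gap, so the two
 * records are merged into a single 6-page interval starting at 0x1000.
 */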
1363
1364/** Mark portion of address space area as unused.
1365 *
1366 * The address space area must be already locked.
1367 *
1368 * @param a Address space area.
1369 * @param page First page to be marked.
 1370 * @param count Number of pages to be marked.
1371 *
1372 * @return 0 on failure and 1 on success.
1373 */
1374int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1375{
1376 btree_node_t *leaf, *node;
1377 count_t pages;
1378 int i;
1379
1380 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1381 ASSERT(count);
1382
1383 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1384 if (pages) {
1385 /*
1386 * We are lucky, page is the beginning of some interval.
1387 */
1388 if (count > pages) {
1389 return 0;
1390 } else if (count == pages) {
1391 btree_remove(&a->used_space, page, leaf);
1392 return 1;
1393 } else {
1394 /*
1395 * Find the respective interval.
1396 * Decrease its size and relocate its start address.
1397 */
1398 for (i = 0; i < leaf->keys; i++) {
1399 if (leaf->key[i] == page) {
1400 leaf->key[i] += count*PAGE_SIZE;
1401 leaf->value[i] -= count;
1402 return 1;
1403 }
1404 }
1405 goto error;
1406 }
1407 }
1408
1409 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1410 if (node && page < leaf->key[0]) {
1411 uintptr_t left_pg = node->key[node->keys - 1];
1412 count_t left_cnt = (count_t) node->value[node->keys - 1];
1413
1414 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1415 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1416 /*
1417 * The interval is contained in the rightmost interval
1418 * of the left neighbour and can be removed by
1419 * updating the size of the bigger interval.
1420 */
1421 node->value[node->keys - 1] -= count;
1422 return 1;
1423 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1424 count_t new_cnt;
1425
1426 /*
1427 * The interval is contained in the rightmost interval
1428 * of the left neighbour but its removal requires
1429 * both updating the size of the original interval and
1430 * also inserting a new interval.
1431 */
1432 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1433 node->value[node->keys - 1] -= count + new_cnt;
1434 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1435 return 1;
1436 }
1437 }
1438 return 0;
1439 } else if (page < leaf->key[0]) {
1440 return 0;
1441 }
1442
1443 if (page > leaf->key[leaf->keys - 1]) {
1444 uintptr_t left_pg = leaf->key[leaf->keys - 1];
1445 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1446
1447 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1448 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1449 /*
1450 * The interval is contained in the rightmost interval
1451 * of the leaf and can be removed by updating the size
1452 * of the bigger interval.
1453 */
1454 leaf->value[leaf->keys - 1] -= count;
1455 return 1;
1456 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1457 count_t new_cnt;
1458
1459 /*
1460 * The interval is contained in the rightmost interval
1461 * of the leaf but its removal requires both updating
1462 * the size of the original interval and
1463 * also inserting a new interval.
1464 */
1465 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1466 leaf->value[leaf->keys - 1] -= count + new_cnt;
1467 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1468 return 1;
1469 }
1470 }
1471 return 0;
1472 }
1473
1474 /*
 1475 * The border cases have already been resolved.
1476 * Now the interval can be only between intervals of the leaf.
1477 */
1478 for (i = 1; i < leaf->keys - 1; i++) {
1479 if (page < leaf->key[i]) {
1480 uintptr_t left_pg = leaf->key[i - 1];
1481 count_t left_cnt = (count_t) leaf->value[i - 1];
1482
1483 /*
1484 * Now the interval is between intervals corresponding to (i - 1) and i.
1485 */
1486 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1487 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1488 /*
1489 * The interval is contained in the interval (i - 1)
1490 * of the leaf and can be removed by updating the size
1491 * of the bigger interval.
1492 */
1493 leaf->value[i - 1] -= count;
1494 return 1;
1495 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1496 count_t new_cnt;
1497
1498 /*
1499 * The interval is contained in the interval (i - 1)
1500 * of the leaf but its removal requires both updating
1501 * the size of the original interval and
1502 * also inserting a new interval.
1503 */
1504 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1505 leaf->value[i - 1] -= count + new_cnt;
1506 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1507 return 1;
1508 }
1509 }
1510 return 0;
1511 }
1512 }
1513
1514error:
1515 panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
1516}
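/*
 * Worked example for used_space_remove() (assuming 4 KiB pages): removing
 * page = 0x2000 with count = 1 from a tracked 4-page interval starting at
 * 0x1000 shrinks that record to 1 page and inserts a new 2-page record
 * starting at 0x3000, i.e. the original interval is split in two.
 */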
1517
1518/** Remove reference to address space area share info.
1519 *
1520 * If the reference count drops to 0, the sh_info is deallocated.
1521 *
1522 * @param sh_info Pointer to address space area share info.
1523 */
1524void sh_info_remove_reference(share_info_t *sh_info)
1525{
1526 bool dealloc = false;
1527
1528 mutex_lock(&sh_info->lock);
1529 ASSERT(sh_info->refcount);
1530 if (--sh_info->refcount == 0) {
1531 dealloc = true;
1532 link_t *cur;
1533
1534 /*
 1535 * Now carefully walk the pagemap B+tree and free / remove a
 1536 * reference from all frames found there.
1537 */
1538 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1539 btree_node_t *node;
1540 int i;
1541
1542 node = list_get_instance(cur, btree_node_t, leaf_link);
1543 for (i = 0; i < node->keys; i++)
1544 frame_free((uintptr_t) node->value[i]);
1545 }
1546
1547 }
1548 mutex_unlock(&sh_info->lock);
1549
1550 if (dealloc) {
1551 btree_destroy(&sh_info->pagemap);
1552 free(sh_info);
1553 }
1554}
1555
1556/*
1557 * Address space related syscalls.
1558 */
1559
1560/** Wrapper for as_area_create(). */
1561unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1562{
1563 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1564 return (unative_t) address;
1565 else
1566 return (unative_t) -1;
1567}
1568
1569/** Wrapper for as_area_resize(). */
1570unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1571{
1572 return (unative_t) as_area_resize(AS, address, size, 0);
1573}
1574
1575/** Wrapper for as_area_destroy(). */
1576unative_t sys_as_area_destroy(uintptr_t address)
1577{
1578 return (unative_t) as_area_destroy(AS, address);
1579}
1580
1581/** Print out information about address space.
1582 *
1583 * @param as Address space.
1584 */
1585void as_print(as_t *as)
1586{
1587 ipl_t ipl;
1588
1589 ipl = interrupts_disable();
1590 mutex_lock(&as->lock);
1591
1592 /* print out info about address space areas */
1593 link_t *cur;
1594 for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1595 btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1596
1597 int i;
1598 for (i = 0; i < node->keys; i++) {
1599 as_area_t *area = node->value[i];
1600
1601 mutex_lock(&area->lock);
1602 printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1603 area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1604 mutex_unlock(&area->lock);
1605 }
1606 }
1607
1608 mutex_unlock(&as->lock);
1609 interrupts_restore(ipl);
1610}
1611
1612/** @}
1613 */