source: mainline/generic/src/mm/as.c@82da5f5

Last change on this file since 82da5f5 was 8182031, checked in by Jakub Jermar <jakub@…>, 19 years ago

Some shared memory stuff (not completed).
Support for address space area backends.
Add ELF and anonymous memory backends.

  • Property mode set to 100644
File size: 42.8 KB
1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * @file as.c
31 * @brief Address space related functions.
32 *
33 * This file contains address space manipulation functions.
34 * Roughly speaking, this is a higher-level client of
35 * Virtual Address Translation (VAT) subsystem.
36 *
37 * Functionality provided by this file allows one to
38 * create address space and create, resize and share
39 * address space areas.
40 *
41 * @see page.c
42 *
43 */
44
45#include <mm/as.h>
46#include <arch/mm/as.h>
47#include <mm/page.h>
48#include <mm/frame.h>
49#include <mm/slab.h>
50#include <mm/tlb.h>
51#include <arch/mm/page.h>
52#include <genarch/mm/page_pt.h>
53#include <genarch/mm/page_ht.h>
54#include <mm/asid.h>
55#include <arch/mm/asid.h>
56#include <synch/spinlock.h>
57#include <synch/mutex.h>
58#include <adt/list.h>
59#include <adt/btree.h>
60#include <proc/task.h>
61#include <proc/thread.h>
62#include <arch/asm.h>
63#include <panic.h>
64#include <debug.h>
65#include <print.h>
66#include <memstr.h>
67#include <macros.h>
68#include <arch.h>
69#include <errno.h>
70#include <config.h>
71#include <align.h>
72#include <arch/types.h>
73#include <typedefs.h>
74#include <syscall/copy.h>
75#include <arch/interrupt.h>
76
77/** This structure contains information associated with the shared address space area. */
78struct share_info {
79 mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
80 count_t refcount; /**< This structure can be deallocated if refcount drops to 0. */
81 btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
82};
83
84as_operations_t *as_operations = NULL;
85
86/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
87SPINLOCK_INITIALIZE(as_lock);
88
89/**
90 * This list contains address spaces that are not active on any
91 * processor and that have valid ASID.
92 */
93LIST_INITIALIZE(inactive_as_with_asid_head);
94
95/** Kernel address space. */
96as_t *AS_KERNEL = NULL;
97
98static int area_flags_to_page_flags(int aflags);
99static as_area_t *find_area_and_lock(as_t *as, __address va);
100static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
101static void sh_info_remove_reference(share_info_t *sh_info);
102
103/** Initialize address space subsystem. */
104void as_init(void)
105{
106 as_arch_init();
107 AS_KERNEL = as_create(FLAG_AS_KERNEL);
108 if (!AS_KERNEL)
109 panic("can't create kernel address space\n");
110
111}
112
113/** Create address space.
114 *
115 * @param flags Flags that influence the way in which the address space is created.
116 */
117as_t *as_create(int flags)
118{
119 as_t *as;
120
121 as = (as_t *) malloc(sizeof(as_t), 0);
122 link_initialize(&as->inactive_as_with_asid_link);
123 mutex_initialize(&as->lock);
124 btree_create(&as->as_area_btree);
125
126 if (flags & FLAG_AS_KERNEL)
127 as->asid = ASID_KERNEL;
128 else
129 as->asid = ASID_INVALID;
130
131 as->refcount = 0;
132 as->page_table = page_table_create(flags);
133
134 return as;
135}
136
137/** Free address space. */
138void as_free(as_t *as)
139{
140 ASSERT(as->refcount == 0);
141
142 /* TODO: free as_areas and other resources held by as */
143 /* TODO: free page table */
144 free(as);
145}
146
147/** Create address space area of common attributes.
148 *
149 * The created address space area is added to the target address space.
150 *
151 * @param as Target address space.
152 * @param flags Flags of the area memory.
153 * @param size Size of area.
154 * @param base Base address of area.
155 * @param attrs Attributes of the area.
156 * @param backend Address space area backend. NULL if no backend is used.
157 * @param backend_data NULL or a pointer to an array holding two void *.
158 *
159 * @return Address space area on success or NULL on failure.
160 */
161as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
162 mem_backend_t *backend, void **backend_data)
163{
164 ipl_t ipl;
165 as_area_t *a;
166
167 if (base % PAGE_SIZE)
168 return NULL;
169
170 if (!size)
171 return NULL;
172
173 /* Writeable executable areas are not supported. */
174 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
175 return NULL;
176
177 ipl = interrupts_disable();
178 mutex_lock(&as->lock);
179
180 if (!check_area_conflicts(as, base, size, NULL)) {
181 mutex_unlock(&as->lock);
182 interrupts_restore(ipl);
183 return NULL;
184 }
185
186 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
187
188 mutex_initialize(&a->lock);
189
190 a->flags = flags;
191 a->attributes = attrs;
192 a->pages = SIZE2FRAMES(size);
193 a->base = base;
194 a->sh_info = NULL;
195 a->backend = backend;
196 if (backend_data) {
197 a->backend_data[0] = backend_data[0];
198 a->backend_data[1] = backend_data[1];
199 }
200 btree_create(&a->used_space);
201
202 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
203
204 mutex_unlock(&as->lock);
205 interrupts_restore(ipl);
206
207 return a;
208}
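
/*
 * Usage sketch: creating a one-page anonymous read/write area in the
 * current address space, in the spirit of sys_as_area_create() near the
 * end of this file. USER_AREA_BASE stands for a hypothetical page-aligned
 * userspace address; it is not a real kernel symbol. A NULL return means
 * that the base was not page-aligned, the size was zero, a writeable
 * executable area was requested, or the area would conflict with an
 * existing one.
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, PAGE_SIZE,
 *	    USER_AREA_BASE, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 */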
209
210/** Find address space area and change it.
211 *
212 * @param as Address space.
213 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
214 * @param size New size of the virtual memory block starting at address.
215 * @param flags Flags influencing the remap operation. Currently unused.
216 *
217 * @return Zero on success or a value from @ref errno.h otherwise.
218 */
219int as_area_resize(as_t *as, __address address, size_t size, int flags)
220{
221 as_area_t *area;
222 ipl_t ipl;
223 size_t pages;
224
225 ipl = interrupts_disable();
226 mutex_lock(&as->lock);
227
228 /*
229 * Locate the area.
230 */
231 area = find_area_and_lock(as, address);
232 if (!area) {
233 mutex_unlock(&as->lock);
234 interrupts_restore(ipl);
235 return ENOENT;
236 }
237
238 if (area->flags & AS_AREA_DEVICE) {
239 /*
240 * Remapping of address space areas associated
241 * with memory mapped devices is not supported.
242 */
243 mutex_unlock(&area->lock);
244 mutex_unlock(&as->lock);
245 interrupts_restore(ipl);
246 return ENOTSUP;
247 }
248 if (area->sh_info) {
249 /*
250 * Remapping of shared address space areas
251 * is not supported.
252 */
253 mutex_unlock(&area->lock);
254 mutex_unlock(&as->lock);
255 interrupts_restore(ipl);
256 return ENOTSUP;
257 }
258
259 pages = SIZE2FRAMES((address - area->base) + size);
260 if (!pages) {
261 /*
262 * Zero size address space areas are not allowed.
263 */
264 mutex_unlock(&area->lock);
265 mutex_unlock(&as->lock);
266 interrupts_restore(ipl);
267 return EPERM;
268 }
269
270 if (pages < area->pages) {
271 bool cond;
272 __address start_free = area->base + pages*PAGE_SIZE;
273
274 /*
275 * Shrinking the area.
276 * No need to check for overlaps.
277 */
278
279 /*
280 * Remove frames belonging to used space starting from
281 * the highest addresses downwards until an overlap with
282 * the resized address space area is found. Note that this
283 * is also the right way to remove part of the used_space
284 * B+tree leaf list.
285 */
286 for (cond = true; cond;) {
287 btree_node_t *node;
288
289 ASSERT(!list_empty(&area->used_space.leaf_head));
290 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
291 if ((cond = (bool) node->keys)) {
292 __address b = node->key[node->keys - 1];
293 count_t c = (count_t) node->value[node->keys - 1];
294 int i = 0;
295
296 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
297
298 if (b + c*PAGE_SIZE <= start_free) {
299 /*
300 * The whole interval fits completely
301 * in the resized address space area.
302 */
303 break;
304 }
305
306 /*
307 * Part of the interval corresponding to b and c
308 * overlaps with the resized address space area.
309 */
310
311 cond = false; /* we are almost done */
312 i = (start_free - b) >> PAGE_WIDTH;
313 if (!used_space_remove(area, start_free, c - i))
314 panic("Could not remove used space.");
315 } else {
316 /*
317 * The interval of used space can be completely removed.
318 */
319 if (!used_space_remove(area, b, c))
320 panic("Could not remove used space.\n");
321 }
322
323 for (; i < c; i++) {
324 pte_t *pte;
325
326 page_table_lock(as, false);
327 pte = page_mapping_find(as, b + i*PAGE_SIZE);
328 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
329 if (area->backend && area->backend->backend_frame_free) {
330 area->backend->backend_frame_free(area,
331 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
332 }
333 page_mapping_remove(as, b + i*PAGE_SIZE);
334 page_table_unlock(as, false);
335 }
336 }
337 }
338 /*
339 * Invalidate TLB's.
340 */
341 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
342 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
343 tlb_shootdown_finalize();
344 } else {
345 /*
346 * Growing the area.
347 * Check for overlaps with other address space areas.
348 */
349 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
350 mutex_unlock(&area->lock);
351 mutex_unlock(&as->lock);
352 interrupts_restore(ipl);
353 return EADDRNOTAVAIL;
354 }
355 }
356
357 area->pages = pages;
358
359 mutex_unlock(&area->lock);
360 mutex_unlock(&as->lock);
361 interrupts_restore(ipl);
362
363 return 0;
364}
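
/*
 * Usage sketch: growing the hypothetical area from the previous example to
 * two pages. The address identifies the area to resize; the new size is
 * measured from that address. A zero return means success; ENOENT, ENOTSUP,
 * EPERM and EADDRNOTAVAIL report the failure modes described above.
 *
 *	int rc;
 *
 *	rc = as_area_resize(AS, USER_AREA_BASE, 2 * PAGE_SIZE, 0);
 *	if (rc != 0)
 *		printf("as_area_resize() failed: %d\n", rc);
 */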
365
366/** Destroy address space area.
367 *
368 * @param as Address space.
369 * @param address Address within the area to be deleted.
370 *
371 * @return Zero on success or a value from @ref errno.h on failure.
372 */
373int as_area_destroy(as_t *as, __address address)
374{
375 as_area_t *area;
376 __address base;
377 ipl_t ipl;
378
379 ipl = interrupts_disable();
380 mutex_lock(&as->lock);
381
382 area = find_area_and_lock(as, address);
383 if (!area) {
384 mutex_unlock(&as->lock);
385 interrupts_restore(ipl);
386 return ENOENT;
387 }
388
389 base = area->base;
390 if (!(area->flags & AS_AREA_DEVICE)) {
391 bool cond;
392
393 /*
394 * Releasing physical memory.
395 * Areas mapping memory-mapped devices are treated differently than
396 * areas backing frame_alloc()'ed memory.
397 */
398
399 /*
400 * Visit only the pages mapped by used_space B+tree.
401 * Note that we must be very careful when walking the tree
402 * leaf list and removing used space as the leaf list changes
403 * unpredictably after each remove. The solution is to actually
404 * not walk the tree at all, but to remove items from the head
405 * of the leaf list until no keys are left.
406 */
407 for (cond = true; cond;) {
408 btree_node_t *node;
409
410 ASSERT(!list_empty(&area->used_space.leaf_head));
411 node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
412 if ((cond = (bool) node->keys)) {
413 __address b = node->key[0];
414 count_t i;
415 pte_t *pte;
416
417 for (i = 0; i < (count_t) node->value[0]; i++) {
418 page_table_lock(as, false);
419 pte = page_mapping_find(as, b + i*PAGE_SIZE);
420 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
421 if (area->backend && area->backend->backend_frame_free) {
422 area->backend->backend_frame_free(area,
423 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
424 }
425 page_mapping_remove(as, b + i*PAGE_SIZE);
426 page_table_unlock(as, false);
427 }
428 if (!used_space_remove(area, b, i))
429 panic("Could not remove used space.\n");
430 }
431 }
432 }
433 btree_destroy(&area->used_space);
434
435 /*
436 * Invalidate TLB's.
437 */
438 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
439 tlb_invalidate_pages(AS->asid, area->base, area->pages);
440 tlb_shootdown_finalize();
441
442 area->attributes |= AS_AREA_ATTR_PARTIAL;
443
444 if (area->sh_info)
445 sh_info_remove_reference(area->sh_info);
446
447 mutex_unlock(&area->lock);
448
449 /*
450 * Remove the empty area from address space.
451 */
452 btree_remove(&AS->as_area_btree, base, NULL);
453
454 free(area);
455
456 mutex_unlock(&AS->lock);
457 interrupts_restore(ipl);
458 return 0;
459}
460
461/** Steal address space area from another task.
462 *
463 * The address space area is stolen from another task.
464 * Moreover, any existing mapping
465 * is copied as well, thus providing a mechanism
466 * for sharing a group of pages. The source address
467 * space area and any associated mapping are preserved.
468 *
469 * @param src_task Pointer to the source task.
470 * @param src_base Base address of the source address space area.
471 * @param acc_size Expected size of the source area.
472 * @param dst_base Target base address.
473 *
474 * @return Zero on success or ENOENT if there is no such task or
475 * if there is no such address space area,
476 * EPERM if there was a problem in accepting the area or
477 * ENOMEM if there was a problem in allocating destination
478 * address space area.
479 */
480int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
481 __address dst_base)
482{
483 ipl_t ipl;
484 count_t i;
485 as_t *src_as;
486 int src_flags;
487 size_t src_size;
488 as_area_t *src_area, *dst_area;
489
490 ipl = interrupts_disable();
491 spinlock_lock(&src_task->lock);
492 src_as = src_task->as;
493
494 mutex_lock(&src_as->lock);
495 src_area = find_area_and_lock(src_as, src_base);
496 if (!src_area) {
497 /*
498 * Could not find the source address space area.
499 */
500 spinlock_unlock(&src_task->lock);
501 mutex_unlock(&src_as->lock);
502 interrupts_restore(ipl);
503 return ENOENT;
504 }
505 src_size = src_area->pages * PAGE_SIZE;
506 src_flags = src_area->flags;
507 mutex_unlock(&src_area->lock);
508 mutex_unlock(&src_as->lock);
509
510 if (src_size != acc_size) {
511 spinlock_unlock(&src_task->lock);
512 interrupts_restore(ipl);
513 return EPERM;
514 }
515 /*
516 * Create copy of the source address space area.
517 * The destination area is created with AS_AREA_ATTR_PARTIAL
518 * attribute set which prevents race condition with
519 * preliminary as_page_fault() calls.
520 */
521 dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
522 if (!dst_area) {
523 /*
524 * Destination address space area could not be created.
525 */
526 spinlock_unlock(&src_task->lock);
527 interrupts_restore(ipl);
528 return ENOMEM;
529 }
530
531 spinlock_unlock(&src_task->lock);
532
533 /*
534 * Avoid deadlock by first locking the address space with the lower address.
535 */
536 if (AS < src_as) {
537 mutex_lock(&AS->lock);
538 mutex_lock(&src_as->lock);
539 } else {
540 mutex_lock(&src_as->lock);
541 mutex_lock(&AS->lock);
542 }
543
544 for (i = 0; i < SIZE2FRAMES(src_size); i++) {
545 pte_t *pte;
546 __address frame;
547
548 page_table_lock(src_as, false);
549 pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
550 if (pte && PTE_VALID(pte)) {
551 ASSERT(PTE_PRESENT(pte));
552 frame = PTE_GET_FRAME(pte);
553 if (!(src_flags & AS_AREA_DEVICE))
554 frame_reference_add(ADDR2PFN(frame));
555 page_table_unlock(src_as, false);
556 } else {
557 page_table_unlock(src_as, false);
558 continue;
559 }
560
561 page_table_lock(AS, false);
562 page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
563 page_table_unlock(AS, false);
564 }
565
566 /*
567 * Now the destination address space area has been
568 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
569 * attribute.
570 */
571 mutex_lock(&dst_area->lock);
572 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
573 mutex_unlock(&dst_area->lock);
574
575 mutex_unlock(&AS->lock);
576 mutex_unlock(&src_as->lock);
577 interrupts_restore(ipl);
578
579 return 0;
580}
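
/*
 * Usage sketch: stealing an area from another task into the current address
 * space. src_task, SRC_BASE, DST_BASE and acc_size are hypothetical values
 * that some higher-level sharing protocol would supply; acc_size must match
 * the current size of the source area exactly, otherwise EPERM is returned.
 *
 *	int rc;
 *
 *	rc = as_area_steal(src_task, SRC_BASE, acc_size, DST_BASE);
 *	if (rc != 0)
 *		printf("as_area_steal() failed: %d\n", rc);
 */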
581
582/** Initialize mapping for one page of address space.
583 *
584 * This function maps 'page' to 'frame' according
585 * to the attributes of the address space area to
586 * which 'page' belongs.
587 *
588 * @param as Target address space.
589 * @param page Virtual page within the area.
590 * @param frame Physical frame to which page will be mapped.
591 */
592void as_set_mapping(as_t *as, __address page, __address frame)
593{
594 as_area_t *area;
595 ipl_t ipl;
596
597 ipl = interrupts_disable();
598 page_table_lock(as, true);
599
600 area = find_area_and_lock(as, page);
601 if (!area) {
602 panic("Page not part of any as_area.\n");
603 }
604
605 ASSERT(!area->backend);
606
607 page_mapping_insert(as, page, frame, as_area_get_flags(area));
608 if (!used_space_insert(area, page, 1))
609 panic("Could not insert used space.\n");
610
611 mutex_unlock(&area->lock);
612 page_table_unlock(as, true);
613 interrupts_restore(ipl);
614}
615
616/** Handle page fault within the current address space.
617 *
618 * This is the high-level page fault handler. It decides
619 * whether the page fault can be resolved by any backend
620 * and if so, it invokes the backend to resolve the page
621 * fault.
622 *
623 * Interrupts are assumed disabled.
624 *
625 * @param page Faulting page.
626 * @param istate Pointer to interrupted state.
627 *
628 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
629 * fault was caused by copy_to_uspace() or copy_from_uspace().
630 */
631int as_page_fault(__address page, istate_t *istate)
632{
633 pte_t *pte;
634 as_area_t *area;
635
636 if (!THREAD)
637 return AS_PF_FAULT;
638
639 ASSERT(AS);
640
641 mutex_lock(&AS->lock);
642 area = find_area_and_lock(AS, page);
643 if (!area) {
644 /*
645 * No area contained mapping for 'page'.
646 * Signal page fault to low-level handler.
647 */
648 mutex_unlock(&AS->lock);
649 goto page_fault;
650 }
651
652 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
653 /*
654 * The address space area is not fully initialized.
655 * Avoid possible race by returning error.
656 */
657 mutex_unlock(&area->lock);
658 mutex_unlock(&AS->lock);
659 goto page_fault;
660 }
661
662 if (!area->backend || !area->backend->backend_page_fault) {
663 /*
664 * The address space area is not backed by any backend
665 * or the backend cannot handle page faults.
666 */
667 mutex_unlock(&area->lock);
668 mutex_unlock(&AS->lock);
669 goto page_fault;
670 }
671
672 page_table_lock(AS, false);
673
674 /*
675 * To avoid race condition between two page faults
676 * on the same address, we need to make sure
677 * the mapping has not been already inserted.
678 */
679 if ((pte = page_mapping_find(AS, page))) {
680 if (PTE_PRESENT(pte)) {
681 page_table_unlock(AS, false);
682 mutex_unlock(&area->lock);
683 mutex_unlock(&AS->lock);
684 return AS_PF_OK;
685 }
686 }
687
688 /*
689 * Resort to the backend page fault handler.
690 */
691 if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
692 page_table_unlock(AS, false);
693 mutex_unlock(&area->lock);
694 mutex_unlock(&AS->lock);
695 goto page_fault;
696 }
697
698 page_table_unlock(AS, false);
699 mutex_unlock(&area->lock);
700 mutex_unlock(&AS->lock);
701 return AS_PF_OK;
702
703page_fault:
704 if (THREAD->in_copy_from_uspace) {
705 THREAD->in_copy_from_uspace = false;
706 istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
707 } else if (THREAD->in_copy_to_uspace) {
708 THREAD->in_copy_to_uspace = false;
709 istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
710 } else {
711 return AS_PF_FAULT;
712 }
713
714 return AS_PF_DEFER;
715}
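
/*
 * Usage sketch: a hypothetical architecture-level fault handler dispatching
 * to as_page_fault(). fault_address and istate stand for whatever the
 * architecture-specific entry code provides. On AS_PF_OK and AS_PF_DEFER the
 * handler simply returns; the deferred case resumes at the failover address
 * installed above.
 *
 *	if (as_page_fault(fault_address, istate) == AS_PF_FAULT)
 *		panic("unresolved page fault at %P\n", fault_address);
 */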
716
717/** Switch address spaces.
718 *
719 * Note that this function cannot sleep as it is essentially a part of
720 * the scheduling. Sleeping here would lead to deadlock on wakeup.
721 *
722 * @param old Old address space or NULL.
723 * @param new New address space.
724 */
725void as_switch(as_t *old, as_t *new)
726{
727 ipl_t ipl;
728 bool needs_asid = false;
729
730 ipl = interrupts_disable();
731 spinlock_lock(&as_lock);
732
733 /*
734 * First, take care of the old address space.
735 */
736 if (old) {
737 mutex_lock_active(&old->lock);
738 ASSERT(old->refcount);
739 if((--old->refcount == 0) && (old != AS_KERNEL)) {
740 /*
741 * The old address space is no longer active on
742 * any processor. It can be appended to the
743 * list of inactive address spaces with assigned
744 * ASID.
745 */
746 ASSERT(old->asid != ASID_INVALID);
747 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
748 }
749 mutex_unlock(&old->lock);
750 }
751
752 /*
753 * Second, prepare the new address space.
754 */
755 mutex_lock_active(&new->lock);
756 if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
757 if (new->asid != ASID_INVALID)
758 list_remove(&new->inactive_as_with_asid_link);
759 else
760 needs_asid = true; /* defer call to asid_get() until new->lock is released */
761 }
762 SET_PTL0_ADDRESS(new->page_table);
763 mutex_unlock(&new->lock);
764
765 if (needs_asid) {
766 /*
767 * Allocation of new ASID was deferred
768 * until now in order to avoid deadlock.
769 */
770 asid_t asid;
771
772 asid = asid_get();
773 mutex_lock_active(&new->lock);
774 new->asid = asid;
775 mutex_unlock(&new->lock);
776 }
777 spinlock_unlock(&as_lock);
778 interrupts_restore(ipl);
779
780 /*
781 * Perform architecture-specific steps.
782 * (e.g. write ASID to hardware register etc.)
783 */
784 as_install_arch(new);
785
786 AS = new;
787}
788
789/** Convert address space area flags to page flags.
790 *
791 * @param aflags Flags of some address space area.
792 *
793 * @return Flags to be passed to page_mapping_insert().
794 */
795int area_flags_to_page_flags(int aflags)
796{
797 int flags;
798
799 flags = PAGE_USER | PAGE_PRESENT;
800
801 if (aflags & AS_AREA_READ)
802 flags |= PAGE_READ;
803
804 if (aflags & AS_AREA_WRITE)
805 flags |= PAGE_WRITE;
806
807 if (aflags & AS_AREA_EXEC)
808 flags |= PAGE_EXEC;
809
810 if (!(aflags & AS_AREA_DEVICE))
811 flags |= PAGE_CACHEABLE;
812
813 return flags;
814}
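
/*
 * For example, an ordinary anonymous area created with
 * AS_AREA_READ | AS_AREA_WRITE translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * while the same flags combined with AS_AREA_DEVICE yield the same set
 * without PAGE_CACHEABLE.
 */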
815
816/** Compute flags for the virtual address translation subsystem.
817 *
818 * The address space area must be locked.
819 * Interrupts must be disabled.
820 *
821 * @param a Address space area.
822 *
823 * @return Flags to be used in page_mapping_insert().
824 */
825int as_area_get_flags(as_area_t *a)
826{
827 return area_flags_to_page_flags(a->flags);
828}
829
830/** Create page table.
831 *
832 * Depending on architecture, create either address space
833 * private or global page table.
834 *
835 * @param flags Flags saying whether the page table is for kernel address space.
836 *
837 * @return First entry of the page table.
838 */
839pte_t *page_table_create(int flags)
840{
841 ASSERT(as_operations);
842 ASSERT(as_operations->page_table_create);
843
844 return as_operations->page_table_create(flags);
845}
846
847/** Lock page table.
848 *
849 * This function should be called before any page_mapping_insert(),
850 * page_mapping_remove() and page_mapping_find().
851 *
852 * Locking order is such that address space areas must be locked
853 * prior to this call. Address space can be locked prior to this
854 * call in which case the lock argument is false.
855 *
856 * @param as Address space.
857 * @param lock If false, do not attempt to lock as->lock.
858 */
859void page_table_lock(as_t *as, bool lock)
860{
861 ASSERT(as_operations);
862 ASSERT(as_operations->page_table_lock);
863
864 as_operations->page_table_lock(as, lock);
865}
866
867/** Unlock page table.
868 *
869 * @param as Address space.
870 * @param unlock If false, do not attempt to unlock as->lock.
871 */
872void page_table_unlock(as_t *as, bool unlock)
873{
874 ASSERT(as_operations);
875 ASSERT(as_operations->page_table_unlock);
876
877 as_operations->page_table_unlock(as, unlock);
878}
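
/*
 * The typical pattern, used throughout this file when the address space and
 * area mutexes are already held, is:
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, page);
 *	(inspect or modify the mapping here)
 *	page_table_unlock(as, false);
 *
 * Passing true instead makes these calls also take and release as->lock,
 * as as_set_mapping() above does.
 */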
879
880
881/** Find address space area and lock it.
882 *
883 * The address space must be locked and interrupts must be disabled.
884 *
885 * @param as Address space.
886 * @param va Virtual address.
887 *
888 * @return Locked address space area containing va on success or NULL on failure.
889 */
890as_area_t *find_area_and_lock(as_t *as, __address va)
891{
892 as_area_t *a;
893 btree_node_t *leaf, *lnode;
894 int i;
895
896 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
897 if (a) {
898 /* va is the base address of an address space area */
899 mutex_lock(&a->lock);
900 return a;
901 }
902
903 /*
904 * Search the leaf node and the rightmost record of its left neighbour
905 * to find out whether this is a miss or va belongs to an address
906 * space area found there.
907 */
908
909 /* First, search the leaf node itself. */
910 for (i = 0; i < leaf->keys; i++) {
911 a = (as_area_t *) leaf->value[i];
912 mutex_lock(&a->lock);
913 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
914 return a;
915 }
916 mutex_unlock(&a->lock);
917 }
918
919 /*
920 * Second, locate the left neighbour and test its last record.
921 * Because of its position in the B+tree, it must have base < va.
922 */
923 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
924 a = (as_area_t *) lnode->value[lnode->keys - 1];
925 mutex_lock(&a->lock);
926 if (va < a->base + a->pages * PAGE_SIZE) {
927 return a;
928 }
929 mutex_unlock(&a->lock);
930 }
931
932 return NULL;
933}
934
935/** Check area conflicts with other areas.
936 *
937 * The address space must be locked and interrupts must be disabled.
938 *
939 * @param as Address space.
940 * @param va Starting virtual address of the area being tested.
941 * @param size Size of the area being tested.
942 * @param avoid_area Do not touch this area.
943 *
944 * @return True if there is no conflict, false otherwise.
945 */
946bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
947{
948 as_area_t *a;
949 btree_node_t *leaf, *node;
950 int i;
951
952 /*
953 * We don't want any area to have conflicts with the NULL page.
954 */
955 if (overlaps(va, size, NULL, PAGE_SIZE))
956 return false;
957
958 /*
959 * The leaf node is found in O(log n), where n is proportional to
960 * the number of address space areas belonging to as.
961 * The check for conflicts is then attempted on the rightmost
962 * record in the left neighbour, the leftmost record in the right
963 * neighbour and all records in the leaf node itself.
964 */
965
966 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
967 if (a != avoid_area)
968 return false;
969 }
970
971 /* First, check the two border cases. */
972 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
973 a = (as_area_t *) node->value[node->keys - 1];
974 mutex_lock(&a->lock);
975 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
976 mutex_unlock(&a->lock);
977 return false;
978 }
979 mutex_unlock(&a->lock);
980 }
981 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
982 a = (as_area_t *) node->value[0];
983 mutex_lock(&a->lock);
984 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
985 mutex_unlock(&a->lock);
986 return false;
987 }
988 mutex_unlock(&a->lock);
989 }
990
991 /* Second, check the leaf node. */
992 for (i = 0; i < leaf->keys; i++) {
993 a = (as_area_t *) leaf->value[i];
994
995 if (a == avoid_area)
996 continue;
997
998 mutex_lock(&a->lock);
999 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1000 mutex_unlock(&a->lock);
1001 return false;
1002 }
1003 mutex_unlock(&a->lock);
1004 }
1005
1006 /*
1007 * So far, the area does not conflict with other areas.
1008 * Check that it does not conflict with the kernel address space either.
1009 */
1010 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1011 return !overlaps(va, size,
1012 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1013 }
1014
1015 return true;
1016}
1017
1018/** Return size of the address space area with given base. */
1019size_t as_get_size(__address base)
1020{
1021 ipl_t ipl;
1022 as_area_t *src_area;
1023 size_t size;
1024
1025 ipl = interrupts_disable();
1026 src_area = find_area_and_lock(AS, base);
1027 if (src_area){
1028 size = src_area->pages * PAGE_SIZE;
1029 mutex_unlock(&src_area->lock);
1030 } else {
1031 size = 0;
1032 }
1033 interrupts_restore(ipl);
1034 return size;
1035}
1036
1037/** Mark portion of address space area as used.
1038 *
1039 * The address space area must be already locked.
1040 *
1041 * @param a Address space area.
1042 * @param page First page to be marked.
1043 * @param count Number of pages to be marked.
1044 *
1045 * @return 0 on failure and 1 on success.
1046 */
1047int used_space_insert(as_area_t *a, __address page, count_t count)
1048{
1049 btree_node_t *leaf, *node;
1050 count_t pages;
1051 int i;
1052
1053 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1054 ASSERT(count);
1055
1056 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1057 if (pages) {
1058 /*
1059 * We hit the beginning of some used space.
1060 */
1061 return 0;
1062 }
1063
1064 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1065 if (node) {
1066 __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1067 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1068
1069 /*
1070 * Examine the possibility that the interval fits
1071 * somewhere between the rightmost interval of
1072 * the left neighbour and the first interval of the leaf.
1073 */
1074
1075 if (page >= right_pg) {
1076 /* Do nothing. */
1077 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1078 /* The interval intersects with the left interval. */
1079 return 0;
1080 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1081 /* The interval intersects with the right interval. */
1082 return 0;
1083 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1084 /* The interval can be added by merging the two already present intervals. */
1085 node->value[node->keys - 1] += count + right_cnt;
1086 btree_remove(&a->used_space, right_pg, leaf);
1087 return 1;
1088 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1089 /* The interval can be added by simply growing the left interval. */
1090 node->value[node->keys - 1] += count;
1091 return 1;
1092 } else if (page + count*PAGE_SIZE == right_pg) {
1093 /*
1094 * The interval can be added by simply moving the base of the right
1095 * interval down and increasing its size accordingly.
1096 */
1097 leaf->value[0] += count;
1098 leaf->key[0] = page;
1099 return 1;
1100 } else {
1101 /*
1102 * The interval is between both neighbouring intervals,
1103 * but cannot be merged with any of them.
1104 */
1105 btree_insert(&a->used_space, page, (void *) count, leaf);
1106 return 1;
1107 }
1108 } else if (page < leaf->key[0]) {
1109 __address right_pg = leaf->key[0];
1110 count_t right_cnt = (count_t) leaf->value[0];
1111
1112 /*
1113 * Investigate the border case in which the left neighbour does not
1114 * exist but the interval fits from the left.
1115 */
1116
1117 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1118 /* The interval intersects with the right interval. */
1119 return 0;
1120 } else if (page + count*PAGE_SIZE == right_pg) {
1121 /*
1122 * The interval can be added by moving the base of the right interval down
1123 * and increasing its size accordingly.
1124 */
1125 leaf->key[0] = page;
1126 leaf->value[0] += count;
1127 return 1;
1128 } else {
1129 /*
1130 * The interval does not adjoin the right interval.
1131 * It must be added individually.
1132 */
1133 btree_insert(&a->used_space, page, (void *) count, leaf);
1134 return 1;
1135 }
1136 }
1137
1138 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1139 if (node) {
1140 __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1141 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1142
1143 /*
1144 * Examine the possibility that the interval fits
1145 * somewhere between the leftmost interval of
1146 * the right neighbour and the last interval of the leaf.
1147 */
1148
1149 if (page < left_pg) {
1150 /* Do nothing. */
1151 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1152 /* The interval intersects with the left interval. */
1153 return 0;
1154 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1155 /* The interval intersects with the right interval. */
1156 return 0;
1157 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1158 /* The interval can be added by merging the two already present intervals. */
1159 leaf->value[leaf->keys - 1] += count + right_cnt;
1160 btree_remove(&a->used_space, right_pg, node);
1161 return 1;
1162 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1163 /* The interval can be added by simply growing the left interval. */
1164 leaf->value[leaf->keys - 1] += count;
1165 return 1;
1166 } else if (page + count*PAGE_SIZE == right_pg) {
1167 /*
1168 * The interval can be added by simply moving the base of the right
1169 * interval down and increasing its size accordingly.
1170 */
1171 node->value[0] += count;
1172 node->key[0] = page;
1173 return 1;
1174 } else {
1175 /*
1176 * The interval is between both neighbouring intervals,
1177 * but cannot be merged with any of them.
1178 */
1179 btree_insert(&a->used_space, page, (void *) count, leaf);
1180 return 1;
1181 }
1182 } else if (page >= leaf->key[leaf->keys - 1]) {
1183 __address left_pg = leaf->key[leaf->keys - 1];
1184 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1185
1186 /*
1187 * Investigate the border case in which the right neighbour does not
1188 * exist but the interval fits from the right.
1189 */
1190
1191 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1192 /* The interval intersects with the left interval. */
1193 return 0;
1194 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1195 /* The interval can be added by growing the left interval. */
1196 leaf->value[leaf->keys - 1] += count;
1197 return 1;
1198 } else {
1199 /*
1200 * The interval does not adjoin the left interval.
1201 * It must be added individually.
1202 */
1203 btree_insert(&a->used_space, page, (void *) count, leaf);
1204 return 1;
1205 }
1206 }
1207
1208 /*
1209 * Note that if the algorithm made it thus far, the interval can fit only
1210 * between two other intervals of the leaf. The two border cases were already
1211 * resolved.
1212 */
1213 for (i = 1; i < leaf->keys; i++) {
1214 if (page < leaf->key[i]) {
1215 __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1216 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1217
1218 /*
1219 * The interval fits between left_pg and right_pg.
1220 */
1221
1222 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1223 /* The interval intersects with the left interval. */
1224 return 0;
1225 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1226 /* The interval intersects with the right interval. */
1227 return 0;
1228 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1229 /* The interval can be added by merging the two already present intervals. */
1230 leaf->value[i - 1] += count + right_cnt;
1231 btree_remove(&a->used_space, right_pg, leaf);
1232 return 1;
1233 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1234 /* The interval can be added by simply growing the left interval. */
1235 leaf->value[i - 1] += count;
1236 return 1;
1237 } else if (page + count*PAGE_SIZE == right_pg) {
1238 /*
1239 * The interval can be added by simply moving the base of the right
1240 * interval down and increasing its size accordingly.
1241 */
1242 leaf->value[i] += count;
1243 leaf->key[i] = page;
1244 return 1;
1245 } else {
1246 /*
1247 * The interval is between both neighbouring intervals,
1248 * but cannot be merged with any of them.
1249 */
1250 btree_insert(&a->used_space, page, (void *) count, leaf);
1251 return 1;
1252 }
1253 }
1254 }
1255
1256 panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
1257}
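
/*
 * Example: if used_space already records an interval of two pages starting
 * at page P, then used_space_insert(a, P + 2*PAGE_SIZE, 1) merely grows that
 * interval to three pages, while used_space_insert(a, P + PAGE_SIZE, 1)
 * returns 0 because the page is already marked as used.
 */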
1258
1259/** Mark portion of address space area as unused.
1260 *
1261 * The address space area must be already locked.
1262 *
1263 * @param a Address space area.
1264 * @param page First page to be marked.
1265 * @param count Number of pages to be marked.
1266 *
1267 * @return 0 on failure and 1 on success.
1268 */
1269int used_space_remove(as_area_t *a, __address page, count_t count)
1270{
1271 btree_node_t *leaf, *node;
1272 count_t pages;
1273 int i;
1274
1275 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1276 ASSERT(count);
1277
1278 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1279 if (pages) {
1280 /*
1281 * We are lucky; page is the beginning of some interval.
1282 */
1283 if (count > pages) {
1284 return 0;
1285 } else if (count == pages) {
1286 btree_remove(&a->used_space, page, leaf);
1287 return 1;
1288 } else {
1289 /*
1290 * Find the respective interval.
1291 * Decrease its size and relocate its start address.
1292 */
1293 for (i = 0; i < leaf->keys; i++) {
1294 if (leaf->key[i] == page) {
1295 leaf->key[i] += count*PAGE_SIZE;
1296 leaf->value[i] -= count;
1297 return 1;
1298 }
1299 }
1300 goto error;
1301 }
1302 }
1303
1304 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1305 if (node && page < leaf->key[0]) {
1306 __address left_pg = node->key[node->keys - 1];
1307 count_t left_cnt = (count_t) node->value[node->keys - 1];
1308
1309 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1310 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1311 /*
1312 * The interval is contained in the rightmost interval
1313 * of the left neighbour and can be removed by
1314 * updating the size of the bigger interval.
1315 */
1316 node->value[node->keys - 1] -= count;
1317 return 1;
1318 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1319 count_t new_cnt;
1320
1321 /*
1322 * The interval is contained in the rightmost interval
1323 * of the left neighbour but its removal requires
1324 * both updating the size of the original interval and
1325 * also inserting a new interval.
1326 */
1327 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1328 node->value[node->keys - 1] -= count + new_cnt;
1329 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1330 return 1;
1331 }
1332 }
1333 return 0;
1334 } else if (page < leaf->key[0]) {
1335 return 0;
1336 }
1337
1338 if (page > leaf->key[leaf->keys - 1]) {
1339 __address left_pg = leaf->key[leaf->keys - 1];
1340 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1341
1342 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1343 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1344 /*
1345 * The interval is contained in the rightmost interval
1346 * of the leaf and can be removed by updating the size
1347 * of the bigger interval.
1348 */
1349 leaf->value[leaf->keys - 1] -= count;
1350 return 1;
1351 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1352 count_t new_cnt;
1353
1354 /*
1355 * The interval is contained in the rightmost interval
1356 * of the leaf but its removal requires both updating
1357 * the size of the original interval and
1358 * also inserting a new interval.
1359 */
1360 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1361 leaf->value[leaf->keys - 1] -= count + new_cnt;
1362 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1363 return 1;
1364 }
1365 }
1366 return 0;
1367 }
1368
1369 /*
1370 * The border cases have been already resolved.
1371 * Now the interval can be only between intervals of the leaf.
1372 */
1373 for (i = 1; i < leaf->keys - 1; i++) {
1374 if (page < leaf->key[i]) {
1375 __address left_pg = leaf->key[i - 1];
1376 count_t left_cnt = (count_t) leaf->value[i - 1];
1377
1378 /*
1379 * Now the interval is between intervals corresponding to (i - 1) and i.
1380 */
1381 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1382 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1383 /*
1384 * The interval is contained in the interval (i - 1)
1385 * of the leaf and can be removed by updating the size
1386 * of the bigger interval.
1387 */
1388 leaf->value[i - 1] -= count;
1389 return 1;
1390 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1391 count_t new_cnt;
1392
1393 /*
1394 * The interval is contained in the interval (i - 1)
1395 * of the leaf but its removal requires both updating
1396 * the size of the original interval and
1397 * also inserting a new interval.
1398 */
1399 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1400 leaf->value[i - 1] -= count + new_cnt;
1401 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1402 return 1;
1403 }
1404 }
1405 return 0;
1406 }
1407 }
1408
1409error:
1410 panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
1411}
1412
1413/** Remove reference to address space area share info.
1414 *
1415 * If the reference count drops to 0, the sh_info is deallocated.
1416 *
1417 * @param sh_info Pointer to address space area share info.
1418 */
1419void sh_info_remove_reference(share_info_t *sh_info)
1420{
1421 bool dealloc = false;
1422
1423 mutex_lock(&sh_info->lock);
1424 ASSERT(sh_info->refcount);
1425 if (--sh_info->refcount == 0) {
1426 dealloc = true;
1427 bool cond;
1428
1429 /*
1430 * Now walk carefully the pagemap B+tree and free/remove
1431 * reference from all frames found there.
1432 */
1433 for (cond = true; cond;) {
1434 btree_node_t *node;
1435
1436 ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
1437 node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
1438 if ((cond = node->keys)) {
1439 frame_free(ADDR2PFN((__address) node->value[0]));
1440 btree_remove(&sh_info->pagemap, node->key[0], node);
1441 }
1442 }
1443
1444 }
1445 mutex_unlock(&sh_info->lock);
1446
1447 if (dealloc) {
1448 btree_destroy(&sh_info->pagemap);
1449 free(sh_info);
1450 }
1451}
1452
1453static int anon_page_fault(as_area_t *area, __address addr);
1454static void anon_frame_free(as_area_t *area, __address page, __address frame);
1455
1456/*
1457 * Anonymous memory backend.
1458 */
1459mem_backend_t anon_backend = {
1460 .backend_page_fault = anon_page_fault,
1461 .backend_frame_free = anon_frame_free
1462};
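
/*
 * A backend is attached to an area simply by passing its address to
 * as_area_create(), as sys_as_area_create() below does with &anon_backend.
 * A minimal hypothetical backend that neither services faults nor owns its
 * frames would look like this (sketch only, not used anywhere):
 *
 *	mem_backend_t null_backend = {
 *		.backend_page_fault = NULL,
 *		.backend_frame_free = NULL
 *	};
 *
 * For fault handling and frame freeing, areas with such a backend behave
 * like backend-less areas: as_page_fault() refuses their faults and
 * as_area_resize()/as_area_destroy() leave the underlying frames alone.
 */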
1463
1464/** Service a page fault in the anonymous memory address space area.
1465 *
1466 * The address space area and page tables must be already locked.
1467 *
1468 * @param area Pointer to the address space area.
1469 * @param addr Faulting virtual address.
1470 *
1471 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
1472 */
1473int anon_page_fault(as_area_t *area, __address addr)
1474{
1475 __address frame;
1476
1477 if (area->sh_info) {
1478 btree_node_t *leaf;
1479
1480 /*
1481 * The area is shared, chances are that the mapping can be found
1482 * in the pagemap of the address space area share info structure.
1483 * In the case that the pagemap does not contain the respective
1484 * mapping, a new frame is allocated and the mapping is created.
1485 */
1486 mutex_lock(&area->sh_info->lock);
1487 frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
1488 if (!frame) {
1489 bool allocate = true;
1490 int i;
1491
1492 /*
1493 * Zero is a valid frame address too, so a pagemap miss cannot be
1494 * told from a mapping to frame zero; check the leaf keys explicitly.
1495 */
1496 for (i = 0; i < leaf->keys; i++) {
1497 if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
1498 allocate = false;
1499 break;
1500 }
1501 }
1502 if (allocate) {
1503 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
1504 memsetb(PA2KA(frame), FRAME_SIZE, 0);
1505
1506 /*
1507 * Insert the address of the newly allocated frame to the pagemap.
1508 */
1509 btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
1510 }
1511 }
1512 mutex_unlock(&area->sh_info->lock);
1513 } else {
1514
1515 /*
1516 * In general, there can be several reasons that
1517 * can have caused this fault.
1518 *
1519 * - non-existent mapping: the area is an anonymous
1520 * area (e.g. heap or stack) and so far has not been
1521 * allocated a frame for the faulting page
1522 *
1523 * - non-present mapping: another possibility,
1524 * currently not implemented, would be frame
1525 * reuse; when this becomes a possibility,
1526 * do not forget to distinguish between
1527 * the different causes
1528 */
1529 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
1530 memsetb(PA2KA(frame), FRAME_SIZE, 0);
1531 }
1532
1533 /*
1534 * Map 'page' to 'frame'.
1535 * Note that TLB shootdown is not attempted as only new information is being
1536 * inserted into page tables.
1537 */
1538 page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
1539 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
1540 panic("Could not insert used space.\n");
1541
1542 return AS_PF_OK;
1543}
1544
1545/** Free a frame that is backed by the anonymous memory backend.
1546 *
1547 * The address space area and page tables must be already locked.
1548 *
1549 * @param area Ignored.
1550 * @param page Ignored.
1551 * @param frame Frame to be released.
1552 */
1553void anon_frame_free(as_area_t *area, __address page, __address frame)
1554{
1555 frame_free(ADDR2PFN(frame));
1556}
1557
1558/*
1559 * Address space related syscalls.
1560 */
1561
1562/** Wrapper for as_area_create(). */
1563__native sys_as_area_create(__address address, size_t size, int flags)
1564{
1565 if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1566 return (__native) address;
1567 else
1568 return (__native) -1;
1569}
1570
1571/** Wrapper for as_area_resize. */
1572__native sys_as_area_resize(__address address, size_t size, int flags)
1573{
1574 return (__native) as_area_resize(AS, address, size, 0);
1575}
1576
1577/** Wrapper for as_area_destroy. */
1578__native sys_as_area_destroy(__address address)
1579{
1580 return (__native) as_area_destroy(AS, address);
1581}