source: mainline/generic/src/mm/as.c@ bd571f44

Last change on this file since bd571f44 was 482826d, checked in by Jakub Jermar <jakub@…>, 19 years ago

Function for destroying address space for which there is no other reference in the kernel.

1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * @file as.c
31 * @brief Address space related functions.
32 *
33 * This file contains address space manipulation functions.
34 * Roughly speaking, this is a higher-level client of
35 * Virtual Address Translation (VAT) subsystem.
36 *
37 * Functionality provided by this file allows one to
38 * create address space and create, resize and share
39 * address space areas.
40 *
41 * @see page.c
42 *
43 */
44
45#include <mm/as.h>
46#include <arch/mm/as.h>
47#include <mm/page.h>
48#include <mm/frame.h>
49#include <mm/slab.h>
50#include <mm/tlb.h>
51#include <arch/mm/page.h>
52#include <genarch/mm/page_pt.h>
53#include <genarch/mm/page_ht.h>
54#include <mm/asid.h>
55#include <arch/mm/asid.h>
56#include <synch/spinlock.h>
57#include <synch/mutex.h>
58#include <adt/list.h>
59#include <adt/btree.h>
60#include <proc/task.h>
61#include <proc/thread.h>
62#include <arch/asm.h>
63#include <panic.h>
64#include <debug.h>
65#include <print.h>
66#include <memstr.h>
67#include <macros.h>
68#include <arch.h>
69#include <errno.h>
70#include <config.h>
71#include <align.h>
72#include <arch/types.h>
73#include <typedefs.h>
74#include <syscall/copy.h>
75#include <arch/interrupt.h>
76
77as_operations_t *as_operations = NULL;
78
79/** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */
80SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
81
82/**
83 * This list contains address spaces that are not active on any
84 * processor and that have valid ASID.
85 */
86LIST_INITIALIZE(inactive_as_with_asid_head);
87
88/** Kernel address space. */
89as_t *AS_KERNEL = NULL;
90
91static int area_flags_to_page_flags(int aflags);
92static as_area_t *find_area_and_lock(as_t *as, __address va);
93static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
94static void sh_info_remove_reference(share_info_t *sh_info);
95
96/** Initialize address space subsystem. */
97void as_init(void)
98{
99 as_arch_init();
100 AS_KERNEL = as_create(FLAG_AS_KERNEL);
101 if (!AS_KERNEL)
102 panic("can't create kernel address space\n");
103
104}
105
106/** Create address space.
107 *
108 * @param flags Flags that influence the way in which the address space is created.
109 */
110as_t *as_create(int flags)
111{
112 as_t *as;
113
114 as = (as_t *) malloc(sizeof(as_t), 0);
115 link_initialize(&as->inactive_as_with_asid_link);
116 mutex_initialize(&as->lock);
117 btree_create(&as->as_area_btree);
118
119 if (flags & FLAG_AS_KERNEL)
120 as->asid = ASID_KERNEL;
121 else
122 as->asid = ASID_INVALID;
123
124 as->refcount = 0;
125 as->cpu_refcount = 0;
126 as->page_table = page_table_create(flags);
127
128 return as;
129}
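
/*
 * Editorial usage sketch (not part of the original file): creating a plain
 * user address space. Passing 0, i.e. no FLAG_AS_KERNEL, yields an address
 * space whose asid is ASID_INVALID; an ASID is only assigned lazily by
 * as_switch() when the address space first becomes active.
 *
 *     as_t *as = as_create(0);
 *     ASSERT(as->asid == ASID_INVALID);
 *     ASSERT(as->refcount == 0);
 */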
130
131/** Destroy address space.
132 *
133 * When there are no tasks referencing this address space (i.e. its refcount is zero),
134 * the address space can be destroyed.
135 */
136void as_destroy(as_t *as)
137{
138 ipl_t ipl;
139 bool cond;
140
141 ASSERT(as->refcount == 0);
142
143 /*
144 * Since there is no reference to this address space,
145 * it is safe not to lock its mutex.
146 */
147
148 ipl = interrupts_disable();
149 spinlock_lock(&inactive_as_with_asid_lock);
150 if (as->asid != ASID_INVALID && as->asid != ASID_KERNEL) {
151 list_remove(&as->inactive_as_with_asid_link);
152 asid_put(as->asid);
153 }
154 spinlock_unlock(&inactive_as_with_asid_lock);
155
156 /*
157 * Destroy address space areas of the address space.
158 */
159 for (cond = true; cond; ) {
160 btree_node_t *node;
161
162 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
163 node = list_get_instance(&as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
164 if ((cond = node->keys)) {
165 as_area_destroy(as, node->key[0]);
166 btree_remove(&as->as_area_btree, node->key[0], node);
167 }
168 }
169
170 page_table_destroy(as->page_table);
171
172 interrupts_restore(ipl);
173
174 free(as);
175}
176
177/** Create address space area of common attributes.
178 *
179 * The created address space area is added to the target address space.
180 *
181 * @param as Target address space.
182 * @param flags Flags of the area memory.
183 * @param size Size of area.
184 * @param base Base address of area.
185 * @param attrs Attributes of the area.
186 * @param backend Address space area backend. NULL if no backend is used.
187 * @param backend_data NULL or a pointer to an array holding two void *.
188 *
189 * @return Address space area on success or NULL on failure.
190 */
191as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
192 mem_backend_t *backend, mem_backend_data_t *backend_data)
193{
194 ipl_t ipl;
195 as_area_t *a;
196
197 if (base % PAGE_SIZE)
198 return NULL;
199
200 if (!size)
201 return NULL;
202
203 /* Writeable executable areas are not supported. */
204 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
205 return NULL;
206
207 ipl = interrupts_disable();
208 mutex_lock(&as->lock);
209
210 if (!check_area_conflicts(as, base, size, NULL)) {
211 mutex_unlock(&as->lock);
212 interrupts_restore(ipl);
213 return NULL;
214 }
215
216 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
217
218 mutex_initialize(&a->lock);
219
220 a->as = as;
221 a->flags = flags;
222 a->attributes = attrs;
223 a->pages = SIZE2FRAMES(size);
224 a->base = base;
225 a->sh_info = NULL;
226 a->backend = backend;
227 if (backend_data)
228 a->backend_data = *backend_data;
229 else
230 memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
231
232 btree_create(&a->used_space);
233
234 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
235
236 mutex_unlock(&as->lock);
237 interrupts_restore(ipl);
238
239 return a;
240}
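
/*
 * Editorial usage sketch (not part of the original file; the base address is
 * an arbitrary page-aligned illustration value): carving an anonymous
 * read/write area out of an existing address space as. The call returns NULL
 * if the base is misaligned, the size is zero, the area would be both
 * writable and executable, or the range conflicts with an existing area.
 *
 *     as_area_t *area;
 *
 *     area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *         16 * PAGE_SIZE, 0x10000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *     if (!area) {
 *             // conflicting, misaligned or otherwise invalid request
 *     }
 */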
241
242/** Find address space area and change it.
243 *
244 * @param as Address space.
245 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
246 * @param size New size of the virtual memory block starting at address.
247 * @param flags Flags influencing the remap operation. Currently unused.
248 *
249 * @return Zero on success or a value from @ref errno.h otherwise.
250 */
251int as_area_resize(as_t *as, __address address, size_t size, int flags)
252{
253 as_area_t *area;
254 ipl_t ipl;
255 size_t pages;
256
257 ipl = interrupts_disable();
258 mutex_lock(&as->lock);
259
260 /*
261 * Locate the area.
262 */
263 area = find_area_and_lock(as, address);
264 if (!area) {
265 mutex_unlock(&as->lock);
266 interrupts_restore(ipl);
267 return ENOENT;
268 }
269
270 if (area->backend == &phys_backend) {
271 /*
272 * Remapping of address space areas associated
273 * with memory mapped devices is not supported.
274 */
275 mutex_unlock(&area->lock);
276 mutex_unlock(&as->lock);
277 interrupts_restore(ipl);
278 return ENOTSUP;
279 }
280 if (area->sh_info) {
281 /*
282 * Remapping of shared address space areas
283 * is not supported.
284 */
285 mutex_unlock(&area->lock);
286 mutex_unlock(&as->lock);
287 interrupts_restore(ipl);
288 return ENOTSUP;
289 }
290
291 pages = SIZE2FRAMES((address - area->base) + size);
292 if (!pages) {
293 /*
294 * Zero size address space areas are not allowed.
295 */
296 mutex_unlock(&area->lock);
297 mutex_unlock(&as->lock);
298 interrupts_restore(ipl);
299 return EPERM;
300 }
301
302 if (pages < area->pages) {
303 bool cond;
304 __address start_free = area->base + pages*PAGE_SIZE;
305
306 /*
307 * Shrinking the area.
308 * No need to check for overlaps.
309 */
310
311 /*
312 * Start TLB shootdown sequence.
313 */
314 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
315
316 /*
317 * Remove frames belonging to used space starting from
318 * the highest addresses downwards until an overlap with
319 * the resized address space area is found. Note that this
320 * is also the right way to remove part of the used_space
321 * B+tree leaf list.
322 */
323 for (cond = true; cond;) {
324 btree_node_t *node;
325
326 ASSERT(!list_empty(&area->used_space.leaf_head));
327 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
328 if ((cond = (bool) node->keys)) {
329 __address b = node->key[node->keys - 1];
330 count_t c = (count_t) node->value[node->keys - 1];
331 int i = 0;
332
333 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
334
335 if (b + c*PAGE_SIZE <= start_free) {
336 /*
337 * The whole interval fits completely
338 * in the resized address space area.
339 */
340 break;
341 }
342
343 /*
344 * Part of the interval corresponding to b and c
345 * overlaps with the resized address space area.
346 */
347
348 cond = false; /* we are almost done */
349 i = (start_free - b) >> PAGE_WIDTH;
350 if (!used_space_remove(area, start_free, c - i))
351 panic("Could not remove used space.");
352 } else {
353 /*
354 * The interval of used space can be completely removed.
355 */
356 if (!used_space_remove(area, b, c))
357 panic("Could not remove used space.\n");
358 }
359
360 for (; i < c; i++) {
361 pte_t *pte;
362
363 page_table_lock(as, false);
364 pte = page_mapping_find(as, b + i*PAGE_SIZE);
365 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
366 if (area->backend && area->backend->frame_free) {
367 area->backend->frame_free(area,
368 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
369 }
370 page_mapping_remove(as, b + i*PAGE_SIZE);
371 page_table_unlock(as, false);
372 }
373 }
374 }
375
376 /*
377 * Finish TLB shootdown sequence.
378 */
379 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
380 tlb_shootdown_finalize();
381 } else {
382 /*
383 * Growing the area.
384 * Check for overlaps with other address space areas.
385 */
386 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
387 mutex_unlock(&area->lock);
388 mutex_unlock(&as->lock);
389 interrupts_restore(ipl);
390 return EADDRNOTAVAIL;
391 }
392 }
393
394 area->pages = pages;
395
396 mutex_unlock(&area->lock);
397 mutex_unlock(&as->lock);
398 interrupts_restore(ipl);
399
400 return 0;
401}
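
/*
 * Editorial usage sketch (not part of the original file; 0x10000 is the same
 * illustration base address as above): growing that area to twenty pages.
 * The address identifies the area and the new size is counted from that
 * address; 0 is returned on success, an errno.h value otherwise.
 *
 *     int rc = as_area_resize(as, 0x10000, 20 * PAGE_SIZE, 0);
 *     if (rc != 0) {
 *             // e.g. ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL
 *     }
 */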
402
403/** Destroy address space area.
404 *
405 * @param as Address space.
406 * @param address Address within the area to be deleted.
407 *
408 * @return Zero on success or a value from @ref errno.h on failure.
409 */
410int as_area_destroy(as_t *as, __address address)
411{
412 as_area_t *area;
413 __address base;
414 ipl_t ipl;
415 bool cond;
416
417 ipl = interrupts_disable();
418 mutex_lock(&as->lock);
419
420 area = find_area_and_lock(as, address);
421 if (!area) {
422 mutex_unlock(&as->lock);
423 interrupts_restore(ipl);
424 return ENOENT;
425 }
426
427 base = area->base;
428
429 /*
430 * Start TLB shootdown sequence.
431 */
432 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
433
434 /*
435 * Visit only the pages mapped by used_space B+tree.
436 * Note that we must be very careful when walking the tree
437 * leaf list and removing used space as the leaf list changes
438 * unpredictably after each remove. The solution is to not
439 * walk the tree at all, but to keep removing items from the
440 * head of the leaf list as long as there are keys left.
441 */
442 for (cond = true; cond;) {
443 btree_node_t *node;
444
445 ASSERT(!list_empty(&area->used_space.leaf_head));
446 node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
447 if ((cond = (bool) node->keys)) {
448 __address b = node->key[0];
449 count_t i;
450 pte_t *pte;
451
452 for (i = 0; i < (count_t) node->value[0]; i++) {
453 page_table_lock(as, false);
454 pte = page_mapping_find(as, b + i*PAGE_SIZE);
455 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
456 if (area->backend && area->backend->frame_free) {
457 area->backend->frame_free(area,
458 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
459 }
460 page_mapping_remove(as, b + i*PAGE_SIZE);
461 page_table_unlock(as, false);
462 }
463 if (!used_space_remove(area, b, i))
464 panic("Could not remove used space.\n");
465 }
466 }
467
468 /*
469 * Finish TLB shootdown sequence.
470 */
471 tlb_invalidate_pages(AS->asid, area->base, area->pages);
472 tlb_shootdown_finalize();
473
474 btree_destroy(&area->used_space);
475
476 area->attributes |= AS_AREA_ATTR_PARTIAL;
477
478 if (area->sh_info)
479 sh_info_remove_reference(area->sh_info);
480
481 mutex_unlock(&area->lock);
482
483 /*
484 * Remove the empty area from address space.
485 */
486 btree_remove(&AS->as_area_btree, base, NULL);
487
488 free(area);
489
490 mutex_unlock(&AS->lock);
491 interrupts_restore(ipl);
492 return 0;
493}
494
495/** Share address space area with another or the same address space.
496 *
497 * Address space area mapping is shared with a new address space area.
498 * If the source address space area has not been shared so far,
499 * a new sh_info is created. The new address space area simply gets the
500 * sh_info of the source area. The process of duplicating the
501 * mapping is done through the backend share function.
502 *
503 * @param src_as Pointer to source address space.
504 * @param src_base Base address of the source address space area.
505 * @param acc_size Expected size of the source area.
506 * @param dst_as Pointer to destination address space.
507 * @param dst_base Target base address.
508 * @param dst_flags_mask Destination address space area flags mask.
509 *
510 * @return Zero on success, ENOENT if there is no such address space area,
511 * EPERM if there was a problem in accepting the area,
512 * ENOMEM if there was a problem in allocating the destination
513 * address space area, or ENOTSUP if an attempt to share
514 * a non-anonymous address space area (i.e. one whose backend
515 * does not support sharing) is detected.
516 */
517int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
518 as_t *dst_as, __address dst_base, int dst_flags_mask)
519{
520 ipl_t ipl;
521 int src_flags;
522 size_t src_size;
523 as_area_t *src_area, *dst_area;
524 share_info_t *sh_info;
525 mem_backend_t *src_backend;
526 mem_backend_data_t src_backend_data;
527
528 ipl = interrupts_disable();
529 mutex_lock(&src_as->lock);
530 src_area = find_area_and_lock(src_as, src_base);
531 if (!src_area) {
532 /*
533 * Could not find the source address space area.
534 */
535 mutex_unlock(&src_as->lock);
536 interrupts_restore(ipl);
537 return ENOENT;
538 }
539
540 if (!src_area->backend || !src_area->backend->share) {
541 /*
542 * There is no backend or the backend does not
543 * know how to share the area.
544 */
545 mutex_unlock(&src_area->lock);
546 mutex_unlock(&src_as->lock);
547 interrupts_restore(ipl);
548 return ENOTSUP;
549 }
550
551 src_size = src_area->pages * PAGE_SIZE;
552 src_flags = src_area->flags;
553 src_backend = src_area->backend;
554 src_backend_data = src_area->backend_data;
555
556 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
557 mutex_unlock(&src_area->lock);
558 mutex_unlock(&src_as->lock);
559 interrupts_restore(ipl);
560 return EPERM;
561 }
562
563 /*
564 * Now we are committed to sharing the area.
565 * First prepare the area for sharing.
566 * Then it will be safe to unlock it.
567 */
568 sh_info = src_area->sh_info;
569 if (!sh_info) {
570 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
571 mutex_initialize(&sh_info->lock);
572 sh_info->refcount = 2;
573 btree_create(&sh_info->pagemap);
574 src_area->sh_info = sh_info;
575 } else {
576 mutex_lock(&sh_info->lock);
577 sh_info->refcount++;
578 mutex_unlock(&sh_info->lock);
579 }
580
581 src_area->backend->share(src_area);
582
583 mutex_unlock(&src_area->lock);
584 mutex_unlock(&src_as->lock);
585
586 /*
587 * Create copy of the source address space area.
588 * The destination area is created with AS_AREA_ATTR_PARTIAL
589 * attribute set which prevents race condition with
590 * preliminary as_page_fault() calls.
591 * The flags of the source area are masked against dst_flags_mask
592 * to support sharing in less privileged mode.
593 */
594 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
595 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
596 if (!dst_area) {
597 /*
598 * Destination address space area could not be created.
599 */
600 sh_info_remove_reference(sh_info);
601
602 interrupts_restore(ipl);
603 return ENOMEM;
604 }
605
606 /*
607 * Now the destination address space area has been
608 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
609 * attribute and set the sh_info.
610 */
611 mutex_lock(&dst_area->lock);
612 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
613 dst_area->sh_info = sh_info;
614 mutex_unlock(&dst_area->lock);
615
616 interrupts_restore(ipl);
617
618 return 0;
619}
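
/*
 * Editorial usage sketch (not part of the original file; src_as, src_base,
 * src_size, dst_as and dst_base are hypothetical variables): sharing the
 * source area read-only into another address space. src_size must equal the
 * source area size exactly and the flags mask must be a subset of the source
 * area flags, otherwise EPERM is returned.
 *
 *     int rc = as_area_share(src_as, src_base, src_size, dst_as, dst_base,
 *         AS_AREA_READ);
 *     if (rc != 0) {
 *             // ENOENT, ENOTSUP, EPERM or ENOMEM
 *     }
 */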
620
621/** Check access mode for address space area.
622 *
623 * The address space area must be locked prior to this call.
624 *
625 * @param area Address space area.
626 * @param access Access mode.
627 *
628 * @return False if access violates area's permissions, true otherwise.
629 */
630bool as_area_check_access(as_area_t *area, pf_access_t access)
631{
632 int flagmap[] = {
633 [PF_ACCESS_READ] = AS_AREA_READ,
634 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
635 [PF_ACCESS_EXEC] = AS_AREA_EXEC
636 };
637
638 if (!(area->flags & flagmap[access]))
639 return false;
640
641 return true;
642}
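
/*
 * Editorial illustration (not part of the original file): a backend's
 * page_fault handler would typically use this check to reject a disallowed
 * access before establishing any mapping (the area is already locked when
 * the backend is invoked):
 *
 *     if (!as_area_check_access(area, access))
 *             return AS_PF_FAULT;
 */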
643
644/** Handle page fault within the current address space.
645 *
646 * This is the high-level page fault handler. It decides
647 * whether the page fault can be resolved by any backend
648 * and if so, it invokes the backend to resolve the page
649 * fault.
650 *
651 * Interrupts are assumed disabled.
652 *
653 * @param page Faulting page.
654 * @param access Access mode that caused the fault (i.e. read/write/exec).
655 * @param istate Pointer to interrupted state.
656 *
657 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
658 * fault was caused by copy_to_uspace() or copy_from_uspace().
659 */
660int as_page_fault(__address page, pf_access_t access, istate_t *istate)
661{
662 pte_t *pte;
663 as_area_t *area;
664
665 if (!THREAD)
666 return AS_PF_FAULT;
667
668 ASSERT(AS);
669
670 mutex_lock(&AS->lock);
671 area = find_area_and_lock(AS, page);
672 if (!area) {
673 /*
674 * No area contained mapping for 'page'.
675 * Signal page fault to low-level handler.
676 */
677 mutex_unlock(&AS->lock);
678 goto page_fault;
679 }
680
681 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
682 /*
683 * The address space area is not fully initialized.
684 * Avoid possible race by returning error.
685 */
686 mutex_unlock(&area->lock);
687 mutex_unlock(&AS->lock);
688 goto page_fault;
689 }
690
691 if (!area->backend || !area->backend->page_fault) {
692 /*
693 * The address space area is not backed by any backend
694 * or the backend cannot handle page faults.
695 */
696 mutex_unlock(&area->lock);
697 mutex_unlock(&AS->lock);
698 goto page_fault;
699 }
700
701 page_table_lock(AS, false);
702
703 /*
704 * To avoid race condition between two page faults
705 * on the same address, we need to make sure
706 * the mapping has not been already inserted.
707 */
708 if ((pte = page_mapping_find(AS, page))) {
709 if (PTE_PRESENT(pte)) {
710 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
711 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
712 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
713 page_table_unlock(AS, false);
714 mutex_unlock(&area->lock);
715 mutex_unlock(&AS->lock);
716 return AS_PF_OK;
717 }
718 }
719 }
720
721 /*
722 * Resort to the backend page fault handler.
723 */
724 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
725 page_table_unlock(AS, false);
726 mutex_unlock(&area->lock);
727 mutex_unlock(&AS->lock);
728 goto page_fault;
729 }
730
731 page_table_unlock(AS, false);
732 mutex_unlock(&area->lock);
733 mutex_unlock(&AS->lock);
734 return AS_PF_OK;
735
736page_fault:
737 if (THREAD->in_copy_from_uspace) {
738 THREAD->in_copy_from_uspace = false;
739 istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
740 } else if (THREAD->in_copy_to_uspace) {
741 THREAD->in_copy_to_uspace = false;
742 istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
743 } else {
744 return AS_PF_FAULT;
745 }
746
747 return AS_PF_DEFER;
748}
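
/*
 * Editorial illustration (not part of the original file; faulting_address and
 * istate are hypothetical names): an architecture-level fault handler, with
 * interrupts disabled, is expected to dispatch here and panic only when the
 * fault can neither be resolved by a backend nor deferred:
 *
 *     if (as_page_fault(faulting_address, PF_ACCESS_READ, istate) == AS_PF_FAULT)
 *             panic("page fault at %P\n", faulting_address);
 */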
749
750/** Switch address spaces.
751 *
752 * Note that this function cannot sleep as it is essentially a part of
753 * scheduling. Sleeping here would lead to deadlock on wakeup.
754 *
755 * @param old Old address space or NULL.
756 * @param new New address space.
757 */
758void as_switch(as_t *old, as_t *new)
759{
760 ipl_t ipl;
761 bool needs_asid = false;
762
763 ipl = interrupts_disable();
764 spinlock_lock(&inactive_as_with_asid_lock);
765
766 /*
767 * First, take care of the old address space.
768 */
769 if (old) {
770 mutex_lock_active(&old->lock);
771 ASSERT(old->cpu_refcount);
772 if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
773 /*
774 * The old address space is no longer active on
775 * any processor. It can be appended to the
776 * list of inactive address spaces with assigned
777 * ASID.
778 */
779 ASSERT(old->asid != ASID_INVALID);
780 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
781 }
782 mutex_unlock(&old->lock);
783 }
784
785 /*
786 * Second, prepare the new address space.
787 */
788 mutex_lock_active(&new->lock);
789 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
790 if (new->asid != ASID_INVALID)
791 list_remove(&new->inactive_as_with_asid_link);
792 else
793 needs_asid = true; /* defer call to asid_get() until new->lock is released */
794 }
795 SET_PTL0_ADDRESS(new->page_table);
796 mutex_unlock(&new->lock);
797
798 if (needs_asid) {
799 /*
800 * Allocation of new ASID was deferred
801 * until now in order to avoid deadlock.
802 */
803 asid_t asid;
804
805 asid = asid_get();
806 mutex_lock_active(&new->lock);
807 new->asid = asid;
808 mutex_unlock(&new->lock);
809 }
810 spinlock_unlock(&inactive_as_with_asid_lock);
811 interrupts_restore(ipl);
812
813 /*
814 * Perform architecture-specific steps.
815 * (e.g. write ASID to hardware register etc.)
816 */
817 as_install_arch(new);
818
819 AS = new;
820}
821
822/** Convert address space area flags to page flags.
823 *
824 * @param aflags Flags of some address space area.
825 *
826 * @return Flags to be passed to page_mapping_insert().
827 */
828int area_flags_to_page_flags(int aflags)
829{
830 int flags;
831
832 flags = PAGE_USER | PAGE_PRESENT;
833
834 if (aflags & AS_AREA_READ)
835 flags |= PAGE_READ;
836
837 if (aflags & AS_AREA_WRITE)
838 flags |= PAGE_WRITE;
839
840 if (aflags & AS_AREA_EXEC)
841 flags |= PAGE_EXEC;
842
843 if (aflags & AS_AREA_CACHEABLE)
844 flags |= PAGE_CACHEABLE;
845
846 return flags;
847}
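
/*
 * Editorial worked example (not part of the original file): a cacheable
 * read/write area maps to user-accessible, present, readable, writable and
 * cacheable page flags:
 *
 *     area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE)
 *         == (PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE)
 */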
848
849/** Compute flags for the virtual address translation subsystem.
850 *
851 * The address space area must be locked.
852 * Interrupts must be disabled.
853 *
854 * @param a Address space area.
855 *
856 * @return Flags to be used in page_mapping_insert().
857 */
858int as_area_get_flags(as_area_t *a)
859{
860 return area_flags_to_page_flags(a->flags);
861}
862
863/** Create page table.
864 *
865 * Depending on architecture, create either address space
866 * private or global page table.
867 *
868 * @param flags Flags saying whether the page table is for kernel address space.
869 *
870 * @return First entry of the page table.
871 */
872pte_t *page_table_create(int flags)
873{
874 ASSERT(as_operations);
875 ASSERT(as_operations->page_table_create);
876
877 return as_operations->page_table_create(flags);
878}
879
880/** Destroy page table.
881 *
882 * Destroy page table in architecture specific way.
883 *
884 * @param page_table Physical address of PTL0.
885 */
886void page_table_destroy(pte_t *page_table)
887{
888 ASSERT(as_operations);
889 ASSERT(as_operations->page_table_destroy);
890
891 as_operations->page_table_destroy(page_table);
892}
893
894/** Lock page table.
895 *
896 * This function should be called before any page_mapping_insert(),
897 * page_mapping_remove() and page_mapping_find().
898 *
899 * Locking order is such that address space areas must be locked
900 * prior to this call. Address space can be locked prior to this
901 * call in which case the lock argument is false.
902 *
903 * @param as Address space.
904 * @param lock If false, do not attempt to lock as->lock.
905 */
906void page_table_lock(as_t *as, bool lock)
907{
908 ASSERT(as_operations);
909 ASSERT(as_operations->page_table_lock);
910
911 as_operations->page_table_lock(as, lock);
912}
913
914/** Unlock page table.
915 *
916 * @param as Address space.
917 * @param unlock If false, do not attempt to unlock as->lock.
918 */
919void page_table_unlock(as_t *as, bool unlock)
920{
921 ASSERT(as_operations);
922 ASSERT(as_operations->page_table_unlock);
923
924 as_operations->page_table_unlock(as, unlock);
925}
926
927
928/** Find address space area and lock it.
929 *
930 * The address space must be locked and interrupts must be disabled.
931 *
932 * @param as Address space.
933 * @param va Virtual address.
934 *
935 * @return Locked address space area containing va on success or NULL on failure.
936 */
937as_area_t *find_area_and_lock(as_t *as, __address va)
938{
939 as_area_t *a;
940 btree_node_t *leaf, *lnode;
941 int i;
942
943 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
944 if (a) {
945 /* va is the base address of an address space area */
946 mutex_lock(&a->lock);
947 return a;
948 }
949
950 /*
951 * Search the leaf node and the rightmost record of its left neighbour
952 * to find out whether this is a miss or va belongs to an address
953 * space area found there.
954 */
955
956 /* First, search the leaf node itself. */
957 for (i = 0; i < leaf->keys; i++) {
958 a = (as_area_t *) leaf->value[i];
959 mutex_lock(&a->lock);
960 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
961 return a;
962 }
963 mutex_unlock(&a->lock);
964 }
965
966 /*
967 * Second, locate the left neighbour and test its last record.
968 * Because of its position in the B+tree, it must have base < va.
969 */
970 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
971 a = (as_area_t *) lnode->value[lnode->keys - 1];
972 mutex_lock(&a->lock);
973 if (va < a->base + a->pages * PAGE_SIZE) {
974 return a;
975 }
976 mutex_unlock(&a->lock);
977 }
978
979 return NULL;
980}
981
982/** Check area conflicts with other areas.
983 *
984 * The address space must be locked and interrupts must be disabled.
985 *
986 * @param as Address space.
987 * @param va Starting virtual address of the area being tested.
988 * @param size Size of the area being tested.
989 * @param avoid_area Do not touch this area.
990 *
991 * @return True if there is no conflict, false otherwise.
992 */
993bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
994{
995 as_area_t *a;
996 btree_node_t *leaf, *node;
997 int i;
998
999 /*
1000 * We don't want any area to conflict with the NULL page.
1001 */
1002 if (overlaps(va, size, NULL, PAGE_SIZE))
1003 return false;
1004
1005 /*
1006 * The leaf node is found in O(log n), where n is proportional to
1007 * the number of address space areas belonging to as.
1008 * The check for conflicts is then attempted on the rightmost
1009 * record in the left neighbour, the leftmost record in the right
1010 * neighbour and all records in the leaf node itself.
1011 */
1012
1013 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1014 if (a != avoid_area)
1015 return false;
1016 }
1017
1018 /* First, check the two border cases. */
1019 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1020 a = (as_area_t *) node->value[node->keys - 1];
1021 mutex_lock(&a->lock);
1022 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1023 mutex_unlock(&a->lock);
1024 return false;
1025 }
1026 mutex_unlock(&a->lock);
1027 }
1028 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1029 a = (as_area_t *) node->value[0];
1030 mutex_lock(&a->lock);
1031 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1032 mutex_unlock(&a->lock);
1033 return false;
1034 }
1035 mutex_unlock(&a->lock);
1036 }
1037
1038 /* Second, check the leaf node. */
1039 for (i = 0; i < leaf->keys; i++) {
1040 a = (as_area_t *) leaf->value[i];
1041
1042 if (a == avoid_area)
1043 continue;
1044
1045 mutex_lock(&a->lock);
1046 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1047 mutex_unlock(&a->lock);
1048 return false;
1049 }
1050 mutex_unlock(&a->lock);
1051 }
1052
1053 /*
1054 * So far, the area does not conflict with other areas.
1055 * Check that it does not conflict with the kernel address space.
1056 */
1057 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1058 return !overlaps(va, size,
1059 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1060 }
1061
1062 return true;
1063}
1064
1065/** Return size of the address space area with given base. */
1066size_t as_get_size(__address base)
1067{
1068 ipl_t ipl;
1069 as_area_t *src_area;
1070 size_t size;
1071
1072 ipl = interrupts_disable();
1073 src_area = find_area_and_lock(AS, base);
1074 if (src_area) {
1075 size = src_area->pages * PAGE_SIZE;
1076 mutex_unlock(&src_area->lock);
1077 } else {
1078 size = 0;
1079 }
1080 interrupts_restore(ipl);
1081 return size;
1082}
1083
1084/** Mark portion of address space area as used.
1085 *
1086 * The address space area must be already locked.
1087 *
1088 * @param a Address space area.
1089 * @param page First page to be marked.
1090 * @param count Number of pages to be marked.
1091 *
1092 * @return 0 on failure and 1 on success.
1093 */
1094int used_space_insert(as_area_t *a, __address page, count_t count)
1095{
1096 btree_node_t *leaf, *node;
1097 count_t pages;
1098 int i;
1099
1100 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1101 ASSERT(count);
1102
1103 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1104 if (pages) {
1105 /*
1106 * We hit the beginning of some used space.
1107 */
1108 return 0;
1109 }
1110
1111 if (!leaf->keys) {
1112 btree_insert(&a->used_space, page, (void *) count, leaf);
1113 return 1;
1114 }
1115
1116 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1117 if (node) {
1118 __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1119 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1120
1121 /*
1122 * Examine the possibility that the interval fits
1123 * somewhere between the rightmost interval of
1124 * the left neighbour and the first interval of the leaf.
1125 */
1126
1127 if (page >= right_pg) {
1128 /* Do nothing. */
1129 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1130 /* The interval intersects with the left interval. */
1131 return 0;
1132 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1133 /* The interval intersects with the right interval. */
1134 return 0;
1135 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1136 /* The interval can be added by merging the two already present intervals. */
1137 node->value[node->keys - 1] += count + right_cnt;
1138 btree_remove(&a->used_space, right_pg, leaf);
1139 return 1;
1140 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1141 /* The interval can be added by simply growing the left interval. */
1142 node->value[node->keys - 1] += count;
1143 return 1;
1144 } else if (page + count*PAGE_SIZE == right_pg) {
1145 /*
1146 * The interval can be added by simply moving the base of the right
1147 * interval down and increasing its size accordingly.
1148 */
1149 leaf->value[0] += count;
1150 leaf->key[0] = page;
1151 return 1;
1152 } else {
1153 /*
1154 * The interval is between both neighbouring intervals,
1155 * but cannot be merged with any of them.
1156 */
1157 btree_insert(&a->used_space, page, (void *) count, leaf);
1158 return 1;
1159 }
1160 } else if (page < leaf->key[0]) {
1161 __address right_pg = leaf->key[0];
1162 count_t right_cnt = (count_t) leaf->value[0];
1163
1164 /*
1165 * Investigate the border case in which the left neighbour does not
1166 * exist but the interval fits from the left.
1167 */
1168
1169 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1170 /* The interval intersects with the right interval. */
1171 return 0;
1172 } else if (page + count*PAGE_SIZE == right_pg) {
1173 /*
1174 * The interval can be added by moving the base of the right interval down
1175 * and increasing its size accordingly.
1176 */
1177 leaf->key[0] = page;
1178 leaf->value[0] += count;
1179 return 1;
1180 } else {
1181 /*
1182 * The interval doesn't adjoin the right interval.
1183 * It must be added individually.
1184 */
1185 btree_insert(&a->used_space, page, (void *) count, leaf);
1186 return 1;
1187 }
1188 }
1189
1190 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1191 if (node) {
1192 __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1193 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1194
1195 /*
1196 * Examine the possibility that the interval fits
1197 * somewhere between the leftmost interval of
1198 * the right neighbour and the last interval of the leaf.
1199 */
1200
1201 if (page < left_pg) {
1202 /* Do nothing. */
1203 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1204 /* The interval intersects with the left interval. */
1205 return 0;
1206 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1207 /* The interval intersects with the right interval. */
1208 return 0;
1209 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1210 /* The interval can be added by merging the two already present intervals. */
1211 leaf->value[leaf->keys - 1] += count + right_cnt;
1212 btree_remove(&a->used_space, right_pg, node);
1213 return 1;
1214 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1215 /* The interval can be added by simply growing the left interval. */
1216 leaf->value[leaf->keys - 1] += count;
1217 return 1;
1218 } else if (page + count*PAGE_SIZE == right_pg) {
1219 /*
1220 * The interval can be added by simply moving the base of the right
1221 * interval down and increasing its size accordingly.
1222 */
1223 node->value[0] += count;
1224 node->key[0] = page;
1225 return 1;
1226 } else {
1227 /*
1228 * The interval is between both neighbouring intervals,
1229 * but cannot be merged with any of them.
1230 */
1231 btree_insert(&a->used_space, page, (void *) count, leaf);
1232 return 1;
1233 }
1234 } else if (page >= leaf->key[leaf->keys - 1]) {
1235 __address left_pg = leaf->key[leaf->keys - 1];
1236 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1237
1238 /*
1239 * Investigate the border case in which the right neighbour does not
1240 * exist but the interval fits from the right.
1241 */
1242
1243 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1244 /* The interval intersects with the left interval. */
1245 return 0;
1246 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1247 /* The interval can be added by growing the left interval. */
1248 leaf->value[leaf->keys - 1] += count;
1249 return 1;
1250 } else {
1251 /*
1252 * The interval doesn't adjoin the left interval.
1253 * It must be added individually.
1254 */
1255 btree_insert(&a->used_space, page, (void *) count, leaf);
1256 return 1;
1257 }
1258 }
1259
1260 /*
1261 * Note that if the algorithm made it thus far, the interval can fit only
1262 * between two other intervals of the leaf. The two border cases were already
1263 * resolved.
1264 */
1265 for (i = 1; i < leaf->keys; i++) {
1266 if (page < leaf->key[i]) {
1267 __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1268 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1269
1270 /*
1271 * The interval fits between left_pg and right_pg.
1272 */
1273
1274 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1275 /* The interval intersects with the left interval. */
1276 return 0;
1277 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1278 /* The interval intersects with the right interval. */
1279 return 0;
1280 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1281 /* The interval can be added by merging the two already present intervals. */
1282 leaf->value[i - 1] += count + right_cnt;
1283 btree_remove(&a->used_space, right_pg, leaf);
1284 return 1;
1285 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1286 /* The interval can be added by simply growing the left interval. */
1287 leaf->value[i - 1] += count;
1288 return 1;
1289 } else if (page + count*PAGE_SIZE == right_pg) {
1290 /*
1291 * The interval can be added by simply moving the base of the right
1292 * interval down and increasing its size accordingly.
1293 */
1294 leaf->value[i] += count;
1295 leaf->key[i] = page;
1296 return 1;
1297 } else {
1298 /*
1299 * The interval is between both neighbouring intervals,
1300 * but cannot be merged with any of them.
1301 */
1302 btree_insert(&a->used_space, page, (void *) count, leaf);
1303 return 1;
1304 }
1305 }
1306 }
1307
1308 panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
1309}
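
/*
 * Editorial illustration (not part of the original file): with the area
 * locked, recording two adjacent two-page intervals leaves a single merged
 * four-page interval in the used_space B+tree, because the second insertion
 * merely grows the interval created by the first one:
 *
 *     used_space_insert(area, area->base, 2);
 *     used_space_insert(area, area->base + 2 * PAGE_SIZE, 2);
 */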
1310
1311/** Mark portion of address space area as unused.
1312 *
1313 * The address space area must be already locked.
1314 *
1315 * @param a Address space area.
1316 * @param page First page to be marked.
1317 * @param count Number of pages to be marked.
1318 *
1319 * @return 0 on failure and 1 on success.
1320 */
1321int used_space_remove(as_area_t *a, __address page, count_t count)
1322{
1323 btree_node_t *leaf, *node;
1324 count_t pages;
1325 int i;
1326
1327 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1328 ASSERT(count);
1329
1330 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1331 if (pages) {
1332 /*
1333 * We are lucky, page is the beginning of some interval.
1334 */
1335 if (count > pages) {
1336 return 0;
1337 } else if (count == pages) {
1338 btree_remove(&a->used_space, page, leaf);
1339 return 1;
1340 } else {
1341 /*
1342 * Find the respective interval.
1343 * Decrease its size and relocate its start address.
1344 */
1345 for (i = 0; i < leaf->keys; i++) {
1346 if (leaf->key[i] == page) {
1347 leaf->key[i] += count*PAGE_SIZE;
1348 leaf->value[i] -= count;
1349 return 1;
1350 }
1351 }
1352 goto error;
1353 }
1354 }
1355
1356 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1357 if (node && page < leaf->key[0]) {
1358 __address left_pg = node->key[node->keys - 1];
1359 count_t left_cnt = (count_t) node->value[node->keys - 1];
1360
1361 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1362 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1363 /*
1364 * The interval is contained in the rightmost interval
1365 * of the left neighbour and can be removed by
1366 * updating the size of the bigger interval.
1367 */
1368 node->value[node->keys - 1] -= count;
1369 return 1;
1370 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1371 count_t new_cnt;
1372
1373 /*
1374 * The interval is contained in the rightmost interval
1375 * of the left neighbour but its removal requires
1376 * both updating the size of the original interval and
1377 * also inserting a new interval.
1378 */
1379 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1380 node->value[node->keys - 1] -= count + new_cnt;
1381 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1382 return 1;
1383 }
1384 }
1385 return 0;
1386 } else if (page < leaf->key[0]) {
1387 return 0;
1388 }
1389
1390 if (page > leaf->key[leaf->keys - 1]) {
1391 __address left_pg = leaf->key[leaf->keys - 1];
1392 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1393
1394 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1395 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1396 /*
1397 * The interval is contained in the rightmost interval
1398 * of the leaf and can be removed by updating the size
1399 * of the bigger interval.
1400 */
1401 leaf->value[leaf->keys - 1] -= count;
1402 return 1;
1403 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1404 count_t new_cnt;
1405
1406 /*
1407 * The interval is contained in the rightmost interval
1408 * of the leaf but its removal requires both updating
1409 * the size of the original interval and
1410 * also inserting a new interval.
1411 */
1412 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1413 leaf->value[leaf->keys - 1] -= count + new_cnt;
1414 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1415 return 1;
1416 }
1417 }
1418 return 0;
1419 }
1420
1421 /*
1422 * The border cases have been already resolved.
1423 * Now the interval can be only between intervals of the leaf.
1424 */
1425 for (i = 1; i < leaf->keys - 1; i++) {
1426 if (page < leaf->key[i]) {
1427 __address left_pg = leaf->key[i - 1];
1428 count_t left_cnt = (count_t) leaf->value[i - 1];
1429
1430 /*
1431 * Now the interval is between intervals corresponding to (i - 1) and i.
1432 */
1433 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1434 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1435 /*
1436 * The interval is contained in the interval (i - 1)
1437 * of the leaf and can be removed by updating the size
1438 * of the bigger interval.
1439 */
1440 leaf->value[i - 1] -= count;
1441 return 1;
1442 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1443 count_t new_cnt;
1444
1445 /*
1446 * The interval is contained in the interval (i - 1)
1447 * of the leaf but its removal requires both updating
1448 * the size of the original interval and
1449 * also inserting a new interval.
1450 */
1451 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1452 leaf->value[i - 1] -= count + new_cnt;
1453 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1454 return 1;
1455 }
1456 }
1457 return 0;
1458 }
1459 }
1460
1461error:
1462 panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
1463}
1464
1465/** Remove reference to address space area share info.
1466 *
1467 * If the reference count drops to 0, the sh_info is deallocated.
1468 *
1469 * @param sh_info Pointer to address space area share info.
1470 */
1471void sh_info_remove_reference(share_info_t *sh_info)
1472{
1473 bool dealloc = false;
1474
1475 mutex_lock(&sh_info->lock);
1476 ASSERT(sh_info->refcount);
1477 if (--sh_info->refcount == 0) {
1478 dealloc = true;
1479 bool cond;
1480
1481 /*
1482 * Now walk carefully the pagemap B+tree and free/remove
1483 * reference from all frames found there.
1484 */
1485 for (cond = true; cond;) {
1486 btree_node_t *node;
1487
1488 ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
1489 node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
1490 if ((cond = node->keys)) {
1491 frame_free(ADDR2PFN((__address) node->value[0]));
1492 btree_remove(&sh_info->pagemap, node->key[0], node);
1493 }
1494 }
1495
1496 }
1497 mutex_unlock(&sh_info->lock);
1498
1499 if (dealloc) {
1500 btree_destroy(&sh_info->pagemap);
1501 free(sh_info);
1502 }
1503}
1504
1505/*
1506 * Address space related syscalls.
1507 */
1508
1509/** Wrapper for as_area_create(). */
1510__native sys_as_area_create(__address address, size_t size, int flags)
1511{
1512 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1513 return (__native) address;
1514 else
1515 return (__native) -1;
1516}
1517
1518/** Wrapper for as_area_resize. */
1519__native sys_as_area_resize(__address address, size_t size, int flags)
1520{
1521 return (__native) as_area_resize(AS, address, size, 0);
1522}
1523
1524/** Wrapper for as_area_destroy. */
1525__native sys_as_area_destroy(__address address)
1526{
1527 return (__native) as_area_destroy(AS, address);
1528}