source: mainline/generic/src/mm/as.c@ c74804f

Last change on this file since c74804f was 31e8ddd, checked in by Jakub Jermar <jakub@…>, 19 years ago

task_destroy() implementation, fixes in as_destroy() and task_kill().
This is the first version of HelenOS that would perform complete cleanup leading from thread to destruction of address space.

1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * @file as.c
31 * @brief Address space related functions.
32 *
33 * This file contains address space manipulation functions.
34 * Roughly speaking, this is a higher-level client of the
35 * Virtual Address Translation (VAT) subsystem.
36 *
37 * Functionality provided by this file allows one to
38 * create address spaces and to create, resize and share
39 * address space areas.
40 *
41 * @see page.c
42 *
43 */
44
45#include <mm/as.h>
46#include <arch/mm/as.h>
47#include <mm/page.h>
48#include <mm/frame.h>
49#include <mm/slab.h>
50#include <mm/tlb.h>
51#include <arch/mm/page.h>
52#include <genarch/mm/page_pt.h>
53#include <genarch/mm/page_ht.h>
54#include <mm/asid.h>
55#include <arch/mm/asid.h>
56#include <synch/spinlock.h>
57#include <synch/mutex.h>
58#include <adt/list.h>
59#include <adt/btree.h>
60#include <proc/task.h>
61#include <proc/thread.h>
62#include <arch/asm.h>
63#include <panic.h>
64#include <debug.h>
65#include <print.h>
66#include <memstr.h>
67#include <macros.h>
68#include <arch.h>
69#include <errno.h>
70#include <config.h>
71#include <align.h>
72#include <arch/types.h>
73#include <typedefs.h>
74#include <syscall/copy.h>
75#include <arch/interrupt.h>
76
77as_operations_t *as_operations = NULL;
78
79/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
80SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
81
82/**
83 * This list contains address spaces that are not active on any
84 * processor and that have valid ASID.
85 */
86LIST_INITIALIZE(inactive_as_with_asid_head);
87
88/** Kernel address space. */
89as_t *AS_KERNEL = NULL;
90
91static int area_flags_to_page_flags(int aflags);
92static as_area_t *find_area_and_lock(as_t *as, __address va);
93static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
94static void sh_info_remove_reference(share_info_t *sh_info);
95
96/** Initialize address space subsystem. */
97void as_init(void)
98{
99 as_arch_init();
100 AS_KERNEL = as_create(FLAG_AS_KERNEL);
101 if (!AS_KERNEL)
102 panic("can't create kernel address space\n");
103
104}
105
106/** Create address space.
107 *
108 * @param flags Flags that influence the way in which the address space is created.
109 */
110as_t *as_create(int flags)
111{
112 as_t *as;
113
114 as = (as_t *) malloc(sizeof(as_t), 0);
115 link_initialize(&as->inactive_as_with_asid_link);
116 mutex_initialize(&as->lock);
117 btree_create(&as->as_area_btree);
118
119 if (flags & FLAG_AS_KERNEL)
120 as->asid = ASID_KERNEL;
121 else
122 as->asid = ASID_INVALID;
123
124 as->refcount = 0;
125 as->cpu_refcount = 0;
126 as->page_table = page_table_create(flags);
127
128 return as;
129}
130
131/** Destroy address space.
132 *
133 * When there are no tasks referencing this address space (i.e. its refcount is zero),
134 * the address space can be destroyed.
135 */
136void as_destroy(as_t *as)
137{
138 ipl_t ipl;
139 link_t *cur;
140
141 ASSERT(as->refcount == 0);
142
143 /*
144 * Since there is no reference to this address space,
145 * it is safe not to lock its mutex.
146 */
147
148 ipl = interrupts_disable();
149 spinlock_lock(&inactive_as_with_asid_lock);
150
151 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
152 if (!as->cpu_refcount)
153 list_remove(&as->inactive_as_with_asid_link);
154 asid_put(as->asid);
155 }
156 spinlock_unlock(&inactive_as_with_asid_lock);
157
158 /*
159 * Destroy address space areas of the address space.
160 */
161 for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
162 btree_node_t *node;
163 int i;
164
165 node = list_get_instance(cur, btree_node_t, leaf_link);
166 for (i = 0; i < node->keys; i++)
167 as_area_destroy(as, node->key[i]);
168 }
169
170 btree_destroy(&as->as_area_btree);
171 page_table_destroy(as->page_table);
172
173 interrupts_restore(ipl);
174
175 free(as);
176}
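/*
 * Illustrative sketch, added for clarity: a minimal address space
 * lifecycle built from the interface above. The wrapper function
 * example_as_lifecycle() is hypothetical and not part of this file.
 * A freshly created address space has refcount == 0, so it satisfies
 * the assertion in as_destroy().
 */
#if 0
static void example_as_lifecycle(void)
{
        as_t *as;

        /* Create a user address space; its ASID is assigned lazily. */
        as = as_create(0);

        /* ... attach the address space to a task and use it ... */

        /* Once no task references it any longer, tear it down. */
        as_destroy(as);
}
#endif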
177
178/** Create address space area of common attributes.
179 *
180 * The created address space area is added to the target address space.
181 *
182 * @param as Target address space.
183 * @param flags Flags of the area memory.
184 * @param size Size of area.
185 * @param base Base address of area.
186 * @param attrs Attributes of the area.
187 * @param backend Address space area backend. NULL if no backend is used.
188 * @param backend_data NULL or a pointer to an array holding two void *.
189 *
190 * @return Address space area on success or NULL on failure.
191 */
192as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
193 mem_backend_t *backend, mem_backend_data_t *backend_data)
194{
195 ipl_t ipl;
196 as_area_t *a;
197
198 if (base % PAGE_SIZE)
199 return NULL;
200
201 if (!size)
202 return NULL;
203
204 /* Writeable executable areas are not supported. */
205 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
206 return NULL;
207
208 ipl = interrupts_disable();
209 mutex_lock(&as->lock);
210
211 if (!check_area_conflicts(as, base, size, NULL)) {
212 mutex_unlock(&as->lock);
213 interrupts_restore(ipl);
214 return NULL;
215 }
216
217 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
218
219 mutex_initialize(&a->lock);
220
221 a->as = as;
222 a->flags = flags;
223 a->attributes = attrs;
224 a->pages = SIZE2FRAMES(size);
225 a->base = base;
226 a->sh_info = NULL;
227 a->backend = backend;
228 if (backend_data)
229 a->backend_data = *backend_data;
230 else
231 memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
232
233 btree_create(&a->used_space);
234
235 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
236
237 mutex_unlock(&as->lock);
238 interrupts_restore(ipl);
239
240 return a;
241}
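/*
 * Illustrative sketch, added for clarity: creating an anonymous,
 * cacheable, read/write area, mirroring the call made by
 * sys_as_area_create() at the end of this file. EXAMPLE_BASE and
 * EXAMPLE_SIZE are hypothetical placeholders for a page-aligned base
 * and a non-zero size.
 */
#if 0
static as_area_t *example_create_anon_area(as_t *as)
{
        return as_area_create(as,
            AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
            EXAMPLE_SIZE, EXAMPLE_BASE, AS_AREA_ATTR_NONE,
            &anon_backend, NULL);
}
#endif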
242
243/** Find address space area and change it.
244 *
245 * @param as Address space.
246 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
247 * @param size New size of the virtual memory block starting at address.
248 * @param flags Flags influencing the remap operation. Currently unused.
249 *
250 * @return Zero on success or a value from @ref errno.h otherwise.
251 */
252int as_area_resize(as_t *as, __address address, size_t size, int flags)
253{
254 as_area_t *area;
255 ipl_t ipl;
256 size_t pages;
257
258 ipl = interrupts_disable();
259 mutex_lock(&as->lock);
260
261 /*
262 * Locate the area.
263 */
264 area = find_area_and_lock(as, address);
265 if (!area) {
266 mutex_unlock(&as->lock);
267 interrupts_restore(ipl);
268 return ENOENT;
269 }
270
271 if (area->backend == &phys_backend) {
272 /*
273 * Remapping of address space areas associated
274 * with memory mapped devices is not supported.
275 */
276 mutex_unlock(&area->lock);
277 mutex_unlock(&as->lock);
278 interrupts_restore(ipl);
279 return ENOTSUP;
280 }
281 if (area->sh_info) {
282 /*
283 * Remapping of shared address space areas
284 * is not supported.
285 */
286 mutex_unlock(&area->lock);
287 mutex_unlock(&as->lock);
288 interrupts_restore(ipl);
289 return ENOTSUP;
290 }
291
292 pages = SIZE2FRAMES((address - area->base) + size);
293 if (!pages) {
294 /*
295 * Zero size address space areas are not allowed.
296 */
297 mutex_unlock(&area->lock);
298 mutex_unlock(&as->lock);
299 interrupts_restore(ipl);
300 return EPERM;
301 }
302
303 if (pages < area->pages) {
304 bool cond;
305 __address start_free = area->base + pages*PAGE_SIZE;
306
307 /*
308 * Shrinking the area.
309 * No need to check for overlaps.
310 */
311
312 /*
313 * Start TLB shootdown sequence.
314 */
315 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
316
317 /*
318 * Remove frames belonging to used space starting from
319 * the highest addresses downwards until an overlap with
320 * the resized address space area is found. Note that this
321 * is also the right way to remove part of the used_space
322 * B+tree leaf list.
323 */
324 for (cond = true; cond;) {
325 btree_node_t *node;
326
327 ASSERT(!list_empty(&area->used_space.leaf_head));
328 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
329 if ((cond = (bool) node->keys)) {
330 __address b = node->key[node->keys - 1];
331 count_t c = (count_t) node->value[node->keys - 1];
332 int i = 0;
333
334 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
335
336 if (b + c*PAGE_SIZE <= start_free) {
337 /*
338 * The whole interval fits completely
339 * in the resized address space area.
340 */
341 break;
342 }
343
344 /*
345 * Part of the interval corresponding to b and c
346 * overlaps with the resized address space area.
347 */
348
349 cond = false; /* we are almost done */
350 i = (start_free - b) >> PAGE_WIDTH;
351 if (!used_space_remove(area, start_free, c - i))
352 panic("Could not remove used space.");
353 } else {
354 /*
355 * The interval of used space can be completely removed.
356 */
357 if (!used_space_remove(area, b, c))
358 panic("Could not remove used space.\n");
359 }
360
361 for (; i < c; i++) {
362 pte_t *pte;
363
364 page_table_lock(as, false);
365 pte = page_mapping_find(as, b + i*PAGE_SIZE);
366 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
367 if (area->backend && area->backend->frame_free) {
368 area->backend->frame_free(area,
369 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
370 }
371 page_mapping_remove(as, b + i*PAGE_SIZE);
372 page_table_unlock(as, false);
373 }
374 }
375 }
376
377 /*
378 * Finish TLB shootdown sequence.
379 */
380 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
381 tlb_shootdown_finalize();
382 } else {
383 /*
384 * Growing the area.
385 * Check for overlaps with other address space areas.
386 */
387 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
388 mutex_unlock(&area->lock);
389 mutex_unlock(&as->lock);
390 interrupts_restore(ipl);
391 return EADDRNOTAVAIL;
392 }
393 }
394
395 area->pages = pages;
396
397 mutex_unlock(&area->lock);
398 mutex_unlock(&as->lock);
399 interrupts_restore(ipl);
400
401 return 0;
402}
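/*
 * Illustrative sketch, added for clarity: growing an existing area to a
 * new total size, as sys_as_area_resize() below does on behalf of
 * userspace. The function example_grow_area() is hypothetical; base is
 * the area's base address and new_size the desired total size in bytes.
 */
#if 0
static int example_grow_area(__address base, size_t new_size)
{
        /* The flags argument is currently unused, hence 0. */
        return as_area_resize(AS, base, new_size, 0);
}
#endif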
403
404/** Destroy address space area.
405 *
406 * @param as Address space.
407 * @param address Address within the area to be deleted.
408 *
409 * @return Zero on success or a value from @ref errno.h on failure.
410 */
411int as_area_destroy(as_t *as, __address address)
412{
413 as_area_t *area;
414 __address base;
415 link_t *cur;
416 ipl_t ipl;
417
418 ipl = interrupts_disable();
419 mutex_lock(&as->lock);
420
421 area = find_area_and_lock(as, address);
422 if (!area) {
423 mutex_unlock(&as->lock);
424 interrupts_restore(ipl);
425 return ENOENT;
426 }
427
428 base = area->base;
429
430 /*
431 * Start TLB shootdown sequence.
432 */
433 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
434
435 /*
436 * Visit only the pages mapped by used_space B+tree.
437 */
438 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
439 btree_node_t *node;
440 int i;
441
442 node = list_get_instance(cur, btree_node_t, leaf_link);
443 for (i = 0; i < node->keys; i++) {
444 __address b = node->key[i];
445 count_t j;
446 pte_t *pte;
447
448 for (j = 0; j < (count_t) node->value[i]; j++) {
449 page_table_lock(as, false);
450 pte = page_mapping_find(as, b + j*PAGE_SIZE);
451 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
452 if (area->backend && area->backend->frame_free) {
453 area->backend->frame_free(area,
454 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
455 }
456 page_mapping_remove(as, b + j*PAGE_SIZE);
457 page_table_unlock(as, false);
458 }
459 }
460 }
461
462 /*
463 * Finish TLB shootdown sequence.
464 */
465 tlb_invalidate_pages(AS->asid, area->base, area->pages);
466 tlb_shootdown_finalize();
467
468 btree_destroy(&area->used_space);
469
470 area->attributes |= AS_AREA_ATTR_PARTIAL;
471
472 if (area->sh_info)
473 sh_info_remove_reference(area->sh_info);
474
475 mutex_unlock(&area->lock);
476
477 /*
478 * Remove the empty area from address space.
479 */
480 btree_remove(&as->as_area_btree, base, NULL);
481
482 free(area);
483
484 mutex_unlock(&as->lock);
485 interrupts_restore(ipl);
486 return 0;
487}
488
489/** Share address space area with another or the same address space.
490 *
491 * Address space area mapping is shared with a new address space area.
492 * If the source address space area has not been shared so far,
493 * a new sh_info is created. The new address space area simply gets the
494 * sh_info of the source area. The process of duplicating the
495 * mapping is done through the backend share function.
496 *
497 * @param src_as Pointer to source address space.
498 * @param src_base Base address of the source address space area.
499 * @param acc_size Expected size of the source area.
500 * @param dst_as Pointer to destination address space.
501 * @param dst_base Target base address.
502 * @param dst_flags_mask Destination address space area flags mask.
503 *
504 * @return Zero on success or ENOENT if there is no such task or
505 * if there is no such address space area,
506 * EPERM if there was a problem in accepting the area or
507 * ENOMEM if there was a problem in allocating destination
508 * address space area. ENOTSUP is returned if an attempt
509 * to share non-anonymous address space area is detected.
510 */
511int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
512 as_t *dst_as, __address dst_base, int dst_flags_mask)
513{
514 ipl_t ipl;
515 int src_flags;
516 size_t src_size;
517 as_area_t *src_area, *dst_area;
518 share_info_t *sh_info;
519 mem_backend_t *src_backend;
520 mem_backend_data_t src_backend_data;
521
522 ipl = interrupts_disable();
523 mutex_lock(&src_as->lock);
524 src_area = find_area_and_lock(src_as, src_base);
525 if (!src_area) {
526 /*
527 * Could not find the source address space area.
528 */
529 mutex_unlock(&src_as->lock);
530 interrupts_restore(ipl);
531 return ENOENT;
532 }
533
534 if (!src_area->backend || !src_area->backend->share) {
535 /*
536 * There is no backend or the backend does not
537 * know how to share the area.
538 */
539 mutex_unlock(&src_area->lock);
540 mutex_unlock(&src_as->lock);
541 interrupts_restore(ipl);
542 return ENOTSUP;
543 }
544
545 src_size = src_area->pages * PAGE_SIZE;
546 src_flags = src_area->flags;
547 src_backend = src_area->backend;
548 src_backend_data = src_area->backend_data;
549
550 /* Share the cacheable flag from the original mapping */
551 if (src_flags & AS_AREA_CACHEABLE)
552 dst_flags_mask |= AS_AREA_CACHEABLE;
553
554 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
555 mutex_unlock(&src_area->lock);
556 mutex_unlock(&src_as->lock);
557 interrupts_restore(ipl);
558 return EPERM;
559 }
560
561 /*
562 * Now we are committed to sharing the area.
563 * First prepare the area for sharing.
564 * Then it will be safe to unlock it.
565 */
566 sh_info = src_area->sh_info;
567 if (!sh_info) {
568 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
569 mutex_initialize(&sh_info->lock);
570 sh_info->refcount = 2;
571 btree_create(&sh_info->pagemap);
572 src_area->sh_info = sh_info;
573 } else {
574 mutex_lock(&sh_info->lock);
575 sh_info->refcount++;
576 mutex_unlock(&sh_info->lock);
577 }
578
579 src_area->backend->share(src_area);
580
581 mutex_unlock(&src_area->lock);
582 mutex_unlock(&src_as->lock);
583
584 /*
585 * Create copy of the source address space area.
586 * The destination area is created with AS_AREA_ATTR_PARTIAL
587 * attribute set which prevents race condition with
588 * preliminary as_page_fault() calls.
589 * The flags of the source area are masked against dst_flags_mask
590 * to support sharing in less privileged mode.
591 */
592 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
593 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
594 if (!dst_area) {
595 /*
596 * Destination address space area could not be created.
597 */
598 sh_info_remove_reference(sh_info);
599
600 interrupts_restore(ipl);
601 return ENOMEM;
602 }
603
604 /*
605 * Now the destination address space area has been
606 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
607 * attribute and set the sh_info.
608 */
609 mutex_lock(&dst_area->lock);
610 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
611 dst_area->sh_info = sh_info;
612 mutex_unlock(&dst_area->lock);
613
614 interrupts_restore(ipl);
615
616 return 0;
617}
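/*
 * Illustrative sketch, added for clarity: sharing an area read-only into
 * another address space at the same base address. The wrapper
 * example_share_readonly() is hypothetical; size must match the size of
 * the source area (otherwise EPERM is returned) and the source area must
 * be backed by a backend that implements share (e.g. the anonymous
 * backend).
 */
#if 0
static int example_share_readonly(as_t *src_as, as_t *dst_as,
    __address base, size_t size)
{
        return as_area_share(src_as, base, size, dst_as, base, AS_AREA_READ);
}
#endif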
618
619/** Check access mode for address space area.
620 *
621 * The address space area must be locked prior to this call.
622 *
623 * @param area Address space area.
624 * @param access Access mode.
625 *
626 * @return False if access violates area's permissions, true otherwise.
627 */
628bool as_area_check_access(as_area_t *area, pf_access_t access)
629{
630 int flagmap[] = {
631 [PF_ACCESS_READ] = AS_AREA_READ,
632 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
633 [PF_ACCESS_EXEC] = AS_AREA_EXEC
634 };
635
636 if (!(area->flags & flagmap[access]))
637 return false;
638
639 return true;
640}
641
642/** Handle page fault within the current address space.
643 *
644 * This is the high-level page fault handler. It decides
645 * whether the page fault can be resolved by any backend
646 * and if so, it invokes the backend to resolve the page
647 * fault.
648 *
649 * Interrupts are assumed disabled.
650 *
651 * @param page Faulting page.
652 * @param access Access mode that caused the fault (i.e. read/write/exec).
653 * @param istate Pointer to interrupted state.
654 *
655 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
656 * fault was caused by copy_to_uspace() or copy_from_uspace().
657 */
658int as_page_fault(__address page, pf_access_t access, istate_t *istate)
659{
660 pte_t *pte;
661 as_area_t *area;
662
663 if (!THREAD)
664 return AS_PF_FAULT;
665
666 ASSERT(AS);
667
668 mutex_lock(&AS->lock);
669 area = find_area_and_lock(AS, page);
670 if (!area) {
671 /*
672 * No area contained mapping for 'page'.
673 * Signal page fault to low-level handler.
674 */
675 mutex_unlock(&AS->lock);
676 goto page_fault;
677 }
678
679 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
680 /*
681 * The address space area is not fully initialized.
682 * Avoid possible race by returning error.
683 */
684 mutex_unlock(&area->lock);
685 mutex_unlock(&AS->lock);
686 goto page_fault;
687 }
688
689 if (!area->backend || !area->backend->page_fault) {
690 /*
691 * The address space area is not backed by any backend
692 * or the backend cannot handle page faults.
693 */
694 mutex_unlock(&area->lock);
695 mutex_unlock(&AS->lock);
696 goto page_fault;
697 }
698
699 page_table_lock(AS, false);
700
701 /*
702 * To avoid race condition between two page faults
703 * on the same address, we need to make sure
704 * the mapping has not been already inserted.
705 */
706 if ((pte = page_mapping_find(AS, page))) {
707 if (PTE_PRESENT(pte)) {
708 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
709 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
710 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
711 page_table_unlock(AS, false);
712 mutex_unlock(&area->lock);
713 mutex_unlock(&AS->lock);
714 return AS_PF_OK;
715 }
716 }
717 }
718
719 /*
720 * Resort to the backend page fault handler.
721 */
722 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
723 page_table_unlock(AS, false);
724 mutex_unlock(&area->lock);
725 mutex_unlock(&AS->lock);
726 goto page_fault;
727 }
728
729 page_table_unlock(AS, false);
730 mutex_unlock(&area->lock);
731 mutex_unlock(&AS->lock);
732 return AS_PF_OK;
733
734page_fault:
735 if (THREAD->in_copy_from_uspace) {
736 THREAD->in_copy_from_uspace = false;
737 istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
738 } else if (THREAD->in_copy_to_uspace) {
739 THREAD->in_copy_to_uspace = false;
740 istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
741 } else {
742 return AS_PF_FAULT;
743 }
744
745 return AS_PF_DEFER;
746}
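/*
 * Illustrative sketch, added for clarity: how a low-level,
 * architecture-specific fault handler might forward a read fault to
 * as_page_fault(). The function example_arch_page_fault() and the way
 * the faulting address reaches it are hypothetical.
 */
#if 0
static void example_arch_page_fault(__address badvaddr, istate_t *istate)
{
        if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
            istate) == AS_PF_FAULT)
                panic("unresolved page fault at %P\n", badvaddr);
        /* AS_PF_OK and AS_PF_DEFER require no further action here. */
}
#endif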
747
748/** Switch address spaces.
749 *
750 * Note that this function cannot sleep as it is essentially a part of
751 * scheduling. Sleeping here would lead to deadlock on wakeup.
752 *
753 * @param old Old address space or NULL.
754 * @param new New address space.
755 */
756void as_switch(as_t *old, as_t *new)
757{
758 ipl_t ipl;
759 bool needs_asid = false;
760
761 ipl = interrupts_disable();
762 spinlock_lock(&inactive_as_with_asid_lock);
763
764 /*
765 * First, take care of the old address space.
766 */
767 if (old) {
768 mutex_lock_active(&old->lock);
769 ASSERT(old->cpu_refcount);
770 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
771 /*
772 * The old address space is no longer active on
773 * any processor. It can be appended to the
774 * list of inactive address spaces with assigned
775 * ASID.
776 */
777 ASSERT(old->asid != ASID_INVALID);
778 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
779 }
780 mutex_unlock(&old->lock);
781 }
782
783 /*
784 * Second, prepare the new address space.
785 */
786 mutex_lock_active(&new->lock);
787 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
788 if (new->asid != ASID_INVALID)
789 list_remove(&new->inactive_as_with_asid_link);
790 else
791 needs_asid = true; /* defer call to asid_get() until new->lock is released */
792 }
793 SET_PTL0_ADDRESS(new->page_table);
794 mutex_unlock(&new->lock);
795
796 if (needs_asid) {
797 /*
798 * Allocation of new ASID was deferred
799 * until now in order to avoid deadlock.
800 */
801 asid_t asid;
802
803 asid = asid_get();
804 mutex_lock_active(&new->lock);
805 new->asid = asid;
806 mutex_unlock(&new->lock);
807 }
808 spinlock_unlock(&inactive_as_with_asid_lock);
809 interrupts_restore(ipl);
810
811 /*
812 * Perform architecture-specific steps.
813 * (e.g. write ASID to hardware register etc.)
814 */
815 as_install_arch(new);
816
817 AS = new;
818}
819
820/** Convert address space area flags to page flags.
821 *
822 * @param aflags Flags of some address space area.
823 *
824 * @return Flags to be passed to page_mapping_insert().
825 */
826int area_flags_to_page_flags(int aflags)
827{
828 int flags;
829
830 flags = PAGE_USER | PAGE_PRESENT;
831
832 if (aflags & AS_AREA_READ)
833 flags |= PAGE_READ;
834
835 if (aflags & AS_AREA_WRITE)
836 flags |= PAGE_WRITE;
837
838 if (aflags & AS_AREA_EXEC)
839 flags |= PAGE_EXEC;
840
841 if (aflags & AS_AREA_CACHEABLE)
842 flags |= PAGE_CACHEABLE;
843
844 return flags;
845}
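/*
 * Example, added for illustration: an area created with
 * AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which is what as_area_get_flags() passes on to page_mapping_insert().
 */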
846
847/** Compute flags for virtual address translation subsystem.
848 *
849 * The address space area must be locked.
850 * Interrupts must be disabled.
851 *
852 * @param a Address space area.
853 *
854 * @return Flags to be used in page_mapping_insert().
855 */
856int as_area_get_flags(as_area_t *a)
857{
858 return area_flags_to_page_flags(a->flags);
859}
860
861/** Create page table.
862 *
863 * Depending on architecture, create either address space
864 * private or global page table.
865 *
866 * @param flags Flags saying whether the page table is for kernel address space.
867 *
868 * @return First entry of the page table.
869 */
870pte_t *page_table_create(int flags)
871{
872 ASSERT(as_operations);
873 ASSERT(as_operations->page_table_create);
874
875 return as_operations->page_table_create(flags);
876}
877
878/** Destroy page table.
879 *
880 * Destroy page table in architecture specific way.
881 *
882 * @param page_table Physical address of PTL0.
883 */
884void page_table_destroy(pte_t *page_table)
885{
886 ASSERT(as_operations);
887 ASSERT(as_operations->page_table_destroy);
888
889 as_operations->page_table_destroy(page_table);
890}
891
892/** Lock page table.
893 *
894 * This function should be called before any page_mapping_insert(),
895 * page_mapping_remove() and page_mapping_find().
896 *
897 * Locking order is such that address space areas must be locked
898 * prior to this call. Address space can be locked prior to this
899 * call in which case the lock argument is false.
900 *
901 * @param as Address space.
902 * @param lock If false, do not attempt to lock as->lock.
903 */
904void page_table_lock(as_t *as, bool lock)
905{
906 ASSERT(as_operations);
907 ASSERT(as_operations->page_table_lock);
908
909 as_operations->page_table_lock(as, lock);
910}
911
912/** Unlock page table.
913 *
914 * @param as Address space.
915 * @param unlock If false, do not attempt to unlock as->lock.
916 */
917void page_table_unlock(as_t *as, bool unlock)
918{
919 ASSERT(as_operations);
920 ASSERT(as_operations->page_table_unlock);
921
922 as_operations->page_table_unlock(as, unlock);
923}
924
925
926/** Find address space area and lock it.
927 *
928 * The address space must be locked and interrupts must be disabled.
929 *
930 * @param as Address space.
931 * @param va Virtual address.
932 *
933 * @return Locked address space area containing va on success or NULL on failure.
934 */
935as_area_t *find_area_and_lock(as_t *as, __address va)
936{
937 as_area_t *a;
938 btree_node_t *leaf, *lnode;
939 int i;
940
941 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
942 if (a) {
943 /* va is the base address of an address space area */
944 mutex_lock(&a->lock);
945 return a;
946 }
947
948 /*
949 * Search the leaf node and the rightmost record of its left neighbour
950 * to find out whether this is a miss or va belongs to an address
951 * space area found there.
952 */
953
954 /* First, search the leaf node itself. */
955 for (i = 0; i < leaf->keys; i++) {
956 a = (as_area_t *) leaf->value[i];
957 mutex_lock(&a->lock);
958 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
959 return a;
960 }
961 mutex_unlock(&a->lock);
962 }
963
964 /*
965 * Second, locate the left neighbour and test its last record.
966 * Because of its position in the B+tree, it must have base < va.
967 */
968 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
969 a = (as_area_t *) lnode->value[lnode->keys - 1];
970 mutex_lock(&a->lock);
971 if (va < a->base + a->pages * PAGE_SIZE) {
972 return a;
973 }
974 mutex_unlock(&a->lock);
975 }
976
977 return NULL;
978}
979
980/** Check area conflicts with other areas.
981 *
982 * The address space must be locked and interrupts must be disabled.
983 *
984 * @param as Address space.
985 * @param va Starting virtual address of the area being tested.
986 * @param size Size of the area being tested.
987 * @param avoid_area Do not touch this area.
988 *
989 * @return True if there is no conflict, false otherwise.
990 */
991bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
992{
993 as_area_t *a;
994 btree_node_t *leaf, *node;
995 int i;
996
997 /*
998 * We don't want any area to have conflicts with NULL page.
999 */
1000 if (overlaps(va, size, NULL, PAGE_SIZE))
1001 return false;
1002
1003 /*
1004 * The leaf node is found in O(log n), where n is proportional to
1005 * the number of address space areas belonging to as.
1006 * The check for conflicts is then attempted on the rightmost
1007 * record in the left neighbour, the leftmost record in the right
1008 * neighbour and all records in the leaf node itself.
1009 */
1010
1011 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1012 if (a != avoid_area)
1013 return false;
1014 }
1015
1016 /* First, check the two border cases. */
1017 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1018 a = (as_area_t *) node->value[node->keys - 1];
1019 mutex_lock(&a->lock);
1020 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1021 mutex_unlock(&a->lock);
1022 return false;
1023 }
1024 mutex_unlock(&a->lock);
1025 }
1026 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1027 a = (as_area_t *) node->value[0];
1028 mutex_lock(&a->lock);
1029 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1030 mutex_unlock(&a->lock);
1031 return false;
1032 }
1033 mutex_unlock(&a->lock);
1034 }
1035
1036 /* Second, check the leaf node. */
1037 for (i = 0; i < leaf->keys; i++) {
1038 a = (as_area_t *) leaf->value[i];
1039
1040 if (a == avoid_area)
1041 continue;
1042
1043 mutex_lock(&a->lock);
1044 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1045 mutex_unlock(&a->lock);
1046 return false;
1047 }
1048 mutex_unlock(&a->lock);
1049 }
1050
1051 /*
1052 * So far, the area does not conflict with other areas.
1053 * Check that it does not conflict with the kernel address space.
1054 */
1055 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1056 return !overlaps(va, size,
1057 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1058 }
1059
1060 return true;
1061}
1062
1063/** Return size of the address space area with given base. */
1064size_t as_get_size(__address base)
1065{
1066 ipl_t ipl;
1067 as_area_t *src_area;
1068 size_t size;
1069
1070 ipl = interrupts_disable();
1071 src_area = find_area_and_lock(AS, base);
1072 if (src_area){
1073 size = src_area->pages * PAGE_SIZE;
1074 mutex_unlock(&src_area->lock);
1075 } else {
1076 size = 0;
1077 }
1078 interrupts_restore(ipl);
1079 return size;
1080}
1081
1082/** Mark portion of address space area as used.
1083 *
1084 * The address space area must be already locked.
1085 *
1086 * @param a Address space area.
1087 * @param page First page to be marked.
1088 * @param count Number of pages to be marked.
1089 *
1090 * @return 0 on failure and 1 on success.
1091 */
1092int used_space_insert(as_area_t *a, __address page, count_t count)
1093{
1094 btree_node_t *leaf, *node;
1095 count_t pages;
1096 int i;
1097
1098 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1099 ASSERT(count);
1100
1101 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1102 if (pages) {
1103 /*
1104 * We hit the beginning of some used space.
1105 */
1106 return 0;
1107 }
1108
1109 if (!leaf->keys) {
1110 btree_insert(&a->used_space, page, (void *) count, leaf);
1111 return 1;
1112 }
1113
1114 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1115 if (node) {
1116 __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1117 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1118
1119 /*
1120 * Examine the possibility that the interval fits
1121 * somewhere between the rightmost interval of
1122 * the left neighbour and the first interval of the leaf.
1123 */
1124
1125 if (page >= right_pg) {
1126 /* Do nothing. */
1127 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1128 /* The interval intersects with the left interval. */
1129 return 0;
1130 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1131 /* The interval intersects with the right interval. */
1132 return 0;
1133 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1134 /* The interval can be added by merging the two already present intervals. */
1135 node->value[node->keys - 1] += count + right_cnt;
1136 btree_remove(&a->used_space, right_pg, leaf);
1137 return 1;
1138 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1139 /* The interval can be added by simply growing the left interval. */
1140 node->value[node->keys - 1] += count;
1141 return 1;
1142 } else if (page + count*PAGE_SIZE == right_pg) {
1143 /*
1144 * The interval can be added by simply moving the base of the right
1145 * interval down and increasing its size accordingly.
1146 */
1147 leaf->value[0] += count;
1148 leaf->key[0] = page;
1149 return 1;
1150 } else {
1151 /*
1152 * The interval is between both neighbouring intervals,
1153 * but cannot be merged with any of them.
1154 */
1155 btree_insert(&a->used_space, page, (void *) count, leaf);
1156 return 1;
1157 }
1158 } else if (page < leaf->key[0]) {
1159 __address right_pg = leaf->key[0];
1160 count_t right_cnt = (count_t) leaf->value[0];
1161
1162 /*
1163 * Investigate the border case in which the left neighbour does not
1164 * exist but the interval fits from the left.
1165 */
1166
1167 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1168 /* The interval intersects with the right interval. */
1169 return 0;
1170 } else if (page + count*PAGE_SIZE == right_pg) {
1171 /*
1172 * The interval can be added by moving the base of the right interval down
1173 * and increasing its size accordingly.
1174 */
1175 leaf->key[0] = page;
1176 leaf->value[0] += count;
1177 return 1;
1178 } else {
1179 /*
1180 * The interval doesn't adjoin with the right interval.
1181 * It must be added individually.
1182 */
1183 btree_insert(&a->used_space, page, (void *) count, leaf);
1184 return 1;
1185 }
1186 }
1187
1188 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1189 if (node) {
1190 __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1191 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1192
1193 /*
1194 * Examine the possibility that the interval fits
1195 * somewhere between the leftmost interval of
1196 * the right neighbour and the last interval of the leaf.
1197 */
1198
1199 if (page < left_pg) {
1200 /* Do nothing. */
1201 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1202 /* The interval intersects with the left interval. */
1203 return 0;
1204 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1205 /* The interval intersects with the right interval. */
1206 return 0;
1207 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1208 /* The interval can be added by merging the two already present intervals. */
1209 leaf->value[leaf->keys - 1] += count + right_cnt;
1210 btree_remove(&a->used_space, right_pg, node);
1211 return 1;
1212 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1213 /* The interval can be added by simply growing the left interval. */
1214 leaf->value[leaf->keys - 1] += count;
1215 return 1;
1216 } else if (page + count*PAGE_SIZE == right_pg) {
1217 /*
1218 * The interval can be added by simply moving the base of the right
1219 * interval down and increasing its size accordingly.
1220 */
1221 node->value[0] += count;
1222 node->key[0] = page;
1223 return 1;
1224 } else {
1225 /*
1226 * The interval is between both neighbouring intervals,
1227 * but cannot be merged with any of them.
1228 */
1229 btree_insert(&a->used_space, page, (void *) count, leaf);
1230 return 1;
1231 }
1232 } else if (page >= leaf->key[leaf->keys - 1]) {
1233 __address left_pg = leaf->key[leaf->keys - 1];
1234 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1235
1236 /*
1237 * Investigate the border case in which the right neighbour does not
1238 * exist but the interval fits from the right.
1239 */
1240
1241 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1242 /* The interval intersects with the left interval. */
1243 return 0;
1244 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1245 /* The interval can be added by growing the left interval. */
1246 leaf->value[leaf->keys - 1] += count;
1247 return 1;
1248 } else {
1249 /*
1250 * The interval doesn't adjoin with the left interval.
1251 * It must be added individually.
1252 */
1253 btree_insert(&a->used_space, page, (void *) count, leaf);
1254 return 1;
1255 }
1256 }
1257
1258 /*
1259 * Note that if the algorithm made it thus far, the interval can fit only
1260 * between two other intervals of the leaf. The two border cases were already
1261 * resolved.
1262 */
1263 for (i = 1; i < leaf->keys; i++) {
1264 if (page < leaf->key[i]) {
1265 __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1266 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1267
1268 /*
1269 * The interval fits between left_pg and right_pg.
1270 */
1271
1272 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1273 /* The interval intersects with the left interval. */
1274 return 0;
1275 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1276 /* The interval intersects with the right interval. */
1277 return 0;
1278 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1279 /* The interval can be added by merging the two already present intervals. */
1280 leaf->value[i - 1] += count + right_cnt;
1281 btree_remove(&a->used_space, right_pg, leaf);
1282 return 1;
1283 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1284 /* The interval can be added by simply growing the left interval. */
1285 leaf->value[i - 1] += count;
1286 return 1;
1287 } else if (page + count*PAGE_SIZE == right_pg) {
1288 /*
1289 * The interval can be added by simply moving the base of the right
1290 * interval down and increasing its size accordingly.
1291 */
1292 leaf->value[i] += count;
1293 leaf->key[i] = page;
1294 return 1;
1295 } else {
1296 /*
1297 * The interval is between both neighbouring intervals,
1298 * but cannot be merged with any of them.
1299 */
1300 btree_insert(&a->used_space, page, (void *) count, leaf);
1301 return 1;
1302 }
1303 }
1304 }
1305
1306 panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
1307}
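/*
 * Example, added for illustration (assuming 4K pages): if the leaf
 * already records the interval [0x1000, 2 pages) and used_space_insert()
 * is called with page == 0x3000 and count == 1, then
 * page == left_pg + left_cnt*PAGE_SIZE, so the existing record simply
 * grows to 3 pages instead of a new key being inserted.
 */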
1308
1309/** Mark portion of address space area as unused.
1310 *
1311 * The address space area must be already locked.
1312 *
1313 * @param a Address space area.
1314 * @param page First page to be marked.
1315 * @param count Number of pages to be marked.
1316 *
1317 * @return 0 on failure and 1 on success.
1318 */
1319int used_space_remove(as_area_t *a, __address page, count_t count)
1320{
1321 btree_node_t *leaf, *node;
1322 count_t pages;
1323 int i;
1324
1325 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1326 ASSERT(count);
1327
1328 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1329 if (pages) {
1330 /*
1331 * We are lucky, page is the beginning of some interval.
1332 */
1333 if (count > pages) {
1334 return 0;
1335 } else if (count == pages) {
1336 btree_remove(&a->used_space, page, leaf);
1337 return 1;
1338 } else {
1339 /*
1340 * Find the respective interval.
1341 * Decrease its size and relocate its start address.
1342 */
1343 for (i = 0; i < leaf->keys; i++) {
1344 if (leaf->key[i] == page) {
1345 leaf->key[i] += count*PAGE_SIZE;
1346 leaf->value[i] -= count;
1347 return 1;
1348 }
1349 }
1350 goto error;
1351 }
1352 }
1353
1354 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1355 if (node && page < leaf->key[0]) {
1356 __address left_pg = node->key[node->keys - 1];
1357 count_t left_cnt = (count_t) node->value[node->keys - 1];
1358
1359 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1360 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1361 /*
1362 * The interval is contained in the rightmost interval
1363 * of the left neighbour and can be removed by
1364 * updating the size of the bigger interval.
1365 */
1366 node->value[node->keys - 1] -= count;
1367 return 1;
1368 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1369 count_t new_cnt;
1370
1371 /*
1372 * The interval is contained in the rightmost interval
1373 * of the left neighbour but its removal requires
1374 * both updating the size of the original interval and
1375 * also inserting a new interval.
1376 */
1377 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1378 node->value[node->keys - 1] -= count + new_cnt;
1379 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1380 return 1;
1381 }
1382 }
1383 return 0;
1384 } else if (page < leaf->key[0]) {
1385 return 0;
1386 }
1387
1388 if (page > leaf->key[leaf->keys - 1]) {
1389 __address left_pg = leaf->key[leaf->keys - 1];
1390 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1391
1392 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1393 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1394 /*
1395 * The interval is contained in the rightmost interval
1396 * of the leaf and can be removed by updating the size
1397 * of the bigger interval.
1398 */
1399 leaf->value[leaf->keys - 1] -= count;
1400 return 1;
1401 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1402 count_t new_cnt;
1403
1404 /*
1405 * The interval is contained in the rightmost interval
1406 * of the leaf but its removal requires both updating
1407 * the size of the original interval and
1408 * also inserting a new interval.
1409 */
1410 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1411 leaf->value[leaf->keys - 1] -= count + new_cnt;
1412 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1413 return 1;
1414 }
1415 }
1416 return 0;
1417 }
1418
1419 /*
1420 * The border cases have been already resolved.
1421 * Now the interval can be only between intervals of the leaf.
1422 */
1423 for (i = 1; i < leaf->keys - 1; i++) {
1424 if (page < leaf->key[i]) {
1425 __address left_pg = leaf->key[i - 1];
1426 count_t left_cnt = (count_t) leaf->value[i - 1];
1427
1428 /*
1429 * Now the interval is between intervals corresponding to (i - 1) and i.
1430 */
1431 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1432 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1433 /*
1434 * The interval is contained in the interval (i - 1)
1435 * of the leaf and can be removed by updating the size
1436 * of the bigger interval.
1437 */
1438 leaf->value[i - 1] -= count;
1439 return 1;
1440 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1441 count_t new_cnt;
1442
1443 /*
1444 * The interval is contained in the interval (i - 1)
1445 * of the leaf but its removal requires both updating
1446 * the size of the original interval and
1447 * also inserting a new interval.
1448 */
1449 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1450 leaf->value[i - 1] -= count + new_cnt;
1451 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1452 return 1;
1453 }
1454 }
1455 return 0;
1456 }
1457 }
1458
1459error:
1460 panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
1461}
1462
1463/** Remove reference to address space area share info.
1464 *
1465 * If the reference count drops to 0, the sh_info is deallocated.
1466 *
1467 * @param sh_info Pointer to address space area share info.
1468 */
1469void sh_info_remove_reference(share_info_t *sh_info)
1470{
1471 bool dealloc = false;
1472
1473 mutex_lock(&sh_info->lock);
1474 ASSERT(sh_info->refcount);
1475 if (--sh_info->refcount == 0) {
1476 dealloc = true;
1477 link_t *cur;
1478
1479 /*
1480 * Now walk carefully the pagemap B+tree and free/remove
1481 * reference from all frames found there.
1482 */
1483 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1484 btree_node_t *node;
1485 int i;
1486
1487 node = list_get_instance(cur, btree_node_t, leaf_link);
1488 for (i = 0; i < node->keys; i++)
1489 frame_free(ADDR2PFN((__address) node->value[i]));
1490 }
1491
1492 }
1493 mutex_unlock(&sh_info->lock);
1494
1495 if (dealloc) {
1496 btree_destroy(&sh_info->pagemap);
1497 free(sh_info);
1498 }
1499}
1500
1501/*
1502 * Address space related syscalls.
1503 */
1504
1505/** Wrapper for as_area_create(). */
1506__native sys_as_area_create(__address address, size_t size, int flags)
1507{
1508 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1509 return (__native) address;
1510 else
1511 return (__native) -1;
1512}
1513
1514/** Wrapper for as_area_resize. */
1515__native sys_as_area_resize(__address address, size_t size, int flags)
1516{
1517 return (__native) as_area_resize(AS, address, size, 0);
1518}
1519
1520/** Wrapper for as_area_destroy. */
1521__native sys_as_area_destroy(__address address)
1522{
1523 return (__native) as_area_destroy(AS, address);
1524}