source: mainline/generic/src/mm/as.c@ 1e0a5fc

Last change on this file since 1e0a5fc was fbf7b4c, checked in by Martin Decky <martin@…>, 20 years ago

make kernel prints case consistent

[20d50a1]1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[b6529ae]29 /** @addtogroup genericmm
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[9179d0a]35 * @brief Address space related functions.
36 *
[20d50a1]37 * This file contains address space manipulation functions.
38 * Roughly speaking, this is a higher-level client of
39 * the Virtual Address Translation (VAT) subsystem.
[9179d0a]40 *
41 * Functionality provided by this file allows one to
42 * create address spaces and to create, resize and share
43 * address space areas.
44 *
45 * @see page.c
46 *
[20d50a1]47 */
48
49#include <mm/as.h>
[ef67bab]50#include <arch/mm/as.h>
[20d50a1]51#include <mm/page.h>
52#include <mm/frame.h>
[085d973]53#include <mm/slab.h>
[20d50a1]54#include <mm/tlb.h>
55#include <arch/mm/page.h>
56#include <genarch/mm/page_pt.h>
[2802767]57#include <genarch/mm/page_ht.h>
[4512d7e]58#include <mm/asid.h>
[20d50a1]59#include <arch/mm/asid.h>
60#include <synch/spinlock.h>
[1068f6a]61#include <synch/mutex.h>
[5c9a08b]62#include <adt/list.h>
[252127e]63#include <adt/btree.h>
[df0103f7]64#include <proc/task.h>
[e3c762cd]65#include <proc/thread.h>
[20d50a1]66#include <arch/asm.h>
[df0103f7]67#include <panic.h>
[20d50a1]68#include <debug.h>
[df0103f7]69#include <print.h>
[20d50a1]70#include <memstr.h>
[5a7d9d1]71#include <macros.h>
[20d50a1]72#include <arch.h>
[df0103f7]73#include <errno.h>
74#include <config.h>
[25bf215]75#include <align.h>
[df0103f7]76#include <arch/types.h>
77#include <typedefs.h>
[e3c762cd]78#include <syscall/copy.h>
79#include <arch/interrupt.h>
[20d50a1]80
[ef67bab]81as_operations_t *as_operations = NULL;
[20d50a1]82
[47800e0]83/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
84SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
[7e4e532]85
86/**
87 * This list contains address spaces that are not active on any
88 * processor and that have valid ASID.
89 */
90LIST_INITIALIZE(inactive_as_with_asid_head);
91
[071a8ae6]92/** Kernel address space. */
93as_t *AS_KERNEL = NULL;
94
[df0103f7]95static int area_flags_to_page_flags(int aflags);
[d3e7ff4]96static as_area_t *find_area_and_lock(as_t *as, __address va);
[37e7d2b9]97static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
[8182031]98static void sh_info_remove_reference(share_info_t *sh_info);
[20d50a1]99
[ef67bab]100/** Initialize address space subsystem. */
101void as_init(void)
102{
103 as_arch_init();
[8e1ea655]104 AS_KERNEL = as_create(FLAG_AS_KERNEL);
[125e944]105 if (!AS_KERNEL)
106 panic("can't create kernel address space\n");
107
[ef67bab]108}
109
[071a8ae6]110/** Create address space.
111 *
112 * @param flags Flags that influence the way in which the address space is created.
113 */
[ef67bab]114as_t *as_create(int flags)
[20d50a1]115{
116 as_t *as;
117
[bb68433]118 as = (as_t *) malloc(sizeof(as_t), 0);
[7e4e532]119 link_initialize(&as->inactive_as_with_asid_link);
[1068f6a]120 mutex_initialize(&as->lock);
[252127e]121 btree_create(&as->as_area_btree);
[bb68433]122
123 if (flags & FLAG_AS_KERNEL)
124 as->asid = ASID_KERNEL;
125 else
126 as->asid = ASID_INVALID;
127
[482826d]128 as->refcount = 0;
[47800e0]129 as->cpu_refcount = 0;
[bb68433]130 as->page_table = page_table_create(flags);
[20d50a1]131
132 return as;
133}
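
/*
 * Editor's sketch (hypothetical, not part of the original file): the
 * intended use of as_create(). Only interfaces declared above and in
 * mm/as.h are assumed; the helper name is made up for illustration.
 */
static as_t *example_create_user_as(void)
{
	/* No FLAG_AS_KERNEL: a user address space with ASID_INVALID. */
	as_t *as = as_create(0);

	/* A task taking ownership would increment the reference count. */
	as->refcount++;

	return as;
}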
134
[482826d]135/** Destroy address space.
136 *
137 * When there are no tasks referencing this address space (i.e. its refcount is zero),
138 * the address space can be destroyed.
139 */
140void as_destroy(as_t *as)
[5be1923]141{
[482826d]142 ipl_t ipl;
[6f9a9bc]143 bool cond;
[482826d]144
145 ASSERT(as->refcount == 0);
146
147 /*
148 * Since there is no reference to this address space,
149 * it is safe not to lock its mutex.
150 */
151 ipl = interrupts_disable();
152 spinlock_lock(&inactive_as_with_asid_lock);
[31e8ddd]153 if (as->asid != ASID_INVALID && as != AS_KERNEL) {
[6f9a9bc]154 if (as != AS && as->cpu_refcount == 0)
[31e8ddd]155 list_remove(&as->inactive_as_with_asid_link);
[482826d]156 asid_put(as->asid);
157 }
158 spinlock_unlock(&inactive_as_with_asid_lock);
159
160 /*
161 * Destroy address space areas of the address space.
[6f9a9bc]162 * The B+tree must be walked carefully because it is
163 * also being destroyed.
[482826d]164 */
[6f9a9bc]165 for (cond = true; cond; ) {
[482826d]166 btree_node_t *node;
[6f9a9bc]167
168 ASSERT(!list_empty(&as->as_area_btree.leaf_head));
169 node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
170
171 if ((cond = node->keys)) {
172 as_area_destroy(as, node->key[0]);
173 }
[482826d]174 }
[f8d069e8]175
[152b2b0]176 btree_destroy(&as->as_area_btree);
[482826d]177 page_table_destroy(as->page_table);
[5be1923]178
[482826d]179 interrupts_restore(ipl);
180
[5be1923]181 free(as);
182}
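
/*
 * Editor's sketch (hypothetical): dropping the last task reference and
 * destroying the address space. Real code would manipulate refcount
 * under the appropriate synchronization.
 */
static void example_release_as(as_t *as)
{
	if (--as->refcount == 0) {
		/* No task references it anymore; see the ASSERT above. */
		as_destroy(as);
	}
}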
183
[20d50a1]184/** Create address space area of common attributes.
185 *
186 * The created address space area is added to the target address space.
187 *
188 * @param as Target address space.
[a9e8b39]189 * @param flags Flags of the area memory.
[37e7d2b9]190 * @param size Size of area.
[20d50a1]191 * @param base Base address of area.
[a9e8b39]192 * @param attrs Attributes of the area.
[8182031]193 * @param backend Address space area backend. NULL if no backend is used.
194 * @param backend_data NULL or a pointer to an array holding two void *.
[20d50a1]195 *
196 * @return Address space area on success or NULL on failure.
197 */
[8182031]198as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
[0ee077ee]199 mem_backend_t *backend, mem_backend_data_t *backend_data)
[20d50a1]200{
201 ipl_t ipl;
202 as_area_t *a;
203
204 if (base % PAGE_SIZE)
[37e7d2b9]205 return NULL;
206
[dbbeb26]207 if (!size)
208 return NULL;
209
[37e7d2b9]210 /* Writeable executable areas are not supported. */
211 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
212 return NULL;
[20d50a1]213
214 ipl = interrupts_disable();
[1068f6a]215 mutex_lock(&as->lock);
[20d50a1]216
[37e7d2b9]217 if (!check_area_conflicts(as, base, size, NULL)) {
[1068f6a]218 mutex_unlock(&as->lock);
[37e7d2b9]219 interrupts_restore(ipl);
220 return NULL;
221 }
[20d50a1]222
[bb68433]223 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
224
[1068f6a]225 mutex_initialize(&a->lock);
[bb68433]226
[0ee077ee]227 a->as = as;
[c23502d]228 a->flags = flags;
[a9e8b39]229 a->attributes = attrs;
[37e7d2b9]230 a->pages = SIZE2FRAMES(size);
[bb68433]231 a->base = base;
[8182031]232 a->sh_info = NULL;
233 a->backend = backend;
[0ee077ee]234 if (backend_data)
235 a->backend_data = *backend_data;
236 else
237 memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
238
[25bf215]239 btree_create(&a->used_space);
[bb68433]240
[252127e]241 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
[20d50a1]242
[1068f6a]243 mutex_unlock(&as->lock);
[20d50a1]244 interrupts_restore(ipl);
[f9425006]245
[20d50a1]246 return a;
247}
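
/*
 * Editor's sketch (hypothetical): creating a one-page, read-write,
 * cacheable anonymous area at 'base'. This mirrors what
 * sys_as_area_create() near the end of this file does.
 */
static as_area_t *example_create_anon_area(as_t *as, __address base)
{
	return as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
}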
248
[df0103f7]249/** Find address space area and change it.
250 *
251 * @param as Address space.
252 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
253 * @param size New size of the virtual memory block starting at address.
254 * @param flags Flags influencing the remap operation. Currently unused.
255 *
[7242a78e]256 * @return Zero on success or a value from @ref errno.h otherwise.
[df0103f7]257 */
[7242a78e]258int as_area_resize(as_t *as, __address address, size_t size, int flags)
[df0103f7]259{
[7242a78e]260 as_area_t *area;
[df0103f7]261 ipl_t ipl;
262 size_t pages;
263
264 ipl = interrupts_disable();
[1068f6a]265 mutex_lock(&as->lock);
[df0103f7]266
267 /*
268 * Locate the area.
269 */
270 area = find_area_and_lock(as, address);
271 if (!area) {
[1068f6a]272 mutex_unlock(&as->lock);
[df0103f7]273 interrupts_restore(ipl);
[7242a78e]274 return ENOENT;
[df0103f7]275 }
276
[0ee077ee]277 if (area->backend == &phys_backend) {
[df0103f7]278 /*
279 * Remapping of address space areas associated
280 * with memory mapped devices is not supported.
281 */
[1068f6a]282 mutex_unlock(&area->lock);
283 mutex_unlock(&as->lock);
[df0103f7]284 interrupts_restore(ipl);
[7242a78e]285 return ENOTSUP;
[df0103f7]286 }
[8182031]287 if (area->sh_info) {
288 /*
289 * Remapping of shared address space areas
290 * is not supported.
291 */
292 mutex_unlock(&area->lock);
293 mutex_unlock(&as->lock);
294 interrupts_restore(ipl);
295 return ENOTSUP;
296 }
[df0103f7]297
298 pages = SIZE2FRAMES((address - area->base) + size);
299 if (!pages) {
300 /*
301 * Zero size address space areas are not allowed.
302 */
[1068f6a]303 mutex_unlock(&area->lock);
304 mutex_unlock(&as->lock);
[df0103f7]305 interrupts_restore(ipl);
[7242a78e]306 return EPERM;
[df0103f7]307 }
308
309 if (pages < area->pages) {
[56789125]310 bool cond;
311 __address start_free = area->base + pages*PAGE_SIZE;
[df0103f7]312
313 /*
314 * Shrinking the area.
315 * No need to check for overlaps.
316 */
317
[5552d60]318 /*
319 * Start TLB shootdown sequence.
320 */
321 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
322
[56789125]323 /*
324 * Remove frames belonging to used space starting from
325 * the highest addresses downwards until an overlap with
326 * the resized address space area is found. Note that this
327 * is also the right way to remove part of the used_space
328 * B+tree leaf list.
329 */
330 for (cond = true; cond;) {
331 btree_node_t *node;
332
333 ASSERT(!list_empty(&area->used_space.leaf_head));
334 node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
335 if ((cond = (bool) node->keys)) {
336 __address b = node->key[node->keys - 1];
337 count_t c = (count_t) node->value[node->keys - 1];
338 int i = 0;
339
340 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
341
342 if (b + c*PAGE_SIZE <= start_free) {
343 /*
344 * The whole interval fits completely
345 * in the resized address space area.
346 */
347 break;
348 }
349
350 /*
351 * Part of the interval corresponding to b and c
352 * overlaps with the resized address space area.
353 */
354
355 cond = false; /* we are almost done */
356 i = (start_free - b) >> PAGE_WIDTH;
357 if (!used_space_remove(area, start_free, c - i))
358 panic("Could not remove used space.");
359 } else {
360 /*
361 * The interval of used space can be completely removed.
362 */
363 if (!used_space_remove(area, b, c))
364 panic("Could not remove used space.\n");
365 }
366
367 for (; i < c; i++) {
368 pte_t *pte;
369
370 page_table_lock(as, false);
371 pte = page_mapping_find(as, b + i*PAGE_SIZE);
372 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
[0ee077ee]373 if (area->backend && area->backend->frame_free) {
374 area->backend->frame_free(area,
[8182031]375 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
376 }
[56789125]377 page_mapping_remove(as, b + i*PAGE_SIZE);
378 page_table_unlock(as, false);
379 }
[df0103f7]380 }
381 }
[5552d60]382
[df0103f7]383 /*
[5552d60]384 * Finish TLB shootdown sequence.
[df0103f7]385 */
386 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
387 tlb_shootdown_finalize();
388 } else {
389 /*
390 * Growing the area.
391 * Check for overlaps with other address space areas.
392 */
393 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
[1068f6a]394 mutex_unlock(&area->lock);
395 mutex_unlock(&as->lock);
[df0103f7]396 interrupts_restore(ipl);
[7242a78e]397 return EADDRNOTAVAIL;
[df0103f7]398 }
399 }
400
401 area->pages = pages;
402
[1068f6a]403 mutex_unlock(&area->lock);
404 mutex_unlock(&as->lock);
[df0103f7]405 interrupts_restore(ipl);
406
[7242a78e]407 return 0;
408}
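
/*
 * Editor's sketch (hypothetical): doubling an area that starts at
 * 'base'. Because 'size' is the new size of the block starting at
 * 'address', passing the base address resizes the whole area.
 */
static int example_grow_area(as_t *as, __address base, size_t old_size)
{
	int rc = as_area_resize(as, base, 2 * old_size, 0);

	/* rc is 0 or ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL; see above. */
	return rc;
}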
409
410/** Destroy address space area.
411 *
412 * @param as Address space.
413 * @param address Address within the area to be deleted.
414 *
415 * @return Zero on success or a value from @ref errno.h on failure.
416 */
417int as_area_destroy(as_t *as, __address address)
418{
419 as_area_t *area;
420 __address base;
[f8d069e8]421 link_t *cur;
[7242a78e]422 ipl_t ipl;
423
424 ipl = interrupts_disable();
[1068f6a]425 mutex_lock(&as->lock);
[7242a78e]426
427 area = find_area_and_lock(as, address);
428 if (!area) {
[1068f6a]429 mutex_unlock(&as->lock);
[7242a78e]430 interrupts_restore(ipl);
431 return ENOENT;
432 }
433
[56789125]434 base = area->base;
435
[5552d60]436 /*
437 * Start TLB shootdown sequence.
438 */
439 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
440
[567807b1]441 /*
442 * Visit only the pages mapped by used_space B+tree.
443 */
[f8d069e8]444 for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
[567807b1]445 btree_node_t *node;
[f8d069e8]446 int i;
[56789125]447
[f8d069e8]448 node = list_get_instance(cur, btree_node_t, leaf_link);
449 for (i = 0; i < node->keys; i++) {
450 __address b = node->key[i];
451 count_t j;
[567807b1]452 pte_t *pte;
[56789125]453
[f8d069e8]454 for (j = 0; j < (count_t) node->value[i]; j++) {
[567807b1]455 page_table_lock(as, false);
[f8d069e8]456 pte = page_mapping_find(as, b + j*PAGE_SIZE);
[567807b1]457 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
[0ee077ee]458 if (area->backend && area->backend->frame_free) {
459 area->backend->frame_free(area,
[f8d069e8]460 b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
[56789125]461 }
[f8d069e8]462 page_mapping_remove(as, b + j*PAGE_SIZE);
[567807b1]463 page_table_unlock(as, false);
[7242a78e]464 }
465 }
466 }
[56789125]467
[7242a78e]468 /*
[5552d60]469 * Finish TLB shootdown sequence.
[7242a78e]470 */
471 tlb_invalidate_pages(AS->asid, area->base, area->pages);
472 tlb_shootdown_finalize();
[5552d60]473
474 btree_destroy(&area->used_space);
[7242a78e]475
[8d4f2ae]476 area->attributes |= AS_AREA_ATTR_PARTIAL;
[8182031]477
478 if (area->sh_info)
479 sh_info_remove_reference(area->sh_info);
480
[1068f6a]481 mutex_unlock(&area->lock);
[7242a78e]482
483 /*
484 * Remove the empty area from address space.
485 */
486 btree_remove(&AS->as_area_btree, base, NULL);
487
[8d4f2ae]488 free(area);
489
[1068f6a]490 mutex_unlock(&AS->lock);
[7242a78e]491 interrupts_restore(ipl);
492 return 0;
[df0103f7]493}
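
/*
 * Editor's sketch (hypothetical): tearing the area down again. Any
 * address within the area identifies it, not just the base.
 */
static void example_destroy_area(as_t *as, __address base)
{
	if (as_area_destroy(as, base) != 0)
		panic("example: no area at %p\n", base);
}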
494
[8d6bc2d5]495/** Share address space area with another or the same address space.
[df0103f7]496 *
[0ee077ee]497 * Address space area mapping is shared with a new address space area.
498 * If the source address space area has not been shared so far,
499 * a new sh_info is created. The new address space area simply gets the
500 * sh_info of the source area. The process of duplicating the
501 * mapping is done through the backend share function.
[8d6bc2d5]502 *
[fd4d8c0]503 * @param src_as Pointer to source address space.
[a9e8b39]504 * @param src_base Base address of the source address space area.
[fd4d8c0]505 * @param acc_size Expected size of the source area.
[46fc2f9]506 * @param dst_as Pointer to destination address space.
[fd4d8c0]507 * @param dst_base Target base address.
508 * @param dst_flags_mask Destination address space area flags mask.
[df0103f7]509 *
[7242a78e]510 * @return Zero on success or ENOENT if there is no such task or
[df0103f7]511 * if there is no such address space area,
512 * EPERM if there was a problem in accepting the area or
513 * ENOMEM if there was a problem in allocating destination
[8d6bc2d5]514 * address space area. ENOTSUP is returned if an attempt
515 * to share non-anonymous address space area is detected.
[df0103f7]516 */
[8d6bc2d5]517int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
[46fc2f9]518 as_t *dst_as, __address dst_base, int dst_flags_mask)
[df0103f7]519{
520 ipl_t ipl;
[a9e8b39]521 int src_flags;
522 size_t src_size;
523 as_area_t *src_area, *dst_area;
[8d6bc2d5]524 share_info_t *sh_info;
[0ee077ee]525 mem_backend_t *src_backend;
526 mem_backend_data_t src_backend_data;
[d6e5cbc]527
[7c23af9]528 ipl = interrupts_disable();
[1068f6a]529 mutex_lock(&src_as->lock);
[7c23af9]530 src_area = find_area_and_lock(src_as, src_base);
[a9e8b39]531 if (!src_area) {
[6fa476f7]532 /*
533 * Could not find the source address space area.
534 */
[1068f6a]535 mutex_unlock(&src_as->lock);
[6fa476f7]536 interrupts_restore(ipl);
537 return ENOENT;
538 }
[8d6bc2d5]539
[0ee077ee]540 if (!src_area->backend || !src_area->backend->share) {
[8d6bc2d5]541 /*
[0ee077ee]542 * There is no backend or the backend does not
543 * know how to share the area.
[8d6bc2d5]544 */
545 mutex_unlock(&src_area->lock);
546 mutex_unlock(&src_as->lock);
547 interrupts_restore(ipl);
548 return ENOTSUP;
549 }
550
[a9e8b39]551 src_size = src_area->pages * PAGE_SIZE;
552 src_flags = src_area->flags;
[0ee077ee]553 src_backend = src_area->backend;
554 src_backend_data = src_area->backend_data;
[1ec1fd8]555
556 /* Share the cacheable flag from the original mapping */
557 if (src_flags & AS_AREA_CACHEABLE)
558 dst_flags_mask |= AS_AREA_CACHEABLE;
559
[76d7305]560 if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
[8d6bc2d5]561 mutex_unlock(&src_area->lock);
562 mutex_unlock(&src_as->lock);
[df0103f7]563 interrupts_restore(ipl);
564 return EPERM;
565 }
[8d6bc2d5]566
567 /*
568 * Now we are committed to sharing the area.
569 * First prepare the area for sharing.
570 * Then it will be safe to unlock it.
571 */
572 sh_info = src_area->sh_info;
573 if (!sh_info) {
574 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
575 mutex_initialize(&sh_info->lock);
576 sh_info->refcount = 2;
577 btree_create(&sh_info->pagemap);
578 src_area->sh_info = sh_info;
579 } else {
580 mutex_lock(&sh_info->lock);
581 sh_info->refcount++;
582 mutex_unlock(&sh_info->lock);
583 }
584
[0ee077ee]585 src_area->backend->share(src_area);
[8d6bc2d5]586
587 mutex_unlock(&src_area->lock);
588 mutex_unlock(&src_as->lock);
589
[df0103f7]590 /*
[a9e8b39]591 * Create copy of the source address space area.
592 * The destination area is created with AS_AREA_ATTR_PARTIAL
593 * attribute set which prevents race condition with
594 * preliminary as_page_fault() calls.
[fd4d8c0]595 * The flags of the source area are masked against dst_flags_mask
596 * to support sharing in less privileged mode.
[df0103f7]597 */
[76d7305]598 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
[0ee077ee]599 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
[a9e8b39]600 if (!dst_area) {
[df0103f7]601 /*
602 * Destination address space area could not be created.
603 */
[8d6bc2d5]604 sh_info_remove_reference(sh_info);
605
[df0103f7]606 interrupts_restore(ipl);
607 return ENOMEM;
608 }
609
[a9e8b39]610 /*
611 * Now the destination address space area has been
612 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
[8d6bc2d5]613 * attribute and set the sh_info.
[a9e8b39]614 */
[1068f6a]615 mutex_lock(&dst_area->lock);
[a9e8b39]616 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
[8d6bc2d5]617 dst_area->sh_info = sh_info;
[1068f6a]618 mutex_unlock(&dst_area->lock);
[df0103f7]619
620 interrupts_restore(ipl);
621
622 return 0;
623}
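
/*
 * Editor's sketch (hypothetical): sharing 'size' bytes of an anonymous
 * area read-only with another address space. Since the source flags are
 * masked against dst_flags_mask, the destination cannot gain rights the
 * source area lacks.
 */
static int example_share_read_only(as_t *src_as, __address src_base,
    size_t size, as_t *dst_as, __address dst_base)
{
	return as_area_share(src_as, src_base, size, dst_as, dst_base,
	    AS_AREA_READ | AS_AREA_CACHEABLE);
}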
624
[fb84455]625/** Check access mode for address space area.
626 *
627 * The address space area must be locked prior to this call.
628 *
629 * @param area Address space area.
630 * @param access Access mode.
631 *
632 * @return False if access violates area's permissions, true otherwise.
633 */
634bool as_area_check_access(as_area_t *area, pf_access_t access)
635{
636 int flagmap[] = {
637 [PF_ACCESS_READ] = AS_AREA_READ,
638 [PF_ACCESS_WRITE] = AS_AREA_WRITE,
639 [PF_ACCESS_EXEC] = AS_AREA_EXEC
640 };
641
642 if (!(area->flags & flagmap[access]))
643 return false;
644
645 return true;
646}
647
[20d50a1]648/** Handle page fault within the current address space.
649 *
[8182031]650 * This is the high-level page fault handler. It decides
651 * whether the page fault can be resolved by any backend
652 * and if so, it invokes the backend to resolve the page
653 * fault.
654 *
[20d50a1]655 * Interrupts are assumed disabled.
656 *
657 * @param page Faulting page.
[567807b1]658 * @param access Access mode that caused the fault (i.e. read/write/exec).
[e3c762cd]659 * @param istate Pointer to interrupted state.
[20d50a1]660 *
[8182031]661 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
662 * fault was caused by copy_to_uspace() or copy_from_uspace().
[20d50a1]663 */
[567807b1]664int as_page_fault(__address page, pf_access_t access, istate_t *istate)
[20d50a1]665{
[2299914]666 pte_t *pte;
[d3e7ff4]667 as_area_t *area;
[20d50a1]668
[1068f6a]669 if (!THREAD)
[8182031]670 return AS_PF_FAULT;
[1068f6a]671
[20d50a1]672 ASSERT(AS);
[2299914]673
[1068f6a]674 mutex_lock(&AS->lock);
[d3e7ff4]675 area = find_area_and_lock(AS, page);
[20d50a1]676 if (!area) {
677 /*
678 * No area contained mapping for 'page'.
679 * Signal page fault to low-level handler.
680 */
[1068f6a]681 mutex_unlock(&AS->lock);
[e3c762cd]682 goto page_fault;
[20d50a1]683 }
684
[a9e8b39]685 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
686 /*
687 * The address space area is not fully initialized.
688 * Avoid a possible race by returning an error.
689 */
[1068f6a]690 mutex_unlock(&area->lock);
691 mutex_unlock(&AS->lock);
[e3c762cd]692 goto page_fault;
[a9e8b39]693 }
694
[0ee077ee]695 if (!area->backend || !area->backend->page_fault) {
[8182031]696 /*
697 * The address space area is not backed by any backend
698 * or the backend cannot handle page faults.
699 */
700 mutex_unlock(&area->lock);
701 mutex_unlock(&AS->lock);
702 goto page_fault;
703 }
[1ace9ea]704
[2299914]705 page_table_lock(AS, false);
706
707 /*
708 * To avoid race condition between two page faults
709 * on the same address, we need to make sure
710 * the mapping has not been already inserted.
711 */
712 if ((pte = page_mapping_find(AS, page))) {
713 if (PTE_PRESENT(pte)) {
[fb84455]714 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
715 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
716 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
717 page_table_unlock(AS, false);
718 mutex_unlock(&area->lock);
719 mutex_unlock(&AS->lock);
720 return AS_PF_OK;
721 }
[2299914]722 }
723 }
[20d50a1]724
725 /*
[8182031]726 * Resort to the backend page fault handler.
[20d50a1]727 */
[0ee077ee]728 if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
[8182031]729 page_table_unlock(AS, false);
730 mutex_unlock(&area->lock);
731 mutex_unlock(&AS->lock);
732 goto page_fault;
733 }
[20d50a1]734
[8182031]735 page_table_unlock(AS, false);
[1068f6a]736 mutex_unlock(&area->lock);
737 mutex_unlock(&AS->lock);
[e3c762cd]738 return AS_PF_OK;
739
740page_fault:
741 if (THREAD->in_copy_from_uspace) {
742 THREAD->in_copy_from_uspace = false;
743 istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
744 } else if (THREAD->in_copy_to_uspace) {
745 THREAD->in_copy_to_uspace = false;
746 istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
747 } else {
748 return AS_PF_FAULT;
749 }
750
751 return AS_PF_DEFER;
[20d50a1]752}
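
/*
 * Editor's sketch (hypothetical): the shape of an architecture-level
 * handler built on top of as_page_fault(). Interrupts are assumed to be
 * disabled on entry, as the function above requires.
 */
static void example_arch_fault_handler(__address badvaddr, istate_t *istate)
{
	__address page = ALIGN_DOWN(badvaddr, PAGE_SIZE);

	if (as_page_fault(page, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
		/*
		 * AS_PF_OK and AS_PF_DEFER both mean execution may resume;
		 * for AS_PF_DEFER the return address was already redirected
		 * to a copy_*_uspace() failover address.
		 */
		panic("page fault at %p\n", badvaddr);
	}
}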
753
[7e4e532]754/** Switch address spaces.
[1068f6a]755 *
756 * Note that this function cannot sleep as it is essentially a part of
[47800e0]757 * scheduling. Sleeping here would lead to deadlock on wakeup.
[20d50a1]758 *
[7e4e532]759 * @param old Old address space or NULL.
760 * @param new New address space.
[20d50a1]761 */
[7e4e532]762void as_switch(as_t *old, as_t *new)
[20d50a1]763{
764 ipl_t ipl;
[7e4e532]765 bool needs_asid = false;
[4512d7e]766
[20d50a1]767 ipl = interrupts_disable();
[47800e0]768 spinlock_lock(&inactive_as_with_asid_lock);
[7e4e532]769
770 /*
771 * First, take care of the old address space.
772 */
773 if (old) {
[1068f6a]774 mutex_lock_active(&old->lock);
[47800e0]775 ASSERT(old->cpu_refcount);
776 if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
[7e4e532]777 /*
778 * The old address space is no longer active on
779 * any processor. It can be appended to the
780 * list of inactive address spaces with assigned
781 * ASID.
782 */
783 ASSERT(old->asid != ASID_INVALID);
784 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
785 }
[1068f6a]786 mutex_unlock(&old->lock);
[7e4e532]787 }
788
789 /*
790 * Second, prepare the new address space.
791 */
[1068f6a]792 mutex_lock_active(&new->lock);
[47800e0]793 if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
[7e4e532]794 if (new->asid != ASID_INVALID)
795 list_remove(&new->inactive_as_with_asid_link);
796 else
797 needs_asid = true; /* defer call to asid_get() until new->lock is released */
798 }
799 SET_PTL0_ADDRESS(new->page_table);
[1068f6a]800 mutex_unlock(&new->lock);
[20d50a1]801
[7e4e532]802 if (needs_asid) {
803 /*
804 * Allocation of new ASID was deferred
805 * until now in order to avoid deadlock.
806 */
807 asid_t asid;
808
809 asid = asid_get();
[1068f6a]810 mutex_lock_active(&new->lock);
[7e4e532]811 new->asid = asid;
[1068f6a]812 mutex_unlock(&new->lock);
[7e4e532]813 }
[47800e0]814 spinlock_unlock(&inactive_as_with_asid_lock);
[7e4e532]815 interrupts_restore(ipl);
816
[20d50a1]817 /*
818 * Perform architecture-specific steps.
[4512d7e]819 * (e.g. write ASID to hardware register etc.)
[20d50a1]820 */
[7e4e532]821 as_install_arch(new);
[20d50a1]822
[7e4e532]823 AS = new;
[20d50a1]824}
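
/*
 * Editor's sketch (hypothetical): a scheduler switching between the
 * address spaces of two tasks. The surrounding context switch machinery
 * is omitted; as_switch() itself updates the global AS.
 */
static void example_scheduler_switch(as_t *prev, as_t *next)
{
	if (prev != next)
		as_switch(prev, next);
}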
[6a3c9a7]825
[df0103f7]826/** Convert address space area flags to page flags.
[6a3c9a7]827 *
[df0103f7]828 * @param aflags Flags of some address space area.
[6a3c9a7]829 *
[df0103f7]830 * @return Flags to be passed to page_mapping_insert().
[6a3c9a7]831 */
[df0103f7]832int area_flags_to_page_flags(int aflags)
[6a3c9a7]833{
834 int flags;
835
[9a8d91b]836 flags = PAGE_USER | PAGE_PRESENT;
[c23502d]837
[df0103f7]838 if (aflags & AS_AREA_READ)
[c23502d]839 flags |= PAGE_READ;
840
[df0103f7]841 if (aflags & AS_AREA_WRITE)
[c23502d]842 flags |= PAGE_WRITE;
843
[df0103f7]844 if (aflags & AS_AREA_EXEC)
[c23502d]845 flags |= PAGE_EXEC;
[6a3c9a7]846
[0ee077ee]847 if (aflags & AS_AREA_CACHEABLE)
[9a8d91b]848 flags |= PAGE_CACHEABLE;
849
[6a3c9a7]850 return flags;
851}
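
/*
 * Editor's worked example (not in the original source): a readable,
 * writable, cacheable user area translates to the page flags below.
 */
static void example_flag_translation(void)
{
	int pflags = area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE |
	    AS_AREA_CACHEABLE);

	ASSERT(pflags == (PAGE_USER | PAGE_PRESENT | PAGE_READ |
	    PAGE_WRITE | PAGE_CACHEABLE));
}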
[ef67bab]852
[df0103f7]853/** Compute flags for virtual address translation subsystem.
854 *
855 * The address space area must be locked.
856 * Interrupts must be disabled.
857 *
858 * @param a Address space area.
859 *
860 * @return Flags to be used in page_mapping_insert().
861 */
[8182031]862int as_area_get_flags(as_area_t *a)
[df0103f7]863{
864 return area_flags_to_page_flags(a->flags);
865}
866
[ef67bab]867/** Create page table.
868 *
869 * Depending on architecture, create either address space
870 * private or global page table.
871 *
872 * @param flags Flags saying whether the page table is for kernel address space.
873 *
874 * @return First entry of the page table.
875 */
876pte_t *page_table_create(int flags)
877{
878 ASSERT(as_operations);
879 ASSERT(as_operations->page_table_create);
880
881 return as_operations->page_table_create(flags);
882}
[d3e7ff4]883
[482826d]884/** Destroy page table.
885 *
886 * Destroy page table in architecture specific way.
887 *
888 * @param page_table Physical address of PTL0.
889 */
890void page_table_destroy(pte_t *page_table)
891{
892 ASSERT(as_operations);
893 ASSERT(as_operations->page_table_destroy);
894
895 as_operations->page_table_destroy(page_table);
896}
897
[2299914]898/** Lock page table.
899 *
900 * This function should be called before any page_mapping_insert(),
901 * page_mapping_remove() and page_mapping_find().
902 *
903 * Locking order is such that address space areas must be locked
904 * prior to this call. Address space can be locked prior to this
905 * call in which case the lock argument is false.
906 *
907 * @param as Address space.
[9179d0a]908 * @param lock If false, do not attempt to lock as->lock.
[2299914]909 */
910void page_table_lock(as_t *as, bool lock)
911{
912 ASSERT(as_operations);
913 ASSERT(as_operations->page_table_lock);
914
915 as_operations->page_table_lock(as, lock);
916}
917
918/** Unlock page table.
919 *
920 * @param as Address space.
[9179d0a]921 * @param unlock If false, do not attempt to unlock as->lock.
[2299914]922 */
923void page_table_unlock(as_t *as, bool unlock)
924{
925 ASSERT(as_operations);
926 ASSERT(as_operations->page_table_unlock);
927
928 as_operations->page_table_unlock(as, unlock);
929}
930
[d3e7ff4]931
932/** Find address space area and lock it.
933 *
934 * The address space must be locked and interrupts must be disabled.
935 *
936 * @param as Address space.
937 * @param va Virtual address.
938 *
939 * @return Locked address space area containing va on success or NULL on failure.
940 */
941as_area_t *find_area_and_lock(as_t *as, __address va)
942{
943 as_area_t *a;
[252127e]944 btree_node_t *leaf, *lnode;
945 int i;
946
947 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
948 if (a) {
949 /* va is the base address of an address space area */
[1068f6a]950 mutex_lock(&a->lock);
[252127e]951 return a;
952 }
[d3e7ff4]953
[252127e]954 /*
[c47912f]955 * Search the leaf node and the righmost record of its left neighbour
[252127e]956 * to find out whether this is a miss or va belongs to an address
957 * space area found there.
958 */
959
960 /* First, search the leaf node itself. */
961 for (i = 0; i < leaf->keys; i++) {
962 a = (as_area_t *) leaf->value[i];
[1068f6a]963 mutex_lock(&a->lock);
[252127e]964 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
965 return a;
966 }
[1068f6a]967 mutex_unlock(&a->lock);
[252127e]968 }
[d3e7ff4]969
[252127e]970 /*
[c47912f]971 * Second, locate the left neighbour and test its last record.
[b26db0c]972 * Because of its position in the B+tree, it must have base < va.
[252127e]973 */
[c47912f]974 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
[252127e]975 a = (as_area_t *) lnode->value[lnode->keys - 1];
[1068f6a]976 mutex_lock(&a->lock);
[252127e]977 if (va < a->base + a->pages * PAGE_SIZE) {
[37e7d2b9]978 return a;
[252127e]979 }
[1068f6a]980 mutex_unlock(&a->lock);
[d3e7ff4]981 }
982
983 return NULL;
984}
[37e7d2b9]985
986/** Check area conflicts with other areas.
987 *
988 * The address space must be locked and interrupts must be disabled.
989 *
990 * @param as Address space.
991 * @param va Starting virtual address of the area being tested.
992 * @param size Size of the area being tested.
993 * @param avoid_area Do not touch this area.
994 *
995 * @return True if there is no conflict, false otherwise.
996 */
997bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
998{
999 as_area_t *a;
[252127e]1000 btree_node_t *leaf, *node;
1001 int i;
[37e7d2b9]1002
[5a7d9d1]1003 /*
1004 * We don't want any area to have conflicts with NULL page.
1005 */
1006 if (overlaps(va, size, NULL, PAGE_SIZE))
1007 return false;
1008
[252127e]1009 /*
1010 * The leaf node is found in O(log n), where n is proportional to
1011 * the number of address space areas belonging to as.
1012 * The check for conflicts is then attempted on the rightmost
[c47912f]1013 * record in the left neighbour, the leftmost record in the right
1014 * neighbour and all records in the leaf node itself.
[252127e]1015 */
1016
1017 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1018 if (a != avoid_area)
1019 return false;
1020 }
1021
1022 /* First, check the two border cases. */
[c47912f]1023 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
[252127e]1024 a = (as_area_t *) node->value[node->keys - 1];
[1068f6a]1025 mutex_lock(&a->lock);
[252127e]1026 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1027 mutex_unlock(&a->lock);
[252127e]1028 return false;
1029 }
[1068f6a]1030 mutex_unlock(&a->lock);
[252127e]1031 }
[c47912f]1032 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
[252127e]1033 a = (as_area_t *) node->value[0];
[1068f6a]1034 mutex_lock(&a->lock);
[252127e]1035 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1036 mutex_unlock(&a->lock);
[252127e]1037 return false;
1038 }
[1068f6a]1039 mutex_unlock(&a->lock);
[252127e]1040 }
1041
1042 /* Second, check the leaf node. */
1043 for (i = 0; i < leaf->keys; i++) {
1044 a = (as_area_t *) leaf->value[i];
[37e7d2b9]1045
1046 if (a == avoid_area)
1047 continue;
[252127e]1048
[1068f6a]1049 mutex_lock(&a->lock);
[252127e]1050 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
[1068f6a]1051 mutex_unlock(&a->lock);
[252127e]1052 return false;
1053 }
[1068f6a]1054 mutex_unlock(&a->lock);
[5a7d9d1]1055 }
[37e7d2b9]1056
[5a7d9d1]1057 /*
1058 * So far, the area does not conflict with other areas.
1059 * Check if it doesn't conflict with kernel address space.
1060 */
1061 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1062 return !overlaps(va, size,
1063 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
[37e7d2b9]1064 }
1065
1066 return true;
1067}
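
/*
 * Editor's sketch (hypothetical): the NULL-page rule above in action.
 * An area starting at address 0 always conflicts, so the check fails
 * before the B+tree is even consulted.
 */
static void example_null_page_conflict(as_t *as)
{
	ASSERT(!check_area_conflicts(as, 0, PAGE_SIZE, NULL));
}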
[df0103f7]1068
[1068f6a]1069/** Return size of the address space area with given base. */
[7c23af9]1070size_t as_get_size(__address base)
1071{
1072 ipl_t ipl;
1073 as_area_t *src_area;
1074 size_t size;
1075
1076 ipl = interrupts_disable();
1077 src_area = find_area_and_lock(AS, base);
1078 if (src_area) {
1079 size = src_area->pages * PAGE_SIZE;
[1068f6a]1080 mutex_unlock(&src_area->lock);
[7c23af9]1081 } else {
1082 size = 0;
1083 }
1084 interrupts_restore(ipl);
1085 return size;
1086}
1087
[25bf215]1088/** Mark portion of address space area as used.
1089 *
1090 * The address space area must be already locked.
1091 *
1092 * @param a Address space area.
1093 * @param page First page to be marked.
1094 * @param count Number of pages to be marked.
1095 *
1096 * @return 0 on failure and 1 on success.
1097 */
1098int used_space_insert(as_area_t *a, __address page, count_t count)
1099{
1100 btree_node_t *leaf, *node;
1101 count_t pages;
1102 int i;
1103
1104 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1105 ASSERT(count);
1106
1107 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1108 if (pages) {
1109 /*
1110 * We hit the beginning of some used space.
1111 */
1112 return 0;
1113 }
1114
[a6cb8cb]1115 if (!leaf->keys) {
1116 btree_insert(&a->used_space, page, (void *) count, leaf);
1117 return 1;
1118 }
1119
[25bf215]1120 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1121 if (node) {
1122 __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1123 count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1124
1125 /*
1126 * Examine the possibility that the interval fits
1127 * somewhere between the rightmost interval of
1128 * the left neighbour and the first interval of the leaf.
1129 */
1130
1131 if (page >= right_pg) {
1132 /* Do nothing. */
1133 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1134 /* The interval intersects with the left interval. */
1135 return 0;
1136 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1137 /* The interval intersects with the right interval. */
1138 return 0;
1139 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1140 /* The interval can be added by merging the two already present intervals. */
[56789125]1141 node->value[node->keys - 1] += count + right_cnt;
[25bf215]1142 btree_remove(&a->used_space, right_pg, leaf);
1143 return 1;
1144 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1145 /* The interval can be added by simply growing the left interval. */
[56789125]1146 node->value[node->keys - 1] += count;
[25bf215]1147 return 1;
1148 } else if (page + count*PAGE_SIZE == right_pg) {
1149 /*
1150 * The interval can be added by simply moving the base of the right
1151 * interval down and increasing its size accordingly.
1152 */
[56789125]1153 leaf->value[0] += count;
[25bf215]1154 leaf->key[0] = page;
1155 return 1;
1156 } else {
1157 /*
1158 * The interval is between both neighbouring intervals,
1159 * but cannot be merged with any of them.
1160 */
1161 btree_insert(&a->used_space, page, (void *) count, leaf);
1162 return 1;
1163 }
1164 } else if (page < leaf->key[0]) {
1165 __address right_pg = leaf->key[0];
1166 count_t right_cnt = (count_t) leaf->value[0];
1167
1168 /*
1169 * Investigate the border case in which the left neighbour does not
1170 * exist but the interval fits from the left.
1171 */
1172
1173 if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1174 /* The interval intersects with the right interval. */
1175 return 0;
1176 } else if (page + count*PAGE_SIZE == right_pg) {
1177 /*
1178 * The interval can be added by moving the base of the right interval down
1179 * and increasing its size accordingly.
1180 */
1181 leaf->key[0] = page;
[56789125]1182 leaf->value[0] += count;
[25bf215]1183 return 1;
1184 } else {
1185 /*
1186 * The interval doesn't adjoin with the right interval.
1187 * It must be added individually.
1188 */
1189 btree_insert(&a->used_space, page, (void *) count, leaf);
1190 return 1;
1191 }
1192 }
1193
1194 node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1195 if (node) {
1196 __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1197 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1198
1199 /*
1200 * Examine the possibility that the interval fits
1201 * somewhere between the leftmost interval of
1202 * the right neighbour and the last interval of the leaf.
1203 */
1204
1205 if (page < left_pg) {
1206 /* Do nothing. */
1207 } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1208 /* The interval intersects with the left interval. */
1209 return 0;
1210 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1211 /* The interval intersects with the right interval. */
1212 return 0;
1213 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1214 /* The interval can be added by merging the two already present intervals. */
[56789125]1215 leaf->value[leaf->keys - 1] += count + right_cnt;
[25bf215]1216 btree_remove(&a->used_space, right_pg, node);
1217 return 1;
1218 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1219 /* The interval can be added by simply growing the left interval. */
[56789125]1220 leaf->value[leaf->keys - 1] += count;
[25bf215]1221 return 1;
1222 } else if (page + count*PAGE_SIZE == right_pg) {
1223 /*
1224 * The interval can be added by simply moving the base of the right
1225 * interval down and increasing its size accordingly.
1226 */
[56789125]1227 node->value[0] += count;
[25bf215]1228 node->key[0] = page;
1229 return 1;
1230 } else {
1231 /*
1232 * The interval is between both neighbouring intervals,
1233 * but cannot be merged with any of them.
1234 */
1235 btree_insert(&a->used_space, page, (void *) count, leaf);
1236 return 1;
1237 }
1238 } else if (page >= leaf->key[leaf->keys - 1]) {
1239 __address left_pg = leaf->key[leaf->keys - 1];
1240 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1241
1242 /*
1243 * Investigate the border case in which the right neighbour does not
1244 * exist but the interval fits from the right.
1245 */
1246
1247 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
[56789125]1248 /* The interval intersects with the left interval. */
[25bf215]1249 return 0;
1250 } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1251 /* The interval can be added by growing the left interval. */
[56789125]1252 leaf->value[leaf->keys - 1] += count;
[25bf215]1253 return 1;
1254 } else {
1255 /*
1256 * The interval doesn't adjoin with the left interval.
1257 * It must be added individually.
1258 */
1259 btree_insert(&a->used_space, page, (void *) count, leaf);
1260 return 1;
1261 }
1262 }
1263
1264 /*
1265 * Note that if the algorithm made it thus far, the interval can fit only
1266 * between two other intervals of the leaf. The two border cases were already
1267 * resolved.
1268 */
1269 for (i = 1; i < leaf->keys; i++) {
1270 if (page < leaf->key[i]) {
1271 __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1272 count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1273
1274 /*
1275 * The interval fits between left_pg and right_pg.
1276 */
1277
1278 if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1279 /* The interval intersects with the left interval. */
1280 return 0;
1281 } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1282 /* The interval intersects with the right interval. */
1283 return 0;
1284 } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1285 /* The interval can be added by merging the two already present intervals. */
[56789125]1286 leaf->value[i - 1] += count + right_cnt;
[25bf215]1287 btree_remove(&a->used_space, right_pg, leaf);
1288 return 1;
1289 } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1290 /* The interval can be added by simply growing the left interval. */
[56789125]1291 leaf->value[i - 1] += count;
[25bf215]1292 return 1;
1293 } else if (page + count*PAGE_SIZE == right_pg) {
1294 /*
1295 * The interval can be added by simply moving the base of the right
1296 * interval down and increasing its size accordingly.
1297 */
[56789125]1298 leaf->value[i] += count;
[25bf215]1299 leaf->key[i] = page;
1300 return 1;
1301 } else {
1302 /*
1303 * The interval is between both neighbouring intervals,
1304 * but cannot be merged with any of them.
1305 */
1306 btree_insert(&a->used_space, page, (void *) count, leaf);
1307 return 1;
1308 }
1309 }
1310 }
1311
[fbf7b4c]1312 panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
[25bf215]1313}
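
/*
 * Editor's sketch (hypothetical): how a backend might record a freshly
 * mapped page. The area must already be locked by the caller, as
 * documented above.
 */
static void example_record_mapping(as_area_t *area, __address page)
{
	if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1))
		panic("example: could not insert used space\n");
}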
1314
1315/** Mark portion of address space area as unused.
1316 *
1317 * The address space area must be already locked.
1318 *
1319 * @param a Address space area.
1320 * @param page First page to be marked.
1321 * @param count Number of pages to be marked.
1322 *
1323 * @return 0 on failure and 1 on success.
1324 */
1325int used_space_remove(as_area_t *a, __address page, count_t count)
1326{
1327 btree_node_t *leaf, *node;
1328 count_t pages;
1329 int i;
1330
1331 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1332 ASSERT(count);
1333
1334 pages = (count_t) btree_search(&a->used_space, page, &leaf);
1335 if (pages) {
1336 /*
1337 * We are lucky, page is the beginning of some interval.
1338 */
1339 if (count > pages) {
1340 return 0;
1341 } else if (count == pages) {
1342 btree_remove(&a->used_space, page, leaf);
[56789125]1343 return 1;
[25bf215]1344 } else {
1345 /*
1346 * Find the respective interval.
1347 * Decrease its size and relocate its start address.
1348 */
1349 for (i = 0; i < leaf->keys; i++) {
1350 if (leaf->key[i] == page) {
1351 leaf->key[i] += count*PAGE_SIZE;
[56789125]1352 leaf->value[i] -= count;
[25bf215]1353 return 1;
1354 }
1355 }
1356 goto error;
1357 }
1358 }
1359
1360 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1361 if (node && page < leaf->key[0]) {
1362 __address left_pg = node->key[node->keys - 1];
1363 count_t left_cnt = (count_t) node->value[node->keys - 1];
1364
1365 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1366 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1367 /*
1368 * The interval is contained in the rightmost interval
1369 * of the left neighbour and can be removed by
1370 * updating the size of the bigger interval.
1371 */
[56789125]1372 node->value[node->keys - 1] -= count;
[25bf215]1373 return 1;
1374 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1375 count_t new_cnt;
[25bf215]1376
1377 /*
1378 * The interval is contained in the rightmost interval
1379 * of the left neighbour but its removal requires
1380 * both updating the size of the original interval and
1381 * also inserting a new interval.
1382 */
[56789125]1383 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1384 node->value[node->keys - 1] -= count + new_cnt;
[25bf215]1385 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1386 return 1;
1387 }
1388 }
1389 return 0;
1390 } else if (page < leaf->key[0]) {
1391 return 0;
1392 }
1393
1394 if (page > leaf->key[leaf->keys - 1]) {
1395 __address left_pg = leaf->key[leaf->keys - 1];
1396 count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1397
1398 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1399 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1400 /*
1401 * The interval is contained in the rightmost interval
1402 * of the leaf and can be removed by updating the size
1403 * of the bigger interval.
1404 */
[56789125]1405 leaf->value[leaf->keys - 1] -= count;
[25bf215]1406 return 1;
1407 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1408 count_t new_cnt;
[25bf215]1409
1410 /*
1411 * The interval is contained in the rightmost interval
1412 * of the leaf but its removal requires both updating
1413 * the size of the original interval and
1414 * also inserting a new interval.
1415 */
[56789125]1416 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1417 leaf->value[leaf->keys - 1] -= count + new_cnt;
[25bf215]1418 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1419 return 1;
1420 }
1421 }
1422 return 0;
1423 }
1424
1425 /*
1426 * The border cases have been already resolved.
1427 * Now the interval can be only between intervals of the leaf.
1428 */
1429 for (i = 1; i < leaf->keys - 1; i++) {
1430 if (page < leaf->key[i]) {
1431 __address left_pg = leaf->key[i - 1];
1432 count_t left_cnt = (count_t) leaf->value[i - 1];
1433
1434 /*
1435 * Now the interval is between intervals corresponding to (i - 1) and i.
1436 */
1437 if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1438 if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1439 /*
1440 * The interval is contained in the interval (i - 1)
1441 * of the leaf and can be removed by updating the size
1442 * of the bigger interval.
1443 */
[56789125]1444 leaf->value[i - 1] -= count;
[25bf215]1445 return 1;
1446 } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
[56789125]1447 count_t new_cnt;
[25bf215]1448
1449 /*
1450 * The interval is contained in the interval (i - 1)
1451 * of the leaf but its removal requires both updating
1452 * the size of the original interval and
1453 * also inserting a new interval.
1454 */
[56789125]1455 new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1456 leaf->value[i - 1] -= count + new_cnt;
[25bf215]1457 btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1458 return 1;
1459 }
1460 }
1461 return 0;
1462 }
1463 }
1464
1465error:
[fbf7b4c]1466 panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
[25bf215]1467}
1468
[8182031]1469/** Remove reference to address space area share info.
1470 *
1471 * If the reference count drops to 0, the sh_info is deallocated.
1472 *
1473 * @param sh_info Pointer to address space area share info.
1474 */
1475void sh_info_remove_reference(share_info_t *sh_info)
1476{
1477 bool dealloc = false;
1478
1479 mutex_lock(&sh_info->lock);
1480 ASSERT(sh_info->refcount);
1481 if (--sh_info->refcount == 0) {
1482 dealloc = true;
[f8d069e8]1483 link_t *cur;
[8182031]1484
1485 /*
1486 * Now carefully walk the pagemap B+tree and free/remove
1487 * references from all frames found there.
1488 */
[f8d069e8]1489 for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
[8182031]1490 btree_node_t *node;
[f8d069e8]1491 int i;
[8182031]1492
[f8d069e8]1493 node = list_get_instance(cur, btree_node_t, leaf_link);
1494 for (i = 0; i < node->keys; i++)
1495 frame_free(ADDR2PFN((__address) node->value[i]));
[8182031]1496 }
1497
1498 }
1499 mutex_unlock(&sh_info->lock);
1500
1501 if (dealloc) {
1502 btree_destroy(&sh_info->pagemap);
1503 free(sh_info);
1504 }
1505}
1506
[df0103f7]1507/*
1508 * Address space related syscalls.
1509 */
1510
1511/** Wrapper for as_area_create(). */
1512__native sys_as_area_create(__address address, size_t size, int flags)
1513{
[0ee077ee]1514 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
[df0103f7]1515 return (__native) address;
1516 else
1517 return (__native) -1;
1518}
1519
1520/** Wrapper for as_area_resize. */
1521__native sys_as_area_resize(__address address, size_t size, int flags)
1522{
[7242a78e]1523 return (__native) as_area_resize(AS, address, size, 0);
1524}
1525
1526/** Wrapper for as_area_destroy. */
1527__native sys_as_area_destroy(__address address)
1528{
1529 return (__native) as_area_destroy(AS, address);
[df0103f7]1530}
[b45c443]1531
1532 /** @}
1533 */
1534