/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");

}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) slab_alloc(as_slab, 0);
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this area,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
		    0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
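
/*
 * Illustrative sketch (not part of the original file): a typical caller maps
 * a few anonymous pages into the current address space roughly as follows.
 * The backend name anon_backend, the variable base and the use of 0 for the
 * attrs argument are assumptions made for the example only; any backend that
 * provides page_fault and frame_free operations would do.
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE, 4 * PAGE_SIZE, base, 0, &anon_backend, NULL);
 *	if (!a)
 *		return ENOMEM;
 *
 * The area becomes visible in as->as_area_btree immediately; its pages are
 * only mapped lazily by as_page_fault() through the backend.
 */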

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		tlb_shootdown_finalize();

		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
478 |
|
---|
479 | /** Destroy address space area.
|
---|
480 | *
|
---|
481 | * @param as Address space.
|
---|
482 | * @param address Address withing the area to be deleted.
|
---|
483 | *
|
---|
484 | * @return Zero on success or a value from @ref errno.h on failure.
|
---|
485 | */
|
---|
486 | int as_area_destroy(as_t *as, uintptr_t address)
|
---|
487 | {
|
---|
488 | as_area_t *area;
|
---|
489 | uintptr_t base;
|
---|
490 | link_t *cur;
|
---|
491 | ipl_t ipl;
|
---|
492 |
|
---|
493 | ipl = interrupts_disable();
|
---|
494 | mutex_lock(&as->lock);
|
---|
495 |
|
---|
496 | area = find_area_and_lock(as, address);
|
---|
497 | if (!area) {
|
---|
498 | mutex_unlock(&as->lock);
|
---|
499 | interrupts_restore(ipl);
|
---|
500 | return ENOENT;
|
---|
501 | }
|
---|
502 |
|
---|
503 | base = area->base;
|
---|
504 |
|
---|
505 | /*
|
---|
506 | * Start TLB shootdown sequence.
|
---|
507 | */
|
---|
508 | tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
|
---|
509 |
|
---|
510 | /*
|
---|
511 | * Visit only the pages mapped by used_space B+tree.
|
---|
512 | */
|
---|
513 | for (cur = area->used_space.leaf_head.next;
|
---|
514 | cur != &area->used_space.leaf_head; cur = cur->next) {
|
---|
515 | btree_node_t *node;
|
---|
516 | int i;
|
---|
517 |
|
---|
518 | node = list_get_instance(cur, btree_node_t, leaf_link);
|
---|
519 | for (i = 0; i < node->keys; i++) {
|
---|
520 | uintptr_t b = node->key[i];
|
---|
521 | count_t j;
|
---|
522 | pte_t *pte;
|
---|
523 |
|
---|
524 | for (j = 0; j < (count_t) node->value[i]; j++) {
|
---|
525 | page_table_lock(as, false);
|
---|
526 | pte = page_mapping_find(as, b + j * PAGE_SIZE);
|
---|
527 | ASSERT(pte && PTE_VALID(pte) &&
|
---|
528 | PTE_PRESENT(pte));
|
---|
529 | if (area->backend &&
|
---|
530 | area->backend->frame_free) {
|
---|
531 | area->backend->frame_free(area, b +
|
---|
532 | j * PAGE_SIZE, PTE_GET_FRAME(pte));
|
---|
533 | }
|
---|
534 | page_mapping_remove(as, b + j * PAGE_SIZE);
|
---|
535 | page_table_unlock(as, false);
|
---|
536 | }
|
---|
537 | }
|
---|
538 | }
|
---|
539 |
|
---|
540 | /*
|
---|
541 | * Finish TLB shootdown sequence.
|
---|
542 | */
|
---|
543 | tlb_invalidate_pages(as->asid, area->base, area->pages);
|
---|
544 | tlb_shootdown_finalize();
|
---|
545 |
|
---|
546 | /*
|
---|
547 | * Invalidate potential software translation caches (e.g. TSB on
|
---|
548 | * sparc64).
|
---|
549 | */
|
---|
550 | as_invalidate_translation_cache(as, area->base, area->pages);
|
---|
551 |
|
---|
552 | btree_destroy(&area->used_space);
|
---|
553 |
|
---|
554 | area->attributes |= AS_AREA_ATTR_PARTIAL;
|
---|
555 |
|
---|
556 | if (area->sh_info)
|
---|
557 | sh_info_remove_reference(area->sh_info);
|
---|
558 |
|
---|
559 | mutex_unlock(&area->lock);
|
---|
560 |
|
---|
561 | /*
|
---|
562 | * Remove the empty area from address space.
|
---|
563 | */
|
---|
564 | btree_remove(&as->as_area_btree, base, NULL);
|
---|
565 |
|
---|
566 | free(area);
|
---|
567 |
|
---|
568 | mutex_unlock(&as->lock);
|
---|
569 | interrupts_restore(ipl);
|
---|
570 | return 0;
|
---|
571 | }
|
---|

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing or if the kernel detects an attempt to create an illegal address
 * alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size ||
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

#ifdef CONFIG_VIRT_IDX_DCACHE
	if (!(dst_flags_mask & AS_AREA_EXEC)) {
		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
			/*
			 * Refuse to create an illegal address alias.
			 */
			mutex_unlock(&src_area->lock);
			mutex_unlock(&src_as->lock);
			interrupts_restore(ipl);
			return ENOTSUP;
		}
	}
#endif /* CONFIG_VIRT_IDX_DCACHE */

	/*
	 * Now we are committed to sharing the area.
	 * First, prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_as->lock);
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	mutex_unlock(&dst_as->lock);

	interrupts_restore(ipl);

	return 0;
}
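
/*
 * Illustrative sketch (not part of the original file): a memory sharing
 * request between two tasks conceptually boils down to a call like the one
 * below, executed in the context of the receiving task. Both areas then
 * resolve their page faults from the common sh_info->pagemap.
 *
 *	rc = as_area_share(sender->as, src_base, size,
 *	    AS, dst_base, AS_AREA_READ | AS_AREA_WRITE);
 *	if (rc != 0)
 *		return rc;
 *
 * The identifier sender, the sizes and the exact flag choice are assumptions
 * made for the example only.
 */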

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
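
/*
 * Illustrative sketch (not part of the original file): an architecture's
 * low-level fault handler typically aligns the faulting address down to a
 * page boundary and calls the function above, e.g.:
 *
 *	if (as_page_fault(ALIGN_DOWN(va, PAGE_SIZE), PF_ACCESS_WRITE,
 *	    istate) == AS_PF_FAULT)
 *		panic("page fault\n");
 *
 * The variable names va and istate are assumptions made for the example;
 * real handlers react to AS_PF_FAULT in an architecture-specific way
 * (killing the faulting task or panicking).
 */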

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID) {
			list_remove(&new->inactive_as_with_asid_link);
		} else {
			/*
			 * Defer call to asid_get() until new->lock is released.
			 */
			needs_asid = true;
		}
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
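
/*
 * Illustrative sketch (not part of the original file): the scheduler is the
 * typical caller. When switching to a thread of another task, it does,
 * conceptually:
 *
 *	as_t *new_as = THREAD->task->as;
 *
 *	if (AS != new_as)
 *		as_switch(AS, new_as);
 *
 * The exact condition and the task structure member name (task->as) are
 * assumptions made for the example only.
 */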

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
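
/*
 * For example, a read-write cacheable area, i.e.
 * AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which is what the backends then pass to page_mapping_insert().
 */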

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}


/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 * failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}
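
/*
 * Illustrative sketch (not part of the original file): backends call the
 * used_space bookkeeping right after establishing a mapping in their page
 * fault handlers, conceptually:
 *
 *	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
 *	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
 *		panic("Could not insert used space.\n");
 *
 * The variable names addr and frame are assumptions made for the example.
 */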
|
---|
1482 |
|
---|
1483 | /** Mark portion of address space area as unused.
|
---|
1484 | *
|
---|
1485 | * The address space area must be already locked.
|
---|
1486 | *
|
---|
1487 | * @param a Address space area.
|
---|
1488 | * @param page First page to be marked.
|
---|
1489 | * @param count Number of page to be marked.
|
---|
1490 | *
|
---|
1491 | * @return 0 on failure and 1 on success.
|
---|
1492 | */
|
---|
1493 | int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
|
---|
1494 | {
|
---|
1495 | btree_node_t *leaf, *node;
|
---|
1496 | count_t pages;
|
---|
1497 | int i;
|
---|
1498 |
|
---|
1499 | ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
|
---|
1500 | ASSERT(count);
|
---|
1501 |
|
---|
1502 | pages = (count_t) btree_search(&a->used_space, page, &leaf);
|
---|
1503 | if (pages) {
|
---|
1504 | /*
|
---|
1505 | * We are lucky, page is the beginning of some interval.
|
---|
1506 | */
|
---|
1507 | if (count > pages) {
|
---|
1508 | return 0;
|
---|
1509 | } else if (count == pages) {
|
---|
1510 | btree_remove(&a->used_space, page, leaf);
|
---|
1511 | return 1;
|
---|
1512 | } else {
|
---|
1513 | /*
|
---|
1514 | * Find the respective interval.
|
---|
1515 | * Decrease its size and relocate its start address.
|
---|
1516 | */
|
---|
1517 | for (i = 0; i < leaf->keys; i++) {
|
---|
1518 | if (leaf->key[i] == page) {
|
---|
1519 | leaf->key[i] += count * PAGE_SIZE;
|
---|
1520 | leaf->value[i] -= count;
|
---|
1521 | return 1;
|
---|
1522 | }
|
---|
1523 | }
|
---|
1524 | goto error;
|
---|
1525 | }
|
---|
1526 | }
|
---|
1527 |
|
---|
	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have already been resolved.
	 * Now the interval can only be between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space "
	    "from %p.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now carefully walk the pagemap B+tree and remove the
		 * reference from (i.e. free) all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}

	}
	mutex_unlock(&sh_info->lock);

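	/*
	 * The actual deallocation is deferred until sh_info->lock has been
	 * released, because the mutex is embedded in the very structure that
	 * is about to be freed.
	 */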
	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}
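
/*
 * For illustration (assuming 4 KiB pages): a task asking for one anonymous,
 * readable and writable page at 0x40000000 would reach this wrapper as
 * sys_as_area_create(0x40000000, 0x1000, AS_AREA_READ | AS_AREA_WRITE) and,
 * on success, would get 0x40000000 back; on failure it gets (unative_t) -1.
 */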

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

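	/*
	 * Disable interrupts and grab the address space lock so that the
	 * B+tree of areas is traversed over a consistent snapshot.
	 */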
	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */