/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

#ifndef __OBJC__
	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");

}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

#ifdef __OBJC__
	as = [as_t new];
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	(void) as_constructor_arch(as, flags);
#else
	as = (as_t *) slab_alloc(as_slab, 0);
#endif
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

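	/*
	 * The refcount tracks the number of tasks referencing this address
	 * space, whereas cpu_refcount counts the processors on which it is
	 * currently active.
	 */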
	atomic_set(&as->refcount, 0);
	as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
	as->genarch.page_table = page_table_create(flags);
#else
	page_table_create(flags);
#endif

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * We know that we don't hold any spinlock.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;
	DEADLOCK_PROBE_INIT(p_asidlock);

	ASSERT(atomic_get(&as->refcount) == 0);

	/*
	 * Since there is no reference to this area,
	 * it is safe not to lock its mutex.
	 */

	/*
	 * We need to avoid deadlock between TLB shootdown and asidlock.
	 * We therefore try to take asid conditionally and if we don't succeed,
	 * we enable interrupts and try again. This is done while preemption is
	 * disabled to prevent nested context switches. We also depend on the
	 * fact that so far no spinlocks are held.
	 */
	preemption_disable();
	ipl = interrupts_read();
retry:
	interrupts_disable();
	if (!spinlock_trylock(&asidlock)) {
		interrupts_enable();
		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
		goto retry;
	}
	preemption_enable();	/* Interrupts disabled, enable preemption */
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&asidlock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
	page_table_destroy(as->genarch.page_table);
#else
	page_table_destroy(NULL);
#endif

	interrupts_restore(ipl);

#ifdef __OBJC__
	[as free];
#else
	slab_free(as_slab, as);
#endif
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
		    0);

	btree_create(&a->used_space);

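	/*
	 * Register the new area in the address space's B+tree of areas,
	 * keyed by its base address.
	 */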
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 * page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

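	/*
	 * The new page count is computed relative to the area base, so the
	 * resize request may name any page-aligned address inside the area.
	 */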
	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

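				/*
				 * Unmap the pages being cut off and let the
				 * backend release the underlying frames.
				 */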
				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */

		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();

	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
	as_area_t *area;
	uintptr_t base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next;
	    cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			uintptr_t b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				if (area->backend &&
				    area->backend->frame_free) {
					area->backend->frame_free(area, b +
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j * PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */

	tlb_invalidate_pages(as->asid, area->base, area->pages);
	/*
	 * Invalidate potential software translation caches (e.g. TSB on
	 * sparc64).
	 */
	as_invalidate_translation_cache(as, area->base, area->pages);
	tlb_shootdown_finalize();

	btree_destroy(&area->used_space);

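	/*
	 * Mark the area as partial so that concurrent page faults on it are
	 * refused while it is being torn down.
	 */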
	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size ||
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First, prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
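		/*
		 * Two references: one for the source area and one for the
		 * destination area that is about to be created.
		 */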
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
		/*
		 * Call the backend to setup sharing.
		 */
		src_area->backend->share(src_area);
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_as->lock);
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	mutex_unlock(&dst_as->lock);

	interrupts_restore(ipl);

	return 0;
}

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
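	/*
	 * If the fault happened during copy_from_uspace() or copy_to_uspace(),
	 * redirect the saved return address to the respective failover
	 * routine so that the copy reports failure, and defer the fault.
	 */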
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * When this function is entered, no spinlocks may be held.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
	DEADLOCK_PROBE_INIT(p_asidlock);
	preemption_disable();
retry:
	(void) interrupts_disable();
	if (!spinlock_trylock(&asidlock)) {
		/*
		 * Avoid deadlock with TLB shootdown.
		 * We can enable interrupts here because
		 * preemption is disabled. We should not be
		 * holding any other lock.
		 */
		(void) interrupts_enable();
		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
		goto retry;
	}
	preemption_enable();

	/*
	 * First, take care of the old address space.
	 */
	if (old_as) {
		ASSERT(old_as->cpu_refcount);
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old_as->asid != ASID_INVALID);
			list_append(&old_as->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old_as);
	}

	/*
	 * Second, prepare the new address space.
	 */
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID)
			list_remove(&new_as->inactive_as_with_asid_link);
		else
			new_as->asid = asid_get();
	}
#ifdef AS_PAGE_TABLE
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new_as);

	spinlock_unlock(&asidlock);

	AS = new_as;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
	return [as_t page_table_create: flags];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
	return [as_t page_table_destroy: page_table];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
	[as page_table_lock: lock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
	[as page_table_unlock: unlock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
#endif
}


/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 * failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
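	/* The page at virtual address zero stays unmapped to help catch NULL
	 * pointer dereferences. */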
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base.
 *
 * @param base Arbitrary address inside the address space area.
 *
 * @return Size of the address space area in bytes or zero if it
 * does not exist.
 */
size_t as_area_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

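	/*
	 * The used_space B+tree maps the base page of each used interval to
	 * its length in pages; a hit here means page already starts an
	 * interval.
	 */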
	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base
			 * of the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base
			 * of the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * the base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

1580 | node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
|
---|
1581 | if (node && page < leaf->key[0]) {
|
---|
1582 | uintptr_t left_pg = node->key[node->keys - 1];
|
---|
1583 | count_t left_cnt = (count_t) node->value[node->keys - 1];
|
---|
1584 |
|
---|
1585 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
|
---|
1586 | count * PAGE_SIZE)) {
|
---|
1587 | if (page + count * PAGE_SIZE ==
|
---|
1588 | left_pg + left_cnt * PAGE_SIZE) {
|
---|
1589 | /*
|
---|
1590 | * The interval is contained in the rightmost
|
---|
1591 | * interval of the left neighbour and can be
|
---|
1592 | * removed by updating the size of the bigger
|
---|
1593 | * interval.
|
---|
1594 | */
|
---|
1595 | node->value[node->keys - 1] -= count;
|
---|
1596 | return 1;
|
---|
1597 | } else if (page + count * PAGE_SIZE <
|
---|
1598 | left_pg + left_cnt*PAGE_SIZE) {
|
---|
1599 | count_t new_cnt;
|
---|
1600 |
|
---|
1601 | /*
|
---|
1602 | * The interval is contained in the rightmost
|
---|
1603 | * interval of the left neighbour but its
|
---|
1604 | * removal requires both updating the size of
|
---|
1605 | * the original interval and also inserting a
|
---|
1606 | * new interval.
|
---|
1607 | */
|
---|
1608 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
|
---|
1609 | (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
|
---|
1610 | node->value[node->keys - 1] -= count + new_cnt;
|
---|
1611 | btree_insert(&a->used_space, page +
|
---|
1612 | count * PAGE_SIZE, (void *) new_cnt, leaf);
|
---|
1613 | return 1;
|
---|
1614 | }
|
---|
1615 | }
|
---|
1616 | return 0;
|
---|
1617 | } else if (page < leaf->key[0]) {
|
---|
1618 | return 0;
|
---|
1619 | }
|
---|
1620 |
|
---|
1621 | if (page > leaf->key[leaf->keys - 1]) {
|
---|
1622 | uintptr_t left_pg = leaf->key[leaf->keys - 1];
|
---|
1623 | count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
|
---|
1624 |
|
---|
1625 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
|
---|
1626 | count * PAGE_SIZE)) {
|
---|
1627 | if (page + count * PAGE_SIZE ==
|
---|
1628 | left_pg + left_cnt * PAGE_SIZE) {
|
---|
1629 | /*
|
---|
1630 | * The interval is contained in the rightmost
|
---|
1631 | * interval of the leaf and can be removed by
|
---|
1632 | * updating the size of the bigger interval.
|
---|
1633 | */
|
---|
1634 | leaf->value[leaf->keys - 1] -= count;
|
---|
1635 | return 1;
|
---|
1636 | } else if (page + count * PAGE_SIZE < left_pg +
|
---|
1637 | left_cnt * PAGE_SIZE) {
|
---|
1638 | count_t new_cnt;
|
---|
1639 |
|
---|
1640 | /*
|
---|
1641 | * The interval is contained in the rightmost
|
---|
1642 | * interval of the leaf but its removal
|
---|
1643 | * requires both updating the size of the
|
---|
1644 | * original interval and also inserting a new
|
---|
1645 | * interval.
|
---|
1646 | */
|
---|
1647 | new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
|
---|
1648 | (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
|
---|
1649 | leaf->value[leaf->keys - 1] -= count + new_cnt;
|
---|
1650 | btree_insert(&a->used_space, page +
|
---|
1651 | count * PAGE_SIZE, (void *) new_cnt, leaf);
|
---|
1652 | return 1;
|
---|
1653 | }
|
---|
1654 | }
|
---|
1655 | return 0;
|
---|
1656 | }
|
---|
1657 |
|
---|
1658 | /*
|
---|
1659 | * The border cases have been already resolved.
|
---|
1660 | * Now the interval can be only between intervals of the leaf.
|
---|
1661 | */
|
---|
1662 | for (i = 1; i < leaf->keys - 1; i++) {
|
---|
1663 | if (page < leaf->key[i]) {
|
---|
1664 | uintptr_t left_pg = leaf->key[i - 1];
|
---|
1665 | count_t left_cnt = (count_t) leaf->value[i - 1];
|
---|
1666 |
|
---|
1667 | /*
|
---|
1668 | * Now the interval is between intervals corresponding
|
---|
1669 | * to (i - 1) and i.
|
---|
1670 | */
|
---|
1671 | if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
|
---|
1672 | count * PAGE_SIZE)) {
|
---|
1673 | if (page + count * PAGE_SIZE ==
|
---|
1674 | left_pg + left_cnt*PAGE_SIZE) {
|
---|
1675 | /*
|
---|
1676 | * The interval is contained in the
|
---|
1677 | * interval (i - 1) of the leaf and can
|
---|
1678 | * be removed by updating the size of
|
---|
1679 | * the bigger interval.
|
---|
1680 | */
|
---|
1681 | leaf->value[i - 1] -= count;
|
---|
1682 | return 1;
|
---|
1683 | } else if (page + count * PAGE_SIZE <
|
---|
1684 | left_pg + left_cnt * PAGE_SIZE) {
|
---|
1685 | count_t new_cnt;
|
---|
1686 |
|
---|
1687 | /*
|
---|
1688 | * The interval is contained in the
|
---|
1689 | * interval (i - 1) of the leaf but its
|
---|
1690 | * removal requires both updating the
|
---|
1691 | * size of the original interval and
|
---|
1692 | * also inserting a new interval.
|
---|
1693 | */
|
---|
1694 | new_cnt = ((left_pg +
|
---|
1695 | left_cnt * PAGE_SIZE) -
|
---|
1696 | (page + count * PAGE_SIZE)) >>
|
---|
1697 | PAGE_WIDTH;
|
---|
1698 | leaf->value[i - 1] -= count + new_cnt;
|
---|
1699 | btree_insert(&a->used_space, page +
|
---|
1700 | count * PAGE_SIZE, (void *) new_cnt,
|
---|
1701 | leaf);
|
---|
1702 | return 1;
|
---|
1703 | }
|
---|
1704 | }
|
---|
1705 | return 0;
|
---|
1706 | }
|
---|
1707 | }
|
---|
1708 |
|
---|
1709 | error:
|
---|
1710 | panic("Inconsistency detected while removing %d pages of used space "
|
---|
1711 | "from %p.\n", count, page);
|
---|
1712 | }
|
---|
1713 |
|
---|
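/*
 * Illustrative sketch, not part of the original code (hypothetical values):
 * removing pages from the middle of a tracked interval makes
 * used_space_remove() shrink the original interval and insert a new one for
 * the remaining tail. Starting from a single 8-page interval:
 *
 *	used_space_add(area, 0x40000000, 8);
 *	used_space_remove(area, 0x40000000 + 2 * PAGE_SIZE, 3);
 *	// used_space now tracks two intervals: 2 pages at 0x40000000 and
 *	// 3 pages at 0x40000000 + 5 * PAGE_SIZE.
 */
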
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now carefully walk the pagemap B+tree and free/remove the
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

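/*
 * Design note (a reading of the code above, not an authoritative statement):
 * deallocation is deferred until after mutex_unlock() because sh_info->lock is
 * embedded in the very structure being freed; destroying the structure while
 * still holding the mutex would leave the lock's memory invalid. The pattern
 * is:
 *
 *	mutex_lock(&sh_info->lock);
 *	if (--sh_info->refcount == 0)
 *		dealloc = true;		// only record the decision here
 *	mutex_unlock(&sh_info->lock);
 *	if (dealloc)
 *		free(sh_info);		// actual teardown happens unlocked
 */
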
/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

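/*
 * Illustrative sequence, not part of the original code (hypothetical addresses
 * and sizes): the wrappers above operate on the calling task's address space
 * (AS), force AS_AREA_CACHEABLE and the anonymous backend on newly created
 * areas, and return either the area base address or the underlying result cast
 * to unative_t.
 *
 *	unative_t base = sys_as_area_create(0x50000000, 4 * PAGE_SIZE,
 *	    AS_AREA_READ | AS_AREA_WRITE);
 *	(void) sys_as_area_resize(base, 8 * PAGE_SIZE, 0);
 *	(void) sys_as_area_destroy(base);
 */
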
/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */