/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	btree_create(&as->as_area_btree);

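	/*
	 * The kernel address space is created with the fixed kernel ASID.
	 * User address spaces start without an ASID; one is assigned lazily
	 * in as_switch() when the address space first becomes active.
	 */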
	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	/* Areas that are both writable and executable are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

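	/*
	 * Record the new area in the address space B+tree, keyed by its base address.
	 */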
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault cannot be handled here and should be propagated
 *         to the low-level page fault handler, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

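	/*
	 * AS->lock is already held, so page_table_lock() is asked not to
	 * take it again.
	 */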
	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, there are several reasons that
	 * could have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and no frame has been
	 *   allocated for the faulting page so far
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
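	/*
	 * Back the faulting page with a newly allocated frame and zero it
	 * so that no stale data is exposed to the address space.
	 */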
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
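	/*
	 * Install the page table of the new address space.
	 */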
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

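	/*
	 * Finally, remember the new address space as the one that is now
	 * active on this processor.
	 */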
	AS = new;
}

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

	if (a->flags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (a->flags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (a->flags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	return flags;
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

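	/*
	 * Compute the new size of the area in pages, measured from the
	 * area's base up to address + size.
	 */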
	pages = SIZE2FRAMES((address - area->base) + size);
	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			spinlock_unlock(&area->lock);
			spinlock_unlock(&as->lock);
			interrupts_restore(ipl);
			return (__address) -1;
		}
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

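	/*
	 * Even on a miss, btree_search() fills in 'leaf' with the leaf node
	 * in which va would reside, so the neighbourhood of va can be
	 * examined below.
	 */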
	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		spinlock_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left sibling
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		spinlock_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * Second, locate the left sibling and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_node_left_sibling(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		spinlock_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to conflict with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left sibling, the leftmost record in the right
	 * sibling and all records in the leaf node itself.
	 */

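	/*
	 * An existing area whose base is exactly va is a conflict, unless it
	 * is the very area we were asked to avoid.
	 */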
	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_node_left_sibling(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}
	if ((node = btree_node_right_sibling(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Make sure it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}