source: mainline/generic/src/mm/as.c@ 4e49572

Last change on this file since 4e49572 was 5a7d9d1, checked in by Jakub Jermar <jakub@…>, 20 years ago

More checks for address space area conflicts.

/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	list_initialize(&as->as_area_head);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
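
/*
 * Usage sketch (illustrative only, not part of the original file):
 * a user address space created with no flags starts with an invalid
 * ASID; one is assigned lazily by as_switch() when the address space
 * first becomes active.
 *
 *	as_t *as;
 *
 *	as = as_create(0);
 *	ASSERT(as->asid == ASID_INVALID);
 */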

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of the area.
 * @param base Base address of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	/* Writable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	link_initialize(&a->link);
	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	list_append(&a->link, &as->as_area_head);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
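
/*
 * Usage sketch (illustrative; the base address is made up): create a
 * one-page read/write area. The call returns NULL if the base is not
 * page-aligned, if the area would be both writable and executable, or
 * if it conflicts with an existing area or the NULL page.
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, PAGE_SIZE, 0x10000000);
 *	if (!a)
 *		panic("as_area_create failed\n");
 */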

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which the page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}
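
/*
 * Usage sketch (illustrative): eagerly back one page of an area with
 * a zeroed frame instead of waiting for a page fault. The allocation
 * and zeroing steps are modelled on as_page_fault() below.
 *
 *	__address frame;
 *
 *	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
 *	memsetb(PA2KA(frame), FRAME_SIZE, 0);
 *	as_set_mapping(as, page, frame);
 */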

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault was not resolved, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, there can be several reasons
	 * for this fault:
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}
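
/*
 * Sketch of how an architecture-specific low-level handler might use
 * this function (illustrative; real handlers also extract the faulting
 * address from hardware registers, and interrupts are already disabled
 * at that point):
 *
 *	if (!as_page_fault(badvaddr))
 *		panic("unresolved page fault\n");
 */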

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
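
/*
 * Usage sketch (illustrative; the scheduler-side variables are
 * assumptions): when the scheduler switches between threads whose
 * tasks own different address spaces, it would do something like:
 *
 *	if (old_as != new_as)
 *		as_switch(old_as, new_as);
 */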

/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

	if (a->flags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (a->flags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (a->flags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	return flags;
}

/** Create page table.
 *
 * Depending on architecture, create either an address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
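
/*
 * Sketch of the documented locking order (illustrative; it mirrors
 * the sequence used in as_page_fault() above): the address space and
 * the area are locked first, then the page tables.
 *
 *	spinlock_lock(&as->lock);
 *	area = find_area_and_lock(as, page);
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, page);
 *	page_table_unlock(as, false);
 *	spinlock_unlock(&area->lock);
 *	spinlock_unlock(&as->lock);
 */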

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}
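
/*
 * Usage sketch (illustrative): shrink the block starting at 'address'
 * to a single page. Frames backing the removed pages are returned to
 * the frame allocator and the corresponding TLB entries are shot down.
 *
 *	if (as_remap(as, address, PAGE_SIZE, 0) == (__address) -1)
 *		panic("as_remap failed\n");
 */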

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	link_t *cur;
	as_area_t *a;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		a = list_get_instance(cur, as_area_t, link);
		spinlock_lock(&a->lock);

		if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
			return a;

		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	link_t *cur;
	as_area_t *a;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		__address a_start;
		size_t a_size;

		a = list_get_instance(cur, as_area_t, link);
		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);

		a_start = a->base;
		a_size = a->pages * PAGE_SIZE;

		spinlock_unlock(&a->lock);

		if (overlaps(va, size, a_start, a_size))
			return false;
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}