source: mainline/generic/src/mm/as.c@37e7d2b9

Last change on this file was 37e7d2b9, checked in by Jakub Jermar <jakub@…>, 20 years ago

Restore interrupts when failing in as_remap().
Add check_area_conflicts() that checks whether address area overlaps with other areas.
Refuse to create writeable executable address space areas.
Rename as_area_t::size to as_area_t::pages.

/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	list_initialize(&as->as_area_head);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
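
/*
 * Note: an address space returned by as_create() has refcount == 0;
 * the reference count is raised and dropped by as_switch() below,
 * so refcount == 0 means the address space is not active on any processor.
 */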

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	link_initialize(&a->link);
	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	list_append(&a->link, &as->as_area_head);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
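
/*
 * Illustrative usage sketch (the flag combination and base address below
 * are made-up examples, not values mandated by this interface):
 *
 *	as_area_t *a;
 *
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE, 0x10000000);
 *
 * A NULL return means that base was not page-aligned, that a writeable
 * executable area was requested, or that the new area would conflict with
 * an existing area.
 */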

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved and should be signalled
 *	   as a page fault, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, this fault can have several causes.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}
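
/*
 * Sketch of the expected calling convention, assuming an architecture-specific
 * low-level handler (the pseudo-caller below is hypothetical, not part of
 * this file):
 *
 *	if (!as_page_fault(page))
 *		... let the low-level handler report the page fault ...
 *
 * A return value of 1 means either that a new frame was allocated and
 * mapped, or that another processor had already resolved the same fault.
 */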

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
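
/*
 * Design note on the deferred asid_get() call above: asid_get() may need
 * to reclaim an ASID from one of the inactive address spaces, which
 * presumably requires taking that address space's lock; calling it while
 * still holding new->lock could therefore deadlock.
 */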

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

	if (a->flags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (a->flags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (a->flags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	return flags;
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
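
/*
 * Illustrative sketch of the locking discipline described above:
 *
 *	page_table_lock(as, true);	(locks as->lock and the page table)
 *	pte = page_mapping_find(as, page);
 *	...
 *	page_table_unlock(as, true);
 *
 * When as->lock is already held, as in as_page_fault() above, pass false
 * so that only the page table itself is locked and unlocked.
 */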

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		size_t i;

		/*
		 * Shrinking the area.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}
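
/*
 * Note on growing an area: when the new size is not smaller, as_remap()
 * only records the new page count; backing frames for the added pages are
 * then allocated lazily by as_page_fault() on first access. Illustrative
 * call (new_size is a hypothetical caller-supplied value):
 *
 *	if (as_remap(as, area_base, new_size, 0) == (__address) -1)
 *		... the area was not found or the resized area would
 *		    conflict with a neighbouring area ...
 */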

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	link_t *cur;
	as_area_t *a;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		a = list_get_instance(cur, as_area_t, link);
		spinlock_lock(&a->lock);

		if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
			return a;

		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	link_t *cur;
	as_area_t *a;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		__address start;
		__address end;

		a = list_get_instance(cur, as_area_t, link);
		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);

		start = a->base;
		end = a->base + a->pages * PAGE_SIZE - 1;

		spinlock_unlock(&a->lock);

		if ((va >= start) && (va <= end)) {
			/*
			 * Tested area is inside another area.
			 */
			return false;
		}

		if ((start >= va) && (start < va + size)) {
			/*
			 * Another area starts in tested area.
			 */
			return false;
		}

		if ((end >= va) && (end < va + size)) {
			/*
			 * Another area ends in tested area.
			 */
			return false;
		}
	}

	return true;
}
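
/*
 * Schematic summary of the three tests above: a conflict is reported when
 * the tested interval [va, va + size) starts inside another area, when
 * another area starts inside the tested interval, or when another area
 * ends inside the tested interval. Together these cover all overlaps,
 * including the case where the tested interval fully contains another
 * area, which is caught by the second test.
 */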