source: mainline/generic/src/mm/as.c@ bd72b475

Last change on this file since bd72b475 was bd72b475, checked in by Jakub Jermar <jakub@…>, 20 years ago

Unlock address space area when returning error.

  • Property mode set to 100644
File size: 14.2 KB
/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	list_initialize(&as->as_area_head);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	link_initialize(&a->link);
	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	list_append(&a->link, &as->as_area_head);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

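/*
 * Illustrative sketch (compiled out, not part of the original file): how a
 * client might combine as_create() and as_area_create(). Passing 0 for the
 * address space flags (i.e. not FLAG_AS_KERNEL) and using 0x10000000 as the
 * page-aligned base address are assumptions made purely for illustration.
 */
#if 0
static void example_create_area(void)
{
	as_t *as;
	as_area_t *a;

	as = as_create(0);	/* ordinary (non-kernel) address space */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE, 0x10000000);
	if (!a)
		panic("as_area_create failed\n");
}
#endif
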
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault was not resolved (the low-level handler must take over), 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained a mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, there can be several reasons for this fault:
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and a frame for the faulting
	 *   page has not yet been allocated
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}

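/*
 * Illustrative sketch (compiled out): one way an architecture-level fault
 * handler could consume the return value of as_page_fault(). The handler name
 * and the way the faulting address is obtained are assumptions made only for
 * illustration; they are not part of this file.
 */
#if 0
static void example_handle_fault(__address faulting_address)
{
	/* Interrupts are assumed to be disabled here, as required above. */
	if (!as_page_fault(faulting_address)) {
		/* No address space area covers the address; the fault is genuine. */
		panic("unhandled page fault\n");
	}
}
#endif
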
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

	if (a->flags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (a->flags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (a->flags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	return flags;
}

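/*
 * Worked example (illustrative note, not from the original sources): an area
 * created with AS_AREA_READ | AS_AREA_EXEC translates in the function above to
 * PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE | PAGE_READ | PAGE_EXEC,
 * which is exactly the value later handed to page_mapping_insert().
 */
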
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}

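/*
 * Illustrative sketch (compiled out): shrinking an existing area to half of
 * its original size with as_remap(). The base address and the sizes are
 * arbitrary page-aligned examples, not real layout constants.
 */
#if 0
static void example_shrink_area(as_t *as)
{
	__address base = 0x10000000;

	if (as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 8 * PAGE_SIZE, base)) {
		if (as_remap(as, base, 4 * PAGE_SIZE, 0) == (__address) -1)
			panic("as_remap failed\n");
	}
}
#endif
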
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	link_t *cur;
	as_area_t *a;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		a = list_get_instance(cur, as_area_t, link);
		spinlock_lock(&a->lock);

		if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
			return a;

		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	link_t *cur;
	as_area_t *a;

	/*
	 * We don't want any area to conflict with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		__address a_start;
		size_t a_size;

		a = list_get_instance(cur, as_area_t, link);
		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);

		a_start = a->base;
		a_size = a->pages * PAGE_SIZE;

		spinlock_unlock(&a->lock);

		if (overlaps(va, size, a_start, a_size))
			return false;
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}