/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file as.c
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
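
/*
 * Illustrative sketch only (not part of the original code path): a user
 * address space is typically created for a new task and activated later,
 * e.g. by the scheduler:
 *
 *	as_t *as = as_create(0);
 *	...
 *	as_switch(NULL, as);
 *
 * The reference count and the ASID of 'as' are managed by as_switch();
 * as_create() itself leaves the address space inactive, with refcount zero
 * and ASID_INVALID.
 */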

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create an address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
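
/*
 * Illustrative example (the base address 0x10000000 is an arbitrary,
 * hypothetical value): an anonymous read/write area can be created with
 *
 *	as_area_t *a;
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE,
 *	    0x10000000, AS_AREA_ATTR_NONE);
 *
 * No frames are allocated here; physical memory is supplied lazily by
 * as_page_fault() on the first access to each page of the area.
 */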

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLB's.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			spinlock_unlock(&area->lock);
			spinlock_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
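
/*
 * Note on the semantics above: the new page count is computed as
 * SIZE2FRAMES((address - area->base) + size), i.e. 'size' is measured from
 * 'address' rather than from the beginning of the area. For instance
 * (illustrative only), calling as_area_resize() with address == area->base
 * and size == 2 * PAGE_SIZE resizes the area to exactly two pages.
 */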

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;
	for (i = 0; i < area->pages; i++) {
		pte_t *pte;

		/*
		 * Releasing physical memory.
		 * Areas mapping memory-mapped devices are treated differently than
		 * areas backing frame_alloc()'ed memory.
		 */
		page_table_lock(as, false);
		pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			page_mapping_remove(as, area->base + i*PAGE_SIZE);
			if (!(area->flags & AS_AREA_DEVICE)) {
				__address frame;
				frame = PTE_GET_FRAME(pte);
				frame_free(ADDR2PFN(frame));
			}
			page_table_unlock(as, false);
		} else {
			page_table_unlock(as, false);
		}
	}
	/*
	 * Invalidate TLB's.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	area->attributes |= AS_AREA_ATTR_PARTIAL;
	spinlock_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping is copied as well,
 * thus providing a mechanism for sharing a group of pages.
 * The source address space area and any associated
 * mapping are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success, ENOENT if there is no such task or no such
 *	   address space area, EPERM if there was a problem in accepting
 *	   the area, or ENOMEM if there was a problem in allocating the
 *	   destination address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
		  __address dst_base)
{
	ipl_t ipl;
	count_t i;
	as_t *src_as;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;

	ipl = interrupts_disable();
	spinlock_lock(&src_task->lock);
	src_as = src_task->as;

	spinlock_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		spinlock_unlock(&src_task->lock);
		spinlock_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	spinlock_unlock(&src_area->lock);
	spinlock_unlock(&src_as->lock);

	if (src_size != acc_size) {
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return EPERM;
	}
	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 */
	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	spinlock_unlock(&src_task->lock);

	/*
	 * Avoid deadlock by first locking the address space with lower address.
	 */
	if (AS < src_as) {
		spinlock_lock(&AS->lock);
		spinlock_lock(&src_as->lock);
	} else {
		spinlock_lock(&src_as->lock);
		spinlock_lock(&AS->lock);
	}

	for (i = 0; i < SIZE2FRAMES(src_size); i++) {
		pte_t *pte;
		__address frame;

		page_table_lock(src_as, false);
		pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			frame = PTE_GET_FRAME(pte);
			if (!(src_flags & AS_AREA_DEVICE))
				frame_reference_add(ADDR2PFN(frame));
			page_table_unlock(src_as, false);
		} else {
			page_table_unlock(src_as, false);
			continue;
		}

		page_table_lock(AS, false);
		page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
		page_table_unlock(AS, false);
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute.
	 */
	spinlock_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	spinlock_unlock(&dst_area->lock);

	spinlock_unlock(&AS->lock);
	spinlock_unlock(&src_as->lock);
	interrupts_restore(ipl);

	return 0;
}
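
/*
 * Descriptive summary of the locking protocol used in as_area_steal():
 * the source task lock is taken first to stabilize src_task->as, the two
 * address space locks are then taken in order of ascending address to avoid
 * deadlock, and the page tables are locked per page via page_table_lock()
 * with the 'lock' argument set to false because the respective address
 * space locks are already held.
 */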

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT (0) on page fault, AS_PF_OK (1) on success, or
 *	   AS_PF_DEFER (2) if the fault was caused by copy_to_uspace()
 *	   or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&AS->lock);
		goto page_fault;
	}

	ASSERT(!(area->flags & AS_AREA_DEVICE));

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * In general, there can be several reasons that
	 * can have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (!THREAD)
		return AS_PF_FAULT;

	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
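
/*
 * Illustrative sketch only: an architecture-specific low-level fault handler
 * is expected to act on the return value of as_page_fault() along these
 * lines ('faulting_address' and the reaction to AS_PF_FAULT are hypothetical):
 *
 *	switch (as_page_fault(faulting_address, istate)) {
 *	case AS_PF_OK:
 *	case AS_PF_DEFER:
 *		return;		(resume; retaddr was redirected for AS_PF_DEFER)
 *	case AS_PF_FAULT:
 *		panic("page fault\n");
 *	}
 */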

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
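
/*
 * Illustrative example: the scheduler typically switches address spaces with
 *
 *	as_switch(old_task->as, new_task->as);
 *
 * where 'old_task' and 'new_task' are placeholder names. Passing NULL as the
 * old address space is also valid, e.g. when the very first address space is
 * being installed on a processor.
 */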

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (!(aflags & AS_AREA_DEVICE))
		flags |= PAGE_CACHEABLE;

	return flags;
}
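
/*
 * For example, aflags == AS_AREA_READ | AS_AREA_WRITE translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * whereas an AS_AREA_DEVICE area is mapped without PAGE_CACHEABLE.
 */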

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}
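
/*
 * For instance, as_set_mapping() above calls page_table_lock(as, true)
 * because it does not hold as->lock itself, whereas as_page_fault() calls
 * page_table_lock(AS, false) because AS->lock is already held at that point.
 */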

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}


/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		spinlock_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		spinlock_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		spinlock_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
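
/*
 * Illustrative note: with the checks above, a request to create an area at
 * va == 0 always fails, because overlaps(va, size, NULL, PAGE_SIZE) detects
 * a conflict with the NULL page regardless of the requested size.
 */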

/** Return size of the address space area of the current task that contains 'base'. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		spinlock_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}