source: mainline/generic/src/mm/as.c@040e4e9

Last change on this file since 040e4e9 was 9179d0a, checked in by Jakub Jermar <jakub@…>, 19 years ago

Add some @file doxygen comments and improve already existing comments.

/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file as.c
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

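/*
 * Illustrative sketch (not part of this file): creating a 16-page,
 * read/write, anonymous area. Only as_area_create() and the AS_AREA_*
 * constants belong to the kernel interface; `base` stands for some
 * page-aligned address chosen by the caller.
 *
 *     as_area_t *a;
 *
 *     a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE,
 *         16 * PAGE_SIZE, base, AS_AREA_ATTR_NONE);
 *     if (!a) {
 *         // base was not page-aligned, size was zero, the area was
 *         // both writable and executable, or the range conflicted
 *         // with an existing area
 *     }
 */
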
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLB's.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

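/*
 * Illustrative sketch (hypothetical address and size variables):
 * growing an existing area by one page. Only as_area_resize() comes
 * from this file; `addr` must be a page-aligned address within the area.
 *
 *     if (as_area_resize(AS, addr, old_size + PAGE_SIZE, 0) == (__address) -1) {
 *         // no such area, device-mapped area, zero resulting size,
 *         // or the grown range would conflict with another area
 *     }
 */
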
/** Send address space area to another task.
 *
 * Address space area is sent to the specified task.
 * If the destination task is willing to accept the
 * area, a new area is created according to the
 * source area. Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing groups of pages. The source address
 * space area and any associated mapping is preserved.
 *
 * @param dst_id Task ID of the accepting task.
 * @param src_base Base address of the source address space area.
 *
 * @return 0 on success, ENOENT if there is no such task or
 *         no such address space area,
 *         EPERM if there was a problem in accepting the area, or
 *         ENOMEM if there was a problem in allocating the destination
 *         address space area.
 */
int as_area_send(task_id_t dst_id, __address src_base)
{
    ipl_t ipl;
    task_t *t;
    count_t i;
    as_t *dst_as;
    __address dst_base;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(dst_id);
    if (!t) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    dst_as = t->as;
    dst_base = (__address) t->accept_arg.base;

    if (dst_as == AS) {
        /*
         * The two tasks share the entire address space.
         * Return error since there is no point in continuing.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    spinlock_lock(&AS->lock);
    src_area = find_area_and_lock(AS, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        spinlock_unlock(&t->lock);
        spinlock_unlock(&AS->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    spinlock_unlock(&src_area->lock);
    spinlock_unlock(&AS->lock);

    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != src_size) ||
        (t->accept_arg.flags != src_flags)) {
        /*
         * Discrepancy in either task ID, size or flags.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Create a copy of the source address space area.
     * The destination area is created with the AS_AREA_ATTR_PARTIAL
     * attribute set, which prevents a race condition with
     * preliminary as_page_fault() calls.
     */
    dst_area = as_area_create(dst_as, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
    spinlock_unlock(&t->lock);

    /*
     * Avoid deadlock by first locking the address space with lower address.
     */
    if (dst_as < AS) {
        spinlock_lock(&dst_as->lock);
        spinlock_lock(&AS->lock);
    } else {
        spinlock_lock(&AS->lock);
        spinlock_lock(&dst_as->lock);
    }

    for (i = 0; i < SIZE2FRAMES(src_size); i++) {
        pte_t *pte;
        __address frame;

        page_table_lock(AS, false);
        pte = page_mapping_find(AS, src_base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(src_flags & AS_AREA_DEVICE))
                frame_reference_add(ADDR2PFN(frame));
            page_table_unlock(AS, false);
        } else {
            page_table_unlock(AS, false);
            continue;
        }

        page_table_lock(dst_as, false);
        page_mapping_insert(dst_as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
        page_table_unlock(dst_as, false);
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute.
     */
    spinlock_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    spinlock_unlock(&dst_area->lock);

    spinlock_unlock(&AS->lock);
    spinlock_unlock(&dst_as->lock);
    interrupts_restore(ipl);

    return 0;
}

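/*
 * The deadlock-avoidance pattern used above (always lock the address
 * space at the lower address first) shown in isolation, as a minimal
 * sketch with a hypothetical helper name:
 *
 *     void as_lock_pair(as_t *a, as_t *b)
 *     {
 *         if (a < b) {
 *             spinlock_lock(&a->lock);
 *             spinlock_lock(&b->lock);
 *         } else {
 *             spinlock_lock(&b->lock);
 *             spinlock_lock(&a->lock);
 *         }
 *     }
 */
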
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be serviced, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&AS->lock);
        return 0;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there can be several reasons that
     * can have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}

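/*
 * Illustrative sketch: how an architecture's low-level fault handler
 * might drive as_page_fault(). Everything except as_page_fault() is
 * hypothetical here; real handlers live in arch-specific code.
 *
 *     void arch_page_fault_handler(__address badvaddr)
 *     {
 *         if (!as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE))) {
 *             // unresolvable fault: panic in kernel context or
 *             // terminate the offending task
 *         }
 *     }
 */
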
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

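/*
 * Illustrative sketch: a context switch to a thread of another task
 * might switch address spaces like this (the surrounding scheduler
 * variables are hypothetical):
 *
 *     if (new_task->as != old_task->as)
 *         as_switch(old_task->as, new_task->as);
 *
 * Passing NULL as the old address space is allowed, e.g. on the very
 * first switch on a processor.
 */
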
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

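/*
 * Worked example: for an ordinary read/write area,
 * area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE) returns
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE;
 * a device area (AS_AREA_DEVICE set) would get the same flags minus
 * PAGE_CACHEABLE.
 */
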
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

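/*
 * Illustrative sketch of the locking order described above, mirroring
 * what as_area_resize() does: as->lock and the area lock are taken
 * first, so page_table_lock() is asked not to take as->lock again.
 *
 *     spinlock_lock(&as->lock);
 *     area = find_area_and_lock(as, va);
 *     page_table_lock(as, false);
 *     pte = page_mapping_find(as, va);
 *     // ... inspect or modify the mapping ...
 *     page_table_unlock(as, false);
 *     spinlock_unlock(&area->lock);
 *     spinlock_unlock(&as->lock);
 */
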
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return as_area_resize(AS, address, size, 0);
}

/** Prepare task for accepting address space area from another task.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
 *         TASK. Otherwise zero is returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Accepting from itself is not allowed.
         */
        return (__native) EPERM;
    }

    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

    return 0;
}

/** Wrapper for as_area_send(). */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
    as_area_acptsnd_arg_t arg;

    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Sending to itself is not allowed.
         */
        return (__native) EPERM;
    }

    return (__native) as_area_send(arg.task_id, (__address) arg.base);
}
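
/*
 * Illustrative sketch of the accept/send handshake as seen from
 * userspace. The wrapper names and the syscall invocation mechanism
 * are hypothetical; only the in-kernel entry points
 * sys_as_area_accept() and sys_as_area_send() come from this file.
 *
 * In the accepting task:
 *
 *     as_area_acptsnd_arg_t arg;
 *
 *     arg.task_id = sender_id;       // task allowed to send
 *     arg.base = (void *) dst_base;  // where to place the area
 *     arg.size = area_size;          // expected size
 *     arg.flags = area_flags;        // expected flags
 *     syscall_as_area_accept(&arg);
 *
 * In the sending task (task ID of the acceptor in arg.task_id,
 * base of the source area in arg.base):
 *
 *     syscall_as_area_send(&arg);
 *
 * The send fails with EPERM unless the sender's task ID, the area size
 * and the area flags all match what the acceptor registered.
 */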