source: mainline/generic/src/mm/as.c@040e4e9

Last change on this file since 040e4e9 was 9179d0a, checked in by Jakub Jermar <jakub@…>, 19 years ago

Add some @file doxygen comments and improve already existing comments.

1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/**
30 * @file as.c
31 * @brief Address space related functions.
32 *
33 * This file contains address space manipulation functions.
34 * Roughly speaking, this is a higher-level client of the
35 * Virtual Address Translation (VAT) subsystem.
36 *
37 * The functionality provided by this file allows one to
38 * create address spaces and to create, resize and share
39 * address space areas.
40 *
41 * @see page.c
42 *
43 */
44
45#include <mm/as.h>
46#include <arch/mm/as.h>
47#include <mm/page.h>
48#include <mm/frame.h>
49#include <mm/slab.h>
50#include <mm/tlb.h>
51#include <arch/mm/page.h>
52#include <genarch/mm/page_pt.h>
53#include <genarch/mm/page_ht.h>
54#include <mm/asid.h>
55#include <arch/mm/asid.h>
56#include <synch/spinlock.h>
57#include <adt/list.h>
58#include <adt/btree.h>
59#include <proc/task.h>
60#include <arch/asm.h>
61#include <panic.h>
62#include <debug.h>
63#include <print.h>
64#include <memstr.h>
65#include <macros.h>
66#include <arch.h>
67#include <errno.h>
68#include <config.h>
69#include <arch/types.h>
70#include <typedefs.h>
71
72as_operations_t *as_operations = NULL;
73
74/** Address space lock. It protects inactive_as_with_asid_head. */
75SPINLOCK_INITIALIZE(as_lock);
76
77/**
78 * This list contains address spaces that are not active on any
79 * processor and that have a valid ASID.
80 */
81LIST_INITIALIZE(inactive_as_with_asid_head);
82
83/** Kernel address space. */
84as_t *AS_KERNEL = NULL;
85
86static int area_flags_to_page_flags(int aflags);
87static int get_area_flags(as_area_t *a);
88static as_area_t *find_area_and_lock(as_t *as, __address va);
89static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
90
91/** Initialize address space subsystem. */
92void as_init(void)
93{
94 as_arch_init();
95 AS_KERNEL = as_create(FLAG_AS_KERNEL);
96 if (!AS_KERNEL)
97 panic("can't create kernel address space\n");
98}
99
100/** Create address space.
101 *
102 * @param flags Flags that influence the way in which the address space is created.
103 */
104as_t *as_create(int flags)
105{
106 as_t *as;
107
108 as = (as_t *) malloc(sizeof(as_t), 0);
109 link_initialize(&as->inactive_as_with_asid_link);
110 spinlock_initialize(&as->lock, "as_lock");
111 btree_create(&as->as_area_btree);
112
113 if (flags & FLAG_AS_KERNEL)
114 as->asid = ASID_KERNEL;
115 else
116 as->asid = ASID_INVALID;
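	/* A real ASID for a userspace address space is allocated lazily in as_switch(). */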
117
118 as->refcount = 0;
119 as->page_table = page_table_create(flags);
120
121 return as;
122}
123
124/** Free address space. */
125void as_free(as_t *as)
126{
127 ASSERT(as->refcount == 0);
128
129 /* TODO: free as_areas and other resources held by as */
130 /* TODO: free page table */
131 free(as);
132}
133
134/** Create an address space area with common attributes.
135 *
136 * The created address space area is added to the target address space.
137 *
138 * @param as Target address space.
139 * @param flags Flags of the area memory.
140 * @param size Size of area.
141 * @param base Base address of area.
142 * @param attrs Attributes of the area.
143 *
144 * @return Address space area on success or NULL on failure.
145 */
146as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
147{
148 ipl_t ipl;
149 as_area_t *a;
150
151 if (base % PAGE_SIZE)
152 return NULL;
153
154 if (!size)
155 return NULL;
156
157 /* Writeable executable areas are not supported. */
158 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
159 return NULL;
160
161 ipl = interrupts_disable();
162 spinlock_lock(&as->lock);
163
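	/* Refuse to create an area that would conflict with existing areas or with the NULL page. */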
164 if (!check_area_conflicts(as, base, size, NULL)) {
165 spinlock_unlock(&as->lock);
166 interrupts_restore(ipl);
167 return NULL;
168 }
169
170 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
171
172 spinlock_initialize(&a->lock, "as_area_lock");
173
174 a->flags = flags;
175 a->attributes = attrs;
176 a->pages = SIZE2FRAMES(size);
177 a->base = base;
178
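	/* Register the new area in the address space B+tree, keyed by its base address. */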
179 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
180
181 spinlock_unlock(&as->lock);
182 interrupts_restore(ipl);
183
184 return a;
185}
186
187/** Find address space area and resize it.
188 *
189 * @param as Address space.
190 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
191 * @param size New size of the virtual memory block starting at address.
192 * @param flags Flags influencing the remap operation. Currently unused.
193 *
194 * @return address on success, (__address) -1 otherwise.
195 */
196__address as_area_resize(as_t *as, __address address, size_t size, int flags)
197{
198 as_area_t *area = NULL;
199 ipl_t ipl;
200 size_t pages;
201
202 ipl = interrupts_disable();
203 spinlock_lock(&as->lock);
204
205 /*
206 * Locate the area.
207 */
208 area = find_area_and_lock(as, address);
209 if (!area) {
210 spinlock_unlock(&as->lock);
211 interrupts_restore(ipl);
212 return (__address) -1;
213 }
214
215 if (area->flags & AS_AREA_DEVICE) {
216 /*
217 * Remapping of address space areas associated
218 * with memory mapped devices is not supported.
219 */
220 spinlock_unlock(&area->lock);
221 spinlock_unlock(&as->lock);
222 interrupts_restore(ipl);
223 return (__address) -1;
224 }
225
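	/* Compute the new size of the area in pages, measured from the area base. */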
226 pages = SIZE2FRAMES((address - area->base) + size);
227 if (!pages) {
228 /*
229 * Zero size address space areas are not allowed.
230 */
231 spinlock_unlock(&area->lock);
232 spinlock_unlock(&as->lock);
233 interrupts_restore(ipl);
234 return (__address) -1;
235 }
236
237 if (pages < area->pages) {
238 int i;
239
240 /*
241 * Shrinking the area.
242 * No need to check for overlaps.
243 */
244 for (i = pages; i < area->pages; i++) {
245 pte_t *pte;
246
247 /*
248 * Releasing physical memory.
249 * This depends on the fact that the memory was allocated using frame_alloc().
250 */
251 page_table_lock(as, false);
252 pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
253 if (pte && PTE_VALID(pte)) {
254 __address frame;
255
256 ASSERT(PTE_PRESENT(pte));
257 frame = PTE_GET_FRAME(pte);
258 page_mapping_remove(as, area->base + i*PAGE_SIZE);
259 page_table_unlock(as, false);
260
261 frame_free(ADDR2PFN(frame));
262 } else {
263 page_table_unlock(as, false);
264 }
265 }
266 /*
267 * Invalidate TLB entries for the pages that were unmapped.
268 */
269 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
270 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
271 tlb_shootdown_finalize();
272 } else {
273 /*
274 * Growing the area.
275 * Check for overlaps with other address space areas.
276 */
277 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
278 spinlock_unlock(&area->lock);
279 spinlock_unlock(&as->lock);
280 interrupts_restore(ipl);
281 return (__address) -1;
282 }
283 }
284
285 area->pages = pages;
286
287 spinlock_unlock(&area->lock);
288 spinlock_unlock(&as->lock);
289 interrupts_restore(ipl);
290
291 return address;
292}
293
294/** Send address space area to another task.
295 *
296 * Address space area is sent to the specified task.
297 * If the destination task is willing to accept the
298 * area, a new area is created according to the
299 * source area. Moreover, any existing mapping
300 * is copied as well, thus providing a mechanism
301 * for sharing a group of pages. The source address
302 * space area and any associated mappings are preserved.
303 *
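 * The destination task must have previously announced its willingness
 * to accept the area via sys_as_area_accept(); the task ID, size and
 * flags recorded there must match the source area, otherwise the send
 * fails with EPERM.
 *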
304 * @param dst_id Task ID of the accepting task.
305 * @param src_base Base address of the source address space area.
306 *
307 * @return 0 on success or ENOENT if there is no such task or
308 * if there is no such address space area,
309 * EPERM if there was a problem in accepting the area or
310 * ENOMEM if there was a problem in allocating destination
311 * address space area.
312 */
313int as_area_send(task_id_t dst_id, __address src_base)
314{
315 ipl_t ipl;
316 task_t *t;
317 count_t i;
318 as_t *dst_as;
319 __address dst_base;
320 int src_flags;
321 size_t src_size;
322 as_area_t *src_area, *dst_area;
323
324 ipl = interrupts_disable();
325 spinlock_lock(&tasks_lock);
326
327 t = task_find_by_id(dst_id);
328 if (!t) {
329 spinlock_unlock(&tasks_lock);
330 interrupts_restore(ipl);
331 return ENOENT;
332 }
333
334 spinlock_lock(&t->lock);
335 spinlock_unlock(&tasks_lock);
336
337 dst_as = t->as;
338 dst_base = (__address) t->accept_arg.base;
339
340 if (dst_as == AS) {
341 /*
342 * The two tasks share the entire address space.
343 * Return error since there is no point in continuing.
344 */
345 spinlock_unlock(&t->lock);
346 interrupts_restore(ipl);
347 return EPERM;
348 }
349
350 spinlock_lock(&AS->lock);
351 src_area = find_area_and_lock(AS, src_base);
352 if (!src_area) {
353 /*
354 * Could not find the source address space area.
355 */
356 spinlock_unlock(&t->lock);
357 spinlock_unlock(&AS->lock);
358 interrupts_restore(ipl);
359 return ENOENT;
360 }
361 src_size = src_area->pages * PAGE_SIZE;
362 src_flags = src_area->flags;
363 spinlock_unlock(&src_area->lock);
364 spinlock_unlock(&AS->lock);
365
366 if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != src_size) ||
367 (t->accept_arg.flags != src_flags)) {
368 /*
369 * Discrepancy in either task ID, size or flags.
370 */
371 spinlock_unlock(&t->lock);
372 interrupts_restore(ipl);
373 return EPERM;
374 }
375
376 /*
377 * Create copy of the source address space area.
378 * The destination area is created with AS_AREA_ATTR_PARTIAL
379 * attribute set which prevents race condition with
380 * preliminary as_page_fault() calls.
381 */
382 dst_area = as_area_create(dst_as, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
383 if (!dst_area) {
384 /*
385 * Destination address space area could not be created.
386 */
387 spinlock_unlock(&t->lock);
388 interrupts_restore(ipl);
389 return ENOMEM;
390 }
391
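	/* Clear the accept descriptor so that it cannot be consumed twice. */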
392 memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
393 spinlock_unlock(&t->lock);
394
395 /*
396 * Avoid deadlock by first locking the address space with lower address.
397 */
398 if (dst_as < AS) {
399 spinlock_lock(&dst_as->lock);
400 spinlock_lock(&AS->lock);
401 } else {
402 spinlock_lock(&AS->lock);
403 spinlock_lock(&dst_as->lock);
404 }
405
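	/*
	 * Copy every present mapping of the source area into the destination
	 * area. Frames backing ordinary memory gain an extra reference;
	 * device memory is not reference counted.
	 */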
406 for (i = 0; i < SIZE2FRAMES(src_size); i++) {
407 pte_t *pte;
408 __address frame;
409
410 page_table_lock(AS, false);
411 pte = page_mapping_find(AS, src_base + i*PAGE_SIZE);
412 if (pte && PTE_VALID(pte)) {
413 ASSERT(PTE_PRESENT(pte));
414 frame = PTE_GET_FRAME(pte);
415 if (!(src_flags & AS_AREA_DEVICE))
416 frame_reference_add(ADDR2PFN(frame));
417 page_table_unlock(AS, false);
418 } else {
419 page_table_unlock(AS, false);
420 continue;
421 }
422
423 page_table_lock(dst_as, false);
424 page_mapping_insert(dst_as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
425 page_table_unlock(dst_as, false);
426 }
427
428 /*
429 * Now the destination address space area has been
430 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
431 * attribute.
432 */
433 spinlock_lock(&dst_area->lock);
434 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
435 spinlock_unlock(&dst_area->lock);
436
437 spinlock_unlock(&AS->lock);
438 spinlock_unlock(&dst_as->lock);
439 interrupts_restore(ipl);
440
441 return 0;
442}
443
444/** Initialize mapping for one page of address space.
445 *
446 * This function maps 'page' to 'frame' according
447 * to the attributes of the address space area to
448 * which 'page' belongs.
449 *
450 * @param as Target address space.
451 * @param page Virtual page within the area.
452 * @param frame Physical frame to which page will be mapped.
453 */
454void as_set_mapping(as_t *as, __address page, __address frame)
455{
456 as_area_t *area;
457 ipl_t ipl;
458
459 ipl = interrupts_disable();
460 page_table_lock(as, true);
461
462 area = find_area_and_lock(as, page);
463 if (!area) {
464 panic("page not part of any as_area\n");
465 }
466
467 page_mapping_insert(as, page, frame, get_area_flags(area));
468
469 spinlock_unlock(&area->lock);
470 page_table_unlock(as, true);
471 interrupts_restore(ipl);
472}
473
474/** Handle page fault within the current address space.
475 *
476 * This is the high-level page fault handler.
477 * Interrupts are assumed disabled.
478 *
479 * @param page Faulting page.
480 *
481 * @return 0 if the fault cannot be handled here and must be signalled to the low-level handler, 1 on success.
482 */
483int as_page_fault(__address page)
484{
485 pte_t *pte;
486 as_area_t *area;
487 __address frame;
488
489 ASSERT(AS);
490
491 spinlock_lock(&AS->lock);
492 area = find_area_and_lock(AS, page);
493 if (!area) {
494 /*
495 * No area contained mapping for 'page'.
496 * Signal page fault to low-level handler.
497 */
498 spinlock_unlock(&AS->lock);
499 return 0;
500 }
501
502 if (area->attributes & AS_AREA_ATTR_PARTIAL) {
503 /*
504 * The address space area is not fully initialized.
505 * Avoid possible race by returning error.
506 */
507 spinlock_unlock(&area->lock);
508 spinlock_unlock(&AS->lock);
509 return 0;
510 }
511
512 ASSERT(!(area->flags & AS_AREA_DEVICE));
513
514 page_table_lock(AS, false);
515
516 /*
517 * To avoid race condition between two page faults
518 * on the same address, we need to make sure
519 * the mapping has not been already inserted.
520 */
521 if ((pte = page_mapping_find(AS, page))) {
522 if (PTE_PRESENT(pte)) {
523 page_table_unlock(AS, false);
524 spinlock_unlock(&area->lock);
525 spinlock_unlock(&AS->lock);
526 return 1;
527 }
528 }
529
530 /*
531 * In general, there can be several reasons that
532 * can have caused this fault.
533 *
534 * - non-existent mapping: the area is a scratch
535 * area (e.g. stack) and so far has not been
536 * allocated a frame for the faulting page
537 *
538 * - non-present mapping: another possibility,
539 * currently not implemented, would be frame
540 * reuse; when this becomes a possibility,
541 * do not forget to distinguish between
542 * the different causes
543 */
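	/* Allocate a new frame and zero it so that no stale data is exposed. */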
544 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
545 memsetb(PA2KA(frame), FRAME_SIZE, 0);
546
547 /*
548 * Map 'page' to 'frame'.
549 * Note that TLB shootdown is not attempted as only new information is being
550 * inserted into page tables.
551 */
552 page_mapping_insert(AS, page, frame, get_area_flags(area));
553 page_table_unlock(AS, false);
554
555 spinlock_unlock(&area->lock);
556 spinlock_unlock(&AS->lock);
557 return 1;
558}
559
560/** Switch address spaces.
561 *
562 * @param old Old address space or NULL.
563 * @param new New address space.
564 */
565void as_switch(as_t *old, as_t *new)
566{
567 ipl_t ipl;
568 bool needs_asid = false;
569
570 ipl = interrupts_disable();
571 spinlock_lock(&as_lock);
572
573 /*
574 * First, take care of the old address space.
575 */
576 if (old) {
577 spinlock_lock(&old->lock);
578 ASSERT(old->refcount);
579 if((--old->refcount == 0) && (old != AS_KERNEL)) {
580 /*
581 * The old address space is no longer active on
582 * any processor. It can be appended to the
583 * list of inactive address spaces with assigned
584 * ASID.
585 */
586 ASSERT(old->asid != ASID_INVALID);
587 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
588 }
589 spinlock_unlock(&old->lock);
590 }
591
592 /*
593 * Second, prepare the new address space.
594 */
595 spinlock_lock(&new->lock);
596 if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
597 if (new->asid != ASID_INVALID)
598 list_remove(&new->inactive_as_with_asid_link);
599 else
600 needs_asid = true; /* defer call to asid_get() until new->lock is released */
601 }
602 SET_PTL0_ADDRESS(new->page_table);
603 spinlock_unlock(&new->lock);
604
605 if (needs_asid) {
606 /*
607 * Allocation of new ASID was deferred
608 * until now in order to avoid deadlock.
609 */
610 asid_t asid;
611
612 asid = asid_get();
613 spinlock_lock(&new->lock);
614 new->asid = asid;
615 spinlock_unlock(&new->lock);
616 }
617 spinlock_unlock(&as_lock);
618 interrupts_restore(ipl);
619
620 /*
621 * Perform architecture-specific steps.
622 * (e.g. write ASID to hardware register etc.)
623 */
624 as_install_arch(new);
625
626 AS = new;
627}
628
629/** Convert address space area flags to page flags.
630 *
631 * @param aflags Flags of some address space area.
632 *
633 * @return Flags to be passed to page_mapping_insert().
634 */
635int area_flags_to_page_flags(int aflags)
636{
637 int flags;
638
639 flags = PAGE_USER | PAGE_PRESENT;
640
641 if (aflags & AS_AREA_READ)
642 flags |= PAGE_READ;
643
644 if (aflags & AS_AREA_WRITE)
645 flags |= PAGE_WRITE;
646
647 if (aflags & AS_AREA_EXEC)
648 flags |= PAGE_EXEC;
649
650 if (!(aflags & AS_AREA_DEVICE))
651 flags |= PAGE_CACHEABLE;
652
653 return flags;
654}
655
656/** Compute flags for the virtual address translation subsystem.
657 *
658 * The address space area must be locked.
659 * Interrupts must be disabled.
660 *
661 * @param a Address space area.
662 *
663 * @return Flags to be used in page_mapping_insert().
664 */
665int get_area_flags(as_area_t *a)
666{
667 return area_flags_to_page_flags(a->flags);
668}
669
670/** Create page table.
671 *
672 * Depending on architecture, create either address space
673 * private or global page table.
674 *
675 * @param flags Flags saying whether the page table is for kernel address space.
676 *
677 * @return First entry of the page table.
678 */
679pte_t *page_table_create(int flags)
680{
681 ASSERT(as_operations);
682 ASSERT(as_operations->page_table_create);
683
684 return as_operations->page_table_create(flags);
685}
686
687/** Lock page table.
688 *
689 * This function should be called before any page_mapping_insert(),
690 * page_mapping_remove() and page_mapping_find().
691 *
692 * Locking order is such that address space areas must be locked
693 * prior to this call. Address space can be locked prior to this
694 * call in which case the lock argument is false.
695 *
696 * @param as Address space.
697 * @param lock If false, do not attempt to lock as->lock.
698 */
699void page_table_lock(as_t *as, bool lock)
700{
701 ASSERT(as_operations);
702 ASSERT(as_operations->page_table_lock);
703
704 as_operations->page_table_lock(as, lock);
705}
706
707/** Unlock page table.
708 *
709 * @param as Address space.
710 * @param unlock If false, do not attempt to unlock as->lock.
711 */
712void page_table_unlock(as_t *as, bool unlock)
713{
714 ASSERT(as_operations);
715 ASSERT(as_operations->page_table_unlock);
716
717 as_operations->page_table_unlock(as, unlock);
718}
719
720
721/** Find address space area and lock it.
722 *
723 * The address space must be locked and interrupts must be disabled.
724 *
725 * @param as Address space.
726 * @param va Virtual address.
727 *
728 * @return Locked address space area containing va on success or NULL on failure.
729 */
730as_area_t *find_area_and_lock(as_t *as, __address va)
731{
732 as_area_t *a;
733 btree_node_t *leaf, *lnode;
734 int i;
735
736 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
737 if (a) {
738 /* va is the base address of an address space area */
739 spinlock_lock(&a->lock);
740 return a;
741 }
742
743 /*
744 * Search the leaf node and the rightmost record of its left neighbour
745 * to find out whether this is a miss or whether va belongs to an address
746 * space area found there.
747 */
748
749 /* First, search the leaf node itself. */
750 for (i = 0; i < leaf->keys; i++) {
751 a = (as_area_t *) leaf->value[i];
752 spinlock_lock(&a->lock);
753 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
754 return a;
755 }
756 spinlock_unlock(&a->lock);
757 }
758
759 /*
760 * Second, locate the left neighbour and test its last record.
761 * Because of its position in the B+tree, it must have base < va.
762 */
763 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
764 a = (as_area_t *) lnode->value[lnode->keys - 1];
765 spinlock_lock(&a->lock);
766 if (va < a->base + a->pages * PAGE_SIZE) {
767 return a;
768 }
769 spinlock_unlock(&a->lock);
770 }
771
772 return NULL;
773}
774
775/** Check area conflicts with other areas.
776 *
777 * The address space must be locked and interrupts must be disabled.
778 *
779 * @param as Address space.
780 * @param va Starting virtual address of the area being tested.
781 * @param size Size of the area being tested.
782 * @param avoid_area Do not touch this area.
783 *
784 * @return True if there is no conflict, false otherwise.
785 */
786bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
787{
788 as_area_t *a;
789 btree_node_t *leaf, *node;
790 int i;
791
792 /*
793 * We don't want any area to have conflicts with NULL page.
794 */
795 if (overlaps(va, size, NULL, PAGE_SIZE))
796 return false;
797
798 /*
799 * The leaf node is found in O(log n), where n is proportional to
800 * the number of address space areas belonging to as.
801 * The check for conflicts is then attempted on the rightmost
802 * record in the left neighbour, the leftmost record in the right
803 * neighbour and all records in the leaf node itself.
804 */
805
806 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
807 if (a != avoid_area)
808 return false;
809 }
810
811 /* First, check the two border cases. */
812 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
813 a = (as_area_t *) node->value[node->keys - 1];
814 spinlock_lock(&a->lock);
815 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
816 spinlock_unlock(&a->lock);
817 return false;
818 }
819 spinlock_unlock(&a->lock);
820 }
821 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
822 a = (as_area_t *) node->value[0];
823 spinlock_lock(&a->lock);
824 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
825 spinlock_unlock(&a->lock);
826 return false;
827 }
828 spinlock_unlock(&a->lock);
829 }
830
831 /* Second, check the leaf node. */
832 for (i = 0; i < leaf->keys; i++) {
833 a = (as_area_t *) leaf->value[i];
834
835 if (a == avoid_area)
836 continue;
837
838 spinlock_lock(&a->lock);
839 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
840 spinlock_unlock(&a->lock);
841 return false;
842 }
843 spinlock_unlock(&a->lock);
844 }
845
846 /*
847 * So far, the area does not conflict with other areas.
848 * Check that it does not conflict with the kernel address space either.
849 */
850 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
851 return !overlaps(va, size,
852 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
853 }
854
855 return true;
856}
857
858/*
859 * Address space related syscalls.
860 */
861
862/** Wrapper for as_area_create(). */
863__native sys_as_area_create(__address address, size_t size, int flags)
864{
865 if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
866 return (__native) address;
867 else
868 return (__native) -1;
869}
870
871/** Wrapper for as_area_resize(). */
872__native sys_as_area_resize(__address address, size_t size, int flags)
873{
874 return as_area_resize(AS, address, size, 0);
875}
876
877/** Prepare task for accepting address space area from another task.
878 *
879 * @param uspace_accept_arg Accept structure passed from userspace.
880 *
881 * @return EPERM if the size is zero or if the task ID encapsulated in
882 * @uspace_accept_arg references TASK. Otherwise zero is returned.
883 */
884__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
885{
886 as_area_acptsnd_arg_t arg;
887
888 copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
889
890 if (!arg.size)
891 return (__native) EPERM;
892
893 if (arg.task_id == TASK->taskid) {
894 /*
895 * Accepting from itself is not allowed.
896 */
897 return (__native) EPERM;
898 }
899
900 memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));
901
902 return 0;
903}
904
905/** Wrapper for as_area_send(). */
906__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
907{
908 as_area_acptsnd_arg_t arg;
909
910 copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
911
912 if (!arg.size)
913 return (__native) EPERM;
914
915 if (arg.task_id == TASK->taskid) {
916 /*
917 * Sending to itself is not allowed.
918 */
919 return (__native) EPERM;
920 }
921
922 return (__native) as_area_send(arg.task_id, (__address) arg.base);
923}