source: mainline/generic/src/mm/as.c@ 6fa476f7

Last change on this file since 6fa476f7 was 6fa476f7, checked in by Jakub Jermar <jakub@…>, 20 years ago

Rename SYS_AS_AREA_SHARE_APPROVE and SYS_AS_AREA_SHARE_PERFORM to
SYS_AS_AREA_ACCEPT and SYS_AS_AREA_SEND, respectively, in syscall_t.

Fix prototype of as_area_send() to take only base address of the address
space area as a parameter and read size and flags from the address space
area found at this base address.

1/*
2 * Copyright (C) 2001-2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * This file contains address space manipulation functions.
31 * Roughly speaking, this is a higher-level client of the
32 * Virtual Address Translation (VAT) subsystem.
33 */
34
35#include <mm/as.h>
36#include <arch/mm/as.h>
37#include <mm/page.h>
38#include <mm/frame.h>
39#include <mm/slab.h>
40#include <mm/tlb.h>
41#include <arch/mm/page.h>
42#include <genarch/mm/page_pt.h>
43#include <genarch/mm/page_ht.h>
44#include <mm/asid.h>
45#include <arch/mm/asid.h>
46#include <synch/spinlock.h>
47#include <adt/list.h>
48#include <adt/btree.h>
49#include <proc/task.h>
50#include <arch/asm.h>
51#include <panic.h>
52#include <debug.h>
53#include <print.h>
54#include <memstr.h>
55#include <macros.h>
56#include <arch.h>
57#include <errno.h>
58#include <config.h>
59#include <arch/types.h>
60#include <typedefs.h>
61
62as_operations_t *as_operations = NULL;
63
64/** Address space lock. It protects inactive_as_with_asid_head. */
65SPINLOCK_INITIALIZE(as_lock);
66
67/**
68 * This list contains address spaces that are not active on any
69 * processor and that have valid ASID.
70 */
71LIST_INITIALIZE(inactive_as_with_asid_head);
72
73/** Kernel address space. */
74as_t *AS_KERNEL = NULL;
75
76static int area_flags_to_page_flags(int aflags);
77static int get_area_flags(as_area_t *a);
78static as_area_t *find_area_and_lock(as_t *as, __address va);
79static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
80
81/** Initialize address space subsystem. */
82void as_init(void)
83{
84 as_arch_init();
85 AS_KERNEL = as_create(FLAG_AS_KERNEL);
86 if (!AS_KERNEL)
87 panic("can't create kernel address space\n");
88}
89
90/** Create address space.
91 *
92 * @param flags Flags that influence the way in which the address space is created.
93 */
94as_t *as_create(int flags)
95{
96 as_t *as;
97
98 as = (as_t *) malloc(sizeof(as_t), 0);
99 link_initialize(&as->inactive_as_with_asid_link);
100 spinlock_initialize(&as->lock, "as_lock");
101 btree_create(&as->as_area_btree);
102
103 if (flags & FLAG_AS_KERNEL)
104 as->asid = ASID_KERNEL;
105 else
106 as->asid = ASID_INVALID;
107
108 as->refcount = 0;
109 as->page_table = page_table_create(flags);
110
111 return as;
112}
113
114/** Free address space. */
115void as_free(as_t *as)
116{
117 ASSERT(as->refcount == 0);
118
119 /* TODO: free as_areas and other resources held by as */
120 /* TODO: free page table */
121 free(as);
122}
123
124/** Create address space area of common attributes.
125 *
126 * The created address space area is added to the target address space.
127 *
128 * @param as Target address space.
129 * @param flags Flags of the area.
130 * @param size Size of area.
131 * @param base Base address of area.
132 *
133 * @return Address space area on success or NULL on failure.
134 */
135as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
136{
137 ipl_t ipl;
138 as_area_t *a;
139
140 if (base % PAGE_SIZE)
141 return NULL;
142
143 if (!size)
144 return NULL;
145
146 /* Writeable executable areas are not supported. */
147 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
148 return NULL;
149
150 ipl = interrupts_disable();
151 spinlock_lock(&as->lock);
152
153 if (!check_area_conflicts(as, base, size, NULL)) {
154 spinlock_unlock(&as->lock);
155 interrupts_restore(ipl);
156 return NULL;
157 }
158
159 a = (as_area_t *) malloc(sizeof(as_area_t), 0);
160
161 spinlock_initialize(&a->lock, "as_area_lock");
162
163 a->flags = flags;
164 a->pages = SIZE2FRAMES(size);
165 a->base = base;
166
167 btree_insert(&as->as_area_btree, base, (void *) a, NULL);
168
169 spinlock_unlock(&as->lock);
170 interrupts_restore(ipl);
171
172 return a;
173}
174
175/** Find address space area and change it.
176 *
177 * @param as Address space.
178 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
179 * @param size New size of the virtual memory block starting at address.
180 * @param flags Flags influencing the remap operation. Currently unused.
181 *
182 * @return address on success, (__address) -1 otherwise.
183 */
184__address as_area_resize(as_t *as, __address address, size_t size, int flags)
185{
186 as_area_t *area = NULL;
187 ipl_t ipl;
188 size_t pages;
189
190 ipl = interrupts_disable();
191 spinlock_lock(&as->lock);
192
193 /*
194 * Locate the area.
195 */
196 area = find_area_and_lock(as, address);
197 if (!area) {
198 spinlock_unlock(&as->lock);
199 interrupts_restore(ipl);
200 return (__address) -1;
201 }
202
203 if (area->flags & AS_AREA_DEVICE) {
204 /*
205 * Remapping of address space areas associated
206 * with memory mapped devices is not supported.
207 */
208 spinlock_unlock(&area->lock);
209 spinlock_unlock(&as->lock);
210 interrupts_restore(ipl);
211 return (__address) -1;
212 }
213
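	/*
	 * The new page count covers everything from the area's base up to
	 * address + size, rounded up to whole frames.
	 */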
214 pages = SIZE2FRAMES((address - area->base) + size);
215 if (!pages) {
216 /*
217 * Zero size address space areas are not allowed.
218 */
219 spinlock_unlock(&area->lock);
220 spinlock_unlock(&as->lock);
221 interrupts_restore(ipl);
222 return (__address) -1;
223 }
224
225 if (pages < area->pages) {
226 int i;
227
228 /*
229 * Shrinking the area.
230 * No need to check for overlaps.
231 */
232 for (i = pages; i < area->pages; i++) {
233 pte_t *pte;
234
235 /*
236 * Releasing physical memory.
237 * This depends on the fact that the memory was allocated using frame_alloc().
238 */
239 page_table_lock(as, false);
240 pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
241 if (pte && PTE_VALID(pte)) {
242 __address frame;
243
244 ASSERT(PTE_PRESENT(pte));
245 frame = PTE_GET_FRAME(pte);
246 page_mapping_remove(as, area->base + i*PAGE_SIZE);
247 page_table_unlock(as, false);
248
249 frame_free(ADDR2PFN(frame));
250 } else {
251 page_table_unlock(as, false);
252 }
253 }
254 /*
255 * Invalidate TLB's.
256 */
257 tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
258 tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
259 tlb_shootdown_finalize();
260 } else {
261 /*
262 * Growing the area.
263 * Check for overlaps with other address space areas.
264 */
265 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
266 spinlock_unlock(&area->lock);
267 spinlock_unlock(&as->lock);
268 interrupts_restore(ipl);
269 return (__address) -1;
270 }
271 }
272
273 area->pages = pages;
274
275 spinlock_unlock(&area->lock);
276 spinlock_unlock(&as->lock);
277 interrupts_restore(ipl);
278
279 return address;
280}
281
282/** Send address space area to another task.
283 *
284 * Address space area is sent to the specified task.
285 * If the destination task is willing to accept the
286 * area, a new area is created according to the
287 * source area. Moreover, any existing mapping
288 * is copied as well, thus providing a mechanism
289 * for sharing a group of pages. The source address
290 * space area and any associated mapping is preserved.
291 *
292 * @param id Task ID of the accepting task.
293 * @param base Base address of the source address space area.
294 *
295 * @return 0 on success or ENOENT if there is no such task or
296 * if there is no such address space area,
297 * EPERM if there was a problem in accepting the area or
298 * ENOMEM if there was a problem in allocating destination
299 * address space area.
300 */
301int as_area_send(task_id_t id, __address base)
302{
303 ipl_t ipl;
304 task_t *t;
305 count_t i;
306 as_t *as;
307 __address dst_base;
308 int flags;
309 size_t size;
310 as_area_t *area;
311
312 ipl = interrupts_disable();
313 spinlock_lock(&tasks_lock);
314
315 t = task_find_by_id(id);
316 if (!t) {
317 spinlock_unlock(&tasks_lock);
318 interrupts_restore(ipl);
319 return ENOENT;
320 }
321
322 spinlock_lock(&t->lock);
323 spinlock_unlock(&tasks_lock);
324
325 as = t->as;
326 dst_base = (__address) t->accept_arg.base;
327
328 if (as == AS) {
329 /*
330 * The two tasks share the entire address space.
331 * Return error since there is no point in continuing.
332 */
333 spinlock_unlock(&t->lock);
334 interrupts_restore(ipl);
335 return EPERM;
336 }
337
338 spinlock_lock(&AS->lock);
339 area = find_area_and_lock(AS, base);
340 if (!area) {
341 /*
342 * Could not find the source address space area.
343 */
344 spinlock_unlock(&t->lock);
345 spinlock_unlock(&AS->lock);
346 interrupts_restore(ipl);
347 return ENOENT;
348 }
349 size = area->pages * PAGE_SIZE;
350 flags = area->flags;
351 spinlock_unlock(&area->lock);
352 spinlock_unlock(&AS->lock);
353
354 if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
355 (t->accept_arg.flags != flags)) {
356 /*
357 * Discrepancy in either task ID, size or flags.
358 */
359 spinlock_unlock(&t->lock);
360 interrupts_restore(ipl);
361 return EPERM;
362 }
363
364 /*
365 * Create copy of the address space area.
366 */
367 if (!as_area_create(as, flags, size, dst_base)) {
368 /*
369 * Destination address space area could not be created.
370 */
371 spinlock_unlock(&t->lock);
372 interrupts_restore(ipl);
373 return ENOMEM;
374 }
375
376 /*
377 * NOTE: we have just introduced a race condition.
378 * The destination task can attempt to access the newly
379 * created area before its mapping is copied from
380 * the source address space area. As a result, frames
381 * can get lost.
382 *
383 * Currently, this race is not solved, but one of the
384 * possible solutions would be to sleep in as_page_fault()
385 * when this situation is detected.
386 */
387
388 memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
389 spinlock_unlock(&t->lock);
390
391 /*
392 * Avoid deadlock by first locking the address space with lower address.
393 */
394 if (as < AS) {
395 spinlock_lock(&as->lock);
396 spinlock_lock(&AS->lock);
397 } else {
398 spinlock_lock(&AS->lock);
399 spinlock_lock(&as->lock);
400 }
401
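	/*
	 * Walk the source area page by page. Every page that already has a
	 * valid mapping gets its frame reference count bumped (unless this is
	 * a device area) and the very same frame is then mapped at the
	 * corresponding offset in the destination address space. Pages without
	 * a mapping are skipped and will be faulted in lazily by as_page_fault().
	 */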
402 for (i = 0; i < SIZE2FRAMES(size); i++) {
403 pte_t *pte;
404 __address frame;
405
406 page_table_lock(AS, false);
407 pte = page_mapping_find(AS, base + i*PAGE_SIZE);
408 if (pte && PTE_VALID(pte)) {
409 ASSERT(PTE_PRESENT(pte));
410 frame = PTE_GET_FRAME(pte);
411 if (!(flags & AS_AREA_DEVICE))
412 frame_reference_add(ADDR2PFN(frame));
413 page_table_unlock(AS, false);
414 } else {
415 page_table_unlock(AS, false);
416 continue;
417 }
418
419 page_table_lock(as, false);
420 page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
421 page_table_unlock(as, false);
422 }
423
424 spinlock_unlock(&AS->lock);
425 spinlock_unlock(&as->lock);
426 interrupts_restore(ipl);
427
428 return 0;
429}
430
431/** Initialize mapping for one page of address space.
432 *
433 * This function maps 'page' to 'frame' according
434 * to the attributes of the address space area to
435 * which 'page' belongs.
436 *
437 * @param as Target address space.
438 * @param page Virtual page within the area.
439 * @param frame Physical frame to which page will be mapped.
440 */
441void as_set_mapping(as_t *as, __address page, __address frame)
442{
443 as_area_t *area;
444 ipl_t ipl;
445
446 ipl = interrupts_disable();
447 page_table_lock(as, true);
448
449 area = find_area_and_lock(as, page);
450 if (!area) {
451 panic("page not part of any as_area\n");
452 }
453
454 page_mapping_insert(as, page, frame, get_area_flags(area));
455
456 spinlock_unlock(&area->lock);
457 page_table_unlock(as, true);
458 interrupts_restore(ipl);
459}
460
461/** Handle page fault within the current address space.
462 *
463 * This is the high-level page fault handler.
464 * Interrupts are assumed disabled.
465 *
466 * @param page Faulting page.
467 *
468 * @return 0 if the fault cannot be handled here and must be passed to the low-level handler, 1 on success.
469 */
470int as_page_fault(__address page)
471{
472 pte_t *pte;
473 as_area_t *area;
474 __address frame;
475
476 ASSERT(AS);
477
478 spinlock_lock(&AS->lock);
479 area = find_area_and_lock(AS, page);
480 if (!area) {
481 /*
482 * No area contained mapping for 'page'.
483 * Signal page fault to low-level handler.
484 */
485 spinlock_unlock(&AS->lock);
486 return 0;
487 }
488
489 ASSERT(!(area->flags & AS_AREA_DEVICE));
490
491 page_table_lock(AS, false);
492
493 /*
494 * To avoid race condition between two page faults
495 * on the same address, we need to make sure
496 * the mapping has not been already inserted.
497 */
498 if ((pte = page_mapping_find(AS, page))) {
499 if (PTE_PRESENT(pte)) {
500 page_table_unlock(AS, false);
501 spinlock_unlock(&area->lock);
502 spinlock_unlock(&AS->lock);
503 return 1;
504 }
505 }
506
507 /*
508 * In general, there can be several reasons that
509 * can have caused this fault.
510 *
511 * - non-existent mapping: the area is a scratch
512 * area (e.g. stack) and so far has not been
513 * allocated a frame for the faulting page
514 *
515 * - non-present mapping: another possibility,
516 * currently not implemented, would be frame
517 * reuse; when this becomes a possibility,
518 * do not forget to distinguish between
519 * the different causes
520 */
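	/*
	 * Allocate a fresh frame and zero it through its kernel identity
	 * mapping so that the faulting task cannot observe stale data.
	 */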
521 frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
522 memsetb(PA2KA(frame), FRAME_SIZE, 0);
523
524 /*
525 * Map 'page' to 'frame'.
526 * Note that TLB shootdown is not attempted as only new information is being
527 * inserted into page tables.
528 */
529 page_mapping_insert(AS, page, frame, get_area_flags(area));
530 page_table_unlock(AS, false);
531
532 spinlock_unlock(&area->lock);
533 spinlock_unlock(&AS->lock);
534 return 1;
535}
536
537/** Switch address spaces.
538 *
539 * @param old Old address space or NULL.
540 * @param new New address space.
541 */
542void as_switch(as_t *old, as_t *new)
543{
544 ipl_t ipl;
545 bool needs_asid = false;
546
547 ipl = interrupts_disable();
548 spinlock_lock(&as_lock);
549
550 /*
551 * First, take care of the old address space.
552 */
553 if (old) {
554 spinlock_lock(&old->lock);
555 ASSERT(old->refcount);
556 if((--old->refcount == 0) && (old != AS_KERNEL)) {
557 /*
558 * The old address space is no longer active on
559 * any processor. It can be appended to the
560 * list of inactive address spaces with assigned
561 * ASID.
562 */
563 ASSERT(old->asid != ASID_INVALID);
564 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
565 }
566 spinlock_unlock(&old->lock);
567 }
568
569 /*
570 * Second, prepare the new address space.
571 */
572 spinlock_lock(&new->lock);
573 if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
574 if (new->asid != ASID_INVALID)
575 list_remove(&new->inactive_as_with_asid_link);
576 else
577 needs_asid = true; /* defer call to asid_get() until new->lock is released */
578 }
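	/* Install the page table root (PTL0) of the new address space. */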
579 SET_PTL0_ADDRESS(new->page_table);
580 spinlock_unlock(&new->lock);
581
582 if (needs_asid) {
583 /*
584 * Allocation of new ASID was deferred
585 * until now in order to avoid deadlock.
586 */
587 asid_t asid;
588
589 asid = asid_get();
590 spinlock_lock(&new->lock);
591 new->asid = asid;
592 spinlock_unlock(&new->lock);
593 }
594 spinlock_unlock(&as_lock);
595 interrupts_restore(ipl);
596
597 /*
598 * Perform architecture-specific steps.
599 * (e.g. write ASID to hardware register etc.)
600 */
601 as_install_arch(new);
602
603 AS = new;
604}
605
606/** Convert address space area flags to page flags.
607 *
608 * @param aflags Flags of some address space area.
609 *
610 * @return Flags to be passed to page_mapping_insert().
611 */
612int area_flags_to_page_flags(int aflags)
613{
614 int flags;
615
616 flags = PAGE_USER | PAGE_PRESENT;
617
618 if (aflags & AS_AREA_READ)
619 flags |= PAGE_READ;
620
621 if (aflags & AS_AREA_WRITE)
622 flags |= PAGE_WRITE;
623
624 if (aflags & AS_AREA_EXEC)
625 flags |= PAGE_EXEC;
626
627 if (!(aflags & AS_AREA_DEVICE))
628 flags |= PAGE_CACHEABLE;
629
630 return flags;
631}
632
633/** Compute flags for the virtual address translation subsystem.
634 *
635 * The address space area must be locked.
636 * Interrupts must be disabled.
637 *
638 * @param a Address space area.
639 *
640 * @return Flags to be used in page_mapping_insert().
641 */
642int get_area_flags(as_area_t *a)
643{
644 return area_flags_to_page_flags(a->flags);
645}
646
647/** Create page table.
648 *
649 * Depending on architecture, create either address space
650 * private or global page table.
651 *
652 * @param flags Flags saying whether the page table is for kernel address space.
653 *
654 * @return First entry of the page table.
655 */
656pte_t *page_table_create(int flags)
657{
658 ASSERT(as_operations);
659 ASSERT(as_operations->page_table_create);
660
661 return as_operations->page_table_create(flags);
662}
663
664/** Lock page table.
665 *
666 * This function should be called before any page_mapping_insert(),
667 * page_mapping_remove() and page_mapping_find().
668 *
669 * Locking order is such that address space areas must be locked
670 * prior to this call. Address space can be locked prior to this
671 * call in which case the lock argument is false.
672 *
673 * @param as Address space.
674 * @param lock If false, do not attempt to lock as->lock.
675 */
676void page_table_lock(as_t *as, bool lock)
677{
678 ASSERT(as_operations);
679 ASSERT(as_operations->page_table_lock);
680
681 as_operations->page_table_lock(as, lock);
682}
683
684/** Unlock page table.
685 *
686 * @param as Address space.
687 * @param unlock If false, do not attempt to unlock as->lock.
688 */
689void page_table_unlock(as_t *as, bool unlock)
690{
691 ASSERT(as_operations);
692 ASSERT(as_operations->page_table_unlock);
693
694 as_operations->page_table_unlock(as, unlock);
695}
696
697
698/** Find address space area and lock it.
699 *
700 * The address space must be locked and interrupts must be disabled.
701 *
702 * @param as Address space.
703 * @param va Virtual address.
704 *
705 * @return Locked address space area containing va on success or NULL on failure.
706 */
707as_area_t *find_area_and_lock(as_t *as, __address va)
708{
709 as_area_t *a;
710 btree_node_t *leaf, *lnode;
711 int i;
712
713 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
714 if (a) {
715 /* va is the base address of an address space area */
716 spinlock_lock(&a->lock);
717 return a;
718 }
719
720 /*
721 * Search the leaf node and the rightmost record of its left neighbour
722 * to find out whether this is a miss or va belongs to an address
723 * space area found there.
724 */
725
726 /* First, search the leaf node itself. */
727 for (i = 0; i < leaf->keys; i++) {
728 a = (as_area_t *) leaf->value[i];
729 spinlock_lock(&a->lock);
730 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
731 return a;
732 }
733 spinlock_unlock(&a->lock);
734 }
735
736 /*
737 * Second, locate the left neighbour and test its last record.
738 * Because of its position in the B+tree, it must have base < va.
739 */
740 if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
741 a = (as_area_t *) lnode->value[lnode->keys - 1];
742 spinlock_lock(&a->lock);
743 if (va < a->base + a->pages * PAGE_SIZE) {
744 return a;
745 }
746 spinlock_unlock(&a->lock);
747 }
748
749 return NULL;
750}
751
752/** Check area conflicts with other areas.
753 *
754 * The address space must be locked and interrupts must be disabled.
755 *
756 * @param as Address space.
757 * @param va Starting virtual address of the area being tested.
758 * @param size Size of the area being tested.
759 * @param avoid_area Do not touch this area.
760 *
761 * @return True if there is no conflict, false otherwise.
762 */
763bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
764{
765 as_area_t *a;
766 btree_node_t *leaf, *node;
767 int i;
768
769 /*
770 * We don't want any area to have conflicts with NULL page.
771 */
772 if (overlaps(va, size, NULL, PAGE_SIZE))
773 return false;
774
775 /*
776 * The leaf node is found in O(log n), where n is proportional to
777 * the number of address space areas belonging to as.
778 * The check for conflicts is then attempted on the rightmost
779 * record in the left neighbour, the leftmost record in the right
780 * neighbour and all records in the leaf node itself.
781 */
782
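	/*
	 * If va is itself the base address of an existing area other than
	 * avoid_area, the conflict is immediate.
	 */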
783 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
784 if (a != avoid_area)
785 return false;
786 }
787
788 /* First, check the two border cases. */
789 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
790 a = (as_area_t *) node->value[node->keys - 1];
791 spinlock_lock(&a->lock);
792 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
793 spinlock_unlock(&a->lock);
794 return false;
795 }
796 spinlock_unlock(&a->lock);
797 }
798 if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
799 a = (as_area_t *) node->value[0];
800 spinlock_lock(&a->lock);
801 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
802 spinlock_unlock(&a->lock);
803 return false;
804 }
805 spinlock_unlock(&a->lock);
806 }
807
808 /* Second, check the leaf node. */
809 for (i = 0; i < leaf->keys; i++) {
810 a = (as_area_t *) leaf->value[i];
811
812 if (a == avoid_area)
813 continue;
814
815 spinlock_lock(&a->lock);
816 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
817 spinlock_unlock(&a->lock);
818 return false;
819 }
820 spinlock_unlock(&a->lock);
821 }
822
823 /*
824 * So far, the area does not conflict with other areas.
825 * Check that it does not conflict with the kernel address space.
826 */
827 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
828 return !overlaps(va, size,
829 KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
830 }
831
832 return true;
833}
834
835/*
836 * Address space related syscalls.
837 */
838
839/** Wrapper for as_area_create(). */
840__native sys_as_area_create(__address address, size_t size, int flags)
841{
842 if (as_area_create(AS, flags, size, address))
843 return (__native) address;
844 else
845 return (__native) -1;
846}
847
848/** Wrapper for as_area_resize(). */
849__native sys_as_area_resize(__address address, size_t size, int flags)
850{
851 return as_area_resize(AS, address, size, 0);
852}
853
854/** Prepare task for accepting address space area from another task.
855 *
856 * @param uspace_accept_arg Accept structure passed from userspace.
857 *
858 * @return EPERM if the accepted size is zero or if the task ID encapsulated
859 * in @uspace_accept_arg references TASK. Otherwise zero is returned.
860 */
861__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
862{
863 as_area_acptsnd_arg_t arg;
864
865 copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
866
867 if (!arg.size)
868 return (__native) EPERM;
869
870 if (arg.task_id == TASK->taskid) {
871 /*
872 * Accepting from itself is not allowed.
873 */
874 return (__native) EPERM;
875 }
876
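	/*
	 * Remember the accept request; it is validated against the source
	 * area and consumed by a later as_area_send() from the other task.
	 */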
877 memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));
878
879 return 0;
880}
881
882/** Wrapper for as_area_send(). */
883__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
884{
885 as_area_acptsnd_arg_t arg;
886
887 copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
888
889 if (!arg.size)
890 return (__native) EPERM;
891
892 if (arg.task_id == TASK->taskid) {
893 /*
894 * Sending to itself is not allowed.
895 */
896 return (__native) EPERM;
897 }
898
899 return (__native) as_area_send(arg.task_id, (__address) arg.base);
900}