Changeset 8182031 in mainline for generic/src/mm/as.c

Timestamp: 2006-05-23T23:09:13Z
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 82da5f5
Parents: 56789125
Files: generic/src/mm/as.c (1 edited)
generic/src/mm/as.c
--- generic/src/mm/as.c (r56789125)
+++ generic/src/mm/as.c (r8182031)
@@ -75,4 +75,11 @@
 #include <arch/interrupt.h>
 
+/** This structure contains information associated with the shared address space area. */
+struct share_info {
+    mutex_t lock;        /**< This lock must be acquired only when the as_area lock is held. */
+    count_t refcount;    /**< This structure can be deallocated if refcount drops to 0. */
+    btree_t pagemap;     /**< B+tree containing complete map of anonymous pages of the shared area. */
+};
+
 as_operations_t *as_operations = NULL;
 
@@ -90,9 +97,7 @@
 
 static int area_flags_to_page_flags(int aflags);
-static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
 static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
-static int used_space_insert(as_area_t *a, __address page, count_t count);
-static int used_space_remove(as_area_t *a, __address page, count_t count);
+static void sh_info_remove_reference(share_info_t *sh_info);
 
 /** Initialize address space subsystem. */
@@ -149,8 +154,11 @@
  * @param base Base address of area.
  * @param attrs Attributes of the area.
+ * @param backend Address space area backend. NULL if no backend is used.
+ * @param backend_data NULL or a pointer to an array holding two void *.
  *
  * @return Address space area on success or NULL on failure.
  */
-as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
+as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
+    mem_backend_t *backend, void **backend_data)
 {
     ipl_t ipl;
@@ -184,4 +192,10 @@
     a->pages = SIZE2FRAMES(size);
     a->base = base;
+    a->sh_info = NULL;
+    a->backend = backend;
+    if (backend_data) {
+        a->backend_data[0] = backend_data[0];
+        a->backend_data[1] = backend_data[1];
+    }
     btree_create(&a->used_space);
 
@@ -226,4 +240,14 @@
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
+         */
+        mutex_unlock(&area->lock);
+        mutex_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return ENOTSUP;
+    }
+    if (area->sh_info) {
+        /*
+         * Remapping of shared address space areas
+         * is not supported.
          */
         mutex_unlock(&area->lock);
@@ -303,5 +327,8 @@
             pte = page_mapping_find(as, b + i*PAGE_SIZE);
             ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-            frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
+            if (area->backend && area->backend->backend_frame_free) {
+                area->backend->backend_frame_free(area,
+                    b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+            }
             page_mapping_remove(as, b + i*PAGE_SIZE);
             page_table_unlock(as, false);
@@ -392,5 +419,8 @@
             pte = page_mapping_find(as, b + i*PAGE_SIZE);
             ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-            frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
+            if (area->backend && area->backend->backend_frame_free) {
+                area->backend->backend_frame_free(area,
+                    b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+            }
             page_mapping_remove(as, b + i*PAGE_SIZE);
             page_table_unlock(as, false);
@@ -411,4 +441,8 @@
 
     area->attributes |= AS_AREA_ATTR_PARTIAL;
+
+    if (area->sh_info)
+        sh_info_remove_reference(area->sh_info);
+
     mutex_unlock(&area->lock);
 
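The mem_backend_t type itself is declared in the corresponding header, which is not part of this diff. A minimal sketch of what the interface must provide, inferred purely from the call sites in this changeset (the backend_frame_free calls above and the anon_backend initializer below), could look like the following; the real declaration in the header may differ:

/* Sketch only: field names inferred from this changeset, not copied from the header. */
typedef struct mem_backend {
    /* Resolve a page fault at addr; returns AS_PF_OK or AS_PF_FAULT. */
    int (* backend_page_fault)(as_area_t *area, __address addr);

    /* Release a frame owned by the area, e.g. on area destruction or resize. */
    void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
} mem_backend_t;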
@@ -485,5 +519,5 @@
      * preliminary as_page_fault() calls.
      */
-    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
+    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
     if (!dst_area) {
         /*
@@ -569,5 +603,7 @@
     }
 
-    page_mapping_insert(as, page, frame, get_area_flags(area));
+    ASSERT(!area->backend);
+
+    page_mapping_insert(as, page, frame, as_area_get_flags(area));
     if (!used_space_insert(area, page, 1))
         panic("Could not insert used space.\n");
@@ -580,5 +616,9 @@
 /** Handle page fault within the current address space.
  *
- * This is the high-level page fault handler.
+ * This is the high-level page fault handler. It decides
+ * whether the page fault can be resolved by any backend
+ * and if so, it invokes the backend to resolve the page
+ * fault.
+ *
  * Interrupts are assumed disabled.
  *
@@ -586,5 +626,6 @@
  * @param istate Pointer to interrupted state.
  *
- * @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace().
+ * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
+ *         fault was caused by copy_to_uspace() or copy_from_uspace().
  */
 int as_page_fault(__address page, istate_t *istate)
@@ -592,8 +633,7 @@
     pte_t *pte;
     as_area_t *area;
-    __address frame;
 
     if (!THREAD)
-        return 0;
+        return AS_PF_FAULT;
 
     ASSERT(AS);
@@ -620,5 +660,13 @@
     }
 
-    ASSERT(!(area->flags & AS_AREA_DEVICE));
+    if (!area->backend || !area->backend->backend_page_fault) {
+        /*
+         * The address space area is not backed by any backend
+         * or the backend cannot handle page faults.
+         */
+        mutex_unlock(&area->lock);
+        mutex_unlock(&AS->lock);
+        goto page_fault;
+    }
 
     page_table_lock(AS, false);
@@ -634,35 +682,19 @@
             mutex_unlock(&area->lock);
             mutex_unlock(&AS->lock);
-            return 1;
-        }
-    }
-
-    /*
-     * In general, there can be several reasons that
-     * can have caused this fault.
-     *
-     * - non-existent mapping: the area is a scratch
-     *   area (e.g. stack) and so far has not been
-     *   allocated a frame for the faulting page
-     *
-     * - non-present mapping: another possibility,
-     *   currently not implemented, would be frame
-     *   reuse; when this becomes a possibility,
-     *   do not forget to distinguish between
-     *   the different causes
+            return AS_PF_OK;
+        }
+    }
+
+    /*
+     * Resort to the backend page fault handler.
      */
-    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-    memsetb(PA2KA(frame), FRAME_SIZE, 0);
-
-    /*
-     * Map 'page' to 'frame'.
-     * Note that TLB shootdown is not attempted as only new information is being
-     * inserted into page tables.
-     */
-    page_mapping_insert(AS, page, frame, get_area_flags(area));
-    if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1))
-        panic("Could not insert used space.\n");
+    if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
+        page_table_unlock(AS, false);
+        mutex_unlock(&area->lock);
+        mutex_unlock(&AS->lock);
+        goto page_fault;
+    }
+
     page_table_unlock(AS, false);
-
     mutex_unlock(&area->lock);
     mutex_unlock(&AS->lock);
@@ -670,7 +702,4 @@
 
 page_fault:
-    if (!THREAD)
-        return AS_PF_FAULT;
-
     if (THREAD->in_copy_from_uspace) {
         THREAD->in_copy_from_uspace = false;
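After this rework, as_page_fault() carries no memory-management policy of its own: it only verifies that the faulting area has a backend with a backend_page_fault handler and delegates to it, translating failure into the page_fault path. A hypothetical additional backend would plug in as sketched below; the 1:1 physical-window policy and all names other than the diff's own identifiers are invented for illustration (only the anonymous backend exists in this changeset):

/* Hypothetical backend: map faults 1:1 onto a physical window whose base
 * the creator of the area stored in backend_data[0]. Illustrative only. */
static int window_page_fault(as_area_t *area, __address addr)
{
    __address page = ALIGN_DOWN(addr, PAGE_SIZE);
    __address frame = (__address) area->backend_data[0] + (page - area->base);

    /* Install the mapping with the area's permissions and record the page. */
    page_mapping_insert(AS, page, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}

mem_backend_t window_backend = {
    .backend_page_fault = window_page_fault,
    .backend_frame_free = NULL    /* frames in the window are not owned by the area */
};

Leaving backend_frame_free NULL is safe because the destruction and resize paths above call it only after checking that it is non-NULL.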
@@ -794,5 +823,5 @@
  * @return Flags to be used in page_mapping_insert().
  */
-int get_area_flags(as_area_t *a)
+int as_area_get_flags(as_area_t *a)
 {
     return area_flags_to_page_flags(a->flags);
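Dropping the static qualifier and renaming get_area_flags() to as_area_get_flags() (its file-local forward declaration was removed from the list near the top of the file) exports the flag translation, so that backend fault handlers, which need not live in this file, can install mappings with the correct page flags, as in anon_page_fault() below:

    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));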
@@ -1382,4 +1411,149 @@
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ */
+void sh_info_remove_reference(share_info_t *sh_info)
+{
+    bool dealloc = false;
+
+    mutex_lock(&sh_info->lock);
+    ASSERT(sh_info->refcount);
+    if (--sh_info->refcount == 0) {
+        dealloc = true;
+        bool cond;
+
+        /*
+         * Now walk carefully the pagemap B+tree and free/remove
+         * reference from all frames found there.
+         */
+        for (cond = true; cond;) {
+            btree_node_t *node;
+
+            ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
+            node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
+            if ((cond = node->keys)) {
+                frame_free(ADDR2PFN((__address) node->value[0]));
+                btree_remove(&sh_info->pagemap, node->key[0], node);
+            }
+        }
+
+    }
+    mutex_unlock(&sh_info->lock);
+
+    if (dealloc) {
+        btree_destroy(&sh_info->pagemap);
+        free(sh_info);
+    }
+}
+
+static int anon_page_fault(as_area_t *area, __address addr);
+static void anon_frame_free(as_area_t *area, __address page, __address frame);
+
+/*
+ * Anonymous memory backend.
+ */
+mem_backend_t anon_backend = {
+    .backend_page_fault = anon_page_fault,
+    .backend_frame_free = anon_frame_free
+};
+
+/** Service a page fault in the anonymous memory address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ */
+int anon_page_fault(as_area_t *area, __address addr)
+{
+    __address frame;
+
+    if (area->sh_info) {
+        btree_node_t *leaf;
+
+        /*
+         * The area is shared, chances are that the mapping can be found
+         * in the pagemap of the address space area share info structure.
+         * In the case that the pagemap does not contain the respective
+         * mapping, a new frame is allocated and the mapping is created.
+         */
+        mutex_lock(&area->sh_info->lock);
+        frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
+        if (!frame) {
+            bool allocate = true;
+            int i;
+
+            /*
+             * Zero can be returned as a valid frame address.
+             * Just a small workaround.
+             */
+            for (i = 0; i < leaf->keys; i++) {
+                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
+                    allocate = false;
+                    break;
+                }
+            }
+            if (allocate) {
+                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+                memsetb(PA2KA(frame), FRAME_SIZE, 0);
+
+                /*
+                 * Insert the address of the newly allocated frame to the pagemap.
+                 */
+                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
+            }
+        }
+        mutex_unlock(&area->sh_info->lock);
+    } else {
+
+        /*
+         * In general, there can be several reasons that
+         * can have caused this fault.
+         *
+         * - non-existent mapping: the area is an anonymous
+         *   area (e.g. heap or stack) and so far has not been
+         *   allocated a frame for the faulting page
+         *
+         * - non-present mapping: another possibility,
+         *   currently not implemented, would be frame
+         *   reuse; when this becomes a possibility,
+         *   do not forget to distinguish between
+         *   the different causes
+         */
+        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+        memsetb(PA2KA(frame), FRAME_SIZE, 0);
+    }
+
+    /*
+     * Map 'page' to 'frame'.
+     * Note that TLB shootdown is not attempted as only new information is being
+     * inserted into page tables.
+     */
+    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+        panic("Could not insert used space.\n");
+
+    return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the anonymous memory backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Ignored.
+ * @param page Ignored.
+ * @param frame Frame to be released.
+ */
+void anon_frame_free(as_area_t *area, __address page, __address frame)
+{
+    frame_free(ADDR2PFN(frame));
+}
+
 /*
  * Address space related syscalls.
@@ -1389,5 +1563,5 @@
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
+    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
         return (__native) address;
     else
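With the new signature, every creator of an address space area chooses a backend explicitly, as the two updated call sites show (the area-sharing path and sys_as_area_create() both pass &anon_backend). A minimal usage sketch, assuming the usual area flag constants; the flags, size, and base address are example values, not taken from this changeset:

/* Illustrative only: an anonymous area whose frames are allocated lazily,
 * one zeroed frame per page, by anon_page_fault() on first access. */
as_area_t *a;

a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 16 * PAGE_SIZE,
    0x40000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
if (!a)
    panic("Could not create address space area.\n");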