Changeset ab936440 in mainline for kernel/generic/src
- Timestamp:
- 2019-02-12T20:42:42Z (7 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f31ca47
- Parents:
- 7f7817a9 (diff), 4805495 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - git-author:
- Matthieu Riolo <matthieu.riolo@…> (2019-02-12 20:26:18)
- git-committer:
- Matthieu Riolo <matthieu.riolo@…> (2019-02-12 20:42:42)
- Location:
- kernel/generic/src
- Files:
-
- 1 deleted
- 18 edited
-
adt/btree.c (deleted)
-
console/kconsole.c (modified) (7 diffs)
-
ddi/ddi.c (modified) (8 diffs)
-
interrupt/interrupt.c (modified) (6 diffs)
-
lib/str.c (modified) (4 diffs)
-
main/main.c (modified) (3 diffs)
-
mm/as.c (modified) (38 diffs)
-
mm/backend_anon.c (modified) (5 diffs)
-
mm/backend_elf.c (modified) (7 diffs)
-
mm/backend_phys.c (modified) (1 diff)
-
mm/backend_user.c (modified) (1 diff)
-
mm/frame.c (modified) (19 diffs)
-
mm/page.c (modified) (4 diffs)
-
mm/slab.c (modified) (18 diffs)
-
proc/current.c (modified) (1 diff)
-
proc/task.c (modified) (1 diff)
-
proc/thread.c (modified) (1 diff)
-
sysinfo/stats.c (modified) (1 diff)
-
sysinfo/sysinfo.c (modified) (10 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/console/kconsole.c
r7f7817a9 rab936440 156 156 157 157 /** Print count times a character */ 158 NO_TRACE static void print_cc(wchar_t ch, size_t count)158 _NO_TRACE static void print_cc(wchar_t ch, size_t count) 159 159 { 160 160 size_t i; … … 203 203 * 204 204 */ 205 NO_TRACE static int cmdtab_compl(char *input, size_t size, indev_t *indev,205 _NO_TRACE static int cmdtab_compl(char *input, size_t size, indev_t *indev, 206 206 hints_enum_func_t hints_enum) 207 207 { … … 290 290 } 291 291 292 NO_TRACE static cmd_info_t *parse_cmd(const wchar_t *cmdline)292 _NO_TRACE static cmd_info_t *parse_cmd(const wchar_t *cmdline) 293 293 { 294 294 size_t start = 0; … … 331 331 } 332 332 333 NO_TRACE static wchar_t *clever_readline(const char *prompt, indev_t *indev,333 _NO_TRACE static wchar_t *clever_readline(const char *prompt, indev_t *indev, 334 334 char *tmp) 335 335 { … … 548 548 } 549 549 550 NO_TRACE static bool parse_int_arg(const char *text, size_t len,550 _NO_TRACE static bool parse_int_arg(const char *text, size_t len, 551 551 sysarg_t *result) 552 552 { … … 637 637 * 638 638 */ 639 NO_TRACE static bool parse_argument(const char *cmdline, size_t size,639 _NO_TRACE static bool parse_argument(const char *cmdline, size_t size, 640 640 size_t *start, size_t *end) 641 641 { … … 674 674 * 675 675 */ 676 NO_TRACE static cmd_info_t *parse_cmdline(const char *cmdline, size_t size)676 _NO_TRACE static cmd_info_t *parse_cmdline(const char *cmdline, size_t size) 677 677 { 678 678 size_t start = 0; -
kernel/generic/src/ddi/ddi.c
r7f7817a9 rab936440 121 121 * 122 122 */ 123 NO_TRACE static errno_t physmem_map(uintptr_t phys, size_t pages,123 _NO_TRACE static errno_t physmem_map(uintptr_t phys, size_t pages, 124 124 unsigned int flags, uintptr_t *virt, uintptr_t bound) 125 125 { … … 227 227 } 228 228 229 NO_TRACE static errno_t physmem_unmap(uintptr_t virt)229 _NO_TRACE static errno_t physmem_unmap(uintptr_t virt) 230 230 { 231 231 assert(TASK); … … 312 312 * 313 313 */ 314 NO_TRACE static errno_t iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)314 _NO_TRACE static errno_t iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) 315 315 { 316 316 /* … … 353 353 * 354 354 */ 355 NO_TRACE static errno_t iospace_disable(task_id_t id, uintptr_t ioaddr, size_t size)355 _NO_TRACE static errno_t iospace_disable(task_id_t id, uintptr_t ioaddr, size_t size) 356 356 { 357 357 /* … … 413 413 } 414 414 415 NO_TRACE static errno_t dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,415 _NO_TRACE static errno_t dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags, 416 416 unsigned int flags, uintptr_t *phys) 417 417 { … … 422 422 } 423 423 424 NO_TRACE static errno_t dmamem_map_anonymous(size_t size, uintptr_t constraint,424 _NO_TRACE static errno_t dmamem_map_anonymous(size_t size, uintptr_t constraint, 425 425 unsigned int map_flags, unsigned int flags, uintptr_t *phys, 426 426 uintptr_t *virt, uintptr_t bound) … … 451 451 } 452 452 453 NO_TRACE static errno_t dmamem_unmap(uintptr_t virt, size_t size)453 _NO_TRACE static errno_t dmamem_unmap(uintptr_t virt, size_t size) 454 454 { 455 455 // TODO: implement unlocking & unmap … … 457 457 } 458 458 459 NO_TRACE static errno_t dmamem_unmap_anonymous(uintptr_t virt)459 _NO_TRACE static errno_t dmamem_unmap_anonymous(uintptr_t virt) 460 460 { 461 461 return as_area_destroy(TASK->as, virt); -
kernel/generic/src/interrupt/interrupt.c
r7f7817a9 rab936440 99 99 * 100 100 */ 101 NO_TRACE void exc_dispatch(unsigned int n, istate_t *istate)101 _NO_TRACE void exc_dispatch(unsigned int n, istate_t *istate) 102 102 { 103 103 #if (IVT_ITEMS > 0) … … 159 159 * 160 160 */ 161 NO_TRACE static void exc_undef(unsigned int n, istate_t *istate)161 _NO_TRACE static void exc_undef(unsigned int n, istate_t *istate) 162 162 { 163 163 fault_if_from_uspace(istate, "Unhandled exception %u.", n); … … 165 165 } 166 166 167 static NO_TRACE void167 static _NO_TRACE void 168 168 fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args) 169 169 { … … 185 185 * 186 186 */ 187 NO_TRACE void fault_from_uspace(istate_t *istate, const char *fmt, ...)187 _NO_TRACE void fault_from_uspace(istate_t *istate, const char *fmt, ...) 188 188 { 189 189 va_list args; … … 197 197 * 198 198 */ 199 NO_TRACE void fault_if_from_uspace(istate_t *istate, const char *fmt, ...)199 _NO_TRACE void fault_if_from_uspace(istate_t *istate, const char *fmt, ...) 200 200 { 201 201 if (!istate_from_uspace(istate)) … … 233 233 * 234 234 */ 235 NO_TRACE static int cmd_exc_print(cmd_arg_t *argv)235 _NO_TRACE static int cmd_exc_print(cmd_arg_t *argv) 236 236 { 237 237 bool excs_all; -
kernel/generic/src/lib/str.c
r7f7817a9 rab936440 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2005 Martin Decky 4 * Copyright (c) 2008 Jiri Svoboda 5 * Copyright (c) 2011 Martin Sucha 6 * Copyright (c) 2011 Oleg Romanenko 3 7 * All rights reserved. 4 8 * … … 103 107 104 108 #include <str.h> 105 #include <cpu.h> 106 #include <arch/asm.h> 107 #include <arch.h> 109 110 #include <assert.h> 108 111 #include <errno.h> 112 #include <stdbool.h> 113 #include <stddef.h> 114 #include <stdint.h> 115 #include <stdlib.h> 116 109 117 #include <align.h> 110 #include <assert.h>111 118 #include <macros.h> 112 #include <stdlib.h>113 119 114 120 /** Check the condition if wchar_t is signed */ … … 616 622 } 617 623 624 /** Convert wide string to string. 625 * 626 * Convert wide string @a src to string. The output is written to the buffer 627 * specified by @a dest and @a size. @a size must be non-zero and the string 628 * written will always be well-formed. 629 * 630 * @param dest Destination buffer. 631 * @param size Size of the destination buffer. 632 * @param src Source wide string. 633 */ 634 void wstr_to_str(char *dest, size_t size, const wchar_t *src) 635 { 636 wchar_t ch; 637 size_t src_idx; 638 size_t dest_off; 639 640 /* There must be space for a null terminator in the buffer. */ 641 assert(size > 0); 642 643 src_idx = 0; 644 dest_off = 0; 645 646 while ((ch = src[src_idx++]) != 0) { 647 if (chr_encode(ch, dest, &dest_off, size - 1) != EOK) 648 break; 649 } 650 651 dest[dest_off] = '\0'; 652 } 653 654 /** Find first occurence of character in string. 655 * 656 * @param str String to search. 657 * @param ch Character to look for. 658 * 659 * @return Pointer to character in @a str or NULL if not found. 
660 */ 661 char *str_chr(const char *str, wchar_t ch) 662 { 663 wchar_t acc; 664 size_t off = 0; 665 size_t last = 0; 666 667 while ((acc = str_decode(str, &off, STR_NO_LIMIT)) != 0) { 668 if (acc == ch) 669 return (char *) (str + last); 670 last = off; 671 } 672 673 return NULL; 674 } 675 676 /** Insert a wide character into a wide string. 677 * 678 * Insert a wide character into a wide string at position 679 * @a pos. The characters after the position are shifted. 680 * 681 * @param str String to insert to. 682 * @param ch Character to insert to. 683 * @param pos Character index where to insert. 684 * @param max_pos Characters in the buffer. 685 * 686 * @return True if the insertion was sucessful, false if the position 687 * is out of bounds. 688 * 689 */ 690 bool wstr_linsert(wchar_t *str, wchar_t ch, size_t pos, size_t max_pos) 691 { 692 size_t len = wstr_length(str); 693 694 if ((pos > len) || (pos + 1 > max_pos)) 695 return false; 696 697 size_t i; 698 for (i = len; i + 1 > pos; i--) 699 str[i + 1] = str[i]; 700 701 str[pos] = ch; 702 703 return true; 704 } 705 706 /** Remove a wide character from a wide string. 707 * 708 * Remove a wide character from a wide string at position 709 * @a pos. The characters after the position are shifted. 710 * 711 * @param str String to remove from. 712 * @param pos Character index to remove. 713 * 714 * @return True if the removal was sucessful, false if the position 715 * is out of bounds. 716 * 717 */ 718 bool wstr_remove(wchar_t *str, size_t pos) 719 { 720 size_t len = wstr_length(str); 721 722 if (pos >= len) 723 return false; 724 725 size_t i; 726 for (i = pos + 1; i <= len; i++) 727 str[i - 1] = str[i]; 728 729 return true; 730 } 731 618 732 /** Duplicate string. 619 733 * … … 675 789 str_ncpy(dest, size + 1, src, size); 676 790 return dest; 677 }678 679 /** Convert wide string to string.680 *681 * Convert wide string @a src to string. The output is written to the buffer682 * specified by @a dest and @a size. 
@a size must be non-zero and the string683 * written will always be well-formed.684 *685 * @param dest Destination buffer.686 * @param size Size of the destination buffer.687 * @param src Source wide string.688 */689 void wstr_to_str(char *dest, size_t size, const wchar_t *src)690 {691 wchar_t ch;692 size_t src_idx;693 size_t dest_off;694 695 /* There must be space for a null terminator in the buffer. */696 assert(size > 0);697 698 src_idx = 0;699 dest_off = 0;700 701 while ((ch = src[src_idx++]) != 0) {702 if (chr_encode(ch, dest, &dest_off, size - 1) != EOK)703 break;704 }705 706 dest[dest_off] = '\0';707 }708 709 /** Find first occurence of character in string.710 *711 * @param str String to search.712 * @param ch Character to look for.713 *714 * @return Pointer to character in @a str or NULL if not found.715 *716 */717 char *str_chr(const char *str, wchar_t ch)718 {719 wchar_t acc;720 size_t off = 0;721 size_t last = 0;722 723 while ((acc = str_decode(str, &off, STR_NO_LIMIT)) != 0) {724 if (acc == ch)725 return (char *) (str + last);726 last = off;727 }728 729 return NULL;730 }731 732 /** Insert a wide character into a wide string.733 *734 * Insert a wide character into a wide string at position735 * @a pos. 
The characters after the position are shifted.736 *737 * @param str String to insert to.738 * @param ch Character to insert to.739 * @param pos Character index where to insert.740 * @param max_pos Characters in the buffer.741 *742 * @return True if the insertion was sucessful, false if the position743 * is out of bounds.744 *745 */746 bool wstr_linsert(wchar_t *str, wchar_t ch, size_t pos, size_t max_pos)747 {748 size_t len = wstr_length(str);749 750 if ((pos > len) || (pos + 1 > max_pos))751 return false;752 753 size_t i;754 for (i = len; i + 1 > pos; i--)755 str[i + 1] = str[i];756 757 str[pos] = ch;758 759 return true;760 }761 762 /** Remove a wide character from a wide string.763 *764 * Remove a wide character from a wide string at position765 * @a pos. The characters after the position are shifted.766 *767 * @param str String to remove from.768 * @param pos Character index to remove.769 *770 * @return True if the removal was sucessful, false if the position771 * is out of bounds.772 *773 */774 bool wstr_remove(wchar_t *str, size_t pos)775 {776 size_t len = wstr_length(str);777 778 if (pos >= len)779 return false;780 781 size_t i;782 for (i = pos + 1; i <= len; i++)783 str[i - 1] = str[i];784 785 return true;786 791 } 787 792 -
kernel/generic/src/main/main.c
r7f7817a9 rab936440 83 83 #include <ipc/ipc.h> 84 84 #include <macros.h> 85 #include <adt/btree.h>86 85 #include <smp/smp.h> 87 86 #include <ddi/ddi.h> … … 161 160 * 162 161 */ 163 NO_TRACE void main_bsp(void)162 _NO_TRACE void main_bsp(void) 164 163 { 165 164 config.cpu_count = 1; … … 249 248 ra_init(); 250 249 sysinfo_init(); 251 btree_init();252 250 as_init(); 253 251 page_init(); -
kernel/generic/src/mm/as.c
r7f7817a9 rab936440 63 63 #include <synch/mutex.h> 64 64 #include <adt/list.h> 65 #include <adt/btree.h>66 65 #include <proc/task.h> 67 66 #include <proc/thread.h> … … 89 88 as_operations_t *as_operations = NULL; 90 89 91 /** Slab for as_t objects. 92 * 93 */ 90 /** Cache for as_t objects */ 94 91 static slab_cache_t *as_cache; 92 93 /** Cache for as_page_mapping_t objects */ 94 static slab_cache_t *as_page_mapping_cache; 95 96 /** Cache for used_space_ival_t objects */ 97 static slab_cache_t *used_space_ival_cache; 95 98 96 99 /** ASID subsystem lock. … … 116 119 static int as_areas_cmp(void *, void *); 117 120 118 NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags) 121 static void used_space_initialize(used_space_t *); 122 static void used_space_finalize(used_space_t *); 123 static void *used_space_getkey(odlink_t *); 124 static int used_space_cmp(void *, void *); 125 static used_space_ival_t *used_space_last(used_space_t *); 126 static void used_space_remove_ival(used_space_ival_t *); 127 static void used_space_shorten_ival(used_space_ival_t *, size_t); 128 129 _NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags) 119 130 { 120 131 as_t *as = (as_t *) obj; … … 126 137 } 127 138 128 NO_TRACE static size_t as_destructor(void *obj)139 _NO_TRACE static size_t as_destructor(void *obj) 129 140 { 130 141 return as_destructor_arch((as_t *) obj); … … 138 149 as_cache = slab_cache_create("as_t", sizeof(as_t), 0, 139 150 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); 151 152 as_page_mapping_cache = slab_cache_create("as_page_mapping_t", 153 sizeof(as_page_mapping_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 154 155 used_space_ival_cache = slab_cache_create("used_space_ival_t", 156 sizeof(used_space_ival_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 140 157 141 158 AS_KERNEL = as_create(FLAG_AS_KERNEL); … … 177 194 } 178 195 179 /** Destroy ad ress space.196 /** Destroy address space. 
180 197 * 181 198 * When there are no tasks referencing this address space (i.e. its refcount is … … 264 281 * 265 282 */ 266 NO_TRACE void as_hold(as_t *as)283 _NO_TRACE void as_hold(as_t *as) 267 284 { 268 285 refcount_up(&as->refcount); … … 277 294 * 278 295 */ 279 NO_TRACE void as_release(as_t *as)296 _NO_TRACE void as_release(as_t *as) 280 297 { 281 298 if (refcount_down(&as->refcount)) … … 323 340 * @return True if the two areas conflict, false otherwise. 324 341 */ 325 NO_TRACE static bool area_is_conflicting(uintptr_t addr,342 _NO_TRACE static bool area_is_conflicting(uintptr_t addr, 326 343 size_t count, bool guarded, as_area_t *area) 327 344 { … … 363 380 * 364 381 */ 365 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,382 _NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 366 383 size_t count, bool guarded, as_area_t *avoid) 367 384 { … … 457 474 * 458 475 */ 459 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,476 _NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound, 460 477 size_t size, bool guarded) 461 478 { … … 524 541 } 525 542 543 /** Get key function for pagemap ordered dictionary. 544 * 545 * The key is the virtual address of the page (as_page_mapping_t.vaddr) 546 * 547 * @param odlink Link to as_pagemap_t.map ordered dictionary 548 * @return Pointer to virtual address cast as @c void * 549 */ 550 static void *as_pagemap_getkey(odlink_t *odlink) 551 { 552 as_page_mapping_t *mapping; 553 554 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap); 555 return (void *) &mapping->vaddr; 556 } 557 558 /** Comparison function for pagemap ordered dictionary. 559 * 560 * @param a Pointer to virtual address cast as @c void * 561 * @param b Pointer to virtual address cast as @c void * 562 * @return <0, =0, >0 if virtual address a is less than, equal to, or 563 * greater than b, respectively. 
564 */ 565 static int as_pagemap_cmp(void *a, void *b) 566 { 567 uintptr_t va = *(uintptr_t *)a; 568 uintptr_t vb = *(uintptr_t *)b; 569 570 if (va < vb) 571 return -1; 572 else if (va == vb) 573 return 0; 574 else 575 return +1; 576 } 577 578 /** Initialize pagemap. 579 * 580 * @param pagemap Pagemap 581 */ 582 _NO_TRACE void as_pagemap_initialize(as_pagemap_t *pagemap) 583 { 584 odict_initialize(&pagemap->map, as_pagemap_getkey, as_pagemap_cmp); 585 } 586 587 /** Finalize pagemap. 588 * 589 * Destroy any entries in the pagemap. 590 * 591 * @param pagemap Pagemap 592 */ 593 _NO_TRACE void as_pagemap_finalize(as_pagemap_t *pagemap) 594 { 595 as_page_mapping_t *mapping = as_pagemap_first(pagemap); 596 while (mapping != NULL) { 597 as_pagemap_remove(mapping); 598 mapping = as_pagemap_first(pagemap); 599 } 600 odict_finalize(&pagemap->map); 601 } 602 603 /** Get first page mapping. 604 * 605 * @param pagemap Pagemap 606 * @return First mapping or @c NULL if there is none 607 */ 608 _NO_TRACE as_page_mapping_t *as_pagemap_first(as_pagemap_t *pagemap) 609 { 610 odlink_t *odlink; 611 612 odlink = odict_first(&pagemap->map); 613 if (odlink == NULL) 614 return NULL; 615 616 return odict_get_instance(odlink, as_page_mapping_t, lpagemap); 617 } 618 619 /** Get next page mapping. 620 * 621 * @param cur Current mapping 622 * @return Next mapping or @c NULL if @a cur is the last one 623 */ 624 _NO_TRACE as_page_mapping_t *as_pagemap_next(as_page_mapping_t *cur) 625 { 626 odlink_t *odlink; 627 628 odlink = odict_next(&cur->lpagemap, &cur->pagemap->map); 629 if (odlink == NULL) 630 return NULL; 631 632 return odict_get_instance(odlink, as_page_mapping_t, lpagemap); 633 } 634 635 /** Find frame by virtual address. 
636 * 637 * @param pagemap Pagemap 638 * @param vaddr Virtual address of page 639 * @param rframe Place to store physical frame address 640 * @return EOK on succcess or ENOENT if no mapping found 641 */ 642 _NO_TRACE errno_t as_pagemap_find(as_pagemap_t *pagemap, uintptr_t vaddr, 643 uintptr_t *rframe) 644 { 645 odlink_t *odlink; 646 as_page_mapping_t *mapping; 647 648 odlink = odict_find_eq(&pagemap->map, &vaddr, NULL); 649 if (odlink == NULL) 650 return ENOENT; 651 652 mapping = odict_get_instance(odlink, as_page_mapping_t, lpagemap); 653 *rframe = mapping->frame; 654 return EOK; 655 } 656 657 /** Insert new page mapping. 658 * 659 * This function can block to allocate kernel memory. 660 * 661 * @param pagemap Pagemap 662 * @param vaddr Virtual page address 663 * @param frame Physical frame address 664 */ 665 _NO_TRACE void as_pagemap_insert(as_pagemap_t *pagemap, uintptr_t vaddr, 666 uintptr_t frame) 667 { 668 as_page_mapping_t *mapping; 669 670 mapping = slab_alloc(as_page_mapping_cache, 0); 671 mapping->pagemap = pagemap; 672 odlink_initialize(&mapping->lpagemap); 673 mapping->vaddr = vaddr; 674 mapping->frame = frame; 675 odict_insert(&mapping->lpagemap, &pagemap->map, NULL); 676 } 677 678 /** Remove page mapping. 679 * 680 * @param mapping Mapping 681 */ 682 _NO_TRACE void as_pagemap_remove(as_page_mapping_t *mapping) 683 { 684 odict_remove(&mapping->lpagemap); 685 slab_free(as_page_mapping_cache, mapping); 686 } 687 526 688 /** Remove reference to address space area share info. 527 689 * … … 531 693 * 532 694 */ 533 NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)695 _NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info) 534 696 { 535 697 bool dealloc = false; … … 541 703 dealloc = true; 542 704 543 /* 544 * Now walk carefully the pagemap B+tree and free/remove 545 * reference from all frames found there. 
546 */ 547 list_foreach(sh_info->pagemap.leaf_list, leaf_link, 548 btree_node_t, node) { 549 btree_key_t i; 550 551 for (i = 0; i < node->keys; i++) 552 frame_free((uintptr_t) node->value[i], 1); 705 as_page_mapping_t *mapping = as_pagemap_first(&sh_info->pagemap); 706 while (mapping != NULL) { 707 frame_free(mapping->frame, 1); 708 mapping = as_pagemap_next(mapping); 553 709 } 554 710 … … 561 717 sh_info->backend_shared_data); 562 718 } 563 btree_destroy(&sh_info->pagemap);719 as_pagemap_finalize(&sh_info->pagemap); 564 720 free(sh_info); 565 721 } … … 636 792 area->attributes = attrs; 637 793 area->pages = pages; 638 area->resident = 0;639 794 area->base = *base; 640 795 area->backend = backend; … … 665 820 si->backend_shared_data = NULL; 666 821 si->backend = backend; 667 btree_create(&si->pagemap);822 as_pagemap_initialize(&si->pagemap); 668 823 669 824 area->sh_info = si; … … 689 844 } 690 845 691 btree_create(&area->used_space);846 used_space_initialize(&area->used_space); 692 847 odict_insert(&area->las_areas, &as->as_areas, NULL); 693 848 … … 706 861 * 707 862 */ 708 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va)863 _NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 709 864 { 710 865 assert(mutex_locked(&as->lock)); … … 797 952 798 953 /* 954 * Start TLB shootdown sequence. 955 */ 956 957 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, 958 as->asid, area->base + P2SZ(pages), 959 area->pages - pages); 960 961 /* 799 962 * Remove frames belonging to used space starting from 800 963 * the highest addresses downwards until an overlap with 801 * the resized address space area is found. Note that this 802 * is also the right way to remove part of the used_space 803 * B+tree leaf list. 964 * the resized address space area is found. 
804 965 */ 805 966 bool cond = true; 806 967 while (cond) { 807 assert(!list_empty(&area->used_space.leaf_list)); 808 809 btree_node_t *node = 810 list_get_instance(list_last(&area->used_space.leaf_list), 811 btree_node_t, leaf_link); 812 813 if ((cond = (node->keys != 0))) { 814 uintptr_t ptr = node->key[node->keys - 1]; 815 size_t node_size = 816 (size_t) node->value[node->keys - 1]; 817 size_t i = 0; 818 819 if (overlaps(ptr, P2SZ(node_size), area->base, 820 P2SZ(pages))) { 821 822 if (ptr + P2SZ(node_size) <= start_free) { 823 /* 824 * The whole interval fits 825 * completely in the resized 826 * address space area. 827 */ 828 break; 829 } 830 968 used_space_ival_t *ival = 969 used_space_last(&area->used_space); 970 assert(ival != NULL); 971 972 uintptr_t ptr = ival->page; 973 size_t pcount = ival->count; 974 size_t i = 0; 975 976 if (overlaps(ptr, P2SZ(pcount), area->base, 977 P2SZ(pages))) { 978 979 if (ptr + P2SZ(pcount) <= start_free) { 831 980 /* 832 * Part of the interval corresponding 833 * to b and c overlaps with the resized 834 * address space area. 981 * The whole interval fits completely 982 * in the resized address space area. 835 983 */ 836 837 /* We are almost done */ 838 cond = false; 839 i = (start_free - ptr) >> PAGE_WIDTH; 840 if (!used_space_remove(area, start_free, 841 node_size - i)) 842 panic("Cannot remove used space."); 843 } else { 844 /* 845 * The interval of used space can be 846 * completely removed. 847 */ 848 if (!used_space_remove(area, ptr, node_size)) 849 panic("Cannot remove used space."); 984 break; 850 985 } 851 986 852 987 /* 853 * Start TLB shootdown sequence. 854 * 855 * The sequence is rather short and can be 856 * repeated multiple times. The reason is that 857 * we don't want to have used_space_remove() 858 * inside the sequence as it may use a blocking 859 * memory allocation for its B+tree. Blocking 860 * while holding the tlblock spinlock is 861 * forbidden and would hit a kernel assertion. 
988 * Part of the interval corresponding to b and 989 * c overlaps with the resized address space 990 * area. 862 991 */ 863 992 864 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, 865 as->asid, area->base + P2SZ(pages), 866 area->pages - pages); 867 868 for (; i < node_size; i++) { 869 pte_t pte; 870 bool found = page_mapping_find(as, 871 ptr + P2SZ(i), false, &pte); 872 873 (void) found; 874 assert(found); 875 assert(PTE_VALID(&pte)); 876 assert(PTE_PRESENT(&pte)); 877 878 if ((area->backend) && 879 (area->backend->frame_free)) { 880 area->backend->frame_free(area, 881 ptr + P2SZ(i), 882 PTE_GET_FRAME(&pte)); 883 } 884 885 page_mapping_remove(as, ptr + P2SZ(i)); 993 /* We are almost done */ 994 cond = false; 995 i = (start_free - ptr) >> PAGE_WIDTH; 996 997 /* Shorten the interval to @c i pages */ 998 used_space_shorten_ival(ival, i); 999 } else { 1000 /* 1001 * The interval of used space can be completely 1002 * removed. 1003 */ 1004 used_space_remove_ival(ival); 1005 } 1006 1007 for (; i < pcount; i++) { 1008 pte_t pte; 1009 bool found = page_mapping_find(as, 1010 ptr + P2SZ(i), false, &pte); 1011 1012 (void) found; 1013 assert(found); 1014 assert(PTE_VALID(&pte)); 1015 assert(PTE_PRESENT(&pte)); 1016 1017 if ((area->backend) && 1018 (area->backend->frame_free)) { 1019 area->backend->frame_free(area, 1020 ptr + P2SZ(i), 1021 PTE_GET_FRAME(&pte)); 886 1022 } 887 1023 888 /* 889 * Finish TLB shootdown sequence. 890 */ 891 892 tlb_invalidate_pages(as->asid, 893 area->base + P2SZ(pages), 894 area->pages - pages); 895 896 /* 897 * Invalidate software translation caches 898 * (e.g. TSB on sparc64, PHT on ppc32). 899 */ 900 as_invalidate_translation_cache(as, 901 area->base + P2SZ(pages), 902 area->pages - pages); 903 tlb_shootdown_finalize(ipl); 1024 page_mapping_remove(as, ptr + P2SZ(i)); 904 1025 } 1026 905 1027 } 1028 1029 /* 1030 * Finish TLB shootdown sequence. 
1031 */ 1032 1033 tlb_invalidate_pages(as->asid, 1034 area->base + P2SZ(pages), 1035 area->pages - pages); 1036 1037 /* 1038 * Invalidate software translation caches 1039 * (e.g. TSB on sparc64, PHT on ppc32). 1040 */ 1041 as_invalidate_translation_cache(as, 1042 area->base + P2SZ(pages), 1043 area->pages - pages); 1044 tlb_shootdown_finalize(ipl); 1045 906 1046 page_table_unlock(as, false); 907 1047 } else { … … 962 1102 963 1103 page_table_lock(as, false); 964 965 1104 /* 966 1105 * Start TLB shootdown sequence. … … 970 1109 971 1110 /* 972 * Visit only the pages mapped by used_space B+tree. 973 */ 974 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 975 node) { 976 btree_key_t i; 977 978 for (i = 0; i < node->keys; i++) { 979 uintptr_t ptr = node->key[i]; 980 size_t size; 981 982 for (size = 0; size < (size_t) node->value[i]; size++) { 983 pte_t pte; 984 bool found = page_mapping_find(as, 985 ptr + P2SZ(size), false, &pte); 986 987 (void) found; 988 assert(found); 989 assert(PTE_VALID(&pte)); 990 assert(PTE_PRESENT(&pte)); 991 992 if ((area->backend) && 993 (area->backend->frame_free)) { 994 area->backend->frame_free(area, 995 ptr + P2SZ(size), 996 PTE_GET_FRAME(&pte)); 997 } 998 999 page_mapping_remove(as, ptr + P2SZ(size)); 1111 * Visit only the pages mapped by used_space. 
1112 */ 1113 used_space_ival_t *ival = used_space_first(&area->used_space); 1114 while (ival != NULL) { 1115 uintptr_t ptr = ival->page; 1116 1117 for (size_t size = 0; size < ival->count; size++) { 1118 pte_t pte; 1119 bool found = page_mapping_find(as, 1120 ptr + P2SZ(size), false, &pte); 1121 1122 (void) found; 1123 assert(found); 1124 assert(PTE_VALID(&pte)); 1125 assert(PTE_PRESENT(&pte)); 1126 1127 if ((area->backend) && 1128 (area->backend->frame_free)) { 1129 area->backend->frame_free(area, 1130 ptr + P2SZ(size), 1131 PTE_GET_FRAME(&pte)); 1000 1132 } 1133 1134 page_mapping_remove(as, ptr + P2SZ(size)); 1001 1135 } 1136 1137 used_space_remove_ival(ival); 1138 ival = used_space_first(&area->used_space); 1002 1139 } 1003 1140 … … 1017 1154 page_table_unlock(as, false); 1018 1155 1019 btree_destroy(&area->used_space); 1020 1156 used_space_finalize(&area->used_space); 1021 1157 area->attributes |= AS_AREA_ATTR_PARTIAL; 1022 1023 1158 sh_info_remove_reference(area->sh_info); 1024 1159 … … 1170 1305 * 1171 1306 */ 1172 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)1307 _NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 1173 1308 { 1174 1309 assert(mutex_locked(&area->lock)); … … 1193 1328 * 1194 1329 */ 1195 NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)1330 _NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags) 1196 1331 { 1197 1332 unsigned int flags = PAGE_USER | PAGE_PRESENT; … … 1212 1347 } 1213 1348 1214 /** Change ad ress space area flags.1349 /** Change address space area flags. 1215 1350 * 1216 1351 * The idea is to have the same data, but with a different access mode. 
… … 1256 1391 mutex_unlock(&area->sh_info->lock); 1257 1392 1258 /*1259 * Compute total number of used pages in the used_space B+tree1260 */1261 size_t used_pages = 0;1262 1263 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,1264 node) {1265 btree_key_t i;1266 1267 for (i = 0; i < node->keys; i++)1268 used_pages += (size_t) node->value[i];1269 }1270 1271 1393 /* An array for storing frame numbers */ 1272 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t)); 1394 uintptr_t *old_frame = malloc(area->used_space.pages * 1395 sizeof(uintptr_t)); 1273 1396 if (!old_frame) { 1274 1397 mutex_unlock(&area->lock); … … 1291 1414 size_t frame_idx = 0; 1292 1415 1293 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1294 node) { 1295 btree_key_t i; 1296 1297 for (i = 0; i < node->keys; i++) { 1298 uintptr_t ptr = node->key[i]; 1299 size_t size; 1300 1301 for (size = 0; size < (size_t) node->value[i]; size++) { 1302 pte_t pte; 1303 bool found = page_mapping_find(as, 1304 ptr + P2SZ(size), false, &pte); 1305 1306 (void) found; 1307 assert(found); 1308 assert(PTE_VALID(&pte)); 1309 assert(PTE_PRESENT(&pte)); 1310 1311 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); 1312 1313 /* Remove old mapping */ 1314 page_mapping_remove(as, ptr + P2SZ(size)); 1315 } 1416 used_space_ival_t *ival = used_space_first(&area->used_space); 1417 while (ival != NULL) { 1418 uintptr_t ptr = ival->page; 1419 size_t size; 1420 1421 for (size = 0; size < ival->count; size++) { 1422 pte_t pte; 1423 bool found = page_mapping_find(as, ptr + P2SZ(size), 1424 false, &pte); 1425 1426 (void) found; 1427 assert(found); 1428 assert(PTE_VALID(&pte)); 1429 assert(PTE_PRESENT(&pte)); 1430 1431 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); 1432 1433 /* Remove old mapping */ 1434 page_mapping_remove(as, ptr + P2SZ(size)); 1316 1435 } 1436 1437 ival = used_space_next(ival); 1317 1438 } 1318 1439 … … 1344 1465 frame_idx = 0; 1345 1466 1346 list_foreach(area->used_space.leaf_list, 
leaf_link, btree_node_t, 1347 node) { 1348 btree_key_t i; 1349 1350 for (i = 0; i < node->keys; i++) { 1351 uintptr_t ptr = node->key[i]; 1352 size_t size; 1353 1354 for (size = 0; size < (size_t) node->value[i]; size++) { 1355 page_table_lock(as, false); 1356 1357 /* Insert the new mapping */ 1358 page_mapping_insert(as, ptr + P2SZ(size), 1359 old_frame[frame_idx++], page_flags); 1360 1361 page_table_unlock(as, false); 1362 } 1467 ival = used_space_first(&area->used_space); 1468 while (ival != NULL) { 1469 uintptr_t ptr = ival->page; 1470 size_t size; 1471 1472 for (size = 0; size < ival->count; size++) { 1473 page_table_lock(as, false); 1474 1475 /* Insert the new mapping */ 1476 page_mapping_insert(as, ptr + P2SZ(size), 1477 old_frame[frame_idx++], page_flags); 1478 1479 page_table_unlock(as, false); 1363 1480 } 1481 1482 ival = used_space_next(ival); 1364 1483 } 1365 1484 … … 1579 1698 * 1580 1699 */ 1581 NO_TRACE unsigned int as_area_get_flags(as_area_t *area)1700 _NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1582 1701 { 1583 1702 assert(mutex_locked(&area->lock)); … … 1627 1746 * 1628 1747 */ 1629 NO_TRACE pte_t *page_table_create(unsigned int flags)1748 _NO_TRACE pte_t *page_table_create(unsigned int flags) 1630 1749 { 1631 1750 assert(as_operations); … … 1642 1761 * 1643 1762 */ 1644 NO_TRACE void page_table_destroy(pte_t *page_table)1763 _NO_TRACE void page_table_destroy(pte_t *page_table) 1645 1764 { 1646 1765 assert(as_operations); … … 1663 1782 * 1664 1783 */ 1665 NO_TRACE void page_table_lock(as_t *as, bool lock)1784 _NO_TRACE void page_table_lock(as_t *as, bool lock) 1666 1785 { 1667 1786 assert(as_operations); … … 1677 1796 * 1678 1797 */ 1679 NO_TRACE void page_table_unlock(as_t *as, bool unlock)1798 _NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1680 1799 { 1681 1800 assert(as_operations); … … 1692 1811 * are locked, otherwise false. 
1693 1812 */ 1694 NO_TRACE bool page_table_locked(as_t *as)1813 _NO_TRACE bool page_table_locked(as_t *as) 1695 1814 { 1696 1815 assert(as_operations); … … 1725 1844 } 1726 1845 1846 /** Initialize used space map. 1847 * 1848 * @param used_space Used space map 1849 */ 1850 static void used_space_initialize(used_space_t *used_space) 1851 { 1852 odict_initialize(&used_space->ivals, used_space_getkey, used_space_cmp); 1853 used_space->pages = 0; 1854 } 1855 1856 /** Finalize used space map. 1857 * 1858 * @param used_space Used space map 1859 */ 1860 static void used_space_finalize(used_space_t *used_space) 1861 { 1862 assert(odict_empty(&used_space->ivals)); 1863 odict_finalize(&used_space->ivals); 1864 } 1865 1866 /** Get first interval of used space. 1867 * 1868 * @param used_space Used space map 1869 * @return First interval or @c NULL if there are none 1870 */ 1871 used_space_ival_t *used_space_first(used_space_t *used_space) 1872 { 1873 odlink_t *odlink = odict_first(&used_space->ivals); 1874 1875 if (odlink == NULL) 1876 return NULL; 1877 1878 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1879 } 1880 1881 /** Get next interval of used space. 1882 * 1883 * @param cur Current interval 1884 * @return Next interval or @c NULL if there are none 1885 */ 1886 used_space_ival_t *used_space_next(used_space_ival_t *cur) 1887 { 1888 odlink_t *odlink = odict_next(&cur->lused_space, 1889 &cur->used_space->ivals); 1890 1891 if (odlink == NULL) 1892 return NULL; 1893 1894 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1895 } 1896 1897 /** Get last interval of used space. 
1898 * 1899 * @param used_space Used space map 1900 * @return First interval or @c NULL if there are none 1901 */ 1902 static used_space_ival_t *used_space_last(used_space_t *used_space) 1903 { 1904 odlink_t *odlink = odict_last(&used_space->ivals); 1905 1906 if (odlink == NULL) 1907 return NULL; 1908 1909 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1910 } 1911 1912 /** Find the first interval that contains addresses greater than or equal to 1913 * @a ptr. 1914 * 1915 * @param used_space Used space map 1916 * @param ptr Virtual address 1917 * 1918 * @return Used space interval or @c NULL if none matches 1919 */ 1920 used_space_ival_t *used_space_find_gteq(used_space_t *used_space, uintptr_t ptr) 1921 { 1922 odlink_t *odlink; 1923 used_space_ival_t *ival; 1924 1925 /* Find last interval to start at address less than @a ptr */ 1926 odlink = odict_find_lt(&used_space->ivals, &ptr, NULL); 1927 if (odlink != NULL) { 1928 ival = odict_get_instance(odlink, used_space_ival_t, 1929 lused_space); 1930 1931 /* If the interval extends above @a ptr, return it */ 1932 if (ival->page + P2SZ(ival->count) > ptr) 1933 return ival; 1934 1935 /* 1936 * Otherwise, if a next interval exists, it must match 1937 * the criteria. 1938 */ 1939 odlink = odict_next(&ival->lused_space, &used_space->ivals); 1940 } else { 1941 /* 1942 * No interval with lower base address, so if there is any 1943 * interval at all, it must match the criteria 1944 */ 1945 odlink = odict_first(&used_space->ivals); 1946 } 1947 1948 if (odlink != NULL) { 1949 ival = odict_get_instance(odlink, used_space_ival_t, 1950 lused_space); 1951 return ival; 1952 } 1953 1954 return NULL; 1955 } 1956 1957 /** Get key function for used space ordered dictionary. 1958 * 1959 * The key is the virtual address of the first page 1960 * 1961 * @param odlink Ordered dictionary link (used_space_ival_t.lused_space) 1962 * @return Pointer to virtual address of first page cast as @c void *. 
1963 */ 1964 static void *used_space_getkey(odlink_t *odlink) 1965 { 1966 used_space_ival_t *ival = odict_get_instance(odlink, used_space_ival_t, 1967 lused_space); 1968 return (void *) &ival->page; 1969 } 1970 1971 /** Compare function for used space ordered dictionary. 1972 * 1973 * @param a Pointer to virtual address of first page cast as @c void * 1974 * @param b Pointer to virtual address of first page cast as @c void * 1975 * @return Less than zero, zero, greater than zero if virtual address @a a 1976 * is less than, equal to, greater than virtual address b, respectively. 1977 */ 1978 static int used_space_cmp(void *a, void *b) 1979 { 1980 uintptr_t va = *(uintptr_t *) a; 1981 uintptr_t vb = *(uintptr_t *) b; 1982 1983 if (va < vb) 1984 return -1; 1985 else if (va == vb) 1986 return 0; 1987 else 1988 return +1; 1989 } 1990 1991 /** Remove used space interval. 1992 * 1993 * @param ival Used space interval 1994 */ 1995 static void used_space_remove_ival(used_space_ival_t *ival) 1996 { 1997 ival->used_space->pages -= ival->count; 1998 odict_remove(&ival->lused_space); 1999 slab_free(used_space_ival_cache, ival); 2000 } 2001 2002 /** Shorten used space interval. 2003 * 2004 * @param ival Used space interval 2005 * @param count New number of pages in the interval 2006 */ 2007 static void used_space_shorten_ival(used_space_ival_t *ival, size_t count) 2008 { 2009 assert(count > 0); 2010 assert(count < ival->count); 2011 2012 ival->used_space->pages -= ival->count - count; 2013 ival->count = count; 2014 } 2015 1727 2016 /** Mark portion of address space area as used. 1728 2017 * 1729 2018 * The address space area must be already locked. 1730 2019 * 1731 * @param area Address space area.1732 * @param page First page to be marked.2020 * @param used_space Used space map 2021 * @param page First page to be marked. 1733 2022 * @param count Number of page to be marked. 
1734 2023 * … … 1736 2025 * 1737 2026 */ 1738 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1739 { 1740 assert(mutex_locked(&area->lock)); 2027 bool used_space_insert(used_space_t *used_space, uintptr_t page, size_t count) 2028 { 2029 used_space_ival_t *a; 2030 used_space_ival_t *b; 2031 bool adj_a; 2032 bool adj_b; 2033 odlink_t *odlink; 2034 used_space_ival_t *ival; 2035 1741 2036 assert(IS_ALIGNED(page, PAGE_SIZE)); 1742 2037 assert(count); 1743 2038 1744 btree_node_t *leaf = NULL; 1745 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1746 if (pages) { 1747 /* 1748 * We hit the beginning of some used space. 1749 */ 2039 /* Interval to the left */ 2040 odlink = odict_find_lt(&used_space->ivals, &page, NULL); 2041 a = (odlink != NULL) ? 2042 odict_get_instance(odlink, used_space_ival_t, lused_space) : 2043 NULL; 2044 2045 /* Interval to the right */ 2046 b = (a != NULL) ? used_space_next(a) : 2047 used_space_first(used_space); 2048 2049 /* Check for conflict with left interval */ 2050 if (a != NULL && overlaps(a->page, P2SZ(a->count), page, P2SZ(count))) 1750 2051 return false; 1751 } 1752 1753 assert(leaf != NULL); 1754 1755 if (!leaf->keys) { 1756 btree_insert(&area->used_space, page, (void *) count, leaf); 1757 goto success; 1758 } 1759 1760 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1761 if (node) { 1762 uintptr_t left_pg = node->key[node->keys - 1]; 1763 uintptr_t right_pg = leaf->key[0]; 1764 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1765 size_t right_cnt = (size_t) leaf->value[0]; 1766 1767 /* 1768 * Examine the possibility that the interval fits 1769 * somewhere between the rightmost interval of 1770 * the left neigbour and the first interval of the leaf. 1771 */ 1772 1773 if (page >= right_pg) { 1774 /* Do nothing. */ 1775 } else if (overlaps(page, P2SZ(count), left_pg, 1776 P2SZ(left_cnt))) { 1777 /* The interval intersects with the left interval. 
*/ 1778 return false; 1779 } else if (overlaps(page, P2SZ(count), right_pg, 1780 P2SZ(right_cnt))) { 1781 /* The interval intersects with the right interval. */ 1782 return false; 1783 } else if ((page == left_pg + P2SZ(left_cnt)) && 1784 (page + P2SZ(count) == right_pg)) { 1785 /* 1786 * The interval can be added by merging the two already 1787 * present intervals. 1788 */ 1789 node->value[node->keys - 1] += count + right_cnt; 1790 btree_remove(&area->used_space, right_pg, leaf); 1791 goto success; 1792 } else if (page == left_pg + P2SZ(left_cnt)) { 1793 /* 1794 * The interval can be added by simply growing the left 1795 * interval. 1796 */ 1797 node->value[node->keys - 1] += count; 1798 goto success; 1799 } else if (page + P2SZ(count) == right_pg) { 1800 /* 1801 * The interval can be addded by simply moving base of 1802 * the right interval down and increasing its size 1803 * accordingly. 1804 */ 1805 leaf->value[0] += count; 1806 leaf->key[0] = page; 1807 goto success; 1808 } else { 1809 /* 1810 * The interval is between both neigbouring intervals, 1811 * but cannot be merged with any of them. 1812 */ 1813 btree_insert(&area->used_space, page, (void *) count, 1814 leaf); 1815 goto success; 1816 } 1817 } else if (page < leaf->key[0]) { 1818 uintptr_t right_pg = leaf->key[0]; 1819 size_t right_cnt = (size_t) leaf->value[0]; 1820 1821 /* 1822 * Investigate the border case in which the left neighbour does 1823 * not exist but the interval fits from the left. 1824 */ 1825 1826 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) { 1827 /* The interval intersects with the right interval. */ 1828 return false; 1829 } else if (page + P2SZ(count) == right_pg) { 1830 /* 1831 * The interval can be added by moving the base of the 1832 * right interval down and increasing its size 1833 * accordingly. 1834 */ 1835 leaf->key[0] = page; 1836 leaf->value[0] += count; 1837 goto success; 1838 } else { 1839 /* 1840 * The interval doesn't adjoin with the right interval. 
1841 * It must be added individually. 1842 */ 1843 btree_insert(&area->used_space, page, (void *) count, 1844 leaf); 1845 goto success; 1846 } 1847 } 1848 1849 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1850 if (node) { 1851 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1852 uintptr_t right_pg = node->key[0]; 1853 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1854 size_t right_cnt = (size_t) node->value[0]; 1855 1856 /* 1857 * Examine the possibility that the interval fits 1858 * somewhere between the leftmost interval of 1859 * the right neigbour and the last interval of the leaf. 1860 */ 1861 1862 if (page < left_pg) { 1863 /* Do nothing. */ 1864 } else if (overlaps(page, P2SZ(count), left_pg, 1865 P2SZ(left_cnt))) { 1866 /* The interval intersects with the left interval. */ 1867 return false; 1868 } else if (overlaps(page, P2SZ(count), right_pg, 1869 P2SZ(right_cnt))) { 1870 /* The interval intersects with the right interval. */ 1871 return false; 1872 } else if ((page == left_pg + P2SZ(left_cnt)) && 1873 (page + P2SZ(count) == right_pg)) { 1874 /* 1875 * The interval can be added by merging the two already 1876 * present intervals. 1877 */ 1878 leaf->value[leaf->keys - 1] += count + right_cnt; 1879 btree_remove(&area->used_space, right_pg, node); 1880 goto success; 1881 } else if (page == left_pg + P2SZ(left_cnt)) { 1882 /* 1883 * The interval can be added by simply growing the left 1884 * interval. 1885 */ 1886 leaf->value[leaf->keys - 1] += count; 1887 goto success; 1888 } else if (page + P2SZ(count) == right_pg) { 1889 /* 1890 * The interval can be addded by simply moving base of 1891 * the right interval down and increasing its size 1892 * accordingly. 1893 */ 1894 node->value[0] += count; 1895 node->key[0] = page; 1896 goto success; 1897 } else { 1898 /* 1899 * The interval is between both neigbouring intervals, 1900 * but cannot be merged with any of them. 
1901 */ 1902 btree_insert(&area->used_space, page, (void *) count, 1903 leaf); 1904 goto success; 1905 } 1906 } else if (page >= leaf->key[leaf->keys - 1]) { 1907 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1908 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1909 1910 /* 1911 * Investigate the border case in which the right neighbour 1912 * does not exist but the interval fits from the right. 1913 */ 1914 1915 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) { 1916 /* The interval intersects with the left interval. */ 1917 return false; 1918 } else if (left_pg + P2SZ(left_cnt) == page) { 1919 /* 1920 * The interval can be added by growing the left 1921 * interval. 1922 */ 1923 leaf->value[leaf->keys - 1] += count; 1924 goto success; 1925 } else { 1926 /* 1927 * The interval doesn't adjoin with the left interval. 1928 * It must be added individually. 1929 */ 1930 btree_insert(&area->used_space, page, (void *) count, 1931 leaf); 1932 goto success; 1933 } 1934 } 1935 1936 /* 1937 * Note that if the algorithm made it thus far, the interval can fit 1938 * only between two other intervals of the leaf. The two border cases 1939 * were already resolved. 1940 */ 1941 btree_key_t i; 1942 for (i = 1; i < leaf->keys; i++) { 1943 if (page < leaf->key[i]) { 1944 uintptr_t left_pg = leaf->key[i - 1]; 1945 uintptr_t right_pg = leaf->key[i]; 1946 size_t left_cnt = (size_t) leaf->value[i - 1]; 1947 size_t right_cnt = (size_t) leaf->value[i]; 1948 1949 /* 1950 * The interval fits between left_pg and right_pg. 1951 */ 1952 1953 if (overlaps(page, P2SZ(count), left_pg, 1954 P2SZ(left_cnt))) { 1955 /* 1956 * The interval intersects with the left 1957 * interval. 1958 */ 1959 return false; 1960 } else if (overlaps(page, P2SZ(count), right_pg, 1961 P2SZ(right_cnt))) { 1962 /* 1963 * The interval intersects with the right 1964 * interval. 
1965 */ 1966 return false; 1967 } else if ((page == left_pg + P2SZ(left_cnt)) && 1968 (page + P2SZ(count) == right_pg)) { 1969 /* 1970 * The interval can be added by merging the two 1971 * already present intervals. 1972 */ 1973 leaf->value[i - 1] += count + right_cnt; 1974 btree_remove(&area->used_space, right_pg, leaf); 1975 goto success; 1976 } else if (page == left_pg + P2SZ(left_cnt)) { 1977 /* 1978 * The interval can be added by simply growing 1979 * the left interval. 1980 */ 1981 leaf->value[i - 1] += count; 1982 goto success; 1983 } else if (page + P2SZ(count) == right_pg) { 1984 /* 1985 * The interval can be addded by simply moving 1986 * base of the right interval down and 1987 * increasing its size accordingly. 1988 */ 1989 leaf->value[i] += count; 1990 leaf->key[i] = page; 1991 goto success; 1992 } else { 1993 /* 1994 * The interval is between both neigbouring 1995 * intervals, but cannot be merged with any of 1996 * them. 1997 */ 1998 btree_insert(&area->used_space, page, 1999 (void *) count, leaf); 2000 goto success; 2001 } 2002 } 2003 } 2004 2005 panic("Inconsistency detected while adding %zu pages of used " 2006 "space at %p.", count, (void *) page); 2007 2008 success: 2009 area->resident += count; 2010 return true; 2011 } 2012 2013 /** Mark portion of address space area as unused. 2014 * 2015 * The address space area must be already locked. 2016 * 2017 * @param area Address space area. 2018 * @param page First page to be marked. 2019 * @param count Number of page to be marked. 2020 * 2021 * @return False on failure or true on success. 2022 * 2023 */ 2024 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 2025 { 2026 assert(mutex_locked(&area->lock)); 2027 assert(IS_ALIGNED(page, PAGE_SIZE)); 2028 assert(count); 2029 2030 btree_node_t *leaf; 2031 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 2032 if (pages) { 2033 /* 2034 * We are lucky, page is the beginning of some interval. 
2035 */ 2036 if (count > pages) { 2037 return false; 2038 } else if (count == pages) { 2039 btree_remove(&area->used_space, page, leaf); 2040 goto success; 2041 } else { 2042 /* 2043 * Find the respective interval. 2044 * Decrease its size and relocate its start address. 2045 */ 2046 btree_key_t i; 2047 for (i = 0; i < leaf->keys; i++) { 2048 if (leaf->key[i] == page) { 2049 leaf->key[i] += P2SZ(count); 2050 leaf->value[i] -= count; 2051 goto success; 2052 } 2053 } 2054 2055 goto error; 2056 } 2057 } 2058 2059 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, 2060 leaf); 2061 if ((node) && (page < leaf->key[0])) { 2062 uintptr_t left_pg = node->key[node->keys - 1]; 2063 size_t left_cnt = (size_t) node->value[node->keys - 1]; 2064 2065 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 2066 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 2067 /* 2068 * The interval is contained in the rightmost 2069 * interval of the left neighbour and can be 2070 * removed by updating the size of the bigger 2071 * interval. 2072 */ 2073 node->value[node->keys - 1] -= count; 2074 goto success; 2075 } else if (page + P2SZ(count) < 2076 left_pg + P2SZ(left_cnt)) { 2077 size_t new_cnt; 2078 2079 /* 2080 * The interval is contained in the rightmost 2081 * interval of the left neighbour but its 2082 * removal requires both updating the size of 2083 * the original interval and also inserting a 2084 * new interval. 
2085 */ 2086 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2087 (page + P2SZ(count))) >> PAGE_WIDTH; 2088 node->value[node->keys - 1] -= count + new_cnt; 2089 btree_insert(&area->used_space, page + 2090 P2SZ(count), (void *) new_cnt, leaf); 2091 goto success; 2092 } 2093 } 2094 2052 2053 /* Check for conflict with right interval */ 2054 if (b != NULL && overlaps(page, P2SZ(count), b->page, P2SZ(b->count))) 2095 2055 return false; 2096 } else if (page < leaf->key[0]) 2097 return false; 2098 2099 if (page > leaf->key[leaf->keys - 1]) { 2100 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 2101 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 2102 2103 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 2104 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 2105 /* 2106 * The interval is contained in the rightmost 2107 * interval of the leaf and can be removed by 2108 * updating the size of the bigger interval. 2109 */ 2110 leaf->value[leaf->keys - 1] -= count; 2111 goto success; 2112 } else if (page + P2SZ(count) < left_pg + 2113 P2SZ(left_cnt)) { 2114 size_t new_cnt; 2115 2116 /* 2117 * The interval is contained in the rightmost 2118 * interval of the leaf but its removal 2119 * requires both updating the size of the 2120 * original interval and also inserting a new 2121 * interval. 2122 */ 2123 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2124 (page + P2SZ(count))) >> PAGE_WIDTH; 2125 leaf->value[leaf->keys - 1] -= count + new_cnt; 2126 btree_insert(&area->used_space, page + 2127 P2SZ(count), (void *) new_cnt, leaf); 2128 goto success; 2129 } 2130 } 2131 2132 return false; 2133 } 2134 2135 /* 2136 * The border cases have been already resolved. 2137 * Now the interval can be only between intervals of the leaf. 
2138 */ 2139 btree_key_t i; 2140 for (i = 1; i < leaf->keys - 1; i++) { 2141 if (page < leaf->key[i]) { 2142 uintptr_t left_pg = leaf->key[i - 1]; 2143 size_t left_cnt = (size_t) leaf->value[i - 1]; 2144 2145 /* 2146 * Now the interval is between intervals corresponding 2147 * to (i - 1) and i. 2148 */ 2149 if (overlaps(left_pg, P2SZ(left_cnt), page, 2150 P2SZ(count))) { 2151 if (page + P2SZ(count) == 2152 left_pg + P2SZ(left_cnt)) { 2153 /* 2154 * The interval is contained in the 2155 * interval (i - 1) of the leaf and can 2156 * be removed by updating the size of 2157 * the bigger interval. 2158 */ 2159 leaf->value[i - 1] -= count; 2160 goto success; 2161 } else if (page + P2SZ(count) < 2162 left_pg + P2SZ(left_cnt)) { 2163 size_t new_cnt; 2164 2165 /* 2166 * The interval is contained in the 2167 * interval (i - 1) of the leaf but its 2168 * removal requires both updating the 2169 * size of the original interval and 2170 * also inserting a new interval. 2171 */ 2172 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2173 (page + P2SZ(count))) >> 2174 PAGE_WIDTH; 2175 leaf->value[i - 1] -= count + new_cnt; 2176 btree_insert(&area->used_space, page + 2177 P2SZ(count), (void *) new_cnt, 2178 leaf); 2179 goto success; 2180 } 2181 } 2182 2183 return false; 2184 } 2185 } 2186 2187 error: 2188 panic("Inconsistency detected while removing %zu pages of used " 2189 "space from %p.", count, (void *) page); 2190 2191 success: 2192 area->resident -= count; 2056 2057 /* Check if A is adjacent to the new interval */ 2058 adj_a = (a != NULL) && (a->page + P2SZ(a->count) == page); 2059 /* Check if the new interval is adjacent to B*/ 2060 adj_b = (b != NULL) && page + P2SZ(count) == b->page; 2061 2062 if (adj_a && adj_b) { 2063 /* Fuse into a single interval */ 2064 a->count += count + b->count; 2065 used_space_remove_ival(b); 2066 } else if (adj_a) { 2067 /* Append to A */ 2068 a->count += count; 2069 } else if (adj_b) { 2070 /* Prepend to B */ 2071 b->page = page; 2072 b->count += count; 
2073 } else { 2074 /* Create new interval */ 2075 ival = slab_alloc(used_space_ival_cache, 0); 2076 ival->used_space = used_space; 2077 odlink_initialize(&ival->lused_space); 2078 ival->page = page; 2079 ival->count = count; 2080 2081 odict_insert(&ival->lused_space, &used_space->ivals, 2082 NULL); 2083 } 2084 2085 used_space->pages += count; 2193 2086 return true; 2194 2087 } … … 2257 2150 } 2258 2151 2259 /** Get list of ad ress space areas.2152 /** Get list of address space areas. 2260 2153 * 2261 2154 * @param as Address space. -
kernel/generic/src/mm/backend_anon.c
r7f7817a9 rab936440 48 48 #include <synch/mutex.h> 49 49 #include <adt/list.h> 50 #include <adt/btree.h>51 50 #include <errno.h> 52 51 #include <typedefs.h> … … 122 121 */ 123 122 mutex_lock(&area->sh_info->lock); 124 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 125 node) { 126 unsigned int i; 127 128 for (i = 0; i < node->keys; i++) { 129 uintptr_t base = node->key[i]; 130 size_t count = (size_t) node->value[i]; 131 unsigned int j; 132 133 for (j = 0; j < count; j++) { 134 pte_t pte; 135 bool found; 136 137 page_table_lock(area->as, false); 138 found = page_mapping_find(area->as, 139 base + P2SZ(j), false, &pte); 140 141 (void)found; 142 assert(found); 143 assert(PTE_VALID(&pte)); 144 assert(PTE_PRESENT(&pte)); 145 146 btree_insert(&area->sh_info->pagemap, 147 (base + P2SZ(j)) - area->base, 148 (void *) PTE_GET_FRAME(&pte), NULL); 149 page_table_unlock(area->as, false); 150 151 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 152 frame_reference_add(pfn); 153 } 154 123 used_space_ival_t *ival = used_space_first(&area->used_space); 124 while (ival != NULL) { 125 uintptr_t base = ival->page; 126 size_t count = ival->count; 127 unsigned int j; 128 129 for (j = 0; j < count; j++) { 130 pte_t pte; 131 bool found; 132 133 page_table_lock(area->as, false); 134 found = page_mapping_find(area->as, base + P2SZ(j), 135 false, &pte); 136 137 (void)found; 138 assert(found); 139 assert(PTE_VALID(&pte)); 140 assert(PTE_PRESENT(&pte)); 141 142 as_pagemap_insert(&area->sh_info->pagemap, 143 (base + P2SZ(j)) - area->base, PTE_GET_FRAME(&pte)); 144 page_table_unlock(area->as, false); 145 146 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 147 frame_reference_add(pfn); 155 148 } 149 150 ival = used_space_next(ival); 156 151 } 157 152 mutex_unlock(&area->sh_info->lock); … … 201 196 mutex_lock(&area->sh_info->lock); 202 197 if (area->sh_info->shared) { 203 btree_node_t *leaf;204 205 198 /* 206 199 * The area is shared, chances are that the mapping can be found … … 210 203 * 
mapping, a new frame is allocated and the mapping is created. 211 204 */ 212 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 213 upage - area->base, &leaf); 214 if (!frame) { 215 bool allocate = true; 216 unsigned int i; 205 errno_t rc = as_pagemap_find(&area->sh_info->pagemap, 206 upage - area->base, &frame); 207 if (rc != EOK) { 208 /* Need to allocate the frame */ 209 kpage = km_temporary_page_get(&frame, 210 FRAME_NO_RESERVE); 211 memsetb((void *) kpage, PAGE_SIZE, 0); 212 km_temporary_page_put(kpage); 217 213 218 214 /* 219 * Zero can be returned as a valid frame address.220 * Just a small workaround.215 * Insert the address of the newly allocated 216 * frame to the pagemap. 221 217 */ 222 for (i = 0; i < leaf->keys; i++) { 223 if (leaf->key[i] == upage - area->base) { 224 allocate = false; 225 break; 226 } 227 } 228 if (allocate) { 229 kpage = km_temporary_page_get(&frame, 230 FRAME_NO_RESERVE); 231 memsetb((void *) kpage, PAGE_SIZE, 0); 232 km_temporary_page_put(kpage); 233 234 /* 235 * Insert the address of the newly allocated 236 * frame to the pagemap. 237 */ 238 btree_insert(&area->sh_info->pagemap, 239 upage - area->base, (void *) frame, leaf); 240 } 218 as_pagemap_insert(&area->sh_info->pagemap, 219 upage - area->base, frame); 241 220 } 242 221 frame_reference_add(ADDR2PFN(frame)); … … 280 259 */ 281 260 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 282 if (!used_space_insert( area, upage, 1))261 if (!used_space_insert(&area->used_space, upage, 1)) 283 262 panic("Cannot insert used space."); 284 263 -
kernel/generic/src/mm/backend_elf.c
r7f7817a9 rab936440 153 153 { 154 154 elf_segment_header_t *entry = area->backend_data.segment; 155 link_t *cur;156 btree_node_t *leaf, *node;155 used_space_ival_t *start; 156 used_space_ival_t *cur; 157 157 uintptr_t start_anon = entry->p_vaddr + entry->p_filesz; 158 158 … … 164 164 */ 165 165 if (area->flags & AS_AREA_WRITE) { 166 node = list_get_instance(list_first(&area->used_space.leaf_list), 167 btree_node_t, leaf_link); 166 start = used_space_first(&area->used_space); 168 167 } else { 169 (void) btree_search(&area->used_space, start_anon, &leaf); 170 node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 171 if (!node) 172 node = leaf; 168 /* Find first interval containing addresses >= start_anon */ 169 start = used_space_find_gteq(&area->used_space, start_anon); 173 170 } 174 171 … … 177 174 */ 178 175 mutex_lock(&area->sh_info->lock); 179 for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head; 180 cur = cur->next) { 176 cur = start; 177 while (cur != NULL) { 178 uintptr_t base = cur->page; 179 size_t count = cur->count; 181 180 unsigned int i; 182 181 183 node = list_get_instance(cur, btree_node_t, leaf_link); 184 185 for (i = 0; i < node->keys; i++) { 186 uintptr_t base = node->key[i]; 187 size_t count = (size_t) node->value[i]; 188 unsigned int j; 182 /* 183 * Skip read-only areas of used space that are backed 184 * by the ELF image. 185 */ 186 if (!(area->flags & AS_AREA_WRITE)) 187 if (base >= entry->p_vaddr && 188 base + P2SZ(count) <= start_anon) 189 continue; 190 191 for (i = 0; i < count; i++) { 192 pte_t pte; 193 bool found; 189 194 190 195 /* 191 * Skip read-only areas of used space that are backed192 * by theELF image.196 * Skip read-only pages that are backed by the 197 * ELF image. 
193 198 */ 194 199 if (!(area->flags & AS_AREA_WRITE)) 195 200 if (base >= entry->p_vaddr && 196 base + P2SZ( count) <= start_anon)201 base + P2SZ(i + 1) <= start_anon) 197 202 continue; 198 203 199 for (j = 0; j < count; j++) { 200 pte_t pte; 201 bool found; 202 203 /* 204 * Skip read-only pages that are backed by the 205 * ELF image. 206 */ 207 if (!(area->flags & AS_AREA_WRITE)) 208 if (base >= entry->p_vaddr && 209 base + P2SZ(j + 1) <= start_anon) 210 continue; 211 212 page_table_lock(area->as, false); 213 found = page_mapping_find(area->as, 214 base + P2SZ(j), false, &pte); 215 216 (void) found; 217 assert(found); 218 assert(PTE_VALID(&pte)); 219 assert(PTE_PRESENT(&pte)); 220 221 btree_insert(&area->sh_info->pagemap, 222 (base + P2SZ(j)) - area->base, 223 (void *) PTE_GET_FRAME(&pte), NULL); 224 page_table_unlock(area->as, false); 225 226 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 227 frame_reference_add(pfn); 228 } 229 204 page_table_lock(area->as, false); 205 found = page_mapping_find(area->as, 206 base + P2SZ(i), false, &pte); 207 208 (void) found; 209 assert(found); 210 assert(PTE_VALID(&pte)); 211 assert(PTE_PRESENT(&pte)); 212 213 as_pagemap_insert(&area->sh_info->pagemap, 214 (base + P2SZ(i)) - area->base, 215 PTE_GET_FRAME(&pte)); 216 page_table_unlock(area->as, false); 217 218 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 219 frame_reference_add(pfn); 230 220 } 231 } 221 222 cur = used_space_next(cur); 223 } 224 232 225 mutex_unlock(&area->sh_info->lock); 233 226 } … … 267 260 elf_header_t *elf = area->backend_data.elf; 268 261 elf_segment_header_t *entry = area->backend_data.segment; 269 btree_node_t *leaf;270 262 uintptr_t base; 271 263 uintptr_t frame; … … 301 293 mutex_lock(&area->sh_info->lock); 302 294 if (area->sh_info->shared) { 303 bool found = false;304 305 295 /* 306 296 * The address space area is shared. 
307 297 */ 308 298 309 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 310 upage - area->base, &leaf); 311 if (!frame) { 312 unsigned int i; 313 314 /* 315 * Workaround for valid NULL address. 316 */ 317 318 for (i = 0; i < leaf->keys; i++) { 319 if (leaf->key[i] == upage - area->base) { 320 found = true; 321 break; 322 } 323 } 324 } 325 if (frame || found) { 299 errno_t rc = as_pagemap_find(&area->sh_info->pagemap, 300 upage - area->base, &frame); 301 if (rc == EOK) { 326 302 frame_reference_add(ADDR2PFN(frame)); 327 303 page_mapping_insert(AS, upage, frame, 328 304 as_area_get_flags(area)); 329 if (!used_space_insert( area, upage, 1))305 if (!used_space_insert(&area->used_space, upage, 1)) 330 306 panic("Cannot insert used space."); 331 307 mutex_unlock(&area->sh_info->lock); … … 415 391 if (dirty && area->sh_info->shared) { 416 392 frame_reference_add(ADDR2PFN(frame)); 417 btree_insert(&area->sh_info->pagemap, upage - area->base,418 (void *) frame, leaf);393 as_pagemap_insert(&area->sh_info->pagemap, upage - area->base, 394 frame); 419 395 } 420 396 … … 422 398 423 399 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 424 if (!used_space_insert( area, upage, 1))400 if (!used_space_insert(&area->used_space, upage, 1)) 425 401 panic("Cannot insert used space."); 426 402 -
kernel/generic/src/mm/backend_phys.c
r7f7817a9 rab936440 143 143 as_area_get_flags(area)); 144 144 145 if (!used_space_insert( area, upage, 1))145 if (!used_space_insert(&area->used_space, upage, 1)) 146 146 panic("Cannot insert used space."); 147 147 -
kernel/generic/src/mm/backend_user.c
r7f7817a9 rab936440 147 147 uintptr_t frame = IPC_GET_ARG1(data); 148 148 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 149 if (!used_space_insert( area, upage, 1))149 if (!used_space_insert(&area->used_space, upage, 1)) 150 150 panic("Cannot insert used space."); 151 151 -
kernel/generic/src/mm/frame.c
r7f7817a9 rab936440 79 79 * 80 80 */ 81 NO_TRACE static void frame_initialize(frame_t *frame)81 _NO_TRACE static void frame_initialize(frame_t *frame) 82 82 { 83 83 frame->refcount = 0; … … 100 100 * 101 101 */ 102 NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count,102 _NO_TRACE static size_t zones_insert_zone(pfn_t base, size_t count, 103 103 zone_flags_t flags) 104 104 { … … 156 156 * 157 157 */ 158 NO_TRACE static size_t frame_total_free_get_internal(void)158 _NO_TRACE static size_t frame_total_free_get_internal(void) 159 159 { 160 160 size_t total = 0; … … 167 167 } 168 168 169 NO_TRACE size_t frame_total_free_get(void)169 _NO_TRACE size_t frame_total_free_get(void) 170 170 { 171 171 size_t total; … … 190 190 * 191 191 */ 192 NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint)192 _NO_TRACE size_t find_zone(pfn_t frame, size_t count, size_t hint) 193 193 { 194 194 if (hint >= zones.count) … … 211 211 212 212 /** @return True if zone can allocate specified number of frames */ 213 NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count,213 _NO_TRACE static bool zone_can_alloc(zone_t *zone, size_t count, 214 214 pfn_t constraint) 215 215 { … … 239 239 * 240 240 */ 241 NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags,241 _NO_TRACE static size_t find_free_zone_all(size_t count, zone_flags_t flags, 242 242 pfn_t constraint, size_t hint) 243 243 { … … 265 265 * 266 266 */ 267 NO_TRACE static bool is_high_priority(pfn_t base, size_t count)267 _NO_TRACE static bool is_high_priority(pfn_t base, size_t count) 268 268 { 269 269 return (base + count <= FRAME_LOWPRIO); … … 285 285 * 286 286 */ 287 NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags,287 _NO_TRACE static size_t find_free_zone_lowprio(size_t count, zone_flags_t flags, 288 288 pfn_t constraint, size_t hint) 289 289 { … … 322 322 * 323 323 */ 324 NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags,324 
_NO_TRACE static size_t find_free_zone(size_t count, zone_flags_t flags, 325 325 pfn_t constraint, size_t hint) 326 326 { … … 346 346 347 347 /** Return frame from zone. */ 348 NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index)348 _NO_TRACE static frame_t *zone_get_frame(zone_t *zone, size_t index) 349 349 { 350 350 assert(index < zone->count); … … 366 366 * 367 367 */ 368 NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count,368 _NO_TRACE static size_t zone_frame_alloc(zone_t *zone, size_t count, 369 369 pfn_t constraint) 370 370 { … … 405 405 * 406 406 */ 407 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index)407 _NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t index) 408 408 { 409 409 assert(zone->flags & ZONE_AVAILABLE); … … 427 427 428 428 /** Mark frame in zone unavailable to allocation. */ 429 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index)429 _NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t index) 430 430 { 431 431 assert(zone->flags & ZONE_AVAILABLE); … … 453 453 * 454 454 */ 455 NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,455 _NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, 456 456 void *confdata) 457 457 { … … 507 507 * 508 508 */ 509 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)509 _NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 510 510 { 511 511 assert(zones.info[znum].flags & ZONE_AVAILABLE); … … 623 623 * 624 624 */ 625 NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,625 _NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count, 626 626 zone_flags_t flags, void *confdata) 627 627 { … … 1037 1037 * 1038 1038 */ 1039 NO_TRACE void frame_reference_add(pfn_t pfn)1039 _NO_TRACE void frame_reference_add(pfn_t pfn) 1040 1040 { 1041 1041 irq_spinlock_lock(&zones.lock, true); … … 1056 1056 
* 1057 1057 */ 1058 NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count)1058 _NO_TRACE void frame_mark_unavailable(pfn_t start, size_t count) 1059 1059 { 1060 1060 irq_spinlock_lock(&zones.lock, true); -
kernel/generic/src/mm/page.c
r7f7817a9 rab936440 95 95 * 96 96 */ 97 NO_TRACE void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,97 _NO_TRACE void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 98 98 unsigned int flags) 99 99 { … … 120 120 * 121 121 */ 122 NO_TRACE void page_mapping_remove(as_t *as, uintptr_t page)122 _NO_TRACE void page_mapping_remove(as_t *as, uintptr_t page) 123 123 { 124 124 assert(page_table_locked(as)); … … 144 144 * the PTE is not guaranteed to be present. 145 145 */ 146 NO_TRACE bool page_mapping_find(as_t *as, uintptr_t page, bool nolock,146 _NO_TRACE bool page_mapping_find(as_t *as, uintptr_t page, bool nolock, 147 147 pte_t *pte) 148 148 { … … 165 165 * @param pte New PTE. 166 166 */ 167 NO_TRACE void page_mapping_update(as_t *as, uintptr_t page, bool nolock,167 _NO_TRACE void page_mapping_update(as_t *as, uintptr_t page, bool nolock, 168 168 pte_t *pte) 169 169 { -
kernel/generic/src/mm/slab.c
r7f7817a9 rab936440 158 158 * 159 159 */ 160 NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,160 _NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache, 161 161 unsigned int flags) 162 162 { … … 206 206 * 207 207 */ 208 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)208 _NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 209 209 { 210 210 frame_free(KA2PA(slab->start), slab->cache->frames); … … 218 218 219 219 /** Map object to slab structure */ 220 NO_TRACE static slab_t *obj2slab(void *obj)220 _NO_TRACE static slab_t *obj2slab(void *obj) 221 221 { 222 222 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); … … 234 234 * 235 235 */ 236 NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,236 _NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, 237 237 slab_t *slab) 238 238 { … … 276 276 * 277 277 */ 278 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)278 _NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags) 279 279 { 280 280 irq_spinlock_lock(&cache->slablock, true); … … 332 332 * 333 333 */ 334 NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,334 _NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, 335 335 bool first) 336 336 { … … 357 357 * 358 358 */ 359 NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,359 _NO_TRACE static void put_mag_to_cache(slab_cache_t *cache, 360 360 slab_magazine_t *mag) 361 361 { … … 373 373 * 374 374 */ 375 NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,375 _NO_TRACE static size_t magazine_destroy(slab_cache_t *cache, 376 376 slab_magazine_t *mag) 377 377 { … … 392 392 * 393 393 */ 394 NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)394 _NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 395 395 { 396 396 slab_magazine_t *cmag = 
cache->mag_cache[CPU->id].current; … … 429 429 * 430 430 */ 431 NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)431 _NO_TRACE static void *magazine_obj_get(slab_cache_t *cache) 432 432 { 433 433 if (!CPU) … … 459 459 * 460 460 */ 461 NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)461 _NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 462 462 { 463 463 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; … … 509 509 * 510 510 */ 511 NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)511 _NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj) 512 512 { 513 513 if (!CPU) … … 538 538 * 539 539 */ 540 NO_TRACE static size_t comp_objects(slab_cache_t *cache)540 _NO_TRACE static size_t comp_objects(slab_cache_t *cache) 541 541 { 542 542 if (cache->flags & SLAB_CACHE_SLINSIDE) … … 550 550 * 551 551 */ 552 NO_TRACE static size_t badness(slab_cache_t *cache)552 _NO_TRACE static size_t badness(slab_cache_t *cache) 553 553 { 554 554 size_t objects = comp_objects(cache); … … 564 564 * 565 565 */ 566 NO_TRACE static bool make_magcache(slab_cache_t *cache)566 _NO_TRACE static bool make_magcache(slab_cache_t *cache) 567 567 { 568 568 assert(_slab_initialized >= 2); … … 585 585 * 586 586 */ 587 NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,587 _NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name, 588 588 size_t size, size_t align, errno_t (*constructor)(void *obj, 589 589 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) … … 660 660 * 661 661 */ 662 NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)662 _NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags) 663 663 { 664 664 if (cache->flags & SLAB_CACHE_NOMAGAZINE) … … 707 707 * 708 708 */ 709 NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)709 _NO_TRACE static void 
_slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 710 710 { 711 711 if (!obj) -
kernel/generic/src/proc/current.c
r7f7817a9 rab936440 70 70 * 71 71 */ 72 NO_TRACE void current_copy(current_t *src, current_t *dst)72 _NO_TRACE void current_copy(current_t *src, current_t *dst) 73 73 { 74 74 assert(src->magic == MAGIC); -
kernel/generic/src/proc/task.c
r7f7817a9 rab936440 269 269 { 270 270 /* 271 * Remove the task from the task B+tree.271 * Remove the task from the task odict. 272 272 */ 273 273 irq_spinlock_lock(&tasks_lock, true); -
kernel/generic/src/proc/thread.c
r7f7817a9 rab936440 180 180 * covered by the kernel identity mapping, which guarantees not to 181 181 * nest TLB-misses infinitely (either via some hardware mechanism or 182 * by the construc iton of the assembly-language part of the TLB-miss182 * by the construction of the assembly-language part of the TLB-miss 183 183 * handler). 184 184 * 185 185 * This restriction can be lifted once each architecture provides 186 * a similar guarantee, for example by locking the kernel stack186 * a similar guarantee, for example, by locking the kernel stack 187 187 * in the TLB whenever it is allocated from the high-memory and the 188 188 * thread is being scheduled to run. -
kernel/generic/src/sysinfo/stats.c
r7f7817a9 rab936440 189 189 continue; 190 190 191 pages += area-> resident;191 pages += area->used_space.pages; 192 192 mutex_unlock(&area->lock); 193 193 area = as_area_next(area); -
kernel/generic/src/sysinfo/sysinfo.c
r7f7817a9 rab936440 62 62 * 63 63 */ 64 NO_TRACE static errno_t sysinfo_item_constructor(void *obj, unsigned int kmflag)64 _NO_TRACE static errno_t sysinfo_item_constructor(void *obj, unsigned int kmflag) 65 65 { 66 66 sysinfo_item_t *item = (sysinfo_item_t *) obj; … … 82 82 * 83 83 */ 84 NO_TRACE static size_t sysinfo_item_destructor(void *obj)84 _NO_TRACE static size_t sysinfo_item_destructor(void *obj) 85 85 { 86 86 sysinfo_item_t *item = (sysinfo_item_t *) obj; … … 124 124 * 125 125 */ 126 NO_TRACE static sysinfo_item_t *sysinfo_find_item(const char *name,126 _NO_TRACE static sysinfo_item_t *sysinfo_find_item(const char *name, 127 127 sysinfo_item_t *subtree, sysinfo_return_t **ret, bool dry_run) 128 128 { … … 190 190 * 191 191 */ 192 NO_TRACE static sysinfo_item_t *sysinfo_create_path(const char *name,192 _NO_TRACE static sysinfo_item_t *sysinfo_create_path(const char *name, 193 193 sysinfo_item_t **psubtree) 194 194 { … … 508 508 * 509 509 */ 510 NO_TRACE static void sysinfo_indent(size_t spaces)510 _NO_TRACE static void sysinfo_indent(size_t spaces) 511 511 { 512 512 for (size_t i = 0; i < spaces; i++) … … 522 522 * 523 523 */ 524 NO_TRACE static void sysinfo_dump_internal(sysinfo_item_t *root, size_t spaces)524 _NO_TRACE static void sysinfo_dump_internal(sysinfo_item_t *root, size_t spaces) 525 525 { 526 526 /* Walk all siblings */ … … 622 622 * 623 623 */ 624 NO_TRACE static sysinfo_return_t sysinfo_get_item(const char *name,624 _NO_TRACE static sysinfo_return_t sysinfo_get_item(const char *name, 625 625 sysinfo_item_t **root, bool dry_run) 626 626 { … … 677 677 * 678 678 */ 679 NO_TRACE static sysinfo_return_t sysinfo_get_item_uspace(void *ptr, size_t size,679 _NO_TRACE static sysinfo_return_t sysinfo_get_item_uspace(void *ptr, size_t size, 680 680 bool dry_run) 681 681 { … … 720 720 * 721 721 */ 722 NO_TRACE static sysinfo_return_t sysinfo_get_keys(const char *name,722 _NO_TRACE static sysinfo_return_t sysinfo_get_keys(const char *name, 723 723 
sysinfo_item_t **root, bool dry_run) 724 724 { … … 786 786 * 787 787 */ 788 NO_TRACE static sysinfo_return_t sysinfo_get_keys_uspace(void *ptr, size_t size,788 _NO_TRACE static sysinfo_return_t sysinfo_get_keys_uspace(void *ptr, size_t size, 789 789 bool dry_run) 790 790 {
Note:
See TracChangeset
for help on using the changeset viewer.
