Changeset 2fc3b2d in mainline
- Timestamp: 2018-12-10T11:15:10Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 247fdea
- Parents: de9a18e
- git-author: Jiri Svoboda <jiri@…> (2018-12-05 18:39:06)
- git-committer: jxsvoboda <5887334+jxsvoboda@…> (2018-12-10 11:15:10)
- Location: kernel/generic
- Files: 7 edited
kernel/generic/include/mm/as.h
rde9a18e → r2fc3b2d

 #include <synch/mutex.h>
 #include <adt/list.h>
-#include <adt/btree.h>
 #include <adt/odict.h>
 #include <lib/elf.h>
… …
 } as_pagemap_t;
 
+/** Used space interval */
+typedef struct {
+	/** Containing used_space structure */
+	struct used_space *used_space;
+	/** Link to @c used_space->ivals */
+	odlink_t lused_space;
+	/** First page address */
+	uintptr_t page;
+	/** Count of pages */
+	size_t count;
+} used_space_ival_t;
+
+/** Map of used space in an address space area */
+typedef struct used_space {
+	/**
+	 * Dictionary of intervals by start address.
+	 * Members are of type @c used_space_ival_t.
+	 */
+	odict_t ivals;
+	/** Total number of used pages. */
+	size_t pages;
+} used_space_t;
+
 /**
  * This structure contains information associated with the shared address space
… …
 	size_t pages;
 
-	/** Number of resident pages in the area. */
-	size_t resident;
-
 	/** Base address of this area. */
 	uintptr_t base;
 
 	/** Map of used space. */
-	btree_t used_space;
+	used_space_t used_space;
 
 	/**
… …
 extern bool as_area_check_access(as_area_t *, pf_access_t);
 extern size_t as_area_get_size(uintptr_t);
-extern bool used_space_insert(as_area_t *, uintptr_t, size_t);
-extern bool used_space_remove(as_area_t *, uintptr_t, size_t);
+extern used_space_ival_t *used_space_first(used_space_t *);
+extern used_space_ival_t *used_space_next(used_space_ival_t *);
+extern used_space_ival_t *used_space_find_gteq(used_space_t *, uintptr_t);
+extern bool used_space_insert(used_space_t *, uintptr_t, size_t);
 
 /* Interface to be implemented by architectures. */
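The header change above replaces the B+tree-based used-space map with an ordered dictionary of disjoint intervals, each holding a first page address and a page count, plus a cached total of used pages. The standalone sketch below is not HelenOS code: it models the intervals with a plain sorted array and illustrative names (ival_t, uspace_t, uspace_first, uspace_next) in place of the kernel's odict_t, and only illustrates how such a map represents used space and how an address-ordered walk recovers the total page count.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for used_space_ival_t: one maximal run of used pages. */
typedef struct {
	uintptr_t page;   /* address of the first page in the run */
	size_t count;     /* number of consecutive pages */
} ival_t;

/* Simplified stand-in for used_space_t: intervals kept sorted by start address. */
typedef struct {
	ival_t ivals[16];
	size_t nivals;
	size_t pages;     /* running total, as in used_space_t.pages */
} uspace_t;

/* Analogs of used_space_first()/used_space_next(): iterate in address order. */
static const ival_t *uspace_first(const uspace_t *us)
{
	return us->nivals > 0 ? &us->ivals[0] : NULL;
}

static const ival_t *uspace_next(const uspace_t *us, const ival_t *cur)
{
	return (cur + 1 < us->ivals + us->nivals) ? cur + 1 : NULL;
}

int main(void)
{
	/* Two used runs: pages [0x1000, 0x3000) and [0x8000, 0x9000) */
	uspace_t us = {
		.ivals = { { 0x1000, 2 }, { 0x8000, 1 } },
		.nivals = 2,
		.pages = 3
	};

	/* Walking the intervals recovers the same total the kernel keeps cached. */
	size_t total = 0;
	for (const ival_t *iv = uspace_first(&us); iv != NULL;
	    iv = uspace_next(&us, iv))
		total += iv->count;

	assert(total == us.pages);
	printf("used pages: %zu\n", total);
	return 0;
}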
kernel/generic/src/mm/as.c
rde9a18e r2fc3b2d 63 63 #include <synch/mutex.h> 64 64 #include <adt/list.h> 65 #include <adt/btree.h>66 65 #include <proc/task.h> 67 66 #include <proc/thread.h> … … 95 94 static slab_cache_t *as_page_mapping_cache; 96 95 96 /** Cache for used_space_ival_t objects */ 97 static slab_cache_t *used_space_ival_cache; 98 97 99 /** ASID subsystem lock. 98 100 * … … 117 119 static int as_areas_cmp(void *, void *); 118 120 121 static void used_space_initialize(used_space_t *); 122 static void used_space_finalize(used_space_t *); 123 static void *used_space_getkey(odlink_t *); 124 static int used_space_cmp(void *, void *); 125 static used_space_ival_t *used_space_last(used_space_t *); 126 static void used_space_remove_ival(used_space_ival_t *); 127 static void used_space_shorten_ival(used_space_ival_t *, size_t); 128 119 129 NO_TRACE static errno_t as_constructor(void *obj, unsigned int flags) 120 130 { … … 142 152 as_page_mapping_cache = slab_cache_create("as_page_mapping_t", 143 153 sizeof(as_page_mapping_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 154 155 used_space_ival_cache = slab_cache_create("used_space_ival_t", 156 sizeof(used_space_ival_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 144 157 145 158 AS_KERNEL = as_create(FLAG_AS_KERNEL); … … 548 561 * @param b Pointer to virtual address cast as @c void * 549 562 * @return <0, =0, >0 if virtual address a is less than, equal to, or 550 * greater -than b, respectively.563 * greater than b, respectively. 551 564 */ 552 565 static int as_pagemap_cmp(void *a, void *b) … … 783 796 area->attributes = attrs; 784 797 area->pages = pages; 785 area->resident = 0;786 798 area->base = *base; 787 799 area->backend = backend; … … 836 848 } 837 849 838 btree_create(&area->used_space);850 used_space_initialize(&area->used_space); 839 851 odict_insert(&area->las_areas, &as->as_areas, NULL); 840 852 … … 944 956 945 957 /* 958 * Start TLB shootdown sequence. 959 */ 960 961 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, 962 as->asid, area->base + P2SZ(pages), 963 area->pages - pages); 964 965 /* 946 966 * Remove frames belonging to used space starting from 947 967 * the highest addresses downwards until an overlap with 948 * the resized address space area is found. Note that this 949 * is also the right way to remove part of the used_space 950 * B+tree leaf list. 968 * the resized address space area is found. 951 969 */ 952 970 bool cond = true; 953 971 while (cond) { 954 assert(!list_empty(&area->used_space.leaf_list)); 955 956 btree_node_t *node = 957 list_get_instance(list_last(&area->used_space.leaf_list), 958 btree_node_t, leaf_link); 959 960 if ((cond = (node->keys != 0))) { 961 uintptr_t ptr = node->key[node->keys - 1]; 962 size_t node_size = 963 (size_t) node->value[node->keys - 1]; 964 size_t i = 0; 965 966 if (overlaps(ptr, P2SZ(node_size), area->base, 967 P2SZ(pages))) { 968 969 if (ptr + P2SZ(node_size) <= start_free) { 970 /* 971 * The whole interval fits 972 * completely in the resized 973 * address space area. 974 */ 975 break; 976 } 977 972 used_space_ival_t *ival = 973 used_space_last(&area->used_space); 974 assert(ival != NULL); 975 976 uintptr_t ptr = ival->page; 977 size_t pcount = ival->count; 978 size_t i = 0; 979 980 if (overlaps(ptr, P2SZ(pcount), area->base, 981 P2SZ(pages))) { 982 983 if (ptr + P2SZ(pcount) <= start_free) { 978 984 /* 979 * Part of the interval corresponding 980 * to b and c overlaps with the resized 981 * address space area. 985 * The whole interval fits completely 986 * in the resized address space area. 
982 987 */ 983 984 /* We are almost done */ 985 cond = false; 986 i = (start_free - ptr) >> PAGE_WIDTH; 987 if (!used_space_remove(area, start_free, 988 node_size - i)) 989 panic("Cannot remove used space."); 990 } else { 991 /* 992 * The interval of used space can be 993 * completely removed. 994 */ 995 if (!used_space_remove(area, ptr, node_size)) 996 panic("Cannot remove used space."); 988 break; 997 989 } 998 990 999 991 /* 1000 * Start TLB shootdown sequence. 1001 * 1002 * The sequence is rather short and can be 1003 * repeated multiple times. The reason is that 1004 * we don't want to have used_space_remove() 1005 * inside the sequence as it may use a blocking 1006 * memory allocation for its B+tree. Blocking 1007 * while holding the tlblock spinlock is 1008 * forbidden and would hit a kernel assertion. 992 * Part of the interval corresponding to b and 993 * c overlaps with the resized address space 994 * area. 1009 995 */ 1010 996 1011 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, 1012 as->asid, area->base + P2SZ(pages), 1013 area->pages - pages); 1014 1015 for (; i < node_size; i++) { 1016 pte_t pte; 1017 bool found = page_mapping_find(as, 1018 ptr + P2SZ(i), false, &pte); 1019 1020 (void) found; 1021 assert(found); 1022 assert(PTE_VALID(&pte)); 1023 assert(PTE_PRESENT(&pte)); 1024 1025 if ((area->backend) && 1026 (area->backend->frame_free)) { 1027 area->backend->frame_free(area, 1028 ptr + P2SZ(i), 1029 PTE_GET_FRAME(&pte)); 1030 } 1031 1032 page_mapping_remove(as, ptr + P2SZ(i)); 997 /* We are almost done */ 998 cond = false; 999 i = (start_free - ptr) >> PAGE_WIDTH; 1000 1001 /* Shorten the interval to @c i pages */ 1002 used_space_shorten_ival(ival, i); 1003 } else { 1004 /* 1005 * The interval of used space can be completely 1006 * removed. 1007 */ 1008 used_space_remove_ival(ival); 1009 } 1010 1011 for (; i < pcount; i++) { 1012 pte_t pte; 1013 bool found = page_mapping_find(as, 1014 ptr + P2SZ(i), false, &pte); 1015 1016 (void) found; 1017 assert(found); 1018 assert(PTE_VALID(&pte)); 1019 assert(PTE_PRESENT(&pte)); 1020 1021 if ((area->backend) && 1022 (area->backend->frame_free)) { 1023 area->backend->frame_free(area, 1024 ptr + P2SZ(i), 1025 PTE_GET_FRAME(&pte)); 1033 1026 } 1034 1027 1035 /* 1036 * Finish TLB shootdown sequence. 1037 */ 1038 1039 tlb_invalidate_pages(as->asid, 1040 area->base + P2SZ(pages), 1041 area->pages - pages); 1042 1043 /* 1044 * Invalidate software translation caches 1045 * (e.g. TSB on sparc64, PHT on ppc32). 1046 */ 1047 as_invalidate_translation_cache(as, 1048 area->base + P2SZ(pages), 1049 area->pages - pages); 1050 tlb_shootdown_finalize(ipl); 1028 page_mapping_remove(as, ptr + P2SZ(i)); 1051 1029 } 1030 1052 1031 } 1032 1033 /* 1034 * Finish TLB shootdown sequence. 1035 */ 1036 1037 tlb_invalidate_pages(as->asid, 1038 area->base + P2SZ(pages), 1039 area->pages - pages); 1040 1041 /* 1042 * Invalidate software translation caches 1043 * (e.g. TSB on sparc64, PHT on ppc32). 1044 */ 1045 as_invalidate_translation_cache(as, 1046 area->base + P2SZ(pages), 1047 area->pages - pages); 1048 tlb_shootdown_finalize(ipl); 1049 1053 1050 page_table_unlock(as, false); 1054 1051 } else { … … 1109 1106 1110 1107 page_table_lock(as, false); 1111 1112 1108 /* 1113 1109 * Start TLB shootdown sequence. … … 1117 1113 1118 1114 /* 1119 * Visit only the pages mapped by used_space B+tree. 
1120 */ 1121 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1122 node) { 1123 btree_key_t i; 1124 1125 for (i = 0; i < node->keys; i++) { 1126 uintptr_t ptr = node->key[i]; 1127 size_t size; 1128 1129 for (size = 0; size < (size_t) node->value[i]; size++) { 1130 pte_t pte; 1131 bool found = page_mapping_find(as, 1132 ptr + P2SZ(size), false, &pte); 1133 1134 (void) found; 1135 assert(found); 1136 assert(PTE_VALID(&pte)); 1137 assert(PTE_PRESENT(&pte)); 1138 1139 if ((area->backend) && 1140 (area->backend->frame_free)) { 1141 area->backend->frame_free(area, 1142 ptr + P2SZ(size), 1143 PTE_GET_FRAME(&pte)); 1144 } 1145 1146 page_mapping_remove(as, ptr + P2SZ(size)); 1115 * Visit only the pages mapped by used_space. 1116 */ 1117 used_space_ival_t *ival = used_space_first(&area->used_space); 1118 while (ival != NULL) { 1119 uintptr_t ptr = ival->page; 1120 1121 for (size_t size = 0; size < ival->count; size++) { 1122 pte_t pte; 1123 bool found = page_mapping_find(as, 1124 ptr + P2SZ(size), false, &pte); 1125 1126 (void) found; 1127 assert(found); 1128 assert(PTE_VALID(&pte)); 1129 assert(PTE_PRESENT(&pte)); 1130 1131 if ((area->backend) && 1132 (area->backend->frame_free)) { 1133 area->backend->frame_free(area, 1134 ptr + P2SZ(size), 1135 PTE_GET_FRAME(&pte)); 1147 1136 } 1137 1138 page_mapping_remove(as, ptr + P2SZ(size)); 1148 1139 } 1140 1141 used_space_remove_ival(ival); 1142 ival = used_space_first(&area->used_space); 1149 1143 } 1150 1144 … … 1164 1158 page_table_unlock(as, false); 1165 1159 1166 btree_destroy(&area->used_space); 1167 1160 used_space_finalize(&area->used_space); 1168 1161 area->attributes |= AS_AREA_ATTR_PARTIAL; 1169 1170 1162 sh_info_remove_reference(area->sh_info); 1171 1163 … … 1404 1396 1405 1397 /* 1406 * Compute total number of used pages in the used_space B+tree1398 * Compute total number of used pages 1407 1399 */ 1408 1400 size_t used_pages = 0; 1409 1401 1410 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1411 node) { 1412 btree_key_t i; 1413 1414 for (i = 0; i < node->keys; i++) 1415 used_pages += (size_t) node->value[i]; 1402 used_space_ival_t *ival = used_space_first(&area->used_space); 1403 while (ival != NULL) { 1404 used_pages += ival->count; 1405 ival = used_space_next(ival); 1416 1406 } 1417 1407 … … 1438 1428 size_t frame_idx = 0; 1439 1429 1440 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1441 node) { 1442 btree_key_t i; 1443 1444 for (i = 0; i < node->keys; i++) { 1445 uintptr_t ptr = node->key[i]; 1446 size_t size; 1447 1448 for (size = 0; size < (size_t) node->value[i]; size++) { 1449 pte_t pte; 1450 bool found = page_mapping_find(as, 1451 ptr + P2SZ(size), false, &pte); 1452 1453 (void) found; 1454 assert(found); 1455 assert(PTE_VALID(&pte)); 1456 assert(PTE_PRESENT(&pte)); 1457 1458 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); 1459 1460 /* Remove old mapping */ 1461 page_mapping_remove(as, ptr + P2SZ(size)); 1462 } 1430 ival = used_space_first(&area->used_space); 1431 while (ival != NULL) { 1432 uintptr_t ptr = ival->page; 1433 size_t size; 1434 1435 for (size = 0; size < ival->count; size++) { 1436 pte_t pte; 1437 bool found = page_mapping_find(as, ptr + P2SZ(size), 1438 false, &pte); 1439 1440 (void) found; 1441 assert(found); 1442 assert(PTE_VALID(&pte)); 1443 assert(PTE_PRESENT(&pte)); 1444 1445 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); 1446 1447 /* Remove old mapping */ 1448 page_mapping_remove(as, ptr + P2SZ(size)); 1463 1449 } 1450 1451 ival = used_space_next(ival); 1464 1452 
} 1465 1453 … … 1491 1479 frame_idx = 0; 1492 1480 1493 list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t, 1494 node) { 1495 btree_key_t i; 1496 1497 for (i = 0; i < node->keys; i++) { 1498 uintptr_t ptr = node->key[i]; 1499 size_t size; 1500 1501 for (size = 0; size < (size_t) node->value[i]; size++) { 1502 page_table_lock(as, false); 1503 1504 /* Insert the new mapping */ 1505 page_mapping_insert(as, ptr + P2SZ(size), 1506 old_frame[frame_idx++], page_flags); 1507 1508 page_table_unlock(as, false); 1509 } 1481 ival = used_space_first(&area->used_space); 1482 while (ival != NULL) { 1483 uintptr_t ptr = ival->page; 1484 size_t size; 1485 1486 for (size = 0; size < ival->count; size++) { 1487 page_table_lock(as, false); 1488 1489 /* Insert the new mapping */ 1490 page_mapping_insert(as, ptr + P2SZ(size), 1491 old_frame[frame_idx++], page_flags); 1492 1493 page_table_unlock(as, false); 1510 1494 } 1495 1496 ival = used_space_next(ival); 1511 1497 } 1512 1498 … … 1872 1858 } 1873 1859 1860 /** Initialize used space map. 1861 * 1862 * @param used_space Used space map 1863 */ 1864 static void used_space_initialize(used_space_t *used_space) 1865 { 1866 odict_initialize(&used_space->ivals, used_space_getkey, used_space_cmp); 1867 used_space->pages = 0; 1868 } 1869 1870 /** Finalize used space map. 1871 * 1872 * @param used_space Used space map 1873 */ 1874 static void used_space_finalize(used_space_t *used_space) 1875 { 1876 assert(odict_empty(&used_space->ivals)); 1877 odict_finalize(&used_space->ivals); 1878 } 1879 1880 /** Get first interval of used space. 1881 * 1882 * @param used_space Used space map 1883 * @return First interval or @c NULL if there are none 1884 */ 1885 used_space_ival_t *used_space_first(used_space_t *used_space) 1886 { 1887 odlink_t *odlink = odict_first(&used_space->ivals); 1888 1889 if (odlink == NULL) 1890 return NULL; 1891 1892 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1893 } 1894 1895 /** Get next interval of used space. 1896 * 1897 * @param cur Current interval 1898 * @return Next interval or @c NULL if there are none 1899 */ 1900 used_space_ival_t *used_space_next(used_space_ival_t *cur) 1901 { 1902 odlink_t *odlink = odict_next(&cur->lused_space, 1903 &cur->used_space->ivals); 1904 1905 if (odlink == NULL) 1906 return NULL; 1907 1908 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1909 } 1910 1911 /** Get last interval of used space. 1912 * 1913 * @param used_space Used space map 1914 * @return First interval or @c NULL if there are none 1915 */ 1916 static used_space_ival_t *used_space_last(used_space_t *used_space) 1917 { 1918 odlink_t *odlink = odict_last(&used_space->ivals); 1919 1920 if (odlink == NULL) 1921 return NULL; 1922 1923 return odict_get_instance(odlink, used_space_ival_t, lused_space); 1924 } 1925 1926 /** Find the first interval that contains addresses greater than or equal to 1927 * @a ptr. 
1928 * 1929 * @param used_space Used space map 1930 * @param ptr Virtual address 1931 * 1932 * @return Used space interval or @c NULL if none matches 1933 */ 1934 used_space_ival_t *used_space_find_gteq(used_space_t *used_space, uintptr_t ptr) 1935 { 1936 odlink_t *odlink; 1937 used_space_ival_t *ival; 1938 1939 /* Find last interval to start at address less than @a ptr */ 1940 odlink = odict_find_lt(&used_space->ivals, &ptr, NULL); 1941 if (odlink != NULL) { 1942 ival = odict_get_instance(odlink, used_space_ival_t, 1943 lused_space); 1944 1945 /* If the interval extends above @a ptr, return it */ 1946 if (ival->page + P2SZ(ival->count) > ptr) 1947 return ival; 1948 1949 /* 1950 * Otherwise, if a next interval exists, it must match 1951 * the criteria. 1952 */ 1953 odlink = odict_next(&ival->lused_space, &used_space->ivals); 1954 } else { 1955 /* 1956 * No interval with lower base address, so if there is any 1957 * interval at all, it must match the criteria 1958 */ 1959 odlink = odict_first(&used_space->ivals); 1960 } 1961 1962 if (odlink != NULL) { 1963 ival = odict_get_instance(odlink, used_space_ival_t, 1964 lused_space); 1965 return ival; 1966 } 1967 1968 return NULL; 1969 } 1970 1971 /** Get key function for used space ordered dictionary. 1972 * 1973 * The key is the virtual address of the first page 1974 * 1975 * @param odlink Ordered dictionary link (used_space_ival_t.lused_space) 1976 * @return Pointer to virtual address of first page cast as @c void *. 1977 */ 1978 static void *used_space_getkey(odlink_t *odlink) 1979 { 1980 used_space_ival_t *ival = odict_get_instance(odlink, used_space_ival_t, 1981 lused_space); 1982 return (void *) &ival->page; 1983 } 1984 1985 /** Compare function for used space ordered dictionary. 1986 * 1987 * @param a Pointer to virtual address of first page cast as @c void * 1988 * @param b Pointer to virtual address of first page cast as @c void * 1989 * @return Less than zero, zero, greater than zero if virtual address @a a 1990 * is less than, equal to, greater than virtual address b, respectively. 1991 */ 1992 static int used_space_cmp(void *a, void *b) 1993 { 1994 uintptr_t va = *(uintptr_t *) a; 1995 uintptr_t vb = *(uintptr_t *) b; 1996 1997 if (va < vb) 1998 return -1; 1999 else if (va == vb) 2000 return 0; 2001 else 2002 return +1; 2003 } 2004 2005 /** Remove used space interval. 2006 * 2007 * @param ival Used space interval 2008 */ 2009 static void used_space_remove_ival(used_space_ival_t *ival) 2010 { 2011 ival->used_space->pages -= ival->count; 2012 odict_remove(&ival->lused_space); 2013 slab_free(used_space_ival_cache, ival); 2014 } 2015 2016 /** Shorten used space interval. 2017 * 2018 * @param ival Used space interval 2019 * @param count New number of pages in the interval 2020 */ 2021 static void used_space_shorten_ival(used_space_ival_t *ival, size_t count) 2022 { 2023 assert(count > 0); 2024 assert(count < ival->count); 2025 2026 ival->used_space->pages -= ival->count - count; 2027 ival->count = count; 2028 } 2029 1874 2030 /** Mark portion of address space area as used. 1875 2031 * 1876 2032 * The address space area must be already locked. 1877 2033 * 1878 * @param area Address space area.1879 * @param page 2034 * @param used_space Used space map 2035 * @param page First page to be marked. 1880 2036 * @param count Number of page to be marked. 
1881 2037 * … … 1883 2039 * 1884 2040 */ 1885 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1886 { 1887 assert(mutex_locked(&area->lock)); 2041 bool used_space_insert(used_space_t *used_space, uintptr_t page, size_t count) 2042 { 2043 used_space_ival_t *a; 2044 used_space_ival_t *b; 2045 bool adj_a; 2046 bool adj_b; 2047 odlink_t *odlink; 2048 used_space_ival_t *ival; 2049 1888 2050 assert(IS_ALIGNED(page, PAGE_SIZE)); 1889 2051 assert(count); 1890 2052 1891 btree_node_t *leaf = NULL; 1892 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1893 if (pages) { 1894 /* 1895 * We hit the beginning of some used space. 1896 */ 2053 /* Interval to the left */ 2054 odlink = odict_find_lt(&used_space->ivals, &page, NULL); 2055 a = (odlink != NULL) ? 2056 odict_get_instance(odlink, used_space_ival_t, lused_space) : 2057 NULL; 2058 2059 /* Interval to the right */ 2060 b = (a != NULL) ? used_space_next(a) : 2061 used_space_first(used_space); 2062 2063 /* Check for conflict with left interval */ 2064 if (a != NULL && overlaps(a->page, P2SZ(a->count), page, P2SZ(count))) 1897 2065 return false; 1898 } 1899 1900 assert(leaf != NULL); 1901 1902 if (!leaf->keys) { 1903 btree_insert(&area->used_space, page, (void *) count, leaf); 1904 goto success; 1905 } 1906 1907 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1908 if (node) { 1909 uintptr_t left_pg = node->key[node->keys - 1]; 1910 uintptr_t right_pg = leaf->key[0]; 1911 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1912 size_t right_cnt = (size_t) leaf->value[0]; 1913 1914 /* 1915 * Examine the possibility that the interval fits 1916 * somewhere between the rightmost interval of 1917 * the left neigbour and the first interval of the leaf. 1918 */ 1919 1920 if (page >= right_pg) { 1921 /* Do nothing. */ 1922 } else if (overlaps(page, P2SZ(count), left_pg, 1923 P2SZ(left_cnt))) { 1924 /* The interval intersects with the left interval. */ 1925 return false; 1926 } else if (overlaps(page, P2SZ(count), right_pg, 1927 P2SZ(right_cnt))) { 1928 /* The interval intersects with the right interval. */ 1929 return false; 1930 } else if ((page == left_pg + P2SZ(left_cnt)) && 1931 (page + P2SZ(count) == right_pg)) { 1932 /* 1933 * The interval can be added by merging the two already 1934 * present intervals. 1935 */ 1936 node->value[node->keys - 1] += count + right_cnt; 1937 btree_remove(&area->used_space, right_pg, leaf); 1938 goto success; 1939 } else if (page == left_pg + P2SZ(left_cnt)) { 1940 /* 1941 * The interval can be added by simply growing the left 1942 * interval. 1943 */ 1944 node->value[node->keys - 1] += count; 1945 goto success; 1946 } else if (page + P2SZ(count) == right_pg) { 1947 /* 1948 * The interval can be addded by simply moving base of 1949 * the right interval down and increasing its size 1950 * accordingly. 1951 */ 1952 leaf->value[0] += count; 1953 leaf->key[0] = page; 1954 goto success; 1955 } else { 1956 /* 1957 * The interval is between both neigbouring intervals, 1958 * but cannot be merged with any of them. 1959 */ 1960 btree_insert(&area->used_space, page, (void *) count, 1961 leaf); 1962 goto success; 1963 } 1964 } else if (page < leaf->key[0]) { 1965 uintptr_t right_pg = leaf->key[0]; 1966 size_t right_cnt = (size_t) leaf->value[0]; 1967 1968 /* 1969 * Investigate the border case in which the left neighbour does 1970 * not exist but the interval fits from the left. 
1971 */ 1972 1973 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) { 1974 /* The interval intersects with the right interval. */ 1975 return false; 1976 } else if (page + P2SZ(count) == right_pg) { 1977 /* 1978 * The interval can be added by moving the base of the 1979 * right interval down and increasing its size 1980 * accordingly. 1981 */ 1982 leaf->key[0] = page; 1983 leaf->value[0] += count; 1984 goto success; 1985 } else { 1986 /* 1987 * The interval doesn't adjoin with the right interval. 1988 * It must be added individually. 1989 */ 1990 btree_insert(&area->used_space, page, (void *) count, 1991 leaf); 1992 goto success; 1993 } 1994 } 1995 1996 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1997 if (node) { 1998 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1999 uintptr_t right_pg = node->key[0]; 2000 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 2001 size_t right_cnt = (size_t) node->value[0]; 2002 2003 /* 2004 * Examine the possibility that the interval fits 2005 * somewhere between the leftmost interval of 2006 * the right neigbour and the last interval of the leaf. 2007 */ 2008 2009 if (page < left_pg) { 2010 /* Do nothing. */ 2011 } else if (overlaps(page, P2SZ(count), left_pg, 2012 P2SZ(left_cnt))) { 2013 /* The interval intersects with the left interval. */ 2014 return false; 2015 } else if (overlaps(page, P2SZ(count), right_pg, 2016 P2SZ(right_cnt))) { 2017 /* The interval intersects with the right interval. */ 2018 return false; 2019 } else if ((page == left_pg + P2SZ(left_cnt)) && 2020 (page + P2SZ(count) == right_pg)) { 2021 /* 2022 * The interval can be added by merging the two already 2023 * present intervals. 2024 */ 2025 leaf->value[leaf->keys - 1] += count + right_cnt; 2026 btree_remove(&area->used_space, right_pg, node); 2027 goto success; 2028 } else if (page == left_pg + P2SZ(left_cnt)) { 2029 /* 2030 * The interval can be added by simply growing the left 2031 * interval. 2032 */ 2033 leaf->value[leaf->keys - 1] += count; 2034 goto success; 2035 } else if (page + P2SZ(count) == right_pg) { 2036 /* 2037 * The interval can be addded by simply moving base of 2038 * the right interval down and increasing its size 2039 * accordingly. 2040 */ 2041 node->value[0] += count; 2042 node->key[0] = page; 2043 goto success; 2044 } else { 2045 /* 2046 * The interval is between both neigbouring intervals, 2047 * but cannot be merged with any of them. 2048 */ 2049 btree_insert(&area->used_space, page, (void *) count, 2050 leaf); 2051 goto success; 2052 } 2053 } else if (page >= leaf->key[leaf->keys - 1]) { 2054 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 2055 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 2056 2057 /* 2058 * Investigate the border case in which the right neighbour 2059 * does not exist but the interval fits from the right. 2060 */ 2061 2062 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) { 2063 /* The interval intersects with the left interval. */ 2064 return false; 2065 } else if (left_pg + P2SZ(left_cnt) == page) { 2066 /* 2067 * The interval can be added by growing the left 2068 * interval. 2069 */ 2070 leaf->value[leaf->keys - 1] += count; 2071 goto success; 2072 } else { 2073 /* 2074 * The interval doesn't adjoin with the left interval. 2075 * It must be added individually. 
2076 */ 2077 btree_insert(&area->used_space, page, (void *) count, 2078 leaf); 2079 goto success; 2080 } 2081 } 2082 2083 /* 2084 * Note that if the algorithm made it thus far, the interval can fit 2085 * only between two other intervals of the leaf. The two border cases 2086 * were already resolved. 2087 */ 2088 btree_key_t i; 2089 for (i = 1; i < leaf->keys; i++) { 2090 if (page < leaf->key[i]) { 2091 uintptr_t left_pg = leaf->key[i - 1]; 2092 uintptr_t right_pg = leaf->key[i]; 2093 size_t left_cnt = (size_t) leaf->value[i - 1]; 2094 size_t right_cnt = (size_t) leaf->value[i]; 2095 2096 /* 2097 * The interval fits between left_pg and right_pg. 2098 */ 2099 2100 if (overlaps(page, P2SZ(count), left_pg, 2101 P2SZ(left_cnt))) { 2102 /* 2103 * The interval intersects with the left 2104 * interval. 2105 */ 2106 return false; 2107 } else if (overlaps(page, P2SZ(count), right_pg, 2108 P2SZ(right_cnt))) { 2109 /* 2110 * The interval intersects with the right 2111 * interval. 2112 */ 2113 return false; 2114 } else if ((page == left_pg + P2SZ(left_cnt)) && 2115 (page + P2SZ(count) == right_pg)) { 2116 /* 2117 * The interval can be added by merging the two 2118 * already present intervals. 2119 */ 2120 leaf->value[i - 1] += count + right_cnt; 2121 btree_remove(&area->used_space, right_pg, leaf); 2122 goto success; 2123 } else if (page == left_pg + P2SZ(left_cnt)) { 2124 /* 2125 * The interval can be added by simply growing 2126 * the left interval. 2127 */ 2128 leaf->value[i - 1] += count; 2129 goto success; 2130 } else if (page + P2SZ(count) == right_pg) { 2131 /* 2132 * The interval can be addded by simply moving 2133 * base of the right interval down and 2134 * increasing its size accordingly. 2135 */ 2136 leaf->value[i] += count; 2137 leaf->key[i] = page; 2138 goto success; 2139 } else { 2140 /* 2141 * The interval is between both neigbouring 2142 * intervals, but cannot be merged with any of 2143 * them. 2144 */ 2145 btree_insert(&area->used_space, page, 2146 (void *) count, leaf); 2147 goto success; 2148 } 2149 } 2150 } 2151 2152 panic("Inconsistency detected while adding %zu pages of used " 2153 "space at %p.", count, (void *) page); 2154 2155 success: 2156 area->resident += count; 2157 return true; 2158 } 2159 2160 /** Mark portion of address space area as unused. 2161 * 2162 * The address space area must be already locked. 2163 * 2164 * @param area Address space area. 2165 * @param page First page to be marked. 2166 * @param count Number of page to be marked. 2167 * 2168 * @return False on failure or true on success. 2169 * 2170 */ 2171 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 2172 { 2173 assert(mutex_locked(&area->lock)); 2174 assert(IS_ALIGNED(page, PAGE_SIZE)); 2175 assert(count); 2176 2177 btree_node_t *leaf; 2178 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 2179 if (pages) { 2180 /* 2181 * We are lucky, page is the beginning of some interval. 2182 */ 2183 if (count > pages) { 2184 return false; 2185 } else if (count == pages) { 2186 btree_remove(&area->used_space, page, leaf); 2187 goto success; 2188 } else { 2189 /* 2190 * Find the respective interval. 2191 * Decrease its size and relocate its start address. 
2192 */ 2193 btree_key_t i; 2194 for (i = 0; i < leaf->keys; i++) { 2195 if (leaf->key[i] == page) { 2196 leaf->key[i] += P2SZ(count); 2197 leaf->value[i] -= count; 2198 goto success; 2199 } 2200 } 2201 2202 goto error; 2203 } 2204 } 2205 2206 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, 2207 leaf); 2208 if ((node) && (page < leaf->key[0])) { 2209 uintptr_t left_pg = node->key[node->keys - 1]; 2210 size_t left_cnt = (size_t) node->value[node->keys - 1]; 2211 2212 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 2213 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 2214 /* 2215 * The interval is contained in the rightmost 2216 * interval of the left neighbour and can be 2217 * removed by updating the size of the bigger 2218 * interval. 2219 */ 2220 node->value[node->keys - 1] -= count; 2221 goto success; 2222 } else if (page + P2SZ(count) < 2223 left_pg + P2SZ(left_cnt)) { 2224 size_t new_cnt; 2225 2226 /* 2227 * The interval is contained in the rightmost 2228 * interval of the left neighbour but its 2229 * removal requires both updating the size of 2230 * the original interval and also inserting a 2231 * new interval. 2232 */ 2233 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2234 (page + P2SZ(count))) >> PAGE_WIDTH; 2235 node->value[node->keys - 1] -= count + new_cnt; 2236 btree_insert(&area->used_space, page + 2237 P2SZ(count), (void *) new_cnt, leaf); 2238 goto success; 2239 } 2240 } 2241 2066 2067 /* Check for conflict with right interval */ 2068 if (b != NULL && overlaps(page, P2SZ(count), b->page, P2SZ(b->count))) 2242 2069 return false; 2243 } else if (page < leaf->key[0]) 2244 return false; 2245 2246 if (page > leaf->key[leaf->keys - 1]) { 2247 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 2248 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 2249 2250 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 2251 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 2252 /* 2253 * The interval is contained in the rightmost 2254 * interval of the leaf and can be removed by 2255 * updating the size of the bigger interval. 2256 */ 2257 leaf->value[leaf->keys - 1] -= count; 2258 goto success; 2259 } else if (page + P2SZ(count) < left_pg + 2260 P2SZ(left_cnt)) { 2261 size_t new_cnt; 2262 2263 /* 2264 * The interval is contained in the rightmost 2265 * interval of the leaf but its removal 2266 * requires both updating the size of the 2267 * original interval and also inserting a new 2268 * interval. 2269 */ 2270 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2271 (page + P2SZ(count))) >> PAGE_WIDTH; 2272 leaf->value[leaf->keys - 1] -= count + new_cnt; 2273 btree_insert(&area->used_space, page + 2274 P2SZ(count), (void *) new_cnt, leaf); 2275 goto success; 2276 } 2277 } 2278 2279 return false; 2280 } 2281 2282 /* 2283 * The border cases have been already resolved. 2284 * Now the interval can be only between intervals of the leaf. 2285 */ 2286 btree_key_t i; 2287 for (i = 1; i < leaf->keys - 1; i++) { 2288 if (page < leaf->key[i]) { 2289 uintptr_t left_pg = leaf->key[i - 1]; 2290 size_t left_cnt = (size_t) leaf->value[i - 1]; 2291 2292 /* 2293 * Now the interval is between intervals corresponding 2294 * to (i - 1) and i. 2295 */ 2296 if (overlaps(left_pg, P2SZ(left_cnt), page, 2297 P2SZ(count))) { 2298 if (page + P2SZ(count) == 2299 left_pg + P2SZ(left_cnt)) { 2300 /* 2301 * The interval is contained in the 2302 * interval (i - 1) of the leaf and can 2303 * be removed by updating the size of 2304 * the bigger interval. 
2305 */ 2306 leaf->value[i - 1] -= count; 2307 goto success; 2308 } else if (page + P2SZ(count) < 2309 left_pg + P2SZ(left_cnt)) { 2310 size_t new_cnt; 2311 2312 /* 2313 * The interval is contained in the 2314 * interval (i - 1) of the leaf but its 2315 * removal requires both updating the 2316 * size of the original interval and 2317 * also inserting a new interval. 2318 */ 2319 new_cnt = ((left_pg + P2SZ(left_cnt)) - 2320 (page + P2SZ(count))) >> 2321 PAGE_WIDTH; 2322 leaf->value[i - 1] -= count + new_cnt; 2323 btree_insert(&area->used_space, page + 2324 P2SZ(count), (void *) new_cnt, 2325 leaf); 2326 goto success; 2327 } 2328 } 2329 2330 return false; 2331 } 2332 } 2333 2334 error: 2335 panic("Inconsistency detected while removing %zu pages of used " 2336 "space from %p.", count, (void *) page); 2337 2338 success: 2339 area->resident -= count; 2070 2071 /* Check if A is adjacent to the new interval */ 2072 adj_a = (a != NULL) && (a->page + P2SZ(a->count) == page); 2073 /* Check if the new interval is adjacent to B*/ 2074 adj_b = (b != NULL) && page + P2SZ(count) == b->page; 2075 2076 if (adj_a && adj_b) { 2077 /* Fuse into a single interval */ 2078 a->count += count + b->count; 2079 used_space_remove_ival(b); 2080 } else if (adj_a) { 2081 /* Append to A */ 2082 a->count += count; 2083 } else if (adj_b) { 2084 /* Prepend to B */ 2085 b->page = page; 2086 b->count += count; 2087 } else { 2088 /* Create new interval */ 2089 ival = slab_alloc(used_space_ival_cache, 0); 2090 ival->used_space = used_space; 2091 odlink_initialize(&ival->lused_space); 2092 ival->page = page; 2093 ival->count = count; 2094 2095 odict_insert(&ival->lused_space, &used_space->ivals, 2096 NULL); 2097 } 2098 2099 used_space->pages += count; 2340 2100 return true; 2341 2101 } -
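In as.c above, the new used_space_insert() no longer descends B+tree leaves. It locates the interval immediately to the left of the new range via the ordered dictionary, takes that interval's successor as the right neighbour, rejects any overlap, and otherwise either extends one neighbour, fuses both neighbours into a single interval, or allocates a fresh interval, updating the cached page total in every case. The following userspace model of that coalescing logic is only a sketch: it substitutes a sorted array and memmove() for the kernel's odict_t and slab cache, and all names (uspace_insert and so on) are illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096
#define P2SZ(n)   ((uintptr_t)(n) * PAGE_SIZE)

typedef struct {
	uintptr_t page;
	size_t count;
} ival_t;

typedef struct {
	ival_t ivals[32];
	size_t nivals;
	size_t pages;
} uspace_t;

static bool overlaps(uintptr_t s1, uintptr_t sz1, uintptr_t s2, uintptr_t sz2)
{
	return s1 < s2 + sz2 && s2 < s1 + sz1;
}

/* Model of used_space_insert(): keep intervals disjoint, sorted and maximal. */
static bool uspace_insert(uspace_t *us, uintptr_t page, size_t count)
{
	/* Find index of the first interval starting at or above @page. */
	size_t i = 0;
	while (i < us->nivals && us->ivals[i].page < page)
		i++;

	ival_t *a = (i > 0) ? &us->ivals[i - 1] : NULL;        /* left neighbour */
	ival_t *b = (i < us->nivals) ? &us->ivals[i] : NULL;   /* right neighbour */

	/* Any overlap with a neighbour means the pages are already used. */
	if (a && overlaps(a->page, P2SZ(a->count), page, P2SZ(count)))
		return false;
	if (b && overlaps(page, P2SZ(count), b->page, P2SZ(b->count)))
		return false;

	bool adj_a = a && a->page + P2SZ(a->count) == page;
	bool adj_b = b && page + P2SZ(count) == b->page;

	if (adj_a && adj_b) {
		/* New range bridges both neighbours: fuse into one interval. */
		a->count += count + b->count;
		memmove(b, b + 1, (us->nivals - i - 1) * sizeof(ival_t));
		us->nivals--;
	} else if (adj_a) {
		a->count += count;               /* append to the left interval */
	} else if (adj_b) {
		b->page = page;                  /* prepend to the right interval */
		b->count += count;
	} else {
		/* No adjacency: open a slot at position i for a new interval. */
		assert(us->nivals < 32);
		memmove(&us->ivals[i + 1], &us->ivals[i],
		    (us->nivals - i) * sizeof(ival_t));
		us->ivals[i] = (ival_t){ page, count };
		us->nivals++;
	}

	us->pages += count;
	return true;
}

int main(void)
{
	uspace_t us = { .nivals = 0, .pages = 0 };

	assert(uspace_insert(&us, 0x1000, 1));
	assert(uspace_insert(&us, 0x3000, 1));
	assert(uspace_insert(&us, 0x2000, 1));   /* bridges the two intervals */
	assert(!uspace_insert(&us, 0x1000, 1));  /* overlap is rejected */

	assert(us.nivals == 1 && us.pages == 3);
	return 0;
}

Because shortening or removing an interval never allocates memory, the resize path above can now start the TLB shootdown sequence once, before the loop, instead of restarting it for every interval as the old B+tree code had to.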
kernel/generic/src/mm/backend_anon.c
rde9a18e → r2fc3b2d

 #include <synch/mutex.h>
 #include <adt/list.h>
-#include <adt/btree.h>
 #include <errno.h>
 #include <typedefs.h>
… …
 	 */
 	mutex_lock(&area->sh_info->lock);
-	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-	    node) {
-		unsigned int i;
-
-		for (i = 0; i < node->keys; i++) {
-			uintptr_t base = node->key[i];
-			size_t count = (size_t) node->value[i];
-			unsigned int j;
-
-			for (j = 0; j < count; j++) {
-				pte_t pte;
-				bool found;
-
-				page_table_lock(area->as, false);
-				found = page_mapping_find(area->as,
-				    base + P2SZ(j), false, &pte);
-
-				(void)found;
-				assert(found);
-				assert(PTE_VALID(&pte));
-				assert(PTE_PRESENT(&pte));
-
-				as_pagemap_insert(&area->sh_info->pagemap,
-				    (base + P2SZ(j)) - area->base,
-				    PTE_GET_FRAME(&pte));
-				page_table_unlock(area->as, false);
-
-				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
-				frame_reference_add(pfn);
-			}
-
+	used_space_ival_t *ival = used_space_first(&area->used_space);
+	while (ival != NULL) {
+		uintptr_t base = ival->page;
+		size_t count = ival->count;
+		unsigned int j;
+
+		for (j = 0; j < count; j++) {
+			pte_t pte;
+			bool found;
+
+			page_table_lock(area->as, false);
+			found = page_mapping_find(area->as, base + P2SZ(j),
+			    false, &pte);
+
+			(void)found;
+			assert(found);
+			assert(PTE_VALID(&pte));
+			assert(PTE_PRESENT(&pte));
+
+			as_pagemap_insert(&area->sh_info->pagemap,
+			    (base + P2SZ(j)) - area->base, PTE_GET_FRAME(&pte));
+			page_table_unlock(area->as, false);
+
+			pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
+			frame_reference_add(pfn);
 		}
+
+		ival = used_space_next(ival);
 	}
 	mutex_unlock(&area->sh_info->lock);
… …
 	 */
 	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, upage, 1))
+	if (!used_space_insert(&area->used_space, upage, 1))
 		panic("Cannot insert used space.");
 
kernel/generic/src/mm/backend_elf.c
rde9a18e r2fc3b2d 153 153 { 154 154 elf_segment_header_t *entry = area->backend_data.segment; 155 link_t *cur;156 btree_node_t *leaf, *node;155 used_space_ival_t *start; 156 used_space_ival_t *cur; 157 157 uintptr_t start_anon = entry->p_vaddr + entry->p_filesz; 158 158 … … 164 164 */ 165 165 if (area->flags & AS_AREA_WRITE) { 166 node = list_get_instance(list_first(&area->used_space.leaf_list), 167 btree_node_t, leaf_link); 166 start = used_space_first(&area->used_space); 168 167 } else { 169 (void) btree_search(&area->used_space, start_anon, &leaf); 170 node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 171 if (!node) 172 node = leaf; 168 /* Find first interval containing addresses >= start_anon */ 169 start = used_space_find_gteq(&area->used_space, start_anon); 173 170 } 174 171 … … 177 174 */ 178 175 mutex_lock(&area->sh_info->lock); 179 for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head; 180 cur = cur->next) { 176 cur = start; 177 while (cur != NULL) { 178 uintptr_t base = cur->page; 179 size_t count = cur->count; 181 180 unsigned int i; 182 181 183 node = list_get_instance(cur, btree_node_t, leaf_link); 184 185 for (i = 0; i < node->keys; i++) { 186 uintptr_t base = node->key[i]; 187 size_t count = (size_t) node->value[i]; 188 unsigned int j; 182 /* 183 * Skip read-only areas of used space that are backed 184 * by the ELF image. 185 */ 186 if (!(area->flags & AS_AREA_WRITE)) 187 if (base >= entry->p_vaddr && 188 base + P2SZ(count) <= start_anon) 189 continue; 190 191 for (i = 0; i < count; i++) { 192 pte_t pte; 193 bool found; 189 194 190 195 /* 191 * Skip read-only areas of used space that are backed192 * by theELF image.196 * Skip read-only pages that are backed by the 197 * ELF image. 193 198 */ 194 199 if (!(area->flags & AS_AREA_WRITE)) 195 200 if (base >= entry->p_vaddr && 196 base + P2SZ( count) <= start_anon)201 base + P2SZ(i + 1) <= start_anon) 197 202 continue; 198 203 199 for (j = 0; j < count; j++) { 200 pte_t pte; 201 bool found; 202 203 /* 204 * Skip read-only pages that are backed by the 205 * ELF image. 
206 */ 207 if (!(area->flags & AS_AREA_WRITE)) 208 if (base >= entry->p_vaddr && 209 base + P2SZ(j + 1) <= start_anon) 210 continue; 211 212 page_table_lock(area->as, false); 213 found = page_mapping_find(area->as, 214 base + P2SZ(j), false, &pte); 215 216 (void) found; 217 assert(found); 218 assert(PTE_VALID(&pte)); 219 assert(PTE_PRESENT(&pte)); 220 221 as_pagemap_insert(&area->sh_info->pagemap, 222 (base + P2SZ(j)) - area->base, 223 PTE_GET_FRAME(&pte)); 224 page_table_unlock(area->as, false); 225 226 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 227 frame_reference_add(pfn); 228 } 229 204 page_table_lock(area->as, false); 205 found = page_mapping_find(area->as, 206 base + P2SZ(i), false, &pte); 207 208 (void) found; 209 assert(found); 210 assert(PTE_VALID(&pte)); 211 assert(PTE_PRESENT(&pte)); 212 213 as_pagemap_insert(&area->sh_info->pagemap, 214 (base + P2SZ(i)) - area->base, 215 PTE_GET_FRAME(&pte)); 216 page_table_unlock(area->as, false); 217 218 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 219 frame_reference_add(pfn); 230 220 } 231 } 221 222 cur = used_space_next(cur); 223 } 224 232 225 mutex_unlock(&area->sh_info->lock); 233 226 } … … 310 303 page_mapping_insert(AS, upage, frame, 311 304 as_area_get_flags(area)); 312 if (!used_space_insert( area, upage, 1))305 if (!used_space_insert(&area->used_space, upage, 1)) 313 306 panic("Cannot insert used space."); 314 307 mutex_unlock(&area->sh_info->lock); … … 405 398 406 399 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 407 if (!used_space_insert( area, upage, 1))400 if (!used_space_insert(&area->used_space, upage, 1)) 408 401 panic("Cannot insert used space."); 409 402 -
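backend_elf.c now picks its starting interval with used_space_find_gteq(): a writable area starts from the first interval, while a read-only area starts from the first interval that still contains addresses at or above start_anon. Per the implementation added in as.c, the lookup means "take the last interval starting below the address; if it extends past the address, return it, otherwise return its successor". A small standalone model of that rule, again with a sorted array and illustrative names rather than the kernel's odict_t:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define P2SZ(n)   ((uintptr_t)(n) * PAGE_SIZE)

typedef struct {
	uintptr_t page;
	size_t count;
} ival_t;

/* Model of used_space_find_gteq(): first interval containing addresses >= ptr.
 * The intervals are disjoint and sorted by start address. */
static const ival_t *find_gteq(const ival_t *ivals, size_t n, uintptr_t ptr)
{
	/* Last interval starting strictly below ptr. */
	const ival_t *below = NULL;
	for (size_t i = 0; i < n && ivals[i].page < ptr; i++)
		below = &ivals[i];

	/* If it reaches past ptr, it is the answer. */
	if (below != NULL && below->page + P2SZ(below->count) > ptr)
		return below;

	/* Otherwise the next interval (the first one starting at or above ptr). */
	const ival_t *next = (below != NULL) ? below + 1 : ivals;
	return (next < ivals + n) ? next : NULL;
}

int main(void)
{
	const ival_t ivals[] = { { 0x1000, 2 }, { 0x8000, 1 } };

	assert(find_gteq(ivals, 2, 0x0000) == &ivals[0]);  /* before everything */
	assert(find_gteq(ivals, 2, 0x2000) == &ivals[0]);  /* inside the first run */
	assert(find_gteq(ivals, 2, 0x3000) == &ivals[1]);  /* in the gap */
	assert(find_gteq(ivals, 2, 0x9000) == NULL);       /* past everything */
	return 0;
}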
kernel/generic/src/mm/backend_phys.c
rde9a18e → r2fc3b2d

 	    as_area_get_flags(area));
 
-	if (!used_space_insert(area, upage, 1))
+	if (!used_space_insert(&area->used_space, upage, 1))
 		panic("Cannot insert used space.");
 
kernel/generic/src/mm/backend_user.c
rde9a18e → r2fc3b2d

 	uintptr_t frame = IPC_GET_ARG1(data);
 	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
-	if (!used_space_insert(area, upage, 1))
+	if (!used_space_insert(&area->used_space, upage, 1))
 		panic("Cannot insert used space.");
 
kernel/generic/src/sysinfo/stats.c
rde9a18e → r2fc3b2d

 			continue;
 
-		pages += area->resident;
+		pages += area->used_space.pages;
 		mutex_unlock(&area->lock);
 		area = as_area_next(area);