Changeset b7068da in mainline for kernel/generic/src/mm
- Timestamp:
- 2012-02-09T20:35:12Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/fix-logger-deadlock, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 591762c6
- Parents:
- 7cede12c (diff), 3d4750f (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src/mm
- Files:
-
- 1 added
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/mm/as.c
r7cede12c rb7068da 387 387 } 388 388 389 /** Return pointer to unmapped address space area 390 * 391 * The address space must be already locked when calling 392 * this function. 393 * 394 * @param as Address space. 395 * @param bound Lowest address bound. 396 * @param size Requested size of the allocation. 397 * 398 * @return Address of the beginning of unmapped address space area. 399 * @return -1 if no suitable address space area was found. 400 * 401 */ 402 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound, 403 size_t size) 404 { 405 ASSERT(mutex_locked(&as->lock)); 406 407 if (size == 0) 408 return (uintptr_t) -1; 409 410 /* 411 * Make sure we allocate from page-aligned 412 * address. Check for possible overflow in 413 * each step. 414 */ 415 416 size_t pages = SIZE2FRAMES(size); 417 418 /* 419 * Find the lowest unmapped address aligned on the size 420 * boundary, not smaller than bound and of the required size. 421 */ 422 423 /* First check the bound address itself */ 424 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE); 425 if ((addr >= bound) && 426 (check_area_conflicts(as, addr, pages, NULL))) 427 return addr; 428 429 /* Eventually check the addresses behind each area */ 430 list_foreach(as->as_area_btree.leaf_list, cur) { 431 btree_node_t *node = 432 list_get_instance(cur, btree_node_t, leaf_link); 433 434 for (btree_key_t i = 0; i < node->keys; i++) { 435 as_area_t *area = (as_area_t *) node->value[i]; 436 437 mutex_lock(&area->lock); 438 439 addr = 440 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE); 441 bool avail = 442 ((addr >= bound) && (addr >= area->base) && 443 (check_area_conflicts(as, addr, pages, area))); 444 445 mutex_unlock(&area->lock); 446 447 if (avail) 448 return addr; 449 } 450 } 451 452 /* No suitable address space area found */ 453 return (uintptr_t) -1; 454 } 455 389 456 /** Create address space area of common attributes. 390 457 * … … 394 461 * @param flags Flags of the area memory. 
395 462 * @param size Size of area. 396 * @param base Base address of area.397 463 * @param attrs Attributes of the area. 398 464 * @param backend Address space area backend. NULL if no backend is used. 399 465 * @param backend_data NULL or a pointer to an array holding two void *. 466 * @param base Starting virtual address of the area. 467 * If set to -1, a suitable mappable area is found. 468 * @param bound Lowest address bound if base is set to -1. 469 * Otherwise ignored. 400 470 * 401 471 * @return Address space area on success or NULL on failure. … … 403 473 */ 404 474 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 405 u intptr_t base, unsigned int attrs, mem_backend_t *backend,406 mem_backend_data_t *backend_data )407 { 408 if (( base % PAGE_SIZE) != 0)475 unsigned int attrs, mem_backend_t *backend, 476 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound) 477 { 478 if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0)) 409 479 return NULL; 410 480 … … 420 490 mutex_lock(&as->lock); 421 491 422 if (!check_area_conflicts(as, base, pages, NULL)) { 492 if (*base == (uintptr_t) -1) { 493 *base = as_get_unmapped_area(as, bound, size); 494 if (*base == (uintptr_t) -1) { 495 mutex_unlock(&as->lock); 496 return NULL; 497 } 498 } 499 500 if (!check_area_conflicts(as, *base, pages, NULL)) { 423 501 mutex_unlock(&as->lock); 424 502 return NULL; … … 434 512 area->pages = pages; 435 513 area->resident = 0; 436 area->base = base;514 area->base = *base; 437 515 area->sh_info = NULL; 438 516 area->backend = backend; … … 452 530 453 531 btree_create(&area->used_space); 454 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 532 btree_insert(&as->as_area_btree, *base, (void *) area, 533 NULL); 455 534 456 535 mutex_unlock(&as->lock); … … 860 939 * @param acc_size Expected size of the source area. 861 940 * @param dst_as Pointer to destination address space. 
862 * @param dst_base Target base address.863 941 * @param dst_flags_mask Destination address space area flags mask. 942 * @param dst_base Target base address. If set to -1, 943 * a suitable mappable area is found. 944 * @param bound Lowest address bound if dst_base is set to -1. 945 * Otherwise ignored. 864 946 * 865 947 * @return Zero on success. … … 873 955 */ 874 956 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 875 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 957 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base, 958 uintptr_t bound) 876 959 { 877 960 mutex_lock(&src_as->lock); … … 945 1028 * to support sharing in less privileged mode. 946 1029 */ 947 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 948 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 1030 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, 1031 src_size, AS_AREA_ATTR_PARTIAL, src_backend, 1032 &src_backend_data, dst_base, bound); 949 1033 if (!dst_area) { 950 1034 /* … … 1955 2039 */ 1956 2040 1957 /** Wrapper for as_area_create(). */ 1958 sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)1959 { 1960 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,1961 AS_AREA_ATTR_NONE, &anon_backend, NULL))1962 return (sysarg_t) address;1963 else2041 sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags, 2042 uintptr_t bound) 2043 { 2044 uintptr_t virt = base; 2045 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size, 2046 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound); 2047 if (area == NULL) 1964 2048 return (sysarg_t) -1; 1965 } 1966 1967 /** Wrapper for as_area_resize(). */ 2049 2050 return (sysarg_t) virt; 2051 } 2052 1968 2053 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1969 2054 { … … 1971 2056 } 1972 2057 1973 /** Wrapper for as_area_change_flags(). 
*/1974 2058 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1975 2059 { … … 1977 2061 } 1978 2062 1979 /** Wrapper for as_area_destroy(). */1980 2063 sysarg_t sys_as_area_destroy(uintptr_t address) 1981 2064 { 1982 2065 return (sysarg_t) as_area_destroy(AS, address); 1983 }1984 1985 /** Return pointer to unmapped address space area1986 *1987 * @param base Lowest address bound.1988 * @param size Requested size of the allocation.1989 *1990 * @return Pointer to the beginning of unmapped address space area.1991 *1992 */1993 sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)1994 {1995 if (size == 0)1996 return 0;1997 1998 /*1999 * Make sure we allocate from page-aligned2000 * address. Check for possible overflow in2001 * each step.2002 */2003 2004 size_t pages = SIZE2FRAMES(size);2005 uintptr_t ret = 0;2006 2007 /*2008 * Find the lowest unmapped address aligned on the sz2009 * boundary, not smaller than base and of the required size.2010 */2011 2012 mutex_lock(&AS->lock);2013 2014 /* First check the base address itself */2015 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);2016 if ((addr >= base) &&2017 (check_area_conflicts(AS, addr, pages, NULL)))2018 ret = addr;2019 2020 /* Eventually check the addresses behind each area */2021 list_foreach(AS->as_area_btree.leaf_list, cur) {2022 if (ret != 0)2023 break;2024 2025 btree_node_t *node =2026 list_get_instance(cur, btree_node_t, leaf_link);2027 2028 btree_key_t i;2029 for (i = 0; (ret == 0) && (i < node->keys); i++) {2030 uintptr_t addr;2031 2032 as_area_t *area = (as_area_t *) node->value[i];2033 2034 mutex_lock(&area->lock);2035 2036 addr = ALIGN_UP(area->base + P2SZ(area->pages),2037 PAGE_SIZE);2038 2039 if ((addr >= base) && (addr >= area->base) &&2040 (check_area_conflicts(AS, addr, pages, area)))2041 ret = addr;2042 2043 mutex_unlock(&area->lock);2044 }2045 }2046 2047 mutex_unlock(&AS->lock);2048 2049 return (sysarg_t) ret;2050 2066 } 2051 2067 -
kernel/generic/src/mm/backend_anon.c
r7cede12c rb7068da 44 44 #include <mm/frame.h> 45 45 #include <mm/slab.h> 46 #include <mm/km.h> 46 47 #include <synch/mutex.h> 47 48 #include <adt/list.h> … … 155 156 int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 156 157 { 158 uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE); 159 uintptr_t kpage; 157 160 uintptr_t frame; 158 161 … … 175 178 mutex_lock(&area->sh_info->lock); 176 179 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 177 ALIGN_DOWN(addr, PAGE_SIZE)- area->base, &leaf);180 upage - area->base, &leaf); 178 181 if (!frame) { 179 182 bool allocate = true; … … 185 188 */ 186 189 for (i = 0; i < leaf->keys; i++) { 187 if (leaf->key[i] == 188 ALIGN_DOWN(addr, PAGE_SIZE) - area->base) { 190 if (leaf->key[i] == upage - area->base) { 189 191 allocate = false; 190 192 break; … … 192 194 } 193 195 if (allocate) { 194 frame = (uintptr_t) frame_alloc_noreserve( 195 ONE_FRAME, 0); 196 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 196 kpage = km_temporary_page_get(&frame, 197 FRAME_NO_RESERVE); 198 memsetb((void *) kpage, PAGE_SIZE, 0); 199 km_temporary_page_put(kpage); 197 200 198 201 /* … … 201 204 */ 202 205 btree_insert(&area->sh_info->pagemap, 203 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, 204 (void *) frame, leaf); 206 upage - area->base, (void *) frame, leaf); 205 207 } 206 208 } … … 223 225 * the different causes 224 226 */ 225 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 226 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 227 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 228 memsetb((void *) kpage, PAGE_SIZE, 0); 229 km_temporary_page_put(kpage); 227 230 } 228 231 229 232 /* 230 * Map ' page' to 'frame'.233 * Map 'upage' to 'frame'. 231 234 * Note that TLB shootdown is not attempted as only new information is 232 235 * being inserted into page tables. 
233 236 */ 234 page_mapping_insert(AS, addr, frame, as_area_get_flags(area));235 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))237 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 238 if (!used_space_insert(area, upage, 1)) 236 239 panic("Cannot insert used space."); 237 240 -
kernel/generic/src/mm/backend_elf.c
r7cede12c rb7068da 44 44 #include <mm/page.h> 45 45 #include <mm/reserve.h> 46 #include <mm/km.h> 46 47 #include <genarch/mm/page_pt.h> 47 48 #include <genarch/mm/page_ht.h> … … 229 230 elf_segment_header_t *entry = area->backend_data.segment; 230 231 btree_node_t *leaf; 231 uintptr_t base, frame, page, start_anon; 232 uintptr_t base; 233 uintptr_t frame; 234 uintptr_t kpage; 235 uintptr_t upage; 236 uintptr_t start_anon; 232 237 size_t i; 233 238 bool dirty = false; … … 249 254 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 250 255 251 /* Virtual address of faulting page */252 page = ALIGN_DOWN(addr, PAGE_SIZE);256 /* Virtual address of faulting page */ 257 upage = ALIGN_DOWN(addr, PAGE_SIZE); 253 258 254 259 /* Virtual address of the end of initialized part of segment */ … … 264 269 mutex_lock(&area->sh_info->lock); 265 270 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 266 page - area->base, &leaf);271 upage - area->base, &leaf); 267 272 if (!frame) { 268 273 unsigned int i; … … 273 278 274 279 for (i = 0; i < leaf->keys; i++) { 275 if (leaf->key[i] == page - area->base) {280 if (leaf->key[i] == upage - area->base) { 276 281 found = true; 277 282 break; … … 281 286 if (frame || found) { 282 287 frame_reference_add(ADDR2PFN(frame)); 283 page_mapping_insert(AS, addr, frame,288 page_mapping_insert(AS, upage, frame, 284 289 as_area_get_flags(area)); 285 if (!used_space_insert(area, page, 1))290 if (!used_space_insert(area, upage, 1)) 286 291 panic("Cannot insert used space."); 287 292 mutex_unlock(&area->sh_info->lock); … … 294 299 * mapping. 295 300 */ 296 if ( page >= entry->p_vaddr &&page + PAGE_SIZE <= start_anon) {301 if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) { 297 302 /* 298 303 * Initialized portion of the segment. 
The memory is backed … … 304 309 */ 305 310 if (entry->p_flags & PF_W) { 306 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);307 memcpy((void *) PA2KA(frame),308 (void *) (base + i * FRAME_SIZE), FRAME_SIZE);311 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 312 memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE), 313 PAGE_SIZE); 309 314 if (entry->p_flags & PF_X) { 310 smc_coherence_block((void *) PA2KA(frame), 311 FRAME_SIZE); 315 smc_coherence_block((void *) kpage, PAGE_SIZE); 312 316 } 317 km_temporary_page_put(kpage); 313 318 dirty = true; 314 319 } else { 315 frame = KA2PA(base + i * FRAME_SIZE); 320 pte_t *pte = page_mapping_find(AS_KERNEL, 321 base + i * FRAME_SIZE, true); 322 323 ASSERT(pte); 324 ASSERT(PTE_PRESENT(pte)); 325 326 frame = PTE_GET_FRAME(pte); 316 327 } 317 } else if ( page >= start_anon) {328 } else if (upage >= start_anon) { 318 329 /* 319 330 * This is the uninitialized portion of the segment. … … 322 333 * and cleared. 323 334 */ 324 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 325 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 335 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 336 memsetb((void *) kpage, PAGE_SIZE, 0); 337 km_temporary_page_put(kpage); 326 338 dirty = true; 327 339 } else { … … 334 346 * (The segment can be and often is shorter than 1 page). 
335 347 */ 336 if ( page < entry->p_vaddr)337 pad_lo = entry->p_vaddr - page;348 if (upage < entry->p_vaddr) 349 pad_lo = entry->p_vaddr - upage; 338 350 else 339 351 pad_lo = 0; 340 352 341 if (start_anon < page + PAGE_SIZE)342 pad_hi = page + PAGE_SIZE - start_anon;353 if (start_anon < upage + PAGE_SIZE) 354 pad_hi = upage + PAGE_SIZE - start_anon; 343 355 else 344 356 pad_hi = 0; 345 357 346 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);347 memcpy((void *) ( PA2KA(frame)+ pad_lo),348 (void *) (base + i * FRAME_SIZE + pad_lo),349 FRAME_SIZE - pad_lo - pad_hi);358 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 359 memcpy((void *) (kpage + pad_lo), 360 (void *) (base + i * PAGE_SIZE + pad_lo), 361 PAGE_SIZE - pad_lo - pad_hi); 350 362 if (entry->p_flags & PF_X) { 351 smc_coherence_block((void *) ( PA2KA(frame)+ pad_lo),352 FRAME_SIZE - pad_lo - pad_hi);363 smc_coherence_block((void *) (kpage + pad_lo), 364 PAGE_SIZE - pad_lo - pad_hi); 353 365 } 354 memsetb((void *) PA2KA(frame), pad_lo, 0);355 memsetb((void *) ( PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,356 0);366 memsetb((void *) kpage, pad_lo, 0); 367 memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0); 368 km_temporary_page_put(kpage); 357 369 dirty = true; 358 370 } … … 360 372 if (dirty && area->sh_info) { 361 373 frame_reference_add(ADDR2PFN(frame)); 362 btree_insert(&area->sh_info->pagemap, page - area->base,374 btree_insert(&area->sh_info->pagemap, upage - area->base, 363 375 (void *) frame, leaf); 364 376 } … … 367 379 mutex_unlock(&area->sh_info->lock); 368 380 369 page_mapping_insert(AS, addr, frame, as_area_get_flags(area));370 if (!used_space_insert(area, page, 1))381 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 382 if (!used_space_insert(area, upage, 1)) 371 383 panic("Cannot insert used space."); 372 384 -
kernel/generic/src/mm/frame.c
r7cede12c rb7068da 240 240 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 241 241 { 242 return ( zone_flags_available(zone->flags)243 &&buddy_system_can_alloc(zone->buddy_system, order));242 return ((zone->flags & ZONE_AVAILABLE) && 243 buddy_system_can_alloc(zone->buddy_system, order)); 244 244 } 245 245 … … 265 265 * Check whether the zone meets the search criteria. 266 266 */ 267 if ( (zones.info[i].flags & flags) == flags) {267 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) { 268 268 /* 269 269 * Check if the zone has 2^order frames area available. … … 460 460 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 461 461 { 462 ASSERT(zone _flags_available(zone->flags));462 ASSERT(zone->flags & ZONE_AVAILABLE); 463 463 464 464 /* Allocate frames from zone buddy system */ … … 490 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 491 491 { 492 ASSERT(zone _flags_available(zone->flags));492 ASSERT(zone->flags & ZONE_AVAILABLE); 493 493 494 494 frame_t *frame = &zone->frames[frame_idx]; … … 518 518 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 519 519 { 520 ASSERT(zone _flags_available(zone->flags));520 ASSERT(zone->flags & ZONE_AVAILABLE); 521 521 522 522 frame_t *frame = zone_get_frame(zone, frame_idx); … … 549 549 buddy_system_t *buddy) 550 550 { 551 ASSERT(zone _flags_available(zones.info[z1].flags));552 ASSERT(zone _flags_available(zones.info[z2].flags));551 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); 552 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE); 553 553 ASSERT(zones.info[z1].flags == zones.info[z2].flags); 554 554 ASSERT(zones.info[z1].base < zones.info[z2].base); … … 645 645 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 646 646 { 647 ASSERT(zone _flags_available(zones.info[znum].flags));647 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 648 648 649 649 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); … … 681 681 size_t 
count) 682 682 { 683 ASSERT(zone _flags_available(zones.info[znum].flags));683 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 684 684 ASSERT(frame_idx + count < zones.info[znum].count); 685 685 … … 723 723 * set of flags 724 724 */ 725 if ((z1 >= zones.count) || (z2 >= zones.count) 726 || (z2 - z1 != 1) 727 || (!zone_flags_available(zones.info[z1].flags)) 728 || (!zone_flags_available(zones.info[z2].flags)) 729 || (zones.info[z1].flags != zones.info[z2].flags)) { 725 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) || 726 (zones.info[z1].flags != zones.info[z2].flags)) { 730 727 ret = false; 731 728 goto errout; … … 828 825 zone->buddy_system = buddy; 829 826 830 if ( zone_flags_available(flags)) {827 if (flags & ZONE_AVAILABLE) { 831 828 /* 832 829 * Compute order for buddy system and initialize … … 865 862 { 866 863 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); 864 } 865 866 /** Allocate external configuration frames from low memory. */ 867 pfn_t zone_external_conf_alloc(size_t count) 868 { 869 size_t size = zone_conf_size(count); 870 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1); 871 872 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, 873 FRAME_LOWMEM | FRAME_ATOMIC)); 867 874 } 868 875 … … 888 895 irq_spinlock_lock(&zones.lock, true); 889 896 890 if ( zone_flags_available(flags)) { /* Create available zone */897 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 891 898 /* Theoretically we could have NULL here, practically make sure 892 899 * nobody tries to do that. If some platform requires, remove … … 894 901 */ 895 902 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 903 904 /* Update the known end of physical memory. 
*/ 905 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 896 906 897 907 /* If confframe is supposed to be inside our zone, then make sure … … 914 924 for (i = 0; i < init.cnt; i++) 915 925 if (overlaps(addr, PFN2ADDR(confcount), 916 KA2PA(init.tasks[i].addr),926 init.tasks[i].paddr, 917 927 init.tasks[i].size)) { 918 928 overlap = true; … … 1232 1242 1233 1243 /* Tell the architecture to create some memory */ 1234 frame_ arch_init();1244 frame_low_arch_init(); 1235 1245 if (config.cpu_active == 1) { 1236 1246 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1241 1251 size_t i; 1242 1252 for (i = 0; i < init.cnt; i++) { 1243 pfn_t pfn = ADDR2PFN( KA2PA(init.tasks[i].addr));1253 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1244 1254 frame_mark_unavailable(pfn, 1245 1255 SIZE2FRAMES(init.tasks[i].size)); … … 1255 1265 frame_mark_unavailable(0, 1); 1256 1266 } 1267 frame_high_arch_init(); 1268 } 1269 1270 /** Adjust bounds of physical memory region according to low/high memory split. 1271 * 1272 * @param low[in] If true, the adujstment is performed to make the region 1273 * fit in the low memory. Otherwise the adjustment is 1274 * performed to make the region fit in the high memory. 1275 * @param basep[inout] Pointer to a variable which contains the region's base 1276 * address and which may receive the adjusted base address. 1277 * @param sizep[inout] Pointer to a variable which contains the region's size 1278 * and which may receive the adjusted size. 1279 * @retun True if the region still exists even after the 1280 * adjustment, false otherwise. 
1281 */ 1282 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1283 { 1284 uintptr_t limit = config.identity_size; 1285 1286 if (low) { 1287 if (*basep > limit) 1288 return false; 1289 if (*basep + *sizep > limit) 1290 *sizep = limit - *basep; 1291 } else { 1292 if (*basep + *sizep <= limit) 1293 return false; 1294 if (*basep <= limit) { 1295 *sizep -= limit - *basep; 1296 *basep = limit; 1297 } 1298 } 1299 return true; 1257 1300 } 1258 1301 … … 1293 1336 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1294 1337 1295 if (zone _flags_available(zones.info[i].flags)) {1338 if (zones.info[i].flags & ZONE_AVAILABLE) { 1296 1339 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count); 1297 1340 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count); … … 1344 1387 irq_spinlock_unlock(&zones.lock, true); 1345 1388 1346 bool available = zone_flags_available(flags);1389 bool available = ((flags & ZONE_AVAILABLE) != 0); 1347 1390 1348 1391 printf("%-4zu", i); … … 1356 1399 #endif 1357 1400 1358 printf(" %12zu %c%c%c ", count, 1359 available ? 'A' : ' ', 1360 (flags & ZONE_RESERVED) ? 'R' : ' ', 1361 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1401 printf(" %12zu %c%c%c%c%c ", count, 1402 available ? 'A' : '-', 1403 (flags & ZONE_RESERVED) ? 'R' : '-', 1404 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1405 (flags & ZONE_LOWMEM) ? 'L' : '-', 1406 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1362 1407 1363 1408 if (available) … … 1401 1446 irq_spinlock_unlock(&zones.lock, true); 1402 1447 1403 bool available = zone_flags_available(flags);1448 bool available = ((flags & ZONE_AVAILABLE) != 0); 1404 1449 1405 1450 uint64_t size; … … 1411 1456 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1412 1457 size, size_suffix); 1413 printf("Zone flags: %c%c%c\n", 1414 available ? 'A' : ' ', 1415 (flags & ZONE_RESERVED) ? 'R' : ' ', 1416 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1458 printf("Zone flags: %c%c%c%c%c\n", 1459 available ? 
'A' : '-', 1460 (flags & ZONE_RESERVED) ? 'R' : '-', 1461 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1462 (flags & ZONE_LOWMEM) ? 'L' : '-', 1463 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1417 1464 1418 1465 if (available) { -
kernel/generic/src/mm/page.c
r7cede12c rb7068da 53 53 * We assume that the other processors are either not using the mapping yet 54 54 * (i.e. during the bootstrap) or are executing the TLB shootdown code. While 55 * we don't care much about the former case, the processors in the latter case 55 * we don't care much about the former case, the processors in the latter case 56 56 * will do an implicit serialization by virtue of running the TLB shootdown 57 57 * interrupt handler. … … 74 74 #include <syscall/copy.h> 75 75 #include <errno.h> 76 #include <align.h> 76 77 77 78 /** Virtual operations for page subsystem. */ … … 81 82 { 82 83 page_arch_init(); 83 }84 85 /** Map memory structure86 *87 * Identity-map memory structure88 * considering possible crossings89 * of page boundaries.90 *91 * @param addr Address of the structure.92 * @param size Size of the structure.93 *94 */95 void map_structure(uintptr_t addr, size_t size)96 {97 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1)));98 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);99 100 size_t i;101 for (i = 0; i < cnt; i++)102 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE,103 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);104 105 /* Repel prefetched accesses to the old mapping. */106 memory_barrier();107 84 } 108 85 … … 176 153 } 177 154 155 /** Make the mapping shared by all page tables (not address spaces). 156 * 157 * @param base Starting virtual address of the range that is made global. 158 * @param size Size of the address range that is made global. 
159 */ 160 void page_mapping_make_global(uintptr_t base, size_t size) 161 { 162 ASSERT(page_mapping_operations); 163 ASSERT(page_mapping_operations->mapping_make_global); 164 165 return page_mapping_operations->mapping_make_global(base, size); 166 } 167 168 int page_find_mapping(uintptr_t virt, void **phys) 169 { 170 mutex_lock(&AS->lock); 171 172 pte_t *pte = page_mapping_find(AS, virt, false); 173 if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) { 174 mutex_unlock(&AS->lock); 175 return ENOENT; 176 } 177 178 *phys = (void *) PTE_GET_FRAME(pte) + 179 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 180 181 mutex_unlock(&AS->lock); 182 183 return EOK; 184 } 185 178 186 /** Syscall wrapper for getting mapping of a virtual page. 179 * 180 * @retval EOK Everything went find, @p uspace_frame and @p uspace_node 181 * contains correct values. 182 * @retval ENOENT Virtual address has no mapping. 183 */ 184 sysarg_t sys_page_find_mapping(uintptr_t virt_address, 185 uintptr_t *uspace_frame) 186 { 187 mutex_lock(&AS->lock); 188 189 pte_t *pte = page_mapping_find(AS, virt_address, false); 190 if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) { 191 mutex_unlock(&AS->lock); 192 193 return (sysarg_t) ENOENT; 194 } 195 196 uintptr_t phys_address = PTE_GET_FRAME(pte); 197 198 mutex_unlock(&AS->lock); 199 200 int rc = copy_to_uspace(uspace_frame, 201 &phys_address, sizeof(phys_address)); 202 if (rc != EOK) { 203 return (sysarg_t) rc; 204 } 205 206 return EOK; 187 * 188 * @return EOK on success. 189 * @return ENOENT if no virtual address mapping found. 190 * 191 */ 192 sysarg_t sys_page_find_mapping(uintptr_t virt, void *phys_ptr) 193 { 194 void *phys; 195 int rc = page_find_mapping(virt, &phys); 196 if (rc != EOK) 197 return rc; 198 199 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 200 return (sysarg_t) rc; 207 201 } 208 202 -
kernel/generic/src/mm/reserve.c
r7cede12c rb7068da 42 42 #include <typedefs.h> 43 43 #include <arch/types.h> 44 #include <debug.h> 45 46 static bool reserve_initialized = false; 44 47 45 48 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock"); … … 54 57 { 55 58 reserve = frame_total_free_get(); 59 reserve_initialized = true; 56 60 } 57 61 … … 67 71 { 68 72 bool reserved = false; 73 74 ASSERT(reserve_initialized); 69 75 70 76 irq_spinlock_lock(&reserve_lock, true); … … 111 117 void reserve_force_alloc(size_t size) 112 118 { 119 if (!reserve_initialized) 120 return; 121 113 122 irq_spinlock_lock(&reserve_lock, true); 114 123 reserve -= size; … … 122 131 void reserve_free(size_t size) 123 132 { 133 if (!reserve_initialized) 134 return; 135 124 136 irq_spinlock_lock(&reserve_lock, true); 125 137 reserve += size;
Note:
See TracChangeset
for help on using the changeset viewer.
