Changeset c520034 in mainline for kernel/generic/src
- Timestamp: 2011-12-31T18:19:35Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 295f658, 77c2b02, 96cd5b4
- Parents: 852052d (diff), 22f0561 (diff)
- Location: kernel/generic/src
- Files: 2 added, 6 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
kernel/generic/src/cpu/cpu.c
r852052d → rc520034

@@ -74,4 +74,4 @@
 	for (i = 0; i < config.cpu_count; i++) {
 		cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-		    FRAME_KA | FRAME_ATOMIC);
+		    FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
 		cpus[i].id = i;
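This one-line change is the simplest illustration of the merge's theme: per-CPU kernel stacks must now come from low memory, which is covered by the kernel identity mapping. Here is the new call annotated; the flag glosses are this editor's reading of HelenOS conventions, not text from the changeset:

    cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
        FRAME_LOWMEM |    /* new: allocate below the low/high memory split */
        FRAME_KA |        /* return a kernel virtual (identity-mapped) address */
        FRAME_ATOMIC);    /* may fail and return NULL instead of blocking */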
kernel/generic/src/main/main.c
r852052d → rc520034

@@ -68,4 +68,5 @@
 #include <mm/page.h>
 #include <genarch/mm/page_pt.h>
+#include <mm/km.h>
 #include <mm/tlb.h>
 #include <mm/as.h>
@@ -86,7 +87,12 @@
 #include <sysinfo/sysinfo.h>
 #include <sysinfo/stats.h>
+#include <lib/ra.h>
 
 /** Global configuration structure. */
-config_t config;
+config_t config = {
+    .identity_configured = false,
+    .non_identity_configured = false,
+    .physmem_end = 0
+};
 
 /** Initial user-space tasks */
@@ -205,8 +211,8 @@
      */
     arch_pre_mm_init();
+    km_identity_init();
     frame_init();
-
-    /* Initialize at least 1 memory segment big enough for slab to work. */
     slab_cache_init();
+    ra_init();
     sysinfo_init();
     btree_init();
@@ -214,4 +220,5 @@
     page_init();
     tlb_init();
+    km_non_identity_init();
     ddi_init();
     arch_post_mm_init();
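Read together, the main.c hunks define a new bring-up order for the memory subsystems. The sketch below restates that order with the apparent dependencies as comments; the dependency notes are inferred from this merge rather than stated by it:

    arch_pre_mm_init();
    km_identity_init();      /* new: configure the identity-mapped kernel VA range */
    frame_init();            /* physical zones; presumably needs the identity bounds */
    slab_cache_init();
    ra_init();               /* new: resource allocator, usable once slab is up */
    sysinfo_init();
    btree_init();
    ...
    page_init();
    tlb_init();
    km_non_identity_init();  /* new: non-identity kernel VA, after paging works */
    ddi_init();
    arch_post_mm_init();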
kernel/generic/src/mm/frame.c
r852052d → rc520034

@@ -240,5 +240,5 @@
 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
 {
-    return (zone_flags_available(zone->flags)
-        && buddy_system_can_alloc(zone->buddy_system, order));
+    return ((zone->flags & ZONE_AVAILABLE) &&
+        buddy_system_can_alloc(zone->buddy_system, order));
 }
@@ -265,5 +265,5 @@
      * Check whether the zone meets the search criteria.
      */
-    if ((zones.info[i].flags & flags) == flags) {
+    if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) {
         /*
          * Check if the zone has 2^order frames area available.
@@ -460,5 +460,5 @@
 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
 {
-    ASSERT(zone_flags_available(zone->flags));
+    ASSERT(zone->flags & ZONE_AVAILABLE);
 
     /* Allocate frames from zone buddy system */
@@ -490,5 +490,5 @@
 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
 {
-    ASSERT(zone_flags_available(zone->flags));
+    ASSERT(zone->flags & ZONE_AVAILABLE);
 
     frame_t *frame = &zone->frames[frame_idx];
@@ -518,5 +518,5 @@
 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
 {
-    ASSERT(zone_flags_available(zone->flags));
+    ASSERT(zone->flags & ZONE_AVAILABLE);
 
     frame_t *frame = zone_get_frame(zone, frame_idx);
@@ -549,6 +549,6 @@
     buddy_system_t *buddy)
 {
-    ASSERT(zone_flags_available(zones.info[z1].flags));
-    ASSERT(zone_flags_available(zones.info[z2].flags));
+    ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
+    ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
     ASSERT(zones.info[z1].flags == zones.info[z2].flags);
     ASSERT(zones.info[z1].base < zones.info[z2].base);
@@ -645,5 +645,5 @@
 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
 {
-    ASSERT(zone_flags_available(zones.info[znum].flags));
+    ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
 
     size_t cframes = SIZE2FRAMES(zone_conf_size(count));
@@ -681,4 +681,4 @@
     size_t count)
 {
-    ASSERT(zone_flags_available(zones.info[znum].flags));
+    ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
     ASSERT(frame_idx + count < zones.info[znum].count);
@@ -723,9 +723,6 @@
      * set of flags
      */
-    if ((z1 >= zones.count) || (z2 >= zones.count)
-        || (z2 - z1 != 1)
-        || (!zone_flags_available(zones.info[z1].flags))
-        || (!zone_flags_available(zones.info[z2].flags))
-        || (zones.info[z1].flags != zones.info[z2].flags)) {
+    if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
+        (zones.info[z1].flags != zones.info[z2].flags)) {
         ret = false;
         goto errout;
@@ -828,5 +825,5 @@
     zone->buddy_system = buddy;
 
-    if (zone_flags_available(flags)) {
+    if (flags & ZONE_AVAILABLE) {
         /*
          * Compute order for buddy system and initialize
@@ -865,3 +862,12 @@
 {
     return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
+}
+
+/** Allocate external configuration frames from low memory. */
+pfn_t zone_external_conf_alloc(size_t count)
+{
+    size_t size = zone_conf_size(count);
+    size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1);
+
+    return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, FRAME_LOWMEM));
 }
@@ -888,5 +894,5 @@
     irq_spinlock_lock(&zones.lock, true);
 
-    if (zone_flags_available(flags)) {  /* Create available zone */
+    if (flags & ZONE_AVAILABLE) {  /* Create available zone */
         /* Theoretically we could have NULL here, practically make sure
          * nobody tries to do that. If some platform requires, remove
@@ -894,4 +900,7 @@
          */
         ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL));
+
+        /* Update the known end of physical memory. */
+        config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));
 
         /* If confframe is supposed to be inside our zone, then make sure
@@ -1232,5 +1241,5 @@
 
     /* Tell the architecture to create some memory */
-    frame_arch_init();
+    frame_low_arch_init();
     if (config.cpu_active == 1) {
         frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
@@ -1255,3 +1264,36 @@
         frame_mark_unavailable(0, 1);
     }
+    frame_high_arch_init();
+}
+
+/** Adjust bounds of physical memory region according to low/high memory split.
+ *
+ * @param low[in]      If true, the adjustment is performed to make the region
+ *                     fit in the low memory. Otherwise the adjustment is
+ *                     performed to make the region fit in the high memory.
+ * @param basep[inout] Pointer to a variable which contains the region's base
+ *                     address and which may receive the adjusted base address.
+ * @param sizep[inout] Pointer to a variable which contains the region's size
+ *                     and which may receive the adjusted size.
+ * @return             True if the region still exists even after the
+ *                     adjustment, false otherwise.
+ */
+bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
+{
+    uintptr_t limit = config.identity_size;
+
+    if (low) {
+        if (*basep > limit)
+            return false;
+        if (*basep + *sizep > limit)
+            *sizep = limit - *basep;
+    } else {
+        if (*basep + *sizep <= limit)
+            return false;
+        if (*basep <= limit) {
+            *sizep -= limit - *basep;
+            *basep = limit;
+        }
+    }
+    return true;
 }
@@ -1293,5 +1335,5 @@
         *total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
 
-        if (zone_flags_available(zones.info[i].flags)) {
+        if (zones.info[i].flags & ZONE_AVAILABLE) {
             *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
             *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
@@ -1344,5 +1386,5 @@
         irq_spinlock_unlock(&zones.lock, true);
 
-        bool available = zone_flags_available(flags);
+        bool available = ((flags & ZONE_AVAILABLE) != 0);
 
         printf("%-4zu", i);
@@ -1356,8 +1398,10 @@
 #endif
 
-        printf(" %12zu %c%c%c ", count,
-            available ? 'A' : ' ',
-            (flags & ZONE_RESERVED) ? 'R' : ' ',
-            (flags & ZONE_FIRMWARE) ? 'F' : ' ');
+        printf(" %12zu %c%c%c%c%c ", count,
+            available ? 'A' : '-',
+            (flags & ZONE_RESERVED) ? 'R' : '-',
+            (flags & ZONE_FIRMWARE) ? 'F' : '-',
+            (flags & ZONE_LOWMEM) ? 'L' : '-',
+            (flags & ZONE_HIGHMEM) ? 'H' : '-');
 
         if (available)
@@ -1401,5 +1445,5 @@
     irq_spinlock_unlock(&zones.lock, true);
 
-    bool available = zone_flags_available(flags);
+    bool available = ((flags & ZONE_AVAILABLE) != 0);
 
     uint64_t size;
@@ -1411,8 +1455,10 @@
     printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
         size, size_suffix);
-    printf("Zone flags: %c%c%c\n",
-        available ? 'A' : ' ',
-        (flags & ZONE_RESERVED) ? 'R' : ' ',
-        (flags & ZONE_FIRMWARE) ? 'F' : ' ');
+    printf("Zone flags: %c%c%c%c%c\n",
+        available ? 'A' : '-',
+        (flags & ZONE_RESERVED) ? 'R' : '-',
+        (flags & ZONE_FIRMWARE) ? 'F' : '-',
+        (flags & ZONE_LOWMEM) ? 'L' : '-',
+        (flags & ZONE_HIGHMEM) ? 'H' : '-');
 
     if (available) {
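The most consequential addition above is frame_adjust_zone_bounds(), which clips a physical memory region to the low or the high side of the identity-mapped limit; frame_low_arch_init() and frame_high_arch_init() can then presumably feed the same platform memory map through the same filter. Below is a self-contained adaptation with a worked example: the function body is copied from the hunk, while the name adjust_bounds, the limit parameter (standing in for config.identity_size), and the concrete numbers are made up for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Body copied from frame_adjust_zone_bounds(); 'limit' replaces
     * config.identity_size so the example is self-contained. */
    static bool adjust_bounds(bool low, uintptr_t limit, uintptr_t *basep,
        size_t *sizep)
    {
        if (low) {
            if (*basep > limit)
                return false;
            if (*basep + *sizep > limit)
                *sizep = limit - *basep;
        } else {
            if (*basep + *sizep <= limit)
                return false;
            if (*basep <= limit) {
                *sizep -= limit - *basep;
                *basep = limit;
            }
        }
        return true;
    }

    int main(void)
    {
        /* Made-up region: 16 MiB starting at 4 MiB, split at 8 MiB. */
        const uintptr_t limit = 0x800000;

        uintptr_t base = 0x400000;
        size_t size = 0x1000000;
        if (adjust_bounds(true, limit, &base, &size))
            printf("low  part: base=%#jx size=%#jx\n",
                (uintmax_t) base, (uintmax_t) size);
        /* -> low  part: base=0x400000 size=0x400000 */

        base = 0x400000;
        size = 0x1000000;
        if (adjust_bounds(false, limit, &base, &size))
            printf("high part: base=%#jx size=%#jx\n",
                (uintmax_t) base, (uintmax_t) size);
        /* -> high part: base=0x800000 size=0xc00000 */

        return 0;
    }

A region straddling the limit is reported once to each side, and the two results partition it exactly: in the example, 4 MiB goes to the low zone and the remaining 12 MiB to the high zone.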
kernel/generic/src/mm/page.c
r852052d → rc520034

@@ -65,4 +65,5 @@
 #include <arch/mm/asid.h>
 #include <mm/as.h>
+#include <mm/km.h>
 #include <mm/frame.h>
 #include <arch/barrier.h>
@@ -75,4 +76,6 @@
 #include <errno.h>
 #include <align.h>
+#include <macros.h>
+#include <bitops.h>
 
 /** Virtual operations for page subsystem. */
@@ -177,4 +180,39 @@
 }
 
+/** Make the mapping shared by all page tables (not address spaces).
+ *
+ * @param base Starting virtual address of the range that is made global.
+ * @param size Size of the address range that is made global.
+ */
+void page_mapping_make_global(uintptr_t base, size_t size)
+{
+    ASSERT(page_mapping_operations);
+    ASSERT(page_mapping_operations->mapping_make_global);
+
+    return page_mapping_operations->mapping_make_global(base, size);
+}
+
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    uintptr_t virtaddr;
+    size_t asize;
+    size_t align;
+    pfn_t i;
+
+    asize = ALIGN_UP(size, PAGE_SIZE);
+    align = ispwr2(size) ? size : (1U << (fnzb(size) + 1));
+    virtaddr = km_page_alloc(asize, align);
+
+    page_table_lock(AS_KERNEL, true);
+    for (i = 0; i < ADDR2PFN(asize); i++) {
+        uintptr_t addr = PFN2ADDR(i);
+        page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr,
+            PAGE_NOT_CACHEABLE | PAGE_WRITE);
+    }
+    page_table_unlock(AS_KERNEL, true);
+
+    return virtaddr;
+}
+
 int page_find_mapping(uintptr_t virt, void **phys)
 {
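The newly added hw_map() rounds the requested size up to whole pages, picks a power-of-two alignment for the virtual window, reserves the window with km_page_alloc(), and maps it page by page as uncacheable and writable, which suits memory-mapped device registers. The alignment computation is the one subtle line; here it is restated standalone. The expression is taken from the hunk, while the portable reimplementations of ispwr2() and fnzb() are assumptions about what those kernel helpers do:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Assumed stand-ins for the kernel helpers: ispwr2() tests for a power
     * of two, fnzb() returns the index of the most significant set bit. */
    static bool ispwr2(size_t x)
    {
        return (x != 0) && ((x & (x - 1)) == 0);
    }

    static unsigned fnzb(size_t x)
    {
        unsigned n = 0;
        while (x >>= 1)
            n++;
        return n;
    }

    int main(void)
    {
        size_t size = 0x3000;  /* e.g. a 12 KiB device register window */

        /* The expression from hw_map(): a power-of-two size aligns to
         * itself, anything else rounds up to the next power of two. */
        size_t align = ispwr2(size) ? size : (1U << (fnzb(size) + 1));

        printf("align = %#zx\n", align);  /* prints: align = 0x4000 */
        return 0;
    }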
kernel/generic/src/mm/reserve.c
r852052d → rc520034

@@ -42,4 +42,7 @@
 #include <typedefs.h>
 #include <arch/types.h>
+#include <debug.h>
+
+static bool reserve_initialized = false;
 
 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock");
@@ -54,3 +57,4 @@
 {
     reserve = frame_total_free_get();
+    reserve_initialized = true;
 }
@@ -67,4 +71,6 @@
 {
     bool reserved = false;
+
+    ASSERT(reserve_initialized);
 
     irq_spinlock_lock(&reserve_lock, true);
@@ -111,4 +117,7 @@
 void reserve_force_alloc(size_t size)
 {
+    if (!reserve_initialized)
+        return;
+
     irq_spinlock_lock(&reserve_lock, true);
     reserve -= size;
@@ -122,4 +131,7 @@
 void reserve_free(size_t size)
 {
+    if (!reserve_initialized)
+        return;
+
     irq_spinlock_lock(&reserve_lock, true);
     reserve += size;
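The reserve.c hunks guard against calls that arrive before reserve_init() has sampled the number of free frames: force-allocations and frees quietly become no-ops, while the tracked allocation path asserts, since it must never run that early. A minimal sketch of the pattern in isolation (names shortened and locking omitted; the real code holds reserve_lock around every update):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool initialized = false;
    static size_t reserve;

    void reserve_init(size_t frames_free)
    {
        reserve = frames_free;
        initialized = true;
    }

    /* May legitimately run before init: degrade to a no-op. */
    void reserve_free(size_t size)
    {
        if (!initialized)
            return;
        reserve += size;
    }

    /* Must never run before init: fail loudly in debug builds. */
    bool reserve_try_alloc(size_t size)
    {
        assert(initialized);
        if (reserve < size)
            return false;
        reserve -= size;
        return true;
    }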
kernel/generic/src/proc/thread.c
r852052d → rc520034

@@ -173,4 +173,23 @@
 #endif /* CONFIG_FPU */
 
+    /*
+     * Allocate the kernel stack from the low memory to prevent an infinite
+     * nesting of TLB-misses when accessing the stack from the part of the
+     * TLB-miss handler written in C.
+     *
+     * Note that low memory is safe to be used for the stack as it will be
+     * covered by the kernel identity mapping, which guarantees not to
+     * nest TLB-misses infinitely (either via some hardware mechanism or
+     * by the construction of the assembly-language part of the TLB-miss
+     * handler).
+     *
+     * This restriction can be lifted once each architecture provides
+     * a similar guarantee, for example by locking the kernel stack
+     * in the TLB whenever it is allocated from the high memory and the
+     * thread is being scheduled to run.
+     */
+    kmflags |= FRAME_LOWMEM;
+    kmflags &= ~FRAME_HIGHMEM;
+
     thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     if (!thread->kstack) {
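The long comment carries the key invariant of this merge: a kernel stack in high memory is reached through ordinary page tables, so a TLB miss taken while the C part of the miss handler pushes to that very stack could recurse without bound, whereas low memory is identity-mapped and always reachable. The flag fix-up restated with that reasoning inline (code lines are from the hunk; the comments are paraphrase):

    kmflags |= FRAME_LOWMEM;    /* force the stack below the identity limit */
    kmflags &= ~FRAME_HIGHMEM;  /* even if the caller asked for high memory */
    thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);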