Changeset 864a081 in mainline
- Timestamp:
- 2011-05-24T03:40:00Z (13 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- bf49001
- Parents:
- 5c460cc (diff), d4c472b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Files:
-
- 2 added
- 1 deleted
- 43 edited
Legend:
- Unmodified
- Added
- Removed
-
boot/arch/ppc32/src/asm.S
r5c460cc r864a081 58 58 .endm 59 59 60 .macro BAT_COMPUTE base size mask lower upper 61 # less than 128 KB -> no BAT 62 63 lis \upper, 0x0002 64 cmpw \size, \upper 65 blt no_bat 66 67 # mask = total >> 18 68 69 li \upper, 18 70 srw \mask, \size, \upper 71 72 # create Block Length mask by replicating 73 # the leading logical one 14 times 74 75 li \upper, 14 76 mtctr \mask 77 li \upper, 1 78 79 0: 80 # mask = (mask >> 1) | mask 81 82 srw \lower, \mask, \upper 83 or \mask, \mask, \lower 84 85 bdnz 0b 86 87 # mask = mask & 0x07ff 88 # (BAT can map up to 256 MB) 89 90 andi. \mask, \mask, 0x07ff 91 92 # mask = (mask << 2) | 0x0002 93 # (priviledged access only) 94 95 li \upper, 2 96 slw \mask, \mask, \upper 97 ori \mask, \mask, 0x0002 98 99 lis \upper, (0x8000 + \base) 100 or \upper, \upper, \mask 101 102 lis \lower, \base 103 ori \lower, \lower, 0x0002 104 .endm 105 60 106 .global start 61 107 .global halt … … 247 293 lwz r31, 4(r3) # r31 = memory size 248 294 249 lis r29, 0x0002 250 cmpw r31, r29 251 blt no_bat # less than 128 KB -> no BAT 252 253 li r29, 18 254 srw r31, r31, r29 # r31 = total >> 18 255 256 # create Block Length mask by replicating 257 # the leading logical one 14 times 258 259 li r29, 14 260 mtctr r31 261 li r29, 1 262 263 bat_mask: 264 srw r30, r31, r29 # r30 = mask >> 1 265 or r31, r31, r30 # mask = mask | r30 266 267 bdnz bat_mask 268 269 andi. 
r31, r31, 0x07ff # mask = mask & 0x07ff (BAT can map up to 256 MB) 270 271 li r29, 2 272 slw r31, r31, r29 # mask = mask << 2 273 ori r31, r31, 0x0002 # mask = mask | 0x0002 (priviledged access only) 274 275 lis r29, 0x8000 276 or r29, r29, r31 277 278 lis r30, 0x0000 279 ori r30, r30, 0x0002 280 281 mtspr ibat0u, r29 282 mtspr ibat0l, r30 283 284 mtspr dbat0u, r29 285 mtspr dbat0l, r30 295 lis r30, 268435456@h 296 ori r30, r30, 268435456@l # r30 = 256 MB 297 298 # BAT0 299 300 # r29 = min(r31, r30) 301 302 cmpw r31, r30 303 blt bat0_r31 304 305 mr r29, r30 306 b bat0_r30 307 308 bat0_r31: 309 310 mr r29, r31 311 312 bat0_r30: 313 314 BAT_COMPUTE 0x0000 r29 r28 r27 r26 315 mtspr ibat0u, r26 316 mtspr ibat0l, r27 317 318 mtspr dbat0u, r26 319 mtspr dbat0l, r27 320 321 # BAT1 322 323 sub r31, r31, r29 # r31 = r31 - r29 324 325 # r29 = min(r31, r30) 326 327 cmpw r31, r30 328 blt bat1_r31 329 330 mr r29, r30 331 b bat1_r30 332 333 bat1_r31: 334 335 mr r29, r31 336 337 bat1_r30: 338 339 BAT_COMPUTE 0x1000 r29 r28 r27 r26 340 mtspr ibat1u, r26 341 mtspr ibat1l, r27 342 343 mtspr dbat1u, r26 344 mtspr dbat1l, r27 345 346 # BAT2 347 348 sub r31, r31, r29 # r31 = r31 - r29 349 350 # r29 = min(r31, r30) 351 352 cmpw r31, r30 353 blt bat2_r31 354 355 mr r29, r30 356 b bat2_r30 357 358 bat2_r31: 359 360 mr r29, r31 361 362 bat2_r30: 363 364 BAT_COMPUTE 0x2000 r29 r28 r27 r26 365 mtspr ibat2u, r26 366 mtspr ibat2l, r27 367 368 mtspr dbat2u, r26 369 mtspr dbat2l, r27 370 371 # BAT3 372 373 sub r31, r31, r29 # r31 = r31 - r29 374 375 # r29 = min(r31, r30) 376 377 cmpw r31, r30 378 blt bat3_r31 379 380 mr r29, r30 381 b bat3_r30 382 383 bat3_r31: 384 385 mr r29, r31 386 387 bat3_r30: 388 389 BAT_COMPUTE 0x3000 r29 r28 r27 r26 390 mtspr ibat3u, r26 391 mtspr ibat3l, r27 392 393 mtspr dbat3u, r26 394 mtspr dbat3l, r27 286 395 287 396 no_bat: -
contrib/conf/arm32-qe.sh
r5c460cc r864a081 1 1 #!/bin/sh 2 2 3 qemu-system-arm -M integratorcp --kernel image.boot3 qemu-system-arm $@ -M integratorcp --kernel image.boot -
contrib/conf/ia32-qe.sh
r5c460cc r864a081 8 8 fi 9 9 10 qemu -m 32 -hda "$DISK_IMG" -cdrom image.iso -boot d10 qemu $@ -m 32 -hda "$DISK_IMG" -cdrom image.iso -boot d -
contrib/conf/ppc32-qe.sh
r5c460cc r864a081 1 1 #!/bin/sh 2 2 3 qemu-system-ppc -M mac99 -boot d -cdrom image.iso3 qemu-system-ppc $@ -M mac99 -boot d -cdrom image.iso -
kernel/arch/ia64/src/mm/tlb.c
r5c460cc r864a081 481 481 482 482 page_table_lock(AS, true); 483 t = page_mapping_find(AS, va );483 t = page_mapping_find(AS, va, true); 484 484 if (t) { 485 485 /* … … 599 599 600 600 page_table_lock(AS, true); 601 pte_t *entry = page_mapping_find(AS, va );601 pte_t *entry = page_mapping_find(AS, va, true); 602 602 if (entry) { 603 603 /* … … 651 651 652 652 page_table_lock(AS, true); 653 t = page_mapping_find(AS, va );653 t = page_mapping_find(AS, va, true); 654 654 ASSERT((t) && (t->p)); 655 655 if ((t) && (t->p) && (t->w)) { … … 684 684 685 685 page_table_lock(AS, true); 686 t = page_mapping_find(AS, va );686 t = page_mapping_find(AS, va, true); 687 687 ASSERT((t) && (t->p)); 688 688 if ((t) && (t->p) && (t->x)) { … … 717 717 718 718 page_table_lock(AS, true); 719 t = page_mapping_find(AS, va );719 t = page_mapping_find(AS, va, true); 720 720 ASSERT((t) && (t->p)); 721 721 if ((t) && (t->p)) { … … 753 753 */ 754 754 page_table_lock(AS, true); 755 t = page_mapping_find(AS, va );755 t = page_mapping_find(AS, va, true); 756 756 ASSERT((t) && (t->p)); 757 757 ASSERT(!t->w); … … 778 778 779 779 page_table_lock(AS, true); 780 t = page_mapping_find(AS, va );780 t = page_mapping_find(AS, va, true); 781 781 ASSERT(t); 782 782 -
kernel/arch/mips32/src/mm/tlb.c
r5c460cc r864a081 100 100 mutex_unlock(&AS->lock); 101 101 102 page_table_lock(AS, true);103 104 102 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc); 105 103 if (!pte) { … … 113 111 * or copy_to_uspace(). 114 112 */ 115 page_table_unlock(AS, true);116 113 return; 117 114 default: … … 144 141 tlbwr(); 145 142 146 page_table_unlock(AS, true);147 143 return; 148 144 149 145 fail: 150 page_table_unlock(AS, true);151 146 tlb_refill_fail(istate); 152 147 } … … 176 171 index.value = cp0_index_read(); 177 172 178 page_table_lock(AS, true);179 180 173 /* 181 174 * Fail if the entry is not in TLB. … … 197 190 * or copy_to_uspace(). 198 191 */ 199 page_table_unlock(AS, true);200 192 return; 201 193 default: … … 227 219 tlbwi(); 228 220 229 page_table_unlock(AS, true);230 221 return; 231 222 232 223 fail: 233 page_table_unlock(AS, true);234 224 tlb_invalid_fail(istate); 235 225 } … … 259 249 index.value = cp0_index_read(); 260 250 261 page_table_lock(AS, true);262 263 251 /* 264 252 * Fail if the entry is not in TLB. … … 280 268 * or copy_to_uspace(). 281 269 */ 282 page_table_unlock(AS, true);283 270 return; 284 271 default: … … 311 298 tlbwi(); 312 299 313 page_table_unlock(AS, true);314 300 return; 315 301 316 302 fail: 317 page_table_unlock(AS, true);318 303 tlb_modified_fail(istate); 319 304 } … … 364 349 pte_t *pte; 365 350 366 ASSERT(mutex_locked(&AS->lock));367 368 351 hi.value = cp0_entry_hi_read(); 369 352 … … 379 362 * Check if the mapping exists in page tables. 380 363 */ 381 pte = page_mapping_find(AS, badvaddr );364 pte = page_mapping_find(AS, badvaddr, true); 382 365 if (pte && pte->p && (pte->w || access != PF_ACCESS_WRITE)) { 383 366 /* … … 393 376 * Resort to higher-level page fault handler. 394 377 */ 395 page_table_unlock(AS, true);396 378 switch (rc = as_page_fault(badvaddr, access, istate)) { 397 379 case AS_PF_OK: … … 400 382 * The mapping ought to be in place. 
401 383 */ 402 page_table_lock(AS, true); 403 pte = page_mapping_find(AS, badvaddr); 384 pte = page_mapping_find(AS, badvaddr, true); 404 385 ASSERT(pte && pte->p); 405 386 ASSERT(pte->w || access != PF_ACCESS_WRITE); … … 407 388 break; 408 389 case AS_PF_DEFER: 409 page_table_lock(AS, true);410 390 *pfrc = AS_PF_DEFER; 411 391 return NULL; 412 392 break; 413 393 case AS_PF_FAULT: 414 page_table_lock(AS, true);415 394 *pfrc = AS_PF_FAULT; 416 395 return NULL; -
kernel/arch/ppc32/Makefile.inc
r5c460cc r864a081 55 55 arch/$(KARCH)/src/mm/frame.c \ 56 56 arch/$(KARCH)/src/mm/page.c \ 57 arch/$(KARCH)/src/mm/pht.c \ 57 58 arch/$(KARCH)/src/mm/tlb.c \ 58 59 arch/$(KARCH)/src/drivers/pic.c -
kernel/arch/ppc32/include/mm/as.h
r5c460cc r864a081 36 36 #define KERN_ppc32_AS_H_ 37 37 38 #include <arch/mm/pht.h> 39 38 40 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0 39 41 … … 52 54 #define as_create_arch(as, flags) (as != as) 53 55 #define as_deinstall_arch(as) 54 #define as_invalidate_translation_cache(as, page, cnt) 56 57 #define as_invalidate_translation_cache(as, page, cnt) \ 58 pht_invalidate((as), (page), (cnt)) 55 59 56 60 extern void as_arch_init(void); -
kernel/arch/ppc32/include/mm/tlb.h
r5c460cc r864a081 37 37 38 38 #include <arch/interrupt.h> 39 #include <typedefs.h>40 39 41 40 #define WIMG_GUARDED 0x01 … … 75 74 } ptelo_t; 76 75 77 extern void pht_init(void);78 extern void pht_refill(unsigned int, istate_t *);79 76 extern void tlb_refill(unsigned int, istate_t *); 80 77 -
kernel/arch/ppc32/src/interrupt.c
r5c460cc r864a081 42 42 #include <arch/drivers/pic.h> 43 43 #include <arch/mm/tlb.h> 44 #include <arch/mm/pht.h> 44 45 #include <print.h> 45 46 -
kernel/arch/ppc32/src/mm/page.c
r5c460cc r864a081 43 43 if (config.cpu_active == 1) 44 44 page_mapping_operations = &pt_mapping_operations; 45 as_switch(NULL, AS_KERNEL); 45 46 } 46 47 -
kernel/arch/ppc32/src/mm/tlb.c
r5c460cc r864a081 33 33 */ 34 34 35 #include <mm/tlb.h>36 35 #include <arch/mm/tlb.h> 37 #include <arch/interrupt.h>38 36 #include <interrupt.h> 39 #include <mm/as.h> 40 #include <mm/page.h> 41 #include <arch.h> 42 #include <print.h> 43 #include <macros.h> 44 #include <symtab.h> 45 46 static unsigned int seed = 42; 47 48 /** Try to find PTE for faulting address 49 * 50 * @param as Address space. 51 * @param lock Lock/unlock the address space. 52 * @param badvaddr Faulting virtual address. 53 * @param access Access mode that caused the fault. 54 * @param istate Pointer to interrupted state. 55 * @param pfrc Pointer to variable where as_page_fault() return code 56 * will be stored. 57 * 58 * @return PTE on success, NULL otherwise. 59 * 60 */ 61 static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access, 62 istate_t *istate, int *pfrc) 63 { 64 ASSERT(mutex_locked(&as->lock)); 65 66 /* 67 * Check if the mapping exists in page tables. 68 */ 69 pte_t *pte = page_mapping_find(as, badvaddr); 70 if ((pte) && (pte->present)) { 71 /* 72 * Mapping found in page tables. 73 * Immediately succeed. 74 */ 75 return pte; 76 } else { 77 /* 78 * Mapping not found in page tables. 79 * Resort to higher-level page fault handler. 80 */ 81 page_table_unlock(as, true); 82 83 int rc = as_page_fault(badvaddr, access, istate); 84 switch (rc) { 85 case AS_PF_OK: 86 /* 87 * The higher-level page fault handler succeeded, 88 * The mapping ought to be in place. 
89 */ 90 page_table_lock(as, true); 91 pte = page_mapping_find(as, badvaddr); 92 ASSERT((pte) && (pte->present)); 93 *pfrc = 0; 94 return pte; 95 case AS_PF_DEFER: 96 page_table_lock(as, true); 97 *pfrc = rc; 98 return NULL; 99 case AS_PF_FAULT: 100 page_table_lock(as, true); 101 *pfrc = rc; 102 return NULL; 103 default: 104 panic("Unexpected rc (%d).", rc); 105 } 106 } 107 } 108 109 static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate) 110 { 111 fault_if_from_uspace(istate, "PHT Refill Exception on %p.", 112 (void *) badvaddr); 113 panic_memtrap(istate, PF_ACCESS_UNKNOWN, badvaddr, 114 "PHT Refill Exception."); 115 } 116 117 static void pht_insert(const uintptr_t vaddr, const pte_t *pte) 118 { 119 uint32_t page = (vaddr >> 12) & 0xffff; 120 uint32_t api = (vaddr >> 22) & 0x3f; 121 122 uint32_t vsid = sr_get(vaddr); 123 uint32_t sdr1 = sdr1_get(); 124 125 // FIXME: compute size of PHT exactly 126 phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000); 127 128 /* Primary hash (xor) */ 129 uint32_t h = 0; 130 uint32_t hash = vsid ^ page; 131 uint32_t base = (hash & 0x3ff) << 3; 132 uint32_t i; 133 bool found = false; 134 135 /* Find colliding PTE in PTEG */ 136 for (i = 0; i < 8; i++) { 137 if ((phte[base + i].v) 138 && (phte[base + i].vsid == vsid) 139 && (phte[base + i].api == api) 140 && (phte[base + i].h == 0)) { 141 found = true; 142 break; 143 } 144 } 145 146 if (!found) { 147 /* Find unused PTE in PTEG */ 148 for (i = 0; i < 8; i++) { 149 if (!phte[base + i].v) { 150 found = true; 151 break; 152 } 153 } 154 } 155 156 if (!found) { 157 /* Secondary hash (not) */ 158 uint32_t base2 = (~hash & 0x3ff) << 3; 159 160 /* Find colliding PTE in PTEG */ 161 for (i = 0; i < 8; i++) { 162 if ((phte[base2 + i].v) 163 && (phte[base2 + i].vsid == vsid) 164 && (phte[base2 + i].api == api) 165 && (phte[base2 + i].h == 1)) { 166 found = true; 167 base = base2; 168 h = 1; 169 break; 170 } 171 } 172 173 if (!found) { 174 /* Find unused PTE in PTEG */ 175 for (i = 0; i < 
8; i++) { 176 if (!phte[base2 + i].v) { 177 found = true; 178 base = base2; 179 h = 1; 180 break; 181 } 182 } 183 } 184 185 if (!found) 186 i = RANDI(seed) % 8; 187 } 188 189 phte[base + i].v = 1; 190 phte[base + i].vsid = vsid; 191 phte[base + i].h = h; 192 phte[base + i].api = api; 193 phte[base + i].rpn = pte->pfn; 194 phte[base + i].r = 0; 195 phte[base + i].c = 0; 196 phte[base + i].wimg = (pte->page_cache_disable ? WIMG_NO_CACHE : 0); 197 phte[base + i].pp = 2; // FIXME 198 } 199 200 /** Process Instruction/Data Storage Exception 201 * 202 * @param n Exception vector number. 203 * @param istate Interrupted register context. 204 * 205 */ 206 void pht_refill(unsigned int n, istate_t *istate) 207 { 208 as_t *as = (AS == NULL) ? AS_KERNEL : AS; 209 uintptr_t badvaddr; 210 211 if (n == VECTOR_DATA_STORAGE) 212 badvaddr = istate->dar; 213 else 214 badvaddr = istate->pc; 215 216 page_table_lock(as, true); 217 218 int pfrc; 219 pte_t *pte = find_mapping_and_check(as, badvaddr, 220 PF_ACCESS_READ /* FIXME */, istate, &pfrc); 221 222 if (!pte) { 223 switch (pfrc) { 224 case AS_PF_FAULT: 225 page_table_unlock(as, true); 226 pht_refill_fail(badvaddr, istate); 227 return; 228 case AS_PF_DEFER: 229 /* 230 * The page fault came during copy_from_uspace() 231 * or copy_to_uspace(). 
232 */ 233 page_table_unlock(as, true); 234 return; 235 default: 236 panic("Unexpected pfrc (%d).", pfrc); 237 } 238 } 239 240 /* Record access to PTE */ 241 pte->accessed = 1; 242 pht_insert(badvaddr, pte); 243 244 page_table_unlock(as, true); 245 } 37 #include <typedefs.h> 246 38 247 39 void tlb_refill(unsigned int n, istate_t *istate) … … 289 81 void tlb_invalidate_all(void) 290 82 { 291 uint32_t index; 83 asm volatile ( 84 "sync\n" 85 ); 86 87 for (unsigned int i = 0; i < 0x00040000; i += 0x00001000) { 88 asm volatile ( 89 "tlbie %[i]\n" 90 :: [i] "r" (i) 91 ); 92 } 292 93 293 94 asm volatile ( 294 "li %[index], 0\n"295 "sync\n"296 297 ".rept 64\n"298 " tlbie %[index]\n"299 " addi %[index], %[index], 0x1000\n"300 ".endr\n"301 302 95 "eieio\n" 303 96 "tlbsync\n" 304 97 "sync\n" 305 : [index] "=r" (index)306 98 ); 307 99 } … … 309 101 void tlb_invalidate_asid(asid_t asid) 310 102 { 311 uint32_t sdr1 = sdr1_get();312 313 // FIXME: compute size of PHT exactly314 phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);315 316 size_t i;317 for (i = 0; i < 8192; i++) {318 if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) &&319 (phte[i].vsid < ((asid << 4) + 16)))320 phte[i].v = 0;321 }322 323 103 tlb_invalidate_all(); 324 104 } … … 326 106 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt) 327 107 { 328 // TODO329 108 tlb_invalidate_all(); 330 109 } -
kernel/arch/sparc64/src/mm/sun4u/as.c
r5c460cc r864a081 47 47 #include <bitops.h> 48 48 #include <macros.h> 49 #include <memstr.h> 49 50 50 51 #endif /* CONFIG_TSB */ -
kernel/arch/sparc64/src/mm/sun4u/tlb.c
r5c460cc r864a081 207 207 208 208 page_table_lock(AS, true); 209 t = page_mapping_find(AS, page_16k );209 t = page_mapping_find(AS, page_16k, true); 210 210 if (t && PTE_EXECUTABLE(t)) { 211 211 /* … … 275 275 276 276 page_table_lock(AS, true); 277 t = page_mapping_find(AS, page_16k );277 t = page_mapping_find(AS, page_16k, true); 278 278 if (t) { 279 279 /* … … 319 319 320 320 page_table_lock(AS, true); 321 t = page_mapping_find(AS, page_16k );321 t = page_mapping_find(AS, page_16k, true); 322 322 if (t && PTE_WRITABLE(t)) { 323 323 /* -
kernel/arch/sparc64/src/mm/sun4v/as.c
r5c460cc r864a081 50 50 #include <bitops.h> 51 51 #include <macros.h> 52 #include <memstr.h> 52 53 53 54 #endif /* CONFIG_TSB */ -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r5c460cc r864a081 219 219 220 220 page_table_lock(AS, true); 221 t = page_mapping_find(AS, va );221 t = page_mapping_find(AS, va, true); 222 222 223 223 if (t && PTE_EXECUTABLE(t)) { … … 275 275 276 276 page_table_lock(AS, true); 277 t = page_mapping_find(AS, va );277 t = page_mapping_find(AS, va, true); 278 278 if (t) { 279 279 /* … … 317 317 318 318 page_table_lock(AS, true); 319 t = page_mapping_find(AS, va );319 t = page_mapping_find(AS, va, true); 320 320 if (t && PTE_WRITABLE(t)) { 321 321 /* -
kernel/genarch/include/mm/page_pt.h
r5c460cc r864a081 129 129 130 130 extern void page_mapping_insert_pt(as_t *, uintptr_t, uintptr_t, unsigned int); 131 extern pte_t *page_mapping_find_pt(as_t *, uintptr_t );131 extern pte_t *page_mapping_find_pt(as_t *, uintptr_t, bool); 132 132 133 133 #endif -
kernel/genarch/src/drivers/ega/ega.c
r5c460cc r864a081 41 41 #include <mm/slab.h> 42 42 #include <arch/mm/page.h> 43 #include <synch/spinlock.h>44 43 #include <typedefs.h> 45 44 #include <arch/asm.h> -
kernel/genarch/src/mm/page_ht.c
r5c460cc r864a081 58 58 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 59 59 static void ht_mapping_remove(as_t *, uintptr_t); 60 static pte_t *ht_mapping_find(as_t *, uintptr_t );60 static pte_t *ht_mapping_find(as_t *, uintptr_t, bool); 61 61 62 62 /** … … 214 214 * this call visible. 215 215 * 216 * @param as Address space to w ich page belongs.216 * @param as Address space to which page belongs. 217 217 * @param page Virtual address of the page to be demapped. 218 218 * … … 237 237 /** Find mapping for virtual page in page hash table. 238 238 * 239 * Find mapping for virtual page. 240 * 241 * @param as Address space to wich page belongs. 242 * @param page Virtual page. 239 * @param as Address space to which page belongs. 240 * @param page Virtual page. 241 * @param nolock True if the page tables need not be locked. 243 242 * 244 243 * @return NULL if there is no such mapping; requested mapping otherwise. 245 244 * 246 245 */ 247 pte_t *ht_mapping_find(as_t *as, uintptr_t page )246 pte_t *ht_mapping_find(as_t *as, uintptr_t page, bool nolock) 248 247 { 249 248 sysarg_t key[2] = { … … 252 251 }; 253 252 254 ASSERT( page_table_locked(as));253 ASSERT(nolock || page_table_locked(as)); 255 254 256 255 link_t *cur = hash_table_find(&page_ht, key); -
kernel/genarch/src/mm/page_pt.c
r5c460cc r864a081 48 48 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 49 49 static void pt_mapping_remove(as_t *, uintptr_t); 50 static pte_t *pt_mapping_find(as_t *, uintptr_t );50 static pte_t *pt_mapping_find(as_t *, uintptr_t, bool); 51 51 52 52 page_mapping_operations_t pt_mapping_operations = { … … 238 238 /** Find mapping for virtual page in hierarchical page tables. 239 239 * 240 * Find mapping for virtual page. 241 * 242 * @param as Address space to which page belongs. 243 * @param page Virtual page. 240 * @param as Address space to which page belongs. 241 * @param page Virtual page. 242 * @param nolock True if the page tables need not be locked. 244 243 * 245 244 * @return NULL if there is no such mapping; entry from PTL3 describing … … 247 246 * 248 247 */ 249 pte_t *pt_mapping_find(as_t *as, uintptr_t page )248 pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock) 250 249 { 251 ASSERT( page_table_locked(as));250 ASSERT(nolock || page_table_locked(as)); 252 251 253 252 pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table); -
kernel/generic/include/mm/page.h
r5c460cc r864a081 38 38 #include <typedefs.h> 39 39 #include <mm/as.h> 40 #include <memstr.h> 40 #include <arch/mm/page.h> 41 42 #define P2SZ(pages) \ 43 ((pages) << PAGE_WIDTH) 41 44 42 45 /** Operations to manipulate page mappings. */ … … 44 47 void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int); 45 48 void (* mapping_remove)(as_t *, uintptr_t); 46 pte_t *(* mapping_find)(as_t *, uintptr_t );49 pte_t *(* mapping_find)(as_t *, uintptr_t, bool); 47 50 } page_mapping_operations_t; 48 51 … … 55 58 extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 56 59 extern void page_mapping_remove(as_t *, uintptr_t); 57 extern pte_t *page_mapping_find(as_t *, uintptr_t );60 extern pte_t *page_mapping_find(as_t *, uintptr_t, bool); 58 61 extern pte_t *page_table_create(unsigned int); 59 62 extern void page_table_destroy(pte_t *); -
kernel/generic/include/mm/tlb.h
r5c460cc r864a081 86 86 extern void tlb_invalidate_asid(asid_t); 87 87 extern void tlb_invalidate_pages(asid_t, uintptr_t, size_t); 88 88 89 #endif 89 90 -
kernel/generic/src/console/console.c
r5c460cc r864a081 60 60 61 61 /** Kernel log initialized */ 62 static bool klog_inited = false;62 static atomic_t klog_inited = {false}; 63 63 64 64 /** First kernel log characters */ … … 75 75 76 76 /** Kernel log spinlock */ 77 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, " *klog_lock");77 SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "klog_lock"); 78 78 79 79 /** Physical memory area used for klog buffer */ … … 166 166 167 167 event_set_unmask_callback(EVENT_KLOG, klog_update); 168 169 spinlock_lock(&klog_lock); 170 klog_inited = true; 171 spinlock_unlock(&klog_lock); 168 atomic_set(&klog_inited, true); 172 169 } 173 170 … … 264 261 void klog_update(void) 265 262 { 263 if (!atomic_get(&klog_inited)) 264 return; 265 266 266 spinlock_lock(&klog_lock); 267 267 268 if ( (klog_inited) && (klog_uspace > 0)) {268 if (klog_uspace > 0) { 269 269 if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len, 270 270 klog_uspace) == EOK) … … 277 277 void putchar(const wchar_t ch) 278 278 { 279 bool ordy = ((stdout) && (stdout->op->write)); 280 279 281 spinlock_lock(&klog_lock); 280 282 281 if ((klog_stored > 0) && (stdout) && (stdout->op->write)) { 282 /* Print charaters stored in kernel log */ 283 size_t i; 284 for (i = klog_len - klog_stored; i < klog_len; i++) 285 stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent); 286 klog_stored = 0; 283 /* Print charaters stored in kernel log */ 284 if (ordy) { 285 while (klog_stored > 0) { 286 wchar_t tmp = klog[(klog_start + klog_len - klog_stored) % KLOG_LENGTH]; 287 klog_stored--; 288 289 /* 290 * We need to give up the spinlock for 291 * the physical operation of writting out 292 * the character. 
293 */ 294 spinlock_unlock(&klog_lock); 295 stdout->op->write(stdout, tmp, silent); 296 spinlock_lock(&klog_lock); 297 } 287 298 } 288 299 … … 294 305 klog_start = (klog_start + 1) % KLOG_LENGTH; 295 306 296 if ((stdout) && (stdout->op->write)) 307 if (!ordy) { 308 if (klog_stored < klog_len) 309 klog_stored++; 310 } 311 312 /* The character is stored for uspace */ 313 if (klog_uspace < klog_len) 314 klog_uspace++; 315 316 spinlock_unlock(&klog_lock); 317 318 if (ordy) { 319 /* 320 * Output the character. In this case 321 * it should be no longer buffered. 322 */ 297 323 stdout->op->write(stdout, ch, silent); 298 else {324 } else { 299 325 /* 300 326 * No standard output routine defined yet. … … 306 332 * Note that the early_putc() function might be 307 333 * a no-op on certain hardware configurations. 308 *309 334 */ 310 335 early_putchar(ch); 311 312 if (klog_stored < klog_len) 313 klog_stored++; 314 } 315 316 /* The character is stored for uspace */ 317 if (klog_uspace < klog_len) 318 klog_uspace++; 319 320 spinlock_unlock(&klog_lock); 336 } 321 337 322 338 /* Force notification on newline */ -
kernel/generic/src/mm/as.c
r5c460cc r864a081 302 302 * We don't want any area to have conflicts with NULL page. 303 303 */ 304 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))304 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE)) 305 305 return false; 306 306 … … 329 329 mutex_lock(&area->lock); 330 330 331 if (overlaps(addr, count << PAGE_WIDTH,332 area->base, area->pages << PAGE_WIDTH)) {331 if (overlaps(addr, P2SZ(count), area->base, 332 P2SZ(area->pages))) { 333 333 mutex_unlock(&area->lock); 334 334 return false; … … 346 346 mutex_lock(&area->lock); 347 347 348 if (overlaps(addr, count << PAGE_WIDTH,349 area->base, area->pages << PAGE_WIDTH)) {348 if (overlaps(addr, P2SZ(count), area->base, 349 P2SZ(area->pages))) { 350 350 mutex_unlock(&area->lock); 351 351 return false; … … 366 366 mutex_lock(&area->lock); 367 367 368 if (overlaps(addr, count << PAGE_WIDTH,369 area->base, area->pages << PAGE_WIDTH)) {368 if (overlaps(addr, P2SZ(count), area->base, 369 P2SZ(area->pages))) { 370 370 mutex_unlock(&area->lock); 371 371 return false; … … 380 380 */ 381 381 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 382 return !overlaps(addr, count << PAGE_WIDTH, 383 KERNEL_ADDRESS_SPACE_START, 382 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START, 384 383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 385 384 } … … 474 473 475 474 btree_node_t *leaf; 476 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 475 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, 476 &leaf); 477 477 if (area) { 478 478 /* va is the base address of an address space area */ … … 495 495 mutex_lock(&area->lock); 496 496 497 size_t size = area->pages << PAGE_WIDTH;498 if ((area->base <= va) && (va <= area->base + (size- 1)))497 if ((area->base <= va) && 498 (va <= area->base + (P2SZ(area->pages) - 1))) 499 499 return area; 500 500 … … 506 506 * Because of its position in the B+tree, it must have base < va. 
507 507 */ 508 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 508 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, 509 leaf); 509 510 if (lnode) { 510 511 area = (as_area_t *) lnode->value[lnode->keys - 1]; … … 512 513 mutex_lock(&area->lock); 513 514 514 if (va < area->base + (area->pages << PAGE_WIDTH))515 if (va <= area->base + (P2SZ(area->pages) - 1)) 515 516 return area; 516 517 … … 577 578 578 579 if (pages < area->pages) { 579 uintptr_t start_free = area->base + (pages << PAGE_WIDTH);580 uintptr_t start_free = area->base + P2SZ(pages); 580 581 581 582 /* … … 590 591 */ 591 592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 592 area->base + (pages << PAGE_WIDTH), area->pages - pages);593 area->base + P2SZ(pages), area->pages - pages); 593 594 594 595 /* … … 613 614 size_t i = 0; 614 615 615 if (overlaps(ptr, size << PAGE_WIDTH, area->base,616 pages << PAGE_WIDTH)) {616 if (overlaps(ptr, P2SZ(size), area->base, 617 P2SZ(pages))) { 617 618 618 if (ptr + (size << PAGE_WIDTH) <= start_free) {619 if (ptr + P2SZ(size) <= start_free) { 619 620 /* 620 621 * The whole interval fits … … 647 648 648 649 for (; i < size; i++) { 649 pte_t *pte = page_mapping_find(as, ptr +650 (i << PAGE_WIDTH));650 pte_t *pte = page_mapping_find(as, 651 ptr + P2SZ(i), false); 651 652 652 653 ASSERT(pte); … … 657 658 (area->backend->frame_free)) { 658 659 area->backend->frame_free(area, 659 ptr + (i << PAGE_WIDTH),660 ptr + P2SZ(i), 660 661 PTE_GET_FRAME(pte)); 661 662 } 662 663 663 page_mapping_remove(as, ptr + 664 (i << PAGE_WIDTH)); 664 page_mapping_remove(as, ptr + P2SZ(i)); 665 665 } 666 666 } … … 671 671 */ 672 672 673 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),673 tlb_invalidate_pages(as->asid, area->base + P2SZ(pages), 674 674 area->pages - pages); 675 675 676 676 /* 677 * Invalidate software translation caches (e.g. TSB on sparc64). 
678 */ 679 as_invalidate_translation_cache(as, area->base + 680 (pages << PAGE_WIDTH), area->pages - pages); 677 * Invalidate software translation caches 678 * (e.g. TSB on sparc64, PHT on ppc32). 679 */ 680 as_invalidate_translation_cache(as, area->base + P2SZ(pages), 681 area->pages - pages); 681 682 tlb_shootdown_finalize(ipl); 682 683 … … 797 798 798 799 for (size = 0; size < (size_t) node->value[i]; size++) { 799 pte_t *pte = 800 page_mapping_find(as, ptr + (size << PAGE_WIDTH));800 pte_t *pte = page_mapping_find(as, 801 ptr + P2SZ(size), false); 801 802 802 803 ASSERT(pte); … … 807 808 (area->backend->frame_free)) { 808 809 area->backend->frame_free(area, 809 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 810 ptr + P2SZ(size), 811 PTE_GET_FRAME(pte)); 810 812 } 811 813 812 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));814 page_mapping_remove(as, ptr + P2SZ(size)); 813 815 } 814 816 } … … 822 824 823 825 /* 824 * Invalidate potential software translation caches (e.g. TSB on825 * sparc64).826 * Invalidate potential software translation caches 827 * (e.g. TSB on sparc64, PHT on ppc32). 
826 828 */ 827 829 as_invalidate_translation_cache(as, area->base, area->pages); … … 897 899 } 898 900 899 size_t src_size = src_area->pages << PAGE_WIDTH;901 size_t src_size = P2SZ(src_area->pages); 900 902 unsigned int src_flags = src_area->flags; 901 903 mem_backend_t *src_backend = src_area->backend; … … 1094 1096 for (cur = area->used_space.leaf_head.next; 1095 1097 cur != &area->used_space.leaf_head; cur = cur->next) { 1096 btree_node_t *node 1097 = list_get_instance(cur, btree_node_t,leaf_link);1098 btree_node_t *node = list_get_instance(cur, btree_node_t, 1099 leaf_link); 1098 1100 btree_key_t i; 1099 1101 … … 1103 1105 1104 1106 for (size = 0; size < (size_t) node->value[i]; size++) { 1105 pte_t *pte = 1106 p age_mapping_find(as, ptr + (size << PAGE_WIDTH));1107 pte_t *pte = page_mapping_find(as, 1108 ptr + P2SZ(size), false); 1107 1109 1108 1110 ASSERT(pte); … … 1113 1115 1114 1116 /* Remove old mapping */ 1115 page_mapping_remove(as, ptr + (size << PAGE_WIDTH));1117 page_mapping_remove(as, ptr + P2SZ(size)); 1116 1118 } 1117 1119 } … … 1125 1127 1126 1128 /* 1127 * Invalidate potential software translation caches (e.g. TSB on1128 * sparc64).1129 * Invalidate potential software translation caches 1130 * (e.g. TSB on sparc64, PHT on ppc32). 
1129 1131 */ 1130 1132 as_invalidate_translation_cache(as, area->base, area->pages); … … 1159 1161 1160 1162 /* Insert the new mapping */ 1161 page_mapping_insert(as, ptr + (size << PAGE_WIDTH),1163 page_mapping_insert(as, ptr + P2SZ(size), 1162 1164 old_frame[frame_idx++], page_flags); 1163 1165 … … 1240 1242 */ 1241 1243 pte_t *pte; 1242 if ((pte = page_mapping_find(AS, page ))) {1244 if ((pte = page_mapping_find(AS, page, false))) { 1243 1245 if (PTE_PRESENT(pte)) { 1244 1246 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || … … 1481 1483 1482 1484 if (src_area) { 1483 size = src_area->pages << PAGE_WIDTH;1485 size = P2SZ(src_area->pages); 1484 1486 mutex_unlock(&src_area->lock); 1485 1487 } else … … 1536 1538 if (page >= right_pg) { 1537 1539 /* Do nothing. */ 1538 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1539 left_cnt << PAGE_WIDTH)) {1540 } else if (overlaps(page, P2SZ(count), left_pg, 1541 P2SZ(left_cnt))) { 1540 1542 /* The interval intersects with the left interval. */ 1541 1543 return false; 1542 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1543 right_cnt << PAGE_WIDTH)) {1544 } else if (overlaps(page, P2SZ(count), right_pg, 1545 P2SZ(right_cnt))) { 1544 1546 /* The interval intersects with the right interval. 
*/ 1545 1547 return false; 1546 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1547 (page + (count << PAGE_WIDTH) == right_pg)) {1548 } else if ((page == left_pg + P2SZ(left_cnt)) && 1549 (page + P2SZ(count) == right_pg)) { 1548 1550 /* 1549 1551 * The interval can be added by merging the two already … … 1553 1555 btree_remove(&area->used_space, right_pg, leaf); 1554 1556 goto success; 1555 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1557 } else if (page == left_pg + P2SZ(left_cnt)) { 1556 1558 /* 1557 1559 * The interval can be added by simply growing the left … … 1560 1562 node->value[node->keys - 1] += count; 1561 1563 goto success; 1562 } else if (page + (count << PAGE_WIDTH) == right_pg) {1564 } else if (page + P2SZ(count) == right_pg) { 1563 1565 /* 1564 1566 * The interval can be addded by simply moving base of … … 1587 1589 */ 1588 1590 1589 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1590 right_cnt << PAGE_WIDTH)) { 1591 if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) { 1591 1592 /* The interval intersects with the right interval. */ 1592 1593 return false; 1593 } else if (page + (count << PAGE_WIDTH) == right_pg) {1594 } else if (page + P2SZ(count) == right_pg) { 1594 1595 /* 1595 1596 * The interval can be added by moving the base of the … … 1626 1627 if (page < left_pg) { 1627 1628 /* Do nothing. */ 1628 } else if (overlaps(page, count << PAGE_WIDTH, left_pg,1629 left_cnt << PAGE_WIDTH)) {1629 } else if (overlaps(page, P2SZ(count), left_pg, 1630 P2SZ(left_cnt))) { 1630 1631 /* The interval intersects with the left interval. */ 1631 1632 return false; 1632 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1633 right_cnt << PAGE_WIDTH)) {1633 } else if (overlaps(page, P2SZ(count), right_pg, 1634 P2SZ(right_cnt))) { 1634 1635 /* The interval intersects with the right interval. 
*/ 1635 1636 return false; 1636 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1637 (page + (count << PAGE_WIDTH) == right_pg)) {1637 } else if ((page == left_pg + P2SZ(left_cnt)) && 1638 (page + P2SZ(count) == right_pg)) { 1638 1639 /* 1639 1640 * The interval can be added by merging the two already … … 1643 1644 btree_remove(&area->used_space, right_pg, node); 1644 1645 goto success; 1645 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1646 } else if (page == left_pg + P2SZ(left_cnt)) { 1646 1647 /* 1647 1648 * The interval can be added by simply growing the left … … 1650 1651 leaf->value[leaf->keys - 1] += count; 1651 1652 goto success; 1652 } else if (page + (count << PAGE_WIDTH) == right_pg) {1653 } else if (page + P2SZ(count) == right_pg) { 1653 1654 /* 1654 1655 * The interval can be addded by simply moving base of … … 1677 1678 */ 1678 1679 1679 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1680 left_cnt << PAGE_WIDTH)) { 1680 if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) { 1681 1681 /* The interval intersects with the left interval. 
*/ 1682 1682 return false; 1683 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {1683 } else if (left_pg + P2SZ(left_cnt) == page) { 1684 1684 /* 1685 1685 * The interval can be added by growing the left … … 1716 1716 */ 1717 1717 1718 if (overlaps(page, count << PAGE_WIDTH, left_pg,1719 left_cnt << PAGE_WIDTH)) {1718 if (overlaps(page, P2SZ(count), left_pg, 1719 P2SZ(left_cnt))) { 1720 1720 /* 1721 1721 * The interval intersects with the left … … 1723 1723 */ 1724 1724 return false; 1725 } else if (overlaps(page, count << PAGE_WIDTH, right_pg,1726 right_cnt << PAGE_WIDTH)) {1725 } else if (overlaps(page, P2SZ(count), right_pg, 1726 P2SZ(right_cnt))) { 1727 1727 /* 1728 1728 * The interval intersects with the right … … 1730 1730 */ 1731 1731 return false; 1732 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&1733 (page + (count << PAGE_WIDTH) == right_pg)) {1732 } else if ((page == left_pg + P2SZ(left_cnt)) && 1733 (page + P2SZ(count) == right_pg)) { 1734 1734 /* 1735 1735 * The interval can be added by merging the two … … 1739 1739 btree_remove(&area->used_space, right_pg, leaf); 1740 1740 goto success; 1741 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {1741 } else if (page == left_pg + P2SZ(left_cnt)) { 1742 1742 /* 1743 1743 * The interval can be added by simply growing … … 1746 1746 leaf->value[i - 1] += count; 1747 1747 goto success; 1748 } else if (page + (count << PAGE_WIDTH) == right_pg) {1748 } else if (page + P2SZ(count) == right_pg) { 1749 1749 /* 1750 1750 * The interval can be addded by simply moving … … 1812 1812 for (i = 0; i < leaf->keys; i++) { 1813 1813 if (leaf->key[i] == page) { 1814 leaf->key[i] += count << PAGE_WIDTH;1814 leaf->key[i] += P2SZ(count); 1815 1815 leaf->value[i] -= count; 1816 1816 goto success; … … 1822 1822 } 1823 1823 1824 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1824 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, 1825 leaf); 1825 1826 if 
((node) && (page < leaf->key[0])) { 1826 1827 uintptr_t left_pg = node->key[node->keys - 1]; 1827 1828 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1828 1829 1829 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1830 count << PAGE_WIDTH)) { 1831 if (page + (count << PAGE_WIDTH) == 1832 left_pg + (left_cnt << PAGE_WIDTH)) { 1830 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1831 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1833 1832 /* 1834 1833 * The interval is contained in the rightmost … … 1839 1838 node->value[node->keys - 1] -= count; 1840 1839 goto success; 1841 } else if (page + (count << PAGE_WIDTH) < 1842 left_pg + (left_cnt << PAGE_WIDTH)) { 1840 } else if (page + P2SZ(count) < 1841 left_pg + P2SZ(left_cnt)) { 1842 size_t new_cnt; 1843 1843 1844 /* 1844 1845 * The interval is contained in the rightmost … … 1848 1849 * new interval. 1849 1850 */ 1850 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1851 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1851 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1852 (page + P2SZ(count))) >> PAGE_WIDTH; 1852 1853 node->value[node->keys - 1] -= count + new_cnt; 1853 1854 btree_insert(&area->used_space, page + 1854 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1855 P2SZ(count), (void *) new_cnt, leaf); 1855 1856 goto success; 1856 1857 } … … 1865 1866 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1866 1867 1867 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1868 count << PAGE_WIDTH)) { 1869 if (page + (count << PAGE_WIDTH) == 1870 left_pg + (left_cnt << PAGE_WIDTH)) { 1868 if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) { 1869 if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) { 1871 1870 /* 1872 1871 * The interval is contained in the rightmost … … 1876 1875 leaf->value[leaf->keys - 1] -= count; 1877 1876 goto success; 1878 } else if (page + (count << PAGE_WIDTH) < left_pg + 1879 (left_cnt << PAGE_WIDTH)) { 1877 } else if (page + P2SZ(count) < left_pg + 
1878 P2SZ(left_cnt)) { 1879 size_t new_cnt; 1880 1880 1881 /* 1881 1882 * The interval is contained in the rightmost … … 1885 1886 * interval. 1886 1887 */ 1887 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -1888 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;1888 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1889 (page + P2SZ(count))) >> PAGE_WIDTH; 1889 1890 leaf->value[leaf->keys - 1] -= count + new_cnt; 1890 1891 btree_insert(&area->used_space, page + 1891 (count << PAGE_WIDTH), (void *) new_cnt, leaf);1892 P2SZ(count), (void *) new_cnt, leaf); 1892 1893 goto success; 1893 1894 } … … 1911 1912 * to (i - 1) and i. 1912 1913 */ 1913 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,1914 count << PAGE_WIDTH)) {1915 if (page + (count << PAGE_WIDTH) ==1916 left_pg + (left_cnt << PAGE_WIDTH)) {1914 if (overlaps(left_pg, P2SZ(left_cnt), page, 1915 P2SZ(count))) { 1916 if (page + P2SZ(count) == 1917 left_pg + P2SZ(left_cnt)) { 1917 1918 /* 1918 1919 * The interval is contained in the … … 1923 1924 leaf->value[i - 1] -= count; 1924 1925 goto success; 1925 } else if (page + (count << PAGE_WIDTH) < 1926 left_pg + (left_cnt << PAGE_WIDTH)) { 1926 } else if (page + P2SZ(count) < 1927 left_pg + P2SZ(left_cnt)) { 1928 size_t new_cnt; 1929 1927 1930 /* 1928 1931 * The interval is contained in the … … 1932 1935 * also inserting a new interval. 
1933 1936 */ 1934 size_t new_cnt = ((left_pg + 1935 (left_cnt << PAGE_WIDTH)) - 1936 (page + (count << PAGE_WIDTH))) >> 1937 new_cnt = ((left_pg + P2SZ(left_cnt)) - 1938 (page + P2SZ(count))) >> 1937 1939 PAGE_WIDTH; 1938 1940 leaf->value[i - 1] -= count + new_cnt; 1939 1941 btree_insert(&area->used_space, page + 1940 (count << PAGE_WIDTH), (void *) new_cnt,1942 P2SZ(count), (void *) new_cnt, 1941 1943 leaf); 1942 1944 goto success; … … 2034 2036 btree_key_t i; 2035 2037 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2038 uintptr_t addr; 2039 2036 2040 as_area_t *area = (as_area_t *) node->value[i]; 2037 2041 2038 2042 mutex_lock(&area->lock); 2039 2043 2040 uintptr_t addr = 2041 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2044 addr = ALIGN_UP(area->base + P2SZ(area->pages), 2042 2045 PAGE_SIZE); 2043 2046 … … 2098 2101 2099 2102 info[area_idx].start_addr = area->base; 2100 info[area_idx].size = FRAMES2SIZE(area->pages);2103 info[area_idx].size = P2SZ(area->pages); 2101 2104 info[area_idx].flags = area->flags; 2102 2105 ++area_idx; … … 2136 2139 " (%p - %p)\n", area, (void *) area->base, 2137 2140 area->pages, (void *) area->base, 2138 (void *) (area->base + FRAMES2SIZE(area->pages)));2141 (void *) (area->base + P2SZ(area->pages))); 2139 2142 mutex_unlock(&area->lock); 2140 2143 } -
kernel/generic/src/mm/backend_anon.c
r5c460cc r864a081 50 50 #include <typedefs.h> 51 51 #include <align.h> 52 #include <memstr.h> 52 53 #include <arch.h> 53 54 … … 121 122 page_table_lock(area->as, false); 122 123 pte = page_mapping_find(area->as, 123 base + j * PAGE_SIZE);124 base + P2SZ(j), false); 124 125 ASSERT(pte && PTE_VALID(pte) && 125 126 PTE_PRESENT(pte)); 126 127 btree_insert(&area->sh_info->pagemap, 127 (base + j * PAGE_SIZE) - area->base,128 (base + P2SZ(j)) - area->base, 128 129 (void *) PTE_GET_FRAME(pte), NULL); 129 130 page_table_unlock(area->as, false); -
kernel/generic/src/mm/backend_elf.c
r5c460cc r864a081 170 170 if (!(area->flags & AS_AREA_WRITE)) 171 171 if (base >= entry->p_vaddr && 172 base + count * PAGE_SIZE<= start_anon)172 base + P2SZ(count) <= start_anon) 173 173 continue; 174 174 … … 182 182 if (!(area->flags & AS_AREA_WRITE)) 183 183 if (base >= entry->p_vaddr && 184 base + (j + 1) * PAGE_SIZE <= 185 start_anon) 184 base + P2SZ(j + 1) <= start_anon) 186 185 continue; 187 186 188 187 page_table_lock(area->as, false); 189 188 pte = page_mapping_find(area->as, 190 base + j * PAGE_SIZE);189 base + P2SZ(j), false); 191 190 ASSERT(pte && PTE_VALID(pte) && 192 191 PTE_PRESENT(pte)); 193 192 btree_insert(&area->sh_info->pagemap, 194 (base + j * PAGE_SIZE) - area->base,193 (base + P2SZ(j)) - area->base, 195 194 (void *) PTE_GET_FRAME(pte), NULL); 196 195 page_table_unlock(area->as, false); -
kernel/generic/src/mm/page.c
r5c460cc r864a081 108 108 * using flags. Allocate and setup any missing page tables. 109 109 * 110 * @param as Address space to w ich page belongs.110 * @param as Address space to which page belongs. 111 111 * @param page Virtual address of the page to be mapped. 112 112 * @param frame Physical address of memory frame to which the mapping is … … 135 135 * this call visible. 136 136 * 137 * @param as Address space to w ich page belongs.137 * @param as Address space to which page belongs. 138 138 * @param page Virtual address of the page to be demapped. 139 139 * … … 152 152 } 153 153 154 /** Find mapping for virtual page 154 /** Find mapping for virtual page. 155 155 * 156 * Find mapping for virtual page. 157 * 158 * @param as Address space to wich page belongs. 159 * @param page Virtual page. 156 * @param as Address space to which page belongs. 157 * @param page Virtual page. 158 * @param nolock True if the page tables need not be locked. 160 159 * 161 160 * @return NULL if there is no such mapping; requested mapping … … 163 162 * 164 163 */ 165 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page )164 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock) 166 165 { 167 ASSERT( page_table_locked(as));166 ASSERT(nolock || page_table_locked(as)); 168 167 169 168 ASSERT(page_mapping_operations); 170 169 ASSERT(page_mapping_operations->mapping_find); 171 170 172 return page_mapping_operations->mapping_find(as, page );171 return page_mapping_operations->mapping_find(as, page, nolock); 173 172 } 174 173 -
kernel/generic/src/printf/vprintf.c
r5c460cc r864a081 41 41 #include <typedefs.h> 42 42 #include <str.h> 43 44 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");45 43 46 44 static int vprintf_str_write(const char *str, size_t size, void *data) … … 93 91 }; 94 92 95 irq_spinlock_lock(&printf_lock, true); 96 int ret = printf_core(fmt, &ps, ap); 97 irq_spinlock_unlock(&printf_lock, true); 98 99 return ret; 93 return printf_core(fmt, &ps, ap); 100 94 } 101 95 -
kernel/generic/src/synch/futex.c
r5c460cc r864a081 119 119 */ 120 120 page_table_lock(AS, true); 121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE) );121 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 122 122 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 123 123 page_table_unlock(AS, true); … … 155 155 */ 156 156 page_table_lock(AS, true); 157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE) );157 t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false); 158 158 if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { 159 159 page_table_unlock(AS, true); -
kernel/generic/src/synch/spinlock.c
r5c460cc r864a081 96 96 * run in a simulator) that caused problems with both 97 97 * printf_lock and the framebuffer lock. 98 *99 98 */ 100 99 if (lock->name[0] == '*') -
kernel/test/mm/mapping1.c
r5c460cc r864a081 35 35 #include <typedefs.h> 36 36 #include <debug.h> 37 #include <arch.h> 37 38 38 39 #define PAGE0 0x10000000 … … 58 59 *((uint32_t *) frame1) = VALUE1; 59 60 61 page_table_lock(AS, true); 62 60 63 TPRINTF("Mapping virtual address %p to physical address %p.\n", 61 64 (void *) PAGE0, (void *) KA2PA(frame0)); … … 65 68 (void *) PAGE1, (void *) KA2PA(frame1)); 66 69 page_mapping_insert(AS_KERNEL, PAGE1, KA2PA(frame1), PAGE_PRESENT | PAGE_WRITE); 70 71 page_table_unlock(AS, true); 67 72 68 73 v0 = *((uint32_t *) PAGE0); -
uspace/app/tester/mm/common.c
r5c460cc r864a081 135 135 } 136 136 137 static void check_consistency(const char *loc) 138 { 139 /* Check heap consistency */ 140 void *prob = heap_check(); 141 if (prob != NULL) { 142 TPRINTF("\nError: Heap inconsistency at %p in %s.\n", 143 prob, loc); 144 TSTACKTRACE(); 145 error_flag = true; 146 } 147 } 148 137 149 /** Checked malloc 138 150 * … … 153 165 /* Allocate the chunk of memory */ 154 166 data = malloc(size); 167 check_consistency("checked_malloc"); 155 168 if (data == NULL) 156 169 return NULL; … … 160 173 TPRINTF("\nError: Allocated block overlaps with another " 161 174 "previously allocated block.\n"); 175 TSTACKTRACE(); 162 176 error_flag = true; 163 177 } … … 198 212 if (block->addr == NULL) { 199 213 free(block); 214 check_consistency("alloc_block"); 200 215 return NULL; 201 216 } … … 228 243 /* Free the memory */ 229 244 free(block->addr); 245 check_consistency("free_block (a)"); 230 246 free(block); 247 check_consistency("free_block (b)"); 231 248 } 232 249 … … 257 274 pos < end; pos++) 258 275 *pos = block_expected_value(block, pos); 276 277 check_consistency("fill_block"); 259 278 } 260 279 … … 273 292 if (*pos != block_expected_value(block, pos)) { 274 293 TPRINTF("\nError: Corrupted content of a data block.\n"); 294 TSTACKTRACE(); 275 295 error_flag = true; 276 296 return; … … 296 316 if (entry == NULL) { 297 317 TPRINTF("\nError: Corrupted list of allocated memory blocks.\n"); 318 TSTACKTRACE(); 298 319 error_flag = true; 299 320 } … … 325 346 if (addr == NULL) { 326 347 free(area); 348 check_consistency("map_area (a)"); 327 349 return NULL; 328 350 } … … 331 353 if (area->addr == (void *) -1) { 332 354 free(area); 355 check_consistency("map_area (b)"); 333 356 return NULL; 334 357 } … … 361 384 362 385 free(area); 386 check_consistency("unmap_area"); 363 387 } 364 388 … … 389 413 pos < end; pos++) 390 414 *pos = area_expected_value(area, pos); 391 } 415 416 check_consistency("fill_area"); 417 } -
uspace/app/tester/mm/malloc1.c
r5c460cc r864a081 241 241 TPRINTF("A"); 242 242 fill_block(blk); 243 RETURN_IF_ERROR; 243 244 } 244 245 -
uspace/app/tester/mm/malloc3.c
r5c460cc r864a081 232 232 TPRINTF("A"); 233 233 fill_block(blk); 234 RETURN_IF_ERROR; 234 235 235 236 if ((mem_blocks_count % AREA_GRANULARITY) == 0) { 236 237 mem_area_t *area = map_area(AREA_SIZE); 237 238 RETURN_IF_ERROR; 239 238 240 if (area != NULL) { 239 241 TPRINTF("*"); 240 242 fill_area(area); 243 RETURN_IF_ERROR; 241 244 } else 242 245 TPRINTF("F(*)"); -
uspace/app/tester/tester.h
r5c460cc r864a081 38 38 #include <sys/types.h> 39 39 #include <bool.h> 40 #include <stacktrace.h> 40 41 41 42 #define IPC_TEST_SERVICE 10240 … … 59 60 if (!test_quiet) { \ 60 61 fprintf(stderr, (format), ##__VA_ARGS__); \ 62 } \ 63 } while (0) 64 65 #define TSTACKTRACE() \ 66 do { \ 67 if (!test_quiet) { \ 68 stacktrace_print(); \ 61 69 } \ 62 70 } while (0) -
uspace/lib/c/arch/ppc32/_link.ld.in
r5c460cc r864a081 10 10 #endif 11 11 data PT_LOAD FLAGS(6); 12 debug PT_NOTE; 12 13 } 13 14 … … 55 56 } :data 56 57 58 #ifdef CONFIG_LINE_DEBUG 59 .comment 0 : { *(.comment); } :debug 60 .debug_abbrev 0 : { *(.debug_abbrev); } :debug 61 .debug_aranges 0 : { *(.debug_aranges); } :debug 62 .debug_info 0 : { *(.debug_info); } :debug 63 .debug_line 0 : { *(.debug_line); } :debug 64 .debug_loc 0 : { *(.debug_loc); } :debug 65 .debug_pubnames 0 : { *(.debug_pubnames); } :debug 66 .debug_pubtypes 0 : { *(.debug_pubtypes); } :debug 67 .debug_ranges 0 : { *(.debug_ranges); } :debug 68 .debug_str 0 : { *(.debug_str); } :debug 69 #endif 70 57 71 /DISCARD/ : { 58 72 *(*); -
uspace/lib/c/generic/assert.c
r5c460cc r864a081 33 33 #include <assert.h> 34 34 #include <stdio.h> 35 #include <io/klog.h> 35 36 #include <stdlib.h> 37 #include <atomic.h> 36 38 #include <stacktrace.h> 39 #include <stdint.h> 40 41 static atomic_t failed_asserts = {0}; 37 42 38 43 void assert_abort(const char *cond, const char *file, unsigned int line) 39 44 { 45 /* 46 * Send the message safely to klog. Nested asserts should not occur. 47 */ 48 klog_printf("Assertion failed (%s) in file \"%s\", line %u.\n", 49 cond, file, line); 50 51 /* 52 * Check if this is a nested or parallel assert. 53 */ 54 if (atomic_postinc(&failed_asserts)) 55 abort(); 56 57 /* 58 * Attempt to print the message to standard output and display 59 * the stack trace. These operations can theoretically trigger nested 60 * assertions. 61 */ 40 62 printf("Assertion failed (%s) in file \"%s\", line %u.\n", 41 63 cond, file, line); 42 64 stacktrace_print(); 65 43 66 abort(); 44 67 } -
uspace/lib/c/generic/io/klog.c
r5c460cc r864a081 38 38 #include <sys/types.h> 39 39 #include <unistd.h> 40 #include <errno.h> 40 41 #include <io/klog.h> 42 #include <io/printf_core.h> 41 43 42 44 size_t klog_write(const void *buf, size_t size) … … 55 57 } 56 58 59 /** Print formatted text to klog. 60 * 61 * @param fmt Format string 62 * 63 * \see For more details about format string see printf_core. 64 * 65 */ 66 int klog_printf(const char *fmt, ...) 67 { 68 va_list args; 69 va_start(args, fmt); 70 71 int ret = klog_vprintf(fmt, args); 72 73 va_end(args); 74 75 return ret; 76 } 77 78 static int klog_vprintf_str_write(const char *str, size_t size, void *data) 79 { 80 size_t wr = klog_write(str, size); 81 return str_nlength(str, wr); 82 } 83 84 static int klog_vprintf_wstr_write(const wchar_t *str, size_t size, void *data) 85 { 86 size_t offset = 0; 87 size_t chars = 0; 88 89 while (offset < size) { 90 char buf[STR_BOUNDS(1)]; 91 size_t sz = 0; 92 93 if (chr_encode(str[chars], buf, &sz, STR_BOUNDS(1)) == EOK) 94 klog_write(buf, sz); 95 96 chars++; 97 offset += sizeof(wchar_t); 98 } 99 100 return chars; 101 } 102 103 /** Print formatted text to klog. 104 * 105 * @param fmt Format string 106 * @param ap Format parameters 107 * 108 * \see For more details about format string see printf_core. 109 * 110 */ 111 int klog_vprintf(const char *fmt, va_list ap) 112 { 113 printf_spec_t ps = { 114 klog_vprintf_str_write, 115 klog_vprintf_wstr_write, 116 NULL 117 }; 118 119 return printf_core(fmt, &ps, ap); 120 } 121 57 122 /** @} 58 123 */ -
uspace/lib/c/generic/io/vprintf.c
r5c460cc r864a081 96 96 /** Print formatted text to stdout. 97 97 * 98 * @param file Output stream 99 * @param fmt Format string 100 * @param ap Format parameters 98 * @param fmt Format string 99 * @param ap Format parameters 101 100 * 102 101 * \see For more details about format string see printf_core. -
uspace/lib/c/generic/malloc.c
r5c460cc r864a081 79 79 (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t)) 80 80 81 /** Overhead of each area. */ 82 #define AREA_OVERHEAD(size) \ 83 (ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN)) 84 81 85 /** Calculate real size of a heap block. 82 86 * … … 183 187 184 188 /** Next heap block to examine (next fit algorithm) */ 185 static heap_block_head_t *next = NULL;189 static heap_block_head_t *next_fit = NULL; 186 190 187 191 /** Futex for thread-safe heap manipulation */ 188 192 static futex_t malloc_futex = FUTEX_INITIALIZER; 193 194 #ifndef NDEBUG 195 196 #define malloc_assert(expr) \ 197 do { \ 198 if (!(expr)) {\ 199 futex_up(&malloc_futex); \ 200 assert_abort(#expr, __FILE__, __LINE__); \ 201 } \ 202 } while (0) 203 204 #else /* NDEBUG */ 205 206 #define malloc_assert(expr) 207 208 #endif /* NDEBUG */ 189 209 190 210 /** Initialize a heap block … … 228 248 heap_block_head_t *head = (heap_block_head_t *) addr; 229 249 230 assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);250 malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC); 231 251 232 252 heap_block_foot_t *foot = BLOCK_FOOT(head); 233 253 234 assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);235 assert(head->size == foot->size);254 malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC); 255 malloc_assert(head->size == foot->size); 236 256 } 237 257 … … 247 267 heap_area_t *area = (heap_area_t *) addr; 248 268 249 assert(area->magic == HEAP_AREA_MAGIC);250 assert(addr == area->start);251 assert(area->start < area->end);252 assert(((uintptr_t) area->start % PAGE_SIZE) == 0);253 assert(((uintptr_t) area->end % PAGE_SIZE) == 0);269 malloc_assert(area->magic == HEAP_AREA_MAGIC); 270 malloc_assert(addr == area->start); 271 malloc_assert(area->start < area->end); 272 malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0); 273 malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0); 254 274 } 255 275 … … 362 382 363 383 /* Eventually try to create a new area */ 364 return area_create(AREA_ 
FIRST_BLOCK_HEAD(size));384 return area_create(AREA_OVERHEAD(size)); 365 385 } 366 386 … … 382 402 383 403 block_check((void *) last_head); 384 assert(last_head->area == area);404 malloc_assert(last_head->area == area); 385 405 386 406 if (last_head->free) { … … 395 415 396 416 block_check((void *) first_head); 397 assert(first_head->area == area);417 malloc_assert(first_head->area == area); 398 418 399 419 size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE); … … 439 459 /* Update heap area parameters */ 440 460 area->end = end; 441 442 /* Update block layout */ 443 void *last = (void *) last_head; 444 size_t excess = (size_t) (area->end - last); 461 size_t excess = ((size_t) area->end) - ((size_t) last_head); 445 462 446 463 if (excess > 0) { … … 451 468 * create a new free block. 452 469 */ 453 block_init( last, excess, true, area);470 block_init((void *) last_head, excess, true, area); 454 471 } else { 455 472 /* … … 470 487 } 471 488 472 next = NULL;489 next_fit = NULL; 473 490 } 474 491 … … 497 514 static void split_mark(heap_block_head_t *cur, const size_t size) 498 515 { 499 assert(cur->size >= size);516 malloc_assert(cur->size >= size); 500 517 501 518 /* See if we should split the block. 
*/ … … 533 550 { 534 551 area_check((void *) area); 535 assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));536 assert((void *) first_block < area->end);552 malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area)); 553 malloc_assert((void *) first_block < area->end); 537 554 538 555 for (heap_block_head_t *cur = first_block; (void *) cur < area->end; … … 559 576 split_mark(cur, real_size); 560 577 561 next = cur;578 next_fit = cur; 562 579 return addr; 563 580 } else { … … 611 628 split_mark(next_head, real_size); 612 629 613 next = next_head;630 next_fit = next_head; 614 631 return aligned; 615 632 } else { … … 637 654 split_mark(cur, real_size); 638 655 639 next = cur;656 next_fit = cur; 640 657 return aligned; 641 658 } … … 661 678 static void *malloc_internal(const size_t size, const size_t align) 662 679 { 663 assert(first_heap_area != NULL);680 malloc_assert(first_heap_area != NULL); 664 681 665 682 if (align == 0) … … 675 692 676 693 /* Try the next fit approach */ 677 split = next ;694 split = next_fit; 678 695 679 696 if (split != NULL) { … … 786 803 787 804 block_check(head); 788 assert(!head->free);805 malloc_assert(!head->free); 789 806 790 807 heap_area_t *area = head->area; 791 808 792 809 area_check(area); 793 assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));794 assert((void *) head < area->end);810 malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area)); 811 malloc_assert((void *) head < area->end); 795 812 796 813 void *ptr = NULL; … … 831 848 832 849 ptr = ((void *) head) + sizeof(heap_block_head_t); 833 next = NULL;850 next_fit = NULL; 834 851 } else 835 852 reloc = true; … … 863 880 864 881 block_check(head); 865 assert(!head->free);882 malloc_assert(!head->free); 866 883 867 884 heap_area_t *area = head->area; 868 885 869 886 area_check(area); 870 assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));871 assert((void *) head < area->end);887 malloc_assert((void *) head >= 
(void *) AREA_FIRST_BLOCK_HEAD(area)); 888 malloc_assert((void *) head < area->end); 872 889 873 890 /* Mark the block itself as free. */ … … 904 921 } 905 922 923 void *heap_check(void) 924 { 925 futex_down(&malloc_futex); 926 927 if (first_heap_area == NULL) { 928 futex_up(&malloc_futex); 929 return (void *) -1; 930 } 931 932 /* Walk all heap areas */ 933 for (heap_area_t *area = first_heap_area; area != NULL; 934 area = area->next) { 935 936 /* Check heap area consistency */ 937 if ((area->magic != HEAP_AREA_MAGIC) || 938 ((void *) area != area->start) || 939 (area->start >= area->end) || 940 (((uintptr_t) area->start % PAGE_SIZE) != 0) || 941 (((uintptr_t) area->end % PAGE_SIZE) != 0)) { 942 futex_up(&malloc_futex); 943 return (void *) area; 944 } 945 946 /* Walk all heap blocks */ 947 for (heap_block_head_t *head = (heap_block_head_t *) 948 AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end; 949 head = (heap_block_head_t *) (((void *) head) + head->size)) { 950 951 /* Check heap block consistency */ 952 if (head->magic != HEAP_BLOCK_HEAD_MAGIC) { 953 futex_up(&malloc_futex); 954 return (void *) head; 955 } 956 957 heap_block_foot_t *foot = BLOCK_FOOT(head); 958 959 if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) || 960 (head->size != foot->size)) { 961 futex_up(&malloc_futex); 962 return (void *) foot; 963 } 964 } 965 } 966 967 futex_up(&malloc_futex); 968 969 return NULL; 970 } 971 906 972 /** @} 907 973 */ -
uspace/lib/c/generic/thread.c
r5c460cc r864a081 44 44 45 45 #ifndef THREAD_INITIAL_STACK_PAGES_NO 46 #define THREAD_INITIAL_STACK_PAGES_NO 146 #define THREAD_INITIAL_STACK_PAGES_NO 2 47 47 #endif 48 48 -
uspace/lib/c/include/io/klog.h
r5c460cc r864a081 37 37 38 38 #include <sys/types.h> 39 #include <stdarg.h> 39 40 40 41 extern size_t klog_write(const void *, size_t); 41 42 extern void klog_update(void); 43 extern int klog_printf(const char *, ...); 44 extern int klog_vprintf(const char *, va_list); 42 45 43 46 #endif -
uspace/lib/c/include/malloc.h
r5c460cc r864a081 46 46 extern void *realloc(const void *addr, const size_t size); 47 47 extern void free(const void *addr); 48 extern void *heap_check(void); 48 49 49 50 #endif
Note:
See TracChangeset
for help on using the changeset viewer.