- Timestamp: 2005-07-15T21:57:30Z (20 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: b4a4c5e3
- Parents: e41c47e
- Location: src
- Files: 8 edited
src/cpu/cpu.c
re41c47e → r76cec1e

Lines 67-71:

    memsetb((__address) cpu_private_data, sizeof(cpu_private_data_t) * config.cpu_count, 0);
    memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);

    for (i=0; i < config.cpu_count; i++) {
        cpus[i].stack = (__u8 *) malloc(CPU_STACK_SIZE);
src/main/kinit.c
re41c47e → r76cec1e

Lines 95-99:

    #ifdef __SMP__
        if (config.cpu_count > 1) {
            /*
             * For each CPU, create its load balancing thread.
             */
src/mm/frame.c
re41c47e → r76cec1e

Lines 55-75:

    void frame_init(void)
    {
        if (config.cpu_active == 1) {

            /*
             * The bootstrap processor will allocate all necessary memory for frame allocation.
             */

            frames = config.memory_size / FRAME_SIZE;
            frame_bitmap_octets = frames / 8 + (frames % 8 > 0);
            frame_bitmap = (__u8 *) malloc(frame_bitmap_octets);
            if (!frame_bitmap)
                panic("malloc/frame_bitmap\n");

            /*
             * Mark all frames free.
             */
            memsetb((__address) frame_bitmap, frame_bitmap_octets, 0);
            frames_free = frames;
        }

Lines 81-88:

        if (config.cpu_active == 1) {
            /*
             * Create the memory address space map. Marked frames and frame
             * regions cannot be used for allocation.
             */
            frame_region_not_free(config.base, config.base + config.kernel_size);
        }
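The bitmap sizing in frame_init() rounds the frame count up to whole octets, one bit per physical frame, with 0 meaning free. A minimal standalone sketch of the same arithmetic in plain C; MEMORY_SIZE and FRAME_SIZE below are hypothetical stand-ins for config.memory_size and the architecture's frame size:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FRAME_SIZE   4096           /* assumption: 4 KiB frames, as on ia32 */
    #define MEMORY_SIZE  (64UL << 20)   /* hypothetical 64 MiB of physical memory */

    int main(void)
    {
        size_t frames = MEMORY_SIZE / FRAME_SIZE;
        /* one bit per frame, rounded up to whole octets */
        size_t frame_bitmap_octets = frames / 8 + (frames % 8 > 0);

        uint8_t *bitmap = malloc(frame_bitmap_octets);
        if (!bitmap)
            return 1;                   /* the kernel would panic("malloc/frame_bitmap\n") here */
        memset(bitmap, 0, frame_bitmap_octets);   /* mark all frames free, as in frame_init() */

        /* mark frame 42 as used and read the bit back */
        bitmap[42 / 8] |= 1 << (42 % 8);
        printf("frames=%zu, bitmap octets=%zu, frame 42 used=%d\n",
               frames, frame_bitmap_octets, (bitmap[42 / 8] >> (42 % 8)) & 1);

        free(bitmap);
        return 0;
    }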
src/mm/page.c
re41c47e → r76cec1e

Lines 54-62:

        /* TODO: implement portable way of computing page address from address */
        length = size + (s - (s & 0xfffff000));
        cnt = length/PAGE_SIZE + (length%PAGE_SIZE>0);

        for (i = 0; i < cnt; i++)
            map_page_to_frame(s + i*PAGE_SIZE, s + i*PAGE_SIZE, PAGE_NOT_CACHEABLE, 0);

    }

Lines 74-110:

    void map_page_to_frame(__address page, __address frame, int flags, __address root)
    {
        pte_t *ptl0, *ptl1, *ptl2, *ptl3;
        __address newpt;

        ptl0 = (pte_t *) PA2KA(root ? root : (__address) GET_PTL0_ADDRESS());

        if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
            newpt = frame_alloc(FRAME_KA);
            memsetb(newpt, PAGE_SIZE, 0);
            SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
            SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER);
        }

        ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

        if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
            newpt = frame_alloc(FRAME_KA);
            memsetb(newpt, PAGE_SIZE, 0);
            SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
            SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER);
        }

        ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

        if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
            newpt = frame_alloc(FRAME_KA);
            memsetb(newpt, PAGE_SIZE, 0);
            SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
            SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER);
        }

        ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

        SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
        SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
    }
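map_page_to_frame() walks a four-level page table hierarchy top-down, allocating any missing intermediate table on the way (frame_alloc() followed by memsetb()) before installing the final frame address and flags. Below is a self-contained sketch of that pattern only; calloc stands in for frame allocation, the 9-bit index split per level is illustrative rather than the real ia32/amd64 layout, and error handling is omitted:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512                                   /* assumption: 9 index bits per level */
    #define IDX(va, lvl) (((va) >> (12 + 9 * (lvl))) & (ENTRIES - 1))

    typedef struct node { struct node *next[ENTRIES]; } node_t;

    /* Walk levels 3..1 top-down, allocating missing tables on demand
     * (calloc plays the role of frame_alloc() + memsetb()); the leaf
     * slot stores the frame address. */
    static void map_page(node_t *root, uint64_t page, uint64_t frame)
    {
        node_t *t = root;
        for (int lvl = 3; lvl > 0; lvl--) {
            size_t i = IDX(page, lvl);
            if (!t->next[i])
                t->next[i] = calloc(1, sizeof(node_t));
            t = t->next[i];
        }
        t->next[IDX(page, 0)] = (node_t *) (uintptr_t) frame;
    }

    static uint64_t lookup(node_t *root, uint64_t page)
    {
        node_t *t = root;
        for (int lvl = 3; lvl > 0; lvl--)
            t = t->next[IDX(page, lvl)];                  /* sketch: assumes the path exists */
        return (uint64_t) (uintptr_t) t->next[IDX(page, 0)];
    }

    int main(void)
    {
        node_t *root = calloc(1, sizeof(node_t));
        uint64_t va = 0xdeadb000, frame = 0x1000;

        map_page(root, va, frame);
        printf("0x%llx -> frame 0x%llx\n",
               (unsigned long long) va, (unsigned long long) lookup(root, va));
        return 0;
    }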
src/proc/scheduler.c
re41c47e → r76cec1e

Lines 103-107:

         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }

Lines 239-243:

         */
        before_thread_runs();
        spinlock_unlock(&THREAD->lock);
        cpu_priority_restore(THREAD->saved_context.pri);
        return;

Lines 279-348:

        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&CPU->lock);
                if(CPU->fpu_owner==THREAD) CPU->fpu_owner=NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * Address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

Lines 469-475:

        t = list_get_instance(l, thread_t, rq_link);
        /*
         * We don't want to steal CPU-wired threads neither threads already stolen.
         * The latter prevents threads from migrating between CPU's without ever being run.
         * We don't want to steal threads whose FPU context is still in CPU.
         */
        spinlock_lock(&t->lock);

Lines 497-501:

        atomic_dec(&nrdy);

        r->n--;
        list_remove(&t->rq_link);

Lines 527-531:

        /*
         * We are not satisfied yet, focus on another CPU next time.
         */
        k++;

Lines 552-556:

        goto not_satisfied;

    satisfied:
        /*
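The switch over THREAD->state decides what happens to the thread giving up the CPU: a Running thread is re-queued via thread_ready(), an Exiting thread has its stacks, task links and FPU ownership torn down, and a Sleeping thread stays parked on its wait queue until woken. A toy model of that decision, with hypothetical types standing in for thread_t and the run queues (no locking, no real queues):

    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { Running, Sleeping, Exiting } state_t;

    typedef struct thread {
        int tid;
        state_t state;
        int ready;            /* toy stand-in for "linked on a run queue" */
    } thread_t;

    /* Mirror of the reschedule switch: decide the fate of the thread
     * that is relinquishing the CPU. */
    static thread_t *relinquish(thread_t *t)
    {
        switch (t->state) {
        case Running:
            t->ready = 1;     /* thread_ready(): back onto a run queue */
            break;
        case Exiting:
            free(t);          /* release its resources */
            return NULL;
        case Sleeping:
            t->ready = 0;     /* stays parked on its wait queue */
            break;
        default:
            fprintf(stderr, "tid%d: unexpected state\n", t->tid);
            abort();
        }
        return t;
    }

    int main(void)
    {
        thread_t *t = calloc(1, sizeof(thread_t));
        t->tid = 1;
        t->state = Running;
        relinquish(t);
        printf("tid%d ready=%d\n", t->tid, t->ready);
        free(t);
        return 0;
    }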
src/proc/thread.c
re41c47e → r76cec1e

Lines 147-151:

        }
        spinlock_unlock(&cpu->lock);

        cpu_priority_restore(pri);
    }

Lines 278-282:

    void thread_sleep(__u32 sec)
    {
        thread_usleep(sec*1000000);
    }
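thread_sleep() is a plain unit conversion onto thread_usleep(). One thing worth noting in a sketch: with 32-bit arithmetic the seconds-to-microseconds product wraps for arguments above roughly 4294 s, so a standalone version widens before multiplying (the names below are illustrative, not the kernel's API):

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-in for thread_usleep() */
    static void usleep_stub(uint64_t usec)
    {
        printf("sleeping for %llu us\n", (unsigned long long) usec);
    }

    static void sleep_seconds(uint32_t sec)
    {
        /* widen first: sec * 1000000 would wrap in 32 bits for sec > 4294 */
        usleep_stub((uint64_t) sec * 1000000);
    }

    int main(void)
    {
        sleep_seconds(5000);   /* 5.0e9 us, representable only after widening */
        return 0;
    }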
src/time/clock.c
re41c47e → r76cec1e

Lines 90-94:

        spinlock_lock(&THREAD->lock);
        if (!THREAD->ticks--) {
            spinlock_unlock(&THREAD->lock);
            scheduler();
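The clock handler charges each tick to the running thread and calls scheduler() once its quantum is exhausted. A minimal model of that accounting, without the locking and with hypothetical names:

    #include <stdio.h>

    static unsigned ticks = 3;       /* remaining quantum of the running thread */

    static void scheduler_stub(void)
    {
        printf("quantum expired, rescheduling\n");
        ticks = 3;                   /* hand out a fresh quantum */
    }

    /* Called once per clock interrupt. Note the post-decrement: the
     * reschedule fires on the tick that finds the counter already at zero. */
    static void clock_tick(void)
    {
        if (!ticks--)
            scheduler_stub();
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            clock_tick();
        return 0;
    }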
src/time/timeout.c
re41c47e → r76cec1e

Lines 106-110:

        spinlock_lock(&CPU->timeoutlock);
        spinlock_lock(&t->lock);

        if (t->cpu)
            panic("t->cpu != 0");

Lines 115-119:

        t->handler = f;
        t->arg = arg;

        /*
         * Insert t into the active timeouts list according to t->ticks.
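Registration ends by inserting the timeout into the CPU's active list ordered by t->ticks, presumably so the earliest expiration is always at the head. A small standalone sketch of such an ordered insert (plain singly linked list, hypothetical field names, no locking):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct timeout {
        uint64_t ticks;                  /* expiration time in ticks */
        void (*handler)(void *arg);
        void *arg;
        struct timeout *next;
    } timeout_t;

    /* Insert t so the list stays sorted by ascending ticks;
     * the earliest timeout is always at the head. */
    static void timeout_register(timeout_t **head, timeout_t *t)
    {
        timeout_t **p = head;
        while (*p && (*p)->ticks <= t->ticks)
            p = &(*p)->next;
        t->next = *p;
        *p = t;
    }

    static void say(void *arg) { printf("%s\n", (const char *) arg); }

    int main(void)
    {
        timeout_t *head = NULL;
        timeout_t a = { 30, say, "a @30", NULL };
        timeout_t b = { 10, say, "b @10", NULL };
        timeout_t c = { 20, say, "c @20", NULL };

        timeout_register(&head, &a);
        timeout_register(&head, &b);
        timeout_register(&head, &c);

        for (timeout_t *t = head; t; t = t->next)    /* prints b, c, a */
            t->handler(t->arg);
        return 0;
    }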