Changeset 151c050 in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2024-01-15T14:33:03Z (17 months ago)
- Branches: master
- Children: c7ceacf
- Parents: 8996582
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 16:15:29)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-15 14:33:03)
- File: 1 edited
Legend:
- Unmodified (context)
- Added (+)
- Removed (-)
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r8996582)
+++ kernel/generic/src/proc/scheduler.c (r151c050)
@@ -302,16 +302,19 @@
 }
 
-void scheduler(void)
-{
-	ipl_t ipl = interrupts_disable();
-
-	if (atomic_load(&haltstate))
-		halt();
-
-	if (THREAD) {
-		irq_spinlock_lock(&THREAD->lock, false);
-	}
-
-	scheduler_locked(ipl);
+void scheduler_run(void)
+{
+	assert(interrupts_disabled());
+	assert(THREAD == NULL);
+	assert(CPU != NULL);
+
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+
+	context_t ctx;
+	context_save(&ctx);
+	context_set(&ctx, FADDR(scheduler_separated_stack),
+	    (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
+	context_restore(&ctx);
+
+	unreachable();
 }
 
@@ -431,32 +434,27 @@
  *
  */
-void scheduler_locked(ipl_t ipl)
-{
+void scheduler_enter(state_t new_state)
+{
+	ipl_t ipl = interrupts_disable();
+
 	assert(CPU != NULL);
-
-	if (THREAD) {
-		/* Update thread kernel accounting */
-		THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-		fpu_cleanup();
-
-		if (!context_save(&THREAD->saved_context)) {
-			/*
-			 * This is the place where threads leave scheduler();
-			 */
-
-			irq_spinlock_unlock(&THREAD->lock, false);
-			interrupts_restore(THREAD->saved_ipl);
-
-			return;
-		}
-
-		/*
-		 * Interrupt priority level of preempted thread is recorded
-		 * here to facilitate scheduler() invocations from
-		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
-		 *
-		 */
-		THREAD->saved_ipl = ipl;
+	assert(THREAD != NULL);
+
+	fpu_cleanup();
+
+	irq_spinlock_lock(&THREAD->lock, false);
+	THREAD->state = new_state;
+
+	/* Update thread kernel accounting */
+	THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+	if (!context_save(&THREAD->saved_context)) {
+		/*
+		 * This is the place where threads leave scheduler();
+		 */
+
+		irq_spinlock_unlock(&THREAD->lock, false);
+		interrupts_restore(ipl);
+		return;
 	}
 
@@ -504,4 +502,7 @@
 	assert(interrupts_disabled());
 
+	if (atomic_load(&haltstate))
+		halt();
+
 	if (THREAD) {
 		after_thread_ran_arch();
@@ -678,5 +679,5 @@
		 *
		 */
-		scheduler();
+		thread_yield();
	} else {
		/*
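The net effect of the change is an API shift: the old scheduler()/scheduler_locked(ipl) pair, where the target thread state had to be set by the caller before yielding, is replaced by scheduler_run() (entered on the CPU's own stack when no thread is running) and scheduler_enter(state_t), which disables interrupts, takes THREAD->lock and records the target state on the caller's behalf. As a rough sketch of what this means for a call site (illustrative only, not taken from this changeset; Sleeping is one of the kernel's state_t values):

	/* Before (r8996582): the caller stored the new state itself
	 * and then invoked the scheduler, which locked THREAD->lock
	 * and saved the context. Hypothetical call site: */
	THREAD->state = Sleeping;
	scheduler();

	/* After (r151c050): the target state is passed in, and
	 * scheduler_enter() disables interrupts, takes THREAD->lock
	 * and sets the state under that lock before switching away. */
	scheduler_enter(Sleeping);

The thread_yield() call substituted in the last hunk presumably follows the same pattern: a wrapper that re-enters the scheduler with the Ready state so the current thread can be scheduled again later.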