Changeset 43ac0cc in mainline
- Timestamp: 2011-06-13T18:53:42Z (13 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 9934f7d
- Parents: e8a69913
- Location: kernel/generic
- Files: 4 edited
kernel/generic/include/proc/thread.h
--- kernel/generic/include/proc/thread.h (re8a69913)
+++ kernel/generic/include/proc/thread.h (r43ac0cc)
@@ -156,4 +156,7 @@
 	int fpu_context_engaged;
 
+	/* The thread will not be migrated if nomigrate is non-zero. */
+	int nomigrate;
+
 	/** Thread's state. */
 	state_t state;
@@ -245,4 +248,7 @@
 extern bool thread_exists(thread_t *);
 
+extern void thread_migration_disable(void);
+extern void thread_migration_enable(void);
+
 #ifdef CONFIG_UDEBUG
 extern void thread_stack_trace(thread_id_t);
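The new interface is a per-thread counter rather than a boolean flag, so disable/enable pairs can nest. A minimal usage sketch, assuming a caller that must stay on its current CPU while reading CPU-local state (the function cpu_local_example() is hypothetical, not part of this changeset):

    #include <proc/thread.h>
    #include <arch.h>

    /* Hypothetical example: pin the current thread to its CPU while
     * reading CPU-local state. Nested disable/enable pairs are fine,
     * because nomigrate is a counter rather than a flag. */
    static void cpu_local_example(void)
    {
        thread_migration_disable();

        /* From here until the matching enable, the CPU macro keeps
         * referring to the same cpu_t instance. */
        uint32_t cycles = 100 * CPU->delay_loop_const;
        (void) cycles;

        thread_migration_enable();
    }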
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (re8a69913)
+++ kernel/generic/src/proc/scheduler.c (r43ac0cc)
@@ -586,5 +586,4 @@
 	 * Searching least priority queues on all CPU's first and most priority
 	 * queues on all CPU's last.
-	 *
 	 */
 	size_t acpu;
@@ -620,23 +619,24 @@
 
 	while (link != &(cpu->rq[rq].rq_head)) {
-		thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+		thread = (thread_t *) list_get_instance(link,
+		    thread_t, rq_link);
 
 		/*
-		 * We don't want to steal CPU-wired threads
-		 * neither threads already stolen. The latter
-		 * prevents threads from migrating between CPU's
-		 * without ever being run. We don't want to
-		 * steal threads whose FPU context is still in
-		 * CPU.
-		 *
+		 * Do not steal CPU-wired threads, threads
+		 * already stolen, threads for which migration
+		 * was temporarily disabled or threads whose
+		 * FPU context is still in the CPU.
 		 */
 		irq_spinlock_lock(&thread->lock, false);
 
-		if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-		    && (!(thread->fpu_context_engaged))) {
+		if (!(thread->flags & THREAD_FLAG_WIRED) &&
+		    !(thread->flags & THREAD_FLAG_STOLEN) &&
+		    !thread->nomigrate &&
+		    !thread->fpu_context_engaged) {
 			/*
 			 * Remove thread from ready queue.
 			 */
-			irq_spinlock_unlock(&thread->lock, false);
+			irq_spinlock_unlock(&thread->lock,
+			    false);
 
 			atomic_dec(&cpu->nrdy);
@@ -660,5 +660,6 @@
 			 */
 
-			irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+			irq_spinlock_pass(&(cpu->rq[rq].lock),
+			    &thread->lock);
 
 #ifdef KCPULB_VERBOSE
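Read together, the rewritten condition in the load balancer is a single eligibility test: a ready thread may be stolen only if it is not wired to its CPU, not already stolen, has migration enabled, and has no live FPU context on the CPU. A sketch of the equivalent predicate, assuming thread->lock is held by the caller (the helper name thread_is_stealable() is illustrative, not part of the changeset):

    #include <proc/thread.h>

    /* Illustrative helper: mirrors the test the balancer now performs
     * while holding thread->lock. */
    static bool thread_is_stealable(thread_t *thread)
    {
        return !(thread->flags & THREAD_FLAG_WIRED) &&
            !(thread->flags & THREAD_FLAG_STOLEN) &&
            !thread->nomigrate &&
            !thread->fpu_context_engaged;
    }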
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c (re8a69913)
+++ kernel/generic/src/proc/thread.c (r43ac0cc)
@@ -322,3 +322,4 @@
 	thread->cpu = NULL;
 	thread->flags = flags;
+	thread->nomigrate = 0;
 	thread->state = Entering;
@@ -482,3 +483,20 @@
 	/* Not reached */
 	while (true);
+}
+
+/** Prevent the current thread from being migrated to another processor. */
+void thread_migration_disable(void)
+{
+	ASSERT(THREAD);
+
+	THREAD->nomigrate++;
+}
+
+/** Allow the current thread to be migrated to another processor. */
+void thread_migration_enable(void)
+{
+	ASSERT(THREAD);
+	ASSERT(THREAD->nomigrate > 0);
+
+	THREAD->nomigrate--;
 }
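Because nomigrate is incremented and decremented rather than set and cleared, the pairs nest: migration is re-enabled only by the outermost thread_migration_enable(), and the ASSERT(THREAD->nomigrate > 0) catches unbalanced calls. A short sketch of the counter behavior (the comments show the assumed counter values):

    thread_migration_disable();    /* nomigrate: 0 -> 1, migration off */
    thread_migration_disable();    /* nomigrate: 1 -> 2, still off */
    thread_migration_enable();     /* nomigrate: 2 -> 1, still off */
    thread_migration_enable();     /* nomigrate: 1 -> 0, migration allowed again */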
kernel/generic/src/time/delay.c
--- kernel/generic/src/time/delay.c (re8a69913)
+++ kernel/generic/src/time/delay.c (r43ac0cc)
@@ -37,4 +37,5 @@
 
 #include <time/delay.h>
+#include <proc/thread.h>
 #include <typedefs.h>
 #include <cpu.h>
@@ -42,9 +43,7 @@
 #include <arch.h>
 
-/** Active delay
+/** Delay the execution for the given number of microseconds (or slightly more).
  *
- * Delay the execution for the given number
- * of microseconds (or slightly more). The delay
- * is implemented as CPU calibrated active loop.
+ * The delay is implemented as active delay loop.
  *
  * @param usec Number of microseconds to sleep.
@@ -52,17 +51,16 @@
 void delay(uint32_t usec)
 {
-	ipl_t ipl;
-
 	/*
-	 * The delay loop is calibrated for each and every
-	 * CPU in the system. Therefore it is necessary to
-	 * call interrupts_disable() before calling the
-	 * asm_delay_loop().
+	 * The delay loop is calibrated for each and every CPU in the system.
+	 * If running in a thread context, it is therefore necessary to disable
+	 * thread migration. We want to do this in a lightweight manner.
	 */
-	ipl = interrupts_disable();
+	if (THREAD)
+		thread_migration_disable();
 	asm_delay_loop(usec * CPU->delay_loop_const);
-	interrupts_restore(ipl);
+	if (THREAD)
+		thread_migration_enable();
 }
 
 /** @}
  */
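The point of this rewrite: the old code disabled interrupts only to guarantee that the thread kept running on the CPU whose delay_loop_const it had read for calibration. Disabling migration gives the same guarantee more cheaply and leaves interrupts enabled for the duration of the busy-wait, while the THREAD test keeps delay() usable from early boot, before any thread context exists. A usage sketch (the device-settle scenario is illustrative):

    #include <time/delay.h>

    /* Illustrative caller: busy-wait 100 microseconds, e.g. for a device
     * register to settle. Interrupts stay enabled during the wait; the
     * thread merely cannot migrate to a differently calibrated CPU. */
    static void device_settle_example(void)
    {
        delay(100);
    }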