Changeset 43ac0cc in mainline


Timestamp:
2011-06-13T18:53:42Z (13 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
9934f7d
Parents:
e8a69913
Message:

More lightweight implementation of delay() in kernel.

Location:
kernel/generic
Files:
4 edited

Legend:

    ' '  unmodified
    '+'  added
    '-'  removed
  • kernel/generic/include/proc/thread.h

    re8a69913 → r43ac0cc

    @@ -156,4 +156,7 @@
             int fpu_context_engaged;

    +        /* The thread will not be migrated if nomigrate is non-zero. */
    +        int nomigrate;
    +
             /** Thread's state. */
             state_t state;

    @@ -245,4 +248,7 @@
     extern bool thread_exists(thread_t *);

    +extern void thread_migration_disable(void);
    +extern void thread_migration_enable(void);
    +
     #ifdef CONFIG_UDEBUG
     extern void thread_stack_trace(thread_id_t);
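
    The header comment states that the thread will not be migrated while nomigrate is non-zero.
    Because nomigrate is a counter rather than a boolean flag, the new calls can nest: migration is
    allowed again only once every thread_migration_disable() has been matched by a
    thread_migration_enable(). A minimal illustrative sketch (outer() and inner() are hypothetical
    functions, not part of this changeset):

        #include <proc/thread.h>

        static void inner(void)
        {
                thread_migration_disable();     /* nomigrate: 1 -> 2 */
                /* ... work that must stay on the current CPU ... */
                thread_migration_enable();      /* nomigrate: 2 -> 1 */
        }

        static void outer(void)
        {
                thread_migration_disable();     /* nomigrate: 0 -> 1 */
                inner();                        /* nested use is safe */
                thread_migration_enable();      /* nomigrate: 1 -> 0, migration possible again */
        }
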
  • kernel/generic/src/proc/scheduler.c

    re8a69913 → r43ac0cc

    @@ -586,5 +586,4 @@
              * Searching least priority queues on all CPU's first and most priority
              * queues on all CPU's last.
    -         *
              */
             size_t acpu;

    @@ -620,23 +619,24 @@

                                 while (link != &(cpu->rq[rq].rq_head)) {
    -                                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
    +                                thread = (thread_t *) list_get_instance(link,
    +                                    thread_t, rq_link);

                                         /*
    -                                 * We don't want to steal CPU-wired threads
    -                                 * neither threads already stolen. The latter
    -                                 * prevents threads from migrating between CPU's
    -                                 * without ever being run. We don't want to
    -                                 * steal threads whose FPU context is still in
    -                                 * CPU.
    -                                 *
    +                                 * Do not steal CPU-wired threads, threads
    +                                 * already stolen, threads for which migration
    +                                 * was temporarily disabled or threads whose
    +                                 * FPU context is still in the CPU.
                                      */
                                         irq_spinlock_lock(&thread->lock, false);

    -                                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
    -                                    && (!(thread->fpu_context_engaged))) {
    +                                if (!(thread->flags & THREAD_FLAG_WIRED) &&
    +                                    !(thread->flags & THREAD_FLAG_STOLEN) &&
    +                                    !thread->nomigrate &&
    +                                    !thread->fpu_context_engaged) {
                                                 /*
                                                  * Remove thread from ready queue.
                                                  */
    -                                        irq_spinlock_unlock(&thread->lock, false);
    +                                        irq_spinlock_unlock(&thread->lock,
    +                                            false);

                                                 atomic_dec(&cpu->nrdy);

    @@ -660,5 +660,6 @@
                                      */

    -                                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
    +                                irq_spinlock_pass(&(cpu->rq[rq].lock),
    +                                    &thread->lock);

     #ifdef KCPULB_VERBOSE
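
    The load balancer's test now excludes, in addition to wired and already-stolen threads and
    threads whose FPU context is still loaded in the CPU, any thread with a non-zero nomigrate
    count. Purely for illustration, the updated condition can be read as a predicate like the one
    below; the name thread_is_stealable() is hypothetical and the changeset keeps the test inline
    in kcpulb():

        /* Illustrative helper only; the caller is assumed to hold thread->lock. */
        static bool thread_is_stealable(thread_t *thread)
        {
                return !(thread->flags & THREAD_FLAG_WIRED) &&
                    !(thread->flags & THREAD_FLAG_STOLEN) &&
                    !thread->nomigrate &&
                    !thread->fpu_context_engaged;
        }
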
  • kernel/generic/src/proc/thread.c

    re8a69913 → r43ac0cc

    @@ -322,4 +322,5 @@
             thread->cpu = NULL;
             thread->flags = flags;
    +        thread->nomigrate = 0;
             thread->state = Entering;

    @@ -482,4 +483,21 @@
             /* Not reached */
             while (true);
    +}
    +
    +/** Prevent the current thread from being migrated to another processor. */
    +void thread_migration_disable(void)
    +{
    +        ASSERT(THREAD);
    +
    +        THREAD->nomigrate++;
    +}
    +
    +/** Allow the current thread to be migrated to another processor. */
    +void thread_migration_enable(void)
    +{
    +        ASSERT(THREAD);
    +        ASSERT(THREAD->nomigrate > 0);
    +
    +        THREAD->nomigrate--;
     }

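
    Both new functions ASSERT(THREAD), so they may only be called from thread context; code that can
    also run before or outside threading (such as delay() in the next file) has to guard the calls
    itself. A hedged sketch of that guard pattern, mirroring what delay() does; do_pinned_work() and
    its body are hypothetical, and the includes follow those used by delay.c:

        #include <proc/thread.h>
        #include <arch.h>

        /* Hypothetical caller, not part of the changeset: it may run with or
         * without a current thread, so it only pins itself when THREAD is set. */
        static void do_pinned_work(void)
        {
                if (THREAD)
                        thread_migration_disable();

                /* ... code that relies on staying on the current CPU ... */

                if (THREAD)
                        thread_migration_enable();
        }
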
  • kernel/generic/src/time/delay.c

    re8a69913 → r43ac0cc

    @@ -37,4 +37,5 @@

     #include <time/delay.h>
    +#include <proc/thread.h>
     #include <typedefs.h>
     #include <cpu.h>

    @@ -42,9 +43,7 @@
     #include <arch.h>

    -/** Active delay
    +/** Delay the execution for the given number of microseconds (or slightly more).
      *
    - * Delay the execution for the given number
    - * of microseconds (or slightly more). The delay
    - * is implemented as CPU calibrated active loop.
    + * The delay is implemented as active delay loop.
      *
      * @param usec Number of microseconds to sleep.

    @@ -52,17 +51,16 @@
     void delay(uint32_t usec)
     {
    -        ipl_t ipl;
    -
             /*
    -         * The delay loop is calibrated for each and every
    -         * CPU in the system. Therefore it is necessary to
    -         * call interrupts_disable() before calling the
    -         * asm_delay_loop().
    +         * The delay loop is calibrated for each and every CPU in the system.
    +         * If running in a thread context, it is therefore necessary to disable
    +         * thread migration. We want to do this in a lightweight manner.
              */
    -        ipl = interrupts_disable();
    +        if (THREAD)
    +                thread_migration_disable();
             asm_delay_loop(usec * CPU->delay_loop_const);
    -        interrupts_restore(ipl);
    +        if (THREAD)
    +                thread_migration_enable();
     }

     /** @}
    - */
    +*/
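
    Pieced together from the hunks above, the new delay() reads as follows. Interrupts now stay
    enabled during the calibrated busy loop; only thread migration is suppressed, so an interrupt
    (and a possible preemption) can stretch the real-time delay, which the "or slightly more"
    wording of the doc comment already allows for:

        void delay(uint32_t usec)
        {
                /*
                 * The delay loop is calibrated for each and every CPU in the system.
                 * If running in a thread context, it is therefore necessary to disable
                 * thread migration. We want to do this in a lightweight manner.
                 */
                if (THREAD)
                        thread_migration_disable();
                asm_delay_loop(usec * CPU->delay_loop_const);
                if (THREAD)
                        thread_migration_enable();
        }
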