  • kernel/generic/src/proc/scheduler.c (r151c050 → r4760793)

@@ -70,4 +70,66 @@
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */

+/** Take actions before new thread runs.
+ *
+ * Perform actions that need to be
+ * taken before the newly selected
+ * thread is passed control.
+ *
+ * THREAD->lock is locked on entry
+ *
+ */
+static void before_thread_runs(void)
+{
+        before_thread_runs_arch();
+
+#ifdef CONFIG_FPU_LAZY
+        /*
+         * The only concurrent modification possible for fpu_owner here is
+         * another thread changing it from itself to NULL in its destructor.
+         */
+        thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
+            memory_order_relaxed);
+
+        if (THREAD == owner)
+                fpu_enable();
+        else
+                fpu_disable();
+#elif defined CONFIG_FPU
+        fpu_enable();
+        if (THREAD->fpu_context_exists)
+                fpu_context_restore(&THREAD->fpu_context);
+        else {
+                fpu_init();
+                THREAD->fpu_context_exists = true;
+        }
+#endif
+
+#ifdef CONFIG_UDEBUG
+        if (THREAD->btrace) {
+                istate_t *istate = THREAD->udebug.uspace_state;
+                if (istate != NULL) {
+                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+                        stack_trace_istate(istate);
+                }
+
+                THREAD->btrace = false;
+        }
+#endif
+}
+
+/** Take actions after THREAD had run.
+ *
+ * Perform actions that need to be
+ * taken after the running thread
+ * had been preempted by the scheduler.
+ *
+ * THREAD->lock is locked on entry
+ *
+ */
+static void after_thread_ran(void)
+{
+        after_thread_ran_arch();
+}
+
 #ifdef CONFIG_FPU_LAZY
 void scheduler_fpu_lazy_request(void)
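
The lazy-FPU branch added above keys the enable/disable decision on a relaxed atomic load of CPU->fpu_owner; the in-code comment explains why relaxed ordering is enough. A minimal user-space sketch of that ownership test, using C11 atomics and hypothetical thread_t/cpu_t stand-ins rather than the HelenOS types:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct thread {
            int id;
    } thread_t;

    typedef struct cpu {
            /* Cleared (set back to NULL) only by the owning thread itself. */
            _Atomic(thread_t *) fpu_owner;
    } cpu_t;

    /* Decide whether the incoming thread still owns the lazily-kept FPU state. */
    static bool fpu_should_stay_enabled(cpu_t *cpu, thread_t *incoming)
    {
            /*
             * Relaxed ordering suffices: the only concurrent writer changes the
             * field from "itself" to NULL, so whatever value we read compares
             * correctly against the incoming thread.
             */
            thread_t *owner = atomic_load_explicit(&cpu->fpu_owner,
                memory_order_relaxed);
            return owner == incoming;
    }

    int main(void)
    {
            thread_t a = { .id = 1 }, b = { .id = 2 };
            cpu_t cpu;
            atomic_init(&cpu.fpu_owner, &a);

            printf("thread a keeps FPU enabled: %d\n", fpu_should_stay_enabled(&cpu, &a));
            printf("thread b keeps FPU enabled: %d\n", fpu_should_stay_enabled(&cpu, &b));
            return 0;
    }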
     
@@ -145,5 +207,22 @@
                 list_remove(&thread->rq_link);

-                irq_spinlock_unlock(&(CPU->rq[i].lock), false);
+                irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+
+                thread->cpu = CPU;
+                thread->priority = i;  /* Correct rq index */
+
+                /* Time allocation in microseconds. */
+                uint64_t time_to_run = (i + 1) * 10000;
+
+                /* This is safe because interrupts are disabled. */
+                CPU_LOCAL->preempt_deadline =
+                    CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
+
+                /*
+                 * Clear the stolen flag so that it can be migrated
+                 * when load balancing needs emerge.
+                 */
+                thread->stolen = false;
+                irq_spinlock_unlock(&thread->lock, false);

                 *rq_index = i;
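
The quantum granted here scales with the run queue index: (i + 1) * 10000 microseconds, i.e. 10 ms for run queue 0, 20 ms for run queue 1, and so on, and the preemption deadline is the current clock tick plus that quantum converted to ticks. A small sketch of the arithmetic, with a made-up TICKS_PER_US constant standing in for the kernel's us2ticks():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical clock resolution; the kernel converts with us2ticks(). */
    #define TICKS_PER_US  1

    /* Quantum in microseconds for a thread taken from run queue i. */
    static uint64_t quantum_us(unsigned i)
    {
            return (uint64_t)(i + 1) * 10000;
    }

    int main(void)
    {
            uint64_t current_clock_tick = 123456;  /* arbitrary example value */

            for (unsigned i = 0; i < 4; i++) {
                    uint64_t deadline = current_clock_tick +
                        quantum_us(i) * TICKS_PER_US;
                    printf("rq %u: quantum %" PRIu64 " us, preempt deadline at tick %" PRIu64 "\n",
                        i, quantum_us(i), deadline);
            }
            return 0;
    }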
     
@@ -262,167 +341,16 @@
 }

-/**
- * Do whatever needs to be done with current FPU state before we switch to
- * another thread.
- */
-static void fpu_cleanup(void)
-{
-#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
-        fpu_context_save(&THREAD->fpu_context);
-#endif
-}
-
-/**
- * Set correct FPU state for this thread after switch from another thread.
- */
-static void fpu_restore(void)
-{
-#ifdef CONFIG_FPU_LAZY
-        /*
-         * The only concurrent modification possible for fpu_owner here is
-         * another thread changing it from itself to NULL in its destructor.
-         */
-        thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
-            memory_order_relaxed);
-
-        if (THREAD == owner)
-                fpu_enable();
-        else
-                fpu_disable();
-
-#elif defined CONFIG_FPU
-        fpu_enable();
-        if (THREAD->fpu_context_exists)
-                fpu_context_restore(&THREAD->fpu_context);
-        else {
-                fpu_init();
-                THREAD->fpu_context_exists = true;
-        }
-#endif
-}
-
-void scheduler_run(void)
-{
-        assert(interrupts_disabled());
-        assert(THREAD == NULL);
-        assert(CPU != NULL);
-
-        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-        context_t ctx;
-        context_save(&ctx);
-        context_set(&ctx, FADDR(scheduler_separated_stack),
-            (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
-        context_restore(&ctx);
-
-        unreachable();
-}
-
-/** Things to do before we switch to THREAD context.
- */
-static void prepare_to_run_thread(int rq_index)
-{
-        relink_rq(rq_index);
-
-        switch_task(THREAD->task);
-
-        irq_spinlock_lock(&THREAD->lock, false);
-        THREAD->state = Running;
-        THREAD->cpu = CPU;
-        THREAD->priority = rq_index;  /* Correct rq index */
-
-        /*
-         * Clear the stolen flag so that it can be migrated
-         * when load balancing needs emerge.
-         */
-        THREAD->stolen = false;
-
-#ifdef SCHEDULER_VERBOSE
-        log(LF_OTHER, LVL_DEBUG,
-            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
-            THREAD->ticks, atomic_load(&CPU->nrdy));
-#endif
-
-        /*
-         * Some architectures provide late kernel PA2KA(identity)
-         * mapping in a page fault handler. However, the page fault
-         * handler uses the kernel stack of the running thread and
-         * therefore cannot be used to map it. The kernel stack, if
-         * necessary, is to be mapped in before_thread_runs(). This
-         * function must be executed before the switch to the new stack.
-         */
-        before_thread_runs_arch();
-
-#ifdef CONFIG_UDEBUG
-        if (THREAD->btrace) {
-                istate_t *istate = THREAD->udebug.uspace_state;
-                if (istate != NULL) {
-                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
-                        stack_trace_istate(istate);
-                }
-
-                THREAD->btrace = false;
-        }
-#endif
-
-        fpu_restore();
-
-        /* Time allocation in microseconds. */
-        uint64_t time_to_run = (rq_index + 1) * 10000;
-
-        /* Set the time of next preemption. */
-        CPU_LOCAL->preempt_deadline =
-            CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
-
-        /* Save current CPU cycle */
-        THREAD->last_cycle = get_cycle();
-}
-
-static void cleanup_after_thread(thread_t *thread, state_t out_state)
-{
-        assert(CURRENT->mutex_locks == 0);
-        assert(interrupts_disabled());
-
-        int expected;
-
-        switch (out_state) {
-        case Running:
-                thread_ready(thread);
-                break;
-
-        case Exiting:
-                waitq_close(&thread->join_wq);
-
-                /*
-                 * Release the reference CPU has for the thread.
-                 * If there are no other references (e.g. threads calling join),
-                 * the thread structure is deallocated.
-                 */
-                thread_put(thread);
-                break;
-
-        case Sleeping:
-                expected = SLEEP_INITIAL;
-
-                /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
-                if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state,
-                    &expected, SLEEP_ASLEEP,
-                    memory_order_acq_rel, memory_order_acquire)) {
-
-                        assert(expected == SLEEP_WOKE);
-                        /* The thread has already been woken up, requeue immediately. */
-                        thread_ready(thread);
-                }
-                break;
-
-        default:
-                /*
-                 * Entering state is unexpected.
-                 */
-                panic("tid%" PRIu64 ": unexpected state %s.",
-                    thread->tid, thread_states[thread->state]);
-                break;
-        }
+void scheduler(void)
+{
+        ipl_t ipl = interrupts_disable();
+
+        if (atomic_load(&haltstate))
+                halt();
+
+        if (THREAD) {
+                irq_spinlock_lock(&THREAD->lock, false);
+        }
+
+        scheduler_locked(ipl);
 }

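
On the removed side, cleanup_after_thread() resolves the race between a thread going to sleep and a concurrent wakeup with a single compare-and-swap on sleep_state: the sleeper marks itself asleep only if nobody has set SLEEP_WOKE in the meantime, and otherwise requeues itself immediately. A standalone sketch of that pattern, with a simplified thread struct and a dummy requeue() in place of thread_ready() (hypothetical names, not the kernel API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    typedef struct thread {
            atomic_int sleep_state;
    } thread_t;

    /* Stand-in for thread_ready(): pretend to put the thread back on a run queue. */
    static void requeue(thread_t *t)
    {
            (void) t;
            printf("woken concurrently, requeued immediately\n");
    }

    /* Scheduler side: finish putting a thread to sleep, unless it was already woken. */
    static void finish_sleep(thread_t *t)
    {
            int expected = SLEEP_INITIAL;

            /* Only mark the thread asleep if it is still in the initial state. */
            if (!atomic_compare_exchange_strong_explicit(&t->sleep_state,
                &expected, SLEEP_ASLEEP,
                memory_order_acq_rel, memory_order_acquire)) {
                    /* The CAS failed, so a waker got there first. */
                    assert(expected == SLEEP_WOKE);
                    requeue(t);
            } else {
                    printf("now asleep, waiting for a wakeup\n");
            }
    }

    int main(void)
    {
            thread_t t;
            atomic_init(&t.sleep_state, SLEEP_INITIAL);
            finish_sleep(&t);                        /* takes the "now asleep" path */

            atomic_store(&t.sleep_state, SLEEP_WOKE);
            finish_sleep(&t);                        /* takes the requeue path */
            return 0;
    }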
     
@@ -434,27 +362,36 @@
  *
  */
-void scheduler_enter(state_t new_state)
-{
-        ipl_t ipl = interrupts_disable();
-
+void scheduler_locked(ipl_t ipl)
+{
         assert(CPU != NULL);
-        assert(THREAD != NULL);
-
-        fpu_cleanup();
-
-        irq_spinlock_lock(&THREAD->lock, false);
-        THREAD->state = new_state;
-
-        /* Update thread kernel accounting */
-        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-        if (!context_save(&THREAD->saved_context)) {
-                /*
-                 * This is the place where threads leave scheduler();
-                 */
-
-                irq_spinlock_unlock(&THREAD->lock, false);
-                interrupts_restore(ipl);
-                return;
+
+        if (THREAD) {
+                /* Update thread kernel accounting */
+                THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
+                fpu_context_save(&THREAD->fpu_context);
+#endif
+                if (!context_save(&THREAD->saved_context)) {
+                        /*
+                         * This is the place where threads leave scheduler();
+                         */
+
+                        /* Save current CPU cycle */
+                        THREAD->last_cycle = get_cycle();
+
+                        irq_spinlock_unlock(&THREAD->lock, false);
+                        interrupts_restore(THREAD->saved_ipl);
+
+                        return;
+                }
+
+                /*
+                 * Interrupt priority level of preempted thread is recorded
+                 * here to facilitate scheduler() invocations from
+                 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+                 *
+                 */
+                THREAD->saved_ipl = ipl;
         }

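
Both versions rely on context_save() returning twice: once immediately after capturing the context, and once more when some other code later restores that context, which is the marked place where threads leave scheduler(). The same control-flow idiom can be sketched in portable C with setjmp()/longjmp(); note this is only an analogy, and the return-value convention of the real context_save() need not match setjmp's:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf saved_context;

    /* Stand-in for the code that later restores the saved context. */
    static void resume_saved_context(void)
    {
            longjmp(saved_context, 1);
    }

    int main(void)
    {
            if (setjmp(saved_context) == 0) {
                    /* First return: the context was captured, keep scheduling. */
                    printf("context saved, switching away\n");
                    resume_saved_context();
            } else {
                    /* Second return: control came back through the restore path. */
                    printf("resumed; this is where the thread leaves the scheduler\n");
            }
            return 0;
    }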
     
@@ -502,20 +439,42 @@
         assert(interrupts_disabled());

-        if (atomic_load(&haltstate))
-                halt();
-
         if (THREAD) {
-                after_thread_ran_arch();
-
-                state_t state = THREAD->state;
-
-                if (state == Sleeping) {
-                        /* Prefer the thread after it's woken up. */
+                /* Must be run after the switch to scheduler stack */
+                after_thread_ran();
+
+                switch (THREAD->state) {
+                case Running:
+                        irq_spinlock_unlock(&THREAD->lock, false);
+                        thread_ready(THREAD);
+                        break;
+
+                case Exiting:
+                        irq_spinlock_unlock(&THREAD->lock, false);
+                        waitq_close(&THREAD->join_wq);
+
+                        /*
+                         * Release the reference CPU has for the thread.
+                         * If there are no other references (e.g. threads calling join),
+                         * the thread structure is deallocated.
+                         */
+                        thread_put(THREAD);
+                        break;
+
+                case Sleeping:
+                        /*
+                         * Prefer the thread after it's woken up.
+                         */
                         THREAD->priority = -1;
+                        irq_spinlock_unlock(&THREAD->lock, false);
+                        break;
+
+                default:
+                        /*
+                         * Entering state is unexpected.
+                         */
+                        panic("tid%" PRIu64 ": unexpected state %s.",
+                            THREAD->tid, thread_states[THREAD->state]);
+                        break;
                 }
-
-                irq_spinlock_unlock(&THREAD->lock, false);
-
-                cleanup_after_thread(THREAD, state);

                 THREAD = NULL;
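
In the Exiting branch, thread_put(THREAD) drops the reference the CPU holds on the departing thread; as the comment says, the structure is deallocated only when the last reference (for example a joiner's) is released. A minimal sketch of that reference-counting idea with a hypothetical thread_ref_t; the real thread_t and thread_put() carry far more state:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct thread_ref {
            atomic_int refcount;
            /* ... the rest of the thread state would live here ... */
    } thread_ref_t;

    static thread_ref_t *thread_ref_new(void)
    {
            thread_ref_t *t = malloc(sizeof(*t));
            if (t == NULL)
                    abort();
            atomic_init(&t->refcount, 1);   /* creator holds the first reference */
            return t;
    }

    static void thread_ref_get(thread_ref_t *t)
    {
            atomic_fetch_add_explicit(&t->refcount, 1, memory_order_relaxed);
    }

    /* Drop one reference; free the structure when the last one goes away. */
    static void thread_ref_put(thread_ref_t *t)
    {
            if (atomic_fetch_sub_explicit(&t->refcount, 1,
                memory_order_acq_rel) == 1) {
                    printf("last reference dropped, freeing thread\n");
                    free(t);
            }
    }

    int main(void)
    {
            thread_ref_t *t = thread_ref_new();
            thread_ref_get(t);      /* e.g. the CPU that runs the thread */
            thread_ref_put(t);      /* the CPU lets go after the thread exits */
            thread_ref_put(t);      /* a joiner releases the last reference */
            return 0;
    }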
     
@@ -525,5 +484,27 @@
         THREAD = find_best_thread(&rq_index);

-        prepare_to_run_thread(rq_index);
+        relink_rq(rq_index);
+
+        switch_task(THREAD->task);
+
+        irq_spinlock_lock(&THREAD->lock, false);
+        THREAD->state = Running;
+
+#ifdef SCHEDULER_VERBOSE
+        log(LF_OTHER, LVL_DEBUG,
+            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
+            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+            THREAD->ticks, atomic_load(&CPU->nrdy));
+#endif
+
+        /*
+         * Some architectures provide late kernel PA2KA(identity)
+         * mapping in a page fault handler. However, the page fault
+         * handler uses the kernel stack of the running thread and
+         * therefore cannot be used to map it. The kernel stack, if
+         * necessary, is to be mapped in before_thread_runs(). This
+         * function must be executed before the switch to the new stack.
+         */
+        before_thread_runs();

         /*
     
@@ -679,5 +660,5 @@
                  *
                  */
-                thread_yield();
+                scheduler();
         } else {
                 /*