Changeset ec8ef12 in mainline for kernel/generic/src/proc/scheduler.c


Timestamp: 2023-04-19T09:31:02Z
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: c0757e1f
Parents: 117ad5a2
Message:

Split find_best_thread() into two functions

try_find_thread() attempts to get a thread from runqueues and returns
NULL when there's none available. find_best_thread() functions as
before and goes to sleep between attempts to find a thread to run.

The purpose of this split is that we can use the non-sleeping version
in the context of a previously running thread to avoid an additional
context switch in case a new thread is immediately available.
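
The payoff happens at the call site. Below is a minimal sketch of the intended caller pattern, assuming a scheduler entry point that still runs in the outgoing thread's context; scheduler_enter_sketch() and both switch helpers are hypothetical names for illustration, not code from this changeset:

/*
 * Hypothetical caller, sketching why the non-sleeping variant exists.
 * While we still execute in the outgoing thread's context, poll once;
 * only when nothing is runnable do we pay for the switch into a
 * separate scheduler context, where find_best_thread() may sleep.
 */
static void scheduler_enter_sketch(void)
{
        int rq_index;
        thread_t *next = try_find_thread(&rq_index);

        if (next != NULL) {
                /* Fast path: outgoing thread -> next thread directly,
                 * skipping the intermediate context switch. */
                switch_directly_sketch(next, rq_index);
        } else {
                /* Slow path: enter the scheduler context and let
                 * find_best_thread() sleep until work arrives. */
                switch_to_scheduler_context_sketch();
        }
}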

File: 1 edited

  • kernel/generic/src/proc/scheduler.c

--- kernel/generic/src/proc/scheduler.c (r117ad5a2)
+++ kernel/generic/src/proc/scheduler.c (rec8ef12)
@@ -178,33 +178,13 @@
  *
  */
-static thread_t *find_best_thread(int *rq_index)
-{
+static thread_t *try_find_thread(int *rq_index)
+{
+        assert(interrupts_disabled());
         assert(CPU != NULL);
 
-loop:
-        if (atomic_load(&CPU->nrdy) == 0) {
-                /*
-                 * For there was nothing to run, the CPU goes to sleep
-                 * until a hardware interrupt or an IPI comes.
-                 * This improves energy saving and hyperthreading.
-                 */
-                CPU->idle = true;
-
-                /*
-                 * Go to sleep with interrupts enabled.
-                 * Ideally, this should be atomic, but this is not guaranteed on
-                 * all platforms yet, so it is possible we will go sleep when
-                 * a thread has just become available.
-                 */
-                cpu_interruptible_sleep();
-
-                /* Interrupts are disabled again. */
-                goto loop;
-        }
-
-        assert(!CPU->idle);
-
-        unsigned int i;
-        for (i = 0; i < RQ_COUNT; i++) {
+        if (atomic_load(&CPU->nrdy) == 0)
+                return NULL;
+
+        for (int i = 0; i < RQ_COUNT; i++) {
                 irq_spinlock_lock(&(CPU->rq[i].lock), false);
                 if (CPU->rq[i].n == 0) {
@@ -249,5 +229,42 @@
         }
 
-        goto loop;
+        return NULL;
+}
+
+/** Get thread to be scheduled
+ *
+ * Get the optimal thread to be scheduled
+ * according to thread accounting and scheduler
+ * policy.
+ *
+ * @return Thread to be scheduled.
+ *
+ */
+static thread_t *find_best_thread(int *rq_index)
+{
+        assert(interrupts_disabled());
+        assert(CPU != NULL);
+
+        while (true) {
+                thread_t *thread = try_find_thread(rq_index);
+
+                if (thread != NULL)
+                        return thread;
+
+                /*
+                 * For there was nothing to run, the CPU goes to sleep
+                 * until a hardware interrupt or an IPI comes.
+                 * This improves energy saving and hyperthreading.
+                 */
+                CPU->idle = true;
+
+                /*
+                 * Go to sleep with interrupts enabled.
+                 * Ideally, this should be atomic, but this is not guaranteed on
+                 * all platforms yet, so it is possible we will go sleep when
+                 * a thread has just become available.
+                 */
+                cpu_interruptible_sleep();
+        }
 }
 
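
Outside the kernel, the same try/blocking split can be modeled with ordinary POSIX primitives. The analogue below is purely illustrative and uses none of the HelenOS names: a condition variable stands in for cpu_interruptible_sleep(), and work_t stands in for thread_t.

#include <pthread.h>
#include <stddef.h>

#define RQ_COUNT 4

typedef struct work {
        struct work *next;
        int id;
} work_t;

static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_ready = PTHREAD_COND_INITIALIZER;
static work_t *rq[RQ_COUNT];

/* Non-blocking: scan the queues once, highest priority first; return
 * NULL if all are empty. Caller must hold queues_lock. */
static work_t *try_find_work(int *rq_index)
{
        for (int i = 0; i < RQ_COUNT; i++) {
                if (rq[i] != NULL) {
                        work_t *w = rq[i];
                        rq[i] = w->next;
                        *rq_index = i;
                        return w;
                }
        }
        return NULL;
}

/* Blocking wrapper: retry, sleeping between attempts, mirroring the
 * structure of the new find_best_thread(). */
static work_t *find_best_work(int *rq_index)
{
        pthread_mutex_lock(&queues_lock);
        work_t *w;
        while ((w = try_find_work(rq_index)) == NULL)
                pthread_cond_wait(&work_ready, &queues_lock);
        pthread_mutex_unlock(&queues_lock);
        return w;
}

/* Producer side: enqueue at the given priority and wake one sleeper. */
static void put_work(int prio, work_t *w)
{
        pthread_mutex_lock(&queues_lock);
        w->next = rq[prio];
        rq[prio] = w;
        pthread_cond_signal(&work_ready);
        pthread_mutex_unlock(&queues_lock);
}

One difference is worth noting: as the in-code comment says, the kernel's check-then-sleep is not atomic on all platforms, so a wakeup can be missed and the loop simply re-polls after the next interrupt or IPI; the condition-variable version makes check-and-sleep atomic, so it needs no such caveat.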