/*
 * Copyright (C) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken before
 * the newly selected thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
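	/*
	 * With lazy FPU switching (CONFIG_FPU_LAZY), the FPU is left enabled
	 * only for the thread that owns the CPU's FPU state; for any other
	 * thread it is disabled, so that its first FPU instruction traps and
	 * the context is switched on demand in scheduler_fpu_lazy_request().
	 */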
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after
 * the running thread has been preempted by the
 * scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
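	/*
	 * Invoked when a thread that does not own the CPU's FPU state
	 * executes an FPU instruction, typically from the architecture's
	 * "FPU disabled" trap handler.
	 */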
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
			    slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled according
 * to thread accounting and scheduler policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This saves energy and, on hyperthreaded CPUs, frees
		 * execution resources shared with the sibling thread.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

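	/*
	 * Interrupts stay disabled from here on: the run queue spinlocks
	 * taken below are also taken from interrupt context (e.g. when an
	 * interrupt handler calls thread_ready()), so holding them with
	 * interrupts enabled could deadlock.
	 */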
	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

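		/*
		 * Bookkeeping: one fewer ready thread, both on this CPU and
		 * system-wide.
		 */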
		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

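		/*
		 * The time quantum grows with the queue index: a thread taken
		 * from rq[i] receives (i + 1) * 10 ms, so lower-priority
		 * threads run less frequently but for longer stretches.
		 */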
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that each run queue rq[i] with
 * i > start is merged into rq[i - 1], moving its threads one
 * level up in priority.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
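		/*
		 * Example: with start == 2, rq[3] is appended to rq[2],
		 * then rq[4] to rq[3], and so on up to the last queue;
		 * each affected thread is promoted by one priority level.
		 */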
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
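		/*
		 * context_save() returns twice: true here, when the context
		 * is stored, and seemingly false later, when another pass
		 * through the scheduler resumes this context via
		 * context_restore().
		 */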
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function, running
 * on the new stack. It handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
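			/*
			 * Setting priority to -1 ensures that the next
			 * thread_ready() places the woken thread into the
			 * highest-priority run queue.
			 */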
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same, lots of work is
	 * avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
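	/*
	 * 'average' is the fair per-CPU share of ready threads, rounded up;
	 * 'count' is how many threads this CPU is short of that share, i.e.
	 * how many it will attempt to steal.
	 */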
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling, because
			 * kcpulb has THREAD_FLAG_WIRED and thus always runs
			 * on the same CPU.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
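			/*
			 * Stealing from the tail presumably prefers threads
			 * that have waited longest and are therefore coldest
			 * in their home CPU's cache.
			 */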
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPU's without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (THREAD_FLAG_WIRED |
				    THREAD_FLAG_STOLEN))) &&
				    (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
				    "avg=%ld\n", CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */
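/*
 * Diagnostic helper; typically invoked from the kernel console.
 */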
/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */