source: mainline/kernel/generic/src/proc/scheduler.c@ b2fa1204

Last change on this file: b2fa1204, checked in by Martin Sucha <sucha14@…>, 11 years ago

Cherrypick usage of kernel logger

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <log.h>
#include <debug.h>
#include <stacktrace.h>

static void scheduler_separated_stack(void);

atomic_t nrdy; /**< Number of ready threads in the system. */

/** Carry out actions before a new task runs. */
static void before_task_runs(void)
{
    before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
static void before_thread_runs(void)
{
    before_thread_runs_arch();

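    /*
     * FPU strategy: with CONFIG_FPU_LAZY the unit is enabled only when this
     * CPU already owns the incoming thread's FPU state and disabled
     * otherwise, so the thread's first FPU instruction is expected to trap
     * and let scheduler_fpu_lazy_request() restore the state on demand.
     * With plain CONFIG_FPU the saved context is restored eagerly here.
     */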
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#elif defined CONFIG_FPU
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = true;
    }
#endif

#ifdef CONFIG_UDEBUG
    if (THREAD->btrace) {
        istate_t *istate = THREAD->udebug.uspace_state;
        if (istate != NULL) {
            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
            stack_trace_istate(istate);
        }

        THREAD->btrace = false;
    }
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
static void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
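    /*
     * Overview of the code below: release the FPU state of the current
     * owner (if any) back to memory, then either restore this thread's
     * saved state or lazily allocate and initialize a fresh context, and
     * finally mark this thread as the new owner of the CPU's FPU.
     */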
restart:
    fpu_enable();
    irq_spinlock_lock(&CPU->lock, false);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        irq_spinlock_lock(&CPU->fpu_owner->lock, false);
        fpu_context_save(CPU->fpu_owner->saved_fpu_context);

        /* Don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = false;
        irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
        CPU->fpu_owner = NULL;
    }

    irq_spinlock_lock(&THREAD->lock, false);
    if (THREAD->fpu_context_exists) {
        fpu_context_restore(THREAD->saved_fpu_context);
    } else {
        /* Allocate FPU context */
        if (!THREAD->saved_fpu_context) {
            /* Might sleep */
            irq_spinlock_unlock(&THREAD->lock, false);
            irq_spinlock_unlock(&CPU->lock, false);
            THREAD->saved_fpu_context =
                (fpu_context_t *) slab_alloc(fpu_context_slab, 0);

            /* We may have switched CPUs during slab_alloc */
            goto restart;
        }
        fpu_init();
        THREAD->fpu_context_exists = true;
    }

    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = true;
    irq_spinlock_unlock(&THREAD->lock, false);

    irq_spinlock_unlock(&CPU->lock, false);
}
#endif /* CONFIG_FPU_LAZY */

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
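    /*
     * Run queue scan below: rq[0] is the highest priority and
     * rq[RQ_COUNT - 1] the lowest, so the first non-empty queue wins.
     * The selected thread gets a time quantum proportional to its queue
     * index, (i + 1) * 10000 microseconds, so lower-priority threads,
     * when they do get picked, receive a longer slice.
     */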
    ASSERT(CPU != NULL);

loop:

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and hyperthreading.
         */
        irq_spinlock_lock(&CPU->lock, false);
        CPU->idle = true;
        irq_spinlock_unlock(&CPU->lock, false);
        interrupts_enable();

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */
        cpu_sleep();
        interrupts_disable();
        goto loop;
    }

    unsigned int i;
    for (i = 0; i < RQ_COUNT; i++) {
        irq_spinlock_lock(&(CPU->rq[i].lock), false);
        if (CPU->rq[i].n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        CPU->rq[i].n--;

        /*
         * Take the first thread from the queue.
         */
        thread_t *thread = list_get_instance(
            list_first(&CPU->rq[i].rq), thread_t, rq_link);
        list_remove(&thread->rq_link);

        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

        thread->cpu = CPU;
        thread->ticks = us2ticks((i + 1) * 10000);
        thread->priority = i; /* Correct rq index */

        /*
         * Clear the stolen flag so that the thread can be migrated
         * again when the need for load balancing arises.
         */
        thread->stolen = false;
        irq_spinlock_unlock(&thread->lock, false);

        return thread;
    }

    goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that threads with priority greater
 * than or equal to start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
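    /*
     * Anti-starvation mechanism as implemented below: once
     * CPU->needs_relink exceeds NEEDS_RELINK_MAX, every queue rq[i + 1]
     * with i >= start is emptied into rq[i], effectively boosting all
     * waiting threads by one priority level. needs_relink itself is
     * advanced elsewhere, presumably by the clock tick handler.
     */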
    list_t list;

    list_initialize(&list);
    irq_spinlock_lock(&CPU->lock, false);

    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        int i;
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* Remember and empty rq[i + 1] */

            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
            list_concat(&list, &CPU->rq[i + 1].rq);
            size_t n = CPU->rq[i + 1].n;
            CPU->rq[i + 1].n = 0;
            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);

            /* Append rq[i + 1] to rq[i] */

            irq_spinlock_lock(&CPU->rq[i].lock, false);
            list_concat(&CPU->rq[i].rq, &list);
            CPU->rq[i].n += n;
            irq_spinlock_unlock(&CPU->rq[i].lock, false);
        }

        CPU->needs_relink = 0;
    }

    irq_spinlock_unlock(&CPU->lock, false);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
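    /*
     * Outline of what follows: interrupts are disabled, the accounting,
     * FPU and register context of the outgoing thread (if any) is saved,
     * and then execution moves to the CPU's private stack in
     * scheduler_separated_stack(), where the actual switch happens.
     */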
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        irq_spinlock_lock(&THREAD->lock, false);

        /* Update thread kernel accounting */
        THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
        fpu_context_save(THREAD->saved_fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            irq_spinlock_unlock(&THREAD->lock, false);
            interrupts_restore(THREAD->saved_context.ipl);

            return;
        }

        /*
         * The interrupt priority level of the preempted thread is
         * recorded here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, STACK_SIZE);
    context_restore(&CPU->saved_context);

    /* Not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    DEADLOCK_PROBE_INIT(p_joinwq);
    task_t *old_task = TASK;
    as_t *old_as = AS;

    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    ASSERT(CPU != NULL);

    /*
     * Hold the current task and the address space to prevent their
     * possible destruction should thread_destroy() be called on this or
     * any other processor while the scheduler is still using them.
     */
    if (old_task)
        task_hold(old_task);

    if (old_as)
        as_hold(old_as);

    if (THREAD) {
        /* Must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            irq_spinlock_unlock(&THREAD->lock, false);
            thread_ready(THREAD);
            break;

        case Exiting:
repeat:
            if (THREAD->detached) {
                thread_destroy(THREAD, false);
            } else {
                /*
                 * The thread structure is kept allocated until
                 * somebody calls thread_detach() on it.
                 */
                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
                    /*
                     * Avoid deadlock: we hold THREAD->lock and need
                     * join_wq.lock, while another CPU may be taking
                     * the same locks in the opposite order. Back off,
                     * drop THREAD->lock for a while and retry.
                     */
                    irq_spinlock_unlock(&THREAD->lock, false);
                    delay(HZ);
                    irq_spinlock_lock(&THREAD->lock, false);
                    DEADLOCK_PROBE(p_joinwq,
                        DEADLOCK_THRESHOLD);
                    goto repeat;
                }
                _waitq_wakeup_unsafe(&THREAD->join_wq,
                    WAKEUP_FIRST);
                irq_spinlock_unlock(&THREAD->join_wq.lock, false);

                THREAD->state = Lingering;
                irq_spinlock_unlock(&THREAD->lock, false);
            }
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in
             * waitq_sleep(). Address of wq->lock is kept in
             * THREAD->sleep_queue.
             */
            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);

            irq_spinlock_unlock(&THREAD->lock, false);
            break;

        default:
            /*
             * Any other state (e.g. Entering) is unexpected.
             */
            panic("tid%" PRIu64 ": unexpected state %s.",
                THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    THREAD = find_best_thread();

    irq_spinlock_lock(&THREAD->lock, false);
    int priority = THREAD->priority;
    irq_spinlock_unlock(&THREAD->lock, false);

    relink_rq(priority);

    /*
     * If both the old and the new task are the same,
     * lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *new_as = THREAD->task->as;

        /*
         * Note that it is possible for two tasks
         * to share one address space.
         */
        if (old_as != new_as) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(old_as, new_as);
        }

        TASK = THREAD->task;
        before_task_runs();
    }

    if (old_task)
        task_release(old_task);

    if (old_as)
        as_release(old_as);

    irq_spinlock_lock(&THREAD->lock, false);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    log(LF_OTHER, LVL_DEBUG,
        "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
        ", nrdy=%" PRIua ")", CPU->id, THREAD->tid, THREAD->priority,
        THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);

    /* Not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
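    /*
     * Balancing policy implemented below: compute the average number of
     * ready threads per active CPU and, as long as this CPU is below that
     * average, steal runnable threads from CPUs that are above it, walking
     * their run queues from the lowest priority upwards and skipping
     * threads that are wired, already stolen, non-migratable or still
     * holding an FPU context on their current CPU.
     */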
    atomic_count_t average;
    atomic_count_t rdy;

    /*
     * Detach kcpulb as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation can have changed between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    rdy = atomic_get(&CPU->nrdy);

    if (average <= rdy)
        goto satisfied;

    atomic_count_t count = average - rdy;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    size_t acpu;
    size_t acpu_bias = 0;
    int rq;

    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
        for (acpu = 0; acpu < config.cpu_active; acpu++) {
            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Disabling interrupts is not required because
             * kcpulb has THREAD_FLAG_WIRED.
             */
            if (CPU == cpu)
                continue;

            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
            if (cpu->rq[rq].n == 0) {
                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
                continue;
            }

            thread_t *thread = NULL;

            /* Search rq from the back */
            link_t *link = cpu->rq[rq].rq.head.prev;

            while (link != &(cpu->rq[rq].rq.head)) {
                thread = (thread_t *) list_get_instance(link,
                    thread_t, rq_link);

                /*
                 * Do not steal CPU-wired threads, threads
                 * already stolen, threads for which migration
                 * was temporarily disabled or threads whose
                 * FPU context is still in the CPU.
                 */
                irq_spinlock_lock(&thread->lock, false);

                if ((!thread->wired) && (!thread->stolen) &&
                    (!thread->nomigrate) &&
                    (!thread->fpu_context_engaged)) {
                    /*
                     * Remove thread from ready queue.
                     */
                    irq_spinlock_unlock(&thread->lock,
                        false);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    cpu->rq[rq].n--;
                    list_remove(&thread->rq_link);

                    break;
                }

                irq_spinlock_unlock(&thread->lock, false);

                link = link->prev;
                thread = NULL;
            }

            if (thread) {
                /*
                 * Ready the thread on the local CPU.
                 */

                irq_spinlock_pass(&(cpu->rq[rq].lock),
                    &thread->lock);

#ifdef KCPULB_VERBOSE
                log(LF_OTHER, LVL_DEBUG,
                    "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
                    "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
                    CPU->id, atomic_get(&CPU->nrdy),
                    atomic_get(&nrdy) / config.cpu_active);
#endif

                thread->stolen = true;
                thread->state = Entering;

                irq_spinlock_unlock(&thread->lock, true);
                thread_ready(thread);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another
                 * CPU next time.
                 */
                acpu_bias++;

                continue;
            } else
                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);

        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues
 *
 */
void sched_print_list(void)
{
    size_t cpu;
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;

        irq_spinlock_lock(&cpus[cpu].lock, true);

        printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
            cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
            cpus[cpu].needs_relink);

        unsigned int i;
        for (i = 0; i < RQ_COUNT; i++) {
            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
            if (cpus[cpu].rq[i].n == 0) {
                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                continue;
            }

            printf("\trq[%u]: ", i);
            list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
                thread) {
                printf("%" PRIu64 "(%s) ", thread->tid,
                    thread_states[thread->state]);
            }
            printf("\n");

            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
        }

        irq_spinlock_unlock(&cpus[cpu].lock, true);
    }
}

/** @}
 */