source: mainline/kernel/generic/src/proc/scheduler.c@5861b60

Last change on this file since 5861b60 was 5861b60, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 18 months ago

Lift actions that do not need separate context out of scheduler_separated_stack()

We're making scheduler_separated_stack() as simple as possible so that later
we can switch from thread to thread directly whenever we don't need to wait.
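
As a rough, hypothetical sketch of where this refactoring is heading (not code in this revision), a direct thread-to-thread switch would collapse the current save/jump-to-CPU-stack/restore detour into a single swap. The context_swap() primitive below is an assumption for illustration; this file only has the context_save()/context_restore() pair, and the CURRENT/TASK/FPU bookkeeping done by prepare_to_run_thread() and scheduler_separated_stack() is omitted for brevity:

    /* Hypothetical sketch only: switch straight from the outgoing thread to
     * the next one when we do not need to wait.  context_swap() is an assumed
     * primitive; all scheduler bookkeeping is left out for brevity. */
    static void thread_to_thread_switch(thread_t *next)
    {
            /* Save the outgoing context and resume the incoming one in one
             * step, never leaving thread stacks for the per-CPU scheduler
             * stack. */
            context_swap(&THREAD->saved_context, &next->saved_context);

            /* Execution continues here once this thread is scheduled again. */
    }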

  • Property mode set to 100644
File size: 16.8 KB
[f761f1eb]1/*
[481d4751]2 * Copyright (c) 2010 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[174156fd]29/** @addtogroup kernel_generic_proc
[b45c443]30 * @{
31 */
32
[9179d0a]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief Scheduler and load balancing.
[9179d0a]36 *
[cf26ba9]37 * This file contains the scheduler and kcpulb kernel thread which
[9179d0a]38 * performs load-balancing of per-CPU run queues.
39 */
40
[63e27ef]41#include <assert.h>
[4621d23]42#include <atomic.h>
[f761f1eb]43#include <proc/scheduler.h>
44#include <proc/thread.h>
45#include <proc/task.h>
[32ff43e6]46#include <mm/frame.h>
47#include <mm/page.h>
[20d50a1]48#include <mm/as.h>
[b3f8fb7]49#include <time/timeout.h>
[fe19611]50#include <time/delay.h>
[32ff43e6]51#include <arch/asm.h>
52#include <arch/faddr.h>
[cce6acf]53#include <arch/cycle.h>
[23684b7]54#include <atomic.h>
[32ff43e6]55#include <synch/spinlock.h>
[f761f1eb]56#include <config.h>
57#include <context.h>
[b3f8fb7]58#include <fpu_context.h>
[b2e121a]59#include <halt.h>
[f761f1eb]60#include <arch.h>
[5c9a08b]61#include <adt/list.h>
[02a99d2]62#include <panic.h>
[32ff43e6]63#include <cpu.h>
[bab75df6]64#include <stdio.h>
[b2fa1204]65#include <log.h>
[df58e44]66#include <stacktrace.h>
[9c0a9b3]67
[7d6ec87]68static void scheduler_separated_stack(void);
69
[31e15be]70atomic_size_t nrdy; /**< Number of ready threads in the system. */
[f761f1eb]71
[5f85c91]72#ifdef CONFIG_FPU_LAZY
[b49f4ae]73void scheduler_fpu_lazy_request(void)
74{
75 fpu_enable();
[f3dbe27]76
77 /* We need this lock to ensure synchronization with the thread destructor. */
[169815e]78 irq_spinlock_lock(&CPU->fpu_lock, false);
[a35b458]79
[a3eeceb6]80 /* Save old context */
[f3dbe27]81 thread_t *owner = atomic_load_explicit(&CPU->fpu_owner, memory_order_relaxed);
82 if (owner != NULL) {
83 fpu_context_save(&owner->fpu_context);
84 atomic_store_explicit(&CPU->fpu_owner, NULL, memory_order_relaxed);
[b49f4ae]85 }
[a35b458]86
[f3dbe27]87 irq_spinlock_unlock(&CPU->fpu_lock, false);
88
[7d6ec87]89 if (THREAD->fpu_context_exists) {
[0366d09d]90 fpu_context_restore(&THREAD->fpu_context);
[7d6ec87]91 } else {
[f76fed4]92 fpu_init();
[6eef3c4]93 THREAD->fpu_context_exists = true;
[b49f4ae]94 }
[a35b458]95
[f3dbe27]96 atomic_store_explicit(&CPU->fpu_owner, THREAD, memory_order_relaxed);
[b49f4ae]97}
[da1bafb]98#endif /* CONFIG_FPU_LAZY */
[0ca6faa]99
[70527f1]100/** Initialize scheduler
101 *
102 * Initialize kernel scheduler.
103 *
104 */
[f761f1eb]105void scheduler_init(void)
106{
107}
108
[70527f1]109/** Try to get a thread to be scheduled
110 *
111 * Try to find the optimal thread to be scheduled
[d1a184f]112 * according to thread accounting and scheduler
[70527f1]113 * policy, without waiting for one to become ready.
114 *
115 * @return Thread to be scheduled, or NULL if no ready thread is available.
116 *
117 */
[ec8ef12]118static thread_t *try_find_thread(int *rq_index)
[f761f1eb]119{
[ec8ef12]120 assert(interrupts_disabled());
[63e27ef]121 assert(CPU != NULL);
[a35b458]122
[ec8ef12]123 if (atomic_load(&CPU->nrdy) == 0)
124 return NULL;
[a35b458]125
[ec8ef12]126 for (int i = 0; i < RQ_COUNT; i++) {
[da1bafb]127 irq_spinlock_lock(&(CPU->rq[i].lock), false);
128 if (CPU->rq[i].n == 0) {
[f761f1eb]129 /*
130 * If this queue is empty, try a lower-priority queue.
131 */
[da1bafb]132 irq_spinlock_unlock(&(CPU->rq[i].lock), false);
[f761f1eb]133 continue;
134 }
[a35b458]135
[248fc1a]136 atomic_dec(&CPU->nrdy);
[59e07c91]137 atomic_dec(&nrdy);
[da1bafb]138 CPU->rq[i].n--;
[a35b458]139
[f761f1eb]140 /*
141 * Take the first thread from the queue.
142 */
[55b77d9]143 thread_t *thread = list_get_instance(
144 list_first(&CPU->rq[i].rq), thread_t, rq_link);
[da1bafb]145 list_remove(&thread->rq_link);
[a35b458]146
[8996582]147 irq_spinlock_unlock(&(CPU->rq[i].lock), false);
[a35b458]148
[117ad5a2]149 *rq_index = i;
[da1bafb]150 return thread;
[f761f1eb]151 }
[a35b458]152
[ec8ef12]153 return NULL;
154}
155
156/** Get thread to be scheduled
157 *
158 * Get the optimal thread to be scheduled
159 * according to thread accounting and scheduler
160 * policy.
161 *
162 * @return Thread to be scheduled.
163 *
164 */
165static thread_t *find_best_thread(int *rq_index)
166{
167 assert(interrupts_disabled());
168 assert(CPU != NULL);
169
170 while (true) {
171 thread_t *thread = try_find_thread(rq_index);
172
173 if (thread != NULL)
174 return thread;
175
176 /*
177 * Since there was nothing to run, the CPU goes to sleep
178 * until a hardware interrupt or an IPI arrives.
179 * This saves energy and benefits hyperthreading.
180 */
[4760793]181 CPU_LOCAL->idle = true;
[ec8ef12]182
183 /*
184 * Go to sleep with interrupts enabled.
185 * Ideally, this should be atomic, but this is not guaranteed on
186 * all platforms yet, so it is possible we will go to sleep when
187 * a thread has just become available.
188 */
189 cpu_interruptible_sleep();
190 }
[f761f1eb]191}
192
[c680333]193static void switch_task(task_t *task)
194{
195 /* If the task stays the same, a lot of work is avoided. */
196 if (TASK == task)
197 return;
198
199 as_t *old_as = AS;
200 as_t *new_as = task->as;
201
202 /* It is possible for two tasks to share one address space. */
203 if (old_as != new_as)
204 as_switch(old_as, new_as);
205
206 if (TASK)
207 task_release(TASK);
208
209 TASK = task;
210
211 task_hold(TASK);
212
213 before_task_runs_arch();
214}
215
[70527f1]216/** Prevent rq starvation
217 *
218 * Prevent low priority threads from starving in rq's.
219 *
220 * When the function decides to relink rq's, it reconnects
221 * the respective pointers so that, as a result, threads with priority
[abbc16e]222 * greater than or equal to 'start' are moved to a higher-priority queue.
[70527f1]223 *
224 * @param start Threshold priority.
225 *
[f761f1eb]226 */
[e16e036a]227static void relink_rq(int start)
[f761f1eb]228{
[4760793]229 if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
[011c79a]230 return;
231
[4760793]232 CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;
[a35b458]233
[3118355]234 /* Temporary cache for lists we are moving. */
[011c79a]235 list_t list;
[55b77d9]236 list_initialize(&list);
[a35b458]237
[3118355]238 size_t n = 0;
239
240 /* Move every list (except the one with highest priority) one level up. */
241 for (int i = RQ_COUNT - 1; i > start; i--) {
242 irq_spinlock_lock(&CPU->rq[i].lock, false);
[a35b458]243
[3118355]244 /* Swap lists. */
245 list_swap(&CPU->rq[i].rq, &list);
[a35b458]246
[3118355]247 /* Swap number of items. */
248 size_t tmpn = CPU->rq[i].n;
249 CPU->rq[i].n = n;
250 n = tmpn;
[a35b458]251
[011c79a]252 irq_spinlock_unlock(&CPU->rq[i].lock, false);
[f761f1eb]253 }
[a35b458]254
[3118355]255 /* Append the contents of rq[start + 1] to rq[start]. */
256 if (n != 0) {
257 irq_spinlock_lock(&CPU->rq[start].lock, false);
258 list_concat(&CPU->rq[start].rq, &list);
259 CPU->rq[start].n += n;
260 irq_spinlock_unlock(&CPU->rq[start].lock, false);
261 }
[f761f1eb]262}
263
[23f36a3]264/**
265 * Do whatever needs to be done with current FPU state before we switch to
266 * another thread.
267 */
268static void fpu_cleanup(void)
269{
270#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
271 fpu_context_save(&THREAD->fpu_context);
272#endif
273}
274
275/**
276 * Set correct FPU state for this thread after switch from another thread.
277 */
278static void fpu_restore(void)
279{
280#ifdef CONFIG_FPU_LAZY
281 /*
282 * The only concurrent modification possible for fpu_owner here is
283 * another thread changing it from itself to NULL in its destructor.
284 */
285 thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
286 memory_order_relaxed);
287
288 if (THREAD == owner)
289 fpu_enable();
290 else
291 fpu_disable();
292
293#elif defined CONFIG_FPU
294 fpu_enable();
295 if (THREAD->fpu_context_exists)
296 fpu_context_restore(&THREAD->fpu_context);
297 else {
298 fpu_init();
299 THREAD->fpu_context_exists = true;
300 }
301#endif
302}
303
[151c050]304void scheduler_run(void)
[111b9b9]305{
[151c050]306 assert(interrupts_disabled());
307 assert(THREAD == NULL);
308 assert(CPU != NULL);
[111b9b9]309
[151c050]310 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
[111b9b9]311
[151c050]312 context_t ctx;
313 context_save(&ctx);
314 context_set(&ctx, FADDR(scheduler_separated_stack),
315 (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
316 context_restore(&ctx);
[111b9b9]317
[151c050]318 unreachable();
[111b9b9]319}
320
[8996582]321/** Things to do before we switch to THREAD context.
322 */
323static void prepare_to_run_thread(int rq_index)
324{
325 relink_rq(rq_index);
326
327 switch_task(THREAD->task);
328
329 irq_spinlock_lock(&THREAD->lock, false);
330 THREAD->state = Running;
331 THREAD->cpu = CPU;
332 THREAD->priority = rq_index; /* Correct rq index */
333
334 /*
335 * Clear the stolen flag so that the thread can be migrated
336 * again when the need for load balancing emerges.
337 */
338 THREAD->stolen = false;
339
340#ifdef SCHEDULER_VERBOSE
341 log(LF_OTHER, LVL_DEBUG,
342 "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
343 ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
344 THREAD->ticks, atomic_load(&CPU->nrdy));
345#endif
346
347 /*
348 * Some architectures provide late kernel PA2KA(identity)
349 * mapping in a page fault handler. However, the page fault
350 * handler uses the kernel stack of the running thread and
351 * therefore cannot be used to map it. The kernel stack, if
352 * necessary, is to be mapped in before_thread_runs(). This
353 * function must be executed before the switch to the new stack.
354 */
355 before_thread_runs_arch();
356
357#ifdef CONFIG_UDEBUG
358 if (THREAD->btrace) {
359 istate_t *istate = THREAD->udebug.uspace_state;
360 if (istate != NULL) {
361 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
362 stack_trace_istate(istate);
363 }
364
365 THREAD->btrace = false;
366 }
367#endif
368
369 fpu_restore();
370
371 /* Time allocation in microseconds. */
372 uint64_t time_to_run = (rq_index + 1) * 10000;
373
374 /* Set the time of next preemption. */
375 CPU_LOCAL->preempt_deadline =
376 CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
377
378 /* Save current CPU cycle */
379 THREAD->last_cycle = get_cycle();
380}
381
[6e49dab]382static void cleanup_after_thread(thread_t *thread, state_t out_state)
383{
384 assert(CURRENT->mutex_locks == 0);
385 assert(interrupts_disabled());
386
387 int expected;
388
389 switch (out_state) {
390 case Running:
391 thread_ready(thread);
392 break;
393
394 case Exiting:
395 waitq_close(&thread->join_wq);
396
397 /*
398 * Release the reference CPU has for the thread.
399 * If there are no other references (e.g. threads calling join),
400 * the thread structure is deallocated.
401 */
402 thread_put(thread);
403 break;
404
405 case Sleeping:
406 expected = SLEEP_INITIAL;
407
408 /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
409 if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state,
410 &expected, SLEEP_ASLEEP,
411 memory_order_acq_rel, memory_order_acquire)) {
412
413 assert(expected == SLEEP_WOKE);
414 /* The thread has already been woken up, requeue immediately. */
415 thread_ready(thread);
416 }
417 break;
418
419 default:
420 /*
421 * Entering state is unexpected.
422 */
423 panic("tid%" PRIu64 ": unexpected state %s.",
424 thread->tid, thread_states[thread->state]);
425 break;
426 }
427}
428
[7d6ec87]429/** The scheduler
430 *
431 * The thread scheduling procedure.
432 * Passes control directly to
433 * scheduler_separated_stack().
434 *
435 */
[151c050]436void scheduler_enter(state_t new_state)
[7d6ec87]437{
[151c050]438 ipl_t ipl = interrupts_disable();
[a35b458]439
[151c050]440 assert(CPU != NULL);
441 assert(THREAD != NULL);
[23f36a3]442
[151c050]443 fpu_cleanup();
[a35b458]444
[151c050]445 irq_spinlock_lock(&THREAD->lock, false);
446 THREAD->state = new_state;
[a35b458]447
[151c050]448 /* Update thread kernel accounting */
449 THREAD->kcycles += get_cycle() - THREAD->last_cycle;
[a35b458]450
[5861b60]451 after_thread_ran_arch();
452
453 if (new_state == Sleeping) {
454 /* Prefer the thread after it's woken up. */
455 THREAD->priority = -1;
456 }
457
[151c050]458 if (!context_save(&THREAD->saved_context)) {
[7d6ec87]459 /*
[151c050]460 * This is the place where threads leave scheduler();
[7d6ec87]461 */
[151c050]462
463 irq_spinlock_unlock(&THREAD->lock, false);
464 interrupts_restore(ipl);
465 return;
[7d6ec87]466 }
[a35b458]467
[7d6ec87]468 /*
[a6e55886]469 * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
470 * and preemption counter. At this point CURRENT could be coming either
[7d6ec87]471 * from THREAD's or CPU's stack.
[da1bafb]472 *
[7d6ec87]473 */
[4760793]474 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
[a35b458]475
[7d6ec87]476 /*
477 * We may not keep the old stack.
478 * Reason: If we kept the old stack and got blocked, for instance, in
479 * find_best_thread(), the old thread could get rescheduled by another
480 * CPU and overwrite the part of its own stack that was also used by
481 * the scheduler on this CPU.
482 *
483 * Moreover, we have to bypass the compiler-generated POP sequence
484 * which is fooled by SP being set to the very top of the stack.
485 * Therefore the scheduler() function continues in
486 * scheduler_separated_stack().
[da1bafb]487 *
[7d6ec87]488 */
[daadfa6]489 context_t ctx;
490 context_save(&ctx);
491 context_set(&ctx, FADDR(scheduler_separated_stack),
[4760793]492 (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
[daadfa6]493 context_restore(&ctx);
[a35b458]494
[da1bafb]495 /* Not reached */
[7d6ec87]496}
[70527f1]497
498/** Scheduler stack switch wrapper
499 *
500 * Second part of the scheduler() function
501 * using new stack. Handling the actual context
502 * switch to a new thread.
503 *
504 */
[7d6ec87]505void scheduler_separated_stack(void)
[f761f1eb]506{
[63e27ef]507 assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
508 assert(CPU != NULL);
509 assert(interrupts_disabled());
[a35b458]510
[151c050]511 if (atomic_load(&haltstate))
512 halt();
513
[43114c5]514 if (THREAD) {
[6e49dab]515 state_t state = THREAD->state;
516 irq_spinlock_unlock(&THREAD->lock, false);
[1871118]517
[6e49dab]518 cleanup_after_thread(THREAD, state);
[a35b458]519
[43114c5]520 THREAD = NULL;
[f761f1eb]521 }
[a35b458]522
[117ad5a2]523 int rq_index;
524 THREAD = find_best_thread(&rq_index);
[a35b458]525
[8996582]526 prepare_to_run_thread(rq_index);
[a35b458]527
[3e1607f]528 /*
[4e33b6b]529 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
530 * thread's stack.
[3e1607f]531 */
[a6e55886]532 current_copy(CURRENT, (current_t *) THREAD->kstack);
[a35b458]533
[43114c5]534 context_restore(&THREAD->saved_context);
[a35b458]535
[da1bafb]536 /* Not reached */
[f761f1eb]537}
538
[5f85c91]539#ifdef CONFIG_SMP
[fbaf6ac]540
541static thread_t *steal_thread_from(cpu_t *old_cpu, int i)
542{
543 runq_t *old_rq = &old_cpu->rq[i];
544 runq_t *new_rq = &CPU->rq[i];
545
[06f81c4]546 ipl_t ipl = interrupts_disable();
547
548 irq_spinlock_lock(&old_rq->lock, false);
[fbaf6ac]549
[f3dbe27]550 /*
551 * If fpu_owner is any thread in the list, its store is seen here thanks to
552 * the runqueue lock.
553 */
554 thread_t *fpu_owner = atomic_load_explicit(&old_cpu->fpu_owner,
555 memory_order_relaxed);
556
[fbaf6ac]557 /* Search rq from the back */
558 list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
559
560 irq_spinlock_lock(&thread->lock, false);
561
562 /*
563 * Do not steal CPU-wired threads, threads
564 * already stolen, threads for which migration
565 * was temporarily disabled or threads whose
566 * FPU context is still in the CPU.
567 */
[06f81c4]568 if (thread->stolen || thread->nomigrate ||
[f3dbe27]569 thread == fpu_owner) {
[fbaf6ac]570 irq_spinlock_unlock(&thread->lock, false);
571 continue;
572 }
573
574 thread->stolen = true;
575 thread->cpu = CPU;
576
577 irq_spinlock_unlock(&thread->lock, false);
578
579 /*
580 * Ready thread on local CPU
581 */
582
583#ifdef KCPULB_VERBOSE
584 log(LF_OTHER, LVL_DEBUG,
585 "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
586 "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
587 CPU->id, atomic_load(&CPU->nrdy),
588 atomic_load(&nrdy) / config.cpu_active);
589#endif
590
591 /* Remove thread from ready queue. */
592 old_rq->n--;
593 list_remove(&thread->rq_link);
[06f81c4]594 irq_spinlock_unlock(&old_rq->lock, false);
[fbaf6ac]595
596 /* Append thread to local queue. */
[06f81c4]597 irq_spinlock_lock(&new_rq->lock, false);
[fbaf6ac]598 list_append(&thread->rq_link, &new_rq->rq);
599 new_rq->n++;
[06f81c4]600 irq_spinlock_unlock(&new_rq->lock, false);
[fbaf6ac]601
602 atomic_dec(&old_cpu->nrdy);
603 atomic_inc(&CPU->nrdy);
[06f81c4]604 interrupts_restore(ipl);
[fbaf6ac]605 return thread;
606 }
607
[06f81c4]608 irq_spinlock_unlock(&old_rq->lock, false);
609 interrupts_restore(ipl);
[fbaf6ac]610 return NULL;
611}
612
[70527f1]613/** Load balancing thread
614 *
615 * SMP load balancing thread, supervising the supply of ready threads
616 * for the CPU it is wired to.
617 *
618 * @param arg Generic thread argument (unused).
619 *
[f761f1eb]620 */
621void kcpulb(void *arg)
622{
[3cfe2b8]623 size_t average;
624 size_t rdy;
[a35b458]625
[f761f1eb]626loop:
627 /*
[3260ada]628 * Work in 1s intervals.
[f761f1eb]629 */
[3260ada]630 thread_sleep(1);
[a35b458]631
[f761f1eb]632not_satisfied:
633 /*
634 * Calculate the number of threads that will be migrated/stolen from
635 * other CPU's. Note that the situation may have changed between two
636 * passes. Each time, get the most up-to-date counts.
[da1bafb]637 *
[f761f1eb]638 */
[036e97c]639 average = atomic_load(&nrdy) / config.cpu_active + 1;
640 rdy = atomic_load(&CPU->nrdy);
[a35b458]641
[da1bafb]642 if (average <= rdy)
[f761f1eb]643 goto satisfied;
[a35b458]644
[3cfe2b8]645 size_t count = average - rdy;
[a35b458]646
[f761f1eb]647 /*
[4e33b6b]648 * Search the lowest-priority queues on all CPU's first and the
649 * highest-priority queues on all CPU's last.
[f761f1eb]650 */
[da1bafb]651 size_t acpu;
652 int rq;
[a35b458]653
[da1bafb]654 for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
655 for (acpu = 0; acpu < config.cpu_active; acpu++) {
[fbaf6ac]656 cpu_t *cpu = &cpus[acpu];
[a35b458]657
[f761f1eb]658 /*
659 * Not interested in ourselves.
[4e33b6b]660 * Doesn't require interrupt disabling for kcpulb has
661 * THREAD_FLAG_WIRED.
[da1bafb]662 *
[f761f1eb]663 */
[43114c5]664 if (CPU == cpu)
[248fc1a]665 continue;
[a35b458]666
[036e97c]667 if (atomic_load(&cpu->nrdy) <= average)
[248fc1a]668 continue;
[a35b458]669
[fbaf6ac]670 if (steal_thread_from(cpu, rq) && --count == 0)
671 goto satisfied;
[f761f1eb]672 }
673 }
[a35b458]674
[036e97c]675 if (atomic_load(&CPU->nrdy)) {
[f761f1eb]676 /*
677 * Be a little bit light-weight and let migrated threads run.
[da1bafb]678 *
[f761f1eb]679 */
[151c050]680 thread_yield();
[3260ada]681 } else {
[f761f1eb]682 /*
683 * We failed to migrate a single thread.
[3260ada]684 * Give up this turn.
[da1bafb]685 *
[f761f1eb]686 */
[3260ada]687 goto loop;
[f761f1eb]688 }
[a35b458]689
[f761f1eb]690 goto not_satisfied;
[a35b458]691
[f761f1eb]692satisfied:
693 goto loop;
694}
[5f85c91]695#endif /* CONFIG_SMP */
[10e16a7]696
[da1bafb]697/** Print information about threads & scheduler queues
698 *
699 */
[10e16a7]700void sched_print_list(void)
701{
[da1bafb]702 size_t cpu;
[4184e76]703 for (cpu = 0; cpu < config.cpu_count; cpu++) {
[10e16a7]704 if (!cpus[cpu].active)
705 continue;
[a35b458]706
[3b68542]707 printf("cpu%u: address=%p, nrdy=%zu\n",
708 cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy));
[a35b458]709
[da1bafb]710 unsigned int i;
[4e33b6b]711 for (i = 0; i < RQ_COUNT; i++) {
[da1bafb]712 irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
713 if (cpus[cpu].rq[i].n == 0) {
714 irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
[10e16a7]715 continue;
716 }
[a35b458]717
[5b86d10]718 printf("\trq[%u]: ", i);
[feeac0d]719 list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
720 thread) {
[da1bafb]721 printf("%" PRIu64 "(%s) ", thread->tid,
722 thread_states[thread->state]);
[10e16a7]723 }
724 printf("\n");
[a35b458]725
[da1bafb]726 irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
[10e16a7]727 }
728 }
729}
[b45c443]730
[cc73a8a1]731/** @}
[b45c443]732 */