source: mainline/kernel/generic/src/proc/scheduler.c@ 4f3aa76

Last change on this file since 4f3aa76 was aae365bc, checked in by Jakub Jermar <jakub@…>, 7 years ago

Remove RCU and CHT support

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <assert.h>
#include <atomic.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <halt.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <stdio.h>
#include <log.h>
#include <stacktrace.h>

static void scheduler_separated_stack(void);

atomic_t nrdy; /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
static void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
static void before_thread_runs(void)
{
	before_thread_runs_arch();

#ifdef CONFIG_FPU_LAZY
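	/*
	 * With lazy FPU switching, keep the FPU enabled only for its current
	 * owner; disabling it for everyone else lets a non-owner's first FPU
	 * use be caught and handled by scheduler_fpu_lazy_request().
	 */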
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#elif defined CONFIG_FPU
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = true;
	}
#endif

#ifdef CONFIG_UDEBUG
	if (THREAD->btrace) {
		istate_t *istate = THREAD->udebug.uspace_state;
		if (istate != NULL) {
			printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
			stack_trace_istate(istate);
		}

		THREAD->btrace = false;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
static void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
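/*
 * Hand the FPU over to THREAD on demand. This is presumably reached from the
 * architecture's FPU-unavailable trap when a thread that does not own the FPU
 * executes an FPU instruction under CONFIG_FPU_LAZY.
 */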
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	irq_spinlock_lock(&CPU->lock, false);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);

		/* Don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = false;
		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
		CPU->fpu_owner = NULL;
	}

	irq_spinlock_lock(&THREAD->lock, false);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			irq_spinlock_unlock(&THREAD->lock, false);
			irq_spinlock_unlock(&CPU->lock, false);
			THREAD->saved_fpu_context =
			    (fpu_context_t *) slab_alloc(fpu_context_cache, 0);

			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = true;
	}

	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = true;
	irq_spinlock_unlock(&THREAD->lock, false);

	irq_spinlock_unlock(&CPU->lock, false);
}
#endif /* CONFIG_FPU_LAZY */

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	assert(CPU != NULL);

loop:

	if (atomic_load(&CPU->nrdy) == 0) {
		/*
		 * Since there is nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and is beneficial for
		 * hyperthreading.
		 */
		irq_spinlock_lock(&CPU->lock, false);
		CPU->idle = true;
		irq_spinlock_unlock(&CPU->lock, false);
		interrupts_enable();

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In that case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */
		cpu_sleep();
		interrupts_disable();
		goto loop;
	}

	assert(!CPU->idle);

	unsigned int i;
	for (i = 0; i < RQ_COUNT; i++) {
		irq_spinlock_lock(&(CPU->rq[i].lock), false);
		if (CPU->rq[i].n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		CPU->rq[i].n--;

		/*
		 * Take the first thread from the queue.
		 */
		thread_t *thread = list_get_instance(
		    list_first(&CPU->rq[i].rq), thread_t, rq_link);
		list_remove(&thread->rq_link);

		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

		thread->cpu = CPU;
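		/*
		 * The time quantum grows with the queue index: a thread taken
		 * from queue i gets (i + 1) * 10 ms, converted to clock ticks
		 * below.
		 */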
		thread->ticks = us2ticks((i + 1) * 10000);
		thread->priority = i;	/* Correct rq index */

		/*
		 * Clear the stolen flag so that the thread can be migrated
		 * again when the need for load balancing arises.
		 */
		thread->stolen = false;
		irq_spinlock_unlock(&thread->lock, false);

		return thread;
	}

	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	list_t list;

	list_initialize(&list);
	irq_spinlock_lock(&CPU->lock, false);

	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		int i;
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* Remember and empty rq[i + 1] */

			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
			list_concat(&list, &CPU->rq[i + 1].rq);
			size_t n = CPU->rq[i + 1].n;
			CPU->rq[i + 1].n = 0;
			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);

			/* Append rq[i + 1] to rq[i] */

			irq_spinlock_lock(&CPU->rq[i].lock, false);
			list_concat(&CPU->rq[i].rq, &list);
			CPU->rq[i].n += n;
			irq_spinlock_unlock(&CPU->rq[i].lock, false);
		}

		CPU->needs_relink = 0;
	}

	irq_spinlock_unlock(&CPU->lock, false);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	assert(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_load(&haltstate))
		halt();

	if (THREAD) {
		irq_spinlock_lock(&THREAD->lock, false);

		/* Update thread kernel accounting */
		THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			irq_spinlock_unlock(&THREAD->lock, false);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * The interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 *
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
	 * and preemption counter. At this point CURRENT could be coming either
	 * from THREAD's or CPU's stack.
	 *
	 */
	current_copy(CURRENT, (current_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 *
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, STACK_SIZE);
	context_restore(&CPU->saved_context);

	/* Not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	DEADLOCK_PROBE_INIT(p_joinwq);
	task_t *old_task = TASK;
	as_t *old_as = AS;

	assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
	assert(CPU != NULL);
	assert(interrupts_disabled());

	/*
	 * Hold the current task and the address space to prevent their
	 * possible destruction should thread_destroy() be called on this or any
	 * other processor while the scheduler is still using them.
	 */
	if (old_task)
		task_hold(old_task);

	if (old_as)
		as_hold(old_as);

	if (THREAD) {
		/* Must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			irq_spinlock_unlock(&THREAD->lock, false);
			thread_ready(THREAD);
			break;

		case Exiting:
		repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD, false);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					irq_spinlock_unlock(&THREAD->lock, false);
					delay(HZ);
					irq_spinlock_lock(&THREAD->lock, false);
					DEADLOCK_PROBE(p_joinwq,
					    DEADLOCK_THRESHOLD);
					goto repeat;
				}
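				/* Wake up a thread waiting to join this one, if any. */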
				_waitq_wakeup_unsafe(&THREAD->join_wq,
				    WAKEUP_FIRST);
				irq_spinlock_unlock(&THREAD->join_wq.lock, false);

				THREAD->state = Lingering;
				irq_spinlock_unlock(&THREAD->lock, false);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). The address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);

			irq_spinlock_unlock(&THREAD->lock, false);
			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%" PRIu64 ": unexpected state %s.",
			    THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	irq_spinlock_lock(&THREAD->lock, false);
	int priority = THREAD->priority;
	irq_spinlock_unlock(&THREAD->lock, false);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same,
	 * a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *new_as = THREAD->task->as;

		/*
		 * Note that it is possible for two tasks
		 * to share one address space.
		 */
		if (old_as != new_as) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(old_as, new_as);
		}

		TASK = THREAD->task;
		before_task_runs();
	}

	if (old_task)
		task_release(old_task);

	if (old_as)
		as_release(old_as);

	irq_spinlock_lock(&THREAD->lock, false);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	log(LF_OTHER, LVL_DEBUG,
	    "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
	    ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
	    THREAD->ticks, atomic_load(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	current_copy(CURRENT, (current_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);

	/* Not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	size_t average;
	size_t rdy;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two
	 * passes. Each time get the most up-to-date counts.
	 *
	 */
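	/*
	 * 'average' is the target per-CPU share of ready threads; this CPU
	 * steals work only while its own ready count is below that target.
	 */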
	average = atomic_load(&nrdy) / config.cpu_active + 1;
	rdy = atomic_load(&CPU->nrdy);

	if (average <= rdy)
		goto satisfied;

	size_t count = average - rdy;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
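	/*
	 * Lower-priority queues are served later, so their threads are the
	 * least likely to run soon on their current CPU.
	 */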
	size_t acpu;
	size_t acpu_bias = 0;
	int rq;

	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
		for (acpu = 0; acpu < config.cpu_active; acpu++) {
			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb has
			 * THREAD_FLAG_WIRED.
			 *
			 */
			if (CPU == cpu)
				continue;

			if (atomic_load(&cpu->nrdy) <= average)
				continue;

			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
			if (cpu->rq[rq].n == 0) {
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
				continue;
			}

			thread_t *thread = NULL;

			/* Search rq from the back */
			link_t *link = cpu->rq[rq].rq.head.prev;

			while (link != &(cpu->rq[rq].rq.head)) {
				thread = (thread_t *) list_get_instance(link,
				    thread_t, rq_link);

				/*
				 * Do not steal CPU-wired threads, threads
				 * already stolen, threads for which migration
				 * was temporarily disabled or threads whose
				 * FPU context is still in the CPU.
				 */
				irq_spinlock_lock(&thread->lock, false);

				if ((!thread->wired) && (!thread->stolen) &&
				    (!thread->nomigrate) &&
				    (!thread->fpu_context_engaged)) {
					/*
					 * Remove thread from ready queue.
					 */
					irq_spinlock_unlock(&thread->lock,
					    false);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					cpu->rq[rq].n--;
					list_remove(&thread->rq_link);

					break;
				}

				irq_spinlock_unlock(&thread->lock, false);

				link = link->prev;
				thread = NULL;
			}

			if (thread) {
				/*
				 * Make the thread ready on the local CPU.
				 */

				irq_spinlock_pass(&(cpu->rq[rq].lock),
				    &thread->lock);

#ifdef KCPULB_VERBOSE
				log(LF_OTHER, LVL_DEBUG,
				    "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
				    "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
				    CPU->id, atomic_load(&CPU->nrdy),
				    atomic_load(&nrdy) / config.cpu_active);
#endif

				thread->stolen = true;
				thread->state = Entering;

				irq_spinlock_unlock(&thread->lock, true);
				thread_ready(thread);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 *
				 */
				acpu_bias++;

				continue;
			} else
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);

		}
	}

	if (atomic_load(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 *
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 *
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues
 *
 */
void sched_print_list(void)
{
	size_t cpu;
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;

		irq_spinlock_lock(&cpus[cpu].lock, true);

		printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%zu\n",
		    cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		unsigned int i;
		for (i = 0; i < RQ_COUNT; i++) {
			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
			if (cpus[cpu].rq[i].n == 0) {
				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
				continue;
			}

			printf("\trq[%u]: ", i);
			list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
			    thread) {
				printf("%" PRIu64 "(%s) ", thread->tid,
				    thread_states[thread->state]);
			}
			printf("\n");

			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
		}

		irq_spinlock_unlock(&cpus[cpu].lock, true);
	}
}

/** @}
 */