source: mainline/kernel/generic/src/proc/scheduler.c@ 117ad5a2

Last change on this file since 117ad5a2 was 117ad5a2, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Get thread priority from find_best_thread(), instead of locking thread again

  • Property mode set to 100644
File size: 16.2 KB
/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <assert.h>
#include <atomic.h>
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <halt.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <stdio.h>
#include <log.h>
#include <stacktrace.h>

static void scheduler_separated_stack(void);

atomic_size_t nrdy; /**< Number of ready threads in the system. */

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
static void before_thread_runs(void)
{
    before_thread_runs_arch();

#ifdef CONFIG_FPU_LAZY
    /*
     * The only concurrent modification possible for fpu_owner here is
     * another thread changing it from itself to NULL in its destructor.
     */
    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
        memory_order_relaxed);

    if (THREAD == owner)
        fpu_enable();
    else
        fpu_disable();
#elif defined CONFIG_FPU
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = true;
    }
#endif

#ifdef CONFIG_UDEBUG
    if (THREAD->btrace) {
        istate_t *istate = THREAD->udebug.uspace_state;
        if (istate != NULL) {
            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
            stack_trace_istate(istate);
        }

        THREAD->btrace = false;
    }
#endif
}

/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * had been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
static void after_thread_ran(void)
{
    after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();

    /* We need this lock to ensure synchronization with thread destructor. */
    irq_spinlock_lock(&CPU->fpu_lock, false);

    /* Save old context */
    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner, memory_order_relaxed);
    if (owner != NULL) {
        fpu_context_save(&owner->fpu_context);
        atomic_store_explicit(&CPU->fpu_owner, NULL, memory_order_relaxed);
    }

    irq_spinlock_unlock(&CPU->fpu_lock, false);

    if (THREAD->fpu_context_exists) {
        fpu_context_restore(&THREAD->fpu_context);
    } else {
        fpu_init();
        THREAD->fpu_context_exists = true;
    }

    atomic_store_explicit(&CPU->fpu_owner, THREAD, memory_order_relaxed);
}
#endif /* CONFIG_FPU_LAZY */
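The lazy-FPU scheme used above boils down to this: the hardware FPU keeps the last owner's registers, and only the first FPU instruction issued by a different thread pays for a save and restore (via the trap that ends up in scheduler_fpu_lazy_request()). The following is a minimal userspace sketch of that policy, not HelenOS code; the model_* names and the single-integer "register file" are invented purely for illustration.

/* Hypothetical userspace model of lazy FPU switching (not HelenOS code). */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    const char *name;
    bool fpu_context_exists;   /* has this thread ever touched the FPU? */
    int fpu_regs;              /* stand-in for the thread's saved FPU state */
} model_thread_t;

typedef struct {
    model_thread_t *fpu_owner; /* whose state currently sits in the hardware FPU */
    int hw_fpu_regs;           /* stand-in for the hardware FPU registers */
} model_cpu_t;

/* Models the work done by scheduler_fpu_lazy_request() on the first FPU use. */
static void model_use_fpu(model_cpu_t *cpu, model_thread_t *t)
{
    if (cpu->fpu_owner == t)
        return;                                       /* fast path: nothing to do */

    if (cpu->fpu_owner != NULL)
        cpu->fpu_owner->fpu_regs = cpu->hw_fpu_regs;  /* save the old owner */

    if (t->fpu_context_exists) {
        cpu->hw_fpu_regs = t->fpu_regs;               /* restore our context */
    } else {
        cpu->hw_fpu_regs = 0;                         /* first use: initialize */
        t->fpu_context_exists = true;
    }

    cpu->fpu_owner = t;
    printf("%s now owns the FPU\n", t->name);
}

int main(void)
{
    model_cpu_t cpu = { 0 };
    model_thread_t a = { .name = "A" }, b = { .name = "B" };

    model_use_fpu(&cpu, &a);   /* A's context is initialized lazily */
    model_use_fpu(&cpu, &a);   /* repeated use by A costs nothing */
    model_use_fpu(&cpu, &b);   /* only now is A saved and B initialized */
    return 0;
}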

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(int *rq_index)
{
    assert(CPU != NULL);

loop:
    if (atomic_load(&CPU->nrdy) == 0) {
        /*
         * Since there is nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This improves energy saving and hyperthreading.
         */
        CPU->idle = true;

        /*
         * Go to sleep with interrupts enabled.
         * Ideally, this should be atomic, but this is not guaranteed on
         * all platforms yet, so it is possible we will go to sleep when
         * a thread has just become available.
         */
        cpu_interruptible_sleep();

        /* Interrupts are disabled again. */
        goto loop;
    }

    assert(!CPU->idle);

    unsigned int i;
    for (i = 0; i < RQ_COUNT; i++) {
        irq_spinlock_lock(&(CPU->rq[i].lock), false);
        if (CPU->rq[i].n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        CPU->rq[i].n--;

        /*
         * Take the first thread from the queue.
         */
        thread_t *thread = list_get_instance(
            list_first(&CPU->rq[i].rq), thread_t, rq_link);
        list_remove(&thread->rq_link);

        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

        thread->cpu = CPU;
        thread->priority = i; /* Correct rq index */

        /* Time allocation in microseconds. */
        uint64_t time_to_run = (i + 1) * 10000;

        /* This is safe because interrupts are disabled. */
        CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run);

        /*
         * Clear the stolen flag so that the thread can be migrated
         * again when the need for load balancing arises.
         */
        thread->stolen = false;
        irq_spinlock_unlock(&thread->lock, false);

        *rq_index = i;
        return thread;
    }

    goto loop;
}
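Note the quantum policy hidden in the formula above: the time slice grows linearly with the run-queue index, so the highest-priority queue (index 0) gets 10 ms and lower-priority queues get proportionally more. A small sketch of that mapping follows; the RQ_COUNT value of 16 is assumed only for the example, the kernel defines its own.

/* Illustration of the (i + 1) * 10000 us quantum formula; RQ_COUNT is assumed. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RQ_COUNT 16   /* assumption for this example */

int main(void)
{
    for (unsigned int i = 0; i < RQ_COUNT; i++) {
        uint64_t time_to_run = (i + 1) * 10000;   /* microseconds, as in find_best_thread() */
        printf("rq[%2u]: quantum %3" PRIu64 " ms\n", i, time_to_run / 1000);
    }
    return 0;
}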

static void switch_task(task_t *task)
{
    /* If the task stays the same, a lot of work is avoided. */
    if (TASK == task)
        return;

    as_t *old_as = AS;
    as_t *new_as = task->as;

    /* It is possible for two tasks to share one address space. */
    if (old_as != new_as)
        as_switch(old_as, new_as);

    if (TASK)
        task_release(TASK);

    TASK = task;

    task_hold(TASK);

    before_task_runs_arch();
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to start are moved towards higher-priority
 * queues.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    if (CPU->current_clock_tick < CPU->relink_deadline)
        return;

    CPU->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;

    /* Temporary cache for lists we are moving. */
    list_t list;
    list_initialize(&list);

    size_t n = 0;

    /* Move every list (except the one with highest priority) one level up. */
    for (int i = RQ_COUNT - 1; i > start; i--) {
        irq_spinlock_lock(&CPU->rq[i].lock, false);

        /* Swap lists. */
        list_swap(&CPU->rq[i].rq, &list);

        /* Swap number of items. */
        size_t tmpn = CPU->rq[i].n;
        CPU->rq[i].n = n;
        n = tmpn;

        irq_spinlock_unlock(&CPU->rq[i].lock, false);
    }

    /* Append the contents of rq[start + 1] to rq[start]. */
    if (n != 0) {
        irq_spinlock_lock(&CPU->rq[start].lock, false);
        list_concat(&CPU->rq[start].rq, &list);
        CPU->rq[start].n += n;
        irq_spinlock_unlock(&CPU->rq[start].lock, false);
    }
}
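In other words, once the relink deadline passes, every queue below the threshold is promoted one priority level and the former rq[start + 1] lands in rq[start], so low-priority threads cannot wait forever. The following toy model tracks only queue sizes; relink_model() is a made-up name and uses plain arrays instead of the kernel's linked lists.

/* Toy model of relink_rq(): promote queues start+1 .. RQ_COUNT-1 by one level. */
#include <stddef.h>
#include <stdio.h>

#define RQ_COUNT 16   /* assumption for this example */

static void relink_model(size_t n[RQ_COUNT], int start)
{
    size_t carry = 0;

    /* Mirrors the swap loop: rq[i] takes over the contents of rq[i + 1] for i > start. */
    for (int i = RQ_COUNT - 1; i > start; i--) {
        size_t tmp = n[i];
        n[i] = carry;
        carry = tmp;
    }

    /* What used to be rq[start + 1] is appended to rq[start]. */
    n[start] += carry;
}

int main(void)
{
    size_t n[RQ_COUNT] = { 0 };
    n[3] = 2;    /* two threads waiting at priority 3 */
    n[15] = 5;   /* five threads waiting at the lowest priority */

    relink_model(n, 0);

    for (int i = 0; i < RQ_COUNT; i++)
        if (n[i] != 0)
            printf("rq[%d]: %zu threads\n", i, n[i]);
    return 0;
}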

void scheduler(void)
{
    ipl_t ipl = interrupts_disable();

    if (atomic_load(&haltstate))
        halt();

    if (THREAD) {
        irq_spinlock_lock(&THREAD->lock, false);
    }

    scheduler_locked(ipl);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler_locked(ipl_t ipl)
{
    assert(CPU != NULL);

    if (THREAD) {
        /* Update thread kernel accounting */
        THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
        fpu_context_save(&THREAD->fpu_context);
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */

            /* Save current CPU cycle */
            THREAD->last_cycle = get_cycle();

            irq_spinlock_unlock(&THREAD->lock, false);
            interrupts_restore(THREAD->saved_ipl);

            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded
         * here to facilitate scheduler() invocations from
         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
         *
         */
        THREAD->saved_ipl = ipl;
    }

    /*
     * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
     * and preemption counter. At this point CURRENT could be coming either
     * from THREAD's or CPU's stack.
     *
     */
    current_copy(CURRENT, (current_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     *
     */
    context_t ctx;
    context_save(&ctx);
    context_set(&ctx, FADDR(scheduler_separated_stack),
        (uintptr_t) CPU->stack, STACK_SIZE);
    context_restore(&ctx);

    /* Not reached */
}
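The context_save()/context_set()/context_restore() sequence above is a trampoline: it abandons the current thread's stack and resumes execution in scheduler_separated_stack() on the CPU's private stack. As a rough analogue only (this is not how HelenOS implements it), the same idea can be sketched in userspace with POSIX <ucontext.h>, which is obsolescent but still widely available:

/* POSIX sketch of "keep running, but on a dedicated stack" (analogue only). */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, sep_ctx;
static char sep_stack[64 * 1024];   /* plays the role of CPU->stack */

static void separated_stack(void)
{
    /* Everything here runs on sep_stack, like scheduler_separated_stack(). */
    printf("running on the separate stack\n");
    /* Returning resumes main_ctx thanks to uc_link. */
}

int main(void)
{
    getcontext(&sep_ctx);                     /* roughly context_save() */
    sep_ctx.uc_stack.ss_sp = sep_stack;       /* roughly context_set(..., CPU->stack, ...) */
    sep_ctx.uc_stack.ss_size = sizeof(sep_stack);
    sep_ctx.uc_link = &main_ctx;
    makecontext(&sep_ctx, separated_stack, 0);

    swapcontext(&main_ctx, &sep_ctx);         /* roughly context_restore() */
    printf("back on the original stack\n");
    return 0;
}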

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handling the actual context
 * switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    assert(CPU != NULL);
    assert(interrupts_disabled());

    if (THREAD) {
        /* Must be run after the switch to scheduler stack */
        after_thread_ran();

        switch (THREAD->state) {
        case Running:
            irq_spinlock_unlock(&THREAD->lock, false);
            thread_ready(THREAD);
            break;

        case Exiting:
            irq_spinlock_unlock(&THREAD->lock, false);
            waitq_close(&THREAD->join_wq);

            /*
             * Release the reference CPU has for the thread.
             * If there are no other references (e.g. threads calling join),
             * the thread structure is deallocated.
             */
            thread_put(THREAD);
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;
            irq_spinlock_unlock(&THREAD->lock, false);
            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%" PRIu64 ": unexpected state %s.",
                THREAD->tid, thread_states[THREAD->state]);
            break;
        }

        THREAD = NULL;
    }

    int rq_index;
    THREAD = find_best_thread(&rq_index);

    relink_rq(rq_index);

    switch_task(THREAD->task);

    irq_spinlock_lock(&THREAD->lock, false);
    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    log(LF_OTHER, LVL_DEBUG,
        "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
        ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
        THREAD->ticks, atomic_load(&CPU->nrdy));
#endif

    /*
     * Some architectures provide late kernel PA2KA(identity)
     * mapping in a page fault handler. However, the page fault
     * handler uses the kernel stack of the running thread and
     * therefore cannot be used to map it. The kernel stack, if
     * necessary, is to be mapped in before_thread_runs(). This
     * function must be executed before the switch to the new stack.
     */
    before_thread_runs();

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     * thread's stack.
     */
    current_copy(CURRENT, (current_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);

    /* Not reached */
}

#ifdef CONFIG_SMP

static thread_t *steal_thread_from(cpu_t *old_cpu, int i)
{
    runq_t *old_rq = &old_cpu->rq[i];
    runq_t *new_rq = &CPU->rq[i];

    ipl_t ipl = interrupts_disable();

    irq_spinlock_lock(&old_rq->lock, false);

    /*
     * If fpu_owner is any thread in the list, its store is seen here thanks to
     * the runqueue lock.
     */
    thread_t *fpu_owner = atomic_load_explicit(&old_cpu->fpu_owner,
        memory_order_relaxed);

    /* Search rq from the back */
    list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {

        irq_spinlock_lock(&thread->lock, false);

        /*
         * Do not steal CPU-wired threads, threads
         * already stolen, threads for which migration
         * was temporarily disabled or threads whose
         * FPU context is still in the CPU.
         */
        if (thread->stolen || thread->nomigrate ||
            thread == fpu_owner) {
            irq_spinlock_unlock(&thread->lock, false);
            continue;
        }

        thread->stolen = true;
        thread->cpu = CPU;

        irq_spinlock_unlock(&thread->lock, false);

        /*
         * Ready thread on local CPU
         */

#ifdef KCPULB_VERBOSE
        log(LF_OTHER, LVL_DEBUG,
            "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
            "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
            CPU->id, atomic_load(&CPU->nrdy),
            atomic_load(&nrdy) / config.cpu_active);
#endif

        /* Remove thread from ready queue. */
        old_rq->n--;
        list_remove(&thread->rq_link);
        irq_spinlock_unlock(&old_rq->lock, false);

        /* Append thread to local queue. */
        irq_spinlock_lock(&new_rq->lock, false);
        list_append(&thread->rq_link, &new_rq->rq);
        new_rq->n++;
        irq_spinlock_unlock(&new_rq->lock, false);

        atomic_dec(&old_cpu->nrdy);
        atomic_inc(&CPU->nrdy);
        interrupts_restore(ipl);
        return thread;
    }

    irq_spinlock_unlock(&old_rq->lock, false);
    interrupts_restore(ipl);
    return NULL;
}

/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    size_t average;
    size_t rdy;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Each time, get the most up-to-date counts.
     *
     */
    average = atomic_load(&nrdy) / config.cpu_active + 1;
    rdy = atomic_load(&CPU->nrdy);

    if (average <= rdy)
        goto satisfied;

    size_t count = average - rdy;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    size_t acpu;
    int rq;

    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
        for (acpu = 0; acpu < config.cpu_active; acpu++) {
            cpu_t *cpu = &cpus[acpu];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is required, because
             * kcpulb has THREAD_FLAG_WIRED.
             *
             */
            if (CPU == cpu)
                continue;

            if (atomic_load(&cpu->nrdy) <= average)
                continue;

            if (steal_thread_from(cpu, rq) && --count == 0)
                goto satisfied;
        }
    }

    if (atomic_load(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         *
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         *
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}
#endif /* CONFIG_SMP */
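To make the balancing target concrete: kcpulb() considers itself satisfied once its own ready count reaches the system-wide average plus one; otherwise it tries to steal the difference. Below is a worked example of that computation with invented counts for a four-CPU system.

/* Worked example of kcpulb()'s target computation; all counts are invented. */
#include <stdio.h>

int main(void)
{
    size_t cpu_active = 4;
    size_t cpu_nrdy[] = { 9, 3, 2, 2 };      /* per-CPU ready-thread counts */
    size_t nrdy = 9 + 3 + 2 + 2;             /* system-wide total: 16 */

    size_t average = nrdy / cpu_active + 1;  /* 16 / 4 + 1 = 5 */

    for (size_t i = 0; i < cpu_active; i++) {
        if (average <= cpu_nrdy[i])
            printf("cpu%zu: satisfied (nrdy=%zu)\n", i, cpu_nrdy[i]);
        else
            printf("cpu%zu: wants to steal %zu thread(s)\n", i,
                average - cpu_nrdy[i]);
    }
    return 0;
}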

/** Print information about threads & scheduler queues
 *
 */
void sched_print_list(void)
{
    size_t cpu;
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;

        /* Technically a data race, but we don't really care in this case. */
        int needs_relink = cpus[cpu].relink_deadline - cpus[cpu].current_clock_tick;

        printf("cpu%u: address=%p, nrdy=%zu, needs_relink=%d\n",
            cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy),
            needs_relink);

        unsigned int i;
        for (i = 0; i < RQ_COUNT; i++) {
            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
            if (cpus[cpu].rq[i].n == 0) {
                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                continue;
            }

            printf("\trq[%u]: ", i);
            list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
                thread) {
                printf("%" PRIu64 "(%s) ", thread->tid,
                    thread_states[thread->state]);
            }
            printf("\n");

            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
        }
    }
}

/** @}
 */