source: mainline/kernel/generic/src/proc/scheduler.c@ 62550dce

Last change on this file was 62550dce, checked in by Stanislav Kozina <stanislav.kozina@…>, 15 years ago

ps -c echoes info about CPU's
cpu count is accessible through sysinfo("cpu.count")

/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy; /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

105
106/** Take actions after THREAD had run.
107 *
108 * Perform actions that need to be
109 * taken after the running thread
110 * had been preempted by the scheduler.
111 *
112 * THREAD->lock is locked on entry
113 *
114 */
115void after_thread_ran(void)
116{
117 after_thread_ran_arch();
118}
119
120#ifdef CONFIG_FPU_LAZY
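/** Request the FPU for the current thread (lazy FPU context switching).
 *
 * Note: descriptive comment added here; the call site is an assumption.
 * Typically invoked when THREAD touches the FPU while not owning it. Saves
 * the FPU context of the previous owner (if any), restores or initializes
 * THREAD's context and makes THREAD the new owner of this CPU's FPU.
 */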
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		spinlock_lock(&CPU->lock);
		CPU->idle = true;
		spinlock_unlock(&CPU->lock);
		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i; /* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
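	/*
	 * needs_relink is incremented elsewhere in the kernel and only reset
	 * here. Once it exceeds NEEDS_RELINK_MAX, every rq[i + 1] with
	 * i >= start is emptied into rq[i], promoting its threads by one
	 * priority level.
	 */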
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;
		THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
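		/*
		 * context_save() returns a non-zero value on the direct call.
		 * When the thread is later resumed via
		 * context_restore(&THREAD->saved_context), execution
		 * continues here with a zero return value and the thread
		 * leaves scheduler() through the branch below.
		 */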
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;
	DEADLOCK_PROBE_INIT(p_joinwq);

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(HZ);
					spinlock_lock(&THREAD->lock);
					DEADLOCK_PROBE(p_joinwq,
					    DEADLOCK_THRESHOLD);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq,
				    WAKEUP_FIRST);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Lingering;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%" PRIu64 ": unexpected state %s.",
			    THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same, lots of work is
	 * avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
	    THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count;
	atomic_count_t average;
	unsigned int i;
	int j;
	int k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two passes.
	 * Get the most up-to-date counts each time.
	 */
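	/*
	 * 'average' is the system-wide number of ready threads divided by
	 * the number of active CPUs, plus one; 'count' is how many threads
	 * this CPU is short of that target, i.e. how many it will try to
	 * steal below.
	 */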
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb
			 * has THREAD_FLAG_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev; /* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (THREAD_FLAG_WIRED |
				    THREAD_FLAG_STOLEN))) &&
				    (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
				    "nrdy=%ld, avg=%ld\n", CPU->id, t->tid,
				    CPU->id, atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	unsigned int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%u]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%" PRIu64 "(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */