source: mainline/kernel/generic/src/proc/scheduler.c@ 4e33b6b

Last change on this file since 4e33b6b was 4e33b6b, checked in by Jakub Jermar <jakub@…>, 18 years ago

More formatting changes.

File size: 16.2 KB
/*
 * Copyright (C) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
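	/*
	 * With lazy FPU switching, keep the FPU enabled only if this
	 * thread already owns the CPU's FPU state; otherwise disable it
	 * so that the first FPU instruction traps and the context can be
	 * switched on demand via scheduler_fpu_lazy_request().
	 */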
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * had been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
			    slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and benefits hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In that case, the CPU will go to sleep anyway, even
		 * though a runnable thread exists.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

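		/*
		 * Give the thread a time quantum proportional to its run
		 * queue index: 10 ms per priority level, so threads taken
		 * from lower-priority queues run longer once they get the CPU.
		 */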
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in run queues.
 *
 * When the function decides to relink run queues, it reconnects
 * the respective pointers so that, as a result, threads with priority
 * greater than or equal to start are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
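		/*
		 * Each pass of the loop below empties rq[i + 1] into rq[i],
		 * so threads waiting in low-priority queues are promoted one
		 * level at a time and cannot starve indefinitely.
		 */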
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
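		/*
		 * context_save() behaves like setjmp(): it returns a non-zero
		 * value when called directly and zero when control comes back
		 * here via context_restore(). The branch below is therefore
		 * taken when this thread is scheduled to run again.
		 */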
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same, lots of work is
	 * avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load-balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can change between two
	 * passes. Each time, get the most up-to-date counts.
	 */
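	/*
	 * 'average' is one thread above the truncated per-CPU mean of
	 * ready threads; 'count' is how many threads this CPU still needs
	 * to steal in order to reach that target.
	 */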
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb
			 * has THREAD_FLAG_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (THREAD_FLAG_WIRED |
				    THREAD_FLAG_STOLEN))) &&
				    (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
				    "avg=%ld\n", CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures;
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */