source: mainline/kernel/generic/src/proc/scheduler.c@ c109dd0

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be taken before
 * the newly selected thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
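	/*
	 * FPU context handling. With CONFIG_FPU_LAZY the FPU is only
	 * enabled if the incoming thread already owns this CPU's FPU
	 * state; otherwise it stays disabled and the thread's first FPU
	 * instruction is expected to trap into scheduler_fpu_lazy_request()
	 * (via the architecture's FPU-disabled fault handler), which then
	 * performs the actual FPU context switch. Without lazy switching,
	 * the FPU context is restored (or initialized) eagerly right here.
	 */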
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after
 * the running thread has been preempted by the
 * scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
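	/*
	 * Mark the FPU context as engaged: while this flag is set, the
	 * load balancer (kcpulb) will not migrate the thread to another
	 * CPU, so the FPU state can stay in this CPU's registers.
	 */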
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and lets a hyperthreaded sibling make
		 * better use of the core.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In that case, the CPU will still go to sleep even though
		 * there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();
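	/*
	 * Scan the run queues from rq[0] (highest priority) down to
	 * rq[RQ_COUNT - 1] (lowest priority) and take the first
	 * non-empty one.
	 */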

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

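		/*
		 * The time quantum grows with the queue index: a thread
		 * taken from rq[i] gets (i + 1) * 10000 us, i.e. rq[0]
		 * yields a 10 ms slice, rq[1] 20 ms, and so on. Lower
		 * priority threads run less often but for longer.
		 */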
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~THREAD_FLAG_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in the run queues.
 *
 * When the function decides to relink the run queues, it moves each
 * queue below the threshold one level up (toward higher priority):
 * rq[start + 1] is merged into rq[start], rq[start + 2] into
 * rq[start + 1], and so on.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
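	/*
	 * The needs_relink counter is advanced outside this function
	 * (presumably by the clock tick handler); the queues are only
	 * shuffled once it exceeds NEEDS_RELINK_MAX, which bounds how
	 * often the relinking work is done.
	 */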
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);

		/* Update thread accounting */
		THREAD->cycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of the preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
		(uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function, running
 * on the new stack. It handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock with code that takes
					 * these locks in the opposite order;
					 * back off and retry.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
				thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
		CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
		atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter
	 * to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * ready threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes, so always use the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
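	/*
	 * Example: with 8 ready threads system-wide and 4 active CPUs,
	 * average = 8 / 4 + 1 = 3; a CPU that currently has only one
	 * ready thread will therefore try to steal count = 3 - 1 = 2
	 * threads from its neighbours.
	 */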

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
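			/*
			 * The offset k rotates which CPU is examined first;
			 * it is advanced after every successful migration
			 * (see below), so repeated passes do not always
			 * start stealing from the same CPU.
			 */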

			/*
			 * Not interested in ourselves.
			 * Disabling interrupts is not necessary, since kcpulb
			 * has THREAD_FLAG_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads or
				 * threads that have already been stolen. The
				 * latter prevents threads from migrating
				 * between CPUs without ever being run. Nor do
				 * we want to steal threads whose FPU context
				 * is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
				    !(t->fpu_context_engaged)) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%ld\n",
					CPU->id, t->tid, CPU->id,
					atomic_get(&CPU->nrdy),
					atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= THREAD_FLAG_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
			cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
			cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
					thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */