source: mainline/kernel/generic/src/proc/scheduler.c @ 11675207

Last change on this file was 11675207, checked in by jermar <jermar@…>, 17 years ago: Move everything to kernel/.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
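
	/*
	 * With CONFIG_FPU_LAZY, the FPU is switched on demand: it is
	 * enabled here only if THREAD already owns it; any other thread
	 * traps on its first FPU instruction and ends up in
	 * scheduler_fpu_lazy_request(). Otherwise, the FPU context is
	 * restored eagerly on every switch.
	 */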
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
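/** Claim the FPU for the current thread.
 *
 * Save the previous owner's FPU context and restore (or lazily
 * allocate) the context of THREAD. This is presumably entered when
 * a thread running with the FPU disabled first touches it, i.e.
 * from the architecture's FPU-unavailable trap handler.
 */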
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* Don't prevent migration. */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize the kernel scheduler.
 *
 */
void scheduler_init(void)
{
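	/*
	 * Intentionally empty: there is currently no global scheduler
	 * state to set up here; the per-CPU run queues are presumably
	 * initialized as part of CPU initialization elsewhere.
	 */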
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will still go to sleep even
		 * though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

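		/*
		 * The time quantum grows with the queue index: priority 0
		 * gets 10 ms and every lower-priority queue gets 10 ms more,
		 * so less favoured threads run less often but for longer
		 * stretches.
		 */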
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * again when the need for load balancing emerges.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, each thread in a
 * queue with index greater than start is moved up into the
 * next higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
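		/*
		 * With eager FPU switching, the FPU context is saved on
		 * every reschedule; the lazy variant defers both save
		 * and restore to scheduler_fpu_lazy_request().
		 */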
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and the preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. It handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held on entry.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated
				 * until somebody calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock: another path may
					 * hold join_wq.lock while trying to
					 * take THREAD->lock, so drop our
					 * lock, wait briefly and retry.
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);

				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up: the negative
			 * priority causes thread_ready() to requeue it with
			 * maximum priority when it becomes ready again.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
			    thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	/*
	 * If the old and the new task are the same,
	 * lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
	    CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
	    atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and the preemption
	 * counter to the thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it keeps the CPU
 * it is wired to supplied with threads.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;
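
	/*
	 * k is a rotating offset into the cpus[] array; bumping it after
	 * each successful steal spreads the stealing over all CPUs
	 * instead of always revisiting the same victim first.
	 */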

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
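	/*
	 * 'average' is this CPU's fair share of the ready threads, biased
	 * up by one so that we only steal when we are clearly below par;
	 * 'count' is how many threads we are short of that share.
	 */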
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Disabled interrupts are not needed here because
			 * kcpulb is X_WIRED and cannot migrate off this CPU.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run.
				 * We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) &&
				    !t->fpu_context_engaged) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
				    "avg=%ld\n", CPU->id, t->tid, CPU->id,
				    atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				t->state = Entering;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures;
	 * let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
			    cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				    thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}

/** @}
 */