source: mainline/generic/src/proc/scheduler.c@ 085d973

Last change on this file since 085d973 was 085d973, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Cleanup of frame allocator.
Removed early_malloc & initial heap.
Will break ia64, ppc & sparc.
Added e820 table print.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

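/** Number of ready threads in the whole system; each CPU additionally keeps its own count in its nrdy field. */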
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&(THREAD->saved_fpu_context));
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

#ifdef CONFIG_FPU_LAZY
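/** Claim the FPU for the current thread
 *
 * With lazy FPU switching, before_thread_runs() leaves the FPU disabled for
 * threads that do not own it. When such a thread then touches the FPU, the
 * architecture's FPU trap handler is presumably expected to call this
 * function (the handler itself is not part of this file): it saves the
 * previous owner's context and installs the current thread's one.
 */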
void scheduler_fpu_lazy_request(void)
{
	fpu_enable();
	if (CPU->fpu_owner != NULL) {
		fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
	}
	if (THREAD->fpu_context_exists)
		fpu_context_restore(&THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will still go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

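		/*
		 * The time quantum is proportional to the queue index:
		 * a thread taken from rq[i] is given (i + 1) * 10 ms
		 * before it is preempted.
		 */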
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs arise.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in run queues.
 *
 * When the function decides to relink the run queues, it reconnects
 * the respective pointers so that threads with a priority greater
 * than or equal to 'start' are moved to a higher-priority queue.
 *
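 * A relinking pass runs only once CPU->needs_relink (presumably advanced
 * by the clock tick handler, which is outside this file) exceeds
 * NEEDS_RELINK_MAX; each pass moves the contents of rq[i + 1] into rq[i],
 * so starving threads gain one priority level per pass.
 *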
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using the new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * The state the thread is entering is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}


	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_install(as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
	#endif

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
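		/*
		 * With eager FPU switching the FPU context is saved on every
		 * reschedule; with CONFIG_FPU_LAZY it is saved only when another
		 * thread claims the FPU in scheduler_fpu_lazy_request().
		 */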
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(&(THREAD->saved_fpu_context));
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
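	/*
	 * context_set() points the saved context at scheduler_separated_stack()
	 * and at the top of the CPU's private stack, so the context_restore()
	 * below effectively jumps there instead of returning here.
	 */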
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
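	/*
	 * Example: with 8 ready threads on 2 active CPUs, average = 8 / 2 + 1 = 5;
	 * a CPU with only 2 ready threads of its own computes count = 3 and will
	 * try to steal up to 3 threads.
	 */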
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads nor threads that have already been stolen.
				 * The latter prevents threads from migrating between CPUs without ever being run.
				 * We also don't want to steal threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	printf("*********** Scheduler dump ***********\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;
		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d needs_relink: %d\n",
		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\tRq %d: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}