source: mainline/generic/src/proc/scheduler.c@ a3eeceb6

Last change on this file since a3eeceb6 was a3eeceb6, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Unimportant changes regarding FPU context.

  • Property mode set to 100644
File size: 13.8 KB
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
void before_thread_runs(void)
{
        before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
        if (THREAD == CPU->fpu_owner)
                fpu_enable();
        else
                fpu_disable();
#else
        fpu_enable();
        if (THREAD->fpu_context_exists)
                fpu_context_restore(&(THREAD->saved_fpu_context));
        else {
                fpu_init(&(THREAD->saved_fpu_context));
                THREAD->fpu_context_exists = 1;
        }
#endif
}

#ifdef CONFIG_FPU_LAZY
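/*
 * With CONFIG_FPU_LAZY, the FPU context is not switched eagerly on every
 * reschedule: before_thread_runs() merely disables the FPU for threads that
 * do not own this CPU's FPU state. The first FPU instruction such a thread
 * executes then traps, and the architecture's FPU-unavailable handler is
 * expected to call scheduler_fpu_lazy_request() (an assumption based on this
 * file alone) to save the previous owner's context and restore or initialize
 * the current thread's context.
 */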
void scheduler_fpu_lazy_request(void)
{
        fpu_enable();
        spinlock_lock(&CPU->lock);

        /* Save old context */
        if (CPU->fpu_owner != NULL) {
                spinlock_lock(&CPU->fpu_owner->lock);
                fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
                /* don't prevent migration */
                CPU->fpu_owner->fpu_context_engaged = 0;
                spinlock_unlock(&CPU->fpu_owner->lock);
        }

        spinlock_lock(&THREAD->lock);
        if (THREAD->fpu_context_exists)
                fpu_context_restore(&THREAD->saved_fpu_context);
        else {
                fpu_init(&(THREAD->saved_fpu_context));
                THREAD->fpu_context_exists = 1;
        }
        CPU->fpu_owner = THREAD;
        THREAD->fpu_context_engaged = 1;

        spinlock_unlock(&THREAD->lock);
        spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}
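
/*
 * Each CPU maintains RQ_COUNT run queues; rq[0] holds the highest-priority
 * threads. find_best_thread() below scans the queues from rq[0] upwards and
 * takes the first thread it finds, so higher-priority queues are always
 * served first.
 */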

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
        thread_t *t;
        runq_t *r;
        int i;

        ASSERT(CPU != NULL);

loop:
        interrupts_enable();

        if (atomic_get(&CPU->nrdy) == 0) {
                /*
                 * Since there is nothing to run, the CPU sleeps
                 * until a hardware interrupt or an IPI arrives.
                 * This improves energy saving and hyperthreading.
                 */

                /*
                 * An interrupt might occur right now and wake up a thread.
                 * In such a case, the CPU will go to sleep anyway,
                 * even though there is a runnable thread.
                 */

                cpu_sleep();
                goto loop;
        }

        interrupts_disable();

        for (i = 0; i < RQ_COUNT; i++) {
                r = &CPU->rq[i];
                spinlock_lock(&r->lock);
                if (r->n == 0) {
                        /*
                         * If this queue is empty, try a lower-priority queue.
                         */
                        spinlock_unlock(&r->lock);
                        continue;
                }

                atomic_dec(&CPU->nrdy);
                atomic_dec(&nrdy);
                r->n--;

                /*
                 * Take the first thread from the queue.
                 */
                t = list_get_instance(r->rq_head.next, thread_t, rq_link);
                list_remove(&t->rq_link);

                spinlock_unlock(&r->lock);

                spinlock_lock(&t->lock);
                t->cpu = CPU;

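                /*
                 * The time quantum assigned below grows with the queue index:
                 * a thread taken from rq[i] gets (i + 1) * 10 ms, so threads
                 * from lower-priority queues run less often but for longer.
                 */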
                t->ticks = us2ticks((i + 1) * 10000);
                t->priority = i;        /* update to the correct rq index */

                /*
                 * Clear the X_STOLEN flag so that t can be migrated
                 * when the need for load balancing arises again.
                 */
                t->flags &= ~X_STOLEN;
                spinlock_unlock(&t->lock);

                return t;
        }
        goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low-priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved
 * to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
        link_t head;
        runq_t *r;
        int i, n;

        list_initialize(&head);
        spinlock_lock(&CPU->lock);
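        /*
         * CPU->needs_relink is presumably advanced by the clock interrupt
         * handler (an assumption; its producer is not visible in this file).
         * Once it exceeds NEEDS_RELINK_MAX, each rq[i + 1] from 'start' on is
         * merged into rq[i], promoting starving threads one priority level.
         */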
        if (CPU->needs_relink > NEEDS_RELINK_MAX) {
                for (i = start; i < RQ_COUNT - 1; i++) {
                        /* remember and empty rq[i + 1] */
                        r = &CPU->rq[i + 1];
                        spinlock_lock(&r->lock);
                        list_concat(&head, &r->rq_head);
                        n = r->n;
                        r->n = 0;
                        spinlock_unlock(&r->lock);

                        /* append rq[i + 1] to rq[i] */
                        r = &CPU->rq[i];
                        spinlock_lock(&r->lock);
                        list_concat(&r->rq_head, &head);
                        r->n += n;
                        spinlock_unlock(&r->lock);
                }
                CPU->needs_relink = 0;
        }
        spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assumes THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
        int priority;

        ASSERT(CPU != NULL);

        if (THREAD) {
                switch (THREAD->state) {
                case Running:
                        THREAD->state = Ready;
                        spinlock_unlock(&THREAD->lock);
                        thread_ready(THREAD);
                        break;

                case Exiting:
                        thread_destroy(THREAD);
                        break;

                case Sleeping:
                        /*
                         * Prefer the thread after it's woken up.
                         */
                        THREAD->priority = -1;

                        /*
                         * We need to release wq->lock which we locked in waitq_sleep().
                         * Address of wq->lock is kept in THREAD->sleep_queue.
                         */
                        spinlock_unlock(&THREAD->sleep_queue->lock);

                        /*
                         * Check for possible requests for out-of-context invocation.
                         */
                        if (THREAD->call_me) {
                                THREAD->call_me(THREAD->call_me_with);
                                THREAD->call_me = NULL;
                                THREAD->call_me_with = NULL;
                        }

                        spinlock_unlock(&THREAD->lock);

                        break;

                default:
                        /*
                         * Entering this state is unexpected.
                         */
                        panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                        break;
                }
                THREAD = NULL;
        }

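        /*
         * Pick the next thread to run. Its priority (the run queue it came
         * from) is then used as the threshold for relink_rq(), so only queues
         * of the same or lower priority are considered for promotion.
         */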
        THREAD = find_best_thread();

        spinlock_lock(&THREAD->lock);
        priority = THREAD->priority;
        spinlock_unlock(&THREAD->lock);

        relink_rq(priority);

        spinlock_lock(&THREAD->lock);

        /*
         * If the old and the new task are the same, a lot of work is avoided.
         */
        if (TASK != THREAD->task) {
                as_t *as1 = NULL;
                as_t *as2;

                if (TASK) {
                        spinlock_lock(&TASK->lock);
                        as1 = TASK->as;
                        spinlock_unlock(&TASK->lock);
                }

                spinlock_lock(&THREAD->task->lock);
                as2 = THREAD->task->as;
                spinlock_unlock(&THREAD->task->lock);

                /*
                 * Note that it is possible for two tasks to share one address space.
                 */
                if (as1 != as2) {
                        /*
                         * Both tasks and address spaces are different.
                         * Replace the old address space with the new one.
                         */
                        as_switch(as1, as2);
                }
                TASK = THREAD->task;
        }

        THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
        printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

        /*
         * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
         * to the thread's kernel stack.
         */
        the_copy(THE, (the_t *) THREAD->kstack);

        context_restore(&THREAD->saved_context);
        /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
        volatile ipl_t ipl;

        ASSERT(CPU != NULL);

        ipl = interrupts_disable();

        if (atomic_get(&haltstate))
                halt();

        if (THREAD) {
                spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
                fpu_context_save(&(THREAD->saved_fpu_context));
#endif
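                /*
                 * context_save() is a setjmp()-style primitive: judging from
                 * the branch below, it returns true on the direct call and
                 * false when control returns here via context_restore() from
                 * a later scheduling decision.
                 */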
                if (!context_save(&THREAD->saved_context)) {
                        /*
                         * This is the place where threads leave scheduler().
                         */
                        before_thread_runs();
                        spinlock_unlock(&THREAD->lock);
                        interrupts_restore(THREAD->saved_context.ipl);
                        return;
                }

                /*
                 * The interrupt priority level of the preempted thread is recorded
                 * here to facilitate scheduler() invocations from
                 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
                 */
                THREAD->saved_context.ipl = ipl;
        }

        /*
         * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
         * and the preemption counter. At this point THE could be coming either
         * from THREAD's or CPU's stack.
         */
        the_copy(THE, (the_t *) CPU->stack);

        /*
         * We must not keep the old stack.
         * Reason: If we kept the old stack and got blocked, for instance, in
         * find_best_thread(), the old thread could get rescheduled by another
         * CPU and overwrite the part of its own stack that was also used by
         * the scheduler on this CPU.
         *
         * Moreover, we have to bypass the compiler-generated POP sequence
         * which is fooled by SP being set to the very top of the stack.
         * Therefore the scheduler() function continues in
         * scheduler_separated_stack().
         */
        context_save(&CPU->saved_context);
        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
        context_restore(&CPU->saved_context);
        /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
        thread_t *t;
        int count, average, i, j, k = 0;
        ipl_t ipl;

loop:
        /*
         * Work in 1s intervals.
         */
        thread_sleep(1);

not_satisfied:
        /*
         * Calculate the number of threads that will be migrated/stolen from
         * other CPUs. Note that the situation may have changed between two
         * passes; get the most up-to-date counts each time.
         */
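        /*
         * 'average' is this CPU's fair share of ready threads, rounded up;
         * 'count' is how many threads this CPU is short of that share and
         * therefore how many it will try to steal in this pass.
         */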
        average = atomic_get(&nrdy) / config.cpu_active + 1;
        count = average - atomic_get(&CPU->nrdy);

        if (count <= 0)
                goto satisfied;

        /*
         * Search the lowest-priority queues on all CPUs first and the
         * highest-priority queues on all CPUs last.
         */
        for (j = RQ_COUNT - 1; j >= 0; j--) {
                for (i = 0; i < config.cpu_active; i++) {
                        link_t *l;
                        runq_t *r;
                        cpu_t *cpu;

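                        /*
                         * The offset 'k' rotates the CPU we start stealing
                         * from; it is advanced after each successful
                         * migration so that one victim CPU is not drained
                         * repeatedly.
                         */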
                        cpu = &cpus[(i + k) % config.cpu_active];

                        /*
                         * We are not interested in our own CPU.
                         * No interrupt disabling is required, because kcpulb is X_WIRED.
                         */
                        if (CPU == cpu)
                                continue;
                        if (atomic_get(&cpu->nrdy) <= average)
                                continue;

                        ipl = interrupts_disable();
                        r = &cpu->rq[j];
                        spinlock_lock(&r->lock);
                        if (r->n == 0) {
                                spinlock_unlock(&r->lock);
                                interrupts_restore(ipl);
                                continue;
                        }

                        t = NULL;
                        l = r->rq_head.prev;    /* search rq from the back */
                        while (l != &r->rq_head) {
                                t = list_get_instance(l, thread_t, rq_link);
                                /*
                                 * We don't want to steal CPU-wired threads or threads
                                 * that have already been stolen. The latter prevents
                                 * threads from migrating between CPUs without ever
                                 * being run. We also don't want to steal threads
                                 * whose FPU context is still in the CPU.
                                 */
                                spinlock_lock(&t->lock);
                                if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
                                        /*
                                         * Remove t from r.
                                         */
                                        spinlock_unlock(&t->lock);

                                        atomic_dec(&cpu->nrdy);
                                        atomic_dec(&nrdy);

                                        r->n--;
                                        list_remove(&t->rq_link);

                                        break;
                                }
                                spinlock_unlock(&t->lock);
                                l = l->prev;
                                t = NULL;
                        }
                        spinlock_unlock(&r->lock);

                        if (t) {
                                /*
                                 * Ready t on local CPU.
                                 */
                                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                                t->flags |= X_STOLEN;
                                spinlock_unlock(&t->lock);

                                thread_ready(t);

                                interrupts_restore(ipl);

                                if (--count == 0)
                                        goto satisfied;

                                /*
                                 * We are not satisfied yet; focus on another CPU next time.
                                 */
                                k++;

                                continue;
                        }
                        interrupts_restore(ipl);
                }
        }

        if (atomic_get(&CPU->nrdy)) {
                /*
                 * Be a little bit lightweight and let the migrated threads run.
                 */
                scheduler();
        } else {
                /*
                 * We failed to migrate a single thread.
                 * Give up this turn.
                 */
                goto loop;
        }

        goto not_satisfied;

satisfied:
        goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
        ipl_t ipl;
        int cpu, i;
        runq_t *r;
        thread_t *t;
        link_t *cur;

        /* We are going to mess with scheduler structures,
         * so let's not be interrupted. */
        ipl = interrupts_disable();
        printf("*********** Scheduler dump ***********\n");
        for (cpu = 0; cpu < config.cpu_count; cpu++) {
                if (!cpus[cpu].active)
                        continue;
                spinlock_lock(&cpus[cpu].lock);
                printf("cpu%d: nrdy: %d needs_relink: %d\n",
                       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

                for (i = 0; i < RQ_COUNT; i++) {
                        r = &cpus[cpu].rq[i];
                        spinlock_lock(&r->lock);
                        if (!r->n) {
                                spinlock_unlock(&r->lock);
                                continue;
                        }
                        printf("\tRq %d: ", i);
                        for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                                t = list_get_instance(cur, thread_t, rq_link);
                                printf("%d(%s) ", t->tid,
                                       thread_states[t->state]);
                        }
                        printf("\n");
                        spinlock_unlock(&r->lock);
                }
                spinlock_unlock(&cpus[cpu].lock);
        }

        interrupts_restore(ipl);
}