/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

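/*
 * Within this file, nrdy is kept in sync with the per-CPU cpu_t::nrdy
 * counters: both are decremented in tandem in find_best_thread() and
 * kcpulb(), and kcpulb() derives the per-CPU average from the global count.
 */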
atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Take actions before a new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
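	/*
	 * With CONFIG_FPU_LAZY, keep the FPU enabled only for the thread that
	 * already owns this CPU's FPU state; any other thread's first FPU
	 * instruction then traps and the context is switched on demand in
	 * scheduler_fpu_lazy_request(). Otherwise, the FPU context is
	 * switched eagerly right here.
	 */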
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
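/** Service a lazy FPU context switch request.
 *
 * Called when THREAD uses the FPU without owning this CPU's FPU state
 * (on most architectures, from the trap raised by the disabled FPU).
 * Saves the previous owner's context, then restores THREAD's context,
 * allocating and initializing one if it does not exist yet.
 */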
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context =
				slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there is nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will go to sleep even though a
		 * runnable thread exists; the next interrupt then wakes it
		 * up again and the loop is retried.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

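		/*
		 * The quantum is proportional to the queue index: a thread
		 * taken from rq[i] gets (i + 1) * 10 ms, so lower-priority
		 * threads run less often but for longer stretches.
		 */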
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated the next
		 * time the need for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
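		/*
		 * Drain each rq[i + 1] into rq[i], one level at a time.
		 * E.g. with start == 0 and RQ_COUNT == 4, rq[1] empties into
		 * rq[0], rq[2] into rq[1] and rq[3] into rq[2], so every
		 * affected thread gains exactly one priority level.
		 */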
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
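		/*
		 * With eager FPU switching, save the FPU context on every
		 * reschedule; the lazy variant defers this to
		 * scheduler_fpu_lazy_request(), where the context is saved
		 * only when another thread claims the FPU.
		 */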
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of the preempted thread is
		 * recorded here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
		(__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using the new stack. Handles the actual
 * context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
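			/*
			 * The thread was preempted but is still runnable;
			 * re-enqueue it. thread_ready() acquires the thread's
			 * lock itself, so it must be dropped first.
			 */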
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid,
				thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

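	/*
	 * Use the chosen thread's priority as the relink threshold: if a
	 * relink is due, every queue of lower priority is promoted one
	 * level, so long-waiting low-priority threads do not starve.
	 */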
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 */
		if (as1 != as2) {
			/*
			 * Both the tasks and the address spaces are different.
			 * Replace the old address space with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id,
		THREAD->tid, THREAD->priority, THREAD->ticks,
		atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
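	/*
	 * 'average' is the per-CPU share of ready threads, rounded up;
	 * 'count' is how many threads this CPU falls short of that share
	 * and therefore how many it will try to steal.
	 */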

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
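			/*
			 * 'k' is bumped after every successful steal, which
			 * rotates the starting CPU so the same victim is not
			 * always raided first.
			 */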

			/*
			 * Not interested in ourselves.
			 * No need to disable interrupts, because kcpulb
			 * itself is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter prevents threads from migrating
				 * between CPUs without ever being run. We
				 * also don't want to steal threads whose FPU
				 * context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) &&
					!t->fpu_context_engaged) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, "
					"avg=%d\n", CPU->id, t->tid, CPU->id,
					atomic_get(&CPU->nrdy),
					atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/*
	 * We are going to mess with scheduler structures,
	 * so let's not be interrupted.
	 */
	ipl = interrupts_disable();
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%P, nrdy=%d, needs_relink=%d\n",
			cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
			cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head;
				cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
					thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}