source: mainline/generic/src/proc/scheduler.c@266294a9

Last change on this file since 266294a9 was 266294a9, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Added constructor/destructor calls to SLAB.
Changed allocation of thread_t structure to use SLAB.

[f761f1eb]1/*
2 * Copyright (C) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <proc/scheduler.h>
30#include <proc/thread.h>
31#include <proc/task.h>
[32ff43e6]32#include <mm/heap.h>
33#include <mm/frame.h>
34#include <mm/page.h>
[20d50a1]35#include <mm/as.h>
[32ff43e6]36#include <arch/asm.h>
37#include <arch/faddr.h>
38#include <arch/atomic.h>
39#include <synch/spinlock.h>
[f761f1eb]40#include <config.h>
41#include <context.h>
42#include <func.h>
43#include <arch.h>
44#include <list.h>
[02a99d2]45#include <panic.h>
[f761f1eb]46#include <typedefs.h>
[32ff43e6]47#include <cpu.h>
[9c0a9b3]48#include <print.h>
[623ba26c]49#include <debug.h>
[9c0a9b3]50
[59e07c91]51atomic_t nrdy;
[f761f1eb]52
[b60a22c]53/** Take actions before new thread runs
[70527f1]54 *
[b60a22c]55 * Perform actions that need to be
56 * taken before the newly selected
57 * thread is passed control.
[70527f1]58 *
59 */
[0ca6faa]60void before_thread_runs(void)
61{
[b49f4ae]62 before_thread_runs_arch();
[5f85c91]63#ifdef CONFIG_FPU_LAZY
[b49f4ae]64 if(THREAD==CPU->fpu_owner)
65 fpu_enable();
66 else
67 fpu_disable();
68#else
69 fpu_enable();
70 if (THREAD->fpu_context_exists)
71 fpu_context_restore(&(THREAD->saved_fpu_context));
72 else {
73 fpu_init();
74 THREAD->fpu_context_exists=1;
75 }
76#endif
[0ca6faa]77}
78
[5f85c91]79#ifdef CONFIG_FPU_LAZY
[b49f4ae]80void scheduler_fpu_lazy_request(void)
81{
82 fpu_enable();
83 if (CPU->fpu_owner != NULL) {
84 fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
85 /* don't prevent migration */
86 CPU->fpu_owner->fpu_context_engaged=0;
87 }
88 if (THREAD->fpu_context_exists)
89 fpu_context_restore(&THREAD->saved_fpu_context);
90 else {
91 fpu_init();
92 THREAD->fpu_context_exists=1;
93 }
94 CPU->fpu_owner=THREAD;
95 THREAD->fpu_context_engaged = 1;
96}
97#endif
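/*
 * Note on FPU context handling (a sketch of the mechanism as it appears in
 * this file, not a statement of the architecture interface): with
 * CONFIG_FPU_LAZY, before_thread_runs() keeps the FPU disabled unless the
 * incoming thread is already CPU->fpu_owner. The first FPU instruction of any
 * other thread is then expected to trap, and the architecture-specific
 * handler presumably calls scheduler_fpu_lazy_request(), which saves the old
 * owner's context, restores or initializes the new one, and takes ownership.
 * Without CONFIG_FPU_LAZY, scheduler() saves the FPU context on every
 * preemption and before_thread_runs() restores it on every switch.
 */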
[0ca6faa]98
[70527f1]99/** Initialize scheduler
100 *
101 * Initialize kernel scheduler.
102 *
103 */
[f761f1eb]104void scheduler_init(void)
105{
106}
107
[70527f1]108
109/** Get thread to be scheduled
110 *
111 * Get the optimal thread to be scheduled
[d1a184f]112 * according to thread accounting and scheduler
[70527f1]113 * policy.
114 *
115 * @return Thread to be scheduled.
116 *
117 */
[e507afa]118static thread_t *find_best_thread(void)
[f761f1eb]119{
120 thread_t *t;
121 runq_t *r;
[248fc1a]122 int i;
[f761f1eb]123
[623ba26c]124 ASSERT(CPU != NULL);
125
[f761f1eb]126loop:
[22f7769]127 interrupts_enable();
[f761f1eb]128
[248fc1a]129 if (atomic_get(&CPU->nrdy) == 0) {
[f761f1eb]130 /*
131 * Since there was nothing to run, the CPU goes to sleep
132 * until a hardware interrupt or an IPI comes.
133 * This improves energy saving and makes better use of hyperthreading.
134 */
[328e0d3]135
136 /*
137 * An interrupt might occur right now and wake up a thread.
138 * In such a case, the CPU will continue to go to sleep
139 * even though there is a runnable thread.
140 */
141
[f761f1eb]142 cpu_sleep();
143 goto loop;
144 }
145
[22f7769]146 interrupts_disable();
[d896525]147
148 i = 0;
149 for (; i<RQ_COUNT; i++) {
[43114c5]150 r = &CPU->rq[i];
[f761f1eb]151 spinlock_lock(&r->lock);
152 if (r->n == 0) {
153 /*
154 * If this queue is empty, try a lower-priority queue.
155 */
156 spinlock_unlock(&r->lock);
157 continue;
158 }
[3e1607f]159
[248fc1a]160 atomic_dec(&CPU->nrdy);
[59e07c91]161 atomic_dec(&nrdy);
[f761f1eb]162 r->n--;
163
164 /*
165 * Take the first thread from the queue.
166 */
167 t = list_get_instance(r->rq_head.next, thread_t, rq_link);
168 list_remove(&t->rq_link);
169
170 spinlock_unlock(&r->lock);
171
172 spinlock_lock(&t->lock);
[43114c5]173 t->cpu = CPU;
[f761f1eb]174
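 /*
  * Note: the quantum grows with the queue index -- a thread taken from
  * rq[i] receives (i+1)*10000 microseconds, i.e. 10 ms for the
  * highest-priority queue rq[0], 20 ms for the next one, and so on.
  * The thread's priority is also reset to the index of the queue it was
  * actually taken from; it may have been boosted elsewhere, e.g. set to
  * -1 when the thread went to sleep.
  */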
175 t->ticks = us2ticks((i+1)*10000);
[22f7769]176 t->priority = i; /* update to the correct rq index */
[f761f1eb]177
178 /*
179 * Clear the X_STOLEN flag so that t can be migrated when the need for load balancing arises.
180 */
181 t->flags &= ~X_STOLEN;
182 spinlock_unlock(&t->lock);
183
184 return t;
185 }
186 goto loop;
187
188}
189
[70527f1]190
191/** Prevent rq starvation
192 *
193 * Prevent low priority threads from starving in rq's.
194 *
195 * When the function decides to relink rq's, it reconnects
196 * the respective list pointers so that, as a result, threads with priority
197 * greater than 'start' are moved up into the next higher-priority queue.
198 *
199 * @param start Threshold priority.
200 *
[f761f1eb]201 */
[e16e036a]202static void relink_rq(int start)
[f761f1eb]203{
204 link_t head;
205 runq_t *r;
206 int i, n;
207
208 list_initialize(&head);
[43114c5]209 spinlock_lock(&CPU->lock);
210 if (CPU->needs_relink > NEEDS_RELINK_MAX) {
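 /*
  * Note: each pass of the loop below empties rq[i + 1] and appends its
  * threads to rq[i], for i from 'start' upwards. Threads waiting in
  * lower-priority queues are thus promoted one level per relink, which
  * is what prevents their starvation. NEEDS_RELINK_MAX only throttles
  * how often this relatively expensive pass runs; needs_relink is
  * presumably incremented elsewhere, e.g. by the clock interrupt handler.
  */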
[f761f1eb]211 for (i = start; i<RQ_COUNT-1; i++) {
212 /* remember and empty rq[i + 1] */
[43114c5]213 r = &CPU->rq[i + 1];
[f761f1eb]214 spinlock_lock(&r->lock);
215 list_concat(&head, &r->rq_head);
216 n = r->n;
217 r->n = 0;
218 spinlock_unlock(&r->lock);
219
220 /* append rq[i + 1] to rq[i] */
[43114c5]221 r = &CPU->rq[i];
[f761f1eb]222 spinlock_lock(&r->lock);
223 list_concat(&r->rq_head, &head);
224 r->n += n;
225 spinlock_unlock(&r->lock);
226 }
[43114c5]227 CPU->needs_relink = 0;
[f761f1eb]228 }
[444ec64]229 spinlock_unlock(&CPU->lock);
[f761f1eb]230
231}
232
[70527f1]233
234/** Scheduler stack switch wrapper
235 *
236 * Second part of the scheduler() function
237 * using the new stack. It handles the actual context
238 * switch to a new thread.
239 *
[266294a9]240 * Assume THREAD->lock is held.
[70527f1]241 */
[e16e036a]242static void scheduler_separated_stack(void)
[f761f1eb]243{
244 int priority;
245
[623ba26c]246 ASSERT(CPU != NULL);
247
[43114c5]248 if (THREAD) {
249 switch (THREAD->state) {
[f761f1eb]250 case Running:
[76cec1e]251 THREAD->state = Ready;
252 spinlock_unlock(&THREAD->lock);
253 thread_ready(THREAD);
254 break;
[f761f1eb]255
256 case Exiting:
[266294a9]257 thread_destroy(THREAD);
[76cec1e]258 break;
[266294a9]259
[f761f1eb]260 case Sleeping:
[76cec1e]261 /*
262 * Prefer the thread after it's woken up.
263 */
[22f7769]264 THREAD->priority = -1;
[76cec1e]265
266 /*
267 * We need to release wq->lock which we locked in waitq_sleep().
268 * Address of wq->lock is kept in THREAD->sleep_queue.
269 */
270 spinlock_unlock(&THREAD->sleep_queue->lock);
271
272 /*
273 * Check for possible requests for out-of-context invocation.
274 */
275 if (THREAD->call_me) {
276 THREAD->call_me(THREAD->call_me_with);
277 THREAD->call_me = NULL;
278 THREAD->call_me_with = NULL;
279 }
280
281 spinlock_unlock(&THREAD->lock);
282
283 break;
[f761f1eb]284
285 default:
[76cec1e]286 /*
287 * Entering state is unexpected.
288 */
289 panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
290 break;
[f761f1eb]291 }
[43114c5]292 THREAD = NULL;
[f761f1eb]293 }
[ba18512]294
[cd95d784]295
[43114c5]296 THREAD = find_best_thread();
[f761f1eb]297
[43114c5]298 spinlock_lock(&THREAD->lock);
[22f7769]299 priority = THREAD->priority;
[43114c5]300 spinlock_unlock(&THREAD->lock);
[7ce9284]301
[f761f1eb]302 relink_rq(priority);
303
[43114c5]304 spinlock_lock(&THREAD->lock);
[f761f1eb]305
306 /*
307 * If both the old and the new task are the same, lots of work is avoided.
308 */
[43114c5]309 if (TASK != THREAD->task) {
[20d50a1]310 as_t *as1 = NULL;
311 as_t *as2;
[f761f1eb]312
[43114c5]313 if (TASK) {
314 spinlock_lock(&TASK->lock);
[20d50a1]315 as1 = TASK->as;
[43114c5]316 spinlock_unlock(&TASK->lock);
[f761f1eb]317 }
318
[43114c5]319 spinlock_lock(&THREAD->task->lock);
[20d50a1]320 as2 = THREAD->task->as;
[43114c5]321 spinlock_unlock(&THREAD->task->lock);
[f761f1eb]322
323 /*
[20d50a1]324 * Note that it is possible for two tasks to share one address space.
[f761f1eb]325 */
[20d50a1]326 if (as1 != as2) {
[f761f1eb]327 /*
[20d50a1]328 * Both tasks and address spaces are different.
[f761f1eb]329 * Replace the old one with the new one.
330 */
[20d50a1]331 as_install(as2);
[f761f1eb]332 }
[43114c5]333 TASK = THREAD->task;
[f761f1eb]334 }
335
[43114c5]336 THREAD->state = Running;
[f761f1eb]337
338 #ifdef SCHEDULER_VERBOSE
[22f7769]339 printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
[f761f1eb]340 #endif
341
[3e1607f]342 /*
343 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to the thread's kernel stack.
344 */
[bcdd9aa]345 the_copy(THE, (the_t *) THREAD->kstack);
346
[43114c5]347 context_restore(&THREAD->saved_context);
[f761f1eb]348 /* not reached */
349}
350
[70527f1]351
[e16e036a]352/** The scheduler
353 *
354 * The thread scheduling procedure.
[5fe5f1e]355 * Passes control directly to
356 * scheduler_separated_stack().
[e16e036a]357 *
358 */
359void scheduler(void)
360{
361 volatile ipl_t ipl;
362
363 ASSERT(CPU != NULL);
364
365 ipl = interrupts_disable();
366
[36e7ee98]367 if (atomic_get(&haltstate))
[e16e036a]368 halt();
369
370 if (THREAD) {
371 spinlock_lock(&THREAD->lock);
[5f85c91]372#ifndef CONFIG_FPU_LAZY
[e16e036a]373 fpu_context_save(&(THREAD->saved_fpu_context));
374#endif
375 if (!context_save(&THREAD->saved_context)) {
376 /*
377 * This is the place where threads leave scheduler();
378 */
379 before_thread_runs();
380 spinlock_unlock(&THREAD->lock);
381 interrupts_restore(THREAD->saved_context.ipl);
382 return;
383 }
384
385 /*
386 * Interrupt priority level of preempted thread is recorded here
387 * to facilitate scheduler() invocations from interrupts_disable()'d
388 * code (e.g. waitq_sleep_timeout()).
389 */
390 THREAD->saved_context.ipl = ipl;
391 }
392
393 /*
[05e2a7ad]394 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
[e16e036a]395 * and preemption counter. At this point THE could be coming either
396 * from THREAD's or CPU's stack.
397 */
398 the_copy(THE, (the_t *) CPU->stack);
399
400 /*
401 * We must not keep the old stack.
402 * Reason: If we kept the old stack and got blocked, for instance, in
403 * find_best_thread(), the old thread could get rescheduled by another
404 * CPU and overwrite the part of its own stack that was also used by
405 * the scheduler on this CPU.
406 *
407 * Moreover, we have to bypass the compiler-generated POP sequence
408 * which is fooled by SP being set to the very top of the stack.
409 * Therefore the scheduler() function continues in
410 * scheduler_separated_stack().
411 */
412 context_save(&CPU->saved_context);
413 context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
414 context_restore(&CPU->saved_context);
415 /* not reached */
416}
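/*
 * Note: scheduler() and scheduler_separated_stack() together form a two-phase
 * switch. scheduler() saves the outgoing thread's context and FPU state
 * (eagerly, unless CONFIG_FPU_LAZY), then moves onto the per-CPU stack via
 * context_set()/context_restore() and continues in
 * scheduler_separated_stack(), which requeues, destroys or leaves sleeping
 * the old thread, picks a new one with find_best_thread() and
 * context_restore()s into it. The preempted thread eventually resumes in the
 * "if (!context_save(...))" branch above, where before_thread_runs() and
 * interrupts_restore() are applied before it returns to its interrupted work.
 */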
417
418
419
420
421
[5f85c91]422#ifdef CONFIG_SMP
[70527f1]423/** Load balancing thread
424 *
425 * SMP load balancing thread, supervising the supply
426 * of threads for the CPU it is wired to.
427 *
428 * @param arg Generic thread argument (unused).
429 *
[f761f1eb]430 */
431void kcpulb(void *arg)
432{
433 thread_t *t;
[248fc1a]434 int count, average, i, j, k = 0;
[22f7769]435 ipl_t ipl;
[f761f1eb]436
437loop:
438 /*
[3260ada]439 * Work in 1s intervals.
[f761f1eb]440 */
[3260ada]441 thread_sleep(1);
[f761f1eb]442
443not_satisfied:
444 /*
445 * Calculate the number of threads that will be migrated/stolen from
446 * other CPU's. Note that the situation may have changed between two
447 * passes. Each time, get the most up-to-date counts.
448 */
[444ec64]449 average = atomic_get(&nrdy) / config.cpu_active + 1;
[248fc1a]450 count = average - atomic_get(&CPU->nrdy);
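 /*
  * For illustration: with 10 ready threads system-wide and 4 active
  * CPU's, average = 10/4 + 1 = 3; a CPU with only 1 ready thread gets
  * count = 2 and will try to steal two threads below.
  */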
[f761f1eb]451
[444ec64]452 if (count <= 0)
[f761f1eb]453 goto satisfied;
454
455 /*
456 * Search the lowest-priority queues on all CPU's first and the highest-priority queues last.
457 */
458 for (j=RQ_COUNT-1; j >= 0; j--) {
459 for (i=0; i < config.cpu_active; i++) {
460 link_t *l;
461 runq_t *r;
462 cpu_t *cpu;
463
464 cpu = &cpus[(i + k) % config.cpu_active];
465
466 /*
467 * Not interested in ourselves.
468 * This doesn't require interrupt disabling, because kcpulb is X_WIRED.
469 */
[43114c5]470 if (CPU == cpu)
[248fc1a]471 continue;
472 if (atomic_get(&cpu->nrdy) <= average)
473 continue;
[f761f1eb]474
[444ec64]475 ipl = interrupts_disable();
[18e0a6c]476 r = &cpu->rq[j];
[f761f1eb]477 spinlock_lock(&r->lock);
478 if (r->n == 0) {
479 spinlock_unlock(&r->lock);
[22f7769]480 interrupts_restore(ipl);
[f761f1eb]481 continue;
482 }
483
484 t = NULL;
485 l = r->rq_head.prev; /* search rq from the back */
486 while (l != &r->rq_head) {
487 t = list_get_instance(l, thread_t, rq_link);
488 /*
[76cec1e]489 * We don't want to steal CPU-wired threads, nor threads that have already been stolen.
[f761f1eb]490 * The latter prevents threads from migrating between CPU's without ever being run.
[76cec1e]491 * We also don't want to steal threads whose FPU context is still in the CPU.
[6a27d63]492 */
[f761f1eb]493 spinlock_lock(&t->lock);
[6a27d63]494 if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
[f761f1eb]495 /*
496 * Remove t from r.
497 */
498 spinlock_unlock(&t->lock);
499
[248fc1a]500 atomic_dec(&cpu->nrdy);
[59e07c91]501 atomic_dec(&nrdy);
[f761f1eb]502
[76cec1e]503 r->n--;
[f761f1eb]504 list_remove(&t->rq_link);
505
506 break;
507 }
508 spinlock_unlock(&t->lock);
509 l = l->prev;
510 t = NULL;
511 }
512 spinlock_unlock(&r->lock);
513
514 if (t) {
515 /*
516 * Ready t on local CPU
517 */
518 spinlock_lock(&t->lock);
519 #ifdef KCPULB_VERBOSE
[248fc1a]520 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
[f761f1eb]521 #endif
522 t->flags |= X_STOLEN;
523 spinlock_unlock(&t->lock);
524
525 thread_ready(t);
526
[22f7769]527 interrupts_restore(ipl);
[f761f1eb]528
529 if (--count == 0)
530 goto satisfied;
531
532 /*
[76cec1e]533 * We are not satisfied yet, focus on another CPU next time.
[f761f1eb]534 */
535 k++;
536
537 continue;
538 }
[22f7769]539 interrupts_restore(ipl);
[f761f1eb]540 }
541 }
542
[248fc1a]543 if (atomic_get(&CPU->nrdy)) {
[f761f1eb]544 /*
545 * Be a little bit light-weight and let migrated threads run.
546 */
547 scheduler();
[3260ada]548 } else {
[f761f1eb]549 /*
550 * We failed to migrate a single thread.
[3260ada]551 * Give up this turn.
[f761f1eb]552 */
[3260ada]553 goto loop;
[f761f1eb]554 }
555
556 goto not_satisfied;
[76cec1e]557
[f761f1eb]558satisfied:
559 goto loop;
560}
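/*
 * Note: kcpulb() sleeps for one second per round and then loops through
 * not_satisfied until it has stolen 'count' threads. After each full scan it
 * either calls scheduler() to let freshly migrated threads run (when this CPU
 * now has ready threads) or, having migrated nothing, goes back to sleep
 * until the next round. Wired (X_WIRED), already stolen (X_STOLEN) and
 * FPU-engaged threads are never migrated.
 */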
561
[5f85c91]562#endif /* CONFIG_SMP */
[10e16a7]563
564
565/** Print information about threads & scheduler queues */
566void sched_print_list(void)
567{
568 ipl_t ipl;
569 int cpu,i;
570 runq_t *r;
571 thread_t *t;
572 link_t *cur;
573
574 /* We are going to mess with scheduler structures,
575 * let's not be interrupted */
576 ipl = interrupts_disable();
577 printf("*********** Scheduler dump ***********\n");
578 for (cpu=0;cpu < config.cpu_count; cpu++) {
579 if (!cpus[cpu].active)
580 continue;
581 spinlock_lock(&cpus[cpu].lock);
582 printf("cpu%d: nrdy: %d needs_relink: %d\n",
[248fc1a]583 cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
[10e16a7]584
585 for (i=0; i<RQ_COUNT; i++) {
586 r = &cpus[cpu].rq[i];
587 spinlock_lock(&r->lock);
588 if (!r->n) {
589 spinlock_unlock(&r->lock);
590 continue;
591 }
[3260ada]592 printf("\tRq %d: ", i);
[10e16a7]593 for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
594 t = list_get_instance(cur, thread_t, rq_link);
595 printf("%d(%s) ", t->tid,
596 thread_states[t->state]);
597 }
598 printf("\n");
599 spinlock_unlock(&r->lock);
600 }
601 spinlock_unlock(&cpus[cpu].lock);
602 }
603
604 interrupts_restore(ipl);
605}