source: mainline/src/proc/scheduler.c@1e9a463

Last change on this file since 1e9a463 was bcdd9aa, checked in by Jakub Jermar <jakub@…>, 20 years ago

Add lib/the.c.
Add and deploy the_initialize() and the_copy().

Make IA-32's before_thread_runs() use SP_DELTA macro.

Property mode set to 100644
File size: 12.7 KB
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

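/** Number of ready threads in the system.
 *
 * Decremented atomically below whenever a thread is taken off a run
 * queue (and presumably incremented elsewhere when a thread becomes
 * ready); kcpulb() divides it by config.cpu_active to obtain the
 * average number of ready threads per CPU.
 */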
volatile int nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
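	/*
	 * Restore the FPU context saved in scheduler() when this thread
	 * last gave up the CPU.
	 */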
	fpu_context_restore(&(THREAD->saved_fpu_context));
}

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

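	/* Sample this CPU's count of ready threads under its lock. */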
	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, some hardware interrupts may be missed.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	i = 0;
retry:
	for (; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		/*
		 * Avoid deadlock with relink_rq():
		 * relink_rq() takes CPU->lock before the run queue locks,
		 * while we already hold r->lock here, so we may only try.
		 */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

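		/*
		 * Assign a time quantum proportional to the queue index:
		 * (i + 1) * 10000 us, i.e. 10 ms per priority level.
		 */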
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than 'start' are moved up one priority queue (e.g.
 * the contents of rq[start + 1] are appended to rq[start],
 * those of rq[start + 2] to rq[start + 1], and so on).
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
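	/*
	 * pri is volatile, presumably so that its value is not kept in a
	 * register across the setjmp-like context_save()/context_restore()
	 * pair below.
	 */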
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * CPU priority of preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * using a new stack. Handles the actual
 * context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		    case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
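			/*
			 * It is safe to free THREAD's kernel stack here
			 * because we are already running on the CPU's
			 * private stack (see scheduler() above).
			 */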
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

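	/*
	 * Install THE on the new thread's kernel stack before passing
	 * control to it (cf. the_copy() onto CPU->stack above).
	 */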
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation can have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
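	/*
	 * Aim at the system-wide average: nrdy / cpu_active ready
	 * threads per CPU, less those already on this CPU.
	 */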
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No interrupt disabling is required, for kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor
				 * threads that have already been stolen. The latter
				 * prevents threads from migrating between CPU's
				 * without ever being run. Nor do we want to steal
				 * threads whose FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {

					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

[f761f1eb]567satisfied:
568 /*
569 * Tell find_best_thread() to wake us up later again.
570 */
[43114c5]571 CPU->kcpulbstarted = 0;
[f761f1eb]572 goto loop;
573}
574
575#endif /* __SMP__ */