source: mainline/src/proc/scheduler.c@ 0ca6faa

Last change on this file since 0ca6faa was 0ca6faa, checked in by Jakub Vana <jakub.vana@…>, 20 years ago

Move fpu_context switching functions from context.s to fpu_context.c on all platforms.
Add fpu_context.h.

Make before_thread_runs() arch-independent and create an arch-dependent version, before_thread_runs_arch().

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

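/*
 * Thread scheduler: per-CPU run queue management, context switching and,
 * on SMP configurations, the kcpulb load balancing thread.
 */
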
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;

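/*
 * Called right before a thread is passed control on this CPU.
 * Performs architecture-specific preparation (before_thread_runs_arch())
 * and restores the thread's FPU context.
 */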
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore();
}

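/*
 * Initialize the scheduler: set up the spinlock protecting the global
 * count of ready threads (nrdy).
 */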
void scheduler_init(void)
{
	spinlock_initialize(&nrdylock);
}

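/*
 * Find and dequeue the highest-priority ready thread on this CPU.
 * If no thread is ready, wake up the load balancer (on SMP) or put
 * the CPU to sleep until one appears.
 */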
/* Returns with interrupts disabled (cpu_priority_high()'d). */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag indicating that kcpulb has been started.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		spinlock_lock(&nrdylock);
		nrdy--;
		spinlock_unlock(&nrdylock);

		spinlock_lock(&CPU->lock);
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		t->ticks = us2ticks((i+1)*10000);
		t->pri = i;	/* correct the rq index if needed */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when the need
		 * for load balancing arises.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that threads with 'pri' greater than or equal to 'start' are moved
 * to higher-priority queues.
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler.
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
	CPU->saved_context.pc = (__address) scheduler_separated_stack;
	context_restore(&CPU->saved_context);
	/* not reached */
}

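/*
 * Second half of the scheduler, entered on the CPU-private stack set up by
 * scheduler(). Disposes of the previous thread according to its state, then
 * obtains the next thread from find_best_thread(), switches the address
 * space if necessary and restores the new thread's context.
 */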
void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
			case Running:
				THREAD->state = Ready;
				spinlock_unlock(&THREAD->lock);
				thread_ready(THREAD);
				break;

			case Exiting:
				frame_free((__address) THREAD->kstack);
				if (THREAD->ustack) {
					frame_free((__address) THREAD->ustack);
				}

				/*
				 * Detach from the containing task.
				 */
				spinlock_lock(&TASK->lock);
				list_remove(&THREAD->th_link);
				spinlock_unlock(&TASK->lock);

				spinlock_unlock(&THREAD->lock);

				spinlock_lock(&threads_lock);
				list_remove(&THREAD->threads_link);
				spinlock_unlock(&threads_lock);

				free(THREAD);

				break;

			case Sleeping:
				/*
				 * Prefer the thread after it's woken up.
				 */
				THREAD->pri = -1;

				/*
				 * We need to release wq->lock which we locked in waitq_sleep().
				 * Address of wq->lock is kept in THREAD->sleep_queue.
				 */
				spinlock_unlock(&THREAD->sleep_queue->lock);

				/*
				 * Check for possible requests for out-of-context invocation.
				 */
				if (THREAD->call_me) {
					THREAD->call_me(THREAD->call_me_with);
					THREAD->call_me = NULL;
					THREAD->call_me_with = NULL;
				}

				spinlock_unlock(&THREAD->lock);

				break;

			default:
				/*
				 * The thread is in an unexpected state.
				 */
				panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
				break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}

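/*
 * On SMP configurations, each CPU is served by a load balancing thread.
 * It sleeps on CPU->kcpulb_wq and is woken up by find_best_thread() when
 * a CPU runs out of ready threads; the CPU->kcpulbstarted flag guards
 * against repeated wakeups.
 */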
#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
			r = &cpu->rq[j];

			/*
			 * Not interested in ourselves.
			 * Interrupt disabling is not required, because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads that
				 * have already been stolen. The latter prevents threads from
				 * migrating between CPU's without ever being run.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN))) {
					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					spinlock_lock(&nrdylock);
					nrdy--;
					spinlock_unlock(&nrdylock);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	}
	else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */