source: mainline/src/proc/scheduler.c@9c926f3

Last change on this file since 9c926f3 was 9c926f3, checked in by Jakub Vana <jakub.vana@…>, 20 years ago

Begin support for FPU context switching on platforms which can't do it in a lazy way.

  • Property mode set to 100644
File size: 11.9 KB
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read a __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

spinlock_t nrdylock;
volatile int nrdy;

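/*
 * Illustrative sketch of the required access pattern (an example only,
 * not code that is part of this file):
 *
 *	spinlock_lock(&nrdylock);
 *	n = nrdy;
 *	spinlock_unlock(&nrdylock);
 *
 * Per-CPU counts such as CPU->nrdy are likewise read and written only
 * under CPU->lock, as find_best_thread() and kcpulb() do below.
 */
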
void before_thread_runs(void)
{
	before_thread_runs_arch();
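	/*
	 * Eager FPU context switch: restore the FPU state that scheduler()
	 * saved when this thread was last preempted. Per the commit message,
	 * this is for platforms which can't switch the FPU context lazily.
	 */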
	fpu_context_restore(&(THREAD->saved_fpu_context));
}


void scheduler_init(void)
{
	spinlock_initialize(&nrdylock);
}

/* Returns with the CPU priority raised, i.e. cpu_priority_high()'d. */
struct thread *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i, n;

loop:
	cpu_priority_high();

	spinlock_lock(&CPU->lock);
	n = CPU->nrdy;
	spinlock_unlock(&CPU->lock);

	cpu_priority_low();

	if (n == 0) {
		#ifdef __SMP__
		/*
		 * If the load balancing thread is not running, wake it up and
		 * set the CPU-private flag that kcpulb has been started.
		 * The test_and_set() ensures only the first pass through this
		 * code wakes it; kcpulb() clears the flag once it is satisfied.
		 */
		if (test_and_set(&CPU->kcpulbstarted) == 0) {
			waitq_wakeup(&CPU->kcpulb_wq, 0);
			goto loop;
		}
		#endif /* __SMP__ */

		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This improves energy saving and hyperthreading.
		 * On the other hand, several hardware interrupts can be ignored.
		 */
		cpu_sleep();
		goto loop;
	}

	cpu_priority_high();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		spinlock_lock(&nrdylock);
		nrdy--;
		spinlock_unlock(&nrdylock);

		spinlock_lock(&CPU->lock);
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

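		/*
		 * The time quantum scales with the run queue index: a thread
		 * taken from rq[i] receives (i + 1) * 10000 us, i.e. 10 ms for
		 * the highest-priority queue, 20 ms for the next, and so on.
		 */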
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* update pri to the rq index it was actually taken from */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
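
	/*
	 * The count n was read without the run queue locks held, so another
	 * CPU may have stolen all our threads in the meantime; start over.
	 */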
	goto loop;
}

/*
 * This function prevents low-priority threads from starving in run queues.
 * When it decides to relink the queues, it reconnects the respective
 * pointers so that, as a result, threads with 'pri' greater than 'start'
 * are each moved to the next higher-priority queue.
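 *
 * For example, with start == 1: threads from rq[2] are appended to rq[1],
 * then those from rq[3] to rq[2], and so on, so each affected thread is
 * promoted by exactly one queue per relink.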
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);
}

/*
 * The scheduler.
 */
void scheduler(void)
{
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler():
			 * context_save() works in the manner of setjmp(), so
			 * this branch is taken only when the thread is later
			 * resumed via context_restore(&THREAD->saved_context).
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}
		THREAD->saved_context.pri = pri;
	}

	/*
	 * We must not keep the old stack.
	 * Reason: if we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence,
	 * which would be fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
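	/*
	 * Hand-craft a context: SP points just below the top of this CPU's
	 * private stack (the expression leaves 8 bytes of headroom) and PC
	 * at scheduler_separated_stack(); then jump to it.
	 */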
	context_save(&CPU->saved_context);
	CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE - 8];
	CPU->saved_context.pc = (__address) scheduler_separated_stack;
	context_restore(&CPU->saved_context);
	/* not reached */
}

void scheduler_separated_stack(void)
{
	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it is woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock, which we locked in
			 * waitq_sleep(). The address of wq is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context
			 * invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * The thread's state on entry to the scheduler is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

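	/*
	 * Passing the chosen thread's run queue index as 'start' lets
	 * relink_rq() promote every queue below it, so lower-priority
	 * threads cannot starve while this thread runs.
	 */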
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * The tasks differ and so do their vm mappings.
			 * Replace the old mapping with the new one.
			 */
			if (m1) {
				vm_uninstall(m1);
			}
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

	#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
	#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of ready threads for the CPU it's wired to.
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can have changed between two
	 * passes, so get the most up-to-date counts each time.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

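	/*
	 * 'count' is this CPU's shortfall against the system-wide average.
	 * For example, with nrdy == 9 and 3 active CPUs, the average is 3;
	 * a CPU with CPU->nrdy == 1 will try to steal count == 2 threads.
	 */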
	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];
			r = &cpu->rq[j];

			/*
			 * We are not interested in our own CPU.
			 * This check needs no interrupt disabling,
			 * because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search the rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads,
				 * nor threads that have already been stolen.
				 * The latter rule prevents threads from
				 * migrating between CPUs without ever being run.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN))) {
					/*
					 * Remove t from r.
					 */

					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with
					 * relink_rq(), because it locks cpu
					 * and r in a different order than we
					 * do: relink_rq() takes CPU->lock
					 * first and the rq lock second, while
					 * we already hold r->lock, hence the
					 * trylock.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					spinlock_lock(&nrdylock);
					nrdy--;
					spinlock_unlock(&nrdylock);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
				#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
				#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */