source: mainline/src/proc/scheduler.c @ 747a2476

Last change on this file since 747a2476 was fc9c4e1, checked in by Jakub Jermar <jakub@…>, 20 years ago

Replace "THREAD→cpu" with "CPU" in scheduler().

Add trailing '\n' to memmap.h
Fix some translations.
Replace one Czech sentence with its English translation.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

spinlock_t nrdylock;	/* protects nrdy */
volatile int nrdy;	/* number of ready threads in the system */

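/*
 * Prepare the CPU for THREAD: perform architecture-specific setup and restore
 * the thread's saved FPU context. In this file it runs in scheduler(), on the
 * path where the newly scheduled thread resumes execution.
 */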
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

71/* cpu_priority_high()'d */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI arrives.
         * This saves energy and is friendly to hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

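        /*
         * Give the thread a quantum proportional to its run queue index;
         * assuming us2ticks() converts from microseconds, a thread taken
         * from rq[i] gets (i + 1) * 10 ms of CPU time.
         */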
        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i;	/* record the rq index the thread was actually taken from */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}

/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects the respective pointers
 * so that, as a result, threads with 'pri' greater than or equal to 'start'
 * are moved to a higher-priority queue.
 */
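/*
 * For example, with start == 1 and RQ_COUNT == 4, the contents of rq[2] are
 * appended to rq[1] and the contents of rq[3] to rq[2] (provided that
 * CPU->needs_relink has exceeded NEEDS_RELINK_MAX).
 */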
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}

/*
 * The scheduler proper. It saves the context of the running thread (if any)
 * and switches to the CPU's private stack, on which scheduler_separated_stack()
 * continues the job.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
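    /*
     * The idiom below: context_save() records the current CPU state,
     * context_set() redirects that saved context to run
     * scheduler_separated_stack() on the CPU's private stack, and
     * context_restore() jumps there.
     */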
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}

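/*
 * Second half of the scheduler, running on the CPU's private stack.
 * It finishes handling the outgoing thread according to its state,
 * picks the next thread with find_best_thread(), switches address spaces
 * if the new thread belongs to a different task, and restores the new
 * thread's context.
 */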
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                spinlock_lock(&CPU->lock);
                if (CPU->fpu_owner == THREAD)
                    CPU->fpu_owner = NULL;
                spinlock_unlock(&CPU->lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it is woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * The address of wq is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * The thread has entered the scheduler in an unexpected state.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and their vm mappings are different.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

    context_restore(&THREAD->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It keeps the CPU it is wired to supplied with ready threads.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Get the most up-to-date counts each time.
     */
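    /*
     * For example, with nrdy == 8 ready threads, config.cpu_active == 4
     * and CPU->nrdy == 1, count comes out as 8 / 4 - 1 == 1 thread to steal.
     */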
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:    pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;	/* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads or threads that have already been stolen.
                 * The latter prevents threads from migrating between CPUs without ever being run.
                 * We also don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */