source: mainline/src/proc/scheduler.c@ 9c0a9b3

Last change on this file since 9c0a9b3 was 9c0a9b3, checked in by Jakub Vana <jakub.vana@…>, 20 years ago

1) memcopy and _memcopy functions rewritten to the ANSI C norm.
2) Repaired the ia32, ia64 and mips versions of SPARTAN to work with these memcopy functions.
3) Added a warning for undeclared functions and repaired the ia32, ia64 and mips versions to pass the build with this warning and the -Werror option.

  • Property mode set to 100644
File size: 12.8 KB
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <print.h>
#include <mm/frame.h>
#include <mm/heap.h>

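/** Global count of ready threads in the whole system; the per-CPU counterpart is CPU->nrdy. */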
volatile int nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
    fpu_context_restore(&(THREAD->saved_fpu_context));
}


/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
        #ifdef __SMP__
        /*
         * If the load-balancing thread is not running, wake it up and
         * set the CPU-private flag indicating that kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
        #endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    i = 0;
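    /*
     * Note that i deliberately keeps its value across the retry path below:
     * after a failed trylock on CPU->lock we jump back to retry and continue
     * with the same run queue instead of starting over from rq[0].
     */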
retry:
    for (; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* avoid deadlock with relink_rq() */
        if (!spinlock_trylock(&CPU->lock)) {
            /*
             * Unlock r and try again.
             */
            spinlock_unlock(&r->lock);
            goto retry;
        }
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;
        t->ticks = us2ticks((i + 1) * 10000);
        t->pri = i;     /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * 'pri' greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
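        /*
         * Example: with start == 0, the contents of rq[1] are appended to
         * rq[0], rq[2] to rq[1], and so on, so each thread from a queue
         * other than rq[0] is promoted by exactly one priority level.
         */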
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** The scheduler
 *
 * The thread scheduling procedure.
 *
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        fpu_context_save(&(THREAD->saved_fpu_context));
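        /*
         * context_save() works in the spirit of setjmp()/longjmp(): it
         * returns a non-zero value when the context is merely being
         * recorded and zero when control comes back here through
         * context_restore(). The branch below is therefore the resume
         * path of a previously preempted thread.
         */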
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }

        /*
         * The CPU priority of the preempted thread is recorded here
         * to facilitate scheduler() invocations from
         * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.pri = pri;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We must not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
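    /*
     * The three calls below switch execution onto the CPU's private stack:
     * context_save() fills in CPU->saved_context, context_set() overrides
     * its saved instruction pointer and stack pointer so that they point at
     * scheduler_separated_stack() and the top of CPU->stack, and
     * context_restore() then jumps there.
     */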
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }
    printf("*0*");
    THREAD = find_best_thread();
    printf("*1*");

    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
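        /*
         * When m1 == m2, vm_install() is skipped, which presumably saves
         * an unnecessary address-space switch (and the TLB overhead that
         * usually comes with it).
         */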
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

    #ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
    #endif

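    /*
     * Copy the THE structure to the base of the new thread's kernel stack
     * so that THREAD, TASK, CPU and the preemption counter remain visible
     * once execution continues on THREAD's stack.
     */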
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load-balancing thread that supervises the supply
 * of threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes. Get the most up-to-date counts each time.
     */
    pri = cpu_priority_high();
    spinlock_lock(&CPU->lock);
    count = nrdy / config.cpu_active;
    count -= CPU->nrdy;
    spinlock_unlock(&CPU->lock);
    cpu_priority_restore(pri);

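    /*
     * count is how many threads this CPU is short of the system-wide
     * average; e.g. with nrdy == 16, config.cpu_active == 4 and two
     * threads already ready locally, count == 16 / 4 - 2 == 2.
     */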
    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * We are not interested in ourselves.
             * Interrupt disabling is not required because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;        /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads or threads that
                 * have already been stolen. The latter prevents threads from
                 * migrating between CPUs without ever being run. We also
                 * don't want to steal threads whose FPU context is still in
                 * the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {

                    /*
                     * Remove t from r.
                     */

                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
                #ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
                #endif
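                /*
                 * Mark t as stolen so that other CPUs leave it alone from
                 * now on; find_best_thread() clears X_STOLEN again when t
                 * is eventually dispatched.
                 */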
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (CPU->nrdy) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    }
    else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    CPU->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */