source: mainline/kernel/generic/src/proc/scheduler.c@ee42e43

Last change on this file since ee42e43 was ee42e43, checked in by Jakub Jermar <jakub@…>, 15 years ago

Retire kernel rwlocks.

/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy; /**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken after the running thread
 * has been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
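/** Handle a lazy FPU context switch request
 *
 * Save the FPU context of its previous owner (if any) and restore or
 * initialize the FPU context of THREAD. Presumably invoked from the
 * architecture-specific FPU trap handler the first time a thread uses
 * the FPU after being scheduled; the exact call site is an assumption
 * not spelled out in this file.
 *
 */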
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	irq_spinlock_lock(&CPU->lock, false);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);

		/* Don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
		CPU->fpu_owner = NULL;
	}

	irq_spinlock_lock(&THREAD->lock, false);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			irq_spinlock_unlock(&THREAD->lock, false);
			irq_spinlock_unlock(&CPU->lock, false);
			THREAD->saved_fpu_context =
			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);

			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}

	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	irq_spinlock_unlock(&THREAD->lock, false);

	irq_spinlock_unlock(&CPU->lock, false);
}
#endif /* CONFIG_FPU_LAZY */

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	ASSERT(CPU != NULL);

loop:

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there is nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */
		irq_spinlock_lock(&CPU->lock, false);
		CPU->idle = true;
		irq_spinlock_unlock(&CPU->lock, false);
		interrupts_enable();

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */
		cpu_sleep();
		interrupts_disable();
		goto loop;
	}

	unsigned int i;
	for (i = 0; i < RQ_COUNT; i++) {
		irq_spinlock_lock(&(CPU->rq[i].lock), false);
		if (CPU->rq[i].n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		CPU->rq[i].n--;

		/*
		 * Take the first thread from the queue.
		 */
		thread_t *thread =
		    list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
		list_remove(&thread->rq_link);

		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

		thread->cpu = CPU;
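		/*
		 * The time quantum is derived from the queue the thread came
		 * from: queue i yields a quantum of (i + 1) * 10 ms, so
		 * lower-priority threads run longer once they finally get
		 * the CPU.
		 */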
		thread->ticks = us2ticks((i + 1) * 10000);
		thread->priority = i; /* Correct rq index */

		/*
		 * Clear the THREAD_FLAG_STOLEN flag so that the thread can be
		 * migrated when the need for load balancing arises again.
		 */
		thread->flags &= ~THREAD_FLAG_STOLEN;
		irq_spinlock_unlock(&thread->lock, false);

		return thread;
	}

	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it moves the contents of
 * each queue with index greater than 'start' one queue up, so the
 * affected threads end up in a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;

	list_initialize(&head);
	irq_spinlock_lock(&CPU->lock, false);

	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		int i;
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* Remember and empty rq[i + 1] */

			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
			list_concat(&head, &CPU->rq[i + 1].rq_head);
			size_t n = CPU->rq[i + 1].n;
			CPU->rq[i + 1].n = 0;
			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);

			/* Append rq[i + 1] to rq[i] */

			irq_spinlock_lock(&CPU->rq[i].lock, false);
			list_concat(&CPU->rq[i].rq_head, &head);
			CPU->rq[i].n += n;
			irq_spinlock_unlock(&CPU->rq[i].lock, false);
		}

		CPU->needs_relink = 0;
	}

	irq_spinlock_unlock(&CPU->lock, false);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		irq_spinlock_lock(&THREAD->lock, false);

		/* Update thread kernel accounting */
		THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
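		/*
		 * context_save() is assumed to return a nonzero value when it
		 * stores the context directly and zero when control returns
		 * here later via context_restore(); the branch below is thus
		 * taken by the thread that is being switched back in.
		 */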
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			irq_spinlock_unlock(&THREAD->lock, false);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 *
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 *
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 *
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);

	/* Not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function, using the new stack.
 * Handles the actual context switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	DEADLOCK_PROBE_INIT(p_joinwq);
	task_t *old_task = TASK;
	as_t *old_as = AS;

	ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
	ASSERT(CPU != NULL);

	/*
	 * Hold the current task and the address space to prevent their
	 * possible destruction should thread_destroy() be called on this or any
	 * other processor while the scheduler is still using them.
	 *
	 */
	if (old_task)
		task_hold(old_task);

	if (old_as)
		as_hold(old_as);

	if (THREAD) {
		/* Must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			irq_spinlock_unlock(&THREAD->lock, false);
			thread_ready(THREAD);
			break;

		case Exiting:
repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD, false);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 *
				 */
				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 *
					 */
					irq_spinlock_unlock(&THREAD->lock, false);
					delay(HZ);
					irq_spinlock_lock(&THREAD->lock, false);
					DEADLOCK_PROBE(p_joinwq,
					    DEADLOCK_THRESHOLD);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq,
				    WAKEUP_FIRST);
				irq_spinlock_unlock(&THREAD->join_wq.lock, false);

				THREAD->state = Lingering;
				irq_spinlock_unlock(&THREAD->lock, false);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 *
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 *
			 */
			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);

			irq_spinlock_unlock(&THREAD->lock, false);
			break;

		default:
			/*
			 * Entering state is unexpected.
			 *
			 */
			panic("tid%" PRIu64 ": unexpected state %s.",
			    THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	irq_spinlock_lock(&THREAD->lock, false);
	int priority = THREAD->priority;
	irq_spinlock_unlock(&THREAD->lock, false);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same, lots of work is
	 * avoided.
	 *
	 */
	if (TASK != THREAD->task) {
		as_t *new_as = THREAD->task->as;

		/*
		 * Note that it is possible for two tasks to share one address
		 * space.
		 *
		 */
		if (old_as != new_as) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 *
			 */
			as_switch(old_as, new_as);
		}

		TASK = THREAD->task;
		before_task_runs();
	}

	if (old_task)
		task_release(old_task);

	if (old_as)
		as_release(old_as);

	irq_spinlock_lock(&THREAD->lock, false);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
	    ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
	    THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 *
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 *
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);

	/* Not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of threads for the
 * CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	atomic_count_t average;
	atomic_count_t rdy;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes. Each time, get the most up-to-date counts.
	 *
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	rdy = atomic_get(&CPU->nrdy);

	if (average <= rdy)
		goto satisfied;

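	/*
	 * This CPU is below the average, so try to steal enough threads
	 * from the busier CPUs to bring it up to the average.
	 */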
	atomic_count_t count = average - rdy;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 *
	 */
	size_t acpu;
	size_t acpu_bias = 0;
	int rq;

	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
		for (acpu = 0; acpu < config.cpu_active; acpu++) {
			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * This does not require disabled interrupts, as kcpulb
			 * has THREAD_FLAG_WIRED and cannot migrate.
			 *
			 */
			if (CPU == cpu)
				continue;

			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
			if (cpu->rq[rq].n == 0) {
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
				continue;
			}

			thread_t *thread = NULL;

			/* Search rq from the back */
			link_t *link = cpu->rq[rq].rq_head.prev;

			while (link != &(cpu->rq[rq].rq_head)) {
				thread = (thread_t *) list_get_instance(link, thread_t, rq_link);

				/*
				 * We don't want to steal CPU-wired threads, nor
				 * threads that have already been stolen. The
				 * latter prevents threads from migrating between
				 * CPUs without ever being run. We also don't
				 * want to steal threads whose FPU context is
				 * still in the CPU.
				 *
				 */
				irq_spinlock_lock(&thread->lock, false);

				if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
				    && (!(thread->fpu_context_engaged))) {
					/*
					 * Remove thread from ready queue.
					 */
					irq_spinlock_unlock(&thread->lock, false);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					cpu->rq[rq].n--;
					list_remove(&thread->rq_link);

					break;
				}

				irq_spinlock_unlock(&thread->lock, false);

				link = link->prev;
				thread = NULL;
			}

			if (thread) {
				/*
				 * Ready thread on local CPU
				 *
				 */

				irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);

#ifdef KCPULB_VERBOSE
				printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
				    "nrdy=%ld, avg=%ld\n", CPU->id, thread->tid,
				    CPU->id, atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif

				thread->flags |= THREAD_FLAG_STOLEN;
				thread->state = Entering;

				irq_spinlock_unlock(&thread->lock, true);
				thread_ready(thread);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 *
				 */
				acpu_bias++;

				continue;
			} else
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);

		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 *
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 *
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues
 *
 */
void sched_print_list(void)
{
	size_t cpu;
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;

		irq_spinlock_lock(&cpus[cpu].lock, true);

		printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		unsigned int i;
		for (i = 0; i < RQ_COUNT; i++) {
			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
			if (cpus[cpu].rq[i].n == 0) {
				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
				continue;
			}

			printf("\trq[%u]: ", i);
			link_t *cur;
			for (cur = cpus[cpu].rq[i].rq_head.next;
			    cur != &(cpus[cpu].rq[i].rq_head);
			    cur = cur->next) {
				thread_t *thread = list_get_instance(cur, thread_t, rq_link);
				printf("%" PRIu64 "(%s) ", thread->tid,
				    thread_states[thread->state]);
			}
			printf("\n");

			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
		}

		irq_spinlock_unlock(&cpus[cpu].lock, true);
	}
}

/** @}
 */