/*
 * Copyright (c) 2010 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Scheduler and load balancing.
 *
 * This file contains the scheduler and kcpulb kernel thread which
 * performs load-balancing of per-CPU run queues.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <time/timeout.h>
#include <time/delay.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/cycle.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <fpu_context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <cpu.h>
#include <print.h>
#include <log.h>
#include <debug.h>
#include <stacktrace.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Carry out actions before new task runs. */
static void before_task_runs(void)
{
	before_task_runs_arch();
}

/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry.
 *
 */
static void before_thread_runs(void)
{
	before_thread_runs_arch();

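	/*
	 * With lazy FPU context switching, keep the FPU enabled only for the
	 * thread that currently owns it; any other thread acquires the context
	 * later via scheduler_fpu_lazy_request(). With eager switching, restore
	 * the saved context (or initialize a fresh one) on every switch.
	 */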
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#elif defined CONFIG_FPU
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = true;
	}
#endif

#ifdef CONFIG_UDEBUG
	if (THREAD->btrace) {
		istate_t *istate = THREAD->udebug.uspace_state;
		if (istate != NULL) {
			printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
			stack_trace_istate(istate);
		}

		THREAD->btrace = false;
	}
#endif
}

/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * had been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry.
 *
 */
static void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
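/** Acquire the FPU context for the current thread.
 *
 * Called when a thread that does not own the FPU needs it: the context of
 * the previous owner (if any) is saved, and the context of THREAD is
 * restored, or allocated and initialized, before THREAD becomes the new
 * owner.
 */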
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	irq_spinlock_lock(&CPU->lock, false);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		irq_spinlock_lock(&CPU->fpu_owner->lock, false);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);

		/* Don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = false;
		irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
		CPU->fpu_owner = NULL;
	}

	irq_spinlock_lock(&THREAD->lock, false);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			irq_spinlock_unlock(&THREAD->lock, false);
			irq_spinlock_unlock(&CPU->lock, false);
			THREAD->saved_fpu_context =
			    (fpu_context_t *) slab_alloc(fpu_context_slab, 0);

			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = true;
	}

	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = true;
	irq_spinlock_unlock(&THREAD->lock, false);

	irq_spinlock_unlock(&CPU->lock, false);
}
#endif /* CONFIG_FPU_LAZY */

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	ASSERT(CPU != NULL);

loop:

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there is nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI arrives.
		 * This saves energy and helps hyperthreading.
		 */
		irq_spinlock_lock(&CPU->lock, false);
		CPU->idle = true;
		irq_spinlock_unlock(&CPU->lock, false);
		interrupts_enable();

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will still go to sleep even
		 * though a runnable thread exists.
		 */
		cpu_sleep();
		interrupts_disable();
		goto loop;
	}

	unsigned int i;
	for (i = 0; i < RQ_COUNT; i++) {
		irq_spinlock_lock(&(CPU->rq[i].lock), false);
		if (CPU->rq[i].n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			irq_spinlock_unlock(&(CPU->rq[i].lock), false);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		CPU->rq[i].n--;

		/*
		 * Take the first thread from the queue.
		 */
		thread_t *thread = list_get_instance(
		    list_first(&CPU->rq[i].rq), thread_t, rq_link);
		list_remove(&thread->rq_link);

		irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

		thread->cpu = CPU;
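		/*
		 * Lower-priority queues (higher index i) yield a proportionally
		 * longer time quantum: (i + 1) * 10 ms converted to ticks.
		 */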
		thread->ticks = us2ticks((i + 1) * 10000);
		thread->priority = i;	/* Correct rq index */

		/*
		 * Clear the stolen flag so that the thread can be migrated
		 * again when the need for load balancing arises.
		 */
		thread->stolen = false;
		irq_spinlock_unlock(&thread->lock, false);

		return thread;
	}

	goto loop;
}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, in the end, threads with priority
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	list_t list;

	list_initialize(&list);
	irq_spinlock_lock(&CPU->lock, false);

	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
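		/*
		 * Promote each thread by one priority level: the contents of
		 * rq[i + 1] are appended to rq[i] for every queue from 'start'
		 * upwards.
		 */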
		int i;
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* Remember and empty rq[i + 1] */

			irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
			list_concat(&list, &CPU->rq[i + 1].rq);
			size_t n = CPU->rq[i + 1].n;
			CPU->rq[i + 1].n = 0;
			irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);

			/* Append rq[i + 1] to rq[i] */

			irq_spinlock_lock(&CPU->rq[i].lock, false);
			list_concat(&CPU->rq[i].rq, &list);
			CPU->rq[i].n += n;
			irq_spinlock_unlock(&CPU->rq[i].lock, false);
		}

		CPU->needs_relink = 0;
	}

	irq_spinlock_unlock(&CPU->lock, false);
}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		irq_spinlock_lock(&THREAD->lock, false);

		/* Update thread kernel accounting */
		THREAD->kcycles += get_cycle() - THREAD->last_cycle;

#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */

			/* Save current CPU cycle */
			THREAD->last_cycle = get_cycle();

			irq_spinlock_unlock(&THREAD->lock, false);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded
		 * here to facilitate scheduler() invocations from
		 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
		 *
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, AS
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 *
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 *
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
	    (uintptr_t) CPU->stack, STACK_SIZE);
	context_restore(&CPU->saved_context);

	/* Not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handling the actual context
 * switch to a new thread.
 *
 */
void scheduler_separated_stack(void)
{
	DEADLOCK_PROBE_INIT(p_joinwq);
	task_t *old_task = TASK;
	as_t *old_as = AS;

	ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
	ASSERT(CPU != NULL);

	/*
	 * Hold the current task and the address space to prevent their
	 * possible destruction should thread_destroy() be called on this or any
	 * other processor while the scheduler is still using them.
	 */
	if (old_task)
		task_hold(old_task);

	if (old_as)
		as_hold(old_as);

	if (THREAD) {
		/* Must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			irq_spinlock_unlock(&THREAD->lock, false);
			thread_ready(THREAD);
			break;

		case Exiting:
		repeat:
			if (THREAD->detached) {
				thread_destroy(THREAD, false);
			} else {
				/*
				 * The thread structure is kept allocated until
				 * somebody calls thread_detach() on it.
				 */
				if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 */
					irq_spinlock_unlock(&THREAD->lock, false);
					delay(HZ);
					irq_spinlock_lock(&THREAD->lock, false);
					DEADLOCK_PROBE(p_joinwq,
					    DEADLOCK_THRESHOLD);
					goto repeat;
				}
				_waitq_wakeup_unsafe(&THREAD->join_wq,
				    WAKEUP_FIRST);
				irq_spinlock_unlock(&THREAD->join_wq.lock, false);

				THREAD->state = Lingering;
				irq_spinlock_unlock(&THREAD->lock, false);
			}
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in
			 * waitq_sleep(). Address of wq->lock is kept in
			 * THREAD->sleep_queue.
			 */
			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);

			irq_spinlock_unlock(&THREAD->lock, false);
			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%" PRIu64 ": unexpected state %s.",
			    THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	irq_spinlock_lock(&THREAD->lock, false);
	int priority = THREAD->priority;
	irq_spinlock_unlock(&THREAD->lock, false);

	relink_rq(priority);

	/*
	 * If both the old and the new task are the same,
	 * lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *new_as = THREAD->task->as;

		/*
		 * Note that it is possible for two tasks
		 * to share one address space.
		 */
		if (old_as != new_as) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(old_as, new_as);
		}

		TASK = THREAD->task;
		before_task_runs();
	}

	if (old_task)
		task_release(old_task);

	if (old_as)
		as_release(old_as);

	irq_spinlock_lock(&THREAD->lock, false);
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	log(LF_OTHER, LVL_DEBUG,
	    "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
	    ", nrdy=%" PRIua ")", CPU->id, THREAD->tid, THREAD->priority,
	    THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
	 * thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);

	/* Not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply of
 * threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	atomic_count_t average;
	atomic_count_t rdy;

	/*
	 * Detach kcpulb as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation may have changed between two
	 * passes; each time, get the most up-to-date counts.
	 *
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	rdy = atomic_get(&CPU->nrdy);

	if (average <= rdy)
		goto satisfied;

	atomic_count_t count = average - rdy;
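
	/*
	 * 'average' is this CPU's fair share of ready threads (rounded up)
	 * and 'count' is how many threads must be stolen from busier CPUs
	 * to reach that share.
	 */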

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	size_t acpu;
	size_t acpu_bias = 0;
	int rq;

	for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
		for (acpu = 0; acpu < config.cpu_active; acpu++) {
			cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb
			 * has THREAD_FLAG_WIRED.
			 *
			 */
			if (CPU == cpu)
				continue;

			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			irq_spinlock_lock(&(cpu->rq[rq].lock), true);
			if (cpu->rq[rq].n == 0) {
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
				continue;
			}

			thread_t *thread = NULL;

			/* Search rq from the back */
			link_t *link = cpu->rq[rq].rq.head.prev;

			while (link != &(cpu->rq[rq].rq.head)) {
				thread = (thread_t *) list_get_instance(link,
				    thread_t, rq_link);

				/*
				 * Do not steal CPU-wired threads, threads
				 * already stolen, threads for which migration
				 * was temporarily disabled or threads whose
				 * FPU context is still in the CPU.
				 */
				irq_spinlock_lock(&thread->lock, false);

				if ((!thread->wired) && (!thread->stolen) &&
				    (!thread->nomigrate) &&
				    (!thread->fpu_context_engaged)) {
					/*
					 * Remove thread from ready queue.
					 */
					irq_spinlock_unlock(&thread->lock,
					    false);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					cpu->rq[rq].n--;
					list_remove(&thread->rq_link);

					break;
				}

				irq_spinlock_unlock(&thread->lock, false);

				link = link->prev;
				thread = NULL;
			}

			if (thread) {
				/*
				 * Ready thread on local CPU
				 */

				irq_spinlock_pass(&(cpu->rq[rq].lock),
				    &thread->lock);

#ifdef KCPULB_VERBOSE
				log(LF_OTHER, LVL_DEBUG,
				    "kcpulb%u: TID %" PRIu64 " -> cpu%u, "
				    "nrdy=%ld, avg=%ld", CPU->id, thread->tid,
				    CPU->id, atomic_get(&CPU->nrdy),
				    atomic_get(&nrdy) / config.cpu_active);
#endif

				thread->stolen = true;
				thread->state = Entering;

				irq_spinlock_unlock(&thread->lock, true);
				thread_ready(thread);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another
				 * CPU next time.
				 *
				 */
				acpu_bias++;

				continue;
			} else
				irq_spinlock_unlock(&(cpu->rq[rq].lock), true);

		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 *
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 *
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}
#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues
 *
 */
void sched_print_list(void)
{
	size_t cpu;
	for (cpu = 0; cpu < config.cpu_count; cpu++) {
		if (!cpus[cpu].active)
			continue;

		irq_spinlock_lock(&cpus[cpu].lock, true);

		printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
		    cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
		    cpus[cpu].needs_relink);

		unsigned int i;
		for (i = 0; i < RQ_COUNT; i++) {
			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
			if (cpus[cpu].rq[i].n == 0) {
				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
				continue;
			}

			printf("\trq[%u]: ", i);
			list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
			    thread) {
				printf("%" PRIu64 "(%s) ", thread->tid,
				    thread_states[thread->state]);
			}
			printf("\n");

			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
		}

		irq_spinlock_unlock(&cpus[cpu].lock, true);
	}
}

/** @}
 */