/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */

/** Take actions before new thread runs.
 *
 * Perform actions that need to be taken
 * before the newly selected thread is
 * passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
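	/*
	 * With CONFIG_FPU_LAZY, the FPU is enabled only if the incoming
	 * thread already owns this CPU's FPU state; otherwise it stays
	 * disabled and the context is switched on demand in
	 * scheduler_fpu_lazy_request(). Without lazy switching, the FPU
	 * context is restored (or freshly initialized) eagerly right here.
	 */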
#ifdef CONFIG_FPU_LAZY
	if (THREAD == CPU->fpu_owner)
		fpu_enable();
	else
		fpu_disable();
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
#endif
}

/** Take actions after THREAD has run.
 *
 * Perform actions that need to be taken
 * after the running thread has been
 * preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

#ifdef CONFIG_FPU_LAZY
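/** Switch the FPU context to the current thread (lazy FPU switching).
 *
 * Save the FPU context of the previous owner (if any), restore or allocate
 * and initialize the FPU context of THREAD, and make THREAD the new FPU
 * owner of this CPU.
 */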
void scheduler_fpu_lazy_request(void)
{
restart:
	fpu_enable();
	spinlock_lock(&CPU->lock);

	/* Save old context */
	if (CPU->fpu_owner != NULL) {
		spinlock_lock(&CPU->fpu_owner->lock);
		fpu_context_save(CPU->fpu_owner->saved_fpu_context);
		/* don't prevent migration */
		CPU->fpu_owner->fpu_context_engaged = 0;
		spinlock_unlock(&CPU->fpu_owner->lock);
		CPU->fpu_owner = NULL;
	}

	spinlock_lock(&THREAD->lock);
	if (THREAD->fpu_context_exists) {
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* Allocate FPU context */
		if (!THREAD->saved_fpu_context) {
			/* Might sleep */
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&CPU->lock);
			THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
			/* We may have switched CPUs during slab_alloc */
			goto restart;
		}
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
	CPU->fpu_owner = THREAD;
	THREAD->fpu_context_engaged = 1;
	spinlock_unlock(&THREAD->lock);

	spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}

/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();

	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * Since there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such a case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		cpu_sleep();
		goto loop;
	}

	interrupts_disable();

	for (i = 0; i < RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

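		/*
		 * The time quantum grows with the run queue index: a thread
		 * taken from rq[i] receives (i + 1) * 10000 microseconds,
		 * i.e. 10 ms per priority level, so lower-priority threads
		 * run less frequently but with a longer slice.
		 */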
		t->ticks = us2ticks((i + 1) * 10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when load balancing needs emerge.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}

/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * the respective pointers so that, as a result, threads with
 * priority greater than or equal to 'start' are moved to a
 * higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
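		/*
		 * Shift each run queue one level up: everything queued in
		 * rq[i + 1] is appended to rq[i], starting at 'start'.
		 * Threads waiting in low-priority queues thus gradually
		 * bubble towards higher-priority queues and cannot starve
		 * indefinitely.
		 */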
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}

/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
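		/*
		 * context_save() works like setjmp() here: it returns true
		 * when the context is first saved and false when execution
		 * resumes at this point later via context_restore(), which
		 * is when the thread leaves the scheduler.
		 */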
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);

			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}

/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function,
 * running on the new stack. It handles the
 * actual context switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
void scheduler_separated_stack(void)
{
	int priority;

	ASSERT(CPU != NULL);

	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			thread_destroy(THREAD);
			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);

	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If the old and the new task are the same, a lot of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);

	context_restore(&THREAD->saved_context);
	/* not reached */
}

#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread; it supervises the supply
 * of ready threads for the CPU it is wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, average, i, j, k = 0;
	ipl_t ipl;

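	/*
	 * The balancing loop has three entry points: 'loop' sleeps for a
	 * second before the next pass, 'not_satisfied' keeps stealing
	 * threads until this CPU holds its share of the system's ready
	 * threads, and 'satisfied' goes back to sleep.
	 */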
loop:
	/*
	 * Work in 1s intervals.
	 */
	thread_sleep(1);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPUs. Note that the situation can have changed between two
	 * passes. Each time get the most up-to-date counts.
	 */
	average = atomic_get(&nrdy) / config.cpu_active + 1;
	count = average - atomic_get(&CPU->nrdy);
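	/*
	 * 'average' is the mean number of ready threads per active CPU,
	 * rounded up; 'count' is how many threads this CPU is short of
	 * that average and therefore how many it will try to steal.
	 */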

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPUs first and the
	 * highest-priority queues on all CPUs last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * Doesn't require interrupt disabling because kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;
			if (atomic_get(&cpu->nrdy) <= average)
				continue;

			ipl = interrupts_disable();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				interrupts_restore(ipl);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPUs without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					atomic_dec(&cpu->nrdy);
					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on the local CPU.
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				interrupts_restore(ipl);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			interrupts_restore(ipl);
		}
	}

	if (atomic_get(&CPU->nrdy)) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Give up this turn.
		 */
		goto loop;
	}

	goto not_satisfied;

satisfied:
	goto loop;
}

#endif /* CONFIG_SMP */

/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu, i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * so let's not be interrupted. */
	ipl = interrupts_disable();
	printf("Scheduler dump:\n");
	for (cpu = 0; cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: nrdy: %d, needs_relink: %d\n",
		       cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

		for (i = 0; i < RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}

	interrupts_restore(ipl);
}