/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>
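
/** Number of ready threads system-wide. */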
atomic_t nrdy;

/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
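    /*
     * Lazy scheme: keep the FPU enabled only for its current owner.
     * Any other thread traps on its first FPU instruction, and the
     * context switch is then done in scheduler_fpu_lazy_request().
     */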
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
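    /*
     * Eager scheme: switch the FPU context on every reschedule.
     * Restore the saved context, or initialize the FPU if this
     * thread has no saved context yet.
     */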
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
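/** Claim the FPU for the running thread
 *
 * Invoked on a lazy FPU switch, i.e. when a thread that does not own
 * the FPU executes an FPU instruction and traps. Save the context of
 * the previous owner (if any), restore or initialize the context of
 * THREAD and make THREAD the new owner of this CPU's FPU.
 *
 */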
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    if (CPU->fpu_owner != NULL) {
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
    }
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init();
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and is friendly to hyperthreading.
         *
         * - we might get an interrupt here that makes some thread runnable,
         *   in such a case we must wait for the next quantum to come
         */
        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

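        /*
         * Give t a quantum of (i + 1) * 10 ms: the lower the priority
         * of its run queue, the longer the quantum it receives.
         */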
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated again
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
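    /*
     * The queues are relinked only once needs_relink exceeds the
     * threshold; the counter is reset afterwards, so this relatively
     * costly reshuffle happens only once in a while.
     */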
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

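        /*
         * The thread is exiting: free its stacks, detach it from its
         * task and from the list of all threads, give up FPU ownership
         * if it holds it, and finally free the thread structure itself.
         */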
        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);

            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if (CPU->fpu_owner == THREAD)
                CPU->fpu_owner = NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);
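
    /*
     * Give relink_rq() a chance to relink this CPU's run queues,
     * using the priority of the chosen thread as the threshold.
     */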
    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_install(as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
    volatile ipl_t ipl;
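    /*
     * ipl is volatile presumably because context_save() returns twice,
     * setjmp()-style, and the value must not be cached in a register
     * across the second return.
     */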

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can have changed between two
     * passes. Each time get the most up to date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * Disabling interrupts is not required here, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

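            /*
             * Pick a victim from the back of the rq; these threads
             * would be scheduled last on the victim CPU anyway, so
             * stealing them is presumably the least disruptive choice.
             */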
            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads nor threads already stolen.
                 * The latter prevents threads from migrating between CPU's without ever being run.
                 * We don't want to steal threads whose FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /* We are going to mess with scheduler structures,
     * let's not be interrupted */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
               cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid,
                       thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}