/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <adt/list.h>
#include <panic.h>
#include <typedefs.h>
#include <cpu.h>
#include <print.h>
#include <debug.h>

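/** Number of ready threads in the system (summed over all CPUs). */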
atomic_t nrdy;

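/*
 * With CONFIG_FPU_LAZY, the FPU is enabled only if the incoming thread
 * already owns it; otherwise it is kept disabled, so that the thread's
 * first FPU instruction traps and the context is switched on demand in
 * scheduler_fpu_lazy_request() below. Without CONFIG_FPU_LAZY, the FPU
 * context is restored eagerly on every switch.
 */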
/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 */
void before_thread_runs(void)
{
    before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
    if (THREAD == CPU->fpu_owner)
        fpu_enable();
    else
        fpu_disable();
#else
    fpu_enable();
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&(THREAD->saved_fpu_context));
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
#endif
}

#ifdef CONFIG_FPU_LAZY
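/** Claim the FPU for the current thread
 *
 * Save the FPU context of the previous owner, if any, and restore or
 * initialize the context of THREAD, which becomes the new owner.
 * Presumably invoked when a thread touches the FPU without owning it
 * (i.e. from the architecture's FPU trap handler).
 */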
void scheduler_fpu_lazy_request(void)
{
    fpu_enable();
    spinlock_lock(&CPU->lock);

    /* Save old context */
    if (CPU->fpu_owner != NULL) {
        spinlock_lock(&CPU->fpu_owner->lock);
        fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
        /* don't prevent migration */
        CPU->fpu_owner->fpu_context_engaged = 0;
        spinlock_unlock(&CPU->fpu_owner->lock);
    }

    spinlock_lock(&THREAD->lock);
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init(&(THREAD->saved_fpu_context));
        THREAD->fpu_context_exists = 1;
    }
    CPU->fpu_owner = THREAD;
    THREAD->fpu_context_engaged = 1;

    spinlock_unlock(&THREAD->lock);
    spinlock_unlock(&CPU->lock);
}
#endif

/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 */
void scheduler_init(void)
{
}


/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy.
 *
 * @return Thread to be scheduled.
 *
 */
static thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i;

    ASSERT(CPU != NULL);

loop:
    interrupts_enable();

    if (atomic_get(&CPU->nrdy) == 0) {
        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         */

        /*
         * An interrupt might occur right now and wake up a thread.
         * In such a case, the CPU will continue to go to sleep
         * even though there is a runnable thread.
         */

        cpu_sleep();
        goto loop;
    }

    interrupts_disable();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        atomic_dec(&CPU->nrdy);
        atomic_dec(&nrdy);
        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

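        /*
         * Give the thread a time quantum proportional to its queue index:
         * a thread taken from rq[i] receives (i + 1) * 10 ms.
         */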
        t->ticks = us2ticks((i + 1) * 10000);
        t->priority = i;    /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when the need
         * for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
 */
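/*
 * Example: with start = 0, the contents of rq[1] are appended to rq[0],
 * then rq[2] to rq[1], and so on; each affected thread thus moves up by
 * exactly one priority queue.
 */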
static void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);

}


/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handles the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 */
static void scheduler_separated_stack(void)
{
    int priority;

    ASSERT(CPU != NULL);

    if (THREAD) {
        switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            thread_destroy(THREAD);
            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->priority = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    spinlock_lock(&THREAD->lock);
    priority = THREAD->priority;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        as_t *as1 = NULL;
        as_t *as2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            as1 = TASK->as;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        as2 = THREAD->task->as;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one address space.
         */
        if (as1 != as2) {
            /*
             * Both tasks and address spaces are different.
             * Replace the old one with the new one.
             */
            as_switch(as1, as2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif

    /*
     * Copy the knowledge of CPU, TASK, THREAD and the preemption counter
     * to the thread's kernel stack.
     */
    the_copy(THE, (the_t *) THREAD->kstack);

    context_restore(&THREAD->saved_context);
    /* not reached */
}


/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 */
void scheduler(void)
{
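    /*
     * ipl is volatile because context_save() below returns twice, and the
     * value must not be cached in a register across the second return
     * (cf. setjmp()).
     */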
    volatile ipl_t ipl;

    ASSERT(CPU != NULL);

    ipl = interrupts_disable();

    if (atomic_get(&haltstate))
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
        fpu_context_save(&(THREAD->saved_fpu_context));
#endif
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler().
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            interrupts_restore(THREAD->saved_context.ipl);
            return;
        }

        /*
         * Interrupt priority level of the preempted thread is recorded here
         * to facilitate scheduler() invocations from interrupts_disable()'d
         * code (e.g. waitq_sleep_timeout()).
         */
        THREAD->saved_context.ipl = ipl;
    }

    /*
     * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
     * and the preemption counter. At this point THE could be coming either
     * from THREAD's or CPU's stack.
     */
    the_copy(THE, (the_t *) CPU->stack);

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
    context_restore(&CPU->saved_context);
    /* not reached */
}


#ifdef CONFIG_SMP
/** Load balancing thread
 *
 * SMP load balancing thread, supervising the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
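/*
 * Example: with 4 active CPUs and nrdy = 7, average = 7 / 4 + 1 = 2;
 * a CPU with a single ready thread will then try to steal one thread
 * from CPUs whose nrdy exceeds that average.
 */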
void kcpulb(void *arg)
{
    thread_t *t;
    int count, average, i, j, k = 0;
    ipl_t ipl;

loop:
    /*
     * Work in 1s intervals.
     */
    thread_sleep(1);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPU's. Note that the situation can change between two
     * passes. Each time, get the most up-to-date counts.
     */
    average = atomic_get(&nrdy) / config.cpu_active + 1;
    count = average - atomic_get(&CPU->nrdy);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPU's first and the
     * highest-priority queues on all CPU's last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];

            /*
             * Not interested in ourselves.
             * This doesn't require interrupt disabling, because kcpulb is X_WIRED.
             */
            if (CPU == cpu)
                continue;
            if (atomic_get(&cpu->nrdy) <= average)
                continue;

            ipl = interrupts_disable();
            r = &cpu->rq[j];
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                interrupts_restore(ipl);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads
                 * that have already been stolen. The latter prevents
                 * threads from migrating between CPU's without ever
                 * being run. We also don't want to steal threads whose
                 * FPU context is still in the CPU.
                 */
                spinlock_lock(&t->lock);
                if ((!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    atomic_dec(&cpu->nrdy);
                    atomic_dec(&nrdy);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on local CPU
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                interrupts_restore(ipl);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            interrupts_restore(ipl);
        }
    }

    if (atomic_get(&CPU->nrdy)) {
        /*
         * Be a little bit light-weight and let migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Give up this turn.
         */
        goto loop;
    }

    goto not_satisfied;

satisfied:
    goto loop;
}

#endif /* CONFIG_SMP */


/** Print information about threads & scheduler queues */
void sched_print_list(void)
{
    ipl_t ipl;
    int cpu, i;
    runq_t *r;
    thread_t *t;
    link_t *cur;

    /*
     * We are going to mess with scheduler structures,
     * so let's not be interrupted.
     */
    ipl = interrupts_disable();
    printf("*********** Scheduler dump ***********\n");
    for (cpu = 0; cpu < config.cpu_count; cpu++) {
        if (!cpus[cpu].active)
            continue;
        spinlock_lock(&cpus[cpu].lock);
        printf("cpu%d: nrdy: %d needs_relink: %d\n",
            cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);

        for (i = 0; i < RQ_COUNT; i++) {
            r = &cpus[cpu].rq[i];
            spinlock_lock(&r->lock);
            if (!r->n) {
                spinlock_unlock(&r->lock);
                continue;
            }
            printf("\tRq %d: ", i);
            for (cur = r->rq_head.next; cur != &r->rq_head; cur = cur->next) {
                t = list_get_instance(cur, thread_t, rq_link);
                printf("%d(%s) ", t->tid, thread_states[t->state]);
            }
            printf("\n");
            spinlock_unlock(&r->lock);
        }
        spinlock_unlock(&cpus[cpu].lock);
    }

    interrupts_restore(ipl);
}