/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>

#ifdef __SMP__
#include <arch/smp/atomic.h>
#endif /* __SMP__ */

/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the like must be protected by a spinlock.
 */

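/*
 * nrdy counts the ready threads of the whole system; each cpu_t additionally
 * keeps its own nrdy for its private run queues, guarded by that CPU's lock.
 * Per the note above, the global nrdy is only touched under nrdylock.
 */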
spinlock_t nrdylock;
volatile int nrdy;

void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}

/*
 * Find a thread to run on this CPU. Toggles CPU priority internally and
 * returns cpu_priority_high()'d; loops, possibly putting the CPU to sleep,
 * until a ready thread is found, so it never returns NULL.
 */
thread_t *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&the->cpu->lock);
    n = the->cpu->nrdy;
    spinlock_unlock(&the->cpu->lock);

    cpu_priority_low();

    if (n == 0) {
#ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set the CPU-private flag that kcpulb has been started.
         */
        if (test_and_set(&the->cpu->kcpulbstarted) == 0) {
            waitq_wakeup(&the->cpu->kcpulb_wq, 0);
            goto loop;
        }
#endif /* __SMP__ */

        /*
         * Since there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This saves energy and benefits hyperthreading.
         * On the other hand, some hardware interrupts may be missed.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i < RQ_COUNT; i++) {
        r = &the->cpu->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&the->cpu->lock);
        the->cpu->nrdy--;
        spinlock_unlock(&the->cpu->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = the->cpu;

        t->ticks = us2ticks((i + 1) * 10000);   /* quantum of (i+1)*10 ms */
        t->pri = i;     /* correct the rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated
         * when the need for load balancing arises.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    goto loop;
}


/*
 * This function prevents low-priority threads from starving in rq's.
 * When it decides to relink the rq's, it reconnects the respective
 * pointers so that, as a result, threads with 'pri' greater than
 * 'start' are each moved up into the next higher-priority queue.
 */
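/*
 * Example (a sketch, assuming RQ_COUNT == 4 and start == 1):
 * rq[2] is appended to rq[1] and emptied, then rq[3] is appended to the
 * now-empty rq[2]. Every thread in a queue with index greater than
 * 'start' thus climbs exactly one queue per relink.
 */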
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&the->cpu->lock);
    if (the->cpu->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i < RQ_COUNT - 1; i++) {
            /* remember and empty rq[i + 1] */
            r = &the->cpu->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &the->cpu->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        the->cpu->needs_relink = 0;
    }
    spinlock_unlock(&the->cpu->lock);
}

/*
 * The scheduler proper. Saves the context of the current thread, if any,
 * and passes control to scheduler_separated_stack() on the CPU's private
 * stack.
 */
void scheduler(void)
{
    volatile pri_t pri;

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (the->thread) {
        spinlock_lock(&the->thread->lock);
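        /*
         * context_save() works in the spirit of setjmp()/longjmp(), though
         * with the return values inverted: it returns true on the initial
         * call and false when control comes back here through
         * context_restore() of the saved context.
         */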
        if (!context_save(&the->thread->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            spinlock_unlock(&the->thread->lock);
            cpu_priority_restore(the->thread->saved_context.pri);
            return;
        }
        the->thread->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&the->cpu->saved_context);
    the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE - 8];
    the->cpu->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&the->cpu->saved_context);
    /* not reached */
}

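/*
 * Second half of the scheduler, running on the CPU's private stack.
 * Disposes of the outgoing thread according to its state, picks the next
 * thread via find_best_thread(), switches the task's vm mapping if needed,
 * and restores the chosen thread's context.
 */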
void scheduler_separated_stack(void)
{
    int priority;

    if (the->thread) {
        switch (the->thread->state) {
        case Running:
            the->thread->state = Ready;
            spinlock_unlock(&the->thread->lock);
            thread_ready(the->thread);
            break;

        case Exiting:
            frame_free((__address) the->thread->kstack);
            if (the->thread->ustack) {
                frame_free((__address) the->thread->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&the->task->lock);
            list_remove(&the->thread->th_link);
            spinlock_unlock(&the->task->lock);

            spinlock_unlock(&the->thread->lock);

            spinlock_lock(&threads_lock);
            list_remove(&the->thread->threads_link);
            spinlock_unlock(&threads_lock);

            free(the->thread);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            the->thread->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in the->thread->sleep_queue.
             */
            spinlock_unlock(&the->thread->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (the->thread->call_me) {
                the->thread->call_me(the->thread->call_me_with);
                the->thread->call_me = NULL;
                the->thread->call_me_with = NULL;
            }

            spinlock_unlock(&the->thread->lock);

            break;

        default:
            /*
             * Entering the scheduler in this state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", the->thread->tid, thread_states[the->thread->state]);
            break;
        }
        the->thread = NULL;
    }

    the->thread = find_best_thread();

    spinlock_lock(&the->thread->lock);
    priority = the->thread->pri;
    spinlock_unlock(&the->thread->lock);

    relink_rq(priority);

    spinlock_lock(&the->thread->lock);

    /*
     * If the old and the new task are the same, a lot of work is avoided.
     */
    if (the->task != the->thread->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (the->task) {
            spinlock_lock(&the->task->lock);
            m1 = the->task->vm;
            spinlock_unlock(&the->task->lock);
        }

        spinlock_lock(&the->thread->task->lock);
        m2 = the->thread->task->vm;
        spinlock_unlock(&the->thread->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both the tasks and their vm mappings are different.
             * Replace the old mapping with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        the->task = the->thread->task;
    }

    the->thread->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", the->cpu->id, the->thread->tid, the->thread->pri, the->thread->ticks, the->cpu->nrdy);
#endif

    context_restore(&the->thread->saved_context);
    /* not reached */
}

#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises the supply of ready threads for the CPU it is wired to.
 */
void kcpulb(void *arg)
{
    thread_t *t;
    int count, i, j, k = 0;
    pri_t pri;

loop:
    /*
     * Sleep until there's some work to do.
     */
    waitq_sleep(&the->cpu->kcpulb_wq);

not_satisfied:
    /*
     * Calculate the number of threads that will be migrated/stolen from
     * other CPUs. Note that the situation may have changed between two
     * passes; get the most up-to-date counts each time.
     */
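    /*
     * count is this CPU's deficit with respect to the per-CPU average:
     * the system-wide nrdy divided evenly among the active CPUs, minus
     * the number of threads already ready on this CPU.
     */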
    pri = cpu_priority_high();
    spinlock_lock(&the->cpu->lock);
    count = nrdy / config.cpu_active;
    count -= the->cpu->nrdy;
    spinlock_unlock(&the->cpu->lock);
    cpu_priority_restore(pri);

    if (count <= 0)
        goto satisfied;

    /*
     * Search the lowest-priority queues on all CPUs first and the
     * highest-priority queues on all CPUs last.
     */
    for (j = RQ_COUNT - 1; j >= 0; j--) {
        for (i = 0; i < config.cpu_active; i++) {
            link_t *l;
            runq_t *r;
            cpu_t *cpu;

            cpu = &cpus[(i + k) % config.cpu_active];
            r = &cpu->rq[j];

            /*
             * Not interested in ourselves.
             * No interrupt disabling is needed here, because kcpulb is X_WIRED.
             */
            if (the->cpu == cpu)
                continue;

restart:
            pri = cpu_priority_high();
            spinlock_lock(&r->lock);
            if (r->n == 0) {
                spinlock_unlock(&r->lock);
                cpu_priority_restore(pri);
                continue;
            }

            t = NULL;
            l = r->rq_head.prev;    /* search rq from the back */
            while (l != &r->rq_head) {
                t = list_get_instance(l, thread_t, rq_link);
                /*
                 * We don't want to steal CPU-wired threads, nor threads that
                 * have already been stolen. The latter prevents threads from
                 * migrating between CPUs without ever being run.
                 */
                spinlock_lock(&t->lock);
                if (!(t->flags & (X_WIRED | X_STOLEN))) {
                    /*
                     * Remove t from r.
                     */
                    spinlock_unlock(&t->lock);

                    /*
                     * Here we have to avoid deadlock with relink_rq(),
                     * because it locks cpu and r in a different order than we do.
                     */
                    if (!spinlock_trylock(&cpu->lock)) {
                        /* Release all locks and try again. */
                        spinlock_unlock(&r->lock);
                        cpu_priority_restore(pri);
                        goto restart;
                    }
                    cpu->nrdy--;
                    spinlock_unlock(&cpu->lock);

                    spinlock_lock(&nrdylock);
                    nrdy--;
                    spinlock_unlock(&nrdylock);

                    r->n--;
                    list_remove(&t->rq_link);

                    break;
                }
                spinlock_unlock(&t->lock);
                l = l->prev;
                t = NULL;
            }
            spinlock_unlock(&r->lock);

            if (t) {
                /*
                 * Ready t on the local CPU.
                 */
                spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", the->cpu->id, t->tid, the->cpu->id, the->cpu->nrdy, nrdy / config.cpu_active);
#endif
                t->flags |= X_STOLEN;
                spinlock_unlock(&t->lock);

                thread_ready(t);

                cpu_priority_restore(pri);

                if (--count == 0)
                    goto satisfied;

                /*
                 * We are not satisfied yet, focus on another CPU next time.
                 */
                k++;

                continue;
            }
            cpu_priority_restore(pri);
        }
    }

    if (the->cpu->nrdy) {
        /*
         * Be a little bit light-weight and let the migrated threads run.
         */
        scheduler();
    } else {
        /*
         * We failed to migrate a single thread.
         * Something more sophisticated should be done.
         */
        scheduler();
    }
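    /*
     * Note that both branches above currently just yield via scheduler();
     * the else branch is a placeholder for the smarter strategy its
     * comment asks for.
     */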

    goto not_satisfied;

satisfied:
    /*
     * Tell find_best_thread() to wake us up later again.
     */
    the->cpu->kcpulbstarted = 0;
    goto loop;
}

#endif /* __SMP__ */