Changes in kernel/generic/src/proc/thread.c [f22dc820:8ad7dd1] in mainline
File: kernel/generic/src/proc/thread.c — 1 file edited
kernel/generic/src/proc/thread.c (modified) (11 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/thread.c
rf22dc820 r8ad7dd1 46 46 #include <synch/spinlock.h> 47 47 #include <synch/waitq.h> 48 #include <synch/workqueue.h> 49 #include <synch/rcu.h> 48 50 #include <cpu.h> 49 51 #include <str.h> … … 192 194 kmflags &= ~FRAME_HIGHMEM; 193 195 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 if (!thread->kstack) { 196 uintptr_t stack_phys = 197 frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1); 198 if (!stack_phys) { 196 199 #ifdef CONFIG_FPU 197 200 if (thread->saved_fpu_context) … … 201 204 } 202 205 206 thread->kstack = (uint8_t *) PA2KA(stack_phys); 207 203 208 #ifdef CONFIG_UDEBUG 204 209 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE); … … 216 221 thr_destructor_arch(thread); 217 222 218 frame_free(KA2PA(thread->kstack) );223 frame_free(KA2PA(thread->kstack), STACK_FRAMES); 219 224 220 225 #ifdef CONFIG_FPU … … 260 265 } 261 266 267 /** Invoked right before thread_ready() readies the thread. thread is locked. */ 268 static void before_thread_is_ready(thread_t *thread) 269 { 270 ASSERT(irq_spinlock_locked(&thread->lock)); 271 workq_before_thread_is_ready(thread); 272 } 273 262 274 /** Make thread ready 263 275 * … … 272 284 273 285 ASSERT(thread->state != Ready); 286 287 before_thread_is_ready(thread); 274 288 275 289 int i = (thread->priority < RQ_COUNT - 1) ? 
276 290 ++thread->priority : thread->priority; 277 291 278 292 cpu_t *cpu; 279 293 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) { 294 /* Cannot ready to another CPU */ 280 295 ASSERT(thread->cpu != NULL); 281 296 cpu = thread->cpu; 282 } else 297 } else if (thread->stolen) { 298 /* Ready to the stealing CPU */ 283 299 cpu = CPU; 300 } else if (thread->cpu) { 301 /* Prefer the CPU on which the thread ran last */ 302 ASSERT(thread->cpu != NULL); 303 cpu = thread->cpu; 304 } else { 305 cpu = CPU; 306 } 284 307 285 308 thread->state = Ready; … … 297 320 298 321 atomic_inc(&nrdy); 299 // FIXME: Why is the avg value not used300 // avg = atomic_get(&nrdy) / config.cpu_active;301 322 atomic_inc(&cpu->nrdy); 302 323 } … … 374 395 thread->task = task; 375 396 397 thread->workq = NULL; 398 376 399 thread->fpu_context_exists = false; 377 400 thread->fpu_context_engaged = false; … … 388 411 /* Might depend on previous initialization */ 389 412 thread_create_arch(thread); 413 414 rcu_thread_init(thread); 390 415 391 416 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) … … 498 523 */ 499 524 ipc_cleanup(); 500 futex_ cleanup();525 futex_task_cleanup(); 501 526 LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid); 502 527 } … … 518 543 /* Not reached */ 519 544 while (true); 545 } 546 547 /** Interrupts an existing thread so that it may exit as soon as possible. 548 * 549 * Threads that are blocked waiting for a synchronization primitive 550 * are woken up with a return code of ESYNCH_INTERRUPTED if the 551 * blocking call was interruptable. See waitq_sleep_timeout(). 552 * 553 * The caller must guarantee the thread object is valid during the entire 554 * function, eg by holding the threads_lock lock. 555 * 556 * Interrupted threads automatically exit when returning back to user space. 557 * 558 * @param thread A valid thread object. The caller must guarantee it 559 * will remain valid until thread_interrupt() exits. 
560 */ 561 void thread_interrupt(thread_t *thread) 562 { 563 ASSERT(thread != NULL); 564 565 irq_spinlock_lock(&thread->lock, true); 566 567 thread->interrupted = true; 568 bool sleeping = (thread->state == Sleeping); 569 570 irq_spinlock_unlock(&thread->lock, true); 571 572 if (sleeping) 573 waitq_interrupt_sleep(thread); 574 } 575 576 /** Returns true if the thread was interrupted. 577 * 578 * @param thread A valid thread object. User must guarantee it will 579 * be alive during the entire call. 580 * @return true if the thread was already interrupted via thread_interrupt(). 581 */ 582 bool thread_interrupted(thread_t *thread) 583 { 584 ASSERT(thread != NULL); 585 586 bool interrupted; 587 588 irq_spinlock_lock(&thread->lock, true); 589 interrupted = thread->interrupted; 590 irq_spinlock_unlock(&thread->lock, true); 591 592 return interrupted; 520 593 } 521 594
Note: See TracChangeset for help on using the changeset viewer.
