Changeset 6eef3c4 in mainline for kernel/generic/src/proc
- Timestamp:
- 2012-06-20T16:18:37Z (13 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8b36bf2, f22dc820
- Parents:
- abfc9f3
- Location:
- kernel/generic/src/proc
- Files:
- 3 edited
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/proc/program.c
rabfc9f3 r6eef3c4 102 102 */ 103 103 prg->main_thread = thread_create(uinit, kernel_uarg, prg->task, 104 THREAD_FLAG_USPACE, "uinit" , false);104 THREAD_FLAG_USPACE, "uinit"); 105 105 if (!prg->main_thread) { 106 106 free(kernel_uarg); -
kernel/generic/src/proc/scheduler.c
rabfc9f3 r6eef3c4 98 98 else { 99 99 fpu_init(); 100 THREAD->fpu_context_exists = 1;100 THREAD->fpu_context_exists = true; 101 101 } 102 102 #endif … … 142 142 143 143 /* Don't prevent migration */ 144 CPU->fpu_owner->fpu_context_engaged = 0;144 CPU->fpu_owner->fpu_context_engaged = false; 145 145 irq_spinlock_unlock(&CPU->fpu_owner->lock, false); 146 146 CPU->fpu_owner = NULL; … … 163 163 } 164 164 fpu_init(); 165 THREAD->fpu_context_exists = 1;165 THREAD->fpu_context_exists = true; 166 166 } 167 167 168 168 CPU->fpu_owner = THREAD; 169 THREAD->fpu_context_engaged = 1;169 THREAD->fpu_context_engaged = true; 170 170 irq_spinlock_unlock(&THREAD->lock, false); 171 171 … … 248 248 249 249 /* 250 * Clear the THREAD_FLAG_STOLEN flag so thatt can be migrated250 * Clear the stolen flag so that it can be migrated 251 251 * when load balancing needs emerge. 252 252 */ 253 thread-> flags &= ~THREAD_FLAG_STOLEN;253 thread->stolen = false; 254 254 irq_spinlock_unlock(&thread->lock, false); 255 255 … … 630 630 irq_spinlock_lock(&thread->lock, false); 631 631 632 if (!(thread->flags & THREAD_FLAG_WIRED) && 633 !(thread->flags & THREAD_FLAG_STOLEN) && 634 !thread->nomigrate && 635 !thread->fpu_context_engaged) { 632 if ((!thread->wired) && (!thread->stolen) && 633 (!thread->nomigrate) && 634 (!thread->fpu_context_engaged)) { 636 635 /* 637 636 * Remove thread from ready queue. … … 670 669 #endif 671 670 672 thread-> flags |= THREAD_FLAG_STOLEN;671 thread->stolen = true; 673 672 thread->state = Entering; 674 673 -
kernel/generic/src/proc/thread.c
rabfc9f3 r6eef3c4 191 191 kmflags |= FRAME_LOWMEM; 192 192 kmflags &= ~FRAME_HIGHMEM; 193 193 194 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 195 if (!thread->kstack) { … … 247 247 } 248 248 249 /** Wire thread to the given CPU 250 * 251 * @param cpu CPU to wire the thread to. 252 * 253 */ 254 void thread_wire(thread_t *thread, cpu_t *cpu) 255 { 256 irq_spinlock_lock(&thread->lock, true); 257 thread->cpu = cpu; 258 thread->wired = true; 259 irq_spinlock_unlock(&thread->lock, true); 260 } 261 249 262 /** Make thread ready 250 263 * … … 260 273 ASSERT(thread->state != Ready); 261 274 262 int i = (thread->priority < RQ_COUNT - 1) 263 ?++thread->priority : thread->priority;264 265 cpu_t *cpu = CPU;266 if (thread-> flags & THREAD_FLAG_WIRED) {275 int i = (thread->priority < RQ_COUNT - 1) ? 276 ++thread->priority : thread->priority; 277 278 cpu_t *cpu; 279 if (thread->wired) { 267 280 ASSERT(thread->cpu != NULL); 268 281 cpu = thread->cpu; 269 } 282 } else 283 cpu = CPU; 284 270 285 thread->state = Ready; 271 286 … … 298 313 * @param flags Thread flags. 299 314 * @param name Symbolic name (a copy is made). 300 * @param uncounted Thread's accounting doesn't affect accumulated task301 * accounting.302 315 * 303 316 * @return New thread's structure on success, NULL on failure. 
… … 305 318 */ 306 319 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, 307 unsigned int flags, const char *name, bool uncounted)320 thread_flags_t flags, const char *name) 308 321 { 309 322 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0); … … 335 348 thread->ucycles = 0; 336 349 thread->kcycles = 0; 337 thread->uncounted = uncounted; 350 thread->uncounted = 351 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED); 338 352 thread->priority = -1; /* Start in rq[0] */ 339 353 thread->cpu = NULL; 340 thread->flags = flags; 354 thread->wired = false; 355 thread->stolen = false; 356 thread->uspace = 357 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE); 358 341 359 thread->nomigrate = 0; 342 360 thread->state = Entering; … … 356 374 thread->task = task; 357 375 358 thread->fpu_context_exists = 0;359 thread->fpu_context_engaged = 0;376 thread->fpu_context_exists = false; 377 thread->fpu_context_engaged = false; 360 378 361 379 avltree_node_initialize(&thread->threads_tree_node); … … 371 389 thread_create_arch(thread); 372 390 373 if ( !(flags & THREAD_FLAG_NOATTACH))391 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) 374 392 thread_attach(thread, task); 375 393 … … 437 455 438 456 /* Must not count kbox thread into lifecount */ 439 if (thread-> flags & THREAD_FLAG_USPACE)457 if (thread->uspace) 440 458 atomic_inc(&task->lifecount); 441 459 … … 459 477 void thread_exit(void) 460 478 { 461 if (THREAD-> flags & THREAD_FLAG_USPACE) {479 if (THREAD->uspace) { 462 480 #ifdef CONFIG_UDEBUG 463 481 /* Generate udebug THREAD_E event */ 464 482 udebug_thread_e_event(); 465 483 466 484 /* 467 485 * This thread will not execute any code or system calls from … … 506 524 { 507 525 ASSERT(THREAD); 508 526 509 527 THREAD->nomigrate++; 510 528 } … … 515 533 ASSERT(THREAD); 516 534 ASSERT(THREAD->nomigrate > 0); 517 518 THREAD->nomigrate--; 535 536 if (THREAD->nomigrate > 0) 537 THREAD->nomigrate--; 519 538 } 520 539 … … 865 884 866 
885 thread_t *thread = thread_create(uinit, kernel_uarg, TASK, 867 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf , false);886 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf); 868 887 if (thread) { 869 888 if (uspace_thread_id != NULL) {
Note:
See TracChangeset
for help on using the changeset viewer.