Changeset 6eef3c4 in mainline for kernel/generic/src
- Timestamp: 2012-06-20T16:18:37Z (13 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 8b36bf2, f22dc820
- Parents: abfc9f3
- Location: kernel/generic/src
- Files: 9 edited
kernel/generic/src/console/cmd.c
rabfc9f3 → r6eef3c4

  thread_t *thread;
  if ((thread = thread_create((void (*)(void *)) cmd_call0,
-     (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) {
-         irq_spinlock_lock(&thread->lock, true);
-         thread->cpu = &cpus[i];
-         irq_spinlock_unlock(&thread->lock, true);
-
+     (void *) argv, TASK, THREAD_FLAG_NONE, "call0"))) {
          printf("cpu%u: ", i);
-
+         thread_wire(thread, &cpus[i]);
          thread_ready(thread);
          thread_join(thread);
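The cmd.c hunk shows the new calling convention in one place: the caller no longer passes THREAD_FLAG_WIRED and pokes thread->cpu under the thread lock, it creates the thread with ordinary flags and then calls thread_wire() before thread_ready(). A minimal sketch of that pattern, assuming the usual HelenOS kernel headers; the helper run_wired() and the thread name "wired_call" are made up for illustration:

    #include <proc/thread.h>  /* thread_create(), thread_wire(), thread_ready(), thread_join() */
    #include <proc/task.h>    /* TASK */
    #include <cpu.h>          /* cpus[] */
    #include <print.h>        /* printf() */

    /* Hypothetical helper: run func(arg) in a kernel thread pinned to cpus[i]
     * and wait for it to finish. Mirrors the rewritten cmd.c code path. */
    static void run_wired(void (*func)(void *), void *arg, unsigned int i)
    {
        thread_t *thread = thread_create(func, arg, TASK,
            THREAD_FLAG_NONE, "wired_call");
        if (thread == NULL) {
            printf("Unable to create thread for cpu%u\n", i);
            return;
        }

        /* Replaces the old manual thread->cpu assignment that was done
         * under irq_spinlock_lock(&thread->lock, true). */
        thread_wire(thread, &cpus[i]);
        thread_ready(thread);
        thread_join(thread);
    }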
kernel/generic/src/ipc/kbox.c
rabfc9f3 → r6eef3c4

  /* Create a kbox thread */
- thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 0,
-     "kbox", false);
+ thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task,
+     THREAD_FLAG_NONE, "kbox");
  if (!kb_thread) {
      mutex_unlock(&task->kb.cleanup_lock);
kernel/generic/src/main/kinit.c
rabfc9f3 → r6eef3c4

   * Just a beautification.
   */
- thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true);
+ thread = thread_create(kmp, NULL, TASK,
+     THREAD_FLAG_UNCOUNTED, "kmp");
  if (thread != NULL) {
-     irq_spinlock_lock(&thread->lock, false);
-     thread->cpu = &cpus[0];
-     irq_spinlock_unlock(&thread->lock, false);
+     thread_wire(thread, &cpus[0]);
      thread_ready(thread);
  } else
…

  for (i = 0; i < config.cpu_count; i++) {
-     thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true);
+     thread = thread_create(kcpulb, NULL, TASK,
+         THREAD_FLAG_UNCOUNTED, "kcpulb");
      if (thread != NULL) {
-         irq_spinlock_lock(&thread->lock, false);
-         thread->cpu = &cpus[i];
-         irq_spinlock_unlock(&thread->lock, false);
+         thread_wire(thread, &cpus[i]);
          thread_ready(thread);
      } else
…

  /* Start thread computing system load */
- thread = thread_create(kload, NULL, TASK, 0, "kload", false);
+ thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
+     "kload");
  if (thread != NULL)
      thread_ready(thread);
…
   * Create kernel console.
   */
- thread = thread_create(kconsole_thread, NULL, TASK, 0, "kconsole", false);
+ thread = thread_create(kconsole_thread, NULL, TASK,
+     THREAD_FLAG_NONE, "kconsole");
  if (thread != NULL)
      thread_ready(thread);
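Condensed from the kinit.c hunks above, the before/after of the kmp thread shows both halves of the interface change: the trailing bool uncounted argument becomes THREAD_FLAG_UNCOUNTED, and the THREAD_FLAG_WIRED flag plus manual CPU assignment becomes a thread_wire() call. A sketch, with the thread declaration and the NULL check of the surrounding code omitted:

    /* Before this changeset: wiring as a flag bit, CPU set by hand under
     * the thread lock, accounting controlled by a trailing bool. */
    thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true);
    irq_spinlock_lock(&thread->lock, false);
    thread->cpu = &cpus[0];
    irq_spinlock_unlock(&thread->lock, false);
    thread_ready(thread);

    /* After: accounting is expressed as a flag, wiring as an explicit call. */
    thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_UNCOUNTED, "kmp");
    thread_wire(thread, &cpus[0]);
    thread_ready(thread);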
kernel/generic/src/main/main.c
rabfc9f3 → r6eef3c4

   * Create the first thread.
   */
- thread_t *kinit_thread =
-     thread_create(kinit, NULL, kernel, 0, "kinit", true);
+ thread_t *kinit_thread = thread_create(kinit, NULL, kernel,
+     THREAD_FLAG_UNCOUNTED, "kinit");
  if (!kinit_thread)
      panic("Cannot create kinit thread.");
kernel/generic/src/proc/program.c
rabfc9f3 → r6eef3c4

   */
  prg->main_thread = thread_create(uinit, kernel_uarg, prg->task,
-     THREAD_FLAG_USPACE, "uinit", false);
+     THREAD_FLAG_USPACE, "uinit");
  if (!prg->main_thread) {
      free(kernel_uarg);
kernel/generic/src/proc/scheduler.c
rabfc9f3 → r6eef3c4

  else {
      fpu_init();
-     THREAD->fpu_context_exists = 1;
+     THREAD->fpu_context_exists = true;
  }
  #endif
…

  /* Don't prevent migration */
- CPU->fpu_owner->fpu_context_engaged = 0;
+ CPU->fpu_owner->fpu_context_engaged = false;
  irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
  CPU->fpu_owner = NULL;
…
      }
      fpu_init();
-     THREAD->fpu_context_exists = 1;
+     THREAD->fpu_context_exists = true;
  }

  CPU->fpu_owner = THREAD;
- THREAD->fpu_context_engaged = 1;
+ THREAD->fpu_context_engaged = true;
  irq_spinlock_unlock(&THREAD->lock, false);
…

  /*
-  * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
+  * Clear the stolen flag so that it can be migrated
   * when load balancing needs emerge.
   */
- thread->flags &= ~THREAD_FLAG_STOLEN;
+ thread->stolen = false;
  irq_spinlock_unlock(&thread->lock, false);

…
  irq_spinlock_lock(&thread->lock, false);

- if (!(thread->flags & THREAD_FLAG_WIRED) &&
-     !(thread->flags & THREAD_FLAG_STOLEN) &&
-     !thread->nomigrate &&
-     !thread->fpu_context_engaged) {
+ if ((!thread->wired) && (!thread->stolen) &&
+     (!thread->nomigrate) &&
+     (!thread->fpu_context_engaged)) {
      /*
       * Remove thread from ready queue.
…
  #endif

- thread->flags |= THREAD_FLAG_STOLEN;
+ thread->stolen = true;
  thread->state = Entering;
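The steal path in the scheduler now tests plain booleans on thread_t instead of flag bits. Purely as an illustration of the new fields (no such helper exists in the changeset), the eligibility test could be read as a predicate; the caller is assumed to hold thread->lock as in the surrounding code:

    #include <proc/thread.h>  /* thread_t */

    /* Hypothetical predicate, not part of the changeset: a thread sitting in
     * another CPU's run queue may be stolen only if it is not wired to that
     * CPU, has not already been stolen, has not disabled migration, and does
     * not have an FPU context engaged there. Caller holds thread->lock. */
    static bool thread_is_stealable(const thread_t *thread)
    {
        return !thread->wired && !thread->stolen &&
            !thread->nomigrate && !thread->fpu_context_engaged;
    }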
kernel/generic/src/proc/thread.c
rabfc9f3 → r6eef3c4

  kmflags |= FRAME_LOWMEM;
  kmflags &= ~FRAME_HIGHMEM;

  thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
  if (!thread->kstack) {
…
  }

+ /** Wire thread to the given CPU
+  *
+  * @param cpu CPU to wire the thread to.
+  *
+  */
+ void thread_wire(thread_t *thread, cpu_t *cpu)
+ {
+     irq_spinlock_lock(&thread->lock, true);
+     thread->cpu = cpu;
+     thread->wired = true;
+     irq_spinlock_unlock(&thread->lock, true);
+ }
+
  /** Make thread ready
   *
…
  ASSERT(thread->state != Ready);

- int i = (thread->priority < RQ_COUNT - 1)
-     ? ++thread->priority : thread->priority;
-
- cpu_t *cpu = CPU;
- if (thread->flags & THREAD_FLAG_WIRED) {
+ int i = (thread->priority < RQ_COUNT - 1) ?
+     ++thread->priority : thread->priority;
+
+ cpu_t *cpu;
+ if (thread->wired) {
      ASSERT(thread->cpu != NULL);
      cpu = thread->cpu;
- }
+ } else
+     cpu = CPU;
+
  thread->state = Ready;

…
   * @param flags Thread flags.
   * @param name Symbolic name (a copy is made).
-  * @param uncounted Thread's accounting doesn't affect accumulated task
-  *                  accounting.
   *
   * @return New thread's structure on success, NULL on failure.
…
   */
  thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-     unsigned int flags, const char *name, bool uncounted)
+     thread_flags_t flags, const char *name)
  {
      thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
…
  thread->ucycles = 0;
  thread->kcycles = 0;
- thread->uncounted = uncounted;
+ thread->uncounted =
+     ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
  thread->priority = -1;  /* Start in rq[0] */
  thread->cpu = NULL;
- thread->flags = flags;
+ thread->wired = false;
+ thread->stolen = false;
+ thread->uspace =
+     ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
+
  thread->nomigrate = 0;
  thread->state = Entering;
…
  thread->task = task;

- thread->fpu_context_exists = 0;
- thread->fpu_context_engaged = 0;
+ thread->fpu_context_exists = false;
+ thread->fpu_context_engaged = false;

  avltree_node_initialize(&thread->threads_tree_node);
…
  thread_create_arch(thread);

- if (!(flags & THREAD_FLAG_NOATTACH))
+ if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
      thread_attach(thread, task);

…

  /* Must not count kbox thread into lifecount */
- if (thread->flags & THREAD_FLAG_USPACE)
+ if (thread->uspace)
      atomic_inc(&task->lifecount);

…
  void thread_exit(void)
  {
-     if (THREAD->flags & THREAD_FLAG_USPACE) {
+     if (THREAD->uspace) {
  #ifdef CONFIG_UDEBUG
          /* Generate udebug THREAD_E event */
          udebug_thread_e_event();

          /*
           * This thread will not execute any code or system calls from
…
  {
      ASSERT(THREAD);

      THREAD->nomigrate++;
  }
…
      ASSERT(THREAD);
      ASSERT(THREAD->nomigrate > 0);

-     THREAD->nomigrate--;
+     if (THREAD->nomigrate > 0)
+         THREAD->nomigrate--;
  }

…

  thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
-     THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
+     THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
  if (thread) {
      if (uspace_thread_id != NULL) {
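The new thread_create() signature takes a thread_flags_t instead of an unsigned int plus a trailing bool. The type itself lives in the thread header, which is not part of this diff; the sketch below is therefore an assumption about its shape, based only on the flag names used in the hunks above, and the exact values may differ:

    /* Assumed definition (the header defining thread_flags_t is not shown in
     * this changeset); only the flag names are taken from the diff. */
    typedef enum {
        /** No special thread properties. */
        THREAD_FLAG_NONE = 0,
        /** Thread executes user-space code. */
        THREAD_FLAG_USPACE = (1 << 0),
        /** Do not attach the thread to the task in thread_create(). */
        THREAD_FLAG_NOATTACH = (1 << 1),
        /** Thread's accounting does not affect the accumulated task accounting. */
        THREAD_FLAG_UNCOUNTED = (1 << 2)
    } thread_flags_t;

Besides dropping the extra bool parameter, the enum makes call sites self-describing: a reader sees THREAD_FLAG_NONE or THREAD_FLAG_UNCOUNTED instead of a bare 0 and a trailing true/false.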
kernel/generic/src/udebug/udebug.c
rabfc9f3 → r6eef3c4

  mutex_lock(&thread->udebug.lock);
- unsigned int flags = thread->flags;

  /* Only process userspace threads. */
- if ((flags & THREAD_FLAG_USPACE) != 0) {
+ if (thread->uspace) {
      /* Prevent any further debug activity in thread. */
      thread->udebug.active = false;
kernel/generic/src/udebug/udebug_ops.c
rabfc9f3 → r6eef3c4

  /* Verify that 'thread' is a userspace thread. */
- if ((thread->flags & THREAD_FLAG_USPACE) == 0) {
+ if (!thread->uspace) {
      /* It's not, deny its existence */
      irq_spinlock_unlock(&thread->lock, true);
…

  mutex_lock(&thread->udebug.lock);
- if ((thread->flags & THREAD_FLAG_USPACE) != 0) {
+ if (thread->uspace) {
      thread->udebug.active = true;
      mutex_unlock(&thread->udebug.lock);
…

  irq_spinlock_lock(&thread->lock, false);
- int flags = thread->flags;
+ bool uspace = thread->uspace;
  irq_spinlock_unlock(&thread->lock, false);

  /* Not interested in kernel threads. */
- if ((flags & THREAD_FLAG_USPACE) == 0)
+ if (!uspace)
      continue;