Changeset e6a78b9 in mainline for kernel/generic/src/proc
- Timestamp: 2012-06-29T15:31:44Z (13 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 9432f08
- Parents: 34ab31c0 (diff), 0bbd13e (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src/proc
- Files: 4 edited
Legend:
- Unmodified: lines with no prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
kernel/generic/src/proc/program.c
r34ab31c0 → re6a78b9

 int program_create(as_t *as, uintptr_t entry_addr, char *name, program_t *prg)
 {
-    uspace_arg_t *kernel_uarg;
-
-    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
-    kernel_uarg->uspace_entry = (void *) entry_addr;
-    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
-    kernel_uarg->uspace_thread_function = NULL;
-    kernel_uarg->uspace_thread_arg = NULL;
-    kernel_uarg->uspace_uarg = NULL;
-
+    prg->loader_status = EE_OK;
     prg->task = task_create(as, name);
     if (!prg->task)
…
         AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
         STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
-    if (!area)
+    if (!area) {
+        task_destroy(prg->task);
         return ENOMEM;
+    }
+
+    uspace_arg_t *kernel_uarg = (uspace_arg_t *)
+        malloc(sizeof(uspace_arg_t), 0);
+
+    kernel_uarg->uspace_entry = (void *) entry_addr;
+    kernel_uarg->uspace_stack = (void *) virt;
+    kernel_uarg->uspace_stack_size = STACK_SIZE;
+    kernel_uarg->uspace_thread_function = NULL;
+    kernel_uarg->uspace_thread_arg = NULL;
+    kernel_uarg->uspace_uarg = NULL;
 
     /*
…
      */
     prg->main_thread = thread_create(uinit, kernel_uarg, prg->task,
-        THREAD_FLAG_USPACE, "uinit", false);
-    if (!prg->main_thread)
+        THREAD_FLAG_USPACE, "uinit");
+    if (!prg->main_thread) {
+        free(kernel_uarg);
+        as_area_destroy(as, virt);
+        task_destroy(prg->task);
         return ELIMIT;
+    }
 
     return EOK;
…
  * executable image. The task is returned in *task.
  *
- * @param image_addr Address of an executable program image.
- * @param name       Name to set for the program's task.
- * @param prg        Buffer for storing program info. If image_addr
- *                   points to a loader image, p->task will be set to
- *                   NULL and EOK will be returned.
+ * @param[in]  image_addr Address of an executable program image.
+ * @param[in]  name       Name to set for the program's task.
+ * @param[out] prg        Buffer for storing program info.
+ *                        If image_addr points to a loader image,
+ *                        prg->task will be set to NULL and EOK
+ *                        will be returned.
  *
  * @return EOK on success or negative error code.
…
         return ENOMEM;
 
-    unsigned int rc = elf_load((elf_header_t *) image_addr, as, 0);
-    if (rc != EE_OK) {
+    prg->loader_status = elf_load((elf_header_t *) image_addr, as, 0);
+    if (prg->loader_status != EE_OK) {
         as_destroy(as);
         prg->task = NULL;
         prg->main_thread = NULL;
 
-        if (rc != EE_LOADER)
+        if (prg->loader_status != EE_LOADER)
             return ENOTSUP;
 
…
 
         program_loader = image_addr;
-        LOG("Registered program loader at %p",
-            (void *) image_addr);
+        printf("Program loader at %p\n", (void *) image_addr);
 
         return EOK;
…
     }
 
-    unsigned int rc = elf_load((elf_header_t *) program_loader, as,
+    prg->loader_status = elf_load((elf_header_t *) program_loader, as,
         ELD_F_LOADER);
-    if (rc != EE_OK) {
+    if (prg->loader_status != EE_OK) {
         as_destroy(as);
-        printf("Cannot spawn loader (%s)\n", elf_error(rc));
+        printf("Cannot spawn loader (%s)\n",
+            elf_error(prg->loader_status));
         return ENOENT;
     }
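The program.c hunks change program_create() in two visible ways: the uspace_arg_t block is now allocated only after the stack area exists, so uspace_stack can record the actual stack address instead of USTACK_ADDRESS, and the later failure paths now release what was already created (task_destroy(), free(), as_area_destroy()) instead of leaking it. The following stand-alone sketch illustrates the same acquire-in-order / unwind-on-failure pattern; it is not HelenOS code, and the names (make_task, make_stack, make_args, create_program) are hypothetical stand-ins.

/* Illustrative sketch only -- hypothetical names, not HelenOS code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int id; } task_t;
typedef struct { void *base; } ustack_t;
typedef struct { void *entry; void *stack; } uargs_t;

static task_t *make_task(void) { return calloc(1, sizeof(task_t)); }
static ustack_t *make_stack(void) { return calloc(1, sizeof(ustack_t)); }

static uargs_t *make_args(void *entry, void *stack)
{
    uargs_t *ua = calloc(1, sizeof(uargs_t));
    if (ua) {
        ua->entry = entry;
        /* Built only after the stack exists, so it can record the
         * real stack address (cf. uspace_stack = virt above). */
        ua->stack = stack;
    }
    return ua;
}

static int create_program(void *entry)
{
    task_t *task = make_task();
    if (!task)
        return -1;

    ustack_t *stack = make_stack();
    if (!stack) {
        free(task);              /* undo step 1 */
        return -1;
    }

    uargs_t *args = make_args(entry, stack->base);
    if (!args) {
        free(stack);             /* undo step 2 */
        free(task);              /* undo step 1 */
        return -1;
    }

    printf("program created\n");
    free(args);
    free(stack);
    free(task);
    return 0;
}

int main(void)
{
    return create_program((void *) 0) ? 1 : 0;
}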
kernel/generic/src/proc/scheduler.c
r34ab31c0 → re6a78b9

     else {
         fpu_init();
-        THREAD->fpu_context_exists = 1;
+        THREAD->fpu_context_exists = true;
     }
 #endif
…
 
     /* Don't prevent migration */
-    CPU->fpu_owner->fpu_context_engaged = 0;
+    CPU->fpu_owner->fpu_context_engaged = false;
     irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
     CPU->fpu_owner = NULL;
…
         }
         fpu_init();
-        THREAD->fpu_context_exists = 1;
+        THREAD->fpu_context_exists = true;
     }
 
     CPU->fpu_owner = THREAD;
-    THREAD->fpu_context_engaged = 1;
+    THREAD->fpu_context_engaged = true;
     irq_spinlock_unlock(&THREAD->lock, false);
 
…
 
     /*
-     * Clear the THREAD_FLAG_STOLEN flag so that it can be migrated
+     * Clear the stolen flag so that it can be migrated
      * when load balancing needs emerge.
      */
-    thread->flags &= ~THREAD_FLAG_STOLEN;
+    thread->stolen = false;
     irq_spinlock_unlock(&thread->lock, false);
 
…
         irq_spinlock_lock(&thread->lock, false);
 
-        if (!(thread->flags & THREAD_FLAG_WIRED) &&
-            !(thread->flags & THREAD_FLAG_STOLEN) &&
-            !thread->nomigrate &&
-            !thread->fpu_context_engaged) {
+        if ((!thread->wired) && (!thread->stolen) &&
+            (!thread->nomigrate) &&
+            (!thread->fpu_context_engaged)) {
             /*
              * Remove thread from ready queue.
…
 #endif
 
-        thread->flags |= THREAD_FLAG_STOLEN;
+        thread->stolen = true;
         thread->state = Entering;
 
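The scheduler.c hunks consistently replace bit-mask and numeric state (thread->flags & THREAD_FLAG_STOLEN, fpu_context_exists = 1) with dedicated boolean fields (thread->stolen, thread->wired, ... = true). The short stand-alone sketch below contrasts the two representations; the struct and flag names are simplified stand-ins, not the kernel's.

/* Illustrative sketch only -- simplified stand-in names. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_WIRED   (1 << 0)
#define FLAG_STOLEN  (1 << 1)

struct old_thread {
    unsigned int flags;            /* bits tested with '&' */
};

struct new_thread {
    bool wired;                    /* one field per property */
    bool stolen;
};

int main(void)
{
    struct old_thread ot = { .flags = FLAG_WIRED };
    struct new_thread nt = { .wired = true, .stolen = false };

    /* Old style: mask tests and read-modify-write updates. */
    if (!(ot.flags & FLAG_STOLEN))
        ot.flags |= FLAG_STOLEN;

    /* New style: plain boolean reads and assignments. */
    if (!nt.stolen)
        nt.stolen = true;

    printf("old stolen=%d new stolen=%d\n",
        (ot.flags & FLAG_STOLEN) != 0, nt.stolen);
    return 0;
}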
kernel/generic/src/proc/task.c
r34ab31c0 → re6a78b9

     TASK = NULL;
     avltree_create(&tasks_tree);
-    task_slab = slab_cache_create("task_slab", sizeof(task_t), 0,
+    task_slab = slab_cache_create("task_t", sizeof(task_t), 0,
         tsk_constructor, NULL, 0);
 }
kernel/generic/src/proc/thread.c
r34ab31c0 → re6a78b9

     kmflags |= FRAME_LOWMEM;
     kmflags &= ~FRAME_HIGHMEM;
 
     thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     if (!thread->kstack) {
…
 
     atomic_set(&nrdy, 0);
-    thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
+    thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
 
 #ifdef CONFIG_FPU
-    fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
-        FPU_CONTEXT_ALIGN, NULL, NULL, 0);
+    fpu_context_slab = slab_cache_create("fpu_context_t",
+        sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 
…
 }
 
+/** Wire thread to the given CPU
+ *
+ * @param cpu CPU to wire the thread to.
+ *
+ */
+void thread_wire(thread_t *thread, cpu_t *cpu)
+{
+    irq_spinlock_lock(&thread->lock, true);
+    thread->cpu = cpu;
+    thread->wired = true;
+    irq_spinlock_unlock(&thread->lock, true);
+}
+
 /** Make thread ready
  *
…
     ASSERT(thread->state != Ready);
 
-    int i = (thread->priority < RQ_COUNT - 1)
-        ? ++thread->priority : thread->priority;
-
-    cpu_t *cpu = CPU;
-    if (thread->flags & THREAD_FLAG_WIRED) {
+    int i = (thread->priority < RQ_COUNT - 1) ?
+        ++thread->priority : thread->priority;
+
+    cpu_t *cpu;
+    if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
         ASSERT(thread->cpu != NULL);
         cpu = thread->cpu;
-    }
+    } else
+        cpu = CPU;
+
     thread->state = Ready;
 
…
  * @param flags     Thread flags.
  * @param name      Symbolic name (a copy is made).
- * @param uncounted Thread's accounting doesn't affect accumulated task
- *                  accounting.
  *
  * @return New thread's structure on success, NULL on failure.
…
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    unsigned int flags, const char *name, bool uncounted)
+    thread_flags_t flags, const char *name)
 {
     thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
…
     thread->ucycles = 0;
     thread->kcycles = 0;
-    thread->uncounted = uncounted;
+    thread->uncounted =
+        ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
     thread->priority = -1;          /* Start in rq[0] */
     thread->cpu = NULL;
-    thread->flags = flags;
+    thread->wired = false;
+    thread->stolen = false;
+    thread->uspace =
+        ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
+
     thread->nomigrate = 0;
     thread->state = Entering;
…
     thread->task = task;
 
-    thread->fpu_context_exists = 0;
-    thread->fpu_context_engaged = 0;
+    thread->fpu_context_exists = false;
+    thread->fpu_context_engaged = false;
 
     avltree_node_initialize(&thread->threads_tree_node);
…
     thread_create_arch(thread);
 
-    if (!(flags & THREAD_FLAG_NOATTACH))
+    if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
         thread_attach(thread, task);
 
…
 
     /* Must not count kbox thread into lifecount */
-    if (thread->flags & THREAD_FLAG_USPACE)
+    if (thread->uspace)
         atomic_inc(&task->lifecount);
 
…
 void thread_exit(void)
 {
-    if (THREAD->flags & THREAD_FLAG_USPACE) {
+    if (THREAD->uspace) {
 #ifdef CONFIG_UDEBUG
         /* Generate udebug THREAD_E event */
         udebug_thread_e_event();
 
         /*
          * This thread will not execute any code or system calls from
…
 {
     ASSERT(THREAD);
 
     THREAD->nomigrate++;
 }
…
     ASSERT(THREAD);
     ASSERT(THREAD->nomigrate > 0);
 
-    THREAD->nomigrate--;
+    if (THREAD->nomigrate > 0)
+        THREAD->nomigrate--;
 }
 
…
  * In case of failure, kernel_uarg will be deallocated in this function.
  * In case of success, kernel_uarg will be freed in uinit().
- *
  */
     uspace_arg_t *kernel_uarg =
…
 
     thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
-        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
+        THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
     if (thread) {
         if (uspace_thread_id != NULL) {
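In thread.c, thread_create() drops the trailing bool uncounted parameter; that information now travels in the flags argument (THREAD_FLAG_UNCOUNTED) and is decoded into boolean fields up front, while wiring a thread to a CPU becomes an explicit thread_wire() call instead of a THREAD_FLAG_WIRED bit. The sketch below shows the decode-flags-into-bools-at-construction idea using hypothetical names (tflags_t, mythread_t, mythread_init), not the kernel API.

/* Illustrative sketch only -- hypothetical names, not the kernel API. */
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    TF_NONE      = 0,
    TF_USPACE    = 1 << 0,
    TF_UNCOUNTED = 1 << 1,
    TF_NOATTACH  = 1 << 2
} tflags_t;

typedef struct {
    bool uspace;
    bool uncounted;
} mythread_t;

static void mythread_init(mythread_t *t, tflags_t flags)
{
    /* Decode once at construction; later code reads plain bools,
     * mirroring thread->uspace and thread->uncounted above. */
    t->uspace = (flags & TF_USPACE) == TF_USPACE;
    t->uncounted = (flags & TF_UNCOUNTED) == TF_UNCOUNTED;
}

int main(void)
{
    mythread_t t;
    mythread_init(&t, TF_USPACE | TF_NOATTACH);
    printf("uspace=%d uncounted=%d\n", t.uspace, t.uncounted);
    return 0;
}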