Changeset 8565a42 in mainline for kernel/generic/src/proc
- Timestamp: 2018-03-02T20:34:50Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: a1a81f69, d5e5fd1
- Parents: 3061bc1 (diff), 34e1206 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:34:50)
- git-committer: GitHub <noreply@…> (2018-03-02 20:34:50)
- Location: kernel/generic/src/proc
- Files: 4 edited
kernel/generic/src/proc/program.c
r3061bc1 → r8565a42

Every hunk shown for this file is a whitespace-only change: trailing spaces and tabs are stripped, mostly from otherwise blank lines, leaving the code itself identical. The affected regions are:

- lines 75–121: creation of the stack address space area, allocation and initialization of the uspace_arg_t handed to the program's main thread, and creation of the main thread itself;
- lines 142–165: elf_load() of a program image, handling of the EE_LOADER loader status, registration of the image as the program loader (with the "Program loader at %p" log line), and the fall-through call to program_create();
- lines 179–200: program creation from the registered program_loader image (elf_load() with ELD_F_LOADER), again ending in program_create();
- lines 230–251: the spawn-loader syscall path — copying and NUL-terminating the task name from userspace, spawning the new task, perm_set() on it (still marked FIXME), and program_ready().
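Since every hunk above only strips trailing whitespace, the change can be reproduced mechanically. The filter below is a small, generic sketch of that normalization (read stdin, drop trailing spaces and tabs, write stdout); it is an illustration only, not the tool actually used to produce this changeset.

```c
/* strip_ws.c - remove trailing spaces and tabs from each input line.
 * Generic illustration of the whitespace normalization applied in this
 * changeset; not the actual tool used by the HelenOS developers.
 * Assumes lines fit into the buffer. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin) != NULL) {
		size_t len = strlen(line);
		int had_newline = (len > 0 && line[len - 1] == '\n');

		if (had_newline)
			line[--len] = '\0';

		/* Trim trailing spaces and tabs. */
		while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
			line[--len] = '\0';

		fputs(line, stdout);
		if (had_newline)
			fputc('\n', stdout);
	}

	return 0;
}
```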
kernel/generic/src/proc/scheduler.c
r3061bc1 → r8565a42

All hunks in this file are whitespace-only: trailing spaces and tabs are stripped, mostly from blank lines, plus one hunk around lines 702–705 where a pair of nested closing braces in the load balancer is rewritten with identical text. No functional change. The affected regions are:

- lines 90–117: the pre-run hook — before_thread_runs_arch(), rcu_before_thread_runs(), the CONFIG_FPU_LAZY owner check, and the CONFIG_UDEBUG backtrace (btrace) handling via stack_trace_istate();
- lines 141–179: lazy FPU context switching — saving the previous owner's context, allocating a saved_fpu_context from the slab cache (with a restart if the CPU changed during slab_alloc), and taking ownership for THREAD;
- lines 201–268: find_best_thread() — idling while CPU->nrdy is zero, then scanning rq[0..RQ_COUNT-1], dequeuing the first thread, updating the nrdy counters, ticks (us2ticks((i + 1) * 10000)), priority, and the stolen flag;
- lines 282–311: run-queue relinking — concatenating rq[i + 1] onto rq[i] once CPU->needs_relink exceeds NEEDS_RELINK_MAX;
- lines 321–390: scheduler() — kernel cycle accounting, the eager FPU save when CONFIG_FPU_LAZY is off, recording the preempted thread's IPL, copying THE to the CPU stack, and context_restore() into the scheduler context;
- lines 402–554: the core reschedule path that runs on the scheduler stack — holding/releasing the old task and address space, handling the Running, Exiting, and Sleeping thread states, picking the next thread via find_best_thread(), relink_rq(), the address space switch, and context_restore() into the new thread;
- lines 567–724: kcpulb(), the load-balancing kernel thread — computing average = nrdy / cpu_active + 1, scanning other CPUs' run queues starting from the lowest-priority queues and from the back of each queue, skipping wired, stolen, and non-migratable threads, and re-readying stolen threads locally;
- lines 735–764: the per-CPU run-queue listing (address, nrdy, needs_relink, and the contents of each rq[i]).
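The regions above are the heart of the scheduler: find_best_thread() pops the first thread from the highest-priority non-empty run queue, and kcpulb() steals work when a CPU falls below the computed average. The stand-alone C program below models only the run-queue pick; it is a simplified illustration (single CPU, no locking or atomics) and is not HelenOS kernel code.

```c
/* Simplified, single-threaded model of the multi-level run-queue pick
 * visible in the scheduler.c context above: RQ_COUNT priority levels,
 * take the first thread from the highest-priority non-empty queue.
 * Illustration only — not HelenOS kernel code. */
#include <stddef.h>
#include <stdio.h>

#define RQ_COUNT 16

typedef struct thread {
	int id;
	int priority;           /* run-queue index the thread was taken from */
	struct thread *next;
} thread_t;

typedef struct {
	thread_t *head;         /* FIFO per priority level */
	thread_t *tail;
	size_t n;
} run_queue_t;

static run_queue_t rq[RQ_COUNT];

static void rq_append(int prio, thread_t *t)
{
	t->next = NULL;
	if (rq[prio].tail)
		rq[prio].tail->next = t;
	else
		rq[prio].head = t;
	rq[prio].tail = t;
	rq[prio].n++;
}

/* Analogue of the pick loop: scan queues from the highest priority
 * (index 0) down and pop the first thread found. */
static thread_t *find_best_thread(void)
{
	for (int i = 0; i < RQ_COUNT; i++) {
		if (rq[i].n == 0)
			continue;

		thread_t *t = rq[i].head;
		rq[i].head = t->next;
		if (rq[i].head == NULL)
			rq[i].tail = NULL;
		rq[i].n--;
		t->priority = i;
		return t;
	}
	return NULL;            /* the kernel would idle and retry instead */
}

int main(void)
{
	thread_t a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	rq_append(5, &a);
	rq_append(2, &b);
	rq_append(5, &c);

	thread_t *t;
	while ((t = find_best_thread()) != NULL)
		printf("picked thread %d from rq[%d]\n", t->id, t->priority);
	return 0;
}
```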
kernel/generic/src/proc/task.c
r3061bc1 → r8565a42

All hunks are whitespace-only (trailing whitespace removed from otherwise blank lines); no functional change. The affected regions are:

- lines 107–155: the shutdown walker that calls task_kill_internal() on every task except TASK, and the loop that repeats the walk ("Killing tasks...") until no other tasks remain;
- lines 165–195: the task slab constructor and destructor — reference counters, locks, the thread list, the answerbox, the active-calls list, and the CONFIG_UDEBUG kbox fields;
- lines 210–278: task creation — task_create_arch(), name and container setup, udebug/kbox initialization, connecting a phone to ipc_phone_0 when the containers match, futex initialization, and insertion into tasks_tree under a freshly assigned taskid;
- lines 291–310: task destruction — removal from tasks_tree, task_destroy_arch(), futex deinitialization, releasing the address space, and freeing the slab object;
- lines 388–436: the syscall that renames the current task (bounded copy of the new name from userspace under the tasks, TASK, and threads locks) and the syscall wrapper around task_kill();
- lines 453–500: task_find_by_id() and the accounting routine that sums the task's own ucycles/kcycles with the counters of all of its counted threads;
- lines 505–558: task_kill_internal() (marking every thread interrupted and waking sleepers via waitq_interrupt_sleep()) and task_kill() itself, which refuses to kill task 1;
- lines 583–603: self-termination — task_kill_self() and its syscall wrapper;
- lines 609–674: the task statistics dump (per-task walker plus list header, in 32-bit and 64-bit variants).
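One of the regions above aggregates CPU-cycle accounting: a task's totals are the cycles already charged to the task plus the current counters of each of its threads, skipping threads marked uncounted. The sketch below is a stand-alone, simplified model of that summation; the type and field names are illustrative, and the kernel's locking and on-the-fly refresh of the running thread's counters are omitted.

```c
/* Simplified model of the accounting walk visible in the task.c context
 * above. Illustration only — not HelenOS kernel code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t ucycles;
	uint64_t kcycles;
	bool uncounted;          /* threads excluded from the statistics */
} thread_acct_t;

typedef struct {
	uint64_t ucycles;        /* cycles of threads that already exited */
	uint64_t kcycles;
	thread_acct_t *threads;
	size_t nthreads;
} task_acct_t;

static void task_get_accounting(const task_acct_t *task,
    uint64_t *ucycles, uint64_t *kcycles)
{
	uint64_t uret = task->ucycles;
	uint64_t kret = task->kcycles;

	for (size_t i = 0; i < task->nthreads; i++) {
		/* Process only counted threads. */
		if (task->threads[i].uncounted)
			continue;
		uret += task->threads[i].ucycles;
		kret += task->threads[i].kcycles;
	}

	*ucycles = uret;
	*kcycles = kret;
}

int main(void)
{
	thread_acct_t threads[] = {
		{ .ucycles = 100, .kcycles = 40 },
		{ .ucycles = 900, .kcycles = 60, .uncounted = true },
		{ .ucycles = 300, .kcycles = 10 },
	};
	task_acct_t task = { .ucycles = 50, .kcycles = 5,
	    .threads = threads, .nthreads = 3 };

	uint64_t u, k;
	task_get_accounting(&task, &u, &k);
	printf("ucycles=%llu kcycles=%llu\n",
	    (unsigned long long) u, (unsigned long long) k);
	return 0;
}
```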
kernel/generic/src/proc/thread.c
r3061bc1 → r8565a42

All hunks are whitespace-only (trailing whitespace removed from otherwise blank lines); no functional change. The affected regions are:

- lines 122–150: the thread "cushion" that runs the thread function, accumulates ucycles/kcycles into the owning task, and ends in thread_exit();
- lines 156–231: the thread slab constructor and destructor — lock and link initialization, the architecture-specific hooks, the CONFIG_FPU handling, and allocation/freeing of the kernel stack from low memory (FRAME_LOWMEM);
- lines 239–252: thread subsystem initialization (thread_t and fpu_context_t slab caches, threads_tree);
- lines 282–322: thread_ready() — choosing the run queue and CPU, appending the thread to the respective queue, and bumping the nrdy counters;
- lines 344–420: thread_create() — saved-context setup, name/flags/accounting initialization, sleep and join state, udebug and architecture hooks, rcu_thread_init(), and the optional thread_attach();
- lines 435–456: thread destruction — releasing FPU ownership on the thread's CPU, removing the thread from threads_tree, and detaching it from its task;
- lines 475–544: thread_attach() and thread exit (udebug THREAD_E event, userspace lifecount handling, and the final switch to the Exiting state followed by scheduler());
- lines 562–610: interrupting a thread (waking it via waitq_interrupt_sleep() if it sleeps), querying the interrupted flag, and the migration disable/enable counters;
- lines 624–703: thread_sleep(), the join-with-timeout primitive (valid only on an undetached thread), detach (which either destroys a Lingering thread or marks it to clean up after itself), and thread_usleep();
- lines 707–797: the thread statistics dump (per-thread walker plus list header, 32-bit and 64-bit variants);
- lines 814–880: existence check against threads_tree, thread_update_accounting(), and the by-ID lookup walker behind thread_find_by_id();
- lines 885–925: the stack-trace request path for a given thread ID — scheduling a backtrace and waking the thread if it sleeps interruptibly;
- lines 935–1002: the thread-creation syscall — copying the name and uspace_arg_t from userspace, thread_create() with THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, the failure cleanup paths, the CONFIG_UDEBUG begin hook, and thread_ready().
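Among the regions above are the join/detach primitives: a thread may be joined only while it is still undetached, and detaching it makes it responsible for its own cleanup. The same discipline exists in POSIX threads; the minimal pthread program below (plain userland C, not the HelenOS kernel API) illustrates it. Compile with -pthread.

```c
/* Join-vs-detach discipline, shown with POSIX threads as an analogy to
 * the join/detach code in thread.c above. Not HelenOS code. */
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	printf("worker %ld running\n", (long) arg);
	return NULL;
}

int main(void)
{
	pthread_t joined, detached;

	pthread_create(&joined, NULL, worker, (void *) 1L);
	pthread_create(&detached, NULL, worker, (void *) 2L);

	/* A detached thread releases its resources on exit; joining it later
	 * would be an error (cf. the assert on the detached flag above). */
	pthread_detach(detached);

	/* An undetached thread must be joined exactly once so its resources
	 * are reclaimed. */
	pthread_join(joined, NULL);

	return 0;
}
```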