Changeset da1bafb in mainline for kernel/generic/src/proc
- Timestamp: 2010-05-24T18:57:31Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- Location: kernel/generic/src/proc
- Files: 3 edited
kernel/generic/src/proc/scheduler.c
r666f492 → rda1bafb

The scheduler is converted from the plain spinlock API (with interrupts disabled and restored by hand) to the irq_spinlock API, whose lock and unlock calls take a flag saying whether they should manage the interrupt state themselves:

- spinlock_lock()/spinlock_unlock() on CPU->lock, THREAD->lock, the run-queue locks and the join wait-queue lock become irq_spinlock_lock()/irq_spinlock_unlock(), with a false flag in code that already runs with interrupts disabled and a true flag where the old code paired the lock with interrupts_disable()/interrupts_restore() (kcpulb(), sched_print_list()).
- Hand-over-hand locking is expressed with irq_spinlock_pass(): find_best_thread() passes the run-queue lock to the selected thread's lock, and kcpulb() passes a remote CPU's run-queue lock to the stolen thread's lock before calling thread_ready(); see the condensed sketch below.
- In scheduler_separated_stack(), the Exiting branch now calls thread_destroy(THREAD, false) to match the new thread_destroy() signature, and irq_spinlock_trylock() is used on the join wait queue to avoid deadlock.
- Terse locals are renamed (t → thread, l → link, j → rq, k → acpu_bias), the runq_t *r aliases are dropped in favour of direct CPU->rq[i] accesses, and counters switch to unsigned int, size_t and atomic_count_t; kcpulb() now compares average <= rdy instead of computing a signed count first.
- Cosmetic changes: the file's @brief is filled in ("Scheduler and load balancing."), nrdy gets a doxygen comment, #endif directives are annotated, and comments and whitespace are tidied up.
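The hand-over-hand case is the least mechanical part of the conversion. The fragment below is condensed from the new find_best_thread() as it appears in this changeset (declarations, the idle-CPU path and the enclosing priority loop are elided), so read it as an illustration of the irq_spinlock_pass() pattern rather than the complete function:

    /* Interrupts are already disabled here, hence the 'false' flags:
     * the calls only spin, they do not touch the interrupt state. */
    irq_spinlock_lock(&(CPU->rq[i].lock), false);
    if (CPU->rq[i].n == 0) {
        /* Empty queue -- drop the lock and try a lower-priority queue. */
        irq_spinlock_unlock(&(CPU->rq[i].lock), false);
        continue;
    }

    atomic_dec(&CPU->nrdy);
    atomic_dec(&nrdy);
    CPU->rq[i].n--;

    /* Take the first thread from the queue. */
    thread_t *thread =
        list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
    list_remove(&thread->rq_link);

    /* Atomically unlock the run queue and lock the thread. */
    irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);

    thread->cpu = CPU;
    thread->ticks = us2ticks((i + 1) * 10000);
    thread->priority = i;  /* Correct rq index */
    thread->flags &= ~THREAD_FLAG_STOLEN;
    irq_spinlock_unlock(&thread->lock, false);

    return thread;

The same pass-the-lock idiom replaces the unlock/lock pairs in kcpulb() and, in thread.c below, in thread_ready() and thread_destroy().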
kernel/generic/src/proc/task.c
r666f492 → rda1bafb

- tasks_lock becomes an IRQ_SPINLOCK_INITIALIZE()'d irq_spinlock, and every interrupts_disable()/spinlock_lock(&tasks_lock) ... spinlock_unlock()/interrupts_restore() sequence (task_done(), task_create(), task_destroy(), task_kill(), task_print_list()) collapses into irq_spinlock_lock(&tasks_lock, true)/irq_spinlock_unlock(&tasks_lock, true); a condensed before/after fragment follows this list.
- The per-task lock is initialized with irq_spinlock_initialize(&task->lock, "task_t_lock"); the per-task and per-thread locks inside task_get_accounting(), task_kill_internal() and task_print_walker() are taken with irq_spinlock_lock(..., false), since their callers already run with interrupts disabled.
- tsk_constructor() now takes unsigned int kmflags, task_done_walker() counts into a size_t, and the phone loop indices become size_t (task_print_walker() prints them with PRIs).
- task_create() copies the task name with str_cpy() instead of memcpy() plus manual NUL-termination, and task_kill() returns EOK instead of a bare 0.
- Locals are renamed for clarity (ta/t → task, thr → thread) and doc comments are added or expanded (task_done_walker(), task_hold(), task_release(), task_get_accounting()).
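To make the mechanical part of the conversion concrete, here is the task registration step of task_create() in both revisions, condensed from the diff above (surrounding initialisation and declarations are elided):

    /* Before (r666f492): interrupts juggled by hand around a plain spinlock. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    ta->taskid = ++task_counter;
    avltree_node_initialize(&ta->tasks_tree_node);
    ta->tasks_tree_node.key = ta->taskid;
    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    /* After (rda1bafb): the 'true' flag makes the lock disable interrupts
     * on acquisition and restore them on release. */
    irq_spinlock_lock(&tasks_lock, true);
    task->taskid = ++task_counter;
    avltree_node_initialize(&task->tasks_tree_node);
    task->tasks_tree_node.key = task->taskid;
    avltree_insert(&tasks_tree, &task->tasks_tree_node);
    irq_spinlock_unlock(&tasks_lock, true);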
kernel/generic/src/proc/thread.c
r666f492 → rda1bafb

- threads_lock and tidlock become irq_spinlocks (IRQ_SPINLOCK_INITIALIZE / IRQ_SPINLOCK_STATIC_INITIALIZE), last_tid is made static, and the spinlock plus interrupts_disable()/interrupts_restore() pairs in the thread startup cushion, thread_ready(), thread_create(), thread_attach(), thread_exit(), thread_join_timeout(), thread_detach(), thread_register_call_me() and thread_print_list() are replaced by irq_spinlock calls with the appropriate flag.
- thread_destroy() gains a second parameter: thread_destroy(thread_t *thread, bool irq_res). It chains irq_spinlock_pass() from thread->lock to threads_lock to thread->task->lock and performs the final unlock in interrupts-restore mode only when irq_res is true; thread_detach() now calls thread_destroy(thread, true) and the scheduler calls thread_destroy(THREAD, false). A condensed sketch follows below.
- thread_ready() and the startup cushion also use irq_spinlock_pass() (thread->lock → run-queue lock, THREAD->lock → TASK->lock) instead of unlock/lock pairs; the unused average-load computation in thread_ready() is commented out with a FIXME.
- Type and signature cleanups: thr_constructor() takes unsigned int kmflags, thr_destructor() returns size_t, thread_create() and thread_join_timeout() take unsigned int flags, timeout_pending is initialized to false, and the thread name is copied with str_cpy().
- sys_thread_create() now also frees kernel_uarg when copying the new thread ID back to userspace fails, the file @brief is filled in ("Thread management functions."), locals are renamed (t/thr → thread), and thread_register_call_me()'s doc comment notes that it must be called with interrupts disabled.
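The new thread_destroy() is the clearest example of chaining irq_spinlock_pass(); the body below is condensed from this changeset (assertions elided), illustrating how the thread lock held by the caller is exchanged through the global and task locks without re-enabling interrupts in between:

    void thread_destroy(thread_t *thread, bool irq_res)
    {
        /* Caller holds thread->lock with interrupts disabled. */
        irq_spinlock_lock(&thread->cpu->lock, false);
        if (thread->cpu->fpu_owner == thread)
            thread->cpu->fpu_owner = NULL;
        irq_spinlock_unlock(&thread->cpu->lock, false);

        /* Hand thread->lock over to threads_lock, then to the task lock,
         * keeping interrupts disabled the whole time. */
        irq_spinlock_pass(&thread->lock, &threads_lock);
        avltree_delete(&threads_tree, &thread->threads_tree_node);

        irq_spinlock_pass(&threads_lock, &thread->task->lock);

        /* Detach from the containing task. */
        list_remove(&thread->th_link);

        /* The final unlock restores interrupts only if the caller asked
         * for it via irq_res (thread_detach() does, the scheduler doesn't). */
        irq_spinlock_unlock(&thread->task->lock, irq_res);

        /* Drop the reference to the containing task. */
        task_release(thread->task);
        slab_free(thread_slab, thread);
    }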