Changeset b0f00a9 in mainline for kernel/generic/src/proc
- Timestamp:
- 2011-11-06T22:21:05Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 898e847
- Parents:
- 2bdf8313 (diff), 7b5f4c9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src/proc
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/program.c
r2bdf8313 rb0f00a9 40 40 #include <proc/thread.h> 41 41 #include <proc/task.h> 42 #include <proc/uarg.h>43 42 #include <mm/as.h> 44 43 #include <mm/slab.h> … … 48 47 #include <ipc/ipcrsc.h> 49 48 #include <security/cap.h> 50 #include <lib/elf .h>49 #include <lib/elf_load.h> 51 50 #include <errno.h> 52 51 #include <print.h> -
kernel/generic/src/proc/scheduler.c
r2bdf8313 rb0f00a9 237 237 * Take the first thread from the queue. 238 238 */ 239 thread_t *thread = 240 list_ get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);239 thread_t *thread = list_get_instance( 240 list_first(&CPU->rq[i].rq), thread_t, rq_link); 241 241 list_remove(&thread->rq_link); 242 242 … … 273 273 static void relink_rq(int start) 274 274 { 275 li nk_t head;276 277 list_initialize(& head);275 list_t list; 276 277 list_initialize(&list); 278 278 irq_spinlock_lock(&CPU->lock, false); 279 279 … … 284 284 285 285 irq_spinlock_lock(&CPU->rq[i + 1].lock, false); 286 list_concat(& head, &CPU->rq[i + 1].rq_head);286 list_concat(&list, &CPU->rq[i + 1].rq); 287 287 size_t n = CPU->rq[i + 1].n; 288 288 CPU->rq[i + 1].n = 0; … … 292 292 293 293 irq_spinlock_lock(&CPU->rq[i].lock, false); 294 list_concat(&CPU->rq[i].rq _head, &head);294 list_concat(&CPU->rq[i].rq, &list); 295 295 CPU->rq[i].n += n; 296 296 irq_spinlock_unlock(&CPU->rq[i].lock, false); … … 586 586 * Searching least priority queues on all CPU's first and most priority 587 587 * queues on all CPU's last. 588 *589 588 */ 590 589 size_t acpu; … … 617 616 618 617 /* Search rq from the back */ 619 link_t *link = cpu->rq[rq].rq_head.prev; 620 621 while (link != &(cpu->rq[rq].rq_head)) { 622 thread = (thread_t *) list_get_instance(link, thread_t, rq_link); 618 link_t *link = cpu->rq[rq].rq.head.prev; 619 620 while (link != &(cpu->rq[rq].rq.head)) { 621 thread = (thread_t *) list_get_instance(link, 622 thread_t, rq_link); 623 623 624 624 /* 625 * We don't want to steal CPU-wired threads 626 * neither threads already stolen. The latter 627 * prevents threads from migrating between CPU's 628 * without ever being run. We don't want to 629 * steal threads whose FPU context is still in 630 * CPU. 631 * 625 * Do not steal CPU-wired threads, threads 626 * already stolen, threads for which migration 627 * was temporarily disabled or threads whose 628 * FPU context is still in the CPU. 
632 629 */ 633 630 irq_spinlock_lock(&thread->lock, false); 634 631 635 if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) 636 && (!(thread->fpu_context_engaged))) { 632 if (!(thread->flags & THREAD_FLAG_WIRED) && 633 !(thread->flags & THREAD_FLAG_STOLEN) && 634 !thread->nomigrate && 635 !thread->fpu_context_engaged) { 637 636 /* 638 637 * Remove thread from ready queue. 639 638 */ 640 irq_spinlock_unlock(&thread->lock, false); 639 irq_spinlock_unlock(&thread->lock, 640 false); 641 641 642 642 atomic_dec(&cpu->nrdy); … … 660 660 */ 661 661 662 irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock); 662 irq_spinlock_pass(&(cpu->rq[rq].lock), 663 &thread->lock); 663 664 664 665 #ifdef KCPULB_VERBOSE … … 739 740 740 741 printf("\trq[%u]: ", i); 741 link_t *cur; 742 for (cur = cpus[cpu].rq[i].rq_head.next; 743 cur != &(cpus[cpu].rq[i].rq_head); 744 cur = cur->next) { 745 thread_t *thread = list_get_instance(cur, thread_t, rq_link); 742 list_foreach(cpus[cpu].rq[i].rq, cur) { 743 thread_t *thread = list_get_instance(cur, 744 thread_t, rq_link); 746 745 printf("%" PRIu64 "(%s) ", thread->tid, 747 746 thread_states[thread->state]); -
kernel/generic/src/proc/task.c
r2bdf8313 rb0f00a9 50 50 #include <ipc/ipc.h> 51 51 #include <ipc/ipcrsc.h> 52 #include <ipc/event.h> 52 53 #include <print.h> 53 54 #include <errno.h> … … 57 58 #include <syscall/copy.h> 58 59 #include <macros.h> 59 #include <ipc/event.h>60 60 61 61 /** Spinlock protecting the tasks_tree AVL tree. */ … … 155 155 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE); 156 156 157 list_initialize(&task->th _head);158 list_initialize(&task->sync_box _head);157 list_initialize(&task->threads); 158 list_initialize(&task->sync_boxes); 159 159 160 160 ipc_answerbox_init(&task->answerbox, task); … … 201 201 task->ipc_info.irq_notif_received = 0; 202 202 task->ipc_info.forwarded = 0; 203 204 event_task_init(task); 203 205 204 206 #ifdef CONFIG_UDEBUG … … 435 437 436 438 /* Current values of threads */ 437 link_t *cur; 438 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 439 list_foreach(task->threads, cur) { 439 440 thread_t *thread = list_get_instance(cur, thread_t, th_link); 440 441 … … 468 469 */ 469 470 470 link_t *cur; 471 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 471 list_foreach(task->threads, cur) { 472 472 thread_t *thread = list_get_instance(cur, thread_t, th_link); 473 473 bool sleeping = false; -
kernel/generic/src/proc/thread.c
r2bdf8313 rb0f00a9 39 39 #include <proc/thread.h> 40 40 #include <proc/task.h> 41 #include <proc/uarg.h>42 41 #include <mm/frame.h> 43 42 #include <mm/page.h> … … 45 44 #include <arch/cycle.h> 46 45 #include <arch.h> 47 #include <synch/synch.h>48 46 #include <synch/spinlock.h> 49 47 #include <synch/waitq.h> … … 55 53 #include <time/clock.h> 56 54 #include <time/timeout.h> 55 #include <time/delay.h> 57 56 #include <config.h> 58 57 #include <arch/interrupt.h> … … 259 258 */ 260 259 261 list_append(&thread->rq_link, &cpu->rq[i].rq _head);260 list_append(&thread->rq_link, &cpu->rq[i].rq); 262 261 cpu->rq[i].n++; 263 262 irq_spinlock_unlock(&(cpu->rq[i].lock), true); … … 321 320 thread->cpu = NULL; 322 321 thread->flags = flags; 322 thread->nomigrate = 0; 323 323 thread->state = Entering; 324 324 … … 421 421 atomic_inc(&task->lifecount); 422 422 423 list_append(&thread->th_link, &task->th _head);423 list_append(&thread->th_link, &task->threads); 424 424 425 425 irq_spinlock_pass(&task->lock, &threads_lock); … … 481 481 /* Not reached */ 482 482 while (true); 483 } 484 485 /** Prevent the current thread from being migrated to another processor. */ 486 void thread_migration_disable(void) 487 { 488 ASSERT(THREAD); 489 490 THREAD->nomigrate++; 491 } 492 493 /** Allow the current thread to be migrated to another processor. */ 494 void thread_migration_enable(void) 495 { 496 ASSERT(THREAD); 497 ASSERT(THREAD->nomigrate > 0); 498 499 THREAD->nomigrate--; 483 500 } 484 501 … … 912 929 } 913 930 931 sysarg_t sys_thread_udelay(uint32_t usec) 932 { 933 delay(usec); 934 return 0; 935 } 936 914 937 /** @} 915 938 */
Note:
See TracChangeset
for help on using the changeset viewer.
