Changeset ea7890e7 in mainline for kernel/generic/src/proc/thread.c


Ignore:
Timestamp:
2007-06-01T15:47:46Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
07be3c4
Parents:
ff3a34b
Message:

More efficient and simpler task termination.

Based on the assumption that, after its creation, only the task itself can create more threads for itself,
the last thread with userspace context to execute thread_exit() will perform futex and IPC cleanup. When
the task has no threads, it is destroyed. Both the cleanup and the destruction are controlled by reference
counting.

As for userspace threads, even though there could be a global garbage collector for joining threads, it is
much simpler if the uinit thread detaches itself before switching to userspace.

task_kill() is now an idempotent operation. It just instructs the threads within a task to exit.

Change in the name of a thread state: Undead → JoinMe.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/proc/thread.c

    rff3a34b rea7890e7  
    6868#include <syscall/copy.h>
    6969#include <errno.h>
     70#include <console/klog.h>
    7071
    7172
     
    7879        "Entering",
    7980        "Exiting",
    80         "Undead"
     81        "JoinMe"
    8182};
    8283
     
    329330
    330331        t->interrupted = false;
    331         t->join_type = None;
    332332        t->detached = false;
    333333        waitq_initialize(&t->join_wq);
     
    343343        thread_create_arch(t); 
    344344
    345         ipl = interrupts_disable();     
    346         spinlock_lock(&task->lock);
    347         if (!task->accept_new_threads) {
    348                 spinlock_unlock(&task->lock);
    349                 slab_free(thread_slab, t);
    350                 interrupts_restore(ipl);
    351                 return NULL;
    352         } else {
    353                 /*
    354                  * Bump the reference count so that this task cannot be
    355                  * destroyed while the new thread is being attached to it.
    356                  */
    357                 task->refcount++;
    358         }
    359         spinlock_unlock(&task->lock);
    360         interrupts_restore(ipl);
    361 
    362345        if (!(flags & THREAD_FLAG_NOATTACH))
    363346                thread_attach(t, task);
     
    374357void thread_destroy(thread_t *t)
    375358{
    376         bool destroy_task = false;
    377 
    378         ASSERT(t->state == Exiting || t->state == Undead);
     359        ASSERT(t->state == Exiting || t->state == JoinMe);
    379360        ASSERT(t->task);
    380361        ASSERT(t->cpu);
     
    396377        spinlock_lock(&t->task->lock);
    397378        list_remove(&t->th_link);
    398         if (--t->task->refcount == 0) {
    399                 t->task->accept_new_threads = false;
    400                 destroy_task = true;
    401         }
    402379        spinlock_unlock(&t->task->lock);       
    403        
    404         if (destroy_task)
     380
     381        /*
     382         * t is guaranteed to be the very last thread of its task.
     383         * It is safe to destroy the task.
     384         */
     385        if (atomic_predec(&t->task->refcount) == 0)
    405386                task_destroy(t->task);
    406387       
     
    432413         * Attach to the current task.
    433414         */
    434         ipl = interrupts_disable();     
     415        ipl = interrupts_disable();
    435416        spinlock_lock(&task->lock);
    436         ASSERT(task->refcount);
     417        atomic_inc(&task->refcount);
     418        atomic_inc(&task->lifecount);
    437419        list_append(&t->th_link, &task->th_head);
    438         if (task->refcount == 1)
    439                 task->main_thread = t;
    440420        spinlock_unlock(&task->lock);
    441421
     
    459439{
    460440        ipl_t ipl;
     441
     442        if (atomic_predec(&TASK->lifecount) == 0) {
     443                /*
     444                 * We are the last thread in the task that still has not exited.
     445                 * With the exception of the moment the task was created, new
     446                 * threads can only be created by threads of the same task.
     447                 * We are safe to perform cleanup.
     448                 */
     449                if (THREAD->flags & THREAD_FLAG_USPACE) {
     450                        ipc_cleanup();
     451                        futex_cleanup();
     452                        klog_printf("Cleanup of task %llu completed.",
     453                            TASK->taskid);
     454                }
     455        }
    461456
    462457restart:
     
    469464                goto restart;
    470465        }
     466       
    471467        THREAD->state = Exiting;
    472468        spinlock_unlock(&THREAD->lock);
     
    525521/** Detach thread.
    526522 *
    527  * Mark the thread as detached, if the thread is already in the Undead state,
     523 * Mark the thread as detached, if the thread is already in the JoinMe state,
    528524 * deallocate its resources.
    529525 *
     
    541537        spinlock_lock(&t->lock);
    542538        ASSERT(!t->detached);
    543         if (t->state == Undead) {
     539        if (t->state == JoinMe) {
    544540                thread_destroy(t);      /* unlocks &t->lock */
    545541                interrupts_restore(ipl);
     
    703699                            sizeof(t->tid));
    704700                        if (rc != 0) {
    705                                 ipl_t ipl;
    706 
    707701                                /*
    708702                                 * We have encountered a failure, but the thread
     
    712706
    713707                                /*
    714                                  * The new thread structure is initialized,
    715                                  * but is still not visible to the system.
     708                                 * The new thread structure is initialized, but
     709                                 * is still not visible to the system.
    716710                                 * We can safely deallocate it.
    717711                                 */
    718712                                slab_free(thread_slab, t);
    719713                                free(kernel_uarg);
    720 
    721                                 /*
    722                                  * Now we need to decrement the task reference
    723                                  * counter. Because we are running within the
    724                                  * same task, thread t is not the last thread
    725                                  * in the task, so it is safe to merely
    726                                  * decrement the counter.
    727                                  */
    728                                 ipl = interrupts_disable();
    729                                 spinlock_lock(&TASK->lock);
    730                                 TASK->refcount--;
    731                                 spinlock_unlock(&TASK->lock);
    732                                 interrupts_restore(ipl);
    733714
    734715                                return (unative_t) rc;
Note: See TracChangeset for help on using the changeset viewer.