File: kernel/generic/src/proc/thread.c (1 edited)

    ree42e43 r22e6802  
    11/*
    2  * Copyright (c) 2010 Jakub Jermar
     2 * Copyright (c) 2001-2004 Jakub Jermar
    33 * All rights reserved.
    44 *
     
    3333/**
    3434 * @file
    35  * @brief Thread management functions.
     35 * @brief       Thread management functions.
    3636 */
    3737
     
    4848#include <synch/spinlock.h>
    4949#include <synch/waitq.h>
     50#include <synch/rwlock.h>
    5051#include <cpu.h>
    51 #include <str.h>
     52#include <func.h>
    5253#include <context.h>
    5354#include <adt/avl.h>
     
    7576
    7677/** Thread states */
    77 const char *thread_states[] = {
     78char *thread_states[] = {
    7879        "Invalid",
    7980        "Running",
     
    8384        "Exiting",
    8485        "Lingering"
    85 };
    86 
    87 typedef struct {
    88         thread_id_t thread_id;
    89         thread_t *thread;
    90 } thread_iterator_t;
     86};
    9187
    9288/** Lock protecting the threads_tree AVL tree.
    9389 *
    9490 * For locking rules, see declaration thereof.
    95  *
    96  */
    97 IRQ_SPINLOCK_INITIALIZE(threads_lock);
     91 */
     92SPINLOCK_INITIALIZE(threads_lock);
    9893
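
    The switch from IRQ_SPINLOCK_INITIALIZE to SPINLOCK_INITIALIZE here sets the
    pattern for the whole changeset: the irq_spinlock variant folds interrupt-state
    handling into the lock API, while the plain-spinlock variant manages the IPL
    explicitly around the lock calls. A minimal compilable model of the two idioms
    (stub primitives for illustration only; the real ones live in synch/spinlock.h):

    #include <stdbool.h>

    typedef struct { volatile int locked; } spinlock_t;            /* stub */
    typedef unsigned int ipl_t;                                    /* stub */
    typedef struct { spinlock_t lock; ipl_t ipl; } irq_spinlock_t; /* stub */

    static ipl_t interrupts_disable(void) { return 0; }  /* stub: returns old IPL */
    static void interrupts_restore(ipl_t ipl) { (void) ipl; }
    static void spinlock_lock(spinlock_t *sl) { sl->locked = 1; }
    static void spinlock_unlock(spinlock_t *sl) { sl->locked = 0; }

    /* Plain-spinlock idiom: the caller saves and restores the interrupt state. */
    static void plain_idiom(spinlock_t *sl)
    {
            ipl_t ipl = interrupts_disable();
            spinlock_lock(sl);
            /* ... critical section ... */
            spinlock_unlock(sl);
            interrupts_restore(ipl);
    }

    /* irq_spinlock idiom: irq_spinlock_lock(l, true) bundles the same four
     * steps into two calls and stashes the saved IPL inside the lock. */
    static void irq_spinlock_lock(irq_spinlock_t *l, bool irq_dis)
    {
            if (irq_dis)
                    l->ipl = interrupts_disable();
            spinlock_lock(&l->lock);
    }

    static void irq_spinlock_unlock(irq_spinlock_t *l, bool irq_res)
    {
            spinlock_unlock(&l->lock);
            if (irq_res)
                    interrupts_restore(l->ipl);
    }
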
    9994/** AVL tree of all threads.
     
    10196 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
    10297 * exist as long as the threads_lock is held.
    103  *
    104  */
    105 avltree_t threads_tree;
    106 
    107 IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
    108 static thread_id_t last_tid = 0;
     98 */
     99avltree_t threads_tree;         
     100
     101SPINLOCK_INITIALIZE(tidlock);
     102thread_id_t last_tid = 0;
    109103
    110104static slab_cache_t *thread_slab;
    111 
    112105#ifdef CONFIG_FPU
    113106slab_cache_t *fpu_context_slab;
     
    127120        void *arg = THREAD->thread_arg;
    128121        THREAD->last_cycle = get_cycle();
    129        
     122
    130123        /* This is where each thread wakes up after its creation */
    131         irq_spinlock_unlock(&THREAD->lock, false);
     124        spinlock_unlock(&THREAD->lock);
    132125        interrupts_enable();
    133        
     126
    134127        f(arg);
    135128       
    136129        /* Accumulate accounting to the task */
    137         irq_spinlock_lock(&THREAD->lock, true);
     130        ipl_t ipl = interrupts_disable();
     131       
     132        spinlock_lock(&THREAD->lock);
    138133        if (!THREAD->uncounted) {
    139                 thread_update_accounting(true);
    140                 uint64_t ucycles = THREAD->ucycles;
    141                 THREAD->ucycles = 0;
    142                 uint64_t kcycles = THREAD->kcycles;
    143                 THREAD->kcycles = 0;
     134                thread_update_accounting();
     135                uint64_t cycles = THREAD->cycles;
     136                THREAD->cycles = 0;
     137                spinlock_unlock(&THREAD->lock);
    144138               
    145                 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
    146                 TASK->ucycles += ucycles;
    147                 TASK->kcycles += kcycles;
    148                 irq_spinlock_unlock(&TASK->lock, true);
     139                spinlock_lock(&TASK->lock);
     140                TASK->cycles += cycles;
     141                spinlock_unlock(&TASK->lock);
    149142        } else
    150                 irq_spinlock_unlock(&THREAD->lock, true);
     143                spinlock_unlock(&THREAD->lock);
     144       
     145        interrupts_restore(ipl);
    151146       
    152147        thread_exit();
    153        
    154         /* Not reached */
    155 }
    156 
    157 /** Initialization and allocation for thread_t structure
    158  *
    159  */
    160 static int thr_constructor(void *obj, unsigned int kmflags)
    161 {
    162         thread_t *thread = (thread_t *) obj;
    163        
    164         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    165         link_initialize(&thread->rq_link);
    166         link_initialize(&thread->wq_link);
    167         link_initialize(&thread->th_link);
    168        
     148        /* not reached */
     149}
     150
     151/** Initialization and allocation for thread_t structure */
     152static int thr_constructor(void *obj, int kmflags)
     153{
     154        thread_t *t = (thread_t *) obj;
     155
     156        spinlock_initialize(&t->lock, "thread_t_lock");
     157        link_initialize(&t->rq_link);
     158        link_initialize(&t->wq_link);
     159        link_initialize(&t->th_link);
     160
    169161        /* call the architecture-specific part of the constructor */
    170         thr_constructor_arch(thread);
     162        thr_constructor_arch(t);
    171163       
    172164#ifdef CONFIG_FPU
    173165#ifdef CONFIG_FPU_LAZY
    174         thread->saved_fpu_context = NULL;
    175 #else /* CONFIG_FPU_LAZY */
    176         thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    177         if (!thread->saved_fpu_context)
     166        t->saved_fpu_context = NULL;
     167#else
     168        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
     169        if (!t->saved_fpu_context)
    178170                return -1;
    179 #endif /* CONFIG_FPU_LAZY */
    180 #endif /* CONFIG_FPU */
    181        
    182         thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    183         if (!thread->kstack) {
     171#endif
     172#endif
     173
     174        t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     175        if (!t->kstack) {
    184176#ifdef CONFIG_FPU
    185                 if (thread->saved_fpu_context)
    186                         slab_free(fpu_context_slab, thread->saved_fpu_context);
     177                if (t->saved_fpu_context)
     178                        slab_free(fpu_context_slab, t->saved_fpu_context);
    187179#endif
    188180                return -1;
    189181        }
    190        
     182
    191183#ifdef CONFIG_UDEBUG
    192         mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
    193 #endif
    194        
     184        mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
     185#endif
     186
    195187        return 0;
    196188}
    197189
    198190/** Destruction of thread_t object */
    199 static size_t thr_destructor(void *obj)
    200 {
    201         thread_t *thread = (thread_t *) obj;
    202        
     191static int thr_destructor(void *obj)
     192{
     193        thread_t *t = (thread_t *) obj;
     194
    203195        /* call the architecture-specific part of the destructor */
    204         thr_destructor_arch(thread);
    205        
    206         frame_free(KA2PA(thread->kstack));
    207        
     196        thr_destructor_arch(t);
     197
     198        frame_free(KA2PA(t->kstack));
    208199#ifdef CONFIG_FPU
    209         if (thread->saved_fpu_context)
    210                 slab_free(fpu_context_slab, thread->saved_fpu_context);
    211 #endif
    212        
    213         return 1;  /* One page freed */
     200        if (t->saved_fpu_context)
     201                slab_free(fpu_context_slab, t->saved_fpu_context);
     202#endif
     203        return 1; /* One page freed */
    214204}
    215205
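
    Two things change in cushion() above: the single cycles counter is split into
    separate user (ucycles) and kernel (kcycles) time, and the unlock-then-lock
    window between the THREAD and TASK locks is closed with irq_spinlock_pass(),
    which trades one lock for the other while interrupts stay disabled throughout.
    A sketch of what such a pass amounts to, reusing the stub types from the
    previous sketch (an assumption about the semantics, not the kernel source):

    /* Release `from` and acquire `to` as one operation; the interrupt
     * state saved in `from` migrates to `to`, so the caller never stands
     * lockless with interrupts re-enabled in between. */
    static void irq_spinlock_pass(irq_spinlock_t *from, irq_spinlock_t *to)
    {
            ipl_t ipl = from->ipl;
            spinlock_unlock(&from->lock);
            spinlock_lock(&to->lock);   /* still at raised IPL here */
            to->ipl = ipl;
    }
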
     
    222212{
    223213        THREAD = NULL;
    224        
    225214        atomic_set(&nrdy, 0);
    226215        thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
    227216            thr_constructor, thr_destructor, 0);
    228        
     217
    229218#ifdef CONFIG_FPU
    230219        fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
    231220            FPU_CONTEXT_ALIGN, NULL, NULL, 0);
    232221#endif
    233        
     222
    234223        avltree_create(&threads_tree);
    235224}
     
    237226/** Make thread ready
    238227 *
    239  * Switch thread to the ready state.
     228 * Switch thread t to the ready state.
    240229 *
    241230 * @param t Thread to make ready.
    242231 *
    243232 */
    244 void thread_ready(thread_t *thread)
    245 {
    246         irq_spinlock_lock(&thread->lock, true);
    247        
    248         ASSERT(!(thread->state == Ready));
    249        
    250         int i = (thread->priority < RQ_COUNT - 1)
    251             ? ++thread->priority : thread->priority;
    252        
    253         cpu_t *cpu = CPU;
    254         if (thread->flags & THREAD_FLAG_WIRED) {
    255                 ASSERT(thread->cpu != NULL);
    256                 cpu = thread->cpu;
     233void thread_ready(thread_t *t)
     234{
     235        cpu_t *cpu;
     236        runq_t *r;
     237        ipl_t ipl;
     238        int i, avg;
     239
     240        ipl = interrupts_disable();
     241
     242        spinlock_lock(&t->lock);
     243
     244        ASSERT(!(t->state == Ready));
     245
     246        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
     247       
     248        cpu = CPU;
     249        if (t->flags & THREAD_FLAG_WIRED) {
     250                ASSERT(t->cpu != NULL);
     251                cpu = t->cpu;
    257252        }
    258         thread->state = Ready;
    259        
    260         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     253        t->state = Ready;
     254        spinlock_unlock(&t->lock);
    261255       
    262256        /*
    263          * Append thread to respective ready queue
    264          * on respective processor.
     257         * Append t to respective ready queue on respective processor.
    265258         */
    266        
    267         list_append(&thread->rq_link, &cpu->rq[i].rq_head);
    268         cpu->rq[i].n++;
    269         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    270        
     259        r = &cpu->rq[i];
     260        spinlock_lock(&r->lock);
     261        list_append(&t->rq_link, &r->rq_head);
     262        r->n++;
     263        spinlock_unlock(&r->lock);
     264
    271265        atomic_inc(&nrdy);
    272         // FIXME: Why is the avg value not used
    273         // avg = atomic_get(&nrdy) / config.cpu_active;
     266        avg = atomic_get(&nrdy) / config.cpu_active;
    274267        atomic_inc(&cpu->nrdy);
     268
     269        interrupts_restore(ipl);
    275270}
    276271
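
    Both variants of thread_ready() share the priority step: each pass through the
    function demotes the thread by one level until it saturates at the last run
    queue, which is how frequently rescheduled threads age toward lower priorities.
    The rule in isolation (RQ_COUNT stands in for the kernel's run-queue count):

    #define RQ_COUNT 16   /* assumption: the real value comes from kernel config */

    /* Age the priority and return the run-queue index to enqueue on:
     * -1 -> 0 (a fresh thread's first wakeup lands in rq[0]), then
     * 0 -> 1 -> ... -> RQ_COUNT - 1, where it saturates. */
    static int ready_queue_index(int *priority)
    {
            return (*priority < RQ_COUNT - 1) ? ++*priority : *priority;
    }
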
     
    279274 * Create a new thread.
    280275 *
    281  * @param func      Thread's implementing function.
    282  * @param arg       Thread's implementing function argument.
    283  * @param task      Task to which the thread belongs. The caller must
    284  *                  guarantee that the task won't cease to exist during the
    285  *                  call. The task's lock may not be held.
    286  * @param flags     Thread flags.
    287  * @param name      Symbolic name (a copy is made).
    288  * @param uncounted Thread's accounting doesn't affect accumulated task
    289  *                  accounting.
    290  *
    291  * @return New thread's structure on success, NULL on failure.
     276 * @param func          Thread's implementing function.
     277 * @param arg           Thread's implementing function argument.
     278 * @param task          Task to which the thread belongs. The caller must
     279 *                      guarantee that the task won't cease to exist during the
     280 *                      call. The task's lock may not be held.
     281 * @param flags         Thread flags.
     282 * @param name          Symbolic name (a copy is made).
     283 * @param uncounted     Thread's accounting doesn't affect accumulated task
     284 *                      accounting.
     285 *
     286 * @return              New thread's structure on success, NULL on failure.
    292287 *
    293288 */
    294289thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    295     unsigned int flags, const char *name, bool uncounted)
    296 {
    297         thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
    298         if (!thread)
     290    int flags, char *name, bool uncounted)
     291{
     292        thread_t *t;
     293        ipl_t ipl;
     294       
     295        t = (thread_t *) slab_alloc(thread_slab, 0);
     296        if (!t)
    299297                return NULL;
    300298       
    301299        /* Not needed, but good for debugging */
    302         memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
    303        
    304         irq_spinlock_lock(&tidlock, true);
    305         thread->tid = ++last_tid;
    306         irq_spinlock_unlock(&tidlock, true);
    307        
    308         context_save(&thread->saved_context);
    309         context_set(&thread->saved_context, FADDR(cushion),
    310             (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
    311        
    312         the_initialize((the_t *) thread->kstack);
    313        
    314         ipl_t ipl = interrupts_disable();
    315         thread->saved_context.ipl = interrupts_read();
     300        memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
     301       
     302        ipl = interrupts_disable();
     303        spinlock_lock(&tidlock);
     304        t->tid = ++last_tid;
     305        spinlock_unlock(&tidlock);
    316306        interrupts_restore(ipl);
    317307       
    318         str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
    319        
    320         thread->thread_code = func;
    321         thread->thread_arg = arg;
    322         thread->ticks = -1;
    323         thread->ucycles = 0;
    324         thread->kcycles = 0;
    325         thread->uncounted = uncounted;
    326         thread->priority = -1;          /* Start in rq[0] */
    327         thread->cpu = NULL;
    328         thread->flags = flags;
    329         thread->state = Entering;
    330        
    331         timeout_initialize(&thread->sleep_timeout);
    332         thread->sleep_interruptible = false;
    333         thread->sleep_queue = NULL;
    334         thread->timeout_pending = false;
    335        
    336         thread->in_copy_from_uspace = false;
    337         thread->in_copy_to_uspace = false;
    338        
    339         thread->interrupted = false;
    340         thread->detached = false;
    341         waitq_initialize(&thread->join_wq);
    342        
    343         thread->task = task;
    344        
    345         thread->fpu_context_exists = 0;
    346         thread->fpu_context_engaged = 0;
    347        
    348         avltree_node_initialize(&thread->threads_tree_node);
    349         thread->threads_tree_node.key = (uintptr_t) thread;
    350        
     308        context_save(&t->saved_context);
     309        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
     310            THREAD_STACK_SIZE);
     311       
     312        the_initialize((the_t *) t->kstack);
     313       
     314        ipl = interrupts_disable();
     315        t->saved_context.ipl = interrupts_read();
     316        interrupts_restore(ipl);
     317       
     318        memcpy(t->name, name, THREAD_NAME_BUFLEN);
     319        t->name[THREAD_NAME_BUFLEN - 1] = 0;
     320       
     321        t->thread_code = func;
     322        t->thread_arg = arg;
     323        t->ticks = -1;
     324        t->cycles = 0;
     325        t->uncounted = uncounted;
     326        t->priority = -1;               /* start in rq[0] */
     327        t->cpu = NULL;
     328        t->flags = flags;
     329        t->state = Entering;
     330        t->call_me = NULL;
     331        t->call_me_with = NULL;
     332       
     333        timeout_initialize(&t->sleep_timeout);
     334        t->sleep_interruptible = false;
     335        t->sleep_queue = NULL;
     336        t->timeout_pending = 0;
     337
     338        t->in_copy_from_uspace = false;
     339        t->in_copy_to_uspace = false;
     340
     341        t->interrupted = false;
     342        t->detached = false;
     343        waitq_initialize(&t->join_wq);
     344       
     345        t->rwlock_holder_type = RWLOCK_NONE;
     346               
     347        t->task = task;
     348       
     349        t->fpu_context_exists = 0;
     350        t->fpu_context_engaged = 0;
     351
     352        avltree_node_initialize(&t->threads_tree_node);
     353        t->threads_tree_node.key = (uintptr_t) t;
     354
    351355#ifdef CONFIG_UDEBUG
    352356        /* Init debugging stuff */
    353         udebug_thread_initialize(&thread->udebug);
    354 #endif
    355        
    356         /* Might depend on previous initialization */
    357         thread_create_arch(thread);
    358        
     357        udebug_thread_initialize(&t->udebug);
     358#endif
     359
     360        /* might depend on previous initialization */
     361        thread_create_arch(t); 
     362
    359363        if (!(flags & THREAD_FLAG_NOATTACH))
    360                 thread_attach(thread, task);
    361        
    362         return thread;
     364                thread_attach(t, task);
     365
     366        return t;
    363367}
    364368
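
    One subtle difference inside thread_create(): the irq_spinlock variant copies
    the name with str_cpy(thread->name, THREAD_NAME_BUFLEN, name), while the other
    issues a fixed-length memcpy of the full buffer and then forces a terminator,
    which reads THREAD_NAME_BUFLEN bytes from `name` even when the source string is
    shorter. A sketch of the bounded-copy semantics (my model of str_cpy, not the
    HelenOS implementation, which is UTF-8 aware):

    #include <stddef.h>

    /* Copy at most size - 1 bytes, always NUL-terminate, and never read
     * past the end of a short source string. */
    static void bounded_str_cpy(char *dst, size_t size, const char *src)
    {
            size_t i = 0;
            while (i + 1 < size && src[i] != '\0') {
                    dst[i] = src[i];
                    i++;
            }
            if (size > 0)
                    dst[i] = '\0';
    }
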
     
    367371 * Detach thread from all queues, cpus etc. and destroy it.
    368372 *
    369  * @param thread  Thread to be destroyed.
    370  * @param irq_res Indicate whether it should unlock thread->lock
    371  *                in interrupts-restore mode.
    372  *
    373  */
    374 void thread_destroy(thread_t *thread, bool irq_res)
    375 {
    376         ASSERT(irq_spinlock_locked(&thread->lock));
    377         ASSERT((thread->state == Exiting) || (thread->state == Lingering));
    378         ASSERT(thread->task);
    379         ASSERT(thread->cpu);
    380        
    381         irq_spinlock_lock(&thread->cpu->lock, false);
    382         if (thread->cpu->fpu_owner == thread)
    383                 thread->cpu->fpu_owner = NULL;
    384         irq_spinlock_unlock(&thread->cpu->lock, false);
    385        
    386         irq_spinlock_pass(&thread->lock, &threads_lock);
    387        
    388         avltree_delete(&threads_tree, &thread->threads_tree_node);
    389        
    390         irq_spinlock_pass(&threads_lock, &thread->task->lock);
    391        
     373 * Assume thread->lock is held!!
     374 */
     375void thread_destroy(thread_t *t)
     376{
     377        ASSERT(t->state == Exiting || t->state == Lingering);
     378        ASSERT(t->task);
     379        ASSERT(t->cpu);
     380
     381        spinlock_lock(&t->cpu->lock);
     382        if (t->cpu->fpu_owner == t)
     383                t->cpu->fpu_owner = NULL;
     384        spinlock_unlock(&t->cpu->lock);
     385
     386        spinlock_unlock(&t->lock);
     387
     388        spinlock_lock(&threads_lock);
     389        avltree_delete(&threads_tree, &t->threads_tree_node);
     390        spinlock_unlock(&threads_lock);
     391
    392392        /*
    393393         * Detach from the containing task.
    394394         */
    395         list_remove(&thread->th_link);
    396         irq_spinlock_unlock(&thread->task->lock, irq_res);
    397        
     395        spinlock_lock(&t->task->lock);
     396        list_remove(&t->th_link);
     397        spinlock_unlock(&t->task->lock);       
     398
    398399        /*
    399          * Drop the reference to the containing task.
     400         * t is guaranteed to be the very last thread of its task.
     401         * It is safe to destroy the task.
    400402         */
    401         task_release(thread->task);
    402         slab_free(thread_slab, thread);
     403        if (atomic_predec(&t->task->refcount) == 0)
     404                task_destroy(t->task);
     405       
     406        slab_free(thread_slab, t);
    403407}
    404408
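
    The two thread_destroy() variants drop the task reference differently: one
    behind a task_release() helper, the other open-coded with atomic_predec(),
    destroying the task once the last reference is gone. The underlying
    drop-and-maybe-destroy pattern, sketched with C11 atomics as a stand-in for the
    kernel's atomic primitives:

    #include <stdatomic.h>

    typedef struct task {
            atomic_long refcount;
            /* ... */
    } task_t;

    static void task_destroy(task_t *task) { (void) task; /* stub: free resources */ }

    /* Drop one reference; whoever takes the count to zero holds the last
     * one, so only that caller may destroy the task. */
    static void task_release(task_t *task)
    {
            if (atomic_fetch_sub(&task->refcount, 1) == 1)
                    task_destroy(task);
    }
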
     
    408412 * threads_tree.
    409413 *
    410  * @param t    Thread to be attached to the task.
    411  * @param task Task to which the thread is to be attached.
    412  *
    413  */
    414 void thread_attach(thread_t *thread, task_t *task)
    415 {
     414 * @param t     Thread to be attached to the task.
     415 * @param task  Task to which the thread is to be attached.
     416 */
     417void thread_attach(thread_t *t, task_t *task)
     418{
     419        ipl_t ipl;
     420
    416421        /*
    417422         * Attach to the specified task.
    418423         */
    419         irq_spinlock_lock(&task->lock, true);
    420        
    421         /* Hold a reference to the task. */
    422         task_hold(task);
    423        
     424        ipl = interrupts_disable();
     425        spinlock_lock(&task->lock);
     426
     427        atomic_inc(&task->refcount);
     428
    424429        /* Must not count kbox thread into lifecount */
    425         if (thread->flags & THREAD_FLAG_USPACE)
     430        if (t->flags & THREAD_FLAG_USPACE)
    426431                atomic_inc(&task->lifecount);
    427        
    428         list_append(&thread->th_link, &task->th_head);
    429        
    430         irq_spinlock_pass(&task->lock, &threads_lock);
    431        
     432
     433        list_append(&t->th_link, &task->th_head);
     434        spinlock_unlock(&task->lock);
     435
    432436        /*
    433437         * Register this thread in the system-wide list.
    434438         */
    435         avltree_insert(&threads_tree, &thread->threads_tree_node);
    436         irq_spinlock_unlock(&threads_lock, true);
     439        spinlock_lock(&threads_lock);
     440        avltree_insert(&threads_tree, &t->threads_tree_node);
     441        spinlock_unlock(&threads_lock);
     442       
     443        interrupts_restore(ipl);
    437444}
    438445
    439446/** Terminate thread.
    440447 *
    441  * End current thread execution and switch it to the exiting state.
    442  * All pending timeouts are executed.
    443  *
     448 * End current thread execution and switch it to the exiting state. All pending
     449 * timeouts are executed.
    444450 */
    445451void thread_exit(void)
    446452{
     453        ipl_t ipl;
     454
    447455        if (THREAD->flags & THREAD_FLAG_USPACE) {
    448456#ifdef CONFIG_UDEBUG
    449457                /* Generate udebug THREAD_E event */
    450458                udebug_thread_e_event();
    451 
    452                 /*
    453                  * This thread will not execute any code or system calls from
    454                  * now on.
    455                  */
    456                 udebug_stoppable_begin();
    457459#endif
    458460                if (atomic_predec(&TASK->lifecount) == 0) {
     
    463465                         * can only be created by threads of the same task.
    464466                         * We are safe to perform cleanup.
    465                          *
    466467                         */
    467468                        ipc_cleanup();
     
    470471                }
    471472        }
    472        
     473
    473474restart:
    474         irq_spinlock_lock(&THREAD->lock, true);
    475         if (THREAD->timeout_pending) {
    476                 /* Busy waiting for timeouts in progress */
    477                 irq_spinlock_unlock(&THREAD->lock, true);
     475        ipl = interrupts_disable();
     476        spinlock_lock(&THREAD->lock);
     477        if (THREAD->timeout_pending) {
     478                /* busy waiting for timeouts in progress */
     479                spinlock_unlock(&THREAD->lock);
     480                interrupts_restore(ipl);
    478481                goto restart;
    479482        }
    480483       
    481484        THREAD->state = Exiting;
    482         irq_spinlock_unlock(&THREAD->lock, true);
    483        
     485        spinlock_unlock(&THREAD->lock);
    484486        scheduler();
    485        
     487
    486488        /* Not reached */
    487         while (true);
    488 }
     489        while (1)
     490                ;
     491}
     492
    489493
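
    The restart loop in thread_exit() is a busy-wait: while a timeout handler still
    holds timeout_pending, the thread drops its lock (letting the handler finish)
    and retries, proceeding to the Exiting state only once no timeouts remain
    pending. The shape of that loop, reduced to stubs:

    #include <stdbool.h>

    typedef struct { volatile int locked; } lock_t;        /* stub */
    static void lock(lock_t *l) { l->locked = 1; }
    static void unlock(lock_t *l) { l->locked = 0; }

    /* Retry until the flag observed under the lock is clear; each failed
     * attempt releases the lock so the other side can make progress.
     * Returns with the lock held, like the code above after restart:. */
    static void wait_until_clear(lock_t *l, volatile bool *pending)
    {
            for (;;) {
                    lock(l);
                    if (!*pending)
                            return;
                    unlock(l);
            }
    }
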
    490494/** Thread sleep
     
    501505        while (sec > 0) {
    502506                uint32_t period = (sec > 1000) ? 1000 : sec;
    503                
     507       
    504508                thread_usleep(period * 1000000);
    505509                sec -= period;
     
    509513/** Wait for another thread to exit.
    510514 *
    511  * @param thread Thread to join on exit.
    512  * @param usec   Timeout in microseconds.
    513  * @param flags  Mode of operation.
     515 * @param t Thread to join on exit.
     516 * @param usec Timeout in microseconds.
     517 * @param flags Mode of operation.
    514518 *
    515519 * @return An error code from errno.h or an error code from synch.h.
    516  *
    517  */
    518 int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    519 {
    520         if (thread == THREAD)
     520 */
     521int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
     522{
     523        ipl_t ipl;
     524        int rc;
     525
     526        if (t == THREAD)
    521527                return EINVAL;
    522        
     528
    523529        /*
    524530         * Since thread join can only be called once on an undetached thread,
     
    526532         */
    527533       
    528         irq_spinlock_lock(&thread->lock, true);
    529         ASSERT(!thread->detached);
    530         irq_spinlock_unlock(&thread->lock, true);
    531        
    532         return waitq_sleep_timeout(&thread->join_wq, usec, flags);
     534        ipl = interrupts_disable();
     535        spinlock_lock(&t->lock);
     536        ASSERT(!t->detached);
     537        spinlock_unlock(&t->lock);
     538        interrupts_restore(ipl);
     539       
     540        rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
     541       
     542        return rc;     
    533543}
    534544
     
    538548 * state, deallocate its resources.
    539549 *
    540  * @param thread Thread to be detached.
    541  *
    542  */
    543 void thread_detach(thread_t *thread)
    544 {
     550 * @param t Thread to be detached.
     551 */
     552void thread_detach(thread_t *t)
     553{
     554        ipl_t ipl;
     555
    545556        /*
    546557         * Since the thread is expected not to be already detached,
    547558         * pointer to it must be still valid.
    548559         */
    549         irq_spinlock_lock(&thread->lock, true);
    550         ASSERT(!thread->detached);
    551        
    552         if (thread->state == Lingering) {
    553                 /*
    554                  * Unlock &thread->lock and restore
    555                  * interrupts in thread_destroy().
    556                  */
    557                 thread_destroy(thread, true);
     560        ipl = interrupts_disable();
     561        spinlock_lock(&t->lock);
     562        ASSERT(!t->detached);
     563        if (t->state == Lingering) {
     564                thread_destroy(t);      /* unlocks &t->lock */
     565                interrupts_restore(ipl);
    558566                return;
    559567        } else {
    560                 thread->detached = true;
     568                t->detached = true;
    561569        }
    562        
    563         irq_spinlock_unlock(&thread->lock, true);
     570        spinlock_unlock(&t->lock);
     571        interrupts_restore(ipl);
    564572}
    565573
     
    580588}
    581589
     590/** Register thread out-of-context invocation
     591 *
     592 * Register a function and its argument to be executed
     593 * on next context switch to the current thread.
     594 *
     595 * @param call_me      Out-of-context function.
     596 * @param call_me_with Out-of-context function argument.
     597 *
     598 */
     599void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
     600{
     601        ipl_t ipl;
     602       
     603        ipl = interrupts_disable();
     604        spinlock_lock(&THREAD->lock);
     605        THREAD->call_me = call_me;
     606        THREAD->call_me_with = call_me_with;
     607        spinlock_unlock(&THREAD->lock);
     608        interrupts_restore(ipl);
     609}
     610
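
    thread_register_call_me() exists only in the plain-spinlock revision: it arms a
    function plus argument that the scheduler is expected to invoke on the next
    context switch into the thread. A minimal model of the consuming side
    (hypothetical helper; the diff shows only the registration half):

    typedef struct {
            void (*call_me)(void *);
            void *call_me_with;
    } call_me_slot_t;

    /* Scheduler-side counterpart: fire the registered function once and
     * disarm the slot so it does not run again on later switches. */
    static void consume_call_me(call_me_slot_t *slot)
    {
            if (slot->call_me) {
                    void (*f)(void *) = slot->call_me;
                    void *arg = slot->call_me_with;
                    slot->call_me = 0;
                    slot->call_me_with = 0;
                    f(arg);
            }
    }
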
    582611static bool thread_walker(avltree_node_t *node, void *arg)
    583612{
    584         bool *additional = (bool *) arg;
    585         thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
    586        
    587         uint64_t ucycles, kcycles;
    588         char usuffix, ksuffix;
    589         order_suffix(thread->ucycles, &ucycles, &usuffix);
    590         order_suffix(thread->kcycles, &kcycles, &ksuffix);
    591        
     613        thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
     614       
     615        uint64_t cycles;
     616        char suffix;
     617        order(t->cycles, &cycles, &suffix);
     618
    592619#ifdef __32_BITS__
    593         if (*additional)
    594                 printf("%-8" PRIu64" %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
    595                     thread->tid, thread->kstack, ucycles, usuffix,
    596                     kcycles, ksuffix);
     620        printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ",
     621            t->tid, t->name, t, thread_states[t->state], t->task,
     622        t->task->context, t->thread_code, t->kstack, cycles, suffix);
     623#endif
     624
     625#ifdef __64_BITS__
     626        printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ",
     627            t->tid, t->name, t, thread_states[t->state], t->task,
     628        t->task->context, t->thread_code, t->kstack, cycles, suffix);
     629#endif
     630                       
     631        if (t->cpu)
     632                printf("%-4u", t->cpu->id);
    597633        else
    598                 printf("%-8" PRIu64" %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",
    599                     thread->tid, thread->name, thread, thread_states[thread->state],
    600                     thread->task, thread->task->context, thread->thread_code);
    601 #endif
    602        
     634                printf("none");
     635                       
     636        if (t->state == Sleeping) {
     637#ifdef __32_BITS__
     638                printf(" %10p", t->sleep_queue);
     639#endif
     640
    603641#ifdef __64_BITS__
    604         if (*additional)
    605                 printf("%-8" PRIu64" %18p %18p\n"
    606                     "         %9" PRIu64 "%c %9" PRIu64 "%c ",
    607                     thread->tid, thread->thread_code, thread->kstack,
    608                     ucycles, usuffix, kcycles, ksuffix);
    609         else
    610                 printf("%-8" PRIu64" %-14s %18p %-8s %18p %-5" PRIu32 "\n",
    611                     thread->tid, thread->name, thread, thread_states[thread->state],
    612                     thread->task, thread->task->context);
    613 #endif
    614        
    615         if (*additional) {
    616                 if (thread->cpu)
    617                         printf("%-5u", thread->cpu->id);
    618                 else
    619                         printf("none ");
    620                
    621                 if (thread->state == Sleeping) {
    622 #ifdef __32_BITS__
    623                         printf(" %10p", thread->sleep_queue);
    624 #endif
     642                printf(" %18p", t->sleep_queue);
     643#endif
     644        }
    625645                       
     646        printf("\n");
     647
     648        return true;
     649}
     650
     651/** Print list of threads debug info */
     652void thread_print_list(void)
     653{
     654        ipl_t ipl;
     655       
     656        /* Messing with thread structures, avoid deadlock */
     657        ipl = interrupts_disable();
     658        spinlock_lock(&threads_lock);
     659
     660#ifdef __32_BITS__     
     661        printf("tid    name       address    state    task       "
     662                "ctx code       stack      cycles     cpu  "
     663                "waitqueue\n");
     664        printf("------ ---------- ---------- -------- ---------- "
     665                "--- ---------- ---------- ---------- ---- "
     666                "----------\n");
     667#endif
     668
    626669#ifdef __64_BITS__
    627                         printf(" %18p", thread->sleep_queue);
    628 #endif
    629                 }
    630                
    631                 printf("\n");
    632         }
    633        
    634         return true;
    635 }
    636 
    637 /** Print list of threads debug info
    638  *
    639  * @param additional Print additional information.
    640  *
    641  */
    642 void thread_print_list(bool additional)
    643 {
    644         /* Messing with thread structures, avoid deadlock */
    645         irq_spinlock_lock(&threads_lock, true);
    646        
    647 #ifdef __32_BITS__
    648         if (additional)
    649                 printf("[id    ] [stack   ] [ucycles ] [kcycles ] [cpu]"
    650                     " [waitqueue]\n");
    651         else
    652                 printf("[id    ] [name        ] [address ] [state ] [task    ]"
    653                     " [ctx] [code    ]\n");
    654 #endif
    655        
    656 #ifdef __64_BITS__
    657         if (additional) {
    658                 printf("[id    ] [code            ] [stack           ]\n"
    659                     "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
    660         } else
    661                 printf("[id    ] [name        ] [address         ] [state ]"
    662                     " [task            ] [ctx]\n");
    663 #endif
    664        
    665         avltree_walk(&threads_tree, thread_walker, &additional);
    666        
    667         irq_spinlock_unlock(&threads_lock, true);
     670        printf("tid    name       address            state    task               "
     671                "ctx code               stack              cycles     cpu  "
     672                "waitqueue\n");
     673        printf("------ ---------- ------------------ -------- ------------------ "
     674                "--- ------------------ ------------------ ---------- ---- "
     675                "------------------\n");
     676#endif
     677
     678        avltree_walk(&threads_tree, thread_walker, NULL);
     679
     680        spinlock_unlock(&threads_lock);
     681        interrupts_restore(ipl);
    668682}
    669683
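
    Before printing, both listings compress the 64-bit cycle counters into a small
    number plus a magnitude suffix: order() for the unified counter, order_suffix()
    applied twice for the ucycles/kcycles split. A sketch of that reduction (the
    exact thresholds and suffix letters are my assumption):

    #include <stdint.h>

    /* Scale a raw count down by powers of 1000 and report the magnitude,
     * e.g. 123456789 -> 123 with suffix 'M'. */
    static void order_model(uint64_t val, uint64_t *scaled, char *suffix)
    {
            static const char suffixes[] = { ' ', 'k', 'M', 'G', 'T', 'P', 'E' };
            unsigned int i = 0;
            while (val >= 10000 && i < sizeof(suffixes) - 1) {
                    val /= 1000;
                    i++;
            }
            *scaled = val;
            *suffix = suffixes[i];
    }
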
     
    673687 * interrupts must be already disabled.
    674688 *
    675  * @param thread Pointer to thread.
     689 * @param t Pointer to thread.
    676690 *
    677691 * @return True if thread t is known to the system, false otherwise.
    678  *
    679  */
    680 bool thread_exists(thread_t *thread)
    681 {
    682         ASSERT(interrupts_disabled());
    683         ASSERT(irq_spinlock_locked(&threads_lock));
    684 
    685         avltree_node_t *node =
    686             avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
     692 */
     693bool thread_exists(thread_t *t)
     694{
     695        avltree_node_t *node;
     696
     697        node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
    687698       
    688699        return node != NULL;
     
    694705 * interrupts must be already disabled.
    695706 *
    696  * @param user True to update user accounting, false for kernel.
    697  *
    698  */
    699 void thread_update_accounting(bool user)
     707 */
     708void thread_update_accounting(void)
    700709{
    701710        uint64_t time = get_cycle();
    702 
    703         ASSERT(interrupts_disabled());
    704         ASSERT(irq_spinlock_locked(&THREAD->lock));
    705        
    706         if (user)
    707                 THREAD->ucycles += time - THREAD->last_cycle;
    708         else
    709                 THREAD->kcycles += time - THREAD->last_cycle;
    710        
     711        THREAD->cycles += time - THREAD->last_cycle;
    711712        THREAD->last_cycle = time;
    712713}
    713 
    714 static bool thread_search_walker(avltree_node_t *node, void *arg)
    715 {
    716         thread_t *thread =
    717             (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
    718         thread_iterator_t *iterator = (thread_iterator_t *) arg;
    719        
    720         if (thread->tid == iterator->thread_id) {
    721                 iterator->thread = thread;
    722                 return false;
    723         }
    724        
    725         return true;
    726 }
    727 
    728 /** Find thread structure corresponding to thread ID.
    729  *
    730  * The threads_lock must be already held by the caller of this function and
    731  * interrupts must be disabled.
    732  *
    733  * @param id Thread ID.
    734  *
    735  * @return Thread structure address or NULL if there is no such thread ID.
    736  *
    737  */
    738 thread_t *thread_find_by_id(thread_id_t thread_id)
    739 {
    740         ASSERT(interrupts_disabled());
    741         ASSERT(irq_spinlock_locked(&threads_lock));
    742 
    743         thread_iterator_t iterator;
    744        
    745         iterator.thread_id = thread_id;
    746         iterator.thread = NULL;
    747        
    748         avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
    749        
    750         return iterator.thread;
    751 }
    752 
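
    The removed thread_find_by_id() shows the walker protocol used throughout this
    file: avltree_walk() calls the visitor for each node and stops as soon as the
    visitor returns false, so a search smuggles its query and result through the
    void *arg. The shape of that pattern, reduced to a linked list so it compiles
    standalone (hypothetical names; the real tree walk lives in adt/avl.c):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct node {
            uint64_t tid;
            struct node *next;
    } node_t;

    typedef struct {
            uint64_t wanted;     /* query in */
            node_t *found;       /* result out */
    } iterator_t;

    /* Visitor: returning false aborts the walk early, exactly like
     * thread_search_walker() above. */
    static bool search_visitor(node_t *n, void *arg)
    {
            iterator_t *it = arg;
            if (n->tid == it->wanted) {
                    it->found = n;
                    return false;
            }
            return true;
    }

    static void walk(node_t *head, bool (*visit)(node_t *, void *), void *arg)
    {
            for (node_t *n = head; n != NULL; n = n->next)
                    if (!visit(n, arg))
                            break;
    }
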
    753714
    754715/** Process syscall to create new thread.
     
    758719    size_t name_len, thread_id_t *uspace_thread_id)
    759720{
     721        thread_t *t;
     722        char namebuf[THREAD_NAME_BUFLEN];
     723        uspace_arg_t *kernel_uarg;
     724        int rc;
     725
    760726        if (name_len > THREAD_NAME_BUFLEN - 1)
    761727                name_len = THREAD_NAME_BUFLEN - 1;
    762        
    763         char namebuf[THREAD_NAME_BUFLEN];
    764         int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     728
     729        rc = copy_from_uspace(namebuf, uspace_name, name_len);
    765730        if (rc != 0)
    766731                return (unative_t) rc;
    767        
     732
    768733        namebuf[name_len] = 0;
    769        
     734
    770735        /*
    771736         * In case of failure, kernel_uarg will be deallocated in this function.
    772737         * In case of success, kernel_uarg will be freed in uinit().
    773          *
    774738         */
    775         uspace_arg_t *kernel_uarg =
    776             (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
     739        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    777740       
    778741        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
     
    781744                return (unative_t) rc;
    782745        }
    783        
    784         thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
     746
     747        t = thread_create(uinit, kernel_uarg, TASK,
    785748            THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
    786         if (thread) {
     749        if (t) {
    787750                if (uspace_thread_id != NULL) {
    788                         rc = copy_to_uspace(uspace_thread_id, &thread->tid,
    789                             sizeof(thread->tid));
     751                        int rc;
     752
     753                        rc = copy_to_uspace(uspace_thread_id, &t->tid,
     754                            sizeof(t->tid));
    790755                        if (rc != 0) {
    791756                                /*
     
    793758                                 * has already been created. We need to undo its
    794759                                 * creation now.
    795                                  *
    796760                                 */
    797                                
     761
    798762                                /*
    799763                                 * The new thread structure is initialized, but
     
    801765                                 * We can safely deallocate it.
    802766                                 */
    803                                 slab_free(thread_slab, thread);
    804                                 free(kernel_uarg);
    805                                
     767                                slab_free(thread_slab, t);
     768                                free(kernel_uarg);
     769
    806770                                return (unative_t) rc;
    807771                         }
    808772                }
    809                
    810773#ifdef CONFIG_UDEBUG
    811774                /*
     
    815778                 * THREAD_B events for threads that already existed
    816779                 * and could be detected with THREAD_READ before.
    817                  *
    818780                 */
    819                 udebug_thread_b_event_attach(thread, TASK);
     781                udebug_thread_b_event_attach(t, TASK);
    820782#else
    821                 thread_attach(thread, TASK);
    822 #endif
    823                 thread_ready(thread);
    824                
     783                thread_attach(t, TASK);
     784#endif
     785                thread_ready(t);
     786
    825787                return 0;
    826788        } else
    827789                free(kernel_uarg);
    828        
     790
    829791        return (unative_t) ENOMEM;
    830792}
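
    The error handling in sys_thread_create() leans on THREAD_FLAG_NOATTACH: the
    thread is built but not attached (published) until the tid has been copied out,
    so a failed copy_to_uspace() can be undone with a plain slab_free(), with no
    unlinking needed. The pattern in the abstract (hypothetical names):

    typedef struct { int published; } object_t;

    /* Build unpublished, try the fallible step, publish only on success;
     * failure before publication needs no teardown beyond freeing. */
    static int create_then_publish(object_t *obj, int (*risky_step)(object_t *))
    {
            obj->published = 0;
            if (risky_step(obj) != 0)
                    return -1;      /* never became visible; caller just frees */
            obj->published = 1;     /* from here on, teardown must unlink */
            return 0;
    }
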
     
    836798{
    837799        thread_exit();
    838        
    839800        /* Unreachable */
    840801        return 0;
     
    847808 *
    848809 * @return 0 on success or an error code from @ref errno.h.
    849  *
    850810 */
    851811unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
     
    854814         * No need to acquire lock on THREAD because tid
    855815         * remains constant for the lifespan of the thread.
    856          *
    857816         */
    858817        return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,