Changeset da1bafb in mainline for kernel/generic/src/proc/thread.c


Timestamp:
2010-05-24T18:57:31Z (14 years ago)
Author:
Martin Decky <martin@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
0095368
Parents:
666f492
Message:

major code revision

  • replace spinlocks taken with interrupts disabled with irq_spinlocks (see the sketch after this list)
  • change spacing (not indentation) to be tab-size independent
  • use unsigned integer types where appropriate (especially bit flags)
  • visual separation
  • remove argument names in function prototypes
  • string changes
  • correct some formatting directives
  • replace various cryptic single-character variables (t, a, m, c, b, etc.) with proper identifiers (thread, task, timeout, as, itm, itc, etc.)
  • unify some assembler constructs
  • unused page table levels are now optimized out at compile time
  • replace several ints (with boolean semantics) with bools
  • use specifically sized types instead of generic types where appropriate (size_t, uint32_t, btree_key_t)
  • improve comments
  • split asserts with conjunction into multiple independent asserts
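
The first item is the core of the revision: in the old code every caller paired a spinlock with manual interrupt-state bookkeeping, while the new irq_spinlock API folds that bookkeeping into the lock itself. A minimal before/after sketch, inferred from the hunks below (the boolean argument, judging by its use in cushion() and thread_destroy(), selects whether the lock should handle the interrupt state itself or assume the caller already has):

    /* Before: interrupt state managed by hand around the lock. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    /* ... critical section ... */
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);

    /* After: pass true to let the lock disable interrupts on lock
       and restore them on unlock; pass false when the caller has
       already disabled them. */
    irq_spinlock_lock(&THREAD->lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&THREAD->lock, true);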
File:
1 edited

  • kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c (r666f492)
+++ kernel/generic/src/proc/thread.c (rda1bafb)
 /**
  * @file
- * @brief       Thread management functions.
+ * @brief Thread management functions.
  */
 
…
  *
  * For locking rules, see declaration thereof.
- */
-SPINLOCK_INITIALIZE(threads_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
…
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- */
-avltree_t threads_tree;
-
-SPINLOCK_INITIALIZE(tidlock);
-thread_id_t last_tid = 0;
+ *
+ */
+avltree_t threads_tree;
+
+IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
+static thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
+
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
…
         void *arg = THREAD->thread_arg;
         THREAD->last_cycle = get_cycle();
-
+
         /* This is where each thread wakes up after its creation */
-        spinlock_unlock(&THREAD->lock);
+        irq_spinlock_unlock(&THREAD->lock, false);
         interrupts_enable();
-
+
         f(arg);
 
         /* Accumulate accounting to the task */
-        ipl_t ipl = interrupts_disable();
-
-        spinlock_lock(&THREAD->lock);
+        irq_spinlock_lock(&THREAD->lock, true);
         if (!THREAD->uncounted) {
                 thread_update_accounting(true);
…
                 uint64_t kcycles = THREAD->kcycles;
                 THREAD->kcycles = 0;
-
-                spinlock_unlock(&THREAD->lock);
 
-                spinlock_lock(&TASK->lock);
+                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
                 TASK->ucycles += ucycles;
                 TASK->kcycles += kcycles;
-                spinlock_unlock(&TASK->lock);
+                irq_spinlock_unlock(&TASK->lock, true);
         } else
-                spinlock_unlock(&THREAD->lock);
-
-        interrupts_restore(ipl);
+                irq_spinlock_unlock(&THREAD->lock, true);
 
         thread_exit();
-        /* not reached */
-}
-
-/** Initialization and allocation for thread_t structure */
-static int thr_constructor(void *obj, int kmflags)
-{
-        thread_t *t = (thread_t *) obj;
-
-        spinlock_initialize(&t->lock, "thread_t_lock");
-        link_initialize(&t->rq_link);
-        link_initialize(&t->wq_link);
-        link_initialize(&t->th_link);
-
+
+        /* Not reached */
+}
+
+/** Initialization and allocation for thread_t structure
+ *
+ */
+static int thr_constructor(void *obj, unsigned int kmflags)
+{
+        thread_t *thread = (thread_t *) obj;
+
+        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+        link_initialize(&thread->rq_link);
+        link_initialize(&thread->wq_link);
+        link_initialize(&thread->th_link);
+
         /* call the architecture-specific part of the constructor */
-        thr_constructor_arch(t);
+        thr_constructor_arch(thread);
 
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-        t->saved_fpu_context = NULL;
-#else
-        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-        if (!t->saved_fpu_context)
+        thread->saved_fpu_context = NULL;
+#else /* CONFIG_FPU_LAZY */
+        thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+        if (!thread->saved_fpu_context)
                 return -1;
-#endif
-#endif
-
-        t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-        if (!t->kstack) {
+#endif /* CONFIG_FPU_LAZY */
+#endif /* CONFIG_FPU */
+
+        thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+        if (!thread->kstack) {
 #ifdef CONFIG_FPU
-                if (t->saved_fpu_context)
-                        slab_free(fpu_context_slab, t->saved_fpu_context);
+                if (thread->saved_fpu_context)
+                        slab_free(fpu_context_slab, thread->saved_fpu_context);
 #endif
                 return -1;
         }
-
+
 #ifdef CONFIG_UDEBUG
-        mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
-#endif
-
+        mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
+#endif
+
         return 0;
 }
 
 /** Destruction of thread_t object */
-static int thr_destructor(void *obj)
-{
-        thread_t *t = (thread_t *) obj;
-
+static size_t thr_destructor(void *obj)
+{
+        thread_t *thread = (thread_t *) obj;
+
         /* call the architecture-specific part of the destructor */
-        thr_destructor_arch(t);
-
-        frame_free(KA2PA(t->kstack));
+        thr_destructor_arch(thread);
+
+        frame_free(KA2PA(thread->kstack));
+
 #ifdef CONFIG_FPU
-        if (t->saved_fpu_context)
-                slab_free(fpu_context_slab, t->saved_fpu_context);
-#endif
-        return 1; /* One page freed */
+        if (thread->saved_fpu_context)
+                slab_free(fpu_context_slab, thread->saved_fpu_context);
+#endif
+
+        return 1;  /* One page freed */
 }
 
…
 {
         THREAD = NULL;
+
         atomic_set(&nrdy, 0);
         thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
             thr_constructor, thr_destructor, 0);
-
+
 #ifdef CONFIG_FPU
         fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
             FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
-
+
         avltree_create(&threads_tree);
 }
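
The thr_constructor()/thr_destructor() pair changed above is registered with the thread_slab cache created in this hunk. Following the conventions visible in the code (constructor returns 0 on success and -1 on failure; the destructor, now returning size_t, reports how many frames it handed back), a hypothetical cache would be wired up like this (foo_t and its members are illustrative only, not part of this changeset):

    /* Hypothetical object cache, mirroring the thread_slab setup. */
    static int foo_constructor(void *obj, unsigned int kmflags)
    {
            foo_t *foo = (foo_t *) obj;
            link_initialize(&foo->link);  /* one-time, reusable init */
            return 0;                     /* 0 = success, -1 = failure */
    }

    static size_t foo_destructor(void *obj)
    {
            return 0;  /* number of frames freed on behalf of the allocator */
    }

    foo_slab = slab_cache_create("foo_slab", sizeof(foo_t), 0,
        foo_constructor, foo_destructor, 0);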
…
 /** Make thread ready
  *
- * Switch thread t to the ready state.
+ * Switch thread to the ready state.
  *
  * @param t Thread to make ready.
  *
  */
-void thread_ready(thread_t *t)
-{
-        cpu_t *cpu;
-        runq_t *r;
-        ipl_t ipl;
-        int i, avg;
-
-        ipl = interrupts_disable();
-
-        spinlock_lock(&t->lock);
-
-        ASSERT(!(t->state == Ready));
-
-        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
-
-        cpu = CPU;
-        if (t->flags & THREAD_FLAG_WIRED) {
-                ASSERT(t->cpu != NULL);
-                cpu = t->cpu;
+void thread_ready(thread_t *thread)
+{
+        irq_spinlock_lock(&thread->lock, true);
+
+        ASSERT(!(thread->state == Ready));
+
+        int i = (thread->priority < RQ_COUNT - 1)
+            ? ++thread->priority : thread->priority;
+
+        cpu_t *cpu = CPU;
+        if (thread->flags & THREAD_FLAG_WIRED) {
+                ASSERT(thread->cpu != NULL);
+                cpu = thread->cpu;
         }
-        t->state = Ready;
-        spinlock_unlock(&t->lock);
+        thread->state = Ready;
+
+        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
 
         /*
-         * Append t to respective ready queue on respective processor.
+         * Append thread to respective ready queue
+         * on respective processor.
          */
-        r = &cpu->rq[i];
-        spinlock_lock(&r->lock);
-        list_append(&t->rq_link, &r->rq_head);
-        r->n++;
-        spinlock_unlock(&r->lock);
-
+
+        list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+        cpu->rq[i].n++;
+        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+
         atomic_inc(&nrdy);
-        // FIXME: Why is the avg value never read?
-        avg = atomic_get(&nrdy) / config.cpu_active;
+        // FIXME: Why is the avg value not used
+        // avg = atomic_get(&nrdy) / config.cpu_active;
         atomic_inc(&cpu->nrdy);
-
+}
+
+/** Create new thread
+ *
+ * Create a new thread.
+ *
+ * @param func      Thread's implementing function.
+ * @param arg       Thread's implementing function argument.
+ * @param task      Task to which the thread belongs. The caller must
+ *                  guarantee that the task won't cease to exist during the
+ *                  call. The task's lock may not be held.
+ * @param flags     Thread flags.
+ * @param name      Symbolic name (a copy is made).
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *                  accounting.
+ *
+ * @return New thread's structure on success, NULL on failure.
+ *
+ */
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    unsigned int flags, const char *name, bool uncounted)
+{
+        thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+        if (!thread)
+                return NULL;
+
+        /* Not needed, but good for debugging */
+        memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+
+        irq_spinlock_lock(&tidlock, true);
+        thread->tid = ++last_tid;
+        irq_spinlock_unlock(&tidlock, true);
+
+        context_save(&thread->saved_context);
+        context_set(&thread->saved_context, FADDR(cushion),
+            (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
+
+        the_initialize((the_t *) thread->kstack);
+
+        ipl_t ipl = interrupts_disable();
+        thread->saved_context.ipl = interrupts_read();
         interrupts_restore(ipl);
-}
-
-/** Create new thread
- *
- * Create a new thread.
- *
- * @param func          Thread's implementing function.
- * @param arg           Thread's implementing function argument.
- * @param task          Task to which the thread belongs. The caller must
- *                      guarantee that the task won't cease to exist during the
- *                      call. The task's lock may not be held.
- * @param flags         Thread flags.
- * @param name          Symbolic name (a copy is made).
- * @param uncounted     Thread's accounting doesn't affect accumulated task
- *                      accounting.
- *
- * @return              New thread's structure on success, NULL on failure.
- *
- */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    int flags, const char *name, bool uncounted)
-{
-        thread_t *t;
-        ipl_t ipl;
-
-        t = (thread_t *) slab_alloc(thread_slab, 0);
-        if (!t)
-                return NULL;
-
-        /* Not needed, but good for debugging */
-        memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-
-        ipl = interrupts_disable();
-        spinlock_lock(&tidlock);
-        t->tid = ++last_tid;
-        spinlock_unlock(&tidlock);
-        interrupts_restore(ipl);
-
-        context_save(&t->saved_context);
-        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
-            THREAD_STACK_SIZE);
-
-        the_initialize((the_t *) t->kstack);
-
-        ipl = interrupts_disable();
-        t->saved_context.ipl = interrupts_read();
-        interrupts_restore(ipl);
-
-        memcpy(t->name, name, THREAD_NAME_BUFLEN);
-        t->name[THREAD_NAME_BUFLEN - 1] = 0;
-
-        t->thread_code = func;
-        t->thread_arg = arg;
-        t->ticks = -1;
-        t->ucycles = 0;
-        t->kcycles = 0;
-        t->uncounted = uncounted;
-        t->priority = -1;               /* start in rq[0] */
-        t->cpu = NULL;
-        t->flags = flags;
-        t->state = Entering;
-        t->call_me = NULL;
-        t->call_me_with = NULL;
-
-        timeout_initialize(&t->sleep_timeout);
-        t->sleep_interruptible = false;
-        t->sleep_queue = NULL;
-        t->timeout_pending = 0;
-
-        t->in_copy_from_uspace = false;
-        t->in_copy_to_uspace = false;
-
-        t->interrupted = false;
-        t->detached = false;
-        waitq_initialize(&t->join_wq);
-
-        t->rwlock_holder_type = RWLOCK_NONE;
-
-        t->task = task;
-
-        t->fpu_context_exists = 0;
-        t->fpu_context_engaged = 0;
-
-        avltree_node_initialize(&t->threads_tree_node);
-        t->threads_tree_node.key = (uintptr_t) t;
-
+
+        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+
+        thread->thread_code = func;
+        thread->thread_arg = arg;
+        thread->ticks = -1;
+        thread->ucycles = 0;
+        thread->kcycles = 0;
+        thread->uncounted = uncounted;
+        thread->priority = -1;          /* Start in rq[0] */
+        thread->cpu = NULL;
+        thread->flags = flags;
+        thread->state = Entering;
+        thread->call_me = NULL;
+        thread->call_me_with = NULL;
+
+        timeout_initialize(&thread->sleep_timeout);
+        thread->sleep_interruptible = false;
+        thread->sleep_queue = NULL;
+        thread->timeout_pending = false;
+
+        thread->in_copy_from_uspace = false;
+        thread->in_copy_to_uspace = false;
+
+        thread->interrupted = false;
+        thread->detached = false;
+        waitq_initialize(&thread->join_wq);
+
+        thread->rwlock_holder_type = RWLOCK_NONE;
+
+        thread->task = task;
+
+        thread->fpu_context_exists = 0;
+        thread->fpu_context_engaged = 0;
+
+        avltree_node_initialize(&thread->threads_tree_node);
+        thread->threads_tree_node.key = (uintptr_t) thread;
+
 #ifdef CONFIG_UDEBUG
         /* Init debugging stuff */
-        udebug_thread_initialize(&t->udebug);
-#endif
-
-        /* might depend on previous initialization */
-        thread_create_arch(t);
-
+        udebug_thread_initialize(&thread->udebug);
+#endif
+
+        /* Might depend on previous initialization */
+        thread_create_arch(thread);
+
         if (!(flags & THREAD_FLAG_NOATTACH))
-                thread_attach(t, task);
-
-        return t;
+                thread_attach(thread, task);
+
+        return thread;
 }
 
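
thread_ready() also introduces irq_spinlock_pass(), which replaces an unlock/lock sequence. Judging from the code it supersedes, it releases the first lock and acquires the second without touching the interrupt state in between, handing the saved state over to the lock now held. Roughly:

    /* Old sequence: two independent operations. */
    spinlock_unlock(&thread->lock);
    spinlock_lock(&cpu->rq[i].lock);

    /* New idiom: the interrupt state saved when thread->lock was
       taken travels to cpu->rq[i].lock, whose eventual unlock
       (with true) will restore it. */
    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));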
…
  *
  * Detach thread from all queues, cpus etc. and destroy it.
- *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-        ASSERT(t->state == Exiting || t->state == Lingering);
-        ASSERT(t->task);
-        ASSERT(t->cpu);
-
-        spinlock_lock(&t->cpu->lock);
-        if (t->cpu->fpu_owner == t)
-                t->cpu->fpu_owner = NULL;
-        spinlock_unlock(&t->cpu->lock);
-
-        spinlock_unlock(&t->lock);
-
-        spinlock_lock(&threads_lock);
-        avltree_delete(&threads_tree, &t->threads_tree_node);
-        spinlock_unlock(&threads_lock);
-
+ * Assume thread->lock is held!
+ *
+ * @param thread  Thread to be destroyed.
+ * @param irq_res Indicate whether it should unlock thread->lock
+ *                in interrupts-restore mode.
+ *
+ */
+void thread_destroy(thread_t *thread, bool irq_res)
+{
+        ASSERT((thread->state == Exiting) || (thread->state == Lingering));
+        ASSERT(thread->task);
+        ASSERT(thread->cpu);
+
+        irq_spinlock_lock(&thread->cpu->lock, false);
+        if (thread->cpu->fpu_owner == thread)
+                thread->cpu->fpu_owner = NULL;
+        irq_spinlock_unlock(&thread->cpu->lock, false);
+
+        irq_spinlock_pass(&thread->lock, &threads_lock);
+
+        avltree_delete(&threads_tree, &thread->threads_tree_node);
+
+        irq_spinlock_pass(&threads_lock, &thread->task->lock);
+
         /*
          * Detach from the containing task.
          */
-        spinlock_lock(&t->task->lock);
-        list_remove(&t->th_link);
-        spinlock_unlock(&t->task->lock);
-
+        list_remove(&thread->th_link);
+        irq_spinlock_unlock(&thread->task->lock, irq_res);
+
         /*
          * Drop the reference to the containing task.
          */
-        task_release(t->task);
-
-        slab_free(thread_slab, t);
+        task_release(thread->task);
+        slab_free(thread_slab, thread);
 }
 
…
  * threads_tree.
  *
- * @param t     Thread to be attached to the task.
- * @param task  Task to which the thread is to be attached.
- */
-void thread_attach(thread_t *t, task_t *task)
-{
-        ipl_t ipl;
-
+ * @param t    Thread to be attached to the task.
+ * @param task Task to which the thread is to be attached.
+ *
+ */
+void thread_attach(thread_t *thread, task_t *task)
+{
         /*
          * Attach to the specified task.
          */
-        ipl = interrupts_disable();
-        spinlock_lock(&task->lock);
-
+        irq_spinlock_lock(&task->lock, true);
+
         /* Hold a reference to the task. */
         task_hold(task);
-
+
         /* Must not count kbox thread into lifecount */
-        if (t->flags & THREAD_FLAG_USPACE)
+        if (thread->flags & THREAD_FLAG_USPACE)
                 atomic_inc(&task->lifecount);
-
-        list_append(&t->th_link, &task->th_head);
-        spinlock_unlock(&task->lock);
-
+
+        list_append(&thread->th_link, &task->th_head);
+
+        irq_spinlock_pass(&task->lock, &threads_lock);
+
         /*
          * Register this thread in the system-wide list.
          */
-        spinlock_lock(&threads_lock);
-        avltree_insert(&threads_tree, &t->threads_tree_node);
-        spinlock_unlock(&threads_lock);
-
-        interrupts_restore(ipl);
+        avltree_insert(&threads_tree, &thread->threads_tree_node);
+        irq_spinlock_unlock(&threads_lock, true);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state. All pending
- * timeouts are executed.
+ * End current thread execution and switch it to the exiting state.
+ * All pending timeouts are executed.
+ *
  */
 void thread_exit(void)
 {
-        ipl_t ipl;
-
         if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
…
                          * can only be created by threads of the same task.
                          * We are safe to perform cleanup.
+                         *
                          */
                         ipc_cleanup();
…
                 }
         }
-
+
 restart:
-        ipl = interrupts_disable();
-        spinlock_lock(&THREAD->lock);
-        if (THREAD->timeout_pending) {
-                /* busy waiting for timeouts in progress */
-                spinlock_unlock(&THREAD->lock);
-                interrupts_restore(ipl);
+        irq_spinlock_lock(&THREAD->lock, true);
+        if (THREAD->timeout_pending) {
+                /* Busy waiting for timeouts in progress */
+                irq_spinlock_unlock(&THREAD->lock, true);
                 goto restart;
         }
 
         THREAD->state = Exiting;
-        spinlock_unlock(&THREAD->lock);
+        irq_spinlock_unlock(&THREAD->lock, true);
+
         scheduler();
-
+
         /* Not reached */
-        while (1)
-                ;
-}
-
+        while (true);
+}
 
 /** Thread sleep
…
         while (sec > 0) {
                 uint32_t period = (sec > 1000) ? 1000 : sec;
-
+
                 thread_usleep(period * 1000000);
                 sec -= period;
…
 /** Wait for another thread to exit.
  *
- * @param t Thread to join on exit.
- * @param usec Timeout in microseconds.
- * @param flags Mode of operation.
+ * @param thread Thread to join on exit.
+ * @param usec   Timeout in microseconds.
+ * @param flags  Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- */
-int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
-{
-        ipl_t ipl;
-        int rc;
-
-        if (t == THREAD)
+ *
+ */
+int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+{
+        if (thread == THREAD)
                 return EINVAL;
-
+
         /*
          * Since thread join can only be called once on an undetached thread,
…
          */
 
-        ipl = interrupts_disable();
-        spinlock_lock(&t->lock);
-        ASSERT(!t->detached);
-        spinlock_unlock(&t->lock);
-        interrupts_restore(ipl);
-
-        rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
-
-        return rc;
+        irq_spinlock_lock(&thread->lock, true);
+        ASSERT(!thread->detached);
+        irq_spinlock_unlock(&thread->lock, true);
+
+        return waitq_sleep_timeout(&thread->join_wq, usec, flags);
 }
 
…
  * state, deallocate its resources.
  *
- * @param t Thread to be detached.
- */
-void thread_detach(thread_t *t)
-{
-        ipl_t ipl;
-
+ * @param thread Thread to be detached.
+ *
+ */
+void thread_detach(thread_t *thread)
+{
         /*
          * Since the thread is expected not to be already detached,
          * pointer to it must be still valid.
          */
-        ipl = interrupts_disable();
-        spinlock_lock(&t->lock);
-        ASSERT(!t->detached);
-        if (t->state == Lingering) {
-                thread_destroy(t);      /* unlocks &t->lock */
-                interrupts_restore(ipl);
+        irq_spinlock_lock(&thread->lock, true);
+        ASSERT(!thread->detached);
+
+        if (thread->state == Lingering) {
+                /*
+                 * Unlock &thread->lock and restore
+                 * interrupts in thread_destroy().
+                 */
+                thread_destroy(thread, true);
                 return;
         } else {
-                t->detached = true;
+                thread->detached = true;
         }
-        spinlock_unlock(&t->lock);
-        interrupts_restore(ipl);
+
+        irq_spinlock_unlock(&thread->lock, true);
 }
 
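
The asserts in the two functions above encode the usual once-only contract: an undetached thread must eventually be joined (by somebody else, never by itself, since thread_join_timeout() returns EINVAL on self-join), and a detached thread must never be joined. A hypothetical caller, assuming the SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE constants from synch.h:

    thread_t *worker = thread_create(worker_func, NULL, TASK, 0,
        "worker", false);
    if (worker) {
            thread_ready(worker);

            /* Either wait for the thread to exit... */
            int rc = thread_join_timeout(worker, SYNCH_NO_TIMEOUT,
                SYNCH_FLAGS_NONE);

            /* ...or declare that nobody ever will (but not both): */
            /* thread_detach(worker); */
    }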
…
  *
  * Register a function and its argument to be executed
- * on next context switch to the current thread.
+ * on next context switch to the current thread. Must
+ * be called with interrupts disabled.
  *
  * @param call_me      Out-of-context function.
…
 void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
 {
-        ipl_t ipl;
-
-        ipl = interrupts_disable();
-        spinlock_lock(&THREAD->lock);
+        irq_spinlock_lock(&THREAD->lock, false);
         THREAD->call_me = call_me;
         THREAD->call_me_with = call_me_with;
-        spinlock_unlock(&THREAD->lock);
-        interrupts_restore(ipl);
+        irq_spinlock_unlock(&THREAD->lock, false);
 }
 
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-        thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
+        thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
 
         uint64_t ucycles, kcycles;
         char usuffix, ksuffix;
-        order_suffix(t->ucycles, &ucycles, &usuffix);
-        order_suffix(t->kcycles, &kcycles, &ksuffix);
-
+        order_suffix(thread->ucycles, &ucycles, &usuffix);
+        order_suffix(thread->kcycles, &kcycles, &ksuffix);
+
 #ifdef __32_BITS__
         printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
-                PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-                thread_states[t->state], t->task, t->task->context, t->thread_code,
-                t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+                PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+                thread_states[thread->state], thread->task, thread->task->context,
+                thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+
 #ifdef __64_BITS__
         printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
-                PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-                thread_states[t->state], t->task, t->task->context, t->thread_code,
-                t->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
-        if (t->cpu)
-                printf("%-4u", t->cpu->id);
+                PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
+                thread_states[thread->state], thread->task, thread->task->context,
+                thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+
+        if (thread->cpu)
+                printf("%-4u", thread->cpu->id);
         else
                 printf("none");
-
-        if (t->state == Sleeping) {
+
+        if (thread->state == Sleeping) {
 #ifdef __32_BITS__
-                printf(" %10p", t->sleep_queue);
-#endif
-
+                printf(" %10p", thread->sleep_queue);
+#endif
+
 #ifdef __64_BITS__
-                printf(" %18p", t->sleep_queue);
+                printf(" %18p", thread->sleep_queue);
 #endif
         }
-
+
         printf("\n");
-
+
         return true;
 }
 
-/** Print list of threads debug info */
+/** Print list of threads debug info
+ *
+ */
 void thread_print_list(void)
 {
-        ipl_t ipl;
-
         /* Messing with thread structures, avoid deadlock */
-        ipl = interrupts_disable();
-        spinlock_lock(&threads_lock);
-
-#ifdef __32_BITS__
+        irq_spinlock_lock(&threads_lock, true);
+
+#ifdef __32_BITS__
         printf("tid    name       address    state    task       "
                 "ctx code       stack      ucycles    kcycles    cpu  "
…
                 "----------\n");
 #endif
-
+
 #ifdef __64_BITS__
         printf("tid    name       address            state    task               "
…
                 "------------------\n");
 #endif
-
+
         avltree_walk(&threads_tree, thread_walker, NULL);
-
-        spinlock_unlock(&threads_lock);
-        interrupts_restore(ipl);
+
+        irq_spinlock_unlock(&threads_lock, true);
 }
 
…
  * interrupts must be already disabled.
  *
- * @param t Pointer to thread.
+ * @param thread Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- */
-bool thread_exists(thread_t *t)
-{
-        avltree_node_t *node;
-
-        node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
+ *
+ */
+bool thread_exists(thread_t *thread)
+{
+        avltree_node_t *node =
+            avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
 
         return node != NULL;
…
  * interrupts must be already disabled.
  *
- * @param user  True to update user accounting, false for kernel.
+ * @param user True to update user accounting, false for kernel.
+ *
  */
 void thread_update_accounting(bool user)
 {
         uint64_t time = get_cycle();
-        if (user) {
+
+        if (user)
                 THREAD->ucycles += time - THREAD->last_cycle;
-        } else {
+        else
                 THREAD->kcycles += time - THREAD->last_cycle;
-        }
+
         THREAD->last_cycle = time;
 }
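
thread_update_accounting() banks the cycles elapsed since last_cycle into either the user or the kernel counter; cushion() earlier in this changeset then flushes both counters into the owning task when the thread finishes. A plausible call pattern at the user/kernel boundary (the actual call sites live outside this file and are only assumed here):

    /* Entering the kernel: cycles since last_cycle were user time. */
    thread_update_accounting(true);

    /* ... system call or interrupt handling ... */

    /* Returning to user space: the elapsed cycles were kernel time. */
    thread_update_accounting(false);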
…
     size_t name_len, thread_id_t *uspace_thread_id)
 {
-        thread_t *t;
-        char namebuf[THREAD_NAME_BUFLEN];
-        uspace_arg_t *kernel_uarg;
-        int rc;
-
         if (name_len > THREAD_NAME_BUFLEN - 1)
                 name_len = THREAD_NAME_BUFLEN - 1;
-
-        rc = copy_from_uspace(namebuf, uspace_name, name_len);
+
+        char namebuf[THREAD_NAME_BUFLEN];
+        int rc = copy_from_uspace(namebuf, uspace_name, name_len);
         if (rc != 0)
                 return (unative_t) rc;
-
+
         namebuf[name_len] = 0;
-
+
         /*
          * In case of failure, kernel_uarg will be deallocated in this function.
          * In case of success, kernel_uarg will be freed in uinit().
+         *
          */
-        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+        uspace_arg_t *kernel_uarg =
+            (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 
         rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
…
                 return (unative_t) rc;
         }
-
-        t = thread_create(uinit, kernel_uarg, TASK,
+
+        thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
             THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-        if (t) {
+        if (thread) {
                 if (uspace_thread_id != NULL) {
-                        int rc;
-
-                        rc = copy_to_uspace(uspace_thread_id, &t->tid,
-                            sizeof(t->tid));
+                        rc = copy_to_uspace(uspace_thread_id, &thread->tid,
+                            sizeof(thread->tid));
                         if (rc != 0) {
                                 /*
…
                                  * has already been created. We need to undo its
                                  * creation now.
+                                 *
                                  */
-
+
                                 /*
                                  * The new thread structure is initialized, but
…
                                  * We can safely deallocate it.
                                  */
-                                slab_free(thread_slab, t);
-                                free(kernel_uarg);
-
+                                slab_free(thread_slab, thread);
+                                free(kernel_uarg);
+
                                 return (unative_t) rc;
                         }
                 }
+
 #ifdef CONFIG_UDEBUG
                 /*
…
                  * THREAD_B events for threads that already existed
                  * and could be detected with THREAD_READ before.
+                 *
                  */
-                udebug_thread_b_event_attach(t, TASK);
+                udebug_thread_b_event_attach(thread, TASK);
 #else
-                thread_attach(t, TASK);
-#endif
-                thread_ready(t);
-
+                thread_attach(thread, TASK);
+#endif
+                thread_ready(thread);
+
                 return 0;
         } else
                 free(kernel_uarg);
-
+
         return (unative_t) ENOMEM;
 }
…
 {
         thread_exit();
+
         /* Unreachable */
         return 0;
…
  *
  * @return 0 on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
…
          * No need to acquire lock on THREAD because tid
          * remains constant for the lifespan of the thread.
+         *
          */
         return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,