Changeset diff (Trac changeset viewer, plain-text export): 1 file edited.

Legend (color coding from the original page is lost in this export):

Unmodified — line carries both an old and a new line number (fused together, e.g. "4646")
Added      — line carries only a new line number (old column blank)
Removed    — line carries only an old line number (new column blank)
  • kernel/generic/src/proc/thread.c

    r8ad7dd1 rf22dc820  
    4646#include <synch/spinlock.h>
    4747#include <synch/waitq.h>
    48 #include <synch/workqueue.h>
    49 #include <synch/rcu.h>
    5048#include <cpu.h>
    5149#include <str.h>
     
    194192        kmflags &= ~FRAME_HIGHMEM;
    195193       
    196         uintptr_t stack_phys =
    197             frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    198         if (!stack_phys) {
     194        thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     195        if (!thread->kstack) {
    199196#ifdef CONFIG_FPU
    200197                if (thread->saved_fpu_context)
     
    204201        }
    205202       
    206         thread->kstack = (uint8_t *) PA2KA(stack_phys);
    207        
    208203#ifdef CONFIG_UDEBUG
    209204        mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
     
    221216        thr_destructor_arch(thread);
    222217       
    223         frame_free(KA2PA(thread->kstack), STACK_FRAMES);
     218        frame_free(KA2PA(thread->kstack));
    224219       
    225220#ifdef CONFIG_FPU
     
    265260}
    266261
    267 /** Invoked right before thread_ready() readies the thread. thread is locked. */
    268 static void before_thread_is_ready(thread_t *thread)
    269 {
    270         ASSERT(irq_spinlock_locked(&thread->lock));
    271         workq_before_thread_is_ready(thread);
    272 }
    273 
    274262/** Make thread ready
    275263 *
     
    284272       
    285273        ASSERT(thread->state != Ready);
    286 
    287         before_thread_is_ready(thread);
    288274       
    289275        int i = (thread->priority < RQ_COUNT - 1) ?
    290276            ++thread->priority : thread->priority;
    291 
     277       
    292278        cpu_t *cpu;
    293279        if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
    294                 /* Cannot ready to another CPU */
    295280                ASSERT(thread->cpu != NULL);
    296281                cpu = thread->cpu;
    297         } else if (thread->stolen) {
    298                 /* Ready to the stealing CPU */
     282        } else
    299283                cpu = CPU;
    300         } else if (thread->cpu) {
    301                 /* Prefer the CPU on which the thread ran last */
    302                 ASSERT(thread->cpu != NULL);
    303                 cpu = thread->cpu;
    304         } else {
    305                 cpu = CPU;
    306         }
    307284       
    308285        thread->state = Ready;
     
    320297       
    321298        atomic_inc(&nrdy);
     299        // FIXME: Why is the avg value not used
     300        // avg = atomic_get(&nrdy) / config.cpu_active;
    322301        atomic_inc(&cpu->nrdy);
    323302}
     
    395374        thread->task = task;
    396375       
    397         thread->workq = NULL;
    398        
    399376        thread->fpu_context_exists = false;
    400377        thread->fpu_context_engaged = false;
     
    411388        /* Might depend on previous initialization */
    412389        thread_create_arch(thread);
    413        
    414         rcu_thread_init(thread);
    415390       
    416391        if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
     
    523498                         */
    524499                        ipc_cleanup();
    525                         futex_task_cleanup();
     500                        futex_cleanup();
    526501                        LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
    527502                }
     
    543518        /* Not reached */
    544519        while (true);
    545 }
    546 
    547 /** Interrupts an existing thread so that it may exit as soon as possible.
    548  *
    549  * Threads that are blocked waiting for a synchronization primitive
    550  * are woken up with a return code of ESYNCH_INTERRUPTED if the
    551  * blocking call was interruptable. See waitq_sleep_timeout().
    552  *
    553  * The caller must guarantee the thread object is valid during the entire
    554  * function, eg by holding the threads_lock lock.
    555  *
    556  * Interrupted threads automatically exit when returning back to user space.
    557  *
    558  * @param thread A valid thread object. The caller must guarantee it
    559  *               will remain valid until thread_interrupt() exits.
    560  */
    561 void thread_interrupt(thread_t *thread)
    562 {
    563         ASSERT(thread != NULL);
    564        
    565         irq_spinlock_lock(&thread->lock, true);
    566        
    567         thread->interrupted = true;
    568         bool sleeping = (thread->state == Sleeping);
    569        
    570         irq_spinlock_unlock(&thread->lock, true);
    571        
    572         if (sleeping)
    573                 waitq_interrupt_sleep(thread);
    574 }
    575 
    576 /** Returns true if the thread was interrupted.
    577  *
    578  * @param thread A valid thread object. User must guarantee it will
    579  *               be alive during the entire call.
    580  * @return true if the thread was already interrupted via thread_interrupt().
    581  */
    582 bool thread_interrupted(thread_t *thread)
    583 {
    584         ASSERT(thread != NULL);
    585        
    586         bool interrupted;
    587        
    588         irq_spinlock_lock(&thread->lock, true);
    589         interrupted = thread->interrupted;
    590         irq_spinlock_unlock(&thread->lock, true);
    591        
    592         return interrupted;
    593520}
    594521
Note: See TracChangeset for help on using the changeset viewer.