Changeset 32fffef0 in mainline for kernel/generic


Ignore:
Timestamp:
2006-08-29T11:06:57Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
0fa6044
Parents:
c8ea4a8b
Message:

Define architecture-specific thread sub-constructors and sub-destructors on all architectures.
Define the THREAD_FLAG_USPACE which means that the thread runs in user space.
The aforementioned changes allow for allocation of the user window buffer on sparc64 for
threads that execute in userspace.

A lot of formatting and indentation fixes.

Location:
kernel/generic
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/mm/frame.h

    rc8ea4a8b r32fffef0  
    2828 */
    2929
    30  /** @addtogroup genericmm
     30/** @addtogroup genericmm
    3131 * @{
    3232 */
     
    3434 */
    3535
    36 #ifndef __FRAME_H__
    37 #define __FRAME_H__
     36#ifndef KERN_FRAME_H_
     37#define KERN_FRAME_H_
    3838
    3939#include <arch/types.h>
     
    6262#define FRAME_NO_RECLAIM        0x4     /* do not start reclaiming when no free memory */
    6363
    64 #define FRAME_OK                0       /* frame_alloc return status */
    65 #define FRAME_NO_MEMORY         1       /* frame_alloc return status */
    66 #define FRAME_ERROR             2       /* frame_alloc return status */
    67 
    6864static inline uintptr_t PFN2ADDR(pfn_t frame)
    6965{
     
    8985#define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame)   (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
    9086
    91 #define frame_alloc(order, flags)                               frame_alloc_generic(order, flags, NULL)
     87#define frame_alloc(order, flags)               frame_alloc_generic(order, flags, NULL)
    9288
    9389extern void frame_init(void);
     
    112108#endif
    113109
    114  /** @}
     110/** @}
    115111 */
    116 
  • kernel/generic/include/proc/thread.h

    rc8ea4a8b r32fffef0  
    5353#define THREAD_STACK_SIZE       STACK_SIZE
    5454
    55 /**< Thread states. */
     55/** Thread states. */
    5656enum state {
    5757        Invalid,        /**< It is an error, if thread is found in this state. */
     
    6666extern char *thread_states[];
    6767
    68 /**< Join types. */
     68/** Join types. */
    6969typedef enum {
    7070        None,
     
    7373} thread_join_type_t;
    7474
    75 #define X_WIRED         (1<<0)
    76 #define X_STOLEN        (1<<1)
     75/* Thread flags */
     76#define THREAD_FLAG_WIRED       (1<<0)  /**< Thread cannot be migrated to another CPU. */
     77#define THREAD_FLAG_STOLEN      (1<<1)  /**< Thread was migrated to another CPU and has not run yet. */
     78#define THREAD_FLAG_USPACE      (1<<2)  /**< Thread executes in userspace. */
    7779
    7880#define THREAD_NAME_BUFLEN      20
     
    128130         * Defined only if thread doesn't run.
    129131         * It means that fpu context is in CPU that last time executes this thread.
    130          * This disables migration
     132         * This disables migration.
    131133         */
    132134        int fpu_context_engaged;
     
    150152        thread_arch_t arch;                     /**< Architecture-specific data. */
    151153
    152         uint8_t *kstack;                                /**< Thread's kernel stack. */
     154        uint8_t *kstack;                        /**< Thread's kernel stack. */
    153155};
    154156
     
    171173extern void thread_create_arch(thread_t *t);
    172174#endif
     175#ifndef thr_constructor_arch
     176extern void thr_constructor_arch(thread_t *t);
     177#endif
     178#ifndef thr_destructor_arch
     179extern void thr_destructor_arch(thread_t *t);
     180#endif
    173181
    174182extern void thread_sleep(uint32_t sec);
  • kernel/generic/src/main/kinit.c

    rc8ea4a8b r32fffef0  
    101101                 * Just a beautification.
    102102                 */
    103                 if ((t = thread_create(kmp, NULL, TASK, 0, "kmp"))) {
     103                if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp"))) {
    104104                        spinlock_lock(&t->lock);
    105                         t->flags |= X_WIRED;
    106105                        t->cpu = &cpus[0];
    107106                        spinlock_unlock(&t->lock);
     
    127126                for (i = 0; i < config.cpu_count; i++) {
    128127
    129                         if ((t = thread_create(kcpulb, NULL, TASK, 0, "kcpulb"))) {
     128                        if ((t = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb"))) {
    130129                                spinlock_lock(&t->lock);                       
    131                                 t->flags |= X_WIRED;
    132130                                t->cpu = &cpus[i];
    133131                                spinlock_unlock(&t->lock);
  • kernel/generic/src/mm/frame.c

    rc8ea4a8b r32fffef0  
    929929 * @param order  Allocate exactly 2^order frames.
    930930 * @param flags  Flags for host zone selection and address processing.
    931  * @param status Allocation status (FRAME_OK on success), unused if NULL.
    932931 * @param pzone  Preferred zone
    933932 *
     
    988987/** Free a frame.
    989988 *
    990  * Find respective frame structure for supplied PFN.
     989 * Find respective frame structure for supplied physical frame address.
    991990 * Decrement frame reference count.
    992991 * If it drops to zero, move the frame structure to free list.
  • kernel/generic/src/proc/scheduler.c

    rc8ea4a8b r32fffef0  
    142142                        spinlock_unlock(&THREAD->lock);
    143143                        spinlock_unlock(&CPU->lock);
    144                         THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
    145                                                                0);
     144                        THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
    146145                        /* We may have switched CPUs during slab_alloc */
    147146                        goto restart;
     
    236235
    237236                /*
    238                  * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
     237                 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
     238                 * when load balancing needs emerge.
    239239                 */
    240                 t->flags &= ~X_STOLEN;
     240                t->flags &= ~THREAD_FLAG_STOLEN;
    241241                spinlock_unlock(&t->lock);
    242242
     
    350350         */
    351351        context_save(&CPU->saved_context);
    352         context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     352        context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
     353                (uintptr_t) CPU->stack, CPU_STACK_SIZE);
    353354        context_restore(&CPU->saved_context);
    354355        /* not reached */
     
    484485
    485486#ifdef SCHEDULER_VERBOSE
    486         printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
     487        printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
     488                CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
    487489#endif 
    488490
     
    557559                        /*
    558560                         * Not interested in ourselves.
    559                          * Doesn't require interrupt disabling for kcpulb is X_WIRED.
     561                         * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
    560562                         */
    561563                        if (CPU == cpu)
     
    578580                                t = list_get_instance(l, thread_t, rq_link);
    579581                                /*
    580                                  * We don't want to steal CPU-wired threads neither threads already stolen.
    581                                  * The latter prevents threads from migrating between CPU's without ever being run.
    582                                  * We don't want to steal threads whose FPU context is still in CPU.
     582                                 * We don't want to steal CPU-wired threads neither threads already
     583                                 * stolen. The latter prevents threads from migrating between CPU's
     584                                 * without ever being run. We don't want to steal threads whose FPU
     585                                 * context is still in CPU.
    583586                                 */
    584587                                spinlock_lock(&t->lock);
    585                                 if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
     588                                if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
     589                                        (!(t->fpu_context_engaged)) ) {
    586590                                        /*
    587591                                         * Remove t from r.
     
    609613                                spinlock_lock(&t->lock);
    610614#ifdef KCPULB_VERBOSE
    611                                 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
     615                                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
     616                                        CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
     617                                        atomic_get(&nrdy) / config.cpu_active);
    612618#endif
    613                                 t->flags |= X_STOLEN;
     619                                t->flags |= THREAD_FLAG_STOLEN;
    614620                                t->state = Entering;
    615621                                spinlock_unlock(&t->lock);
  • kernel/generic/src/proc/thread.c

    rc8ea4a8b r32fffef0  
    130130        link_initialize(&t->wq_link);
    131131        link_initialize(&t->th_link);
     132
     133        /* call the architecture-specific part of the constructor */
     134        thr_constructor_arch(t);
    132135       
    133136#ifdef ARCH_HAS_FPU
     
    157160{
    158161        thread_t *t = (thread_t *) obj;
     162
     163        /* call the architecture-specific part of the destructor */
     164        thr_destructor_arch(t);
    159165
    160166        frame_free(KA2PA(t->kstack));
     
    211217       
    212218        cpu = CPU;
    213         if (t->flags & X_WIRED) {
     219        if (t->flags & THREAD_FLAG_WIRED) {
    214220                cpu = t->cpu;
    215221        }
     
    296302        if (!t)
    297303                return NULL;
    298 
    299         thread_create_arch(t);
    300304       
    301305        /* Not needed, but good for debugging */
     
    324328        t->priority = -1;               /* start in rq[0] */
    325329        t->cpu = NULL;
    326         t->flags = 0;
     330        t->flags = flags;
    327331        t->state = Entering;
    328332        t->call_me = NULL;
     
    348352        t->fpu_context_exists = 0;
    349353        t->fpu_context_engaged = 0;
     354
     355        thread_create_arch(t);          /* might depend on previous initialization */
    350356       
    351357        /*
     
    590596        }
    591597
    592         if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
     598        if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
    593599                tid = t->tid;
    594600                thread_ready(t);
Note: See TracChangeset for help on using the changeset viewer.