Changeset 4e33b6b in mainline


Timestamp:
2007-01-07T14:44:33Z (17 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
d78d603
Parents:
c109dd0
Message:

More formatting changes.

Location:
kernel
Files:
5 edited

  • kernel/arch/sparc64/src/drivers/tick.c

    rc109dd0 r4e33b6b  
    100100                CPU->missed_clock_ticks++;
    101101        }
    102         CPU->arch.next_tick_cmpr = tick_read() + (CPU->arch.clock_frequency / HZ)
    103                 - drift;
     102        CPU->arch.next_tick_cmpr = tick_read() + (CPU->arch.clock_frequency /
     103                HZ) - drift;
    104104        tick_compare_write(CPU->arch.next_tick_cmpr);
    105105        clock();
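
The tick.c hunk only re-wraps an over-long assignment so that it fits within 80 columns, breaking inside the parenthesized expression and indenting the continuation line. A minimal sketch of the same convention, with placeholder names that are not the sparc64 driver's identifiers:

    /* Hypothetical illustration of the 80-column wrapping style above. */
    #define HZ 100

    static unsigned long next_compare(unsigned long now, unsigned long freq,
        unsigned long drift)
    {
            /* Break after the operator, continue one indent level deeper. */
            return now + (freq /
                HZ) - drift;
    }
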
  • kernel/generic/src/cpu/cpu.c

    rc109dd0 r4e33b6b  
    6060        int i, j;
    6161       
    62         #ifdef CONFIG_SMP
     62#ifdef CONFIG_SMP
    6363        if (config.cpu_active == 1) {
    64         #endif /* CONFIG_SMP */
     64#endif /* CONFIG_SMP */
    6565                cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
    6666                                        FRAME_ATOMIC);
     
    8484                }
    8585               
    86         #ifdef CONFIG_SMP
     86#ifdef CONFIG_SMP
    8787        }
    88         #endif /* CONFIG_SMP */
     88#endif /* CONFIG_SMP */
    8989
    9090        CPU = &cpus[config.cpu_active-1];
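
In cpu.c the #ifdef/#endif directives are moved from the code's indentation level to column 0, the usual kernel convention for conditional compilation. A small self-contained sketch of the pattern; CONFIG_SMP is defined locally here and smp_active is a placeholder, not the kernel's configuration machinery:

    #define CONFIG_SMP

    static int smp_active = 1;

    static void init_once(void)
    {
    #ifdef CONFIG_SMP
            if (smp_active == 1) {
    #endif /* CONFIG_SMP */
                    /* one-time initialization, bootstrap CPU only */
    #ifdef CONFIG_SMP
            }
    #endif /* CONFIG_SMP */
    }
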
  • kernel/generic/src/proc/scheduler.c

    rc109dd0 r4e33b6b  
    11/*
    2  * Copyright (C) 2001-2004 Jakub Jermar
     2 * Copyright (C) 2001-2007 Jakub Jermar
    33 * All rights reserved.
    44 *
     
    143143                        spinlock_unlock(&THREAD->lock);
    144144                        spinlock_unlock(&CPU->lock);
    145                         THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
     145                        THREAD->saved_fpu_context =
     146                                slab_alloc(fpu_context_slab, 0);
    146147                        /* We may have switched CPUs during slab_alloc */
    147148                        goto restart;
     
    232233                t->cpu = CPU;
    233234
    234                 t->ticks = us2ticks((i+1)*10000);
     235                t->ticks = us2ticks((i + 1) * 10000);
    235236                t->priority = i;        /* correct rq index */
    236237
     
    268269        spinlock_lock(&CPU->lock);
    269270        if (CPU->needs_relink > NEEDS_RELINK_MAX) {
    270                 for (i = start; i<RQ_COUNT-1; i++) {
     271                for (i = start; i < RQ_COUNT - 1; i++) {
    271272                        /* remember and empty rq[i + 1] */
    272273                        r = &CPU->rq[i + 1];
     
    332333
    333334                /*
    334                  * Interrupt priority level of preempted thread is recorded here
    335                  * to facilitate scheduler() invocations from interrupts_disable()'d
    336                  * code (e.g. waitq_sleep_timeout()).
     335                 * Interrupt priority level of preempted thread is recorded
     336                 * here to facilitate scheduler() invocations from
     337                 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
    337338                 */
    338339                THREAD->saved_context.ipl = ipl;
     
    395396                        } else {
    396397                                /*
    397                                  * The thread structure is kept allocated until somebody
    398                                  * calls thread_detach() on it.
     398                                 * The thread structure is kept allocated until
     399                                 * somebody calls thread_detach() on it.
    399400                                 */
    400401                                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
     
    422423
    423424                        /*
    424                          * We need to release wq->lock which we locked in waitq_sleep().
    425                          * Address of wq->lock is kept in THREAD->sleep_queue.
     425                         * We need to release wq->lock which we locked in
     426                         * waitq_sleep(). Address of wq->lock is kept in
     427                         * THREAD->sleep_queue.
    426428                         */
    427429                        spinlock_unlock(&THREAD->sleep_queue->lock);
    428430
    429431                        /*
    430                          * Check for possible requests for out-of-context invocation.
     432                         * Check for possible requests for out-of-context
     433                         * invocation.
    431434                         */
    432435                        if (THREAD->call_me) {
     
    444447                         * Entering state is unexpected.
    445448                         */
    446                         panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
     449                        panic("tid%d: unexpected state %s\n", THREAD->tid,
     450                                thread_states[THREAD->state]);
    447451                        break;
    448452                }
     
    460464
    461465        /*
    462          * If both the old and the new task are the same, lots of work is avoided.
     466         * If both the old and the new task are the same, lots of work is
     467         * avoided.
    463468         */
    464469        if (TASK != THREAD->task) {
     
    477482               
    478483                /*
    479                  * Note that it is possible for two tasks to share one address space.
     484                 * Note that it is possible for two tasks to share one address
     485                 * space.
    480486                 */
    481487                if (as1 != as2) {
     
    494500
    495501#ifdef SCHEDULER_VERBOSE
    496         printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
    497                 CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
     502        printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
     503                CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
     504                        atomic_get(&CPU->nrdy));
    498505#endif 
    499506
     
    509516
    510517        /*
    511          * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
     518         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     519         * thread's stack.
    512520         */
    513521        the_copy(THE, (the_t *) THREAD->kstack);
     
    556564
    557565        /*
    558          * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
    559          */
    560         for (j=RQ_COUNT-1; j >= 0; j--) {
    561                 for (i=0; i < config.cpu_active; i++) {
     566         * Searching least priority queues on all CPU's first and most priority
     567         * queues on all CPU's last.
     568         */
     569        for (j= RQ_COUNT - 1; j >= 0; j--) {
     570                for (i = 0; i < config.cpu_active; i++) {
    562571                        link_t *l;
    563572                        runq_t *r;
     
    568577                        /*
    569578                         * Not interested in ourselves.
    570                          * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
     579                         * Doesn't require interrupt disabling for kcpulb has
     580                         * THREAD_FLAG_WIRED.
    571581                         */
    572582                        if (CPU == cpu)
     
    589599                                t = list_get_instance(l, thread_t, rq_link);
    590600                                /*
    591                                  * We don't want to steal CPU-wired threads neither threads already
    592                                  * stolen. The latter prevents threads from migrating between CPU's
    593                                  * without ever being run. We don't want to steal threads whose FPU
    594                                  * context is still in CPU.
     601                                 * We don't want to steal CPU-wired threads
     602                                 * neither threads already stolen. The latter
     603                                 * prevents threads from migrating between CPU's
     604                                 * without ever being run. We don't want to
     605                                 * steal threads whose FPU context is still in
     606                                 * CPU.
    595607                                 */
    596608                                spinlock_lock(&t->lock);
    597                                 if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
     609                                if ((!(t->flags & (THREAD_FLAG_WIRED |
     610                                        THREAD_FLAG_STOLEN))) &&
    598611                                        (!(t->fpu_context_engaged)) ) {
    599612                                        /*
     
    622635                                spinlock_lock(&t->lock);
    623636#ifdef KCPULB_VERBOSE
    624                                 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
    625                                         CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
     637                                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
     638                                        "avg=%nd\n", CPU->id, t->tid, CPU->id,
     639                                        atomic_get(&CPU->nrdy),
    626640                                        atomic_get(&nrdy) / config.cpu_active);
    627641#endif
     
    638652                                       
    639653                                /*
    640                                  * We are not satisfied yet, focus on another CPU next time.
     654                                 * We are not satisfied yet, focus on another
     655                                 * CPU next time.
    641656                                 */
    642657                                k++;
     
    689704                spinlock_lock(&cpus[cpu].lock);
    690705                printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
    691                        cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
     706                        cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
     707                        cpus[cpu].needs_relink);
    692708               
    693                 for (i=0; i<RQ_COUNT; i++) {
     709                for (i = 0; i < RQ_COUNT; i++) {
    694710                        r = &cpus[cpu].rq[i];
    695711                        spinlock_lock(&r->lock);
     
    699715                        }
    700716                        printf("\trq[%d]: ", i);
    701                         for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
     717                        for (cur = r->rq_head.next; cur != &r->rq_head;
     718                                cur = cur->next) {
    702719                                t = list_get_instance(cur, thread_t, rq_link);
    703720                                printf("%d(%s) ", t->tid,
    704                                        thread_states[t->state]);
     721                                        thread_states[t->state]);
    705722                        }
    706723                        printf("\n");
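
The scheduler.c hunks apply three conventions: spaces around binary operators in loop headers and arithmetic, block comments re-wrapped at roughly 80 columns, and long printf() calls broken either after an argument or inside the format string. The last case relies on the compiler concatenating adjacent string literals; a runnable sketch with hypothetical variables:

    #include <stdio.h>

    int main(void)
    {
            int cpu_id = 0, tid = 1, priority = 2;
            long long ticks = 300;
            long nrdy = 4;

            /* Adjacent string literals concatenate, so a long format string
             * can be split across lines to stay under 80 columns. */
            printf("cpu%d: tid %d (priority=%d, ticks=%lld, "
                "nrdy=%ld)\n", cpu_id, tid, priority, ticks, nrdy);
            return 0;
    }
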
  • kernel/generic/src/proc/thread.c

    rc109dd0 r4e33b6b  
    8181};
    8282
    83 /** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
     83/** Lock protecting the threads_btree B+tree.
     84 *
     85 * For locking rules, see declaration thereof.
     86 */
    8487SPINLOCK_INITIALIZE(threads_lock);
    8588
    8689/** B+tree of all threads.
    8790 *
    88  * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
    89  * as the threads_lock is held.
     91 * When a thread is found in the threads_btree B+tree, it is guaranteed to
     92 * exist as long as the threads_lock is held.
    9093 */
    9194btree_t threads_btree;         
     
    99102#endif
    100103
    101 /** Thread wrapper
    102  *
    103  * This wrapper is provided to ensure that every thread
    104  * makes a call to thread_exit() when its implementing
    105  * function returns.
     104/** Thread wrapper.
     105 *
     106 * This wrapper is provided to ensure that every thread makes a call to
     107 * thread_exit() when its implementing function returns.
    106108 *
    107109 * interrupts_disable() is assumed.
     
    202204        THREAD = NULL;
    203205        atomic_set(&nrdy,0);
    204         thread_slab = slab_cache_create("thread_slab",
    205                                         sizeof(thread_t),0,
    206                                         thr_constructor, thr_destructor, 0);
     206        thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
     207                thr_constructor, thr_destructor, 0);
     208
    207209#ifdef ARCH_HAS_FPU
    208         fpu_context_slab = slab_cache_create("fpu_slab",
    209                                              sizeof(fpu_context_t),
    210                                              FPU_CONTEXT_ALIGN,
    211                                              NULL, NULL, 0);
     210        fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
     211                FPU_CONTEXT_ALIGN, NULL, NULL, 0);
    212212#endif
    213213
     
    235235        ASSERT(! (t->state == Ready));
    236236
    237         i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
     237        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
    238238       
    239239        cpu = CPU;
     
    268268void thread_destroy(thread_t *t)
    269269{
    270         bool destroy_task = false;     
     270        bool destroy_task = false;
    271271
    272272        ASSERT(t->state == Exiting || t->state == Undead);
     
    275275
    276276        spinlock_lock(&t->cpu->lock);
    277         if(t->cpu->fpu_owner==t)
    278                 t->cpu->fpu_owner=NULL;
     277        if(t->cpu->fpu_owner == t)
     278                t->cpu->fpu_owner = NULL;
    279279        spinlock_unlock(&t->cpu->lock);
    280280
     
    311311 * @param flags     Thread flags.
    312312 * @param name      Symbolic name.
    313  * @param uncounted Thread's accounting doesn't affect accumulated task accounting.
     313 * @param uncounted Thread's accounting doesn't affect accumulated task
     314 *       accounting.
    314315 *
    315316 * @return New thread's structure on success, NULL on failure.
    316317 *
    317318 */
    318 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted)
     319thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
     320        int flags, char *name, bool uncounted)
    319321{
    320322        thread_t *t;
     
    326328       
    327329        /* Not needed, but good for debugging */
    328         memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
     330        memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
     331                0);
    329332       
    330333        ipl = interrupts_disable();
     
    335338       
    336339        context_save(&t->saved_context);
    337         context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
     340        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
     341                THREAD_STACK_SIZE);
    338342       
    339343        the_initialize((the_t *) t->kstack);
     
    377381        t->fpu_context_engaged = 0;
    378382
    379         thread_create_arch(t);          /* might depend on previous initialization */
     383        /* might depend on previous initialization */
     384        thread_create_arch(t); 
    380385       
    381386        /*
     
    399404         */
    400405        spinlock_lock(&threads_lock);
    401         btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
     406        btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
     407                NULL);
    402408        spinlock_unlock(&threads_lock);
    403409       
     
    409415/** Terminate thread.
    410416 *
    411  * End current thread execution and switch it to the exiting
    412  * state. All pending timeouts are executed.
    413  *
     417 * End current thread execution and switch it to the exiting state. All pending
     418 * timeouts are executed.
    414419 */
    415420void thread_exit(void)
     
    420425        ipl = interrupts_disable();
    421426        spinlock_lock(&THREAD->lock);
    422         if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
     427        if (THREAD->timeout_pending) {
     428                /* busy waiting for timeouts in progress */
    423429                spinlock_unlock(&THREAD->lock);
    424430                interrupts_restore(ipl);
     
    444450void thread_sleep(uint32_t sec)
    445451{
    446         thread_usleep(sec*1000000);
     452        thread_usleep(sec * 1000000);
    447453}
    448454
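
In thread.c the long Doxygen comments are rewritten as a one-line brief, an empty comment line, and a detail paragraph, and long calls such as slab_cache_create() and context_set() are wrapped after an argument with a single extra indent. A minimal sketch of the comment layout; the declaration below is hypothetical, not the kernel's slab_cache_create():

    #include <stddef.h>

    /** Create an object cache.
     *
     * The one-line brief above is followed by an empty comment line and a
     * detail paragraph wrapped at roughly 80 columns, on as many lines as
     * needed.
     */
    void *cache_create(const char *name, size_t size, size_t align,
        void (*ctor)(void *), void (*dtor)(void *), int flags);
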
  • kernel/generic/src/synch/waitq.c

    rc109dd0 r4e33b6b  
    189189 
    190190 * If usec is greater than zero, regardless of the value of the
    191  * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
    192  * interruption or wakeup comes.
    193  *
    194  * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
    195  * will not return until wakeup or interruption comes.
    196  *
    197  * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
    198  * immediately return, reporting either success or failure.
    199  *
    200  * @return      Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
    201  *              ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
    202  *
    203  * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
    204  * of the call there was no pending wakeup.
     191 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
     192 * timeout, interruption or wakeup comes.
     193 *
     194 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
     195 * the call will not return until wakeup or interruption comes.
     196 *
     197 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
     198 * call will immediately return, reporting either success or failure.
     199 *
     200 * @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
     201 * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
     202 *
     203 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of the
     204 * call there was no pending wakeup.
    205205 *
    206206 * @li ESYNCH_TIMEOUT means that the sleep timed out.
     
    352352                }
    353353                THREAD->timeout_pending = true;
    354                 timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, waitq_timeouted_sleep, THREAD);
     354                timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
     355                        waitq_timeouted_sleep, THREAD);
    355356        }
    356357
     
    365366        spinlock_unlock(&THREAD->lock);
    366367
    367         scheduler();    /* wq->lock is released in scheduler_separated_stack() */
     368        /* wq->lock is released in scheduler_separated_stack() */
     369        scheduler();
    368370       
    369371        return ESYNCH_OK_BLOCKED;
     
    373375/** Wake up first thread sleeping in a wait queue
    374376 *
    375  * Wake up first thread sleeping in a wait queue.
    376  * This is the SMP- and IRQ-safe wrapper meant for
    377  * general use.
    378  *
    379  * Besides its 'normal' wakeup operation, it attempts
    380  * to unregister possible timeout.
     377 * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
     378 * wrapper meant for general use.
     379 *
     380 * Besides its 'normal' wakeup operation, it attempts to unregister possible
     381 * timeout.
    381382 *
    382383 * @param wq Pointer to wait queue.
    383  * @param all If this is non-zero, all sleeping threads
    384  *        will be woken up and missed count will be zeroed.
     384 * @param all If this is non-zero, all sleeping threads will be woken up and
     385 *      missed count will be zeroed.
    385386 */
    386387void waitq_wakeup(waitq_t *wq, bool all)
     
    399400/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
    400401 *
    401  * This is the internal SMP- and IRQ-unsafe version
    402  * of waitq_wakeup(). It assumes wq->lock is already
    403  * locked and interrupts are already disabled.
     402 * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
     403 * assumes wq->lock is already locked and interrupts are already disabled.
    404404 *
    405405 * @param wq Pointer to wait queue.
    406  * @param all If this is non-zero, all sleeping threads
    407  *        will be woken up and missed count will be zeroed.
     406 * @param all If this is non-zero, all sleeping threads will be woken up and
     407 *      missed count will be zeroed.
    408408 */
    409409void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
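
The reflowed comment in waitq.c spells out the waitq_sleep_timeout() contract: a non-zero usec always permits blocking but bounds it by the timeout, usec == 0 without SYNCH_FLAGS_NON_BLOCKING blocks until wakeup or interruption, and usec == 0 with the flag set returns immediately. A hedged usage sketch follows; it assumes the kernel-internal int waitq_sleep_timeout(waitq_t *, uint32_t, int) signature and the <synch/waitq.h> header, so it only builds inside the kernel tree:

    #include <synch/waitq.h>

    /* Hypothetical caller; wq is assumed to be an initialized wait queue. */
    static int wait_for_event(waitq_t *wq)
    {
            /* usec > 0: the call may block, but returns after 10 ms at the
             * latest, regardless of SYNCH_FLAGS_NON_BLOCKING. */
            int rc = waitq_sleep_timeout(wq, 10000, 0);

            switch (rc) {
            case ESYNCH_OK_ATOMIC:          /* a wakeup was already pending */
            case ESYNCH_OK_BLOCKED:         /* woken up after sleeping */
                    return 0;
            case ESYNCH_TIMEOUT:            /* the 10 ms timeout expired */
            case ESYNCH_INTERRUPTED:        /* the sleep was interrupted */
            case ESYNCH_WOULD_BLOCK:        /* only with SYNCH_FLAGS_NON_BLOCKING */
            default:
                    return -1;
            }
    }
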