Changeset 6f4495f5 in mainline for kernel/generic/src/synch


Ignore:
Timestamp:
2007-01-27T17:32:13Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
1ba41c5
Parents:
51baa8a
Message:

Indentation and formatting changes even Martin will like :-)

Location:
kernel/generic/src/synch
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/src/synch/futex.c

    r51baa8a r6f4495f5  
    103103 *
    104104 * @param uaddr Userspace address of the futex counter.
    105  * @param usec If non-zero, number of microseconds this thread is willing to sleep.
     105 * @param usec If non-zero, number of microseconds this thread is willing to
     106 *     sleep.
    106107 * @param flags Select mode of operation.
    107108 *
    108  * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
    109  *        If there is no physical mapping for uaddr ENOENT is returned.
     109 * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See
     110 *     synch.h. If there is no physical mapping for uaddr ENOENT is returned.
    110111 */
    111112unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags)
     
    135136        futex = futex_find(paddr);
    136137       
    137         return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
     138        return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags |
     139            SYNCH_FLAGS_INTERRUPTIBLE);
    138140}
    139141
     
    243245                                 */
    244246                                futex->refcount++;
    245                                 btree_insert(&TASK->futexes, paddr, futex, leaf);
     247                                btree_insert(&TASK->futexes, paddr, futex,
     248                                    leaf);
    246249                        }
    247250                        mutex_unlock(&TASK->futexes_lock);
     
    272275/** Compute hash index into futex hash table.
    273276 *
    274  * @param key Address where the key (i.e. physical address of futex counter) is stored.
     277 * @param key Address where the key (i.e. physical address of futex counter) is
     278 *     stored.
    275279 *
    276280 * @return Index into futex hash table.
     
    283287/** Compare futex hash table item with a key.
    284288 *
    285  * @param key Address where the key (i.e. physical address of futex counter) is stored.
     289 * @param key Address where the key (i.e. physical address of futex counter) is
     290 *     stored.
    286291 *
    287292 * @return True if the item matches the key. False otherwise.
     
    317322        mutex_lock(&TASK->futexes_lock);
    318323
    319         for (cur = TASK->futexes.leaf_head.next; cur != &TASK->futexes.leaf_head; cur = cur->next) {
     324        for (cur = TASK->futexes.leaf_head.next;
     325            cur != &TASK->futexes.leaf_head; cur = cur->next) {
    320326                btree_node_t *node;
    321327                int i;
  • kernel/generic/src/synch/rwlock.c

    r51baa8a r6f4495f5  
    221221                case ESYNCH_OK_BLOCKED:         
    222222                        /*
    223                          * We were woken with rwl->readers_in already incremented.
    224                          * Note that this arrangement avoids race condition between
    225                          * two concurrent readers. (Race is avoided if 'exclusive' is
    226                          * locked at the same time as 'readers_in' is incremented.
    227                          * Same time means both events happen atomically when
    228                          * rwl->lock is held.)
     223                         * We were woken with rwl->readers_in already
     224                         * incremented.
     225                         *
     226                         * Note that this arrangement avoids race condition
     227                         * between two concurrent readers. (Race is avoided if
     228                         * 'exclusive' is locked at the same time as
     229                         * 'readers_in' is incremented. Same time means both
     230                         * events happen atomically when rwl->lock is held.)
    229231                         */
    230232                        interrupts_restore(ipl);
     
    324326
    325327        if (!list_empty(&rwl->exclusive.sem.wq.head))
    326                 t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
     328                t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
     329                    wq_link);
    327330        do {
    328331                if (t) {
     
    344347                        /*
    345348                         * Waking up a reader.
    346                          * We are responsible for incrementing rwl->readers_in for it.
     349                         * We are responsible for incrementing rwl->readers_in
     350                         * for it.
    347351                         */
    348352                         rwl->readers_in++;
     
    361365                t = NULL;
    362366                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
    363                         t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
     367                        t = list_get_instance(rwl->exclusive.sem.wq.head.next,
     368                            thread_t, wq_link);
    364369                        if (t) {
    365370                                spinlock_lock(&t->lock);
  • kernel/generic/src/synch/spinlock.c

    r51baa8a r6f4495f5  
    108108#endif
    109109                if (i++ > DEADLOCK_THRESHOLD) {
    110                         printf("cpu%d: looping on spinlock %.*p:%s, caller=%.*p",
    111                                CPU->id, sizeof(uintptr_t) * 2, sl, sl->name, sizeof(uintptr_t) * 2, CALLER);
     110                        printf("cpu%d: looping on spinlock %.*p:%s, "
     111                            "caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl,
     112                            sl->name, sizeof(uintptr_t) * 2, CALLER);
    112113                        symbol = get_symtab_entry(CALLER);
    113114                        if (symbol)
  • kernel/generic/src/synch/waitq.c

    r51baa8a r6f4495f5  
    187187 * The sleep can be interrupted only if the
    188188 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
    189  
     189 *
    190190 * If usec is greater than zero, regardless of the value of the
    191191 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
     
    353353                THREAD->timeout_pending = true;
    354354                timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
    355                         waitq_timeouted_sleep, THREAD);
     355                    waitq_timeouted_sleep, THREAD);
    356356        }
    357357
Note: See TracChangeset for help on using the changeset viewer.