Changeset b1c57a8 in mainline for kernel/generic/include


Timestamp:
2014-10-09T15:03:55Z
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
e367939c
Parents:
21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge from lp:~adam-hraska+lp/helenos/rcu/.

Only merge from the feature branch and resolve all conflicts.

Location:
kernel/generic/include
Files:
9 added
15 edited

Legend:

  ' '  unmodified
  '+'  added
  '-'  removed

  • kernel/generic/include/adt/list.h

--- r21799398
+++ rb1c57a8
@@ -52,4 +52,10 @@
 } list_t;
 
+
+extern int list_member(const link_t *, const list_t *);
+extern void list_splice(list_t *, link_t *);
+extern unsigned int list_count(const list_t *);
+
+
 /** Declare and initialize statically allocated list.
  *
@@ -80,4 +86,36 @@
             _link != &(list).head; _link = _link->prev)
 
+/** Unlike list_foreach(), allows removing items while traversing a list.
+ *
+ * @code
+ * list_t mylist;
+ * typedef struct item {
+ *     int value;
+ *     link_t item_link;
+ * } item_t;
+ *
+ * //..
+ *
+ * // Print each list element's value and remove the element from the list.
+ * list_foreach_safe(mylist, cur_link, next_link) {
+ *     item_t *cur_item = list_get_instance(cur_link, item_t, item_link);
+ *     printf("%d\n", cur_item->value);
+ *     list_remove(cur_link);
+ * }
+ * @endcode
+ *
+ * @param list      List to traverse.
+ * @param iterator  Iterator to the current element of the list.
+ *                  The item this iterator points to may be safely
+ *                  removed from the list.
+ * @param next_iter Iterator to the next element of the list.
+ */
+#define list_foreach_safe(list, iterator, next_iter) \
+        for (link_t *iterator = (list).head.next, \
+                *next_iter = iterator->next; \
+                iterator != &(list).head; \
+                iterator = next_iter, next_iter = iterator->next)
+
+
 #define assert_link_not_used(link) \
         ASSERT(!link_used(link))
@@ -289,4 +327,19 @@
 {
         headless_list_split_or_concat(part1, part2);
+}
+
+/** Concatenate two lists
+ *
+ * Concatenate lists @a list1 and @a list2, producing a single
+ * list @a list1 containing items from both (in @a list1, @a list2
+ * order) and an empty list @a list2.
+ *
+ * @param list1 First list and concatenated output.
+ * @param list2 Second list and empty output.
+ *
+ */
+NO_TRACE static inline void list_concat(list_t *list1, list_t *list2)
+{
+        list_splice(list2, list1->head.prev);
 }
 
@@ -340,8 +393,4 @@
 }
 
-extern int list_member(const link_t *, const list_t *);
-extern void list_concat(list_t *, list_t *);
-extern unsigned int list_count(const list_t *);
-
 #endif
 
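
The merge turns list_concat() into a header inline built on the newly exported
list_splice(). A minimal usage sketch, assuming the usual HelenOS list
primitives (list_initialize(), link_initialize(), list_append(), list_empty());
the local variable names are illustrative only:

    static void concat_example(void)
    {
            list_t a, b;
            link_t x, y;

            list_initialize(&a);
            list_initialize(&b);
            link_initialize(&x);
            link_initialize(&y);

            list_append(&x, &a);
            list_append(&y, &b);

            /* Move all of b's items to the tail of a; b becomes empty. */
            list_concat(&a, &b);

            ASSERT(list_empty(&b));
            ASSERT(list_count(&a) == 2);
    }
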
  • kernel/generic/include/arch.h

--- r21799398
+++ rb1c57a8
@@ -36,8 +36,8 @@
 #define KERN_ARCH_H_
 
-#include <arch/arch.h>
-#include <proc/thread.h>
-#include <proc/task.h>
-#include <mm/as.h>
+#include <arch/arch.h>  /* arch_pre_main() */
+#include <arch/asm.h>   /* get_stack_base() */
+#include <config.h>
+
 
 /*
@@ -49,9 +49,4 @@
 #define THE  ((the_t * )(get_stack_base()))
 
-#define CPU                  THE->cpu
-#define THREAD               THE->thread
-#define TASK                 THE->task
-#define AS                   THE->as
-#define PREEMPTION_DISABLED  THE->preemption_disabled
 #define MAGIC                UINT32_C(0xfacefeed)
 
@@ -62,4 +57,10 @@
         ((THE->task) ? (THE->task->container) : (DEFAULT_CONTAINER))
 
+/* Fwd decl. to avoid include hell. */
+struct thread;
+struct task;
+struct cpu;
+struct as;
+
 /**
  * For each possible kernel stack, structure
@@ -68,10 +69,13 @@
  */
 typedef struct {
-        size_t preemption_disabled;  /**< Preemption disabled counter. */
-        thread_t *thread;            /**< Current thread. */
-        task_t *task;                /**< Current task. */
-        cpu_t *cpu;                  /**< Executing cpu. */
-        as_t *as;                    /**< Current address space. */
-        uint32_t magic;              /**< Magic value */
+        size_t preemption;     /**< Preemption disabled counter and flag. */
+#ifdef RCU_PREEMPT_A
+        size_t rcu_nesting;    /**< RCU nesting count and flag. */
+#endif
+        struct thread *thread; /**< Current thread. */
+        struct task *task;     /**< Current task. */
+        struct cpu *cpu;       /**< Executing cpu. */
+        struct as *as;         /**< Current address space. */
+        uint32_t magic;        /**< Magic value */
 } the_t;
 
@@ -91,4 +95,5 @@
 extern void *arch_construct_function(fncptr_t *, void *, void *);
 
+
 #endif
 
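
Because the_t now stores pointers to forward-declared structs and the
CPU/THREAD/TASK/AS convenience macros move to cpu.h, proc/thread.h,
proc/task.h and mm/as.h respectively, code that dereferences them must
include the owning header itself. A hedged sketch (the name field of
thread_t is an assumption here):

    #include <proc/thread.h>   /* brings in THREAD and struct thread */

    static const char *current_thread_name(void)
    {
            /* THREAD may be NULL early during boot. */
            return THREAD ? THREAD->name : "(none)";
    }
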
  • kernel/generic/include/atomic.h

--- r21799398
+++ rb1c57a8
@@ -53,4 +53,18 @@
 }
 
+
+/*
+ * If the architecture does not provide operations that are atomic
+ * only with respect to the local cpu (eg exception handlers) and
+ * not other cpus, implement these cpu local atomic operations with
+ * full blown smp-safe atomics.
+ */
+#ifndef local_atomic_exchange
+#define local_atomic_exchange(var_addr, new_val) \
+        __atomic_exchange_n((var_addr), (new_val), __ATOMIC_RELAXED)
+#endif
+
+
+
 #endif
 
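
local_atomic_exchange() is meant for values shared only between ordinary
code and exception/interrupt handlers on the same CPU. A minimal sketch of
the intended use; the per-cpu variable and function are hypothetical:

    static size_t pending_work;   /* hypothetical, touched by one cpu only */

    static size_t take_pending_work(void)
    {
            /* Read and clear atomically w.r.t. local exception handlers. */
            return local_atomic_exchange(&pending_work, 0);
    }

An architecture with cheaper cpu-local primitives can #define
local_atomic_exchange itself, which the #ifndef above respects.
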
  • kernel/generic/include/cpu.h

--- r21799398
+++ rb1c57a8
@@ -38,7 +38,13 @@
 #include <mm/tlb.h>
 #include <synch/spinlock.h>
+#include <synch/rcu_types.h>
 #include <proc/scheduler.h>
 #include <arch/cpu.h>
 #include <arch/context.h>
+#include <adt/list.h>
+#include <arch.h>
+
+#define CPU                  THE->cpu
+
 
 /** CPU structure.
@@ -94,4 +100,13 @@
 
         /**
+         * SMP calls to invoke on this CPU.
+         */
+        SPINLOCK_DECLARE(smp_calls_lock);
+        list_t smp_pending_calls;
+
+        /** RCU per-cpu data. Uses own locking. */
+        rcu_cpu_data_t rcu;
+
+        /**
         * Stack used by scheduler when there is no running thread.
         */
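
A hedged sketch of how the new fields fit together; smp_call_t and its
calls_link member are hypothetical stand-ins for the real smp call
structures added elsewhere in this merge:

    typedef struct {
            link_t calls_link;   /* hypothetical link into smp_pending_calls */
            /* ... function to call, argument, completion flag ... */
    } smp_call_t;

    static void enqueue_call_on(cpu_t *cpu, smp_call_t *call)
    {
            spinlock_lock(&cpu->smp_calls_lock);
            list_append(&call->calls_link, &cpu->smp_pending_calls);
            spinlock_unlock(&cpu->smp_calls_lock);
            /* ... then send an IPI so the target cpu drains the list ... */
    }
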
  • kernel/generic/include/lib/memfnc.h

--- r21799398
+++ rb1c57a8
@@ -50,4 +50,6 @@
     ATTRIBUTE_OPTIMIZE("-fno-tree-loop-distribute-patterns") DO_NOT_DISCARD;
 
+#define alloca(size) __builtin_alloca((size))
+
 #endif
 
  • kernel/generic/include/macros.h

--- r21799398
+++ rb1c57a8
@@ -157,4 +157,11 @@
         })
 
+
+#ifndef member_to_inst
+#define member_to_inst(ptr_member, type, member_identif) \
+        ((type*) (((void*)(ptr_member)) - ((void*)&(((type*)0)->member_identif))))
+#endif
+
+
 #endif
 
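
member_to_inst() recovers a pointer to an enclosing structure from a
pointer to one of its members (the same trick list_get_instance() uses).
A standalone illustration that compiles with GCC (arithmetic on void *
is a GNU extension, which is fine for kernel code):

    #include <stdio.h>

    #define member_to_inst(ptr_member, type, member_identif) \
            ((type*) (((void*)(ptr_member)) - ((void*)&(((type*)0)->member_identif))))

    typedef struct {
            int key;
            int value;
    } pair_t;

    int main(void)
    {
            pair_t p = { .key = 7, .value = 42 };
            int *member = &p.value;

            /* Recover &p from the pointer to its .value member. */
            pair_t *inst = member_to_inst(member, pair_t, value);
            printf("%d\n", inst->key);   /* prints 7 */
            return 0;
    }
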
  • kernel/generic/include/memstr.h

--- r21799398
+++ rb1c57a8
@@ -47,4 +47,5 @@
 #define memset(dst, val, cnt)  __builtin_memset((dst), (val), (cnt))
 #define memcpy(dst, src, cnt)  __builtin_memcpy((dst), (src), (cnt))
+#define bzero(dst, cnt)        memset((dst), 0, (cnt))
 
 extern void memsetb(void *, size_t, uint8_t);
  • kernel/generic/include/mm/as.h

--- r21799398
+++ rb1c57a8
@@ -48,4 +48,8 @@
 #include <adt/btree.h>
 #include <lib/elf.h>
+#include <arch.h>
+
+#define AS                   THE->as
+
 
 /**
  • kernel/generic/include/preemption.h

--- r21799398
+++ rb1c57a8
@@ -36,6 +36,27 @@
 #define KERN_PREEMPTION_H_
 
-extern void preemption_disable(void);
-extern void preemption_enable(void);
+#include <arch.h>
+#include <compiler/barrier.h>
+#include <debug.h>
+
+#define PREEMPTION_INC         (1 << 0)
+#define PREEMPTION_DISABLED    (PREEMPTION_INC <= THE->preemption)
+#define PREEMPTION_ENABLED     (!PREEMPTION_DISABLED)
+
+/** Increment preemption disabled counter. */
+#define preemption_disable() \
+        do { \
+                THE->preemption += PREEMPTION_INC; \
+                compiler_barrier(); \
+        } while (0)
+
+/** Restores preemption but never reschedules. */
+#define preemption_enable() \
+        do { \
+                ASSERT(PREEMPTION_DISABLED); \
+                compiler_barrier(); \
+                THE->preemption -= PREEMPTION_INC; \
+        } while (0)
+
 
 #endif
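
The canonical pairing: while the counter is non-zero the scheduler will
not preempt the current thread, so cpu-local state can be used without
the thread migrating. A minimal sketch:

    static void touch_cpu_local_state(void)
    {
            preemption_disable();
            /* The thread cannot migrate here; CPU and other cpu-local
             * state may be used, but sleeping is not allowed. */
            preemption_enable();
    }

Note that preemption_enable() only decrements the counter; as its doc
comment says, it never reschedules, so a deferred preemption takes effect
at the next ordinary scheduling point.
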
  • kernel/generic/include/proc/task.h

--- r21799398
+++ rb1c57a8
@@ -43,6 +43,8 @@
 #include <synch/mutex.h>
 #include <synch/futex.h>
+#include <synch/workqueue.h>
 #include <adt/avl.h>
 #include <adt/btree.h>
+#include <adt/cht.h>
 #include <adt/list.h>
 #include <security/cap.h>
@@ -57,4 +59,8 @@
 #include <mm/as.h>
 #include <abi/sysinfo.h>
+#include <arch.h>
+
+#define TASK                 THE->task
+
 
 struct thread;
@@ -123,11 +129,13 @@
         task_arch_t arch;
 
-        /**
-         * Serializes access to the B+tree of task's futexes. This mutex is
-         * independent on the task spinlock.
-         */
-        mutex_t futexes_lock;
-        /** B+tree of futexes referenced by this task. */
-        btree_t futexes;
+        struct futex_cache {
+                /** CHT mapping virtual addresses of futex variables to futex objects. */
+                cht_t ht;
+                /** Serializes access to futex_list. */
+                spinlock_t list_lock;
+                /** List of all futexes accessed by this task. */
+                list_t list;
+                work_t destroy_work;
+        } *futexes;
 
         /** Accumulated accounting. */
  • kernel/generic/include/proc/thread.h

--- r21799398
+++ rb1c57a8
@@ -41,4 +41,5 @@
 #include <cpu.h>
 #include <synch/spinlock.h>
+#include <synch/rcu_types.h>
 #include <adt/avl.h>
 #include <mm/slab.h>
@@ -48,4 +49,8 @@
 #include <udebug/udebug.h>
 #include <abi/sysinfo.h>
+#include <arch.h>
+
+
+#define THREAD              THE->thread
 
 #define THREAD_NAME_BUFLEN  20
@@ -180,4 +185,16 @@
         /** Thread ID. */
         thread_id_t tid;
+
+        /** Work queue this thread belongs to or NULL. Immutable. */
+        struct work_queue *workq;
+        /** Links work queue threads. Protected by workq->lock. */
+        link_t workq_link;
+        /** True if the worker was blocked and is not running. Use thread->lock. */
+        bool workq_blocked;
+        /** True if the worker will block in order to become idle. Use workq->lock. */
+        bool workq_idling;
+
+        /** RCU thread related data. Protected by its own locks. */
+        rcu_thread_data_t rcu;
 
         /** Architecture-specific data. */
@@ -217,4 +234,6 @@
 extern void thread_ready(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
+extern void thread_interrupt(thread_t *);
+extern bool thread_interrupted(thread_t *);
 
 #ifndef thread_create_arch
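
A hedged sketch of the intended pattern for the two new calls (the exact
sleep/wakeup semantics live in the .c side of this merge): a long-running
kernel thread polls thread_interrupted() while another thread asks it to
stop with thread_interrupt(). Here worker_thread stands for the
thread_t * returned by thread_create():

    static void worker_main(void *arg)
    {
            while (!thread_interrupted(THREAD)) {
                    /* ... do one unit of work, sleep interruptibly ... */
            }
            thread_exit();
    }

    /* Elsewhere: request termination of the worker. */
    thread_interrupt(worker_thread);
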
  • kernel/generic/include/synch/condvar.h

--- r21799398
+++ rb1c57a8
@@ -39,4 +39,5 @@
 #include <synch/waitq.h>
 #include <synch/mutex.h>
+#include <synch/spinlock.h>
 #include <abi/synch.h>
 
@@ -50,4 +51,12 @@
         _condvar_wait_timeout((cv), (mtx), (usec), SYNCH_FLAGS_NONE)
 
+#ifdef CONFIG_SMP
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+        _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
+#else
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+        _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
+#endif
+
 extern void condvar_initialize(condvar_t *cv);
 extern void condvar_signal(condvar_t *cv);
@@ -55,4 +64,9 @@
 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec,
     int flags);
+extern int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
+        uint32_t usec, int flags);
+extern int _condvar_wait_timeout_irq_spinlock(condvar_t *cv,
+        irq_spinlock_t *irq_lock, uint32_t usec, int flags);
+
 
 #endif
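
A minimal sketch of waiting on a condition guarded by a raw spinlock; on
UP builds the wrapper passes NULL because spinlocks compile away there.
SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are the same constants the mutex
variant above uses; cv, lock and condition_holds are illustrative names:

    spinlock_lock(&lock);
    while (!condition_holds)
            _condvar_wait_timeout_spinlock(&cv, &lock, SYNCH_NO_TIMEOUT,
                SYNCH_FLAGS_NONE);
    /* condition_holds is true and the lock is held again. */
    spinlock_unlock(&lock);
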
  • kernel/generic/include/synch/futex.h

--- r21799398
+++ rb1c57a8
@@ -55,5 +55,7 @@
 extern sysarg_t sys_futex_wakeup(uintptr_t);
 
-extern void futex_cleanup(void);
+extern void futex_task_cleanup(void);
+extern void futex_task_init(struct task *);
+extern void futex_task_deinit(struct task *);
 
 #endif
  • kernel/generic/include/synch/semaphore.h

--- r21799398
+++ rb1c57a8
@@ -53,4 +53,8 @@
         _semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
+#define semaphore_down_interruptable(s) \
+        (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
+                SYNCH_FLAGS_INTERRUPTIBLE))
+
 extern void semaphore_initialize(semaphore_t *, int);
 extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int);
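
The new macro evaluates to true when the semaphore was actually acquired
and to false when the sleep was interrupted (it compares the result
against ESYNCH_INTERRUPTED). A usage sketch, pairing naturally with the
thread_interrupt() call introduced by this merge:

    if (!semaphore_down_interruptable(&sem)) {
            /* Interrupted while sleeping; we do not hold the semaphore. */
            return;
    }
    /* ... critical work ... */
    semaphore_up(&sem);
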
  • kernel/generic/include/synch/spinlock.h

--- r21799398
+++ rb1c57a8
@@ -45,5 +45,5 @@
 #ifdef CONFIG_SMP
 
-typedef struct {
+typedef struct spinlock {
         atomic_t val;
 
@@ -163,4 +163,7 @@
 /* On UP systems, spinlocks are effectively left out. */
 
+/* Allow the use of spinlock_t as an incomplete type. */
+typedef struct spinlock spinlock_t;
+
 #define SPINLOCK_DECLARE(name)
 #define SPINLOCK_EXTERN(name)
@@ -177,5 +180,5 @@
 
 #define spinlock_lock(lock)     preemption_disable()
-#define spinlock_trylock(lock)  (preemption_disable(), 1)
+#define spinlock_trylock(lock)  ({ preemption_disable(); 1; })
 #define spinlock_unlock(lock)   preemption_enable()
 #define spinlock_locked(lock)   1
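
The spinlock_trylock() rewrite is forced by the preemption.h change above:
preemption_disable() is now a do { } while (0) macro, which cannot appear
inside a comma expression, so the UP stub becomes a GCC statement
expression that still disables preemption and yields 1 (trylock on UP
always succeeds):

    /* Before: valid only while preemption_disable() was a function. */
    #define spinlock_trylock(lock)  (preemption_disable(), 1)

    /* After: a statement expression may contain statements and still
     * evaluate to a value, here the constant 1. */
    #define spinlock_trylock(lock)  ({ preemption_disable(); 1; })
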