Changeset b1c57a8 in mainline for kernel/generic/include
- Timestamp:
- 2014-10-09T15:03:55Z (11 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- e367939c
- Parents:
- 21799398 (diff), 207e8880 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/generic/include
- Files:
-
- 9 added
- 15 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/adt/list.h
r21799398 rb1c57a8 52 52 } list_t; 53 53 54 55 extern int list_member(const link_t *, const list_t *); 56 extern void list_splice(list_t *, link_t *); 57 extern unsigned int list_count(const list_t *); 58 59 54 60 /** Declare and initialize statically allocated list. 55 61 * … … 80 86 _link != &(list).head; _link = _link->prev) 81 87 88 /** Unlike list_foreach(), allows removing items while traversing a list. 89 * 90 * @code 91 * list_t mylist; 92 * typedef struct item { 93 * int value; 94 * link_t item_link; 95 * } item_t; 96 * 97 * //.. 98 * 99 * // Print each list element's value and remove the element from the list. 100 * list_foreach_safe(mylist, cur_link, next_link) { 101 * item_t *cur_item = list_get_instance(cur_link, item_t, item_link); 102 * printf("%d\n", cur_item->value); 103 * list_remove(cur_link); 104 * } 105 * @endcode 106 * 107 * @param list List to traverse. 108 * @param iterator Iterator to the current element of the list. 109 * The item this iterator points may be safely removed 110 * from the list. 111 * @param next_iter Iterator to the next element of the list. 112 */ 113 #define list_foreach_safe(list, iterator, next_iter) \ 114 for (link_t *iterator = (list).head.next, \ 115 *next_iter = iterator->next; \ 116 iterator != &(list).head; \ 117 iterator = next_iter, next_iter = iterator->next) 118 119 82 120 #define assert_link_not_used(link) \ 83 121 ASSERT(!link_used(link)) … … 289 327 { 290 328 headless_list_split_or_concat(part1, part2); 329 } 330 331 /** Concatenate two lists 332 * 333 * Concatenate lists @a list1 and @a list2, producing a single 334 * list @a list1 containing items from both (in @a list1, @a list2 335 * order) and empty list @a list2. 336 * 337 * @param list1 First list and concatenated output 338 * @param list2 Second list and empty output. 
339 * 340 */ 341 NO_TRACE static inline void list_concat(list_t *list1, list_t *list2) 342 { 343 list_splice(list2, list1->head.prev); 291 344 } 292 345 … … 340 393 } 341 394 342 extern int list_member(const link_t *, const list_t *);343 extern void list_concat(list_t *, list_t *);344 extern unsigned int list_count(const list_t *);345 346 395 #endif 347 396 -
kernel/generic/include/arch.h
r21799398 rb1c57a8 36 36 #define KERN_ARCH_H_ 37 37 38 #include <arch/arch.h> 39 #include < proc/thread.h>40 #include < proc/task.h>41 #include <mm/as.h> 38 #include <arch/arch.h> /* arch_pre_main() */ 39 #include <arch/asm.h> /* get_stack_base() */ 40 #include <config.h> 41 42 42 43 43 /* … … 49 49 #define THE ((the_t * )(get_stack_base())) 50 50 51 #define CPU THE->cpu52 #define THREAD THE->thread53 #define TASK THE->task54 #define AS THE->as55 #define PREEMPTION_DISABLED THE->preemption_disabled56 51 #define MAGIC UINT32_C(0xfacefeed) 57 52 … … 62 57 ((THE->task) ? (THE->task->container) : (DEFAULT_CONTAINER)) 63 58 59 /* Fwd decl. to avoid include hell. */ 60 struct thread; 61 struct task; 62 struct cpu; 63 struct as; 64 64 65 /** 65 66 * For each possible kernel stack, structure … … 68 69 */ 69 70 typedef struct { 70 size_t preemption_disabled; /**< Preemption disabled counter. */ 71 thread_t *thread; /**< Current thread. */ 72 task_t *task; /**< Current task. */ 73 cpu_t *cpu; /**< Executing cpu. */ 74 as_t *as; /**< Current address space. */ 75 uint32_t magic; /**< Magic value */ 71 size_t preemption; /**< Preemption disabled counter and flag. */ 72 #ifdef RCU_PREEMPT_A 73 size_t rcu_nesting; /**< RCU nesting count and flag. */ 74 #endif 75 struct thread *thread; /**< Current thread. */ 76 struct task *task; /**< Current task. */ 77 struct cpu *cpu; /**< Executing cpu. */ 78 struct as *as; /**< Current address space. */ 79 uint32_t magic; /**< Magic value */ 76 80 } the_t; 77 81 … … 91 95 extern void *arch_construct_function(fncptr_t *, void *, void *); 92 96 97 93 98 #endif 94 99 -
kernel/generic/include/atomic.h
r21799398 rb1c57a8 53 53 } 54 54 55 56 /* 57 * If the architecture does not provide operations that are atomic 58 * only with respect to the local cpu (eg exception handlers) and 59 * not other cpus, implement these cpu local atomic operations with 60 * full blown smp-safe atomics. 61 */ 62 #ifndef local_atomic_exchange 63 #define local_atomic_exchange(var_addr, new_val) \ 64 __atomic_exchange_n((var_addr), (new_val), __ATOMIC_RELAXED) 65 #endif 66 67 68 55 69 #endif 56 70 -
kernel/generic/include/cpu.h
r21799398 rb1c57a8 38 38 #include <mm/tlb.h> 39 39 #include <synch/spinlock.h> 40 #include <synch/rcu_types.h> 40 41 #include <proc/scheduler.h> 41 42 #include <arch/cpu.h> 42 43 #include <arch/context.h> 44 #include <adt/list.h> 45 #include <arch.h> 46 47 #define CPU THE->cpu 48 43 49 44 50 /** CPU structure. … … 94 100 95 101 /** 102 * SMP calls to invoke on this CPU. 103 */ 104 SPINLOCK_DECLARE(smp_calls_lock); 105 list_t smp_pending_calls; 106 107 /** RCU per-cpu data. Uses own locking. */ 108 rcu_cpu_data_t rcu; 109 110 /** 96 111 * Stack used by scheduler when there is no running thread. 97 112 */ -
kernel/generic/include/lib/memfnc.h
r21799398 rb1c57a8 50 50 ATTRIBUTE_OPTIMIZE("-fno-tree-loop-distribute-patterns") DO_NOT_DISCARD; 51 51 52 #define alloca(size) __builtin_alloca((size)) 53 52 54 #endif 53 55 -
kernel/generic/include/macros.h
r21799398 rb1c57a8 157 157 }) 158 158 159 160 #ifndef member_to_inst 161 #define member_to_inst(ptr_member, type, member_identif) \ 162 ((type*) (((void*)(ptr_member)) - ((void*)&(((type*)0)->member_identif)))) 163 #endif 164 165 159 166 #endif 160 167 -
kernel/generic/include/memstr.h
r21799398 rb1c57a8 47 47 #define memset(dst, val, cnt) __builtin_memset((dst), (val), (cnt)) 48 48 #define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) 49 #define bzero(dst, cnt) memset((dst), 0, (cnt)) 49 50 50 51 extern void memsetb(void *, size_t, uint8_t); -
kernel/generic/include/mm/as.h
r21799398 rb1c57a8 48 48 #include <adt/btree.h> 49 49 #include <lib/elf.h> 50 #include <arch.h> 51 52 #define AS THE->as 53 50 54 51 55 /** -
kernel/generic/include/preemption.h
r21799398 rb1c57a8 36 36 #define KERN_PREEMPTION_H_ 37 37 38 extern void preemption_disable(void); 39 extern void preemption_enable(void); 38 #include <arch.h> 39 #include <compiler/barrier.h> 40 #include <debug.h> 41 42 #define PREEMPTION_INC (1 << 0) 43 #define PREEMPTION_DISABLED (PREEMPTION_INC <= THE->preemption) 44 #define PREEMPTION_ENABLED (!PREEMPTION_DISABLED) 45 46 /** Increment preemption disabled counter. */ 47 #define preemption_disable() \ 48 do { \ 49 THE->preemption += PREEMPTION_INC; \ 50 compiler_barrier(); \ 51 } while (0) 52 53 /** Restores preemption but never reschedules. */ 54 #define preemption_enable() \ 55 do { \ 56 ASSERT(PREEMPTION_DISABLED); \ 57 compiler_barrier(); \ 58 THE->preemption -= PREEMPTION_INC; \ 59 } while (0) 60 40 61 41 62 #endif -
kernel/generic/include/proc/task.h
r21799398 rb1c57a8 43 43 #include <synch/mutex.h> 44 44 #include <synch/futex.h> 45 #include <synch/workqueue.h> 45 46 #include <adt/avl.h> 46 47 #include <adt/btree.h> 48 #include <adt/cht.h> 47 49 #include <adt/list.h> 48 50 #include <security/cap.h> … … 57 59 #include <mm/as.h> 58 60 #include <abi/sysinfo.h> 61 #include <arch.h> 62 63 #define TASK THE->task 64 59 65 60 66 struct thread; … … 123 129 task_arch_t arch; 124 130 125 /** 126 * Serializes access to the B+tree of task's futexes. This mutex is 127 * independent on the task spinlock. 128 */ 129 mutex_t futexes_lock; 130 /** B+tree of futexes referenced by this task. */ 131 btree_t futexes; 131 struct futex_cache { 132 /** CHT mapping virtual addresses of futex variables to futex objects.*/ 133 cht_t ht; 134 /** Serializes access to futex_list.*/ 135 spinlock_t list_lock; 136 /** List of all futexes accesses by this task. */ 137 list_t list; 138 work_t destroy_work; 139 } *futexes; 132 140 133 141 /** Accumulated accounting. */ -
kernel/generic/include/proc/thread.h
r21799398 rb1c57a8 41 41 #include <cpu.h> 42 42 #include <synch/spinlock.h> 43 #include <synch/rcu_types.h> 43 44 #include <adt/avl.h> 44 45 #include <mm/slab.h> … … 48 49 #include <udebug/udebug.h> 49 50 #include <abi/sysinfo.h> 51 #include <arch.h> 52 53 54 #define THREAD THE->thread 50 55 51 56 #define THREAD_NAME_BUFLEN 20 … … 180 185 /** Thread ID. */ 181 186 thread_id_t tid; 187 188 /** Work queue this thread belongs to or NULL. Immutable. */ 189 struct work_queue *workq; 190 /** Links work queue threads. Protected by workq->lock. */ 191 link_t workq_link; 192 /** True if the worker was blocked and is not running. Use thread->lock. */ 193 bool workq_blocked; 194 /** True if the worker will block in order to become idle. Use workq->lock. */ 195 bool workq_idling; 196 197 /** RCU thread related data. Protected by its own locks. */ 198 rcu_thread_data_t rcu; 182 199 183 200 /** Architecture-specific data. */ … … 217 234 extern void thread_ready(thread_t *); 218 235 extern void thread_exit(void) __attribute__((noreturn)); 236 extern void thread_interrupt(thread_t *); 237 extern bool thread_interrupted(thread_t *); 219 238 220 239 #ifndef thread_create_arch -
kernel/generic/include/synch/condvar.h
r21799398 rb1c57a8 39 39 #include <synch/waitq.h> 40 40 #include <synch/mutex.h> 41 #include <synch/spinlock.h> 41 42 #include <abi/synch.h> 42 43 … … 50 51 _condvar_wait_timeout((cv), (mtx), (usec), SYNCH_FLAGS_NONE) 51 52 53 #ifdef CONFIG_SMP 54 #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \ 55 _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags)) 56 #else 57 #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \ 58 _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags)) 59 #endif 60 52 61 extern void condvar_initialize(condvar_t *cv); 53 62 extern void condvar_signal(condvar_t *cv); … … 55 64 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, 56 65 int flags); 66 extern int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 67 uint32_t usec, int flags); 68 extern int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, 69 irq_spinlock_t *irq_lock, uint32_t usec, int flags); 70 57 71 58 72 #endif -
kernel/generic/include/synch/futex.h
r21799398 rb1c57a8 55 55 extern sysarg_t sys_futex_wakeup(uintptr_t); 56 56 57 extern void futex_cleanup(void); 57 extern void futex_task_cleanup(void); 58 extern void futex_task_init(struct task *); 59 extern void futex_task_deinit(struct task *); 58 60 59 61 #endif -
kernel/generic/include/synch/semaphore.h
r21799398 rb1c57a8 53 53 _semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE) 54 54 55 #define semaphore_down_interruptable(s) \ 56 (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \ 57 SYNCH_FLAGS_INTERRUPTIBLE)) 58 55 59 extern void semaphore_initialize(semaphore_t *, int); 56 60 extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int); -
kernel/generic/include/synch/spinlock.h
r21799398 rb1c57a8 45 45 #ifdef CONFIG_SMP 46 46 47 typedef struct {47 typedef struct spinlock { 48 48 atomic_t val; 49 49 … … 163 163 /* On UP systems, spinlocks are effectively left out. */ 164 164 165 /* Allow the use of spinlock_t as an incomplete type. */ 166 typedef struct spinlock spinlock_t; 167 165 168 #define SPINLOCK_DECLARE(name) 166 169 #define SPINLOCK_EXTERN(name) … … 177 180 178 181 #define spinlock_lock(lock) preemption_disable() 179 #define spinlock_trylock(lock) ( preemption_disable(), 1)182 #define spinlock_trylock(lock) ({ preemption_disable(); 1; }) 180 183 #define spinlock_unlock(lock) preemption_enable() 181 184 #define spinlock_locked(lock) 1
Note:
See TracChangeset
for help on using the changeset viewer.