Changeset 1b20da0 in mainline for kernel/generic/include
- Timestamp:
- 2018-02-28T17:52:03Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 3061bc1
- Parents:
- df6ded8
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:26:03)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:52:03)
- Location:
- kernel/generic/include
- Files:
- 22 edited (all changes are whitespace-only: trailing whitespace removed)
kernel/generic/include/adt/avl.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 64, 72, 84, 99, 102, 107) in the doc comments for the left and right descendant pointers and the key of struct avltree_node, and in the comment describing the base of struct avltree (a value smaller than or equal to every key in the tree, added to the key on insertion and updated by avltree_delete_min()).
kernel/generic/include/adt/btree.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 57) in the doc comment for a B-tree node's value pointers (sorted according to the key array, defined only at leaf level, with room for a value for the extra key).
kernel/generic/include/adt/cht.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 48-52, 66, 82, 102-103, 106, 118, 135) in the concurrent hash table comments: the cht_link note that rcu_link must be placed first and that rcu_link.func doubles as the item's mixed memoized hash (holding cht_t.op->remove_callback while the item awaits deferred destruction), the cht_ops hash() contract, the bucket-group description, the invalid memoized hash marker, the resize bookkeeping, and the continuation line of the cht_create() prototype.
kernel/generic/include/adt/fifo.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 74) in the comment noting that the dynamically allocated FIFO macro is suitable for creating larger FIFOs.
kernel/generic/include/adt/hash.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 77, 89, 103) on the hash_mix() definition (produces a uniform hash affecting all output bits from a skewed input), in the hash_combine() usage comment, and in the todo suggesting Bob Jenkins' mixing pass from http://burtleburtle.net/bob/c/lookup3.c.
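The hash_combine() comment touched here documents deriving one hash from multiple values; its @code example is elided by the viewer. A minimal sketch of the pattern, assuming a hypothetical two-field key type and using only the two signatures visible in the hunk:

    #include <adt/hash.h>

    /* Hypothetical composite key; not part of the changeset. */
    typedef struct {
        size_t bus;
        size_t slot;
    } dev_addr_t;

    static size_t dev_addr_hash(const dev_addr_t *addr)
    {
        /* Fold both fields into one value... */
        size_t hash = hash_combine(addr->bus, addr->slot);
        /* ...then spread it uniformly across all output bits. */
        return hash_mix(hash);
    }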
kernel/generic/include/adt/hash_table.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 4, 64) in the copyright header (Jakub Jermar, Adam Hraska) and in the removal-callback comment (the callback must not invoke any mutating functions of the hash table).
kernel/generic/include/adt/list.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 68, 73-75, 113, 115, 123) in the LIST_INITIALIZER example for statically allocated lists and in the list_foreach_safe() doc comment, whose example prints each element's value while removing the element from the list.
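Reassembled from the two snippets in these hunks, a self-contained sketch (the traversal body is partly elided by the viewer, so the list_get_instance() call is an assumption based on the comment's description; the hunk's initializer references name_list.list while the variable is var, which this sketch makes consistent; the <print.h> include for the kernel printf() is also an assumption):

    #include <adt/list.h>
    #include <print.h>   /* assumed home of the kernel printf() */

    typedef struct {
        int value;
        link_t item_link;
    } item_t;

    /* Statically allocated list, per the LIST_INITIALIZER example. */
    struct named_list {
        const char *name;
        list_t list;
    } var = {
        .name = "default name",
        .list = LIST_INITIALIZER(var.list)
    };

    /* Print each list element's value and remove the element from the
     * list; list_foreach_safe() reads the next link before the current
     * one is unlinked, so removal during traversal is safe. */
    static void drain(void)
    {
        list_foreach_safe(var.list, cur_link, next_link) {
            item_t *cur_item = list_get_instance(cur_link, item_t, item_link);
            printf("%d\n", cur_item->value);
            list_remove(cur_link);
        }
    }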
kernel/generic/include/arch.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 73) on the #endif closing the RCU_PREEMPT_A conditional around the per-CPU rcu_nesting count, next to the current thread and task pointers.
kernel/generic/include/cpu/cpu_mask.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 41-42, 47, 55, 76) in the cpu_mask_for_each() doc comment and example, on the macro's trailing cpu_mask_is_set() line, and on the closing include-guard comment.
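The usage example inside the touched comment, wrapped into a function so it stands alone (the <print.h> include for the kernel printf() is an assumption; everything else appears verbatim in the hunk):

    #include <cpu/cpu_mask.h>
    #include <print.h>   /* assumed home of the kernel printf() */

    static void report_active_cpus(void)
    {
        DEFINE_CPU_MASK(cpu_mask);   /* allocates a cpu_mask_t on the stack */
        cpu_mask_active(&cpu_mask);  /* sets the bits of all active cpus */

        cpu_mask_for_each(cpu_mask, cpu_id) {
            printf("Cpu with logical id %u is active.\n", cpu_id);
        }
    }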
kernel/generic/include/ddi/irq.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 128) on the notif_cfg member (notification configuration structure) of irq_t.
kernel/generic/include/fpu_context.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 2, 29) on the copyright line (Jakub Vana) and the @addtogroup generic comment.
kernel/generic/include/ipc/sysipc_ops.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 2, 86) on the copyright line (Jakub Jermar) and in the doc comment for the callback called from request_preprocess() (context: caller; caller alive: guaranteed).
kernel/generic/include/ipc/sysipc_priv.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 2) on the copyright line (Jakub Jermar).
kernel/generic/include/mm/as.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 74) on the AS_PF_OK definition (the page fault was resolved by as_page_fault()).
kernel/generic/include/mm/page.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 44) on the body of the P2SZ() macro, which converts a page count to a size in bytes ((pages) << PAGE_WIDTH).
kernel/generic/include/mm/tlb.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 75) on the fallback tlb_shootdown_start() macro, which reduces to interrupts_disable() when shootdown IPIs are not needed.
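The fallback macros in this hunk mirror the shootdown protocol: begin a shootdown (which at minimum disables interrupts), update the page tables, then finalize (restoring interrupts). A sketch of that sequence under stated assumptions: the ipl_t return type, the parameter meanings, and TLB_INVL_PAGES come from the header's SMP branch, which this hunk does not show:

    #include <mm/tlb.h>

    static void unmap_and_shoot(asid_t asid, uintptr_t page, size_t cnt)
    {
        /* Begin the shootdown; on UP this reduces to interrupts_disable(). */
        ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, asid, page, cnt);

        /* ... remove the mappings and invalidate the local TLB here ... */

        /* Finish; on UP this reduces to interrupts_restore(ipl). */
        tlb_shootdown_finalize(ipl);
    }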
kernel/generic/include/proc/thread.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 192) on the workq_link member that links work queue threads (protected by workq->lock).
kernel/generic/include/security/perm.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 42) in the comment noting that each task can hold an arbitrary combination of the permissions defined in the file, which must therefore be powers of two.
kernel/generic/include/synch/rcu.h
    df6ded8 → 1b20da0: trailing whitespace removed throughout the header's comments (hunks spanning lines 43-233): the rcu_assign() doc comment with its linked-list example, the rcu_access() doc comment, the rcu_read_lock()/rcu_read_unlock() descriptions (reader sections may be nested and are preemptible, but must not block or sleep), and the quiescent-state bookkeeping comments explaining how a cpu acknowledges a newly started grace period by copying _rcu_cur_gp into CPU->rcu.last_seen_gp, why an interrupt arriving inside rcu_lock() may benignly reset last_seen_gp to an older value, and why next_preempted is therefore appended to cur_preempted rather than overwriting it.
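The rcu_assign() example that several of these hunks touch, reassembled into one sketch (the writer's list insertion, the two-argument malloc() call, rcu_synchronize(), and the reader's rcu_read_lock()/rcu_access() all appear in the hunks; the struct head and the <mm/slab.h> include are assumptions):

    #include <synch/rcu.h>
    #include <mm/slab.h>   /* assumed home of the kernel malloc(size, flags) */

    typedef struct exam {
        struct exam *next;
        int grade;
    } exam_t;

    exam_t *exam_list;

    static void publish_exam(int grade)
    {
        /* Insert at the beginning of the list. */
        exam_t *my_exam = malloc(sizeof(exam_t), 0);
        my_exam->grade = grade;
        my_exam->next = exam_list;
        rcu_assign(exam_list, my_exam);

        /* Every reader now sees either the old list or the new list
         * headed by my_exam; once rcu_synchronize() returns, every
         * reader is guaranteed to see my_exam. */
        rcu_synchronize();
    }

    static int read_first_grade(void)
    {
        rcu_read_lock();
        exam_t *first_exam = rcu_access(exam_list);
        /* first_exam is safe to use and won't change under us while
         * the reader section lasts (but we must not block here). */
        int grade = first_exam ? first_exam->grade : -1;
        rcu_read_unlock();
        return grade;
    }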
kernel/generic/include/synch/rcu_types.h
    df6ded8 → 1b20da0: trailing whitespace removed (lines 74, 79-81, 94, 104, 112-113, 116, 145-146, 153, 156-157) in the comments on the per-cpu RCU fields (signal_unlock, the reader-section nesting count, the callback lists and their grace-period numbers) and on the per-thread data (the nesting count of a non-running thread's reader sections and, under RCU_PREEMPT_PODZIMEK, the flag recording that the thread was preempted in a reader section).
kernel/generic/include/synch/workqueue.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 55) on the #endif closing the conditional around work_t's magic cookie used for integrity checks.
kernel/generic/include/userspace.h
    df6ded8 → 1b20da0: trailing whitespace removed (line 42) on the noreturn declaration of userspace(), which switches to user space (CPU user privilege level).