source: mainline/kernel/generic/include/proc/thread.h@ 09ab0a9a

Last change on this file was 09ab0a9a, checked in by Jiri Svoboda <jiri@…>, 7 years ago:

Fix vertical spacing with new Ccheck revision.

/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */
/** @file
 */

#ifndef KERN_THREAD_H_
#define KERN_THREAD_H_

#include <synch/waitq.h>
#include <proc/task.h>
#include <time/timeout.h>
#include <cpu.h>
#include <synch/spinlock.h>
#include <synch/rcu_types.h>
#include <adt/avl.h>
#include <mm/slab.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <abi/proc/uarg.h>
#include <udebug/udebug.h>
#include <abi/proc/thread.h>
#include <abi/sysinfo.h>
#include <arch.h>

#define THREAD THE->thread

#define THREAD_NAME_BUFLEN 20

extern const char *thread_states[];

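/*
 * Illustrative sketch only (not part of the original header): THREAD expands
 * to the current CPU's running thread via THE (declared in <arch.h>), so code
 * running in thread context can refer to its own thread_t directly. The
 * printf() call and format specifier are illustrative assumptions.
 *
 *	printf("current thread: %s (tid %" PRIu64 ")\n",
 *	    THREAD->name, THREAD->tid);
 */
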
/* Thread flags */
typedef enum {
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = (1 << 0),
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = (1 << 1),
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = (1 << 2)
} thread_flags_t;

/** Thread structure. There is one per thread. */
typedef struct thread {
	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Thread's linkage to the threads_tree. */
	avltree_node_t threads_tree_node;

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except list links above.
	 */
	IRQ_SPINLOCK_DECLARE(lock);

	char name[THREAD_NAME_BUFLEN];

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	/**
	 * From here, the stored timeout context
	 * is restored when sleep times out.
	 */
	context_t sleep_timeout_context;

	/**
	 * From here, the stored interruption context
	 * is restored when sleep is interrupted.
	 */
	context_t sleep_interruption_context;

	/** If true, the thread can be interrupted from sleep. */
	bool sleep_interruptible;

	/**
	 * If true, and this thread's sleep returns without a wakeup
	 * (timed out or interrupted), the waitq ignores the next wakeup.
	 * This is necessary for futex to be able to handle those conditions.
	 */
	bool sleep_composable;

	/** Wait queue in which this thread sleeps. */
	waitq_t *sleep_queue;
	/** Timeout used for timeoutable sleeping. */
	timeout_t sleep_timeout;
	/** Flag signalling sleep timeout in progress. */
	volatile bool timeout_pending;

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/**
	 * If true, the thread will not go to sleep at all and will call
	 * thread_exit() before returning to userspace.
	 */
	bool interrupted;

	/** If true, thread_join_timeout() cannot be used on this thread. */
	bool detached;
	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;
	/** Link used in the joiner_head list. */
	link_t joiner_link;

	fpu_context_t *saved_fpu_context;
	bool fpu_context_exists;

	/*
	 * Defined only while the thread is not running: it means the FPU
	 * context is still held by the CPU that last executed this thread.
	 * While set, it disables migration.
	 */
	bool fpu_context_engaged;

	/* The thread will not be migrated if nomigrate is non-zero. */
	unsigned int nomigrate;

	/** Thread state. */
	state_t state;

	/** Thread CPU. */
	cpu_t *cpu;
	/** Containing task. */
	task_t *task;
	/** Thread is wired to CPU. */
	bool wired;
	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;
	/** Thread is executed in user space. */
	bool uspace;

	/** Ticks before preemption. */
	uint64_t ticks;

	/** Thread accounting. */
	uint64_t ucycles;
	uint64_t kcycles;
	/** Last sampled cycle. */
	uint64_t last_cycle;
	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Thread's priority. Implemented as an index into CPU->rq. */
	int priority;
	/** Thread ID. */
	thread_id_t tid;

	/** Work queue this thread belongs to or NULL. Immutable. */
	struct work_queue *workq;
	/** Links work queue threads. Protected by workq->lock. */
	link_t workq_link;
	/** True if the worker was blocked and is not running. Use thread->lock. */
	bool workq_blocked;
	/** True if the worker will block in order to become idle. Use workq->lock. */
	bool workq_idling;

	/** Thread's RCU-related data. Protected by its own locks. */
	rcu_thread_data_t rcu;

	/** Architecture-specific data. */
	thread_arch_t arch;

	/** Thread's kernel stack. */
	uint8_t *kstack;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	bool btrace;

	/** Debugging-related data. */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */
} thread_t;

/** Thread list lock.
 *
 * This lock protects the threads_tree.
 * Must be acquired before T.lock for each T of type thread_t.
 *
 */
IRQ_SPINLOCK_EXTERN(threads_lock);

/** AVL tree containing all threads. */
extern avltree_t threads_tree;

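/*
 * Illustrative sketch only (not part of the original header), following the
 * lock ordering documented above (threads_lock before any thread's lock).
 * Whether thread_find_by_id() expects threads_lock to be held already is an
 * assumption here; check its definition before relying on it.
 *
 *	irq_spinlock_lock(&threads_lock, true);
 *	thread_t *thread = thread_find_by_id(tid);
 *	if (thread != NULL) {
 *		irq_spinlock_lock(&thread->lock, false);
 *		// ... inspect fields of *thread ...
 *		irq_spinlock_unlock(&thread->lock, false);
 *	}
 *	irq_spinlock_unlock(&threads_lock, true);
 */
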
extern void thread_init(void);
extern thread_t *thread_create(void (*)(void *), void *, task_t *,
    thread_flags_t, const char *);
extern void thread_wire(thread_t *, cpu_t *);
extern void thread_attach(thread_t *, task_t *);
extern void thread_ready(thread_t *);
extern void thread_exit(void) __attribute__((noreturn));
extern void thread_interrupt(thread_t *);
extern bool thread_interrupted(thread_t *);

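/*
 * Illustrative sketch only (not part of the original header): creating a
 * kernel thread in the current task and making it runnable. The worker()
 * function, its NULL argument and the use of TASK (the current task) are
 * illustrative assumptions, not prescribed by this header.
 *
 *	static void worker(void *arg)
 *	{
 *		while (!thread_interrupted(THREAD)) {
 *			// ... do work ...
 *		}
 *		thread_exit();
 *	}
 *
 *	// In some initialization path:
 *	thread_t *t = thread_create(worker, NULL, TASK,
 *	    THREAD_FLAG_NONE, "worker");
 *	if (t != NULL)
 *		thread_ready(t);
 */
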
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *);
#endif

#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *);
#endif

#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *);
#endif

extern void thread_sleep(uint32_t);
extern void thread_usleep(uint32_t);

#define thread_join(t) \
	thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)

extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
extern void thread_detach(thread_t *);

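/*
 * Illustrative sketch only (not part of the original header): every created
 * thread should eventually be either joined or detached. Here t is assumed
 * to be a thread obtained from thread_create() as in the sketch above;
 * detaching after a successful join mirrors the usual kernel pattern and is
 * stated as an assumption, not mandated by this header.
 *
 *	// Either wait for the thread to finish, then release it ...
 *	if (thread_join(t) == EOK)
 *		thread_detach(t);
 *
 *	// ... or declare up front that nobody will ever join it:
 *	// thread_detach(t);
 */
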
extern void thread_print_list(bool);
extern void thread_destroy(thread_t *, bool);
extern thread_t *thread_find_by_id(thread_id_t);
extern void thread_update_accounting(bool);
extern bool thread_exists(thread_t *);

extern void thread_migration_disable(void);
extern void thread_migration_enable(void);

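/*
 * Illustrative sketch only (not part of the original header): nomigrate is
 * documented above as a counter, so disable/enable calls are assumed to
 * nest and must stay balanced. The CPU-local work in between is hypothetical.
 *
 *	thread_migration_disable();
 *	// ... access CPU-local state without being moved to another CPU ...
 *	thread_migration_enable();
 */
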
#ifdef CONFIG_UDEBUG
extern void thread_stack_trace(thread_id_t);
#endif

/** FPU context slab cache. */
extern slab_cache_t *fpu_context_cache;

/* Thread syscall prototypes. */
extern sys_errno_t sys_thread_create(uspace_arg_t *, char *, size_t,
    thread_id_t *);
extern sys_errno_t sys_thread_exit(int);
extern sys_errno_t sys_thread_get_id(thread_id_t *);
extern sys_errno_t sys_thread_usleep(uint32_t);
extern sys_errno_t sys_thread_udelay(uint32_t);

#endif

/** @}
 */