source: mainline/kernel/generic/include/proc/thread.h@ d23712e

Last change on this file was d23712e, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 17 months ago:

Use thread state variable instead of a cpu local variable to pass state

/*
 * Copyright (c) 2001-2007 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic_proc
 * @{
 */
/** @file
 */

#ifndef KERN_THREAD_H_
#define KERN_THREAD_H_

#include <synch/waitq.h>
#include <proc/task.h>
#include <time/timeout.h>
#include <cpu.h>
#include <synch/spinlock.h>
#include <adt/odict.h>
#include <mm/slab.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <abi/proc/uarg.h>
#include <udebug/udebug.h>
#include <abi/proc/thread.h>
#include <abi/sysinfo.h>
#include <arch.h>

#define THREAD CURRENT->thread

#define THREAD_NAME_BUFLEN 20
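
/*
 * Illustrative note (not part of the original header): THREAD names the
 * thread currently executing on this CPU, so code running in thread context
 * can inspect its own descriptor directly. Assuming the kernel printf() and
 * PRIu64 are available in the calling code:
 *
 *	printf("thread %" PRIu64 " (%s)\n", THREAD->tid, THREAD->name);
 *
 * THREAD may be NULL outside ordinary thread context (e.g. early during
 * boot), so such code should check it first.
 */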

extern const char *thread_states[];

/* Thread flags */
typedef enum {
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = (1 << 0),
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = (1 << 1),
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = (1 << 2)
} thread_flags_t;

/** Thread structure. There is one per thread. */
typedef struct thread {
	atomic_refcount_t refcount;

	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Link to @c threads ordered dictionary. */
	odlink_t lthreads;

	/** Tracking variable for thread_wait/thread_wakeup. */
	atomic_int sleep_state;

	/**
	 * If true, the thread is terminating.
	 * It will not go to sleep in interruptible synchronization functions
	 * and will call thread_exit() before returning to userspace.
	 */
	volatile bool interrupted;

	/** Wait queue in which this thread sleeps. Used for debug printouts. */
	_Atomic(waitq_t *) sleep_queue;

	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;

	/** Thread accounting. */
	atomic_time_stat_t ucycles;
	atomic_time_stat_t kcycles;

	/** Architecture-specific data. */
	thread_arch_t arch;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	atomic_int_fast8_t btrace;

	/** Debugging stuff */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */

	/*
	 * Immutable fields.
	 *
	 * These fields are only modified during initialization, and are not
	 * changed at any time between initialization and destruction.
	 * Can be accessed without synchronization in most places.
	 */

	/** Thread ID. */
	thread_id_t tid;

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	char name[THREAD_NAME_BUFLEN];

	/** Thread is executed in user space. */
	bool uspace;

	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Containing task. */
	task_t *task;

	/** Thread's kernel stack. */
	uint8_t *kstack;

	/*
	 * Local fields.
	 *
	 * These fields can be safely accessed from code that _controls execution_
	 * of this thread. Code controls execution of a thread if either:
	 * - it runs in the context of said thread AND interrupts are disabled
	 *   (interrupts can and will access these fields)
	 * - the thread is not running, and the code accessing it can legally
	 *   add/remove the thread to/from a runqueue, i.e., either:
	 *   - it is allowed to enqueue the thread in a new runqueue
	 *   - it holds the lock to the runqueue containing the thread
	 */

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	// TODO: we only need one of the two bools below

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/*
	 * FPU context is a special case. If lazy FPU switching is disabled,
	 * it acts as a regular local field. However, if lazy switching is
	 * enabled, the context is synchronized via CPU->fpu_lock.
	 */
#ifdef CONFIG_FPU
	fpu_context_t fpu_context;
#endif
	bool fpu_context_exists;

	/* The thread will not be migrated if nomigrate is non-zero. */
	unsigned int nomigrate;

	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;

	/**
	 * Thread state (state_t).
	 * This is atomic because it is also read by kernel console commands
	 * for debug output; otherwise it could just be a regular local field.
	 */
	atomic_int_fast32_t state;

	/** Thread CPU. */
	_Atomic(cpu_t *) cpu;

	/** Thread's priority. Implemented as an index into CPU->rq. */
	atomic_int_fast32_t priority;

	/** Last sampled cycle. */
	uint64_t last_cycle;
} thread_t;

IRQ_SPINLOCK_EXTERN(threads_lock);
extern odict_t threads;

extern void thread_init(void);
extern thread_t *thread_create(void (*)(void *), void *, task_t *,
    thread_flags_t, const char *);
extern void thread_wire(thread_t *, cpu_t *);
extern void thread_attach(thread_t *, task_t *);
extern void thread_start(thread_t *);
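
/*
 * Illustrative usage sketch (not part of the original header): creating and
 * starting a kernel-only thread in the current task. Assumes TASK expands to
 * the current task (see <proc/task.h>) and omits error handling. With
 * THREAD_FLAG_NOATTACH, the creator must call thread_attach() itself before
 * starting the thread.
 *
 *	static void worker(void *arg)
 *	{
 *		// do the work, then return (or call thread_exit())
 *	}
 *
 *	thread_t *thread = thread_create(worker, NULL, TASK,
 *	    THREAD_FLAG_NONE, "worker");
 *	if (thread != NULL)
 *		thread_start(thread);
 */
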
extern void thread_requeue_sleeping(thread_t *);
extern void thread_exit(void) __attribute__((noreturn));
extern void thread_interrupt(thread_t *);

enum sleep_state {
	SLEEP_INITIAL,
	SLEEP_ASLEEP,
	SLEEP_WOKE,
};

typedef enum {
	THREAD_OK,
	THREAD_TERMINATING,
} thread_termination_state_t;

typedef enum {
	THREAD_WAIT_SUCCESS,
	THREAD_WAIT_TIMEOUT,
} thread_wait_result_t;

extern thread_termination_state_t thread_wait_start(void);
extern thread_wait_result_t thread_wait_finish(deadline_t);
extern void thread_wakeup(thread_t *);
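
/*
 * Illustrative sketch of the thread_wait/thread_wakeup protocol (not part of
 * the original header). The waiter announces the wait first, then re-checks
 * its wake condition, and only then blocks, so a wakeup arriving in between
 * is not lost. condition_is_met()/mark_condition_met() stand for whatever
 * condition the caller waits on, and DEADLINE_NEVER is assumed to denote
 * "no timeout" (see <time/timeout.h> for the actual deadline helpers).
 *
 *	// Waiter (runs as the sleeping thread):
 *	if (thread_wait_start() == THREAD_TERMINATING)
 *		return;  // the thread is being killed; do not start a new wait
 *	if (!condition_is_met())
 *		(void) thread_wait_finish(DEADLINE_NEVER);
 *
 *	// Waker (holds a valid reference to the waiting thread):
 *	mark_condition_met();
 *	thread_wakeup(thread);
 */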

static inline thread_t *thread_ref(thread_t *thread)
{
	refcount_up(&thread->refcount);
	return thread;
}

static inline thread_t *thread_try_ref(thread_t *thread)
{
	if (refcount_try_up(&thread->refcount))
		return thread;
	else
		return NULL;
}

extern void thread_put(thread_t *);
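
/*
 * Illustrative note on reference counting (not part of the original header):
 * thread_ref() is for callers that already hold a reference known to be
 * live; thread_try_ref() additionally copes with the thread being destroyed
 * concurrently and fails by returning NULL. Every reference obtained this
 * way must eventually be dropped with thread_put(). 'candidate' below is a
 * hypothetical pointer reached through a shared structure:
 *
 *	thread_t *extra = thread_try_ref(candidate);
 *	if (extra != NULL) {
 *		// 'extra' keeps the thread structure alive until thread_put()
 *		thread_put(extra);
 *	}
 */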

#ifndef thread_create_arch
extern errno_t thread_create_arch(thread_t *, thread_flags_t);
#endif

#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *);
#endif

#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *);
#endif

extern void thread_sleep(uint32_t);
extern void thread_usleep(uint32_t);

extern errno_t thread_join(thread_t *);
extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
extern void thread_detach(thread_t *);
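
/*
 * Illustrative join sketch (not part of the original header). The uint32_t
 * argument of thread_join_timeout() is assumed to be a timeout in
 * microseconds and the unsigned int the usual synchronization flags (such as
 * SYNCH_FLAGS_NONE); thread.c is authoritative for both. The caller's
 * reference is assumed to remain owned by the caller and is released
 * separately:
 *
 *	errno_t rc = thread_join(thread);
 *	// ... inspect rc ...
 *	thread_put(thread);  // assumption: joining does not consume the reference
 */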

extern void thread_yield(void);

extern void thread_print_list(bool);
extern thread_t *thread_find_by_id(thread_id_t);
extern size_t thread_count(void);
extern thread_t *thread_first(void);
extern thread_t *thread_next(thread_t *);
extern void thread_update_accounting(bool);
extern thread_t *thread_try_get(thread_t *);
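
/*
 * Illustrative iteration sketch (not part of the original header): the
 * thread_first()/thread_next() pair walks the global 'threads' dictionary,
 * which is assumed to require holding threads_lock for the whole walk (this
 * is the pattern thread_print_list() is expected to use). Assumes the
 * irq_spinlock_lock()/irq_spinlock_unlock() API from <synch/spinlock.h>:
 *
 *	irq_spinlock_lock(&threads_lock, true);
 *	for (thread_t *t = thread_first(); t != NULL; t = thread_next(t)) {
 *		// inspect t; it is only guaranteed valid while the lock is held
 *	}
 *	irq_spinlock_unlock(&threads_lock, true);
 */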

extern void thread_migration_disable(void);
extern void thread_migration_enable(void);
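
/*
 * Illustrative sketch (not part of the original header): migration disabling
 * nests via the nomigrate counter, so each thread_migration_disable() must
 * be balanced by exactly one thread_migration_enable(). Typical use is
 * pinning the current thread to its CPU while it touches CPU-local state:
 *
 *	thread_migration_disable();
 *	// ... access CPU-local data; the scheduler keeps this thread here ...
 *	thread_migration_enable();
 */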

#ifdef CONFIG_UDEBUG
extern void thread_stack_trace(thread_id_t);
#endif

/** FPU context slab cache. */
extern slab_cache_t *fpu_context_cache;

/* Thread syscall prototypes. */
extern sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t, uspace_ptr_char, size_t,
    uspace_ptr_thread_id_t);
extern sys_errno_t sys_thread_exit(int);
extern sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t);
extern sys_errno_t sys_thread_usleep(uint32_t);
extern sys_errno_t sys_thread_udelay(uint32_t);

#endif

/** @}
 */