source: mainline/kernel/generic/include/proc/thread.h@ 515f1b1

Last change on this file since 515f1b1 was 515f1b1, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 19 months ago

Organize thread_t fields by access constraints

  • Property mode set to 100644
File size: 8.4 KB
Line 
1/*
2 * Copyright (c) 2001-2007 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup kernel_generic_proc
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_THREAD_H_
36#define KERN_THREAD_H_
37
38#include <synch/waitq.h>
39#include <proc/task.h>
40#include <time/timeout.h>
41#include <cpu.h>
42#include <synch/spinlock.h>
43#include <adt/odict.h>
44#include <mm/slab.h>
45#include <arch/cpu.h>
46#include <mm/tlb.h>
47#include <abi/proc/uarg.h>
48#include <udebug/udebug.h>
49#include <abi/proc/thread.h>
50#include <abi/sysinfo.h>
51#include <arch.h>
52
53#define THREAD CURRENT->thread
54
55#define THREAD_NAME_BUFLEN 20
56
57extern const char *thread_states[];
58
/** Flags accepted by thread_create(). Combinable as a bit mask. */
typedef enum {
	/** No special flags. */
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = (1 << 0),
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = (1 << 1),
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = (1 << 2)
} thread_flags_t;
69
70/** Thread structure. There is one per thread. */
/** Thread structure. There is one per thread.
 *
 * Fields are grouped by their synchronization/access constraints:
 * lock-free (atomic) fields first, then fields guarded by @c lock,
 * then immutable fields, then "local" fields owned by the code that
 * currently controls the thread's execution (see comments below).
 */
typedef struct thread {
	/** Reference count; see thread_ref()/thread_try_ref()/thread_put(). */
	atomic_refcount_t refcount;

	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Link to @c threads ordered dictionary. */
	odlink_t lthreads;

	/** Tracking variable for thread_wait/thread_wakeup */
	atomic_int sleep_state;

	/**
	 * If true, the thread is terminating.
	 * It will not go to sleep in interruptible synchronization functions
	 * and will call thread_exit() before returning to userspace.
	 */
	volatile bool interrupted;

	/** Wait queue in which this thread sleeps. Used for debug printouts. */
	_Atomic(waitq_t *) sleep_queue;

	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;

	/** Thread accounting. */
	/* NOTE(review): presumably ucycles = user-mode, kcycles = kernel-mode
	 * cycle counts — confirm against thread_update_accounting(). */
	atomic_time_stat_t ucycles;
	atomic_time_stat_t kcycles;

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except fields listed above.
	 */
	IRQ_SPINLOCK_DECLARE(lock);

	/** Architecture-specific data. */
	thread_arch_t arch;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	atomic_int_fast8_t btrace;

	/** Debugging stuff */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */

	/*
	 * Immutable fields.
	 *
	 * These fields are only modified during initialization, and are not
	 * changed at any time between initialization and destruction.
	 * Can be accessed without synchronization in most places.
	 */

	/** Thread ID. */
	thread_id_t tid;

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	/** Thread name, NUL-terminated, at most THREAD_NAME_BUFLEN - 1 chars. */
	char name[THREAD_NAME_BUFLEN];

	/** Thread is executed in user space. */
	bool uspace;

	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Containing task. */
	task_t *task;

	/** Thread's kernel stack. */
	uint8_t *kstack;

	/*
	 * Local fields.
	 *
	 * These fields can be safely accessed from code that _controls execution_
	 * of this thread. Code controls execution of a thread if either:
	 * - it runs in the context of said thread AND interrupts are disabled
	 *   (interrupts can and will access these fields)
	 * - the thread is not running, and the code accessing it can legally
	 *   add/remove the thread to/from a runqueue, i.e., either:
	 *   - it is allowed to enqueue thread in a new runqueue
	 *   - it holds the lock to the runqueue containing the thread
	 *
	 */

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	// TODO: we only need one of the two bools below

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/*
	 * FPU context is a special case. If lazy FPU switching is disabled,
	 * it acts as a regular local field. However, if lazy switching is enabled,
	 * the context is synchronized via CPU->fpu_lock
	 */
#ifdef CONFIG_FPU
	fpu_context_t fpu_context;
#endif
	/** True once an FPU context has been saved/created for this thread. */
	bool fpu_context_exists;

	/* The thread will not be migrated if nomigrate is non-zero. */
	/* NOTE(review): acts as a nesting counter for
	 * thread_migration_disable()/thread_migration_enable() — confirm. */
	unsigned int nomigrate;

	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;

	/** Thread state. */
	atomic_int_fast32_t state;

	/** Thread CPU. */
	_Atomic(cpu_t *) cpu;

	/** Thread's priority. Implemented as index to CPU->rq */
	atomic_int_fast32_t priority;

	/** Last sampled cycle. */
	uint64_t last_cycle;
} thread_t;
213
214IRQ_SPINLOCK_EXTERN(threads_lock);
215extern odict_t threads;
216
217extern void thread_init(void);
218extern thread_t *thread_create(void (*)(void *), void *, task_t *,
219 thread_flags_t, const char *);
220extern void thread_wire(thread_t *, cpu_t *);
221extern void thread_attach(thread_t *, task_t *);
222extern void thread_start(thread_t *);
223extern void thread_requeue_sleeping(thread_t *);
224extern void thread_exit(void) __attribute__((noreturn));
225extern void thread_interrupt(thread_t *);
226
/** Values of thread_t::sleep_state (the thread_wait/thread_wakeup handshake). */
enum sleep_state {
	/** Neither asleep nor woken yet. */
	SLEEP_INITIAL,
	/** The thread has committed to sleeping. */
	SLEEP_ASLEEP,
	/** A wakeup was issued (possibly before the sleep began). */
	SLEEP_WOKE,
};
232
/** Return value of thread_wait_start(). */
typedef enum {
	/** The thread may proceed to wait. */
	THREAD_OK,
	/** The thread is terminating; it must not go to sleep. */
	THREAD_TERMINATING,
} thread_termination_state_t;
237
/** Return value of thread_wait_finish(). */
typedef enum {
	/** The thread was woken up before the deadline. */
	THREAD_WAIT_SUCCESS,
	/** The deadline expired before a wakeup arrived. */
	THREAD_WAIT_TIMEOUT,
} thread_wait_result_t;
242
243extern thread_termination_state_t thread_wait_start(void);
244extern thread_wait_result_t thread_wait_finish(deadline_t);
245extern void thread_wakeup(thread_t *);
246
/** Acquire an additional reference to a thread.
 *
 * NOTE(review): the caller presumably must already hold a valid reference
 * (refcount nonzero); use thread_try_ref() otherwise — confirm against
 * the refcount_up() contract.
 *
 * @param thread Thread to reference.
 * @return The same @a thread pointer, for convenient chaining.
 */
static inline thread_t *thread_ref(thread_t *thread)
{
	refcount_up(&thread->refcount);
	return thread;
}
252
253static inline thread_t *thread_try_ref(thread_t *thread)
254{
255 if (refcount_try_up(&thread->refcount))
256 return thread;
257 else
258 return NULL;
259}
260
261extern void thread_put(thread_t *);
262
263#ifndef thread_create_arch
264extern errno_t thread_create_arch(thread_t *, thread_flags_t);
265#endif
266
267#ifndef thr_constructor_arch
268extern void thr_constructor_arch(thread_t *);
269#endif
270
271#ifndef thr_destructor_arch
272extern void thr_destructor_arch(thread_t *);
273#endif
274
275extern void thread_sleep(uint32_t);
276extern void thread_usleep(uint32_t);
277
278extern errno_t thread_join(thread_t *);
279extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
280extern void thread_detach(thread_t *);
281
282extern void thread_yield(void);
283
284extern void thread_print_list(bool);
285extern thread_t *thread_find_by_id(thread_id_t);
286extern size_t thread_count(void);
287extern thread_t *thread_first(void);
288extern thread_t *thread_next(thread_t *);
289extern void thread_update_accounting(bool);
290extern thread_t *thread_try_get(thread_t *);
291
292extern void thread_migration_disable(void);
293extern void thread_migration_enable(void);
294
295#ifdef CONFIG_UDEBUG
296extern void thread_stack_trace(thread_id_t);
297#endif
298
299/** Fpu context slab cache. */
300extern slab_cache_t *fpu_context_cache;
301
302/* Thread syscall prototypes. */
303extern sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t, uspace_ptr_char, size_t,
304 uspace_ptr_thread_id_t);
305extern sys_errno_t sys_thread_exit(int);
306extern sys_errno_t sys_thread_get_id(uspace_ptr_thread_id_t);
307extern sys_errno_t sys_thread_usleep(uint32_t);
308extern sys_errno_t sys_thread_udelay(uint32_t);
309
310#endif
311
312/** @}
313 */
Note: See TracBrowser for help on using the repository browser.