source: mainline/kernel/generic/include/proc/thread.h@ b23c88e

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since b23c88e was b23c88e, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

preemption_disable: Replaced memory barriers with compiler barriers. Added checks for whether a reschedule is needed once preemption is re-enabled.

  • Property mode set to 100644
File size: 7.8 KB
Line 
1/*
2 * Copyright (c) 2001-2007 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_THREAD_H_
36#define KERN_THREAD_H_
37
38#include <synch/waitq.h>
39#include <proc/task.h>
40#include <time/timeout.h>
41#include <cpu.h>
42#include <synch/spinlock.h>
43#include <synch/rcu.h>
44#include <adt/avl.h>
45#include <mm/slab.h>
46#include <arch/cpu.h>
47#include <mm/tlb.h>
48#include <abi/proc/uarg.h>
49#include <udebug/udebug.h>
50#include <abi/sysinfo.h>
51
/** Size of the thread_t::name buffer. */
#define THREAD_NAME_BUFLEN 20

/** Textual names of thread states (see state_t). */
extern const char *thread_states[];

/* Thread flags (bitmask; combine with bitwise OR). */
typedef enum {
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = (1 << 0),
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = (1 << 1),
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = (1 << 2)
} thread_flags_t;
66
67/** Thread structure. There is one per thread. */
/** Thread structure. There is one per thread. */
typedef struct thread {
	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Linkage to threads_tree, the AVL tree of all threads. */
	avltree_node_t threads_tree_node;

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except the list links above.
	 */
	IRQ_SPINLOCK_DECLARE(lock);

	/** Thread name (buffer; presumably NUL-terminated — confirm at creation site). */
	char name[THREAD_NAME_BUFLEN];

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	/**
	 * From here, the stored timeout context
	 * is restored when sleep times out.
	 */
	context_t sleep_timeout_context;

	/**
	 * From here, the stored interruption context
	 * is restored when sleep is interrupted.
	 */
	context_t sleep_interruption_context;

	/** If true, the thread can be interrupted from sleep. */
	bool sleep_interruptible;
	/** Wait queue in which this thread sleeps. */
	waitq_t *sleep_queue;
	/** Timeout used for timeoutable sleeping. */
	timeout_t sleep_timeout;
	/** Flag signalling sleep timeout in progress. */
	volatile bool timeout_pending;

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/**
	 * If true, the thread will not go to sleep at all and will call
	 * thread_exit() before returning to userspace.
	 */
	bool interrupted;

	/** If true, thread_join_timeout() cannot be used on this thread. */
	bool detached;
	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;
	/** Link used in the joiner_head list. */
	link_t joiner_link;

	/** Saved FPU context, valid only if fpu_context_exists is true. */
	fpu_context_t *saved_fpu_context;
	bool fpu_context_exists;

	/*
	 * Defined only while the thread is not running: the FPU context is
	 * still held by the CPU that last executed this thread. While set,
	 * the thread must not be migrated to another CPU.
	 */
	bool fpu_context_engaged;

	/* The thread will not be migrated if nomigrate is non-zero. */
	unsigned int nomigrate;

	/** Thread state. */
	state_t state;

	/** The thread would have been rescheduled had it not disabled preemption. */
	bool need_resched;

	/** Thread CPU. */
	cpu_t *cpu;
	/** Containing task. */
	task_t *task;
	/** Thread is wired to CPU. */
	bool wired;
	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;
	/** Thread is executed in user space. */
	bool uspace;

	/** Ticks before preemption. */
	uint64_t ticks;

	/** Thread accounting: cycles spent in user and kernel mode. */
	uint64_t ucycles;
	uint64_t kcycles;
	/** Last sampled cycle. */
	uint64_t last_cycle;
	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Thread's priority. Implemented as index to CPU->rq. */
	int priority;
	/** Thread ID. */
	thread_id_t tid;

	/** Work queue this thread belongs to or NULL. Immutable. */
	struct work_queue *workq;
	/** Links work queue threads. Protected by workq->lock. */
	link_t workq_link;
	/** True if the worker was blocked and is not running. Use thread->lock. */
	bool workq_blocked;
	/** True if the worker will block in order to become idle. Use workq->lock. */
	bool workq_idling;

	/** RCU thread related data. Protected by its own locks. */
	rcu_thread_data_t rcu;

	/** Architecture-specific data. */
	thread_arch_t arch;

	/** Thread's kernel stack. */
	uint8_t *kstack;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	bool btrace;

	/** Debugging stuff */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */
} thread_t;
216
/** Thread list lock.
 *
 * This lock protects the threads_tree.
 * Must be acquired before T.lock for each T of type thread_t.
 *
 */
IRQ_SPINLOCK_EXTERN(threads_lock);

/** AVL tree containing all threads. */
extern avltree_t threads_tree;

/* Thread lifecycle. */
extern void thread_init(void);
extern thread_t *thread_create(void (*)(void *), void *, task_t *,
    thread_flags_t, const char *);
extern void thread_wire(thread_t *, cpu_t *);
extern void thread_attach(thread_t *, task_t *);
extern void thread_ready(thread_t *);
extern void thread_exit(void) __attribute__((noreturn));
extern void thread_interrupt(thread_t *);
extern bool thread_interrupted(thread_t *);

/*
 * Architecture hooks. The #ifndef guards suggest an architecture may
 * provide these as macros instead — NOTE(review): confirm against
 * the arch headers.
 */
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *);
#endif

#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *);
#endif

#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *);
#endif

/* Sleeping. */
extern void thread_sleep(uint32_t);
extern void thread_usleep(uint32_t);

/** Wait for thread @a t to finish.
 *
 * Convenience wrapper around thread_join_timeout() that waits
 * without a timeout and with no synchronization flags.
 */
#define thread_join(t) \
	thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)

extern int thread_join_timeout(thread_t *, uint32_t, unsigned int);
extern void thread_detach(thread_t *);

extern void thread_print_list(bool);
extern void thread_destroy(thread_t *, bool);
extern thread_t *thread_find_by_id(thread_id_t);
extern void thread_update_accounting(bool);
extern bool thread_exists(thread_t *);

/* Migration control; see also thread_t::nomigrate. */
extern void thread_migration_disable(void);
extern void thread_migration_enable(void);

#ifdef CONFIG_UDEBUG
extern void thread_stack_trace(thread_id_t);
#endif

/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_slab;

/* Thread syscall prototypes. */
extern sysarg_t sys_thread_create(uspace_arg_t *, char *, size_t,
    thread_id_t *);
extern sysarg_t sys_thread_exit(int);
extern sysarg_t sys_thread_get_id(thread_id_t *);
extern sysarg_t sys_thread_usleep(uint32_t);
extern sysarg_t sys_thread_udelay(uint32_t);

#endif

/** @}
 */
Note: See TracBrowser for help on using the repository browser.