source: mainline/kernel/generic/include/proc/thread.h@ 181a746

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 181a746 was 181a746, checked in by Adam Hraska <adam.hraska+hos@…>, 13 years ago

rcu: Added preemptible RCU's core API implementation.

  • Property mode set to 100644
File size: 7.7 KB
Line 
1/*
2 * Copyright (c) 2001-2007 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericproc
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_THREAD_H_
36#define KERN_THREAD_H_
37
38#include <synch/waitq.h>
39#include <proc/task.h>
40#include <time/timeout.h>
41#include <cpu.h>
42#include <synch/spinlock.h>
43#include <synch/rcu.h>
44#include <adt/avl.h>
45#include <mm/slab.h>
46#include <arch/cpu.h>
47#include <mm/tlb.h>
48#include <abi/proc/uarg.h>
49#include <udebug/udebug.h>
50#include <abi/sysinfo.h>
51
52#define THREAD_NAME_BUFLEN 20
53
54extern const char *thread_states[];
55
56/* Thread flags */
/** Flags accepted by thread_create(). May be bitwise OR-ed together. */
typedef enum {
	/** No flags set. */
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = (1 << 0),
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = (1 << 1),
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = (1 << 2)
} thread_flags_t;
66
67/** Thread structure. There is one per thread. */
/** Thread structure. There is one per thread. */
typedef struct thread {
	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Threads linkage to the threads_tree. */
	avltree_node_t threads_tree_node;

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except the list links above
	 * (those are protected by the locks of the respective lists).
	 */
	IRQ_SPINLOCK_DECLARE(lock);

	/** Symbolic thread name; NUL-terminated, at most
	 * THREAD_NAME_BUFLEN - 1 characters. */
	char name[THREAD_NAME_BUFLEN];

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	/**
	 * From here, the stored timeout context
	 * is restored when sleep times out.
	 */
	context_t sleep_timeout_context;

	/**
	 * From here, the stored interruption context
	 * is restored when sleep is interrupted.
	 */
	context_t sleep_interruption_context;

	/** If true, the thread can be interrupted from sleep. */
	bool sleep_interruptible;
	/** Wait queue in which this thread sleeps, or NULL when not sleeping. */
	waitq_t *sleep_queue;
	/** Timeout used for timeoutable sleeping. */
	timeout_t sleep_timeout;
	/** Flag signalling sleep timeout in progress. */
	volatile bool timeout_pending;

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/**
	 * If true, the thread will not go to sleep at all and will call
	 * thread_exit() before returning to userspace.
	 */
	bool interrupted;

	/** If true, thread_join_timeout() cannot be used on this thread. */
	bool detached;
	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;
	/** Link used in the joiner_head list. */
	link_t joiner_link;

	/** Lazily saved FPU state; NOTE(review): allocation/ownership is
	 * handled in thread.c — see fpu_context_slab below. */
	fpu_context_t *saved_fpu_context;
	/** True if saved_fpu_context holds a valid FPU state. */
	bool fpu_context_exists;

	/*
	 * Defined only if thread doesn't run.
	 * It means that the FPU context is still live in the CPU that last
	 * executed this thread. While set, the thread must not be migrated.
	 */
	bool fpu_context_engaged;

	/* The thread will not be migrated if nomigrate is non-zero.
	 * Incremented/decremented by thread_migration_disable()/enable(). */
	unsigned int nomigrate;

	/** Thread state. */
	state_t state;

	/** Thread CPU. */
	cpu_t *cpu;
	/** Containing task. */
	task_t *task;
	/** Thread is wired to CPU. */
	bool wired;
	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;
	/** Thread is executed in user space. */
	bool uspace;

	/** Ticks before preemption. */
	uint64_t ticks;

	/** Thread accounting: cycles spent in user space. */
	uint64_t ucycles;
	/** Thread accounting: cycles spent in kernel. */
	uint64_t kcycles;
	/** Last sampled cycle. */
	uint64_t last_cycle;
	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Thread's priority. Implemented as index to CPU->rq */
	int priority;
	/** Thread ID. */
	thread_id_t tid;

	/** Work queue this thread belongs to or NULL. Immutable. */
	struct work_queue *workq;
	/** Links work queue threads. Protected by workq->lock. */
	link_t workq_link;
	/** True if the worker was blocked and is not running. Use thread->lock. */
	bool workq_blocked;
	/** True if the worker will block in order to become idle. Use workq->lock. */
	bool workq_idling;

	/** RCU thread related data. Protected by its own locks. */
	rcu_thread_data_t rcu;

	/** Architecture-specific data. */
	thread_arch_t arch;

	/** Thread's kernel stack. */
	uint8_t *kstack;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	bool btrace;

	/** Debugging stuff */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */
} thread_t;
213
/** Thread list lock.
 *
 * This lock protects the threads_tree.
 * Lock ordering: must be acquired before T.lock for each T of type
 * thread_t (never the other way around).
 *
 */
IRQ_SPINLOCK_EXTERN(threads_lock);

/** AVL tree containing all threads, keyed so that a thread can be
 * looked up (see thread_find_by_id() / thread_exists() below).
 * Protected by threads_lock. */
extern avltree_t threads_tree;
224
/** One-time initialization of the threading subsystem. */
extern void thread_init(void);

/** Create a new thread running the given function with the given argument
 * in the given task; flags are a combination of thread_flags_t values and
 * the last argument is the thread name. Returns NULL-able pointer to the
 * new thread (see thread.c for exact failure semantics). */
extern thread_t *thread_create(void (*)(void *), void *, task_t *,
    thread_flags_t, const char *);

/** Pin the thread to the given CPU (sets thread_t::wired). */
extern void thread_wire(thread_t *, cpu_t *);

/** Attach a thread created with THREAD_FLAG_NOATTACH to a task. */
extern void thread_attach(thread_t *, task_t *);

/** Make the thread runnable (insert it into a run queue). */
extern void thread_ready(thread_t *);

/** Terminate the calling thread; never returns. */
extern void thread_exit(void) __attribute__((noreturn));

/** Request interruption of the given thread (sets thread_t::interrupted). */
extern void thread_interrupt(thread_t *);

/** Check whether the given thread has been interrupted. */
extern bool thread_interrupted(thread_t *);

/* Architecture hooks; an architecture may provide these as macros,
 * in which case the generic prototypes below are suppressed. */
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *);
#endif

#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *);
#endif

#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *);
#endif

/** Put the calling thread to sleep for the given number of seconds. */
extern void thread_sleep(uint32_t);
/** Put the calling thread to sleep for the given number of microseconds. */
extern void thread_usleep(uint32_t);
249
/** Wait for thread t to exit, blocking without a timeout.
 * Convenience wrapper around thread_join_timeout(). */
#define thread_join(t) \
	thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)

/** Wait for the thread to exit, with a timeout in microseconds and
 * SYNCH_FLAGS_* flags; returns a synchronization error code. */
extern int thread_join_timeout(thread_t *, uint32_t, unsigned int);
/** Detach the thread so that it cleans up after itself;
 * thread_join_timeout() may no longer be used on it. */
extern void thread_detach(thread_t *);

/** Print a list of all threads to the kernel console;
 * the flag selects additional detail (see thread.c). */
extern void thread_print_list(bool);
extern void thread_destroy(thread_t *, bool);
/** Look up a thread by its ID in threads_tree, or NULL if not found. */
extern thread_t *thread_find_by_id(thread_id_t);
/** Update accounting of the current thread (see ucycles/kcycles);
 * the flag selects which counter is credited (see thread.c). */
extern void thread_update_accounting(bool);
/** Check whether the thread is present in threads_tree. */
extern bool thread_exists(thread_t *);

/* Disable/enable migration of the calling thread between CPUs.
 * Calls nest (see thread_t::nomigrate). */
extern void thread_migration_disable(void);
extern void thread_migration_enable(void);

#ifdef CONFIG_UDEBUG
/** Schedule printing of a stack trace of the given thread. */
extern void thread_stack_trace(thread_id_t);
#endif

/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_slab;

/* Thread syscall prototypes (userspace entry points). */
extern sysarg_t sys_thread_create(uspace_arg_t *, char *, size_t,
    thread_id_t *);
extern sysarg_t sys_thread_exit(int);
extern sysarg_t sys_thread_get_id(thread_id_t *);
extern sysarg_t sys_thread_usleep(uint32_t);
extern sysarg_t sys_thread_udelay(uint32_t);
280#endif
281
282/** @}
283 */
Note: See TracBrowser for help on using the repository browser.