source: mainline/kernel/generic/include/proc/thread.h@ a35b458

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 7.8 KB
RevLine 
[f761f1eb]1/*
[ea7890e7]2 * Copyright (c) 2001-2007 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[764c302]29/** @addtogroup genericproc
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[06e1e95]35#ifndef KERN_THREAD_H_
36#define KERN_THREAD_H_
[f761f1eb]37
[b3f8fb7]38#include <synch/waitq.h>
39#include <proc/task.h>
[831a04d0]40#include <time/timeout.h>
[b3f8fb7]41#include <cpu.h>
[ea7890e7]42#include <synch/spinlock.h>
[8e3ed06]43#include <synch/rcu_types.h>
[5dcee525]44#include <adt/avl.h>
[f76fed4]45#include <mm/slab.h>
[b3f8fb7]46#include <arch/cpu.h>
47#include <mm/tlb.h>
[c0699467]48#include <abi/proc/uarg.h>
[9a1b20c]49#include <udebug/udebug.h>
[9ba415e]50#include <abi/proc/thread.h>
[c0699467]51#include <abi/sysinfo.h>
[1066041]52#include <arch.h>
53
54
/** The currently executing thread, as recorded in the THE structure. */
#define THREAD THE->thread

/** Size of the per-thread name buffer (thread_t::name). */
#define THREAD_NAME_BUFLEN 20

/** Textual names of thread states — presumably indexed by state_t; verify in thread.c. */
extern const char *thread_states[];
[f761f1eb]60
/* Flags accepted by thread_create(). */
typedef enum {
	/** No flags requested. */
	THREAD_FLAG_NONE = 0,
	/** Thread executes in user space. */
	THREAD_FLAG_USPACE = 1 << 0,
	/** Thread will be attached by the caller. */
	THREAD_FLAG_NOATTACH = 1 << 1,
	/** Thread accounting doesn't affect accumulated task accounting. */
	THREAD_FLAG_UNCOUNTED = 1 << 2
} thread_flags_t;
[f761f1eb]71
/** Thread structure. There is one per thread. */
typedef struct thread {
	link_t rq_link;  /**< Run queue link. */
	link_t wq_link;  /**< Wait queue link. */
	link_t th_link;  /**< Links to threads within containing task. */

	/** Threads linkage to the threads_tree. */
	avltree_node_t threads_tree_node;

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except list links above.
	 */
	IRQ_SPINLOCK_DECLARE(lock);

	/** Symbolic thread name, NUL-terminated. */
	char name[THREAD_NAME_BUFLEN];

	/** Function implementing the thread. */
	void (*thread_code)(void *);
	/** Argument passed to thread_code() function. */
	void *thread_arg;

	/**
	 * From here, the stored context is restored
	 * when the thread is scheduled.
	 */
	context_t saved_context;

	/**
	 * From here, the stored timeout context
	 * is restored when sleep times out.
	 */
	context_t sleep_timeout_context;

	/**
	 * From here, the stored interruption context
	 * is restored when sleep is interrupted.
	 */
	context_t sleep_interruption_context;

	/** If true, the thread can be interrupted from sleep. */
	bool sleep_interruptible;
	/** Wait queue in which this thread sleeps. */
	waitq_t *sleep_queue;
	/** Timeout used for timeoutable sleeping. */
	timeout_t sleep_timeout;
	/** Flag signalling sleep timeout in progress. */
	volatile bool timeout_pending;

	/**
	 * True if this thread is executing copy_from_uspace().
	 * False otherwise.
	 */
	bool in_copy_from_uspace;

	/**
	 * True if this thread is executing copy_to_uspace().
	 * False otherwise.
	 */
	bool in_copy_to_uspace;

	/**
	 * If true, the thread will not go to sleep at all and will call
	 * thread_exit() before returning to userspace.
	 */
	bool interrupted;

	/** If true, thread_join_timeout() cannot be used on this thread. */
	bool detached;
	/** Waitq for thread_join_timeout(). */
	waitq_t join_wq;
	/** Link used in the joiner_head list. */
	link_t joiner_link;

	/** Saved FPU state buffer — presumably allocated from fpu_context_cache on demand; verify in thread.c. */
	fpu_context_t *saved_fpu_context;
	/** True if saved_fpu_context holds a valid FPU context. */
	bool fpu_context_exists;

	/*
	 * Meaningful only while the thread doesn't run: the FPU context is
	 * still held by the CPU that last executed this thread.
	 * This disables migration.
	 */
	bool fpu_context_engaged;

	/* The thread will not be migrated if nomigrate is non-zero. */
	unsigned int nomigrate;

	/** Thread state. */
	state_t state;

	/** Thread CPU. */
	cpu_t *cpu;
	/** Containing task. */
	task_t *task;
	/** Thread is wired to CPU. */
	bool wired;
	/** Thread was migrated to another CPU and has not run yet. */
	bool stolen;
	/** Thread is executed in user space. */
	bool uspace;

	/** Ticks before preemption. */
	uint64_t ticks;

	/** Thread accounting: cycles spent in user space. */
	uint64_t ucycles;
	/** Thread accounting: cycles spent in kernel — NOTE(review): u/k split inferred from names; confirm in thread.c. */
	uint64_t kcycles;
	/** Last sampled cycle. */
	uint64_t last_cycle;
	/** Thread doesn't affect accumulated accounting. */
	bool uncounted;

	/** Thread's priority. Implemented as index to CPU->rq */
	int priority;
	/** Thread ID. */
	thread_id_t tid;

	/** Work queue this thread belongs to or NULL. Immutable. */
	struct work_queue *workq;
	/** Links work queue threads. Protected by workq->lock. */
	link_t workq_link;
	/** True if the worker was blocked and is not running. Use thread->lock. */
	bool workq_blocked;
	/** True if the worker will block in order to become idle. Use workq->lock. */
	bool workq_idling;

	/** RCU thread related data. Protected by its own locks. */
	rcu_thread_data_t rcu;

	/** Architecture-specific data. */
	thread_arch_t arch;

	/** Thread's kernel stack. */
	uint8_t *kstack;

#ifdef CONFIG_UDEBUG
	/**
	 * If true, the scheduler will print a stack trace
	 * to the kernel console upon scheduling this thread.
	 */
	bool btrace;

	/** Debugging stuff */
	udebug_thread_t udebug;
#endif /* CONFIG_UDEBUG */
} thread_t;
218
/** Thread list lock.
 *
 * This lock protects the threads_tree.
 * Must be acquired before T.lock for each T of type thread_t.
 *
 */
IRQ_SPINLOCK_EXTERN(threads_lock);

/** AVL tree containing all threads. */
extern avltree_t threads_tree;

/* Thread lifecycle. */
extern void thread_init(void);
extern thread_t *thread_create(void (*)(void *), void *, task_t *,
    thread_flags_t, const char *);
extern void thread_wire(thread_t *, cpu_t *);
extern void thread_attach(thread_t *, task_t *);
extern void thread_ready(thread_t *);
extern void thread_exit(void) __attribute__((noreturn));
extern void thread_interrupt(thread_t *);
extern bool thread_interrupted(thread_t *);

/* Architecture-specific hooks; an arch may provide these as macros instead. */
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *);
#endif

#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *);
#endif

#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *);
#endif

/* Sleeping. */
extern void thread_sleep(uint32_t);
extern void thread_usleep(uint32_t);

/** Wait for a thread to exit, with no timeout. */
#define thread_join(t) \
	thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)

extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
extern void thread_detach(thread_t *);

/* Introspection and maintenance. */
extern void thread_print_list(bool);
extern void thread_destroy(thread_t *, bool);
extern thread_t *thread_find_by_id(thread_id_t);
extern void thread_update_accounting(bool);
extern bool thread_exists(thread_t *);

/* Migration control; presumably nested enable/disable pairs — see thread_t::nomigrate. */
extern void thread_migration_disable(void);
extern void thread_migration_enable(void);

#ifdef CONFIG_UDEBUG
extern void thread_stack_trace(thread_id_t);
#endif

/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_cache;

/* Thread syscall prototypes. */
extern sys_errno_t sys_thread_create(uspace_arg_t *, char *, size_t,
    thread_id_t *);
extern sys_errno_t sys_thread_exit(int);
extern sys_errno_t sys_thread_get_id(thread_id_t *);
extern sys_errno_t sys_thread_usleep(uint32_t);
extern sys_errno_t sys_thread_udelay(uint32_t);
[f761f1eb]285#endif
[b45c443]286
[764c302]287/** @}
[b45c443]288 */
Note: See TracBrowser for help on using the repository browser.