source: mainline/kernel/generic/include/proc/task.h@ 8be8cfa

Last change on this file since 8be8cfa was 8be8cfa, checked in by Martin Decky <martin@…>, 18 years ago

spinlock extern declaration macro

  • Property mode set to 100644
File size: 8.9 KB
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */
/** @file
 */

#ifndef KERN_TASK_H_
#define KERN_TASK_H_

#include <cpu.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/rwlock.h>
#include <synch/futex.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <security/cap.h>
#include <arch/proc/task.h>
#include <arch/proc/thread.h>
#include <arch/context.h>
#include <arch/fpu_context.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <proc/scheduler.h>

#define IPC_MAX_PHONES 16
#define THREAD_NAME_BUFLEN 20

struct answerbox;
struct task;
struct thread;

typedef enum {
	IPC_PHONE_FREE = 0,	/**< Phone is free and can be allocated */
	IPC_PHONE_CONNECTING,	/**< Phone is connecting somewhere */
	IPC_PHONE_CONNECTED,	/**< Phone is connected */
	IPC_PHONE_HUNGUP,	/**< Phone is hung up, waiting for answers to come */
	IPC_PHONE_SLAMMED	/**< Phone was hung up from the server side */
} ipc_phone_state_t;

/** Structure identifying a phone (in the TASK structure). */
typedef struct {
	SPINLOCK_DECLARE(lock);
	link_t link;
	struct answerbox *callee;
	ipc_phone_state_t state;
	atomic_t active_calls;
} phone_t;

typedef struct answerbox {
	SPINLOCK_DECLARE(lock);

	struct task *task;

	waitq_t wq;

	link_t connected_phones;	/**< Phones connected to this answerbox */
	link_t calls;			/**< Received calls */
	link_t dispatched_calls;	/* Should be a hash table in the future */

	link_t answers;			/**< Answered calls */

	SPINLOCK_DECLARE(irq_lock);
	link_t irq_notifs;		/**< Notifications from IRQ handlers */
	link_t irq_head;		/**< IRQs with notifications to this answerbox. */
} answerbox_t;
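
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * every connected phone_t is linked into the answerbox's connected_phones
 * list through its link member and points back at the box via callee. A
 * walker over that list, assuming the usual link_t / list_get_instance()
 * helpers from <adt/list.h>, could count the connected phones like this.
 */
static inline count_t answerbox_count_phones_sketch(answerbox_t *box)
{
	count_t cnt = 0;
	link_t *cur;

	spinlock_lock(&box->lock);
	for (cur = box->connected_phones.next; cur != &box->connected_phones;
	    cur = cur->next) {
		phone_t *phone = list_get_instance(cur, phone_t, link);
		if (phone->state == IPC_PHONE_CONNECTED)
			cnt++;
	}
	spinlock_unlock(&box->lock);

	return cnt;
}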

/** Task structure. */
typedef struct task {
	/** Task lock.
	 *
	 * Must be acquired before threads_lock and the thread lock of any of
	 * its threads.
	 */
	SPINLOCK_DECLARE(lock);

	char *name;
	struct thread *main_thread;	/**< Pointer to the main thread. */
	link_t th_head;			/**< List of threads contained in this task. */
	as_t *as;			/**< Address space. */
	task_id_t taskid;		/**< Unique identity of task */
	context_id_t context;		/**< Task security context */

	/** If this is true, new threads can become part of the task. */
	bool accept_new_threads;

	count_t refcount;		/**< Number of references (i.e. threads). */

	cap_t capabilities;		/**< Task capabilities. */

	/* IPC stuff */
	answerbox_t answerbox;		/**< Communication endpoint */
	phone_t phones[IPC_MAX_PHONES];
	atomic_t active_calls;		/**< Active asynchronous messages.
					 *   It is used to limit userspace to
					 *   a certain extent. */

	task_arch_t arch;		/**< Architecture specific task data. */

	/**
	 * Serializes access to the B+tree of the task's futexes. This mutex
	 * is independent of the task spinlock.
	 */
	mutex_t futexes_lock;
	btree_t futexes;		/**< B+tree of futexes referenced by this task. */

	uint64_t cycles;		/**< Accumulated accounting. */
} task_t;
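
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * as documented above, futexes_lock is independent of the task spinlock, so
 * a lookup in the futex B+tree only needs the mutex. The btree_search() call
 * is assumed to have the usual <adt/btree.h> signature.
 */
static inline void *task_futex_find_sketch(task_t *t, btree_key_t paddr)
{
	btree_node_t *leaf;
	void *futex;

	mutex_lock(&t->futexes_lock);
	futex = btree_search(&t->futexes, paddr, &leaf);
	mutex_unlock(&t->futexes_lock);

	return futex;
}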

typedef void (* timeout_handler_t)(void *arg);

typedef struct {
	SPINLOCK_DECLARE(lock);

	link_t link;			/**< Link to the list of active timeouts on THE->cpu */

	uint64_t ticks;			/**< Timeout will be activated in this number of clock() ticks. */

	timeout_handler_t handler;	/**< Function that will be called on timeout activation. */
	void *arg;			/**< Argument to be passed to handler() function. */

	cpu_t *cpu;			/**< CPU on which this timeout is registered. */
} timeout_t;
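
/*
 * Illustrative sketch (hypothetical, not part of the original header): a
 * handler matching timeout_handler_t and its registration. The
 * timeout_initialize()/timeout_register() calls are assumed to be the ones
 * declared elsewhere in the kernel (<time/timeout.h>); they are not part of
 * this file, and the units of the time argument follow whatever that
 * interface expects.
 */
static void demo_timeout_handler_sketch(void *arg)
{
	/* Called once the timeout fires; arg is the value passed at registration. */
	(void) arg;
}

static inline void demo_timeout_arm_sketch(timeout_t *t, uint64_t time)
{
	timeout_initialize(t);
	timeout_register(t, time, demo_timeout_handler_sketch, NULL);
}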

/** Thread states. */
typedef enum {
	Invalid,	/**< It is an error if a thread is found in this state. */
	Running,	/**< State of a thread that is currently executing on some CPU. */
	Sleeping,	/**< Thread in this state is waiting for an event. */
	Ready,		/**< State of threads in a run queue. */
	Entering,	/**< Threads are in this state before they are first readied. */
	Exiting,	/**< After a thread calls thread_exit(), it is put into the Exiting state. */
	Undead		/**< Threads that were not detached but exited are in the Undead state. */
} state_t;

/** Join types. */
typedef enum {
	None,
	TaskClnp,	/**< The thread will be joined by ktaskclnp thread. */
	TaskGC		/**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;

/** Thread structure. There is one per thread. */
typedef struct thread {
	link_t rq_link;			/**< Run queue link. */
	link_t wq_link;			/**< Wait queue link. */
	link_t th_link;			/**< Links to threads within the containing task. */

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except the list links above.
	 */
	SPINLOCK_DECLARE(lock);

	char name[THREAD_NAME_BUFLEN];

	void (* thread_code)(void *);	/**< Function implementing the thread. */
	void *thread_arg;		/**< Argument passed to thread_code() function. */

	/** From here, the stored context is restored when the thread is scheduled. */
	context_t saved_context;
	/** From here, the stored timeout context is restored when sleep times out. */
	context_t sleep_timeout_context;
	/** From here, the stored interruption context is restored when sleep is interrupted. */
	context_t sleep_interruption_context;

	bool sleep_interruptible;	/**< If true, the thread can be interrupted from sleep. */
	waitq_t *sleep_queue;		/**< Wait queue in which this thread sleeps. */
	timeout_t sleep_timeout;	/**< Timeout used for timeoutable sleeping. */
	volatile int timeout_pending;	/**< Flag signalling sleep timeout in progress. */

	/** True if this thread is executing copy_from_uspace(). False otherwise. */
	bool in_copy_from_uspace;
	/** True if this thread is executing copy_to_uspace(). False otherwise. */
	bool in_copy_to_uspace;

	/**
	 * If true, the thread will not go to sleep at all and will call
	 * thread_exit() before returning to userspace.
	 */
	bool interrupted;

	thread_join_type_t join_type;	/**< Who joins the thread. */
	bool detached;			/**< If true, thread_join_timeout() cannot be used on this thread. */
	waitq_t join_wq;		/**< Waitq for thread_join_timeout(). */

	fpu_context_t *saved_fpu_context;
	int fpu_context_exists;

	/*
	 * Defined only when the thread is not running. It means that the FPU
	 * context is still in the CPU that last executed this thread. This
	 * disables migration.
	 */
	int fpu_context_engaged;

	rwlock_type_t rwlock_holder_type;

	void (* call_me)(void *);	/**< Function to be called in scheduler before the thread is put asleep. */
	void *call_me_with;		/**< Argument passed to call_me(). */

	state_t state;			/**< Thread's state. */
	int flags;			/**< Thread's flags. */

	cpu_t *cpu;			/**< Thread's CPU. */
	task_t *task;			/**< Containing task. */

	uint64_t ticks;			/**< Ticks before preemption. */

	uint64_t cycles;		/**< Thread accounting. */
	uint64_t last_cycle;		/**< Last sampled cycle. */
	bool uncounted;			/**< Thread does not affect accumulated accounting. */

	int priority;			/**< Thread's priority. Implemented as an index into CPU->rq. */
	uint32_t tid;			/**< Thread ID. */

	thread_arch_t arch;		/**< Architecture-specific data. */

	uint8_t *kstack;		/**< Thread's kernel stack. */
} thread_t;
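
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the lock ordering documented on the task structure means the task lock is
 * taken first and only then the lock of any of its threads. A walker over
 * the task's thread list, assuming list_get_instance() from <adt/list.h>,
 * would therefore look roughly like this.
 */
static inline void task_for_each_thread_sketch(task_t *task,
    void (*func)(thread_t *))
{
	link_t *cur;

	spinlock_lock(&task->lock);		/* task lock first... */
	for (cur = task->th_head.next; cur != &task->th_head;
	    cur = cur->next) {
		thread_t *t = list_get_instance(cur, thread_t, th_link);
		spinlock_lock(&t->lock);	/* ...then the thread lock. */
		func(t);
		spinlock_unlock(&t->lock);
	}
	spinlock_unlock(&task->lock);
}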

SPINLOCK_EXTERN(tasks_lock);
extern btree_t tasks_btree;

extern void task_init(void);
extern task_t *task_create(as_t *as, char *name);
extern void task_destroy(task_t *t);
extern task_t *task_run_program(void *program_addr, char *name);
extern task_t *task_find_by_id(task_id_t id);
extern int task_kill(task_id_t id);
extern uint64_t task_get_accounting(task_t *t);

extern void cap_set(task_t *t, cap_t caps);
extern cap_t cap_get(task_t *t);
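
/*
 * Illustrative sketch (hypothetical, not part of the original header): a
 * typical consumer of the interface above, looking up a task by ID and
 * reading its capability set. Whether task_find_by_id() requires the caller
 * to hold tasks_lock is an assumption here; the sketch takes the lock (with
 * interrupts disabled) to be on the safe side.
 */
static inline cap_t task_caps_by_id_sketch(task_id_t id)
{
	cap_t caps = 0;
	task_t *t;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	t = task_find_by_id(id);
	if (t)
		caps = cap_get(t);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return caps;
}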


#ifndef task_create_arch
extern void task_create_arch(task_t *t);
#endif

#ifndef task_destroy_arch
extern void task_destroy_arch(task_t *t);
#endif
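
/*
 * Illustrative note (not part of the original header): the #ifndef guards
 * above let an architecture provide task_create_arch() and
 * task_destroy_arch() as macros in <arch/proc/task.h>, for example as empty
 * no-ops when there is no architecture-specific task state:
 *
 *	#define task_create_arch(t)
 *	#define task_destroy_arch(t)
 *
 * in which case the extern prototypes above are not declared.
 */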

extern unative_t sys_task_get_id(task_id_t *uspace_task_id);

#endif

/** @}
 */