source: mainline/generic/src/proc/thread.c@dc747e3

Last change on this file since dc747e3 was dc747e3, checked in by Jakub Jermar <jakub@…>, 20 years ago

Add SPINLOCK_DECLARE and SPINLOCK_INITIALIZE macros.
SPINLOCK_DECLARE is to be used instead of direct spinlock_t declarations
in dynamically allocated structures on which spinlock_initialize() is called after
their creation.
SPINLOCK_INITIALIZE is to be used instead of direct spinlock_t declarations
of global spinlocks. It declares and initializes the spinlock.
Moreover, both macros are empty on UP so that -Wall warnings about unused structures
are suppressed.
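
For illustration only, a minimal sketch of what the two macros could look like (the CONFIG_SMP guard and the spinlock_t initializer used here are assumptions, not the actual definitions from this changeset):

#ifdef CONFIG_SMP
#  define SPINLOCK_DECLARE(name)     spinlock_t name
#  define SPINLOCK_INITIALIZE(name)  spinlock_t name = { 0 }
#else
/* On UP there is nothing to lock: both macros expand to nothing, so the
   otherwise-unused spinlock objects never exist and -Wall stays quiet. */
#  define SPINLOCK_DECLARE(name)
#  define SPINLOCK_INITIALIZE(name)
#endif

With such definitions, a global lock like threads_lock below is declared and zero-initialized in one line on SMP and disappears entirely on UP.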

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <memstr.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */

SPINLOCK_INITIALIZE(threads_lock); /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
link_t threads_head; /**< List of all threads. */

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;


/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Interrupts are assumed to be disabled on entry.
 *
 */
static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;

        /* this is where each thread wakes up after its creation */
        before_thread_runs();

        spinlock_unlock(&THREAD->lock);
        interrupts_enable();

        f(arg);
        thread_exit();
        /* not reached */
}


/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
        THREAD = NULL;
        nrdy = 0;
        list_initialize(&threads_head);
}


/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
        cpu_t *cpu;
        runq_t *r;
        ipl_t ipl;
        int i, avg, send_ipi = 0;

        ipl = interrupts_disable();

        spinlock_lock(&t->lock);

        /* Select the run queue: advance the thread's priority index by one, saturating at RQ_COUNT - 1. */
        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

        cpu = CPU;
        if (t->flags & X_WIRED) {
                cpu = t->cpu;
        }
        spinlock_unlock(&t->lock);

        /*
         * Append t to respective ready queue on respective processor.
         */
        r = &cpu->rq[i];
        spinlock_lock(&r->lock);
        list_append(&t->rq_link, &r->rq_head);
        r->n++;
        spinlock_unlock(&r->lock);

        atomic_inc(&nrdy);
        avg = nrdy / config.cpu_active;

        spinlock_lock(&cpu->lock);
        if ((++cpu->nrdy) > avg) {
                /*
                 * If there are idle halted CPUs, this will wake them up.
                 */
                ipi_broadcast(VECTOR_WAKEUP_IPI);
        }
        spinlock_unlock(&cpu->lock);

        interrupts_restore(ipl);
}


/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
        thread_t *t;
        __address frame_ks, frame_us = 0;

        t = (thread_t *) malloc(sizeof(thread_t));
        if (t) {
                ipl_t ipl;

                spinlock_initialize(&t->lock, "thread_t_lock");

                frame_ks = frame_alloc(FRAME_KA, ONE_FRAME);
                if (THREAD_USER_STACK & flags) {
                        frame_us = frame_alloc(FRAME_KA, ONE_FRAME);
                }

                ipl = interrupts_disable();
                spinlock_lock(&tidlock);
                t->tid = ++last_tid;
                spinlock_unlock(&tidlock);
                interrupts_restore(ipl);

                memsetb(frame_ks, THREAD_STACK_SIZE, 0);
                link_initialize(&t->rq_link);
                link_initialize(&t->wq_link);
                link_initialize(&t->th_link);
                link_initialize(&t->threads_link);
                t->kstack = (__u8 *) frame_ks;
                t->ustack = (__u8 *) frame_us;

                context_save(&t->saved_context);
                context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

                the_initialize((the_t *) t->kstack);

                ipl = interrupts_disable();
                t->saved_context.ipl = interrupts_read();
                interrupts_restore(ipl);

                t->thread_code = func;
                t->thread_arg = arg;
                t->ticks = -1;
                t->priority = -1; /* start in rq[0] */
                t->cpu = NULL;
                t->flags = 0;
                t->state = Entering;
                t->call_me = NULL;
                t->call_me_with = NULL;

                timeout_initialize(&t->sleep_timeout);
                t->sleep_queue = NULL;
                t->timeout_pending = 0;

                t->rwlock_holder_type = RWLOCK_NONE;

                t->task = task;

                t->fpu_context_exists = 0;
                t->fpu_context_engaged = 0;

                /*
                 * Register this thread in the system-wide list.
                 */
                ipl = interrupts_disable();
                spinlock_lock(&threads_lock);
                list_append(&t->threads_link, &threads_head);
                spinlock_unlock(&threads_lock);

                /*
                 * Attach to the containing task.
                 */
                spinlock_lock(&task->lock);
                list_append(&t->th_link, &task->th_head);
                spinlock_unlock(&task->lock);

                interrupts_restore(ipl);
        }

        return t;
}
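
/*
 * Illustrative usage sketch (not part of the original file): worker and
 * some_task are hypothetical names; the pattern simply combines
 * thread_create() and thread_ready() as defined in this file.
 *
 *         static void worker(void *arg)
 *         {
 *                 ... do some work, then return; cushion() calls thread_exit() ...
 *         }
 *
 *         thread_t *t = thread_create(worker, NULL, some_task, 0);
 *         if (t)
 *                 thread_ready(t);
 */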


/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
        ipl_t ipl;

restart:
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                interrupts_restore(ipl);
                goto restart;
        }
        THREAD->state = Exiting;
        spinlock_unlock(&THREAD->lock);
        scheduler();
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
        thread_usleep(sec * 1000000);
}


/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}


/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->call_me = call_me;
        THREAD->call_me_with = call_me_with;
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
}
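
/*
 * Illustrative usage sketch (not part of the original file): cleanup_notify
 * and data are hypothetical; the call below merely asks for
 * cleanup_notify(data) to be invoked on the next context switch to the
 * calling thread, as described above.
 *
 *         thread_register_call_me(cleanup_notify, data);
 */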