source: mainline/src/proc/thread.c@ 8262010

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 8262010 was 8262010, checked in by Jakub Jermar <jakub@…>, 20 years ago

Switch from mm-based 'the' mechanism to macro-based 'cpu_private_data[CPU_ID_ARCH]' mechanism.
Added l_apic_id() and some other minor APIC changes.
Move gdtr to K_DATA_START section.
Move K_DATA_START section immediately behind K_TEXT_START section so that real-mode addresses work even with growing size of kernel code.

  • Property mode set to 100644
File size: 6.0 KB
RevLine 
[f761f1eb]1/*
2 * Copyright (C) 2001-2004 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <proc/scheduler.h>
30#include <proc/thread.h>
31#include <proc/task.h>
32#include <mm/heap.h>
33#include <mm/frame.h>
34#include <mm/page.h>
35#include <arch/asm.h>
36#include <arch.h>
37#include <synch/synch.h>
38#include <synch/spinlock.h>
39#include <synch/waitq.h>
40#include <synch/rwlock.h>
41#include <cpu.h>
42#include <func.h>
43#include <context.h>
44#include <list.h>
45#include <typedefs.h>
46#include <time/clock.h>
47#include <list.h>
[4ffa9e0]48#include <config.h>
49#include <arch/interrupt.h>
[26a8604f]50#include <smp/ipi.h>
[f761f1eb]51
/* Human-readable names for thread states, indexed by the state value. */
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};

/* Guards the system-wide list of all threads (threads_head). */
spinlock_t threads_lock;
link_t threads_head;

/* tidlock guards last_tid, the monotonically increasing source of
 * unique thread IDs handed out by thread_create(). */
static spinlock_t tidlock;
__u32 last_tid = 0;
59
60/*
61 * cushion() is provided to ensure that every thread
62 * makes a call to thread_exit() when its implementing
63 * function returns.
64 *
65 * cpu_priority_high()'d
66 */
67void cushion(void)
68{
[43114c5]69 void (*f)(void *) = THREAD->thread_code;
70 void *arg = THREAD->thread_arg;
[f761f1eb]71
72 /* this is where each thread wakes up after its creation */
[43114c5]73 spinlock_unlock(&THREAD->lock);
[f761f1eb]74 cpu_priority_low();
75
76 f(arg);
77 thread_exit();
78 /* not reached */
79}
80
/*
 * Initialize the threading subsystem: no current thread yet, zero
 * ready threads, and an empty system-wide thread list with its lock.
 */
void thread_init(void)
{
	THREAD = NULL;
	nrdy = 0;
	spinlock_initialize(&threads_lock);
	list_initialize(&threads_head);
}
88
89void thread_ready(thread_t *t)
90{
91 cpu_t *cpu;
92 runq_t *r;
93 pri_t pri;
[4ffa9e0]94 int i, avg, send_ipi = 0;
[f761f1eb]95
96 pri = cpu_priority_high();
97
98 spinlock_lock(&t->lock);
99
100 i = (t->pri < RQ_COUNT -1) ? ++t->pri : t->pri;
101
[8262010]102 cpu = CPU;
[f761f1eb]103 if (t->flags & X_WIRED) {
104 cpu = t->cpu;
105 }
106 spinlock_unlock(&t->lock);
107
108 /*
109 * Append t to respective ready queue on respective processor.
110 */
111 r = &cpu->rq[i];
112 spinlock_lock(&r->lock);
113 list_append(&t->rq_link, &r->rq_head);
114 r->n++;
115 spinlock_unlock(&r->lock);
116
117 spinlock_lock(&nrdylock);
[4ffa9e0]118 avg = ++nrdy / config.cpu_active;
[f761f1eb]119 spinlock_unlock(&nrdylock);
120
121 spinlock_lock(&cpu->lock);
[26a8604f]122 if ((++cpu->nrdy) > avg) {
[4ffa9e0]123 /*
124 * If there are idle halted CPU's, this will wake them up.
125 */
[3418c41]126 ipi_broadcast(VECTOR_WAKEUP_IPI);
[4ffa9e0]127 }
[f761f1eb]128 spinlock_unlock(&cpu->lock);
129
130 cpu_priority_restore(pri);
131}
132
/*
 * Create a new thread that will execute func(arg) on behalf of task.
 *
 * Allocates the thread structure, a kernel-stack frame and — when
 * THREAD_USER_STACK is set in flags — a user-stack frame, assigns a
 * unique tid, prepares the saved context so the thread starts in
 * cushion(), and links the thread into both the system-wide thread
 * list and the owning task's thread list.
 *
 * Returns the new thread in state Entering (it is NOT placed on a run
 * queue — callers use thread_ready() for that), or NULL if the
 * thread_t allocation failed.
 *
 * NOTE(review): the frame_alloc() results are not checked here —
 * presumably frame_alloc() cannot return failure in this kernel;
 * confirm against mm/frame.c.
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
	thread_t *t;
	__address frame_ks, frame_us = NULL;

	t = (thread_t *) malloc(sizeof(thread_t));
	if (t) {
		pri_t pri;
	
		spinlock_initialize(&t->lock);
	
		frame_ks = frame_alloc(FRAME_KA);
		if (THREAD_USER_STACK & flags) {
			frame_us = frame_alloc(0);
		}

		/* Hand out a unique thread id under tidlock. */
		pri = cpu_priority_high();
		spinlock_lock(&tidlock);
		t->tid = ++last_tid;
		spinlock_unlock(&tidlock);
		cpu_priority_restore(pri);
		
		/* Zero the kernel stack and wire up the list links. */
		memsetb(frame_ks, THREAD_STACK_SIZE, 0);
		link_initialize(&t->rq_link);
		link_initialize(&t->wq_link);
		link_initialize(&t->th_link);
		link_initialize(&t->threads_link);
		t->kstack = (__u8 *) frame_ks;
		t->ustack = (__u8 *) frame_us;
		
		/* The thread begins execution in cushion(), on its own
		 * kernel stack. */
		context_save(&t->saved_context);
		t->saved_context.pc = (__address) cushion;
		t->saved_context.sp = (__address) &t->kstack[THREAD_STACK_SIZE-8];
		
		pri = cpu_priority_high();
		t->saved_context.pri = cpu_priority_read();
		cpu_priority_restore(pri);
		
		t->thread_code = func;
		t->thread_arg = arg;
		t->ticks = -1;
		t->pri = -1;		/* start in rq[0] */
		t->cpu = NULL;
		t->flags = 0;
		t->state = Entering;
		t->call_me = NULL;
		t->call_me_with = NULL;
		
		timeout_initialize(&t->sleep_timeout);
		t->sleep_queue = NULL;
		t->timeout_pending = 0;
		
		t->rwlock_holder_type = RWLOCK_NONE;
		
		t->task = task;
		
		/*
		 * Register this thread in the system-wide list.
		 */
		pri = cpu_priority_high();
		spinlock_lock(&threads_lock);
		list_append(&t->threads_link, &threads_head);
		spinlock_unlock(&threads_lock);

		/*
		 * Attach to the containing task.
		 */
		spinlock_lock(&task->lock);
		list_append(&t->th_link, &task->th_head);
		spinlock_unlock(&task->lock);

		cpu_priority_restore(pri);
	}

	return t;
}
210
211void thread_exit(void)
212{
213 pri_t pri;
214
215restart:
216 pri = cpu_priority_high();
[43114c5]217 spinlock_lock(&THREAD->lock);
218 if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
219 spinlock_unlock(&THREAD->lock);
[f761f1eb]220 cpu_priority_restore(pri);
221 goto restart;
222 }
[43114c5]223 THREAD->state = Exiting;
224 spinlock_unlock(&THREAD->lock);
[f761f1eb]225 scheduler();
226}
227
228void thread_sleep(__u32 sec)
229{
230 thread_usleep(sec*1000000);
231}
232
/*
 * Suspend execution of current thread for usec microseconds.
 *
 * Implemented by sleeping with a timeout on a private, stack-local
 * wait queue that nobody ever wakes up, so the sleep always ends by
 * the timeout expiring.
 */
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	/* Result deliberately discarded: timeout expiry is the
	 * expected outcome here, not an error. */
	(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
244
245void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
246{
247 pri_t pri;
248
249 pri = cpu_priority_high();
[43114c5]250 spinlock_lock(&THREAD->lock);
251 THREAD->call_me = call_me;
252 THREAD->call_me_with = call_me_with;
253 spinlock_unlock(&THREAD->lock);
[f761f1eb]254 cpu_priority_restore(pri);
255}
Note: See TracBrowser for help on using the repository browser.