source: mainline/generic/include/proc/thread.h @ 2ba7810

Last change on this file since 2ba7810 was f76fed4, checked in by Ondrej Palkovsky <ondrap@…>, 19 years ago

Added lazy FPU context allocation.

  • Threads that do not use the FPU are not allocated an FPU context.
  • FPU context alignment on AMD64 nicely disappeared.
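In this header, the lazy scheme surfaces only as the saved_fpu_context, fpu_context_exists and fpu_context_engaged fields of struct thread and the fpu_context_slab cache declared near the end. The following is a minimal sketch of how a lazy-allocation trap handler might be wired up; it is illustrative only and assumes the slab_alloc() interface from <mm/slab.h>, a THREAD macro naming the current thread, and architecture hooks fpu_enable(), fpu_init() and fpu_context_restore(), none of which are declared in this file:

/* Sketch only: THREAD, fpu_enable(), fpu_init() and fpu_context_restore()
 * are assumed to be provided elsewhere in the kernel. */
void fpu_lazy_request(void)
{
	/* The thread's first FPU instruction after a context switch traps here. */
	fpu_enable();

	if (!THREAD->saved_fpu_context) {
		/* First FPU use by this thread ever: allocate the context lazily. */
		THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
	}

	if (THREAD->fpu_context_exists) {
		/* Restore the FPU state saved on the last preemption. */
		fpu_context_restore(THREAD->saved_fpu_context);
	} else {
		/* No saved state yet: start from a clean FPU. */
		fpu_init();
		THREAD->fpu_context_exists = 1;
	}
}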
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __THREAD_H__
#define __THREAD_H__

#include <arch/thread.h>
#include <synch/spinlock.h>
#include <arch/context.h>
#include <fpu_context.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <synch/rwlock.h>
#include <config.h>
#include <adt/list.h>
#include <mm/slab.h>

#define THREAD_STACK_SIZE	STACK_SIZE

#define THREAD_USER_STACK	1

enum state {
	Invalid,	/**< It is an error if a thread is found in this state. */
	Running,	/**< State of a thread that is currently executing on some CPU. */
	Sleeping,	/**< A thread in this state is waiting for an event. */
	Ready,		/**< State of threads in a run queue. */
	Entering,	/**< Threads are in this state before they are first readied. */
	Exiting		/**< After a thread calls thread_exit(), it is put into the Exiting state. */
};

extern char *thread_states[];

#define X_WIRED		(1<<0)
#define X_STOLEN	(1<<1)

struct thread {
	link_t rq_link;				/**< Run queue link. */
	link_t wq_link;				/**< Wait queue link. */
	link_t th_link;				/**< Links to threads within containing task. */
	link_t threads_link;			/**< Link to the list of all threads. */

	/** Lock protecting thread structure.
	 *
	 * Protects the whole thread structure except list links above.
	 * Must be acquired before T.lock for each T of type task_t.
	 *
	 */
	SPINLOCK_DECLARE(lock);

	void (* thread_code)(void *);		/**< Function implementing the thread. */
	void *thread_arg;			/**< Argument passed to thread_code() function. */

	context_t saved_context;		/**< From here, the stored context is restored when the thread is scheduled. */
	context_t sleep_timeout_context;	/**< From here, the stored failover context is restored when sleep times out. */

	waitq_t *sleep_queue;			/**< Wait queue in which this thread sleeps. */
	timeout_t sleep_timeout;		/**< Timeout used for timeoutable sleeping. */
	volatile int timeout_pending;		/**< Flag signalling sleep timeout in progress. */

	fpu_context_t *saved_fpu_context;
	int fpu_context_exists;

	/*
	 * Defined only while the thread is not running.
	 * It means that the FPU context is still in the CPU that last executed
	 * this thread. This disables migration of the thread.
	 */
	int fpu_context_engaged;

	rwlock_type_t rwlock_holder_type;

	void (* call_me)(void *);		/**< Function to be called in the scheduler before the thread is put to sleep. */
	void *call_me_with;			/**< Argument passed to call_me(). */

	state_t state;				/**< Thread's state. */
	int flags;				/**< Thread's flags. */

	cpu_t *cpu;				/**< Thread's CPU. */
	task_t *task;				/**< Containing task. */

	__u64 ticks;				/**< Ticks before preemption. */

	int priority;				/**< Thread's priority. Implemented as index to CPU->rq. */
	__u32 tid;				/**< Thread ID. */

	ARCH_THREAD_DATA;			/**< Architecture-specific data. */

	__u8 *kstack;				/**< Thread's kernel stack. */
};

/** Thread list lock.
 *
 * This lock protects all link_t structures chained in threads_head.
 * Must be acquired before T.lock for each T of type thread_t.
 *
 */
extern spinlock_t threads_lock;
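/*
 * Illustrative only, not part of the original header: combining the two
 * ordering rules documented above, code that needs all three locks for some
 * thread t would take them as
 *
 *	spinlock_lock(&threads_lock);
 *	spinlock_lock(&t->lock);
 *	spinlock_lock(&t->task->lock);
 *	...
 *	spinlock_unlock(&t->task->lock);
 *	spinlock_unlock(&t->lock);
 *	spinlock_unlock(&threads_lock);
 *
 * i.e. threads_lock before any thread_t lock, and a thread_t lock before the
 * lock of its task_t.
 */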

extern link_t threads_head;			/**< List of all threads in the system. */

extern void thread_init(void);
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags);
extern void thread_ready(thread_t *t);
extern void thread_exit(void);

extern void thread_sleep(__u32 sec);
extern void thread_usleep(__u32 usec);

extern void thread_register_call_me(void (* call_me)(void *), void *call_me_with);
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);


/* FPU context slab cache. */
extern slab_cache_t *fpu_context_slab;

#endif
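
As a usage illustration of the API declared above, here is a sketch only; it assumes kernel code with a valid task_t *task in hand, and the names worker() and spawn_worker() as well as the flags value 0 (plain kernel stack, no THREAD_USER_STACK) are made up for the example:

/* Entry point of the new thread; it terminates itself via thread_exit(). */
static void worker(void *arg)
{
	/* ... do the actual work ... */
	thread_exit();
}

/* Create a thread in the Entering state and move it to Ready. */
static void spawn_worker(task_t *task)
{
	thread_t *t = thread_create(worker, NULL, task, 0);
	if (t)
		thread_ready(t);
}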