source: mainline/kernel/arch/amd64/include/asm.h@ 6aea2e00

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 6aea2e00 was 6aea2e00, checked in by Martin Decky <martin@…>, 18 years ago

use the hlt instruction more properly

  • Property mode set to 100644
File size: 6.2 KB
Line 
1/*
2 * Copyright (c) 2005 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup amd64
30 * @{
31 */
32/** @file
33 */
34
35#ifndef KERN_amd64_ASM_H_
36#define KERN_amd64_ASM_H_
37
38#include <config.h>
39
40extern void asm_delay_loop(uint32_t t);
41extern void asm_fake_loop(uint32_t t);
42
43/** Return base address of current stack.
44 *
45 * Return the base address of the current stack.
46 * The stack is assumed to be STACK_SIZE bytes long.
47 * The stack must start on page boundary.
48 */
49static inline uintptr_t get_stack_base(void)
50{
51 uintptr_t v;
52
53 asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
54
55 return v;
56}
57
58static inline void cpu_sleep(void)
59{
60 asm volatile ("hlt\n");
61};
62
63static inline void cpu_halt(void)
64{
65 asm volatile ("hlt\n");
66};
67
68
/** Byte from port
 *
 * Get byte from port
 *
 * @param port Port to read from
 * @return Value read
 */
static inline uint8_t inb(uint16_t port)
{
	uint8_t data;

	__asm__ volatile (
		"inb %w1, %b0 \n"
		: "=a" (data)
		: "d" (port)
	);

	return data;
}
77
/** Byte to port
 *
 * Output byte to port
 *
 * @param port Port to write to
 * @param val Value to write
 */
static inline void outb(uint16_t port, uint8_t val)
{
	__asm__ volatile (
		"outb %b0, %w1\n"
		:
		: "a" (val), "d" (port)
	);
}
86
/** Swap the hidden part of the GS register with the visible one. */
static inline void swapgs(void)
{
	__asm__ volatile ("swapgs");
}
89
90/** Enable interrupts.
91 *
92 * Enable interrupts and return previous
93 * value of EFLAGS.
94 *
95 * @return Old interrupt priority level.
96 */
97static inline ipl_t interrupts_enable(void) {
98 ipl_t v;
99 __asm__ volatile (
100 "pushfq\n"
101 "popq %0\n"
102 "sti\n"
103 : "=r" (v)
104 );
105 return v;
106}
107
108/** Disable interrupts.
109 *
110 * Disable interrupts and return previous
111 * value of EFLAGS.
112 *
113 * @return Old interrupt priority level.
114 */
115static inline ipl_t interrupts_disable(void) {
116 ipl_t v;
117 __asm__ volatile (
118 "pushfq\n"
119 "popq %0\n"
120 "cli\n"
121 : "=r" (v)
122 );
123 return v;
124}
125
126/** Restore interrupt priority level.
127 *
128 * Restore EFLAGS.
129 *
130 * @param ipl Saved interrupt priority level.
131 */
132static inline void interrupts_restore(ipl_t ipl) {
133 __asm__ volatile (
134 "pushq %0\n"
135 "popfq\n"
136 : : "r" (ipl)
137 );
138}
139
140/** Return interrupt priority level.
141 *
142 * Return EFLAFS.
143 *
144 * @return Current interrupt priority level.
145 */
146static inline ipl_t interrupts_read(void) {
147 ipl_t v;
148 __asm__ volatile (
149 "pushfq\n"
150 "popq %0\n"
151 : "=r" (v)
152 );
153 return v;
154}
155
/** Write to MSR
 *
 * @param msr   MSR number to write.
 * @param value 64-bit value, split into EDX:EAX as wrmsr requires.
 */
static inline void write_msr(uint32_t msr, uint64_t value)
{
	uint32_t lo = (uint32_t) value;
	uint32_t hi = (uint32_t) (value >> 32);

	__asm__ volatile (
		"wrmsr;"
		:
		: "c" (msr), "a" (lo), "d" (hi)
	);
}
165
166static inline unative_t read_msr(uint32_t msr)
167{
168 uint32_t ax, dx;
169
170 __asm__ volatile (
171 "rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
172 );
173 return ((uint64_t)dx << 32) | ax;
174}
175
176
/** Enable local APIC
 *
 * Enable local APIC in MSR.
 *
 * Reads the IA32_APIC_BASE MSR (0x1b), sets the APIC global enable
 * bit (bit 11) and the base address bits, and writes it back.
 */
static inline void enable_l_apic_in_msr(void)
{
	__asm__ volatile (
		"movl $0x1b, %%ecx\n"
		"rdmsr\n"
		"orl $(1<<11),%%eax\n"
		"orl $(0xfee00000),%%eax\n"
		"wrmsr\n"
		:
		:
		: "%eax", "%ecx", "%edx"
	);
}
194
/** Return the current instruction pointer.
 *
 * @return Address of the instruction following the inlined asm.
 */
static inline uintptr_t * get_ip(void)
{
	uintptr_t *ip;

	/*
	 * %rip is not encodable as a plain mov source operand; it is only
	 * legal in RIP-relative addressing. Use lea with a RIP-relative
	 * effective address to obtain the instruction pointer.
	 */
	__asm__ volatile (
		"leaq 0(%%rip), %0"
		: "=r" (ip)
	);
	return ip;
}
205
206/** Invalidate TLB Entry.
207 *
208 * @param addr Address on a page whose TLB entry is to be invalidated.
209 */
210static inline void invlpg(uintptr_t addr)
211{
212 __asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
213}
214
/** Load GDTR register from memory.
 *
 * @param gdtr_reg Address of memory from where to load GDTR.
 */
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
{
	__asm__ volatile (
		"lgdtq %0\n"
		:
		: "m" (*gdtr_reg)
	);
}
223
/** Store GDTR register to memory.
 *
 * @param gdtr_reg Address of memory to where to store GDTR.
 */
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
{
	/*
	 * sgdt writes its memory operand, so it must be declared as an
	 * output ("=m"); declaring it as input lets the compiler assume
	 * *gdtr_reg is unchanged.
	 */
	__asm__ volatile ("sgdtq %0\n" : "=m" (*gdtr_reg));
}
232
/** Load IDTR register from memory.
 *
 * @param idtr_reg Address of memory from where to load IDTR.
 */
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
{
	__asm__ volatile (
		"lidtq %0\n"
		:
		: "m" (*idtr_reg)
	);
}
241
/** Load TR from descriptor table.
 *
 * @param sel Selector specifying descriptor of TSS segment.
 */
static inline void tr_load(uint16_t sel)
{
	__asm__ volatile (
		"ltr %0"
		:
		: "r" (sel)
	);
}
250
/** Generate a read accessor for a control/debug register.
 *
 * GEN_READ_REG(reg) expands to a function read_<reg>(void) that
 * returns the current value of the named register via movq.
 */
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
	{ \
	unative_t res; \
	__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
	return res; \
	}

/** Generate a write accessor for a control/debug register.
 *
 * GEN_WRITE_REG(reg) expands to a function write_<reg>(unative_t)
 * that loads its argument into the named register via movq.
 */
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
	{ \
	__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
	}

/* Control register accessors (cr3 is the only one written here). */
GEN_READ_REG(cr0);
GEN_READ_REG(cr2);
GEN_READ_REG(cr3);
GEN_WRITE_REG(cr3);

/* Debug register read accessors. */
GEN_READ_REG(dr0);
GEN_READ_REG(dr1);
GEN_READ_REG(dr2);
GEN_READ_REG(dr3);
GEN_READ_REG(dr6);
GEN_READ_REG(dr7);

/* Debug register write accessors. */
GEN_WRITE_REG(dr0);
GEN_WRITE_REG(dr1);
GEN_WRITE_REG(dr2);
GEN_WRITE_REG(dr3);
GEN_WRITE_REG(dr6);
GEN_WRITE_REG(dr7);
281
282extern size_t interrupt_handler_size;
283extern void interrupt_handlers(void);
284
285#endif
286
287/** @}
288 */
Note: See TracBrowser for help on using the repository browser.