source: mainline/kernel/arch/amd64/include/asm.h@ df4ed85

Last change on this file since df4ed85 was df4ed85, checked in by Jakub Jermar <jakub@…>, 18 years ago

© versus ©

  • Property mode set to 100644
File size: 6.3 KB
/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64
 * @{
 */
/** @file
 */

#ifndef KERN_amd64_ASM_H_
#define KERN_amd64_ASM_H_

#include <arch/pm.h>
#include <arch/types.h>
#include <config.h>

extern void asm_delay_loop(uint32_t t);
extern void asm_fake_loop(uint32_t t);

/** Return base address of current stack.
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long.
 * The stack must start on a page boundary.
 */
static inline uintptr_t get_stack_base(void)
{
	uintptr_t v;

	__asm__ volatile (
		"andq %%rsp, %0\n"
		: "=r" (v)
		: "0" (~((uint64_t) STACK_SIZE - 1))
	);

	return v;
}
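
/*
 * Worked example (illustrative, not part of the original header): assuming
 * STACK_SIZE is 0x4000, a stack pointer of 0xffff80000012be48 masked with
 * ~(0x4000 - 1) yields 0xffff800000128000, the lowest address of the
 * aligned stack the CPU is currently running on.
 */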

static inline void cpu_sleep(void) { __asm__ volatile ("hlt\n"); }
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); }


/** Byte from port
 *
 * Get byte from port.
 *
 * @param port Port to read from
 * @return Value read
 */
static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0\n" : "=a" (val) : "d" (port)); return val; }

/** Byte to port
 *
 * Output byte to port.
 *
 * @param port Port to write to
 * @param val Value to write
 */
static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port)); }
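
/*
 * Illustrative usage sketch, not part of the original interface: polling the
 * standard PC i8042 keyboard controller with the port primitives above.
 * The port numbers 0x64 (status) and 0x60 (data) and the output-buffer-full
 * bit are assumptions about the platform, not something this header defines.
 */
static inline uint8_t i8042_data_read_example(void)
{
	/* Spin until the controller signals that a byte is available. */
	while (!(inb(0x64) & 0x01))
		;

	/* Fetch the byte from the data port. */
	return inb(0x60);
}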

/** Swap hidden part of the GS register with its visible part. */
static inline void swapgs(void) { __asm__ volatile ("swapgs"); }

/** Enable interrupts.
 *
 * Enable interrupts and return the previous
 * value of EFLAGS.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		"sti\n"
		: "=r" (v)
	);
	return v;
}

/** Disable interrupts.
 *
 * Disable interrupts and return the previous
 * value of EFLAGS.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		"cli\n"
		: "=r" (v)
	);
	return v;
}

/** Restore interrupt priority level.
 *
 * Restore EFLAGS.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl) {
	__asm__ volatile (
		"pushq %0\n"
		"popfq\n"
		: : "r" (ipl)
	);
}
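
/*
 * Illustrative usage sketch, not part of the original interface: the usual
 * pairing of the primitives above around a short critical section.
 */
static inline void critical_section_example(void)
{
	/* Save the current flags value and clear the interrupt flag. */
	ipl_t ipl = interrupts_disable();

	/* ... code that must not be interleaved with interrupt handlers ... */

	/*
	 * Put the flags back exactly as they were, re-enabling interrupts
	 * only if they were enabled before.
	 */
	interrupts_restore(ipl);
}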

/** Return interrupt priority level.
 *
 * Return EFLAGS.
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		: "=r" (v)
	);
	return v;
}

/** Write to MSR. */
static inline void write_msr(uint32_t msr, uint64_t value)
{
	__asm__ volatile (
		"wrmsr;" : : "c" (msr),
		"a" ((uint32_t) (value)),
		"d" ((uint32_t) (value >> 32))
	);
}

/** Read from MSR. */
static inline unative_t read_msr(uint32_t msr)
{
	uint32_t ax, dx;

	__asm__ volatile (
		"rdmsr;" : "=a" (ax), "=d" (dx) : "c" (msr)
	);
	return ((uint64_t) dx << 32) | ax;
}
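
/*
 * Illustrative sketch, not part of the original interface: the same effect as
 * enable_l_apic_in_msr() below, expressed with read_msr()/write_msr().
 * 0x1b is the APIC base MSR; bit 11 is the APIC global enable bit and
 * 0xfee00000 is the default APIC base address.
 */
static inline void enable_l_apic_example(void)
{
	write_msr(0x1b, read_msr(0x1b) | (1 << 11) | 0xfee00000);
}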

/** Enable local APIC
 *
 * Enable local APIC in MSR.
 */
static inline void enable_l_apic_in_msr(void)
{
	__asm__ volatile (
		"movl $0x1b, %%ecx\n"
		"rdmsr\n"
		"orl $(1 << 11), %%eax\n"
		"orl $(0xfee00000), %%eax\n"
		"wrmsr\n"
		:
		:
		: "%eax", "%ecx", "%edx"
	);
}

static inline uintptr_t *get_ip(void)
{
	uintptr_t *ip;

	__asm__ volatile (
		"mov %%rip, %0"
		: "=r" (ip)
	);
	return ip;
}

/** Invalidate TLB Entry.
 *
 * @param addr Address on a page whose TLB entry is to be invalidated.
 */
static inline void invlpg(uintptr_t addr)
{
	__asm__ volatile ("invlpg %0\n" : : "m" (*((unative_t *) addr)));
}
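
/*
 * Typical use (illustrative): after a page table entry for a virtual address
 * has been changed, the stale translation is dropped from the TLB with
 * invlpg(addr), where addr is any address within the affected page; a full
 * CR3 reload is only needed when many mappings change at once.
 */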

/** Load GDTR register from memory.
 *
 * @param gdtr_reg Address of memory from where to load GDTR.
 */
static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
{
	__asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
}

/** Store GDTR register to memory.
 *
 * @param gdtr_reg Address of memory where to store GDTR.
 */
static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
{
	__asm__ volatile ("sgdtq %0\n" : "=m" (*gdtr_reg));
}

/** Load IDTR register from memory.
 *
 * @param idtr_reg Address of memory from where to load IDTR.
 */
static inline void idtr_load(struct ptr_16_64 *idtr_reg)
{
	__asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
}

/** Load TR from descriptor table.
 *
 * @param sel Selector specifying descriptor of TSS segment.
 */
static inline void tr_load(uint16_t sel)
{
	__asm__ volatile ("ltr %0" : : "r" (sel));
}

#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
	{ \
		unative_t res; \
		__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res)); \
		return res; \
	}

#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
	{ \
		__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
	}

GEN_READ_REG(cr0);
GEN_READ_REG(cr2);
GEN_READ_REG(cr3);
GEN_WRITE_REG(cr3);

GEN_READ_REG(dr0);
GEN_READ_REG(dr1);
GEN_READ_REG(dr2);
GEN_READ_REG(dr3);
GEN_READ_REG(dr6);
GEN_READ_REG(dr7);

GEN_WRITE_REG(dr0);
GEN_WRITE_REG(dr1);
GEN_WRITE_REG(dr2);
GEN_WRITE_REG(dr3);
GEN_WRITE_REG(dr6);
GEN_WRITE_REG(dr7);
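
/*
 * Illustrative usage sketch, not part of the original interface: reloading
 * CR3 with its current value flushes all non-global TLB entries, which is a
 * common use of the read_cr3()/write_cr3() pair generated above.
 */
static inline void tlb_flush_all_example(void)
{
	write_cr3(read_cr3());
}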

extern size_t interrupt_handler_size;
extern void interrupt_handlers(void);

#endif

/** @}
 */