source: mainline/arch/amd64/include/asm.h@7dd2561

Last change on this file since 7dd2561 was 7910cff, checked in by Jakub Jermar <jakub@…>, 20 years ago

Finer grained TLB invalidate functions for ia32 and amd64. Not yet deployed.

/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __amd64_ASM_H__
#define __amd64_ASM_H__

#include <arch/types.h>
#include <config.h>

extern void asm_delay_loop(__u32 t);
extern void asm_fake_loop(__u32 t);

/** Return base address of current stack.
 *
 * Return the base address of the current stack.
 * The stack is assumed to be STACK_SIZE bytes long
 * and must start on a page boundary.
 */
static inline __address get_stack_base(void)
{
	__address v;

	__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1)));

	return v;
}
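
/*
 * Illustrative sketch (not part of the original header): get_stack_base()
 * clears the low bits of RSP. Assuming, for illustration only, that
 * STACK_SIZE == 0x4000 (16 KiB):
 *
 *	rsp                  = 0xffff800000107f58
 *	~((__u64)0x4000 - 1) = 0xffffffffffffc000
 *	rsp & mask           = 0xffff800000104000   (stack base)
 *
 * The trick only works because the stack is aligned to its size, per the
 * assumption stated in the comment above.
 */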

static inline void cpu_sleep(void) { __asm__ volatile ("hlt\n"); }
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); }

/** Read one byte from an I/O port. */
static inline __u8 inb(__u16 port)
{
	__u8 out;

	__asm__ volatile ("inb %%dx, %%al\n" : "=a" (out) : "d" (port));
	return out;
}

/** Write one byte to an I/O port. */
static inline void outb(__u16 port, __u8 b)
{
	__asm__ volatile ("outb %%al, %%dx\n" : : "a" (b), "d" (port));
}
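
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * inb()/outb() is polling a legacy device register. The port (0x64, the
 * i8042 keyboard controller status port) and the command variable are
 * assumptions made for this example only:
 *
 *	while (inb(0x64) & (1 << 1)) {
 *		;	wait until the controller input buffer is empty
 *	}
 *	outb(0x64, command);
 */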

/** Enable interrupts.
 *
 * Enable interrupts and return the previous
 * value of RFLAGS.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_enable(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		"sti\n"
		: "=r" (v)
	);
	return v;
}

/** Disable interrupts.
 *
 * Disable interrupts and return the previous
 * value of RFLAGS.
 *
 * @return Old interrupt priority level.
 */
static inline ipl_t interrupts_disable(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		"cli\n"
		: "=r" (v)
	);
	return v;
}

/** Restore interrupt priority level.
 *
 * Restore RFLAGS.
 *
 * @param ipl Saved interrupt priority level.
 */
static inline void interrupts_restore(ipl_t ipl) {
	__asm__ volatile (
		"pushq %0\n"
		"popfq\n"
		: : "r" (ipl)
	);
}
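
/*
 * Illustrative sketch (not part of the original header): the three
 * functions above bracket critical sections, and saving the old level
 * makes the pattern nest correctly:
 *
 *	ipl_t ipl = interrupts_disable();
 *	...	touch state shared with interrupt handlers
 *	interrupts_restore(ipl);
 */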

/** Return interrupt priority level.
 *
 * Return RFLAGS.
 *
 * @return Current interrupt priority level.
 */
static inline ipl_t interrupts_read(void) {
	ipl_t v;
	__asm__ volatile (
		"pushfq\n"
		"popq %0\n"
		: "=r" (v)
	);
	return v;
}

/** Read CR0
 *
 * Return value in CR0
 *
 * @return Value read.
 */
static inline __u64 read_cr0(void)
{
	__u64 v;
	__asm__ volatile ("movq %%cr0,%0\n" : "=r" (v));
	return v;
}

/** Read CR2
 *
 * Return value in CR2
 *
 * @return Value read.
 */
static inline __u64 read_cr2(void)
{
	__u64 v;
	__asm__ volatile ("movq %%cr2,%0\n" : "=r" (v));
	return v;
}
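
/*
 * Illustrative sketch (not part of the original header): on a page fault
 * (#PF) the CPU stores the faulting linear address in CR2, so a page
 * fault handler would typically begin with:
 *
 *	__address fault_address = read_cr2();
 *	...	look the address up in the address space; map or panic
 */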

/** Write CR3
 *
 * Write value to CR3.
 *
 * @param v Value to be written.
 */
static inline void write_cr3(__u64 v)
{
	__asm__ volatile ("movq %0,%%cr3\n" : : "r" (v));
}

/** Read CR3
 *
 * Return value in CR3
 *
 * @return Value read.
 */
static inline __u64 read_cr3(void)
{
	__u64 v;
	__asm__ volatile ("movq %%cr3,%0\n" : "=r" (v));
	return v;
}
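
/*
 * Illustrative sketch (not part of the original header): writing CR3
 * flushes all non-global TLB entries, so reloading it with its current
 * value is the classic coarse way to invalidate the whole TLB:
 *
 *	write_cr3(read_cr3());
 *
 * invlpg() below is the finer-grained, single-page alternative referred
 * to in the changeset description.
 */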


/** Enable local APIC
 *
 * Enable local APIC in MSR.
 */
static inline void enable_l_apic_in_msr(void)
{
	__asm__ volatile (
		"movl $0x1b, %%ecx\n"		/* select the IA32_APIC_BASE MSR */
		"rdmsr\n"
		"orl $(1<<11),%%eax\n"		/* set the APIC global enable bit */
		"orl $(0xfee00000),%%eax\n"	/* set the default APIC base address */
		"wrmsr\n"
		:
		:
		:"%eax","%ecx","%edx"
	);
}
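
/*
 * Note (not part of the original header): IA32_APIC_BASE (MSR 0x1b) keeps
 * the APIC base physical address in bits 12 and up and the global enable
 * flag in bit 11; 0xfee00000 is the architectural default base, which is
 * why the routine above ORs both values into EAX before wrmsr.
 */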

static inline __address * get_ip(void)
{
	__address *ip;

	/* RIP cannot be read with a plain mov; a RIP-relative lea yields
	 * the address of the instruction that follows it. */
	__asm__ volatile (
		"leaq (%%rip), %0"
		: "=r" (ip)
	);
	return ip;
}

/** Invalidate TLB Entry.
 *
 * @param addr Address on a page whose TLB entry is to be invalidated.
 */
static inline void invlpg(__address addr)
{
	__asm__ volatile ("invlpg %0\n" :: "m" (addr));
}
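
/*
 * Illustrative sketch (not part of the original header): after changing a
 * single page table entry, invalidating just that page is much cheaper
 * than reloading CR3. The pte and page_address names are hypothetical:
 *
 *	pte->writable = 1;
 *	invlpg(page_address);
 */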

extern size_t interrupt_handler_size;
extern void interrupt_handlers(void);

#endif