/*
 * Copyright (c) 2006 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia32xen_mm
 * @{
 */
/** @file
 */

#ifndef KERN_ia32xen_PAGE_H_
#define KERN_ia32xen_PAGE_H_

#include <arch/mm/frame.h>

#define PAGE_WIDTH	FRAME_WIDTH
#define PAGE_SIZE	FRAME_SIZE

#define PAGE_COLOR_BITS	0	/* dummy */

#ifdef KERNEL

#ifndef __ASM__
#	define KA2PA(x)	(((uintptr_t) (x)) - 0x80000000)
#	define PA2KA(x)	(((uintptr_t) (x)) + 0x80000000)
#else
#	define KA2PA(x)	((x) - 0x80000000)
#	define PA2KA(x)	((x) + 0x80000000)
#endif
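
/*
 * Note (illustration, not part of the original interface documentation):
 * KA2PA()/PA2KA() above appear to assume the kernel virtually maps physical
 * memory at a fixed offset of 0x80000000 (2 GiB), so the conversion is a
 * plain offset, e.g. PA2KA(0x00100000) evaluates to 0x80100000.
 */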

/*
 * Implementation of the generic 4-level page table interface.
 * IA-32 has 2-level page tables, so PTL1 and PTL2 are left out.
 */
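
/*
 * Illustration (not part of the interface): a 32-bit virtual address is
 * split into a 10-bit PTL0 (page directory) index, a 10-bit PTL3 (page
 * table) index and a 12-bit page offset. For example, for the virtual
 * address 0x80012345, PTL0_INDEX_ARCH() yields 0x200, PTL3_INDEX_ARCH()
 * yields 0x012 and the offset within the page is 0x345.
 */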

/* Number of entries in each level. */
#define PTL0_ENTRIES_ARCH	1024
#define PTL1_ENTRIES_ARCH	0
#define PTL2_ENTRIES_ARCH	0
#define PTL3_ENTRIES_ARCH	1024

/* Page table size for each level. */
#define PTL0_SIZE_ARCH		ONE_FRAME
#define PTL1_SIZE_ARCH		0
#define PTL2_SIZE_ARCH		0
#define PTL3_SIZE_ARCH		ONE_FRAME

/* Macros calculating indices into page tables in each level. */
#define PTL0_INDEX_ARCH(vaddr)	(((vaddr) >> 22) & 0x3ff)
#define PTL1_INDEX_ARCH(vaddr)	0
#define PTL2_INDEX_ARCH(vaddr)	0
#define PTL3_INDEX_ARCH(vaddr)	(((vaddr) >> 12) & 0x3ff)

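/*
 * Note: under Xen paravirtualization, page table entries hold machine
 * frame addresses rather than the domain's pseudo-physical addresses,
 * hence the MA2PA()/PA2MA() conversions in the accessors that follow.
 */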
/* Get PTE address accessors for each level. */
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) \
	((pte_t *) MA2PA((((pte_t *) (ptl0))[(i)].frame_address) << 12))
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) \
	(ptl1)
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) \
	(ptl2)
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) \
	((uintptr_t) MA2PA((((pte_t *) (ptl3))[(i)].frame_address) << 12))

/* Set PTE address accessors for each level. */
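/*
 * Note: a paravirtualized domain cannot write its page tables directly;
 * the setters below therefore go through the MMUEXT_OP and MMU_UPDATE
 * hypercalls instead of plain memory stores.
 */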
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
{ \
	mmuext_op_t mmu_ext; \
	\
	mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
	mmu_ext.mfn = ADDR2PFN(PA2MA(ptl0)); \
	ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
{ \
	mmuext_op_t mmu_ext; \
	\
	mmu_ext.cmd = MMUEXT_PIN_L1_TABLE; \
	mmu_ext.mfn = ADDR2PFN(PA2MA(a)); \
	ASSERT(xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) == 0); \
	\
	mmu_update_t update; \
	\
	update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
	update.val = PA2MA(a); \
	ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
{ \
	mmu_update_t update; \
	\
	update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
	update.val = PA2MA(a); \
	ASSERT(xen_mmu_update(&update, 1, NULL, DOMID_SELF) == 0); \
}

/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
	get_pt_flags((pte_t *) (ptl0), (index_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
	PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
	PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
	get_pt_flags((pte_t *) (ptl3), (index_t) (i))

/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
	set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
	set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))

/* Query macros for the last level. */
#define PTE_VALID_ARCH(p) \
	(*((uint32_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) \
	((p)->present != 0)
#define PTE_GET_FRAME_ARCH(p) \
	((p)->frame_address << FRAME_WIDTH)
#define PTE_WRITABLE_ARCH(p) \
	((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) \
	1

#ifndef __ASM__

#include <mm/mm.h>
#include <arch/hypercall.h>
#include <arch/interrupt.h>

/* Page fault error codes. */

/** When the bit at this position is 0, the page fault was caused by a
 * not-present page.
 */
#define PFERR_CODE_P	(1 << 0)

/** When the bit at this position is 1, the page fault was caused by a write. */
#define PFERR_CODE_RW	(1 << 1)

/** When the bit at this position is 1, the page fault occurred in user mode. */
#define PFERR_CODE_US	(1 << 2)

/** When the bit at this position is 1, a reserved bit was set in the page directory. */
#define PFERR_CODE_RSVD	(1 << 3)
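
/*
 * Illustration only (hypothetical handler fragment, not part of this
 * header's interface): decoding the error code pushed by the CPU.
 *
 *	if (!(error_code & PFERR_CODE_P)) {
 *		// fault on a not-present page
 *	} else if (error_code & PFERR_CODE_RW) {
 *		// write access violation
 *	}
 */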

typedef struct {
	uint64_t ptr;	/**< Machine address of PTE */
	union {		/**< New contents of PTE */
		uint64_t val;
		pte_t pte;
	};
} mmu_update_t;

typedef struct {
	unsigned int cmd;
	union {
		unsigned long mfn;
		unsigned long linear_addr;
	};
	union {
		unsigned int nr_ents;
		void *vcpumask;
	};
} mmuext_op_t;

static inline int xen_update_va_mapping(const void *va, const pte_t pte,
	const unsigned int flags)
{
	return hypercall4(XEN_UPDATE_VA_MAPPING, va, pte, 0, flags);
}

static inline int xen_mmu_update(const mmu_update_t *req,
	const unsigned int count, unsigned int *success_count, domid_t domid)
{
	return hypercall4(XEN_MMU_UPDATE, req, count, success_count, domid);
}

static inline int xen_mmuext_op(const mmuext_op_t *op, const unsigned int count,
	unsigned int *success_count, domid_t domid)
{
	return hypercall4(XEN_MMUEXT_OP, op, count, success_count, domid);
}
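
/*
 * Usage sketch (illustration only; 'ptep' and 'pte' are hypothetical names
 * for the kernel-virtual address of an entry and its new contents):
 *
 *	mmu_update_t update;
 *
 *	update.ptr = PA2MA(KA2PA(ptep));	// machine address of the PTE
 *	update.pte = pte;			// new PTE contents
 *	xen_mmu_update(&update, 1, NULL, DOMID_SELF);
 */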

/** Return the generic page flags encoded in the PTE at index i of page table pt. */
static inline int get_pt_flags(pte_t *pt, index_t i)
{
	pte_t *p = &pt[i];

	return ((!p->page_cache_disable) << PAGE_CACHEABLE_SHIFT |
	    (!p->present) << PAGE_PRESENT_SHIFT |
	    p->uaccessible << PAGE_USER_SHIFT |
	    1 << PAGE_READ_SHIFT |
	    p->writeable << PAGE_WRITE_SHIFT |
	    1 << PAGE_EXEC_SHIFT |
	    p->global << PAGE_GLOBAL_SHIFT);
}

/** Set the flags of the PTE at index i of page table pt via the MMU_UPDATE hypercall. */
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
{
	pte_t p = pt[i];

	p.page_cache_disable = !(flags & PAGE_CACHEABLE);
	p.present = !(flags & PAGE_NOT_PRESENT);
	p.uaccessible = (flags & PAGE_USER) != 0;
	p.writeable = (flags & PAGE_WRITE) != 0;
	p.global = (flags & PAGE_GLOBAL) != 0;

	/*
	 * Ensure that there is at least one bit set even if the present bit is cleared.
	 */
	p.soft_valid = true;

	mmu_update_t update;

	update.ptr = PA2MA(KA2PA(&(pt[i])));
	update.pte = p;
	xen_mmu_update(&update, 1, NULL, DOMID_SELF);
}

extern void page_arch_init(void);
extern void page_fault(int n, istate_t *istate);

#endif /* __ASM__ */

#endif /* KERNEL */

#endif

/** @}
 */