source: mainline/kernel/arch/amd64/src/mm/page.c@e49e234

Last change on this file was e49e234, checked in by Martin Decky <martin@…>, 16 years ago

kernel memory management revisited (phase 2): map physical memory according to zones

  • ia32: register reserved and ACPI zones
  • pareas are now used only for mapping of present physical memory (hw_area() is gone)
  • firmware zones and physical addresses outside any zones are allowed to be mapped generally
  • fix nasty ancient bug in zones_insert_zone()
  • Property mode set to 100644
File size: 7.3 KB
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup amd64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
#include <config.h>
#include <memstr.h>
#include <interrupt.h>
#include <print.h>
#include <panic.h>
#include <align.h>

/* Definitions for identity page mapper */
pte_t helper_ptl1[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl2[512] __attribute__((aligned (PAGE_SIZE)));
pte_t helper_ptl3[512] __attribute__((aligned (PAGE_SIZE)));
extern pte_t ptl_0; /* From boot.S */

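/*
 * Helpers for walking the boot-time page table hierarchy rooted at ptl_0.
 * PTLx_PRESENT() tests whether the entry for a page is present at the
 * given level, PTLx_ADDR() yields the kernel-space address of the
 * next-level table and SETUP_PTLx()/SETUP_FRAME() install a new table
 * or frame with write and execute permissions.
 */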
#define PTL1_PRESENT(ptl0, page) (!(GET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL2_PRESENT(ptl1, page) (!(GET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))
#define PTL3_PRESENT(ptl2, page) (!(GET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page)) & PAGE_NOT_PRESENT))

#define PTL1_ADDR(ptl0, page) ((pte_t *) PA2KA(GET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page))))
#define PTL2_ADDR(ptl1, page) ((pte_t *) PA2KA(GET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page))))
#define PTL3_ADDR(ptl2, page) ((pte_t *) PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))

#define SETUP_PTL1(ptl0, page, tgt) { \
    SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t) KA2PA(tgt)); \
    SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_PTL2(ptl1, page, tgt) { \
    SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t) KA2PA(tgt)); \
    SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_PTL3(ptl2, page, tgt) { \
    SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t) KA2PA(tgt)); \
    SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_FRAME(ptl3, page, tgt) { \
    SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t) KA2PA(tgt)); \
    SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}

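/** Initialize paging
 *
 * On the bootstrap processor, install the page table mapping operations,
 * create the PA2KA identity mapping for all physical frames, map the
 * kernel image, kernel stack and init tasks and register the page fault
 * handler. Every processor finally loads the kernel page table root
 * into CR3.
 */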
void page_arch_init(void)
{
    uintptr_t cur;
    unsigned int i;
    int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;

    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        /*
         * PA2KA(identity) mapping for all frames.
         */
        for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
            /* Standard identity mapping */
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
        }

        /*
         * Upper kernel mapping
         * - from zero to the top of the kernel (including the bottom
         *   addresses because some are needed for init)
         */
        for (cur = PA2KA_CODE(0); cur < config.base + config.kernel_size; cur += FRAME_SIZE)
            page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);

        for (cur = config.stack_base; cur < config.stack_base + config.stack_size; cur += FRAME_SIZE)
            page_mapping_insert(AS_KERNEL, cur, KA2PA(cur), identity_flags);

        for (i = 0; i < init.cnt; i++) {
            for (cur = init.tasks[i].addr; cur < init.tasks[i].addr + init.tasks[i].size; cur += FRAME_SIZE)
                page_mapping_insert(AS_KERNEL, PA2KA_CODE(KA2PA(cur)), KA2PA(cur), identity_flags);
        }

        exc_register(14, "page_fault", (iroutine) page_fault);
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
    } else
        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}

/** Identity page mapper
 *
 * We need to map the whole physical memory identically before the page
 * subsystem is initialized. This handler unmaps the previously faulted
 * page and installs an identity mapping for the currently faulting one,
 * reusing the static helper page tables.
 */
void ident_page_fault(int n, istate_t *istate)
{
    uintptr_t page;
    static uintptr_t oldpage = 0;
    pte_t *aptl_1, *aptl_2, *aptl_3;

    page = read_cr2();
    if (oldpage) {
        /* Unmap old address */
        aptl_1 = PTL1_ADDR(&ptl_0, oldpage);
        aptl_2 = PTL2_ADDR(aptl_1, oldpage);
        aptl_3 = PTL3_ADDR(aptl_2, oldpage);

        SET_FRAME_FLAGS_ARCH(aptl_3, PTL3_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_3) == KA2PA(helper_ptl3))
            SET_PTL3_FLAGS_ARCH(aptl_2, PTL2_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_2) == KA2PA(helper_ptl2))
            SET_PTL2_FLAGS_ARCH(aptl_1, PTL1_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
        if (KA2PA(aptl_1) == KA2PA(helper_ptl1))
            SET_PTL1_FLAGS_ARCH(&ptl_0, PTL0_INDEX_ARCH(oldpage), PAGE_NOT_PRESENT);
    }
    if (PTL1_PRESENT(&ptl_0, page))
        aptl_1 = PTL1_ADDR(&ptl_0, page);
    else {
        SETUP_PTL1(&ptl_0, page, helper_ptl1);
        aptl_1 = helper_ptl1;
    }

    if (PTL2_PRESENT(aptl_1, page))
        aptl_2 = PTL2_ADDR(aptl_1, page);
    else {
        SETUP_PTL2(aptl_1, page, helper_ptl2);
        aptl_2 = helper_ptl2;
    }

    if (PTL3_PRESENT(aptl_2, page))
        aptl_3 = PTL3_ADDR(aptl_2, page);
    else {
        SETUP_PTL3(aptl_2, page, helper_ptl3);
        aptl_3 = helper_ptl3;
    }

    SETUP_FRAME(aptl_3, page, page);

    oldpage = page;
}

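/** Page fault handler
 *
 * Read the faulting address from CR2, decode the access type from the
 * error code and hand the fault over to the address space code. Faults
 * that cannot be resolved terminate the offending userspace task or
 * panic the kernel.
 */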
void page_fault(int n, istate_t *istate)
{
    uintptr_t page;
    pf_access_t access;

    page = read_cr2();

    if (istate->error_word & PFERR_CODE_RSVD)
        panic("Reserved bit set in page table entry.");

    if (istate->error_word & PFERR_CODE_RW)
        access = PF_ACCESS_WRITE;
    else if (istate->error_word & PFERR_CODE_ID)
        access = PF_ACCESS_EXEC;
    else
        access = PF_ACCESS_READ;

    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
        fault_if_from_uspace(istate, "Page fault: %#x.", page);

        decode_istate(n, istate);
        printf("Page fault address: %llx.\n", page);
        panic("Page fault.");
    }
}

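/** Map a physical memory area into the kernel address space
 *
 * Map size bytes starting at physical address physaddr into the kernel
 * virtual address range beginning at PA2KA(last_frame). The mapping is
 * writable and non-cacheable, as expected for memory-mapped devices.
 * Panic if the area does not fit below the end of the kernel address
 * space.
 *
 * @return Virtual address of the beginning of the mapped area.
 */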
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%d bytes).", physaddr, size);

    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}

/** @}
 */