source: mainline/kernel/genarch/src/mm/page_pt.c@4874c2d

Last change on this file since 4874c2d was f47fd19, checked in by Jakub Jermar <jakub@…>, 19 years ago

sparc64 work.
Define the istate structure.
Move the identity-mapping handler to assembly.
Make the preemptible handler more general so that TL=1 MMU exceptions can make use of it.

Little bit of formatting and indentation.

/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */

#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>

static void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void pt_mapping_remove(as_t *as, uintptr_t page);
static pte_t *pt_mapping_find(as_t *as, uintptr_t page);

page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find
};

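/*
 * Note: the functions above are not meant to be called directly. An
 * architecture that uses hierarchical page tables is expected to install
 * this operations structure (e.g. page_mapping_operations =
 * &pt_mapping_operations;) so that the generic page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find() front-ends dispatch here.
 * The exact registration point is architecture specific and not shown in
 * this file.
 */
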
/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
	pte_t *newpt;

	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

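	/*
	 * All levels of the hierarchy now exist; install the actual translation
	 * in the leaf (PTL3) entry.
	 */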
	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}

/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
	bool empty = true;
	int i;

	/*
	 * First, remove the mapping, if it exists.
	 */

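	/*
	 * The descent below mirrors pt_mapping_find(): if any intermediate
	 * level is not present, nothing is mapped at this address and there is
	 * nothing to remove.
	 */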
	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */
	memsetb((uintptr_t) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0.
	 */

	/* check PTL3 */
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}
	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove PTL3 pointer from preceding table.
		 */
		frame_free(KA2PA((uintptr_t) ptl3));
		if (PTL2_ENTRIES)
			memsetb((uintptr_t) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
		else if (PTL1_ENTRIES)
			memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
		else
			memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

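	/*
	 * PTL2_ENTRIES and PTL1_ENTRIES are presumably zero on architectures
	 * where the respective level does not exist, in which case the
	 * corresponding block below is skipped entirely.
	 */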
	/* check PTL2, empty is still true */
	if (PTL2_ENTRIES) {
		for (i = 0; i < PTL2_ENTRIES; i++) {
			if (PTE_VALID(&ptl2[i])) {
				empty = false;
				break;
			}
		}
		if (empty) {
			/*
			 * PTL2 is empty.
			 * Release the frame and remove PTL2 pointer from preceding table.
			 */
			frame_free(KA2PA((uintptr_t) ptl2));
			if (PTL1_ENTRIES)
				memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
			else
				memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		} else {
			/*
			 * PTL2 is not empty.
			 * Therefore, there must be a path from PTL0 to PTL2 and
			 * thus nothing to free in higher levels.
			 */
			return;
		}
	}

	/* check PTL1, empty is still true */
	if (PTL1_ENTRIES) {
		for (i = 0; i < PTL1_ENTRIES; i++) {
			if (PTE_VALID(&ptl1[i])) {
				empty = false;
				break;
			}
		}
		if (empty) {
			/*
			 * PTL1 is empty.
			 * Release the frame and remove PTL1 pointer from preceding table.
			 */
			frame_free(KA2PA((uintptr_t) ptl1));
			memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		}
	}
}

/** Find mapping for virtual page in hierarchical page tables.
 *
 * Find mapping for virtual page.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 *
 * @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise.
 */
pte_t *pt_mapping_find(as_t *as, uintptr_t page)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;

	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

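	/*
	 * Read-only walk of the hierarchy: give up and return NULL as soon as
	 * any level on the path to the leaf entry is not present.
	 */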
	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

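	/*
	 * Return a pointer to the leaf entry even if the entry itself is not
	 * marked present; callers are assumed to check the returned entry's
	 * validity before using it.
	 */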
	return &ptl3[PTL3_INDEX(page)];
}

/** @}
 */