source: mainline/kernel/arch/ppc64/src/mm/page.c@ f651e80

Last change on this file since f651e80 was f651e80, checked in by Jiri Svoboda <jirik.svoboda@…>, 17 years ago

Make newlines in panic messages consistent. Add periods at end of messages so that it is obvious whether they are printed entirely.

/*
 * Copyright (c) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>

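/** Page hash table (PHT), organized as groups (PTEGs) of eight entries. */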
static phte_t *phte;


/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as Address space.
 * @param lock Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access Access mode that caused the fault.
 * @param istate Pointer to interrupted state.
 * @param pfrc Pointer to variable where as_page_fault() return code will be stored.
 * @return PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access,
    istate_t *istate, int *pfrc)
{
    /*
     * Check if the mapping exists in page tables.
     */
    pte_t *pte = page_mapping_find(as, badvaddr);
    if ((pte) && (pte->p)) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        int rc;

        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
        page_table_unlock(as, lock);
        switch (rc = as_page_fault(badvaddr, access, istate)) {
        case AS_PF_OK:
            /*
             * The higher-level page fault handler succeeded;
             * the mapping ought to be in place.
             */
            page_table_lock(as, lock);
            pte = page_mapping_find(as, badvaddr);
            ASSERT((pte) && (pte->p));
            *pfrc = 0;
            return pte;
        case AS_PF_DEFER:
            page_table_lock(as, lock);
            *pfrc = rc;
            return NULL;
        case AS_PF_FAULT:
            page_table_lock(as, lock);
            *pfrc = rc;
            return NULL;
        default:
            panic("Unexpected rc (%d).", rc);
        }
    }
}


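/** Report a PHT refill failure.
 *
 * Resolve the names of the faulting instruction and of its caller
 * (via the link register) from the symbol table and panic.
 *
 * @param badvaddr Faulting virtual address.
 * @param istate Interrupted register context.
 *
 */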
static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->pc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->lr);
    if (s)
        sym2 = s;
    panic("%p: PHT Refill Exception at %p (%s<-%s).", badvaddr, istate->pc, symbol, sym2);
}


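/** Insert a mapping into the hardware page hash table.
 *
 * The VSID is read from the segment register selected by the upper bits
 * of the virtual address (mfsrin). The primary PTEG index is derived from
 * VSID xor page index, the secondary PTEG index from its one's complement.
 * An invalid or colliding entry within the eight-entry group is reused;
 * otherwise the entry at index page % 8 of the primary group is
 * overwritten (the replacement policy is still a TODO below).
 *
 * @param vaddr Virtual address of the page.
 * @param pfn Physical frame number to map it to.
 *
 */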
static void pht_insert(const uintptr_t vaddr, const pfn_t pfn)
{
    uint32_t page = (vaddr >> 12) & 0xffff;
    uint32_t api = (vaddr >> 22) & 0x3f;
    uint32_t vsid;

    asm volatile (
        "mfsrin %0, %1\n"
        : "=r" (vsid)
        : "r" (vaddr)
    );

    /* Primary hash (xor) */
    uint32_t h = 0;
    uint32_t hash = vsid ^ page;
    uint32_t base = (hash & 0x3ff) << 3;
    uint32_t i;
    bool found = false;

    /* Find unused or colliding PTE in PTEG */
    for (i = 0; i < 8; i++) {
        if ((!phte[base + i].v) ||
            ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Secondary hash (not) */
        uint32_t base2 = (~hash & 0x3ff) << 3;

        /* Find unused or colliding PTE in PTEG */
        for (i = 0; i < 8; i++) {
            if ((!phte[base2 + i].v) ||
                ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
                found = true;
                base = base2;
                h = 1;
                break;
            }
        }

        if (!found) {
            // TODO: A/C precedence groups
            i = page % 8;
        }
    }

    phte[base + i].v = 1;
    phte[base + i].vsid = vsid;
    phte[base + i].h = h;
    phte[base + i].api = api;
    phte[base + i].rpn = pfn;
    phte[base + i].r = 0;
    phte[base + i].c = 0;
    phte[base + i].pp = 2; // FIXME
}


/** Process Instruction/Data Storage Interrupt
 *
 * @param data True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
    uintptr_t badvaddr;
    pte_t *pte;
    int pfrc;
    as_t *as;
    bool lock;

    if (AS == NULL) {
        as = AS_KERNEL;
        lock = false;
    } else {
        as = AS;
        lock = true;
    }

    if (data) {
        asm volatile (
            "mfdar %0\n"
            : "=r" (badvaddr)
        );
    } else
        badvaddr = istate->pc;

    page_table_lock(as, lock);

    pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
    if (!pte) {
        switch (pfrc) {
        case AS_PF_FAULT:
            goto fail;
            break;
        case AS_PF_DEFER:
            /*
             * The page fault came during copy_from_uspace()
             * or copy_to_uspace().
             */
            page_table_unlock(as, lock);
            return;
        default:
            panic("Unexpected pfrc (%d).", pfrc);
        }
    }

    pte->a = 1; /* Record access to PTE */
    pht_insert(badvaddr, pte->pfn);

    page_table_unlock(as, lock);
    return;

fail:
    page_table_unlock(as, lock);
    pht_refill_fail(badvaddr, istate);
}


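/** Clear the page hash table. */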
void pht_init(void)
{
    memsetb(phte, 1 << PHT_BITS, 0);
}


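/** Initialize paging.
 *
 * Switch the kernel to the page table mapping operations, map the
 * physical memory above 128 MB into the kernel address space and
 * install the page hash table (bootstrap processor only).
 *
 */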
void page_arch_init(void)
{
    if (config.cpu_active == 1) {
        page_mapping_operations = &pt_mapping_operations;

        uintptr_t cur;
        int flags;

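        /*
         * Map the physical memory from 128 MB up to last_frame into the
         * kernel address space (the lower 128 MB are presumably covered
         * by other means, e.g. BAT mappings).
         */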
        for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
            flags = PAGE_CACHEABLE | PAGE_WRITE;
            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
                flags |= PAGE_GLOBAL;
            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
        }

        /* Allocate page hash table */
        phte_t *physical_phte = (phte_t *) frame_alloc(PHT_ORDER, FRAME_KA | FRAME_ATOMIC);
        phte = physical_phte;  /* kernel-accessible pointer used by pht_init() and pht_insert() */

        ASSERT((uintptr_t) physical_phte % (1 << PHT_BITS) == 0);
        pht_init();

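        /*
         * Point the MMU at the table: SDR1 holds the base address of the
         * page hash table.
         */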
        asm volatile (
            "mtsdr1 %0\n"
            :
            : "r" ((uintptr_t) physical_phte)
        );
    }
}


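/** Map a physical memory area into the kernel address space.
 *
 * The area is mapped write-enabled and non-cacheable starting at the
 * kernel virtual address corresponding to last_frame, which is then
 * advanced past the mapped area.
 *
 * @param physaddr Physical base address of the area.
 * @param size Size of the area in bytes.
 * @return Kernel virtual address of the mapped area.
 *
 */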
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
        panic("Unable to map physical memory %p (%" PRIs " bytes).", physaddr, size);

    uintptr_t virtaddr = PA2KA(last_frame);
    pfn_t i;
    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);

    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);

    return virtaddr;
}

/** @}
 */