source: mainline/kernel/arch/sparc64/src/mm/sun4u/tlb.c@ 1dbc43f

Last change on this file was 1dbc43f, checked in by Jakub Jermar <jakub@…>, 13 years ago

Unify user page fault handling in as_page_fault().

  • Remove lots of architecture-dependent boilerplate code.
[0d04024]1/*
[df4ed85]2 * Copyright (c) 2005 Jakub Jermar
[0d04024]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[7008097]29/** @addtogroup sparc64mm
[b45c443]30 * @{
31 */
32/** @file
33 */
34
[0d04024]35#include <arch/mm/tlb.h>
36#include <mm/tlb.h>
[f47fd19]37#include <mm/as.h>
38#include <mm/asid.h>
[0cfc4d38]39#include <arch/mm/frame.h>
40#include <arch/mm/page.h>
41#include <arch/mm/mmu.h>
[f47fd19]42#include <arch/interrupt.h>
[e2bf639]43#include <interrupt.h>
[f47fd19]44#include <arch.h>
[0d04024]45#include <print.h>
[d99c1d2]46#include <typedefs.h>
[0cfc4d38]47#include <config.h>
[49b6d32]48#include <arch/trap/trap.h>
[7bb6b06]49#include <arch/trap/exception.h>
[008029d]50#include <panic.h>
[b6fba84]51#include <arch/asm.h>
[387416b]52#include <genarch/mm/page_ht.h>
[02f441c0]53
[29b2bbf]54#ifdef CONFIG_TSB
55#include <arch/mm/tsb.h>
56#endif
57
[98000fb]58static void dtlb_pte_copy(pte_t *, size_t, bool);
59static void itlb_pte_copy(pte_t *, size_t);
[f47fd19]60
[a000878c]61const char *context_encoding[] = {
[b6fba84]62 "Primary",
63 "Secondary",
64 "Nucleus",
65 "Reserved"
66};
[0d04024]67
68void tlb_arch_init(void)
69{
[c6e314a]70 /*
[c23baab]71 * Invalidate all non-locked DTLB and ITLB entries.
[c6e314a]72 */
[c23baab]73 tlb_invalidate_all();
[8cee705]74
75 /*
76 * Clear both SFSRs.
77 */
78 dtlb_sfsr_write(0);
79 itlb_sfsr_write(0);
[97f1691]80}
[b6fba84]81
[97f1691]82/** Insert privileged mapping into DMMU TLB.
83 *
[965dc18]84 * @param page Virtual page address.
85 * @param frame Physical frame address.
86 * @param pagesize Page size.
87 * @param locked True for permanent mappings, false otherwise.
88 * @param cacheable True if the mapping is cacheable, false otherwise.
[97f1691]89 */
[2057572]90void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
91 bool locked, bool cacheable)
[97f1691]92{
93 tlb_tag_access_reg_t tag;
94 tlb_data_t data;
95 page_address_t pg;
96 frame_address_t fr;
[b6fba84]97
[97f1691]98 pg.address = page;
99 fr.address = frame;
[02f441c0]100
[965dc18]101 tag.context = ASID_KERNEL;
[02f441c0]102 tag.vpn = pg.vpn;
103
104 dtlb_tag_access_write(tag.value);
105
106 data.value = 0;
107 data.v = true;
[97f1691]108 data.size = pagesize;
[02f441c0]109 data.pfn = fr.pfn;
[97f1691]110 data.l = locked;
111 data.cp = cacheable;
[92778f2]112#ifdef CONFIG_VIRT_IDX_DCACHE
[97f1691]113 data.cv = cacheable;
[92778f2]114#endif /* CONFIG_VIRT_IDX_DCACHE */
[02f441c0]115 data.p = true;
116 data.w = true;
[d681c17]117 data.g = false;
[02f441c0]118
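	/*
	 * The insert follows the usual two-step protocol: the Tag Access
	 * register written above selects the VPN and context, and the
	 * following store to the Data In register makes the MMU pick a
	 * replacement slot and install the entry.
	 */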
119 dtlb_data_in_write(data.value);
[0d04024]120}
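/*
 * A minimal usage sketch with hypothetical addresses (the PAGESIZE_*
 * page size constants are assumed to be provided by arch/mm/tlb.h):
 *
 *	// Lock a cacheable 4M kernel mapping of physical 0x1c000000 at
 *	// virtual 0x400000, e.g. for an early boot-time window:
 *	dtlb_insert_mapping(0x400000, 0x1c000000, PAGESIZE_4M, true, true);
 */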
121
[a7961271]122/** Copy PTE to TLB.
123 *
[965dc18]124 * @param t Page Table Entry to be copied.
125 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
126 * @param ro If true, the entry will be created read-only, regardless
127 * of its w field.
[a7961271]128 */
[98000fb]129void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
[a7961271]130{
131 tlb_tag_access_reg_t tag;
132 tlb_data_t data;
133 page_address_t pg;
134 frame_address_t fr;
135
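	/*
	 * The 16K kernel page size is emulated on top of 8K MMU pages, so
	 * index (0 or 1) selects the lower or upper 8K sub-page; shifting it
	 * by MMU_PAGE_WIDTH yields the sub-page offset within the pair.
	 */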
[2057572]136 pg.address = t->page + (index << MMU_PAGE_WIDTH);
137 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
[a7961271]138
139 tag.value = 0;
140 tag.context = t->as->asid;
141 tag.vpn = pg.vpn;
[2057572]142
[a7961271]143 dtlb_tag_access_write(tag.value);
[2057572]144
[a7961271]145 data.value = 0;
146 data.v = true;
147 data.size = PAGESIZE_8K;
148 data.pfn = fr.pfn;
149 data.l = false;
150 data.cp = t->c;
[92778f2]151#ifdef CONFIG_VIRT_IDX_DCACHE
[a7961271]152 data.cv = t->c;
[92778f2]153#endif /* CONFIG_VIRT_IDX_DCACHE */
[cfa70add]154 data.p = t->k; /* p as in privileged */
[a7961271]155 data.w = ro ? false : t->w;
156 data.g = t->g;
[2057572]157
[a7961271]158 dtlb_data_in_write(data.value);
159}
160
[29b2bbf]161/** Copy PTE to ITLB.
162 *
[965dc18]163 * @param t Page Table Entry to be copied.
164 * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
[29b2bbf]165 */
[98000fb]166void itlb_pte_copy(pte_t *t, size_t index)
[f47fd19]167{
[a7961271]168 tlb_tag_access_reg_t tag;
169 tlb_data_t data;
170 page_address_t pg;
171 frame_address_t fr;
172
[2057572]173 pg.address = t->page + (index << MMU_PAGE_WIDTH);
174 fr.address = t->frame + (index << MMU_PAGE_WIDTH);
[a7961271]175
176 tag.value = 0;
177 tag.context = t->as->asid;
178 tag.vpn = pg.vpn;
179
180 itlb_tag_access_write(tag.value);
181
182 data.value = 0;
183 data.v = true;
184 data.size = PAGESIZE_8K;
185 data.pfn = fr.pfn;
186 data.l = false;
187 data.cp = t->c;
[cfa70add]188 data.p = t->k; /* p as in privileged */
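	/* Write permission is irrelevant for instruction fetches. */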
[a7961271]189 data.w = false;
190 data.g = t->g;
191
192 itlb_data_in_write(data.value);
[f47fd19]193}
194
[008029d]195/** ITLB miss handler. */
[96b02eb9]196void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
[008029d]197{
[2bf4936]198 uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
[98000fb]199 size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
[a7961271]200 pte_t *t;
201
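	/*
	 * The faulting address is the trap PC (istate->tpc). Look up its
	 * enclosing 16K kernel page in the software page hash table; index
	 * selects the 8K MMU sub-page to be installed.
	 */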
[0ff03f3]202 t = page_mapping_find(AS, page_16k, true);
[a7961271]203 if (t && PTE_EXECUTABLE(t)) {
204 /*
205 * The mapping was found in the software page hash table.
206 * Insert it into ITLB.
207 */
208 t->a = true;
[2057572]209 itlb_pte_copy(t, index);
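		/*
		 * With CONFIG_TSB enabled, mirror the entry into the ITSB as
		 * well, so that subsequent misses on this page can typically
		 * be resolved by the TSB lookup in the fast miss path without
		 * re-entering this handler.
		 */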
[29b2bbf]210#ifdef CONFIG_TSB
[2057572]211 itsb_pte_copy(t, index);
[29b2bbf]212#endif
[a7961271]213 } else {
214 /*
[771cd22]215 * Forward the page fault to the address space page fault
216 * handler.
[7008097]217 */
[1dbc43f]218 as_page_fault(page_16k, PF_ACCESS_EXEC, istate);
[a7961271]219 }
[008029d]220}
221
[f47fd19]222/** DTLB miss handler.
223 *
[771cd22]224 * Note that some faults (e.g. kernel faults) were already resolved by the
225 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
[36f19c0]226 *
[965dc18]227 * @param tag Content of the TLB Tag Access register as it existed
228 * when the trap happened. This is to prevent confusion
229 * created by clobbered Tag Access register during a nested
230 * DTLB miss.
231 * @param istate Interrupted state saved on the stack.
[f47fd19]232 */
[36f19c0]233void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
[008029d]234{
[2bf4936]235 uintptr_t page_8k;
236 uintptr_t page_16k;
[98000fb]237 size_t index;
[f47fd19]238 pte_t *t;
[0e78394]239 as_t *as = AS;
[7cb53f62]240
[2bf4936]241 page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
242 page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
[2057572]243 index = tag.vpn % MMU_PAGES_PER_PAGE;
[fd85ae5]244
[f47fd19]245 if (tag.context == ASID_KERNEL) {
246 if (!tag.vpn) {
247 /* NULL access in kernel */
[1dbc43f]248 panic("NULL pointer dereference.");
[2bf4936]249 } else if (page_8k >= end_of_identity) {
[0e78394]250 /* Kernel non-identity. */
251 as = AS_KERNEL;
[1f5714e]252 } else {
[1dbc43f]253 panic("Unexpected kernel page fault.");
[f47fd19]254 }
[68656282]255 }
256
[0e78394]257 t = page_mapping_find(as, page_16k, true);
[f47fd19]258 if (t) {
259 /*
260 * The mapping was found in the software page hash table.
261 * Insert it into DTLB.
262 */
[a7961271]263 t->a = true;
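		/*
		 * The entry is inserted read-only on purpose: the first write
		 * to the page will then raise fast_data_access_protection(),
		 * which is where the dirty bit gets set.
		 */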
[2057572]264 dtlb_pte_copy(t, index, true);
[29b2bbf]265#ifdef CONFIG_TSB
[2057572]266 dtsb_pte_copy(t, index, true);
[29b2bbf]267#endif
[f47fd19]268 } else {
269 /*
[2057572]270 * Forward the page fault to the address space page fault
271 * handler.
[0e78394]272 */
[1dbc43f]273 as_page_fault(page_16k, PF_ACCESS_READ, istate);
[f47fd19]274 }
[008029d]275}
276
[36f19c0]277/** DTLB protection fault handler.
278 *
[965dc18]279 * @param tag Content of the TLB Tag Access register as it existed
280 * when the trap happened. This is to prevent confusion
281 * created by clobbered Tag Access register during a nested
282 * DTLB miss.
283 * @param istate Interrupted state saved on the stack.
[36f19c0]284 */
285void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
[008029d]286{
[2bf4936]287 uintptr_t page_16k;
[98000fb]288 size_t index;
[e0b241f]289 pte_t *t;
[0e78394]290 as_t *as = AS;
[e0b241f]291
[2bf4936]292 page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
[2057572]293 index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
[e0b241f]294
[0e78394]295 if (tag.context == ASID_KERNEL)
296 as = AS_KERNEL;
297
298 t = page_mapping_find(as, page_16k, true);
[e0b241f]299 if (t && PTE_WRITABLE(t)) {
300 /*
[771cd22]301 * The mapping was found in the software page hash table and is
302 * writable. Demap the old mapping and insert an updated mapping
303 * into DTLB.
[e0b241f]304 */
305 t->a = true;
306 t->d = true;
[2057572]307 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
[2bf4936]308 page_16k + index * MMU_PAGE_SIZE);
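		/*
		 * The stale read-only entry must be demapped first; inserting
		 * the writable copy on top of it could otherwise leave two TLB
		 * entries for the same virtual page.
		 */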
[2057572]309 dtlb_pte_copy(t, index, false);
[29b2bbf]310#ifdef CONFIG_TSB
[2057572]311 dtsb_pte_copy(t, index, false);
[29b2bbf]312#endif
[e0b241f]313 } else {
314 /*
[771cd22]315 * Forward the page fault to the address space page fault
316 * handler.
[e0b241f]317 */
[1dbc43f]318 as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
[e0b241f]319 }
[008029d]320}
321
[965dc18]322/** Print TLB entry (for debugging purposes).
323 *
324 * The diag field has been left out in order to make this function more generic
325 (there is no diag field in the US3 architecture).
326 *
327 * @param i TLB entry number
328 * @param t TLB entry tag
329 * @param d TLB entry data
330 */
331static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
332{
[7e752b2]333 printf("%u: vpn=%#" PRIx64 ", context=%u, v=%u, size=%u, nfo=%u, "
334 "ie=%u, soft2=%#x, pfn=%#x, soft=%#x, l=%u, "
335 "cp=%u, cv=%u, e=%u, p=%u, w=%u, g=%u\n", i, (uint64_t) t.vpn,
[965dc18]336 t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
337 d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
338}
339
340#if defined (US)
341
[0d04024]342/** Print contents of both TLBs. */
343void tlb_print(void)
344{
345 int i;
346 tlb_data_t d;
347 tlb_tag_read_reg_t t;
348
349 printf("I-TLB contents:\n");
350 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
351 d.value = itlb_data_access_read(i);
[c52ed6b]352 t.value = itlb_tag_read_read(i);
[965dc18]353 print_tlb_entry(i, t, d);
[0d04024]354 }
355
356 printf("D-TLB contents:\n");
357 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
358 d.value = dtlb_data_access_read(i);
[c52ed6b]359 t.value = dtlb_tag_read_read(i);
[965dc18]360 print_tlb_entry(i, t, d);
[0d04024]361 }
[965dc18]362}
363
364#elif defined (US3)
[0d04024]365
[965dc18]366/** Print contents of all TLBs. */
367void tlb_print(void)
368{
369 int i;
370 tlb_data_t d;
371 tlb_tag_read_reg_t t;
372
373 printf("TLB_ISMALL contents:\n");
374 for (i = 0; i < tlb_ismall_size(); i++) {
375 d.value = dtlb_data_access_read(TLB_ISMALL, i);
376 t.value = dtlb_tag_read_read(TLB_ISMALL, i);
377 print_tlb_entry(i, t, d);
378 }
379
380 printf("TLB_IBIG contents:\n");
381 for (i = 0; i < tlb_ibig_size(); i++) {
382 d.value = dtlb_data_access_read(TLB_IBIG, i);
383 t.value = dtlb_tag_read_read(TLB_IBIG, i);
384 print_tlb_entry(i, t, d);
385 }
386
387 printf("TLB_DSMALL contents:\n");
388 for (i = 0; i < tlb_dsmall_size(); i++) {
389 d.value = dtlb_data_access_read(TLB_DSMALL, i);
390 t.value = dtlb_tag_read_read(TLB_DSMALL, i);
391 print_tlb_entry(i, t, d);
392 }
393
394 printf("TLB_DBIG_0 contents:\n");
395 for (i = 0; i < tlb_dbig_size(); i++) {
396 d.value = dtlb_data_access_read(TLB_DBIG_0, i);
397 t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
398 print_tlb_entry(i, t, d);
399 }
400
401 printf("TLB_DBIG_1 contents:\n");
402 for (i = 0; i < tlb_dbig_size(); i++) {
403 d.value = dtlb_data_access_read(TLB_DBIG_1, i);
404 t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
405 print_tlb_entry(i, t, d);
406 }
[0d04024]407}
[dbb6886]408
[965dc18]409#endif
410
[8c2214e]411void describe_dmmu_fault(void)
412{
413 tlb_sfsr_reg_t sfsr;
414 uintptr_t sfar;
415
416 sfsr.value = dtlb_sfsr_read();
417 sfar = dtlb_sfar_read();
418
419#if defined (US)
420 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
421 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
422 sfsr.ow, sfsr.fv);
423#elif defined (US3)
424 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
425 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
426 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
427#endif
[7e752b2]428
429 printf("DTLB SFAR: address=%p\n", (void *) sfar);
[8c2214e]430
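	/*
	 * Writing zero clears the SFSR, most importantly its FV (fault valid)
	 * bit, so that the status of the next fault can be recorded.
	 */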
431 dtlb_sfsr_write(0);
432}
433
[8cee705]434void dump_sfsr_and_sfar(void)
435{
436 tlb_sfsr_reg_t sfsr;
437 uintptr_t sfar;
438
439 sfsr.value = dtlb_sfsr_read();
440 sfar = dtlb_sfar_read();
441
[965dc18]442#if defined (US)
[771cd22]443 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
[2057572]444 "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
445 sfsr.ow, sfsr.fv);
[965dc18]446#elif defined (US3)
447 printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
448 "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
449 sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
450#endif
451
[7e752b2]452 printf("DTLB SFAR: address=%p\n", (void *) sfar);
[8cee705]453
454 dtlb_sfsr_write(0);
455}
456
[687246b]457#if defined (US)
[965dc18]458/** Invalidate all unlocked ITLB and DTLB entries. */
459void tlb_invalidate_all(void)
460{
461 int i;
462
[8dbc18c]463 /*
464 * Walk all ITLB and DTLB entries and remove all unlocked mappings.
465 *
466 * The kernel doesn't use global mappings, so any locked global mappings
[965dc18]467 * found here must have been created by someone else. Their only remaining
[8dbc18c]468 * effect is to collide with proper mappings, so it is safe to invalidate
 469 * them as well, even this late.
470 */
471
[965dc18]472 tlb_data_t d;
473 tlb_tag_read_reg_t t;
474
[dbb6886]475 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
476 d.value = itlb_data_access_read(i);
[8dbc18c]477 if (!d.l || d.g) {
[dbb6886]478 t.value = itlb_tag_read_read(i);
479 d.v = false;
480 itlb_tag_access_write(t.value);
481 itlb_data_access_write(i, d.value);
482 }
483 }
[965dc18]484
[dbb6886]485 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
486 d.value = dtlb_data_access_read(i);
[8dbc18c]487 if (!d.l || d.g) {
[dbb6886]488 t.value = dtlb_tag_read_read(i);
489 d.v = false;
490 dtlb_tag_access_write(t.value);
491 dtlb_data_access_write(i, d.value);
492 }
493 }
[965dc18]494
[687246b]495}
[965dc18]496
[687246b]497#elif defined (US3)
[965dc18]498
[687246b]499/** Invalidate all unlocked ITLB and DTLB entries. */
500void tlb_invalidate_all(void)
501{
502 itlb_demap(TLB_DEMAP_ALL, 0, 0);
503 dtlb_demap(TLB_DEMAP_ALL, 0, 0);
[dbb6886]504}
505
[687246b]506#endif
507
[771cd22]508/** Invalidate all ITLB and DTLB entries that belong to specified ASID
509 * (Context).
[dbb6886]510 *
511 * @param asid Address Space ID.
512 */
513void tlb_invalidate_asid(asid_t asid)
514{
[fd85ae5]515 tlb_context_reg_t pc_save, ctx;
[ed166f7]516
[fd85ae5]517 /* switch to nucleus because we are mapped by the primary context */
518 nucleus_enter();
519
520 ctx.v = pc_save.v = mmu_primary_context_read();
[ed166f7]521 ctx.context = asid;
[fd85ae5]522 mmu_primary_context_write(ctx.v);
523
524 itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
525 dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
[ed166f7]526
[fd85ae5]527 mmu_primary_context_write(pc_save.v);
[ed166f7]528
[fd85ae5]529 nucleus_leave();
[dbb6886]530}
531
[771cd22]532/** Invalidate all ITLB and DTLB entries for specified page range in specified
533 * address space.
[dbb6886]534 *
[965dc18]535 * @param asid Address Space ID.
536 * @param page First page which to sweep out from ITLB and DTLB.
537 * @param cnt Number of ITLB and DTLB entries to invalidate.
[dbb6886]538 */
[98000fb]539void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
[dbb6886]540{
[6c441cf8]541 unsigned int i;
[fd85ae5]542 tlb_context_reg_t pc_save, ctx;
[ed166f7]543
[fd85ae5]544 /* switch to nucleus because we are mapped by the primary context */
545 nucleus_enter();
546
547 ctx.v = pc_save.v = mmu_primary_context_read();
[ed166f7]548 ctx.context = asid;
[fd85ae5]549 mmu_primary_context_write(ctx.v);
[4512d7e]550
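	/*
	 * cnt counts 16K kernel pages, each backed by MMU_PAGES_PER_PAGE
	 * (i.e. two) 8K MMU pages, so every sub-page is demapped separately.
	 */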
[2057572]551 for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
[454f1da]552 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
[2057572]553 page + i * MMU_PAGE_SIZE);
[454f1da]554 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
[2057572]555 page + i * MMU_PAGE_SIZE);
[4512d7e]556 }
[ed166f7]557
[fd85ae5]558 mmu_primary_context_write(pc_save.v);
559
560 nucleus_leave();
[dbb6886]561}
[b45c443]562
[10b890b]563/** @}
[b45c443]564 */