source: mainline/kernel/genarch/src/mm/page_ht.c@ a55ddc64

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a55ddc64 was a55ddc64, checked in by Jakub Jermar <jakub@…>, 14 years ago

Allocate PTEs from low memory.

  • Introduce a dedicated slab cache for pte_t's.
  • This should limit the depth of nested page faults.
  • Property mode set to 100644
File size: 6.8 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation (VAT) for global page hash table.
36 */
37
38#include <genarch/mm/page_ht.h>
39#include <mm/page.h>
40#include <arch/mm/page.h>
41#include <mm/frame.h>
42#include <mm/slab.h>
43#include <mm/as.h>
44#include <arch/mm/asid.h>
45#include <typedefs.h>
46#include <arch/asm.h>
47#include <synch/spinlock.h>
48#include <arch.h>
49#include <debug.h>
50#include <memstr.h>
51#include <adt/hash_table.h>
52#include <align.h>
53
/* Hash-table callbacks for the global page hash table. */
static size_t hash(sysarg_t[]);
static bool compare(sysarg_t[], size_t, link_t *);
static void remove_callback(link_t *);

/* Page-mapping operations backing ht_mapping_operations below. */
static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void ht_mapping_remove(as_t *, uintptr_t);
static pte_t *ht_mapping_find(as_t *, uintptr_t, bool);

/* Dedicated slab cache for pte_t allocations (see ht_mapping_insert /
 * remove_callback). NOTE(review): presumably created during paging init
 * elsewhere; creation is not visible in this file. */
slab_cache_t *pte_cache = NULL;

/**
 * This lock protects the page hash table. It must be acquired
 * after address space lock and after any address space area
 * locks.
 *
 */
mutex_t page_ht_lock;

/** Page hash table.
 *
 * The page hash table may be accessed only when page_ht_lock is held.
 *
 */
hash_table_t page_ht;

/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
	.hash = hash,
	.compare = compare,
	.remove_callback = remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
	.mapping_insert = ht_mapping_insert,
	.mapping_remove = ht_mapping_remove,
	.mapping_find = ht_mapping_find
};
92
93/** Compute page hash table index.
94 *
95 * @param key Array of two keys (i.e. page and address space).
96 *
97 * @return Index into page hash table.
98 *
99 */
100size_t hash(sysarg_t key[])
101{
102 as_t *as = (as_t *) key[KEY_AS];
103 uintptr_t page = (uintptr_t) key[KEY_PAGE];
104
105 /*
106 * Virtual page addresses have roughly the same probability
107 * of occurring. Least significant bits of VPN compose the
108 * hash index.
109 *
110 */
111 size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
112
113 /*
114 * Address space structures are likely to be allocated from
115 * similar addresses. Least significant bits compose the
116 * hash index.
117 *
118 */
119 index |= ((sysarg_t) as) & (PAGE_HT_ENTRIES - 1);
120
121 return index;
122}
123
124/** Compare page hash table item with page and/or address space.
125 *
126 * @param key Array of one or two keys (i.e. page and/or address space).
127 * @param keys Number of keys passed.
128 * @param item Item to compare the keys with.
129 *
130 * @return true on match, false otherwise.
131 *
132 */
133bool compare(sysarg_t key[], size_t keys, link_t *item)
134{
135 ASSERT(item);
136 ASSERT(keys > 0);
137 ASSERT(keys <= PAGE_HT_KEYS);
138
139 /*
140 * Convert item to PTE.
141 *
142 */
143 pte_t *pte = hash_table_get_instance(item, pte_t, link);
144
145 if (keys == PAGE_HT_KEYS)
146 return (key[KEY_AS] == (uintptr_t) pte->as) &&
147 (key[KEY_PAGE] == pte->page);
148
149 return (key[KEY_AS] == (uintptr_t) pte->as);
150}
151
152/** Callback on page hash table item removal.
153 *
154 * @param item Page hash table item being removed.
155 *
156 */
157void remove_callback(link_t *item)
158{
159 ASSERT(item);
160
161 /*
162 * Convert item to PTE.
163 *
164 */
165 pte_t *pte = hash_table_get_instance(item, pte_t, link);
166
167 slab_free(pte_cache, pte);
168}
169
170/** Map page to frame using page hash table.
171 *
172 * Map virtual address page to physical address frame
173 * using flags.
174 *
175 * @param as Address space to which page belongs.
176 * @param page Virtual address of the page to be mapped.
177 * @param frame Physical address of memory frame to which the mapping is done.
178 * @param flags Flags to be used for mapping.
179 *
180 */
181void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
182 unsigned int flags)
183{
184 sysarg_t key[2] = {
185 (uintptr_t) as,
186 page = ALIGN_DOWN(page, PAGE_SIZE)
187 };
188
189 ASSERT(page_table_locked(as));
190
191 if (!hash_table_find(&page_ht, key)) {
192 pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
193 ASSERT(pte != NULL);
194
195 pte->g = (flags & PAGE_GLOBAL) != 0;
196 pte->x = (flags & PAGE_EXEC) != 0;
197 pte->w = (flags & PAGE_WRITE) != 0;
198 pte->k = !(flags & PAGE_USER);
199 pte->c = (flags & PAGE_CACHEABLE) != 0;
200 pte->p = !(flags & PAGE_NOT_PRESENT);
201 pte->a = false;
202 pte->d = false;
203
204 pte->as = as;
205 pte->page = ALIGN_DOWN(page, PAGE_SIZE);
206 pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);
207
208 hash_table_insert(&page_ht, key, &pte->link);
209 }
210}
211
212/** Remove mapping of page from page hash table.
213 *
214 * Remove any mapping of page within address space as.
215 * TLB shootdown should follow in order to make effects of
216 * this call visible.
217 *
218 * @param as Address space to which page belongs.
219 * @param page Virtual address of the page to be demapped.
220 *
221 */
222void ht_mapping_remove(as_t *as, uintptr_t page)
223{
224 sysarg_t key[2] = {
225 (uintptr_t) as,
226 page = ALIGN_DOWN(page, PAGE_SIZE)
227 };
228
229 ASSERT(page_table_locked(as));
230
231 /*
232 * Note that removed PTE's will be freed
233 * by remove_callback().
234 */
235 hash_table_remove(&page_ht, key, 2);
236}
237
238
239/** Find mapping for virtual page in page hash table.
240 *
241 * @param as Address space to which page belongs.
242 * @param page Virtual page.
243 * @param nolock True if the page tables need not be locked.
244 *
245 * @return NULL if there is no such mapping; requested mapping otherwise.
246 *
247 */
248pte_t *ht_mapping_find(as_t *as, uintptr_t page, bool nolock)
249{
250 sysarg_t key[2] = {
251 (uintptr_t) as,
252 page = ALIGN_DOWN(page, PAGE_SIZE)
253 };
254
255 ASSERT(nolock || page_table_locked(as));
256
257 link_t *cur = hash_table_find(&page_ht, key);
258 if (cur)
259 return hash_table_get_instance(cur, pte_t, link);
260
261 return NULL;
262}
263
264/** @}
265 */
Note: See TracBrowser for help on using the repository browser.