source: mainline/kernel/genarch/src/mm/page_ht.c@ 63e27ef

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 63e27ef was 63e27ef, checked in by Jiri Svoboda <jiri@…>, 8 years ago

ASSERT → assert

  • Property mode set to 100644
File size: 8.5 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation (VAT) for global page hash table.
36 */
37
38#include <genarch/mm/page_ht.h>
39#include <mm/page.h>
40#include <arch/mm/page.h>
41#include <mm/frame.h>
42#include <mm/slab.h>
43#include <mm/as.h>
44#include <arch/mm/asid.h>
45#include <typedefs.h>
46#include <arch/asm.h>
47#include <arch/barrier.h>
48#include <synch/spinlock.h>
49#include <arch.h>
50#include <assert.h>
51#include <adt/hash_table.h>
52#include <align.h>
53
/* Hash table operations used by the page hash table (see ht_operations). */
static size_t hash(sysarg_t[]);
static bool compare(sysarg_t[], size_t, link_t *);
static void remove_callback(link_t *);

/* Page mapping operations implemented on top of the page hash table
 * (see ht_mapping_operations). */
static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void ht_mapping_remove(as_t *, uintptr_t);
static bool ht_mapping_find(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_update(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_make_global(uintptr_t, size_t);

/* Slab cache from which pte_t entries of the page hash table are
 * allocated (initialized elsewhere; NULL until then). */
slab_cache_t *pte_cache = NULL;

/**
 * This lock protects the page hash table. It must be acquired
 * after address space lock and after any address space area
 * locks.
 *
 */
IRQ_SPINLOCK_STATIC_INITIALIZE(page_ht_lock);

/** Page hash table.
 *
 * The page hash table may be accessed only when page_ht_lock is held.
 *
 */
hash_table_t page_ht;

/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
	.hash = hash,
	.compare = compare,
	.remove_callback = remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
	.mapping_insert = ht_mapping_insert,
	.mapping_remove = ht_mapping_remove,
	.mapping_find = ht_mapping_find,
	.mapping_update = ht_mapping_update,
	.mapping_make_global = ht_mapping_make_global
};
96
97/** Compute page hash table index.
98 *
99 * @param key Array of two keys (i.e. page and address space).
100 *
101 * @return Index into page hash table.
102 *
103 */
104size_t hash(sysarg_t key[])
105{
106 as_t *as = (as_t *) key[KEY_AS];
107 uintptr_t page = (uintptr_t) key[KEY_PAGE];
108
109 /*
110 * Virtual page addresses have roughly the same probability
111 * of occurring. Least significant bits of VPN compose the
112 * hash index.
113 *
114 */
115 size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
116
117 /*
118 * Address space structures are likely to be allocated from
119 * similar addresses. Least significant bits compose the
120 * hash index.
121 *
122 */
123 index |= ((sysarg_t) as) & (PAGE_HT_ENTRIES - 1);
124
125 return index;
126}
127
128/** Compare page hash table item with page and/or address space.
129 *
130 * @param key Array of one or two keys (i.e. page and/or address space).
131 * @param keys Number of keys passed.
132 * @param item Item to compare the keys with.
133 *
134 * @return true on match, false otherwise.
135 *
136 */
137bool compare(sysarg_t key[], size_t keys, link_t *item)
138{
139 assert(item);
140 assert(keys > 0);
141 assert(keys <= PAGE_HT_KEYS);
142
143 /*
144 * Convert item to PTE.
145 *
146 */
147 pte_t *pte = hash_table_get_instance(item, pte_t, link);
148
149 if (keys == PAGE_HT_KEYS)
150 return (key[KEY_AS] == (uintptr_t) pte->as) &&
151 (key[KEY_PAGE] == pte->page);
152
153 return (key[KEY_AS] == (uintptr_t) pte->as);
154}
155
156/** Callback on page hash table item removal.
157 *
158 * @param item Page hash table item being removed.
159 *
160 */
161void remove_callback(link_t *item)
162{
163 assert(item);
164
165 /*
166 * Convert item to PTE.
167 *
168 */
169 pte_t *pte = hash_table_get_instance(item, pte_t, link);
170
171 slab_free(pte_cache, pte);
172}
173
/** Map page to frame using page hash table.
 *
 * Map virtual address page to physical address frame
 * using flags. If a mapping for the page already exists,
 * it is silently left unchanged.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	/*
	 * Note the embedded assignment: page is aligned down to the page
	 * boundary as a side effect of building the lookup key, so later
	 * uses of page in this function see the aligned value.
	 */
	sysarg_t key[2] = {
		(uintptr_t) as,
		page = ALIGN_DOWN(page, PAGE_SIZE)
	};

	assert(page_table_locked(as));

	irq_spinlock_lock(&page_ht_lock, true);

	/* Insert only if no mapping for (as, page) exists yet. */
	if (!hash_table_find(&page_ht, key)) {
		/*
		 * NOTE(review): FRAME_ATOMIC allocations may presumably fail
		 * under memory pressure, in which case the assert below is the
		 * only failure handling — confirm this is the intended policy.
		 */
		pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
		assert(pte != NULL);

		/* Decode mapping flags into the individual PTE bits. */
		pte->g = (flags & PAGE_GLOBAL) != 0;
		pte->x = (flags & PAGE_EXEC) != 0;
		pte->w = (flags & PAGE_WRITE) != 0;
		pte->k = !(flags & PAGE_USER);
		pte->c = (flags & PAGE_CACHEABLE) != 0;
		pte->p = !(flags & PAGE_NOT_PRESENT);
		pte->a = false;
		pte->d = false;

		pte->as = as;
		pte->page = ALIGN_DOWN(page, PAGE_SIZE);
		pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);

		/*
		 * Make sure that a concurrent ht_mapping_find() will see the
		 * new entry only after it is fully initialized.
		 */
		write_barrier();

		hash_table_insert(&page_ht, key, &pte->link);
	}

	irq_spinlock_unlock(&page_ht_lock, true);
}
225
226/** Remove mapping of page from page hash table.
227 *
228 * Remove any mapping of page within address space as.
229 * TLB shootdown should follow in order to make effects of
230 * this call visible.
231 *
232 * @param as Address space to which page belongs.
233 * @param page Virtual address of the page to be demapped.
234 *
235 */
236void ht_mapping_remove(as_t *as, uintptr_t page)
237{
238 sysarg_t key[2] = {
239 (uintptr_t) as,
240 page = ALIGN_DOWN(page, PAGE_SIZE)
241 };
242
243 assert(page_table_locked(as));
244
245 irq_spinlock_lock(&page_ht_lock, true);
246
247 /*
248 * Note that removed PTE's will be freed
249 * by remove_callback().
250 */
251 hash_table_remove(&page_ht, key, 2);
252
253 irq_spinlock_unlock(&page_ht_lock, true);
254}
255
256static pte_t *ht_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
257{
258 sysarg_t key[2] = {
259 (uintptr_t) as,
260 page = ALIGN_DOWN(page, PAGE_SIZE)
261 };
262
263 assert(nolock || page_table_locked(as));
264
265 link_t *cur = hash_table_find(&page_ht, key);
266 if (cur)
267 return hash_table_get_instance(cur, pte_t, link);
268
269 return NULL;
270}
271
272/** Find mapping for virtual page in page hash table.
273 *
274 * @param as Address space to which page belongs.
275 * @param page Virtual page.
276 * @param nolock True if the page tables need not be locked.
277 * @param[out] pte Structure that will receive a copy of the found PTE.
278 *
279 * @return True if the mapping was found, false otherwise.
280 */
281bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
282{
283 irq_spinlock_lock(&page_ht_lock, true);
284
285 pte_t *t = ht_mapping_find_internal(as, page, nolock);
286 if (t)
287 *pte = *t;
288
289 irq_spinlock_unlock(&page_ht_lock, true);
290
291 return t != NULL;
292}
293
294/** Update mapping for virtual page in page hash table.
295 *
296 * @param as Address space to which page belongs.
297 * @param page Virtual page.
298 * @param nolock True if the page tables need not be locked.
299 * @param pte New PTE.
300 */
301void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
302{
303 irq_spinlock_lock(&page_ht_lock, true);
304
305 pte_t *t = ht_mapping_find_internal(as, page, nolock);
306 if (!t)
307 panic("Updating non-existent PTE");
308
309 assert(pte->as == t->as);
310 assert(pte->page == t->page);
311 assert(pte->frame == t->frame);
312 assert(pte->g == t->g);
313 assert(pte->x == t->x);
314 assert(pte->w == t->w);
315 assert(pte->k == t->k);
316 assert(pte->c == t->c);
317 assert(pte->p == t->p);
318
319 t->a = pte->a;
320 t->d = pte->d;
321
322 irq_spinlock_unlock(&page_ht_lock, true);
323}
324
/** Make portion of address space global (no-op for the page hash table).
 *
 * @param base Base address of the region.
 * @param size Size of the region.
 */
void ht_mapping_make_global(uintptr_t base, size_t size)
{
	/* No action is required for this implementation. */
	(void) base;
	(void) size;
}
329
330/** @}
331 */
Note: See TracBrowser for help on using the repository browser.