source: mainline/kernel/genarch/src/mm/page_ht.c @ 63e27ef

Last change on this file since 63e27ef was 63e27ef, checked in by Jiri Svoboda <jiri@…>, 8 years ago

ASSERT → assert

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation (VAT) for global page hash table.
 */

#include <genarch/mm/page_ht.h>
#include <mm/page.h>
#include <arch/mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/as.h>
#include <arch/mm/asid.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <assert.h>
#include <adt/hash_table.h>
#include <align.h>

static size_t hash(sysarg_t[]);
static bool compare(sysarg_t[], size_t, link_t *);
static void remove_callback(link_t *);

static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void ht_mapping_remove(as_t *, uintptr_t);
static bool ht_mapping_find(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_update(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_make_global(uintptr_t, size_t);

/** Slab cache for the PTE structures stored in the page hash table. */
slab_cache_t *pte_cache = NULL;

/**
 * This lock protects the page hash table. It must be acquired
 * after the address space lock and after any address space area
 * locks.
 */
IRQ_SPINLOCK_STATIC_INITIALIZE(page_ht_lock);

/** Page hash table.
 *
 * The page hash table may be accessed only when page_ht_lock is held.
 */
hash_table_t page_ht;

/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
	.hash = hash,
	.compare = compare,
	.remove_callback = remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
	.mapping_insert = ht_mapping_insert,
	.mapping_remove = ht_mapping_remove,
	.mapping_find = ht_mapping_find,
	.mapping_update = ht_mapping_update,
	.mapping_make_global = ht_mapping_make_global
};

/** Compute page hash table index.
 *
 * @param key Array of two keys (i.e. page and address space).
 *
 * @return Index into page hash table.
 */
size_t hash(sysarg_t key[])
{
	as_t *as = (as_t *) key[KEY_AS];
	uintptr_t page = (uintptr_t) key[KEY_PAGE];

	/*
	 * Virtual page addresses have roughly the same probability
	 * of occurring. Least significant bits of VPN compose the
	 * hash index.
	 */
	size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));

	/*
	 * Address space structures are likely to be allocated from
	 * similar addresses. Least significant bits compose the
	 * hash index.
	 */
	index |= ((sysarg_t) as) & (PAGE_HT_ENTRIES - 1);

	return index;
}

/** Compare page hash table item with page and/or address space.
 *
 * @param key Array of one or two keys (i.e. page and/or address space).
 * @param keys Number of keys passed.
 * @param item Item to compare the keys with.
 *
 * @return true on match, false otherwise.
 */
bool compare(sysarg_t key[], size_t keys, link_t *item)
{
	assert(item);
	assert(keys > 0);
	assert(keys <= PAGE_HT_KEYS);

	/*
	 * Convert item to PTE.
	 */
	pte_t *pte = hash_table_get_instance(item, pte_t, link);

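	/*
	 * Two keys match a specific page within the given address space;
	 * a single key matches any page belonging to the address space.
	 */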
149 if (keys == PAGE_HT_KEYS)
150 return (key[KEY_AS] == (uintptr_t) pte->as) &&
151 (key[KEY_PAGE] == pte->page);
152
153 return (key[KEY_AS] == (uintptr_t) pte->as);
[c7ec94a4]154}
155
156/** Callback on page hash table item removal.
157 *
158 * @param item Page hash table item being removed.
[da1bafb]159 *
[c7ec94a4]160 */
161void remove_callback(link_t *item)
162{
[63e27ef]163 assert(item);
[da1bafb]164
[c7ec94a4]165 /*
166 * Convert item to PTE.
[da1bafb]167 *
[c7ec94a4]168 */
[da1bafb]169 pte_t *pte = hash_table_get_instance(item, pte_t, link);
170
[a55ddc64]171 slab_free(pte_cache, pte);
[c7ec94a4]172}
173
/** Map page to frame using page hash table.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	sysarg_t key[2] = {
		(uintptr_t) as,
		page = ALIGN_DOWN(page, PAGE_SIZE)
	};
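	/*
	 * Note that the initializer above also overwrites the page argument
	 * with its page-aligned value, which then serves as the lookup key.
	 */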

	assert(page_table_locked(as));

	irq_spinlock_lock(&page_ht_lock, true);

	if (!hash_table_find(&page_ht, key)) {
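		/*
		 * FRAME_ATOMIC requests a non-blocking allocation that may
		 * fail instead of waiting for memory; the assertion below
		 * documents that failure is not expected here.
		 */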
		pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
		assert(pte != NULL);

		pte->g = (flags & PAGE_GLOBAL) != 0;
		pte->x = (flags & PAGE_EXEC) != 0;
		pte->w = (flags & PAGE_WRITE) != 0;
		pte->k = !(flags & PAGE_USER);
		pte->c = (flags & PAGE_CACHEABLE) != 0;
		pte->p = !(flags & PAGE_NOT_PRESENT);
		pte->a = false;
		pte->d = false;

		pte->as = as;
		pte->page = ALIGN_DOWN(page, PAGE_SIZE);
		pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);

		/*
		 * Make sure that a concurrent ht_mapping_find() will see the
		 * new entry only after it is fully initialized.
		 */
		write_barrier();

		hash_table_insert(&page_ht, key, &pte->link);
	}

	irq_spinlock_unlock(&page_ht_lock, true);
}

/** Remove mapping of page from page hash table.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void ht_mapping_remove(as_t *as, uintptr_t page)
{
	sysarg_t key[2] = {
		(uintptr_t) as,
		page = ALIGN_DOWN(page, PAGE_SIZE)
	};

	assert(page_table_locked(as));

	irq_spinlock_lock(&page_ht_lock, true);

	/*
	 * Note that removed PTEs will be freed
	 * by remove_callback().
	 */
	hash_table_remove(&page_ht, key, 2);

	irq_spinlock_unlock(&page_ht_lock, true);
}

static pte_t *ht_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
	sysarg_t key[2] = {
		(uintptr_t) as,
		page = ALIGN_DOWN(page, PAGE_SIZE)
	};

	assert(nolock || page_table_locked(as));

	link_t *cur = hash_table_find(&page_ht, key);
	if (cur)
		return hash_table_get_instance(cur, pte_t, link);

	return NULL;
}

/** Find mapping for virtual page in page hash table.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param[out] pte Structure that will receive a copy of the found PTE.
 *
 * @return True if the mapping was found, false otherwise.
 */
bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
	irq_spinlock_lock(&page_ht_lock, true);

	pte_t *t = ht_mapping_find_internal(as, page, nolock);
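	/*
	 * Copy the found PTE into the caller-provided structure while
	 * page_ht_lock is still held; the hash table entry itself may be
	 * removed and freed once the lock is dropped.
	 */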
	if (t)
		*pte = *t;

	irq_spinlock_unlock(&page_ht_lock, true);

	return t != NULL;
}

/** Update mapping for virtual page in page hash table.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param pte New PTE.
 */
void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
	irq_spinlock_lock(&page_ht_lock, true);

	pte_t *t = ht_mapping_find_internal(as, page, nolock);
	if (!t)
		panic("Updating non-existent PTE");

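	/*
	 * Only the accessed and dirty bits may change; the assertions
	 * below verify that all other fields of the new PTE match the
	 * existing mapping.
	 */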
	assert(pte->as == t->as);
	assert(pte->page == t->page);
	assert(pte->frame == t->frame);
	assert(pte->g == t->g);
	assert(pte->x == t->x);
	assert(pte->w == t->w);
	assert(pte->k == t->k);
	assert(pte->c == t->c);
	assert(pte->p == t->p);

	t->a = pte->a;
	t->d = pte->d;

	irq_spinlock_unlock(&page_ht_lock, true);
}

void ht_mapping_make_global(uintptr_t base, size_t size)
{
	/* nothing to do */
}

/** @}
 */