source: mainline/kernel/genarch/src/mm/page_ht.c@ d776329b

Last change on this file since d776329b was fb63c06, checked in by Jakub Jermar <jakub@…>, 9 years ago

Make page hash table critical sections smaller

After the change of the page mapping interface to work exclusively with
a copy of the actual PTE, the critical section around page hash table
look-ups, insertions and deletions can be much smaller.

This change necessitated turning the page_ht_lock mutex into a
spinlock, because the page mapping API can be used from within a TLB
shootdown sequence, which is essentially a spinlock-protected critical
section, and a mutex must not be taken while holding a spinlock.
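
The rule being respected here: a mutex may put the caller to sleep, which is
forbidden while any spinlock is held. A minimal editorial sketch of the
forbidden pattern (the lock names are hypothetical; irq_spinlock_lock() and
mutex_lock() are the real HelenOS synchronization primitives):

    irq_spinlock_lock(&shootdown_lock, true);  /* TLB shootdown sequence begins */
    mutex_lock(&page_ht_mutex);                /* a mutex may sleep -- forbidden
                                                  while a spinlock is held */

Hence the page hash table lock is now an IRQ spinlock, which keeps the (now
smaller) critical sections non-blocking.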

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genarchmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation (VAT) for global page hash table.
 */

#include <genarch/mm/page_ht.h>
#include <mm/page.h>
#include <arch/mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/as.h>
#include <arch/mm/asid.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
#include <memstr.h>
#include <adt/hash_table.h>
#include <align.h>

static size_t hash(sysarg_t[]);
static bool compare(sysarg_t[], size_t, link_t *);
static void remove_callback(link_t *);

static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void ht_mapping_remove(as_t *, uintptr_t);
static bool ht_mapping_find(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_update(as_t *, uintptr_t, bool, pte_t *);
static void ht_mapping_make_global(uintptr_t, size_t);

slab_cache_t *pte_cache = NULL;

/**
 * This lock protects the page hash table. It must be acquired
 * after the address space lock and after any address space area
 * locks.
 */
IRQ_SPINLOCK_STATIC_INITIALIZE(page_ht_lock);

/** Page hash table.
 *
 * The page hash table may be accessed only when page_ht_lock is held.
 */
hash_table_t page_ht;

/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
    .hash = hash,
    .compare = compare,
    .remove_callback = remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
    .mapping_insert = ht_mapping_insert,
    .mapping_remove = ht_mapping_remove,
    .mapping_find = ht_mapping_find,
    .mapping_update = ht_mapping_update,
    .mapping_make_global = ht_mapping_make_global
};

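/*
 * Editorial sketch (not part of the original file; assumes the generic
 * dispatch layer in mm/page.c): an architecture selects this VAT
 * implementation by pointing the generic dispatch pointer at the structure
 * above, after which callers use the page_mapping_* wrappers rather than
 * calling ht_mapping_* directly:
 *
 *    page_mapping_operations = &ht_mapping_operations;
 */
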
/** Compute page hash table index.
 *
 * @param key Array of two keys (i.e. page and address space).
 *
 * @return Index into page hash table.
 */
size_t hash(sysarg_t key[])
{
    as_t *as = (as_t *) key[KEY_AS];
    uintptr_t page = (uintptr_t) key[KEY_PAGE];

    /*
     * Virtual page addresses have roughly the same probability
     * of occurring. The least significant bits of the VPN form
     * the base of the hash index.
     */
    size_t index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));

    /*
     * Address space structures are likely to be allocated from
     * similar addresses. Their least significant bits are mixed
     * into the hash index as well.
     */
    index |= ((sysarg_t) as) & (PAGE_HT_ENTRIES - 1);

    return index;
}

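/*
 * Worked example (editorial; assumes PAGE_WIDTH == 12 and
 * PAGE_HT_ENTRIES == 1 << 13, i.e. an 8192-slot table):
 *
 *    page = 0x12345000 -> VPN = 0x12345 -> 0x12345 & 0x1fff = 0x0345
 *    as   = (as_t *) 0x8001e00         -> 0x8001e00 & 0x1fff = 0x1e00
 *    index = 0x0345 | 0x1e00 = 0x1f45
 */
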
/** Compare page hash table item with page and/or address space.
 *
 * @param key  Array of one or two keys (i.e. page and/or address space).
 * @param keys Number of keys passed.
 * @param item Item to compare the keys with.
 *
 * @return true on match, false otherwise.
 */
bool compare(sysarg_t key[], size_t keys, link_t *item)
{
    ASSERT(item);
    ASSERT(keys > 0);
    ASSERT(keys <= PAGE_HT_KEYS);

    /*
     * Convert item to PTE.
     */
    pte_t *pte = hash_table_get_instance(item, pte_t, link);

    if (keys == PAGE_HT_KEYS)
        return (key[KEY_AS] == (uintptr_t) pte->as) &&
            (key[KEY_PAGE] == pte->page);

    return (key[KEY_AS] == (uintptr_t) pte->as);
}

/** Callback on page hash table item removal.
 *
 * @param item Page hash table item being removed.
 */
void remove_callback(link_t *item)
{
    ASSERT(item);

    /*
     * Convert item to PTE.
     */
    pte_t *pte = hash_table_get_instance(item, pte_t, link);

    slab_free(pte_cache, pte);
}

/** Map page to frame using page hash table.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as    Address space to which page belongs.
 * @param page  Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
    sysarg_t key[2] = {
        (uintptr_t) as,
        page = ALIGN_DOWN(page, PAGE_SIZE)
    };

    ASSERT(page_table_locked(as));

    irq_spinlock_lock(&page_ht_lock, true);

    if (!hash_table_find(&page_ht, key)) {
        pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
        ASSERT(pte != NULL);

        pte->g = (flags & PAGE_GLOBAL) != 0;
        pte->x = (flags & PAGE_EXEC) != 0;
        pte->w = (flags & PAGE_WRITE) != 0;
        pte->k = !(flags & PAGE_USER);
        pte->c = (flags & PAGE_CACHEABLE) != 0;
        pte->p = !(flags & PAGE_NOT_PRESENT);
        pte->a = false;
        pte->d = false;

        pte->as = as;
        pte->page = ALIGN_DOWN(page, PAGE_SIZE);
        pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);

        /*
         * Make sure that a concurrent ht_mapping_find() will see the
         * new entry only after it is fully initialized.
         */
        write_barrier();

        hash_table_insert(&page_ht, key, &pte->link);
    }

    irq_spinlock_unlock(&page_ht_lock, true);
}

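/*
 * Usage sketch (editorial, hypothetical caller): a page fault handler is
 * expected to establish the mapping through the generic wrapper while the
 * page table lock is held, satisfying the assertion above:
 *
 *    page_table_lock(as, true);
 *    page_mapping_insert(as, virt, frame,
 *        PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
 *    page_table_unlock(as, true);
 */
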
/** Remove mapping of page from page hash table.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void ht_mapping_remove(as_t *as, uintptr_t page)
{
    sysarg_t key[2] = {
        (uintptr_t) as,
        page = ALIGN_DOWN(page, PAGE_SIZE)
    };

    ASSERT(page_table_locked(as));

    irq_spinlock_lock(&page_ht_lock, true);

    /*
     * Note that removed PTEs will be freed
     * by remove_callback().
     */
    hash_table_remove(&page_ht, key, 2);

    irq_spinlock_unlock(&page_ht_lock, true);
}

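/*
 * Sketch of the shootdown pairing mentioned above (editorial; assumes the
 * generic TLB shootdown API in mm/tlb.c). This is also why page_ht_lock must
 * be a spinlock: the removal runs inside a spinlock-protected sequence:
 *
 *    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, page, 1);
 *    page_mapping_remove(as, page);
 *    tlb_invalidate_pages(as->asid, page, 1);
 *    tlb_shootdown_finalize(ipl);
 */
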
static pte_t *ht_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
    sysarg_t key[2] = {
        (uintptr_t) as,
        page = ALIGN_DOWN(page, PAGE_SIZE)
    };

    ASSERT(nolock || page_table_locked(as));

    link_t *cur = hash_table_find(&page_ht, key);
    if (cur)
        return hash_table_get_instance(cur, pte_t, link);

    return NULL;
}

/** Find mapping for virtual page in page hash table.
 *
 * @param as        Address space to which page belongs.
 * @param page      Virtual page.
 * @param nolock    True if the page tables need not be locked.
 * @param[out] pte  Structure that will receive a copy of the found PTE.
 *
 * @return True if the mapping was found, false otherwise.
 */
bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    irq_spinlock_lock(&page_ht_lock, true);

    pte_t *t = ht_mapping_find_internal(as, page, nolock);
    if (t)
        *pte = *t;

    irq_spinlock_unlock(&page_ht_lock, true);

    return t != NULL;
}

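/*
 * Usage sketch (editorial, hypothetical caller): because *pte receives a
 * copy, the caller may inspect the result after the hash table lock has been
 * dropped -- the property that let the critical sections shrink:
 *
 *    pte_t pte;
 *    if (page_mapping_find(as, virt, false, &pte) && pte.p) {
 *        // pte is a snapshot, not the live hash table entry
 *        use(pte.frame);
 *    }
 */
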
/** Update mapping for virtual page in page hash table.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param pte    New PTE.
 */
void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    irq_spinlock_lock(&page_ht_lock, true);

    pte_t *t = ht_mapping_find_internal(as, page, nolock);
    if (!t)
        panic("Updating non-existent PTE");

    /* Only the accessed and dirty bits may legitimately change. */
    ASSERT(pte->as == t->as);
    ASSERT(pte->page == t->page);
    ASSERT(pte->frame == t->frame);
    ASSERT(pte->g == t->g);
    ASSERT(pte->x == t->x);
    ASSERT(pte->w == t->w);
    ASSERT(pte->k == t->k);
    ASSERT(pte->c == t->c);
    ASSERT(pte->p == t->p);

    t->a = pte->a;
    t->d = pte->d;

    irq_spinlock_unlock(&page_ht_lock, true);
}

void ht_mapping_make_global(uintptr_t base, size_t size)
{
    /* nothing to do */
}

/** @}
 */