source: mainline/kernel/genarch/src/mm/page_ht.c@ bd1fab90

Branches: lfn, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Last change on this file since bd1fab90 was 82cbf8c6, checked in by Jakub Jermar <jakub@…>, 8 years ago

Replace the old hash table implementation in the kernel with the newer one

This replaces the original hash table implementation with the resizable one
already used in uspace. Along the way, the IRQ hash table code was streamlined
and cleaned up.

  • Property mode set to 100644
File size: 8.1 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genarchmm
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Virtual Address Translation (VAT) for global page hash table.
36 */
37
38#include <genarch/mm/page_ht.h>
39#include <mm/page.h>
40#include <arch/mm/page.h>
41#include <mm/frame.h>
42#include <mm/slab.h>
43#include <mm/as.h>
44#include <arch/mm/asid.h>
45#include <typedefs.h>
46#include <arch/asm.h>
47#include <arch/barrier.h>
48#include <synch/spinlock.h>
49#include <arch.h>
50#include <assert.h>
51#include <adt/hash.h>
52#include <adt/hash_table.h>
53#include <align.h>
54
55static size_t ht_hash(const ht_link_t *);
56static size_t ht_key_hash(void *);
57static bool ht_key_equal(void *, const ht_link_t *);
58static void ht_remove_callback(ht_link_t *);
59
60static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
61static void ht_mapping_remove(as_t *, uintptr_t);
62static bool ht_mapping_find(as_t *, uintptr_t, bool, pte_t *);
63static void ht_mapping_update(as_t *, uintptr_t, bool, pte_t *);
64static void ht_mapping_make_global(uintptr_t, size_t);
65
/** Slab cache from which pte_t instances for the page hash table are allocated. */
slab_cache_t *pte_cache = NULL;

/**
 * This lock protects the page hash table. It must be acquired
 * after address space lock and after any address space area
 * locks.
 *
 */
IRQ_SPINLOCK_STATIC_INITIALIZE(page_ht_lock);

/** Page hash table.
 *
 * The page hash table may be accessed only when page_ht_lock is held.
 *
 */
hash_table_t page_ht;

/** Hash table operations for page hash table. */
hash_table_ops_t ht_ops = {
	.hash = ht_hash,
	.key_hash = ht_key_hash,
	.key_equal = ht_key_equal,
	.remove_callback = ht_remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
	.mapping_insert = ht_mapping_insert,
	.mapping_remove = ht_mapping_remove,
	.mapping_find = ht_mapping_find,
	.mapping_update = ht_mapping_update,
	.mapping_make_global = ht_mapping_make_global
};
99
100/** Return the hash of the key stored in the item */
101size_t ht_hash(const ht_link_t *item)
102{
103 pte_t *pte = hash_table_get_inst(item, pte_t, link);
104 size_t hash = 0;
105 hash = hash_combine(hash, (uintptr_t) pte->as);
106 hash = hash_combine(hash, pte->page >> PAGE_WIDTH);
107 return hash;
108}
109
110/** Return the hash of the key. */
111size_t ht_key_hash(void *arg)
112{
113 uintptr_t *key = (uintptr_t *) arg;
114 size_t hash = 0;
115 hash = hash_combine(hash, key[KEY_AS]);
116 hash = hash_combine(hash, key[KEY_PAGE] >> PAGE_WIDTH);
117 return hash;
118}
119
120/** Return true if the key is equal to the item's lookup key. */
121bool ht_key_equal(void *arg, const ht_link_t *item)
122{
123 uintptr_t *key = (uintptr_t *) arg;
124 pte_t *pte = hash_table_get_inst(item, pte_t, link);
125 return (key[KEY_AS] == (uintptr_t) pte->as) &&
126 (key[KEY_PAGE] == pte->page);
127}
128
129/** Callback on page hash table item removal.
130 *
131 * @param item Page hash table item being removed.
132 *
133 */
134void ht_remove_callback(ht_link_t *item)
135{
136 assert(item);
137
138 pte_t *pte = hash_table_get_inst(item, pte_t, link);
139 slab_free(pte_cache, pte);
140}
141
/** Map page to frame using page hash table.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as Address space to which page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 *
 */
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
	/* Lookup key: (address space, page-aligned virtual address). */
	uintptr_t key[2] = {
		[KEY_AS] = (uintptr_t) as,
		[KEY_PAGE] = ALIGN_DOWN(page, PAGE_SIZE)
	};

	assert(page_table_locked(as));

	irq_spinlock_lock(&page_ht_lock, true);

	/* Insert only if no mapping for (as, page) exists yet. */
	if (!hash_table_find(&page_ht, key)) {
		/*
		 * NOTE(review): FRAME_ATOMIC permits slab_alloc() to fail;
		 * the assert below is the only check, so in a non-debug
		 * build an allocation failure would go undetected until the
		 * NULL dereference — confirm this is acceptable here.
		 */
		pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
		assert(pte != NULL);

		/* Decode the mapping flags into individual PTE bits. */
		pte->g = (flags & PAGE_GLOBAL) != 0;
		pte->x = (flags & PAGE_EXEC) != 0;
		pte->w = (flags & PAGE_WRITE) != 0;
		pte->k = !(flags & PAGE_USER);
		pte->c = (flags & PAGE_CACHEABLE) != 0;
		pte->p = !(flags & PAGE_NOT_PRESENT);
		/* Accessed/dirty start clear; hardware/SW walker sets them later. */
		pte->a = false;
		pte->d = false;

		pte->as = as;
		pte->page = ALIGN_DOWN(page, PAGE_SIZE);
		pte->frame = ALIGN_DOWN(frame, FRAME_SIZE);

		/*
		 * Make sure that a concurrent ht_mapping_find() will see the
		 * new entry only after it is fully initialized.
		 */
		write_barrier();

		hash_table_insert(&page_ht, &pte->link);
	}

	irq_spinlock_unlock(&page_ht_lock, true);
}
193
194/** Remove mapping of page from page hash table.
195 *
196 * Remove any mapping of page within address space as.
197 * TLB shootdown should follow in order to make effects of
198 * this call visible.
199 *
200 * @param as Address space to which page belongs.
201 * @param page Virtual address of the page to be demapped.
202 *
203 */
204void ht_mapping_remove(as_t *as, uintptr_t page)
205{
206 uintptr_t key[2] = {
207 [KEY_AS] = (uintptr_t) as,
208 [KEY_PAGE] = ALIGN_DOWN(page, PAGE_SIZE)
209 };
210
211 assert(page_table_locked(as));
212
213 irq_spinlock_lock(&page_ht_lock, true);
214
215 /*
216 * Note that removed PTE's will be freed
217 * by remove_callback().
218 */
219 hash_table_remove(&page_ht, key);
220
221 irq_spinlock_unlock(&page_ht_lock, true);
222}
223
224static pte_t *
225ht_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
226{
227 uintptr_t key[2] = {
228 [KEY_AS] = (uintptr_t) as,
229 [KEY_PAGE] = ALIGN_DOWN(page, PAGE_SIZE)
230 };
231
232 assert(nolock || page_table_locked(as));
233
234 ht_link_t *cur = hash_table_find(&page_ht, key);
235 if (cur)
236 return hash_table_get_inst(cur, pte_t, link);
237
238 return NULL;
239}
240
241/** Find mapping for virtual page in page hash table.
242 *
243 * @param as Address space to which page belongs.
244 * @param page Virtual page.
245 * @param nolock True if the page tables need not be locked.
246 * @param[out] pte Structure that will receive a copy of the found PTE.
247 *
248 * @return True if the mapping was found, false otherwise.
249 */
250bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
251{
252 irq_spinlock_lock(&page_ht_lock, true);
253
254 pte_t *t = ht_mapping_find_internal(as, page, nolock);
255 if (t)
256 *pte = *t;
257
258 irq_spinlock_unlock(&page_ht_lock, true);
259
260 return t != NULL;
261}
262
263/** Update mapping for virtual page in page hash table.
264 *
265 * @param as Address space to which page belongs.
266 * @param page Virtual page.
267 * @param nolock True if the page tables need not be locked.
268 * @param pte New PTE.
269 */
270void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
271{
272 irq_spinlock_lock(&page_ht_lock, true);
273
274 pte_t *t = ht_mapping_find_internal(as, page, nolock);
275 if (!t)
276 panic("Updating non-existent PTE");
277
278 assert(pte->as == t->as);
279 assert(pte->page == t->page);
280 assert(pte->frame == t->frame);
281 assert(pte->g == t->g);
282 assert(pte->x == t->x);
283 assert(pte->w == t->w);
284 assert(pte->k == t->k);
285 assert(pte->c == t->c);
286 assert(pte->p == t->p);
287
288 t->a = pte->a;
289 t->d = pte->d;
290
291 irq_spinlock_unlock(&page_ht_lock, true);
292}
293
/** Make the mappings in the given range global.
 *
 * No-op for the page hash table implementation: globality is tracked
 * per-entry (the g bit set in ht_mapping_insert()), so there is no
 * separate global page table structure to update here.
 *
 * @param base Base address of the range.
 * @param size Size of the range.
 */
void ht_mapping_make_global(uintptr_t base, size_t size)
{
	/* nothing to do */
}
298
299/** @}
300 */
Note: See TracBrowser for help on using the repository browser.