source: mainline/kernel/generic/src/mm/backend_anon.c@ 89dcf93

Last change on this file since 89dcf93 was 5df1963, checked in by Martin Decky <martin@…>, 12 years ago

The bitmap frame allocator does not keep track of the size of allocated frame blocks.
To avoid memory leaks, the number of allocated frames needs to be passed explicitly during deallocation.

  • Property mode set to 100644
File size: 8.3 KB
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for anonymous memory address space areas.
 *
 */

#include <mm/as.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/km.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <errno.h>
#include <typedefs.h>
#include <align.h>
#include <memstr.h>
#include <arch.h>

static bool anon_create(as_area_t *);
static bool anon_resize(as_area_t *, size_t);
static void anon_share(as_area_t *);
static void anon_destroy(as_area_t *);

static bool anon_is_resizable(as_area_t *);
static bool anon_is_shareable(as_area_t *);

static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);

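/** Operations of the anonymous memory backend. */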
mem_backend_t anon_backend = {
	.create = anon_create,
	.resize = anon_resize,
	.share = anon_share,
	.destroy = anon_destroy,

	.is_resizable = anon_is_resizable,
	.is_shareable = anon_is_shareable,

	.page_fault = anon_page_fault,
	.frame_free = anon_frame_free,
};

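/** Create the anonymous backend data of an address space area.
 *
 * Unless the area uses late reservation (AS_AREA_LATE_RESERVE), physical
 * memory for all of its pages is reserved up front.
 *
 * @param area Address space area being created.
 *
 * @return True on success, false if the memory reservation failed.
 */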
bool anon_create(as_area_t *area)
{
	if (area->flags & AS_AREA_LATE_RESERVE)
		return true;

	return reserve_try_alloc(area->pages);
}

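/** Resize the anonymous backend data of an address space area.
 *
 * Unless the area uses late reservation, the memory reservation is enlarged
 * or shrunk to match the new number of pages.
 *
 * @param area Address space area being resized.
 * @param new_pages New size of the area in pages.
 *
 * @return True on success, false if the additional reservation failed.
 */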
bool anon_resize(as_area_t *area, size_t new_pages)
{
	if (area->flags & AS_AREA_LATE_RESERVE)
		return true;

	if (new_pages > area->pages)
		return reserve_try_alloc(new_pages - area->pages);
	else if (new_pages < area->pages)
		reserve_free(area->pages - new_pages);

	return true;
}

/** Share the anonymous address space area.
 *
 * Sharing of an anonymous area is done by duplicating its entire mapping
 * into the pagemap. Page faults will then primarily search for frames there.
 *
 * The address space and address space area must be already locked.
 *
 * @param area Address space area to be shared.
 */
void anon_share(as_area_t *area)
{
	ASSERT(mutex_locked(&area->as->lock));
	ASSERT(mutex_locked(&area->lock));
	ASSERT(!(area->flags & AS_AREA_LATE_RESERVE));

	/*
	 * Copy used portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
	    node) {
		unsigned int i;

		for (i = 0; i < node->keys; i++) {
			uintptr_t base = node->key[i];
			size_t count = (size_t) node->value[i];
			unsigned int j;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as,
				    base + P2SZ(j), false);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap,
				    (base + P2SZ(j)) - area->base,
				    (void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);

				pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
				frame_reference_add(pfn);
			}

		}
	}
	mutex_unlock(&area->sh_info->lock);
}

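/** Destroy the anonymous backend data of an address space area.
 *
 * Unless the area uses late reservation, the memory reservation made when the
 * area was created is returned.
 *
 * @param area Address space area being destroyed.
 */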
void anon_destroy(as_area_t *area)
{
	if (area->flags & AS_AREA_LATE_RESERVE)
		return;

	reserve_free(area->pages);
}

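/** Return whether the anonymous area can be resized.
 *
 * @param area Ignored; anonymous areas are always resizable.
 *
 * @return Always true.
 */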
bool anon_is_resizable(as_area_t *area)
{
	return true;
}

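/** Return whether the anonymous area can be shared.
 *
 * @param area Address space area in question.
 *
 * @return True unless the area uses late reservation (AS_AREA_LATE_RESERVE).
 */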
bool anon_is_shareable(as_area_t *area)
{
	return !(area->flags & AS_AREA_LATE_RESERVE);
}

/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param upage Faulting virtual page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_OK on success (i.e. serviced), AS_PF_SILENT if the late
 *     reservation of memory failed, or AS_PF_FAULT on failure.
 */
int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
{
	uintptr_t kpage;
	uintptr_t frame;

	ASSERT(page_table_locked(AS));
	ASSERT(mutex_locked(&area->lock));
	ASSERT(IS_ALIGNED(upage, PAGE_SIZE));

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (area->sh_info) {
		btree_node_t *leaf;

		/*
		 * The area is shared; chances are that the mapping can be
		 * found in the pagemap of the address space area's share info
		 * structure. In case the pagemap does not contain the
		 * respective mapping, a new frame is allocated and the
		 * mapping is created.
		 */
		mutex_lock(&area->sh_info->lock);
		frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
		    upage - area->base, &leaf);
		if (!frame) {
			bool allocate = true;
			unsigned int i;

			/*
			 * Zero can be returned as a valid frame address.
			 * Just a small workaround.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == upage - area->base) {
					allocate = false;
					break;
				}
			}
			if (allocate) {
				kpage = km_temporary_page_get(&frame,
				    FRAME_NO_RESERVE);
				memsetb((void *) kpage, PAGE_SIZE, 0);
				km_temporary_page_put(kpage);

				/*
				 * Insert the address of the newly allocated
				 * frame to the pagemap.
				 */
				btree_insert(&area->sh_info->pagemap,
				    upage - area->base, (void *) frame, leaf);
			}
		}
		frame_reference_add(ADDR2PFN(frame));
		mutex_unlock(&area->sh_info->lock);
	} else {

		/*
		 * In general, there can be several reasons that
		 * can have caused this fault.
		 *
		 * - non-existent mapping: the area is an anonymous
		 *   area (e.g. heap or stack) and so far has not been
		 *   allocated a frame for the faulting page
		 *
		 * - non-present mapping: another possibility,
		 *   currently not implemented, would be frame
		 *   reuse; when this becomes a possibility,
		 *   do not forget to distinguish between
		 *   the different causes
		 */

		if (area->flags & AS_AREA_LATE_RESERVE) {
			/*
			 * Reserve the memory for this page now.
			 */
			if (!reserve_try_alloc(1))
				return AS_PF_SILENT;
		}

		kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
		memsetb((void *) kpage, PAGE_SIZE, 0);
		km_temporary_page_put(kpage);
	}

	/*
	 * Map 'upage' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is
	 * being inserted into page tables.
	 */
	page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
	if (!used_space_insert(area, upage, 1))
		panic("Cannot insert used space.");

	return AS_PF_OK;
}

/** Free a frame that is backed by the anonymous memory backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Ignored.
 * @param page Virtual address of the page corresponding to the frame.
 * @param frame Frame to be released.
 */
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	ASSERT(page_table_locked(area->as));
	ASSERT(mutex_locked(&area->lock));

	if (area->flags & AS_AREA_LATE_RESERVE) {
		/*
		 * In the case of late-reserve areas, physical memory will not
		 * be unreserved when the area is destroyed, so we need to use
		 * the normal, unreserving frame_free().
		 */
		frame_free(frame, 1);
	} else {
		/*
		 * The reserve will be given back when the area is destroyed
		 * or resized, so use frame_free_noreserve(), which does not
		 * manipulate the reserve; otherwise the reserve would be
		 * given back twice.
		 */
		frame_free_noreserve(frame, 1);
	}
}

/** @}
 */