source: mainline/kernel/generic/src/mm/backend_anon.c@ae7d03c

Last change on this file since ae7d03c was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: either remove the whitespace, or keep empty lines
indented to the level of the surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, the indented style can be used
locally by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Backend for anonymous memory address space areas.
 *
 */

#include <assert.h>
#include <mm/as.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/km.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <errno.h>
#include <typedefs.h>
#include <align.h>
#include <mem.h>
#include <arch.h>

static bool anon_create(as_area_t *);
static bool anon_resize(as_area_t *, size_t);
static void anon_share(as_area_t *);
static void anon_destroy(as_area_t *);

static bool anon_is_resizable(as_area_t *);
static bool anon_is_shareable(as_area_t *);

static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);

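/** Operations implemented by the anonymous memory backend. */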
mem_backend_t anon_backend = {
        .create = anon_create,
        .resize = anon_resize,
        .share = anon_share,
        .destroy = anon_destroy,

        .is_resizable = anon_is_resizable,
        .is_shareable = anon_is_shareable,

        .page_fault = anon_page_fault,
        .frame_free = anon_frame_free,

        .create_shared_data = NULL,
        .destroy_shared_data = NULL
};

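/** Create the anonymous backend for an address space area.
 *
 * Unless the area uses late reservation, physical memory is reserved
 * for all of its pages up front.
 *
 * @param area Address space area.
 *
 * @return True on success, false if the memory could not be reserved.
 */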
bool anon_create(as_area_t *area)
{
        if (area->flags & AS_AREA_LATE_RESERVE)
                return true;

        return reserve_try_alloc(area->pages);
}

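/** Resize the anonymous backend of an address space area.
 *
 * The reservation is grown or shrunk to match the new size. Late-reserve
 * areas hold no up-front reservation, so nothing needs to be adjusted.
 *
 * @param area Address space area.
 * @param new_pages New size of the area in pages.
 *
 * @return True on success, false if additional memory could not be reserved.
 */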
bool anon_resize(as_area_t *area, size_t new_pages)
{
        if (area->flags & AS_AREA_LATE_RESERVE)
                return true;

        if (new_pages > area->pages)
                return reserve_try_alloc(new_pages - area->pages);
        else if (new_pages < area->pages)
                reserve_free(area->pages - new_pages);

        return true;
}

/** Share the anonymous address space area.
 *
 * Sharing of an anonymous area is done by duplicating its entire mapping
 * to the pagemap. Page faults will primarily search for frames there.
 *
 * The address space and address space area must be already locked.
 *
 * @param area Address space area to be shared.
 */
void anon_share(as_area_t *area)
{
        assert(mutex_locked(&area->as->lock));
        assert(mutex_locked(&area->lock));
        assert(!(area->flags & AS_AREA_LATE_RESERVE));

        /*
         * Copy used portions of the area to sh_info's page map.
         */
        mutex_lock(&area->sh_info->lock);
        list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
            node) {
                unsigned int i;

                for (i = 0; i < node->keys; i++) {
                        uintptr_t base = node->key[i];
                        size_t count = (size_t) node->value[i];
                        unsigned int j;

                        for (j = 0; j < count; j++) {
                                pte_t pte;
                                bool found;

                                page_table_lock(area->as, false);
                                found = page_mapping_find(area->as,
                                    base + P2SZ(j), false, &pte);

                                assert(found);
                                assert(PTE_VALID(&pte));
                                assert(PTE_PRESENT(&pte));

                                btree_insert(&area->sh_info->pagemap,
                                    (base + P2SZ(j)) - area->base,
                                    (void *) PTE_GET_FRAME(&pte), NULL);
                                page_table_unlock(area->as, false);

                                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
                                frame_reference_add(pfn);
                        }

                }
        }
        mutex_unlock(&area->sh_info->lock);
}

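/** Destroy the anonymous backend of an address space area.
 *
 * The reservation made when the area was created or resized is returned.
 * Late-reserve areas are unreserved frame by frame in anon_frame_free()
 * instead, so nothing is done for them here.
 *
 * @param area Address space area being destroyed.
 */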
void anon_destroy(as_area_t *area)
{
        if (area->flags & AS_AREA_LATE_RESERVE)
                return;

        reserve_free(area->pages);
}

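/** Return whether the area can be resized.
 *
 * Anonymous areas are always resizable.
 *
 * @param area Address space area.
 *
 * @return True.
 */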
bool anon_is_resizable(as_area_t *area)
{
        return true;
}

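/** Return whether the area can be shared.
 *
 * @param area Address space area.
 *
 * @return True unless the area uses late reservation.
 */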
bool anon_is_shareable(as_area_t *area)
{
        return !(area->flags & AS_AREA_LATE_RESERVE);
}

/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param upage Faulting virtual page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_OK on success (i.e. serviced), AS_PF_FAULT on failure, or
 *     AS_PF_SILENT when memory for a late-reserve area could not be
 *     reserved.
 */
int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
{
        uintptr_t kpage;
        uintptr_t frame;

        assert(page_table_locked(AS));
        assert(mutex_locked(&area->lock));
        assert(IS_ALIGNED(upage, PAGE_SIZE));

        if (!as_area_check_access(area, access))
                return AS_PF_FAULT;

        mutex_lock(&area->sh_info->lock);
        if (area->sh_info->shared) {
                btree_node_t *leaf;

                /*
                 * The area is shared; chances are that the mapping can be
                 * found in the pagemap of the address space area share info
                 * structure.
                 * In the case that the pagemap does not contain the respective
                 * mapping, a new frame is allocated and the mapping is created.
                 */
                frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
                    upage - area->base, &leaf);
                if (!frame) {
                        bool allocate = true;
                        unsigned int i;

                        /*
                         * Zero is also a valid frame address, so a zero result
                         * does not necessarily mean a missing mapping.
                         * Check the leaf keys to tell the two cases apart.
                         */
                        for (i = 0; i < leaf->keys; i++) {
                                if (leaf->key[i] == upage - area->base) {
                                        allocate = false;
                                        break;
                                }
                        }
                        if (allocate) {
                                kpage = km_temporary_page_get(&frame,
                                    FRAME_NO_RESERVE);
                                memsetb((void *) kpage, PAGE_SIZE, 0);
                                km_temporary_page_put(kpage);

                                /*
                                 * Insert the address of the newly allocated
                                 * frame into the pagemap.
                                 */
                                btree_insert(&area->sh_info->pagemap,
                                    upage - area->base, (void *) frame, leaf);
                        }
                }
                frame_reference_add(ADDR2PFN(frame));
        } else {

                /*
                 * In general, there are several reasons that could have
                 * caused this fault.
                 *
                 * - non-existent mapping: the area is an anonymous
                 *   area (e.g. heap or stack) and so far has not been
                 *   allocated a frame for the faulting page
                 *
                 * - non-present mapping: another possibility,
                 *   currently not implemented, would be frame
                 *   reuse; when this becomes a possibility,
                 *   do not forget to distinguish between
                 *   the different causes
                 */

                if (area->flags & AS_AREA_LATE_RESERVE) {
                        /*
                         * Reserve the memory for this page now.
                         */
                        if (!reserve_try_alloc(1)) {
                                mutex_unlock(&area->sh_info->lock);
                                return AS_PF_SILENT;
                        }
                }

                kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
                memsetb((void *) kpage, PAGE_SIZE, 0);
                km_temporary_page_put(kpage);
        }
        mutex_unlock(&area->sh_info->lock);

        /*
         * Map 'upage' to 'frame'.
         * Note that TLB shootdown is not attempted as only new information is
         * being inserted into page tables.
         */
        page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
        if (!used_space_insert(area, upage, 1))
                panic("Cannot insert used space.");

        return AS_PF_OK;
}

/** Free a frame that is backed by the anonymous memory backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Address space area the frame belongs to.
 * @param page Virtual address of the page corresponding to the frame.
 * @param frame Frame to be released.
 */
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
        assert(page_table_locked(area->as));
        assert(mutex_locked(&area->lock));

        if (area->flags & AS_AREA_LATE_RESERVE) {
                /*
                 * In the case of late-reserve areas, physical memory will not
                 * be unreserved when the area is destroyed, so we need to use
                 * the normal, unreserving frame_free().
                 */
                frame_free(frame, 1);
        } else {
                /*
                 * The reserve will be given back when the area is destroyed or
                 * resized, so use frame_free_noreserve(), which does not
                 * manipulate the reserve; otherwise it would be given back
                 * twice.
                 */
                frame_free_noreserve(frame, 1);
        }
}

/** @}
 */