source: mainline/kernel/generic/src/mm/km.c@ 314f4b59

Last change on this file since 314f4b59 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply the relevant conversions on load/save,
without affecting the remote repository. IMO, it makes more sense to adopt
the simpler rule.

/*
 * Copyright (c) 2011 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Kernel virtual memory setup.
 */

#include <mm/km.h>
#include <arch/mm/km.h>
#include <assert.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/asid.h>
#include <config.h>
#include <typedefs.h>
#include <lib/ra.h>
#include <arch.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>
#include <proc/thread.h>

static ra_arena_t *km_ni_arena;

#define DEFERRED_PAGES_MAX (PAGE_SIZE / sizeof(uintptr_t))

/** Number of freed pages in the deferred buffer. */
static volatile unsigned deferred_pages;
/** Buffer of deferred freed pages. */
static uintptr_t deferred_page[DEFERRED_PAGES_MAX];

/** Flush the buffer of deferred freed pages.
 *
 * @return Number of freed pages.
 */
static unsigned km_flush_deferred(void)
{
	unsigned i = 0;
	ipl_t ipl;

	ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);

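	/*
	 * Unmap each deferred page and return its virtual range to the
	 * kernel non-identity arena. The underlying physical frames are
	 * left allocated.
	 */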
	for (i = 0; i < deferred_pages; i++) {
		page_mapping_remove(AS_KERNEL, deferred_page[i]);
		km_page_free(deferred_page[i], PAGE_SIZE);
	}

	tlb_invalidate_asid(ASID_KERNEL);

	as_invalidate_translation_cache(AS_KERNEL, 0, -1);
	tlb_shootdown_finalize(ipl);

	return i;
}

/** Architecture dependent setup of identity-mapped kernel memory. */
void km_identity_init(void)
{
	km_identity_arch_init();
	config.identity_configured = true;
}

/** Architecture dependent setup of non-identity-mapped kernel memory. */
void km_non_identity_init(void)
{
	km_ni_arena = ra_arena_create();
	assert(km_ni_arena != NULL);
	km_non_identity_arch_init();
	config.non_identity_configured = true;
}

bool km_is_non_identity(uintptr_t addr)
{
	return km_is_non_identity_arch(addr);
}

void km_non_identity_span_add(uintptr_t base, size_t size)
{
	bool span_added;

	page_mapping_make_global(base, size);

	span_added = ra_span_add(km_ni_arena, base, size);
	assert(span_added);
}

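/** Allocate pages from the kernel non-identity virtual address space.
 *
 * @param size  Size of the virtual area in bytes.
 * @param align Requested alignment of the area in bytes.
 *
 * @return Base virtual address of the allocated area or NULL on failure.
 */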
uintptr_t km_page_alloc(size_t size, size_t align)
{
	uintptr_t base;
	if (ra_alloc(km_ni_arena, size, align, &base))
		return base;
	else
		return (uintptr_t) NULL;
}

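/** Return a virtual area to the kernel non-identity address space.
 *
 * @param page Base virtual address of the area being freed.
 * @param size Size of the area in bytes.
 */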
void km_page_free(uintptr_t page, size_t size)
{
	ra_free(km_ni_arena, page, size);
}

static uintptr_t
km_map_aligned(uintptr_t paddr, size_t size, unsigned int flags)
{
	uintptr_t vaddr;
	size_t align;
	uintptr_t offs;

	assert(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr);
	assert(ALIGN_UP(size, FRAME_SIZE) == size);

	/* Enforce natural or at least PAGE_SIZE alignment. */
	align = ispwr2(size) ? size : (1U << (fnzb(size) + 1));
	vaddr = km_page_alloc(size, max(PAGE_SIZE, align));

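	/* Map each page of the new virtual area onto its physical frame. */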
	page_table_lock(AS_KERNEL, true);
	for (offs = 0; offs < size; offs += PAGE_SIZE) {
		page_mapping_insert(AS_KERNEL, vaddr + offs, paddr + offs,
		    flags);
	}
	page_table_unlock(AS_KERNEL, true);

	return vaddr;
}

static void km_unmap_aligned(uintptr_t vaddr, size_t size)
{
	uintptr_t offs;
	ipl_t ipl;

	assert(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr);
	assert(ALIGN_UP(size, PAGE_SIZE) == size);

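	/*
	 * Remove the mappings under the page table lock and invalidate any
	 * stale TLB entries for the kernel address space.
	 */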
	page_table_lock(AS_KERNEL, true);

	ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);

	for (offs = 0; offs < size; offs += PAGE_SIZE)
		page_mapping_remove(AS_KERNEL, vaddr + offs);

	tlb_invalidate_asid(ASID_KERNEL);

	as_invalidate_translation_cache(AS_KERNEL, 0, -1);
	tlb_shootdown_finalize(ipl);
	page_table_unlock(AS_KERNEL, true);

	km_page_free(vaddr, size);
}

/** Map a piece of physical address space into the virtual address space.
 *
 * @param paddr Physical address to be mapped. May be unaligned.
 * @param size  Size of area starting at paddr to be mapped.
 * @param flags Protection flags to be used for the mapping.
 *
 * @return New virtual address mapped to paddr.
 */
uintptr_t km_map(uintptr_t paddr, size_t size, unsigned int flags)
{
	uintptr_t page;
	size_t offs;

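	/*
	 * Map the frame-aligned region that covers [paddr, paddr + size) and
	 * preserve the sub-frame offset in the returned virtual address.
	 */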
	offs = paddr - ALIGN_DOWN(paddr, FRAME_SIZE);
	page = km_map_aligned(ALIGN_DOWN(paddr, FRAME_SIZE),
	    ALIGN_UP(size + offs, FRAME_SIZE), flags);

	return page + offs;
}

/** Unmap a piece of virtual address space.
 *
 * @param vaddr Virtual address to be unmapped. May be unaligned, but
 *              it must be a value previously returned by km_map().
 * @param size  Size of area starting at vaddr to be unmapped.
 */
void km_unmap(uintptr_t vaddr, size_t size)
{
	size_t offs;

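	/* Recover the page-aligned base and unmap the whole covered range. */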
	offs = vaddr - ALIGN_DOWN(vaddr, PAGE_SIZE);
	km_unmap_aligned(ALIGN_DOWN(vaddr, PAGE_SIZE),
	    ALIGN_UP(size + offs, PAGE_SIZE));
}

/** Unmap kernel non-identity page.
 *
 * @param[in] page Non-identity page to be unmapped.
 */
static void km_unmap_deferred(uintptr_t page)
{
	page_table_lock(AS_KERNEL, true);

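	/* If the deferred buffer is full, flush it before recording this page. */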
	if (deferred_pages == DEFERRED_PAGES_MAX) {
		(void) km_flush_deferred();
		deferred_pages = 0;
	}

	deferred_page[deferred_pages++] = page;

	page_table_unlock(AS_KERNEL, true);
}

/** Create a temporary page.
 *
 * The page is mapped read/write to a newly allocated frame of physical memory.
 * The page must be returned back to the system by a call to
 * km_temporary_page_put().
 *
 * @param[inout] framep Pointer to a variable which will receive the physical
 *                      address of the allocated frame.
 * @param[in] flags     Frame allocation flags. FRAME_NONE, FRAME_NO_RESERVE
 *                      and FRAME_ATOMIC bits are allowed.
 * @return Virtual address of the allocated frame.
 */
uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags)
{
	assert(THREAD);
	assert(framep);
	assert(!(flags & ~(FRAME_NO_RESERVE | FRAME_ATOMIC)));

	/*
	 * Allocate a frame, preferably from high memory.
	 */
	uintptr_t page;
	uintptr_t frame;

	frame = frame_alloc(1, FRAME_HIGHMEM | FRAME_ATOMIC | flags, 0);
	if (frame) {
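		/* Map the high-memory frame into the kernel non-identity area. */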
		page = km_map(frame, PAGE_SIZE,
		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
		if (!page) {
			frame_free(frame, 1);
			goto lowmem;
		}
	} else {
lowmem:
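		/*
		 * Fall back to low memory, which the kernel can access through
		 * its identity mapping, so no new mapping needs to be created.
		 */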
		frame = frame_alloc(1, FRAME_LOWMEM | flags, 0);
		if (!frame)
			return (uintptr_t) NULL;

		page = PA2KA(frame);
	}

	*framep = frame;
	return page;
}

/** Destroy a temporary page.
 *
 * This function destroys a temporary page previously created by
 * km_temporary_page_get(). The page destruction may be immediate or deferred.
 * The frame mapped by the destroyed page is not freed.
 *
 * @param[in] page Temporary page to be destroyed.
 */
void km_temporary_page_put(uintptr_t page)
{
	assert(THREAD);

	if (km_is_non_identity(page))
		km_unmap_deferred(page);
}

/** @}
 */