source: mainline/kernel/generic/src/ddi/ddi.c@ a35b458

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since a35b458 was a35b458, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 7 years ago

style: Remove trailing whitespace on _all_ lines, including empty ones, for particular file types.

Command used: tools/srepl '\s\+$' '' -- *.c *.h *.py *.sh *.s *.S *.ag

Currently, whitespace on empty lines is very inconsistent.
There are two basic choices: Either remove the whitespace, or keep empty lines
indented to the level of surrounding code. The former is AFAICT more common,
and also much easier to do automatically.

Alternatively, we could write a script for automatic indentation, and use that
instead. However, if such a script exists, it's possible to use the indented
style locally, by having the editor apply relevant conversions on load/save,
without affecting remote repository. IMO, it makes more sense to adopt
the simpler rule.

  • Property mode set to 100644
File size: 12.0 KB
Line 
1/*
2 * Copyright (c) 2006 Jakub Jermar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/** @addtogroup genericddi
30 * @{
31 */
32
33/**
34 * @file
35 * @brief Device Driver Interface functions.
36 *
37 * This file contains functions that comprise the Device Driver Interface.
38 * These are the functions for mapping physical memory and enabling I/O
39 * space to tasks.
40 */
41
42#include <assert.h>
43#include <ddi/ddi.h>
44#include <proc/task.h>
45#include <security/perm.h>
46#include <mm/frame.h>
47#include <mm/as.h>
48#include <mm/page.h>
49#include <synch/mutex.h>
50#include <syscall/copy.h>
51#include <adt/btree.h>
52#include <arch.h>
53#include <align.h>
54#include <errno.h>
55#include <trace.h>
56#include <bitops.h>
57
58/** This lock protects the parea_btree. */
59static mutex_t parea_lock;
60
61/** B+tree with enabled physical memory areas. */
62static btree_t parea_btree;
63
64/** Initialize DDI.
65 *
66 */
67void ddi_init(void)
68{
69 btree_create(&parea_btree);
70 mutex_initialize(&parea_lock, MUTEX_PASSIVE);
71}
72
73/** Enable piece of physical memory for mapping by physmem_map().
74 *
75 * @param parea Pointer to physical area structure.
76 *
77 */
78void ddi_parea_register(parea_t *parea)
79{
80 mutex_lock(&parea_lock);
81
82 /*
83 * We don't check for overlaps here as the kernel is pretty sane.
84 */
85 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
86
87 mutex_unlock(&parea_lock);
88}
89
90/** Map piece of physical memory into virtual address space of current task.
91 *
92 * @param phys Physical address of the starting frame.
93 * @param pages Number of pages to map.
94 * @param flags Address space area flags for the mapping.
95 * @param virt Virtual address of the starting page.
96 * @param bound Lowest virtual address bound.
97 *
98 * @return EOK on success.
99 * @return EPERM if the caller lacks permissions to use this syscall.
100 * @return EBADMEM if phys is not page aligned.
101 * @return ENOENT if there is no task matching the specified ID or
102 * the physical address space is not enabled for mapping.
103 * @return ENOMEM if there was a problem in creating address space area.
104 *
105 */
/** Map piece of physical memory into virtual address space of current task.
 *
 * @param phys  Physical address of the starting frame.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 * @param virt  Virtual address of the starting page.
 * @param bound Lowest virtual address bound.
 *
 * @return EOK on success.
 * @return EPERM if the caller lacks permissions to use this syscall.
 * @return EBADMEM if phys is not page aligned.
 * @return ENOENT if there is no task matching the specified ID or
 *         the physical address space is not enabled for mapping.
 * @return ENOMEM if there was a problem in creating address space area.
 */
NO_TRACE static errno_t physmem_map(uintptr_t phys, size_t pages,
    unsigned int flags, uintptr_t *virt, uintptr_t bound)
{
	assert(TASK);

	if ((phys % FRAME_SIZE) != 0)
		return EBADMEM;

	/*
	 * Unprivileged tasks are only allowed to map pareas
	 * which are explicitly marked as such.
	 */
	bool priv =
	    ((perm_get(TASK) & PERM_MEM_MANAGER) == PERM_MEM_MANAGER);

	mem_backend_data_t backend_data;
	backend_data.base = phys;
	backend_data.frames = pages;
	backend_data.anonymous = false;

	/*
	 * Check if the memory region is explicitly enabled
	 * for mapping by any parea structure.
	 *
	 * Lock protocol: when a matching parea is found, parea_lock is
	 * intentionally kept locked across the jump to map: so that
	 * parea->mapped can be set atomically with the area creation.
	 * Every other path to map: first sets parea = NULL and releases
	 * the lock.
	 */

	mutex_lock(&parea_lock);
	btree_node_t *nodep;
	parea_t *parea = (parea_t *) btree_search(&parea_btree,
	    (btree_key_t) phys, &nodep);

	if ((parea != NULL) && (parea->frames >= pages)) {
		if ((!priv) && (!parea->unpriv)) {
			mutex_unlock(&parea_lock);
			return EPERM;
		}

		/* parea_lock remains held; released after map: below. */
		goto map;
	}

	parea = NULL;
	mutex_unlock(&parea_lock);

	/*
	 * Check if the memory region is part of physical
	 * memory generally enabled for mapping.
	 */

	irq_spinlock_lock(&zones.lock, true);
	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);

	if (znum == (size_t) -1) {
		/*
		 * Frames not found in any zone
		 * -> assume it is a hardware device and allow mapping
		 * for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	if (zones.info[znum].flags & (ZONE_FIRMWARE | ZONE_RESERVED)) {
		/*
		 * Frames are part of firmware or reserved zone
		 * -> allow mapping for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	irq_spinlock_unlock(&zones.lock, true);
	return ENOENT;

map:
	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
		/*
		 * The address space area was not created.
		 * We report it using ENOMEM.
		 */

		/* Drop parea_lock still held from the parea branch. */
		if (parea != NULL)
			mutex_unlock(&parea_lock);

		return ENOMEM;
	}

	/*
	 * Mapping is created on-demand during page fault.
	 */

	if (parea != NULL) {
		/* Still under parea_lock taken before the goto. */
		parea->mapped = true;
		mutex_unlock(&parea_lock);
	}

	return EOK;
}
211
212NO_TRACE static errno_t physmem_unmap(uintptr_t virt)
213{
214 assert(TASK);
215
216 return as_area_destroy(TASK->as, virt);
217}
218
219/** Wrapper for SYS_PHYSMEM_MAP syscall.
220 *
221 * @param phys Physical base address to map
222 * @param pages Number of pages
223 * @param flags Flags of newly mapped pages
224 * @param virt_ptr Destination virtual address
225 * @param bound Lowest virtual address bound.
226 *
227 * @return 0 on success, otherwise it returns error code found in errno.h
228 *
229 */
230sys_errno_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
231 void *virt_ptr, uintptr_t bound)
232{
233 uintptr_t virt;
234 errno_t rc = copy_from_uspace(&virt, virt_ptr, sizeof(virt));
235 if (rc != EOK)
236 return rc;
237
238 rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags, &virt,
239 bound);
240 if (rc != EOK)
241 return rc;
242
243 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
244 if (rc != EOK) {
245 physmem_unmap((uintptr_t) virt);
246 return rc;
247 }
248
249 return EOK;
250}
251
252sys_errno_t sys_physmem_unmap(uintptr_t virt)
253{
254 return physmem_unmap(virt);
255}
256
257/** Enable range of I/O space for task.
258 *
259 * @param id Task ID of the destination task.
260 * @param ioaddr Starting I/O address.
261 * @param size Size of the enabled I/O space.
262 *
263 * @return 0 on success, EPERM if the caller lacks permissions to use this
264 * syscall, ENOENT if there is no task matching the specified ID.
265 *
266 */
/** Enable range of I/O space for task.
 *
 * @param id     Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size   Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks permissions to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 */
NO_TRACE static errno_t iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	perm_t perms = perm_get(TASK);
	if (!(perms & PERM_IO_MANAGER))
		return EPERM;

	irq_spinlock_lock(&tasks_lock, true);

	/* Lookup must happen under tasks_lock to pin the task. */
	task_t *task = task_find_by_id(id);

	if ((!task) || (!container_check(CONTAINER, task->container))) {
		/*
		 * There is no task with the specified ID
		 * or the task belongs to a different security
		 * context.
		 */
		irq_spinlock_unlock(&tasks_lock, true);
		return ENOENT;
	}

	/* Lock the task and release the lock protecting tasks_btree. */
	irq_spinlock_exchange(&tasks_lock, &task->lock);
	/* Arch-specific: grant the task access to [ioaddr, ioaddr + size). */
	errno_t rc = ddi_iospace_enable_arch(task, ioaddr, size);
	irq_spinlock_unlock(&task->lock, true);

	return rc;
}
297
298/** Disable range of I/O space for task.
299 *
300 * @param id Task ID of the destination task.
301 * @param ioaddr Starting I/O address.
302 * @param size Size of the enabled I/O space.
303 *
304 * @return 0 on success, EPERM if the caller lacks permissions to use this
305 * syscall, ENOENT if there is no task matching the specified ID.
306 *
307 */
/** Disable range of I/O space for task.
 *
 * @param id     Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size   Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks permissions to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 */
NO_TRACE static errno_t iospace_disable(task_id_t id, uintptr_t ioaddr, size_t size)
{
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	perm_t perms = perm_get(TASK);
	if (!(perms & PERM_IO_MANAGER))
		return EPERM;

	irq_spinlock_lock(&tasks_lock, true);

	/* Lookup must happen under tasks_lock to pin the task. */
	task_t *task = task_find_by_id(id);

	if ((!task) || (!container_check(CONTAINER, task->container))) {
		/*
		 * There is no task with the specified ID
		 * or the task belongs to a different security
		 * context.
		 */
		irq_spinlock_unlock(&tasks_lock, true);
		return ENOENT;
	}

	/* Lock the task and release the lock protecting tasks_btree. */
	irq_spinlock_exchange(&tasks_lock, &task->lock);
	/* Arch-specific: revoke access to [ioaddr, ioaddr + size). */
	errno_t rc = ddi_iospace_disable_arch(task, ioaddr, size);
	irq_spinlock_unlock(&task->lock, true);

	return rc;
}
338
339/** Wrapper for SYS_ENABLE_IOSPACE syscall.
340 *
341 * @param uspace_io_arg User space address of DDI argument structure.
342 *
343 * @return 0 on success, otherwise it returns error code found in errno.h
344 *
345 */
346sys_errno_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
347{
348 ddi_ioarg_t arg;
349 errno_t rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
350 if (rc != EOK)
351 return (sys_errno_t) rc;
352
353 return (sys_errno_t) iospace_enable((task_id_t) arg.task_id,
354 (uintptr_t) arg.ioaddr, (size_t) arg.size);
355}
356
357sys_errno_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
358{
359 ddi_ioarg_t arg;
360 errno_t rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
361 if (rc != EOK)
362 return (sys_errno_t) rc;
363
364 return (sys_errno_t) iospace_disable((task_id_t) arg.task_id,
365 (uintptr_t) arg.ioaddr, (size_t) arg.size);
366}
367
368NO_TRACE static errno_t dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
369 unsigned int flags, uintptr_t *phys)
370{
371 assert(TASK);
372
373 // TODO: implement locking of non-anonymous mapping
374 return page_find_mapping(virt, phys);
375}
376
377NO_TRACE static errno_t dmamem_map_anonymous(size_t size, uintptr_t constraint,
378 unsigned int map_flags, unsigned int flags, uintptr_t *phys,
379 uintptr_t *virt, uintptr_t bound)
380{
381 assert(TASK);
382
383 size_t frames = SIZE2FRAMES(size);
384 if (frames == 0)
385 return EINVAL;
386
387 *phys = frame_alloc(frames, FRAME_ATOMIC, constraint);
388 if (*phys == 0)
389 return ENOMEM;
390
391 mem_backend_data_t backend_data;
392 backend_data.base = *phys;
393 backend_data.frames = frames;
394 backend_data.anonymous = true;
395
396 if (!as_area_create(TASK->as, map_flags, size,
397 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
398 frame_free(*phys, frames);
399 return ENOMEM;
400 }
401
402 return EOK;
403}
404
405NO_TRACE static errno_t dmamem_unmap(uintptr_t virt, size_t size)
406{
407 // TODO: implement unlocking & unmap
408 return EOK;
409}
410
411NO_TRACE static errno_t dmamem_unmap_anonymous(uintptr_t virt)
412{
413 return as_area_destroy(TASK->as, virt);
414}
415
416sys_errno_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
417 void *phys_ptr, void *virt_ptr, uintptr_t bound)
418{
419 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
420 /*
421 * Non-anonymous DMA mapping
422 */
423
424 uintptr_t phys;
425 errno_t rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
426 flags, &phys);
427
428 if (rc != EOK)
429 return rc;
430
431 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
432 if (rc != EOK) {
433 dmamem_unmap((uintptr_t) virt_ptr, size);
434 return rc;
435 }
436 } else {
437 /*
438 * Anonymous DMA mapping
439 */
440
441 uintptr_t constraint;
442 errno_t rc = copy_from_uspace(&constraint, phys_ptr,
443 sizeof(constraint));
444 if (rc != EOK)
445 return rc;
446
447 uintptr_t virt;
448 rc = copy_from_uspace(&virt, virt_ptr, sizeof(virt));
449 if (rc != EOK)
450 return rc;
451
452 uintptr_t phys;
453 rc = dmamem_map_anonymous(size, constraint, map_flags, flags,
454 &phys, &virt, bound);
455 if (rc != EOK)
456 return rc;
457
458 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
459 if (rc != EOK) {
460 dmamem_unmap_anonymous((uintptr_t) virt);
461 return rc;
462 }
463
464 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
465 if (rc != EOK) {
466 dmamem_unmap_anonymous((uintptr_t) virt);
467 return rc;
468 }
469 }
470
471 return EOK;
472}
473
474sys_errno_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
475{
476 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
477 return dmamem_unmap(virt, size);
478 else
479 return dmamem_unmap_anonymous(virt);
480}
481
482/** @}
483 */
Note: See TracBrowser for help on using the repository browser.