source: mainline/kernel/generic/src/ddi/ddi.c@ c6ae4c2

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since c6ae4c2 was c6ae4c2, checked in by Martin Decky <martin@…>, 14 years ago

implement basic DMA memory mapping routines
no persistent locking (pinning) of the DMA memory is currently implemented (this can be dangerous)
no unmapping is implemented
special allocations (< 16 MB, < 4 GB) are not supported yet

  • Property mode set to 100644
File size: 9.0 KB
RevLine 
[9a8d91b]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[9a8d91b]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
[b45c443]28
[06e1e95]29/** @addtogroup genericddi
[b45c443]30 * @{
31 */
[e49e234]32
[9179d0a]33/**
[b45c443]34 * @file
[e49e234]35 * @brief Device Driver Interface functions.
[9179d0a]36 *
37 * This file contains functions that comprise the Device Driver Interface.
38 * These are the functions for mapping physical memory and enabling I/O
39 * space to tasks.
40 */
[9a8d91b]41
42#include <ddi/ddi.h>
43#include <proc/task.h>
44#include <security/cap.h>
45#include <mm/frame.h>
46#include <mm/as.h>
[c6ae4c2]47#include <mm/page.h>
[373acb4]48#include <synch/mutex.h>
[e3c762cd]49#include <syscall/copy.h>
[e49e234]50#include <adt/btree.h>
[9a8d91b]51#include <arch.h>
52#include <align.h>
53#include <errno.h>
[7a0359b]54#include <trace.h>
[c6ae4c2]55#include <bitops.h>
[9a8d91b]56
/** Mutex protecting parea_btree against concurrent registration and lookup. */
static mutex_t parea_lock;

/** B+tree with enabled physical memory areas, keyed by physical base address. */
static btree_t parea_btree;
[ae318d3]62
[da1bafb]63/** Initialize DDI.
64 *
65 */
[f8ddd17]66void ddi_init(void)
67{
[e49e234]68 btree_create(&parea_btree);
[373acb4]69 mutex_initialize(&parea_lock, MUTEX_PASSIVE);
[f8ddd17]70}
71
/** Enable piece of physical memory for mapping by physmem_map().
 *
 * Inserts the area into the global parea_btree, keyed by its physical
 * base address, so that ddi_physmem_map() can later find it.
 *
 * @param parea Pointer to physical area structure.
 *
 */
void ddi_parea_register(parea_t *parea)
{
	mutex_lock(&parea_lock);

	/*
	 * We don't check for overlaps here as the kernel is pretty sane.
	 */
	btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);

	mutex_unlock(&parea_lock);
}
88
/** Map piece of physical memory into virtual address space of current task.
 *
 * @param phys Physical address of the starting frame.
 * @param virt Virtual address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return EOK on success.
 * @return EPERM if the caller lacks capabilities to use this syscall.
 * @return EBADMEM if phys or virt is not page aligned.
 * @return ENOENT if the physical region is neither registered as a
 *         parea nor device/firmware memory enabled for mapping.
 * @return ENOMEM if there was a problem in creating address space area.
 *
 */
NO_TRACE static int ddi_physmem_map(uintptr_t phys, uintptr_t virt, size_t pages,
    unsigned int flags)
{
	ASSERT(TASK);

	if ((phys % FRAME_SIZE) != 0)
		return EBADMEM;

	if ((virt % PAGE_SIZE) != 0)
		return EBADMEM;

	/*
	 * Unprivileged tasks are only allowed to map pareas
	 * which are explicitly marked as such.
	 */
	bool priv =
	    ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);

	/* Backend data for the physical memory backend of the new area. */
	mem_backend_data_t backend_data;
	backend_data.base = phys;
	backend_data.frames = pages;

	/*
	 * Check if the memory region is explicitly enabled
	 * for mapping by any parea structure.
	 */

	mutex_lock(&parea_lock);
	btree_node_t *nodep;
	parea_t *parea = (parea_t *) btree_search(&parea_btree,
	    (btree_key_t) phys, &nodep);

	if ((parea != NULL) && (parea->frames >= pages)) {
		if ((!priv) && (!parea->unpriv)) {
			mutex_unlock(&parea_lock);
			return EPERM;
		}

		/*
		 * NOTE: parea_lock is deliberately kept locked across
		 * this jump; the map: section below releases it on every
		 * path whenever parea != NULL.
		 */
		goto map;
	}

	/* No suitable parea: fall back to the physical-zone check. */
	parea = NULL;
	mutex_unlock(&parea_lock);

	/*
	 * Check if the memory region is part of physical
	 * memory generally enabled for mapping.
	 */

	irq_spinlock_lock(&zones.lock, true);
	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);

	if (znum == (size_t) -1) {
		/*
		 * Frames not found in any zone
		 * -> assume it is a hardware device and allow mapping
		 * for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	if (zones.info[znum].flags & ZONE_FIRMWARE) {
		/*
		 * Frames are part of firmware
		 * -> allow mapping for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	/* Ordinary RAM zone: not mappable via this interface. */
	irq_spinlock_unlock(&zones.lock, true);
	return ENOENT;

map:
	/* If parea != NULL, parea_lock is still held at this point. */
	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages), virt,
	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
		/*
		 * The address space area was not created.
		 * We report it using ENOMEM.
		 */

		if (parea != NULL)
			mutex_unlock(&parea_lock);

		return ENOMEM;
	}

	/*
	 * Mapping is created on-demand during page fault.
	 */

	if (parea != NULL) {
		parea->mapped = true;
		mutex_unlock(&parea_lock);
	}

	return EOK;
}
211
/** Enable range of I/O space for task.
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 *
 */
NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
    size_t size)
{
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	cap_t caps = cap_get(TASK);
	if (!(caps & CAP_IO_MANAGER))
		return EPERM;

	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(id);

	if ((!task) || (!container_check(CONTAINER, task->container))) {
		/*
		 * There is no task with the specified ID
		 * or the task belongs to a different security
		 * context.
		 */
		irq_spinlock_unlock(&tasks_lock, true);
		return ENOENT;
	}

	/* Lock the task and release the lock protecting tasks_btree. */
	irq_spinlock_exchange(&tasks_lock, &task->lock);

	/* The actual I/O space enabling is architecture-specific. */
	int rc = ddi_iospace_enable_arch(task, ioaddr, size);

	irq_spinlock_unlock(&task->lock, true);

	return rc;
}
255
[5a8b2a2]256/** Wrapper for SYS_PHYSMEM_MAP syscall.
[9a8d91b]257 *
[c6ae4c2]258 * @param phys Physical base address to map
259 * @param virt Destination virtual address
[8da51ad]260 * @param pages Number of pages
261 * @param flags Flags of newly mapped pages
[9a8d91b]262 *
263 * @return 0 on success, otherwise it returns error code found in errno.h
[e49e234]264 *
265 */
[c6ae4c2]266sysarg_t sys_physmem_map(uintptr_t phys, uintptr_t virt,
267 size_t pages, unsigned int flags)
[9a8d91b]268{
[c6ae4c2]269 return (sysarg_t)
270 ddi_physmem_map(ALIGN_DOWN(phys, FRAME_SIZE),
271 ALIGN_DOWN(virt, PAGE_SIZE), pages, flags);
[9a8d91b]272}
[f52e54da]273
274/** Wrapper for SYS_ENABLE_IOSPACE syscall.
275 *
[abbc16e]276 * @param uspace_io_arg User space address of DDI argument structure.
[f52e54da]277 *
278 * @return 0 on success, otherwise it returns error code found in errno.h
[e49e234]279 *
280 */
[96b02eb9]281sysarg_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
[f52e54da]282{
283 ddi_ioarg_t arg;
[e49e234]284 int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
[e3c762cd]285 if (rc != 0)
[96b02eb9]286 return (sysarg_t) rc;
[e49e234]287
[96b02eb9]288 return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
[f619ec11]289 (uintptr_t) arg.ioaddr, (size_t) arg.size);
[f52e54da]290}
[2bb8648]291
/** Map DMA memory into the address space of the current task.
 *
 * For non-anonymous mappings only the physical address backing the
 * existing mapping at @a virt is looked up. For anonymous mappings a
 * physically contiguous block of frames is allocated and a new address
 * space area backed by it is created at @a virt.
 *
 * No locking (pinning) of the memory is implemented yet (see TODOs).
 *
 * @param virt      Virtual address of the starting page.
 * @param size      Size of the memory block in bytes.
 * @param map_flags Address space area flags for the new mapping.
 * @param flags     DMA memory flags (DMAMEM_FLAGS_ANONYMOUS or not).
 * @param phys      Output argument: physical base address of the block.
 *
 * @return EOK on success.
 * @return EBADMEM if virt is not page aligned (anonymous case).
 * @return ENOMEM if frame allocation or area creation failed.
 *
 */
NO_TRACE static int dmamem_map(uintptr_t virt, size_t size,
    unsigned int map_flags, unsigned int flags, void **phys)
{
	ASSERT(TASK);

	if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
		// TODO: implement locking of non-anonymous mapping
		return page_find_mapping(virt, phys);
	} else {
		// TODO: implement locking

		if ((virt % PAGE_SIZE) != 0)
			return EBADMEM;

		/*
		 * NOTE(review): assumes size > 0; SIZE2FRAMES(0) would give
		 * pages == 0 and fnzb(pages - 1) would underflow — confirm
		 * callers never pass size == 0.
		 */
		size_t pages = SIZE2FRAMES(size);
		uint8_t order;

		/* We need the 2^order >= pages */
		if (pages == 1)
			order = 0;
		else
			order = fnzb(pages - 1) + 1;

		*phys = frame_alloc_noreserve(order, 0);
		if (*phys == NULL)
			return ENOMEM;

		mem_backend_data_t backend_data;
		backend_data.base = (uintptr_t) *phys;
		backend_data.frames = pages;

		if (!as_area_create(TASK->as, map_flags, size, virt,
		    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
			/* Don't leak the frames if the area cannot be created. */
			frame_free_noreserve((uintptr_t) *phys);
			return ENOMEM;
		}

		return EOK;
	}
}
332
/** Unmap DMA memory from the address space of the current task.
 *
 * Currently a no-op placeholder: neither unlocking nor unmapping
 * is implemented yet, so this always reports success.
 *
 * @param virt  Virtual address of the starting page.
 * @param size  Size of the memory block in bytes.
 * @param flags DMA memory flags.
 *
 * @return EOK (unconditionally, see TODO).
 *
 */
NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size,
    unsigned int flags)
{
	// TODO: implement unlocking & unmap
	return EOK;
}
339
340sysarg_t sys_dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
341 unsigned int flags, void *phys_ptr)
342{
343 void *phys;
344 int rc = dmamem_map(virt, size, map_flags, flags, &phys);
345 if (rc != EOK)
346 return rc;
347
348 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
349 if (rc != EOK) {
350 dmamem_unmap(virt, size, flags);
351 return rc;
352 }
353
354 return EOK;
355}
356
357sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
358{
359 return dmamem_unmap(virt, size, flags);
360}
361
[06e1e95]362/** @}
[b45c443]363 */
Note: See TracBrowser for help on using the repository browser.