source: mainline/kernel/generic/src/ddi/ddi.c@ e2a0d76

lfn serial ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since e2a0d76 was e2a0d76, checked in by Martin Decky <martin@…>, 12 years ago

cstyle

  • Property mode set to 100644
File size: 10.7 KB
RevLine 
[9a8d91b]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[9a8d91b]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
[b45c443]28
[06e1e95]29/** @addtogroup genericddi
[b45c443]30 * @{
31 */
[e49e234]32
[9179d0a]33/**
[b45c443]34 * @file
[e49e234]35 * @brief Device Driver Interface functions.
[9179d0a]36 *
37 * This file contains functions that comprise the Device Driver Interface.
38 * These are the functions for mapping physical memory and enabling I/O
39 * space to tasks.
40 */
[9a8d91b]41
42#include <ddi/ddi.h>
43#include <proc/task.h>
44#include <security/cap.h>
45#include <mm/frame.h>
46#include <mm/as.h>
[c6ae4c2]47#include <mm/page.h>
[373acb4]48#include <synch/mutex.h>
[e3c762cd]49#include <syscall/copy.h>
[e49e234]50#include <adt/btree.h>
[9a8d91b]51#include <arch.h>
52#include <align.h>
53#include <errno.h>
[7a0359b]54#include <trace.h>
[c6ae4c2]55#include <bitops.h>
[9a8d91b]56
[f8ddd17]57/** This lock protects the parea_btree. */
[373acb4]58static mutex_t parea_lock;
[f8ddd17]59
[e49e234]60/** B+tree with enabled physical memory areas. */
61static btree_t parea_btree;
[ae318d3]62
[da1bafb]63/** Initialize DDI.
64 *
65 */
[f8ddd17]66void ddi_init(void)
67{
[e49e234]68 btree_create(&parea_btree);
[373acb4]69 mutex_initialize(&parea_lock, MUTEX_PASSIVE);
[f8ddd17]70}
71
72/** Enable piece of physical memory for mapping by physmem_map().
73 *
74 * @param parea Pointer to physical area structure.
75 *
76 */
77void ddi_parea_register(parea_t *parea)
78{
[373acb4]79 mutex_lock(&parea_lock);
[f8ddd17]80
81 /*
[e49e234]82 * We don't check for overlaps here as the kernel is pretty sane.
[f8ddd17]83 */
[e49e234]84 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
[ae318d3]85
[373acb4]86 mutex_unlock(&parea_lock);
[f8ddd17]87}
88
/** Map piece of physical memory into virtual address space of current task.
 *
 * @param phys  Physical address of the starting frame.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 * @param virt  Virtual address of the starting page.
 * @param bound Lowest virtual address bound.
 *
 * @return EOK on success.
 * @return EPERM if the caller lacks capabilities to use this syscall.
 * @return EBADMEM if phys is not page aligned.
 * @return ENOENT if there is no task matching the specified ID or
 *         the physical address space is not enabled for mapping.
 * @return ENOMEM if there was a problem in creating address space area.
 *
 */
NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
    unsigned int flags, uintptr_t *virt, uintptr_t bound)
{
	ASSERT(TASK);

	/* The physical base must be frame-aligned. */
	if ((phys % FRAME_SIZE) != 0)
		return EBADMEM;

	/*
	 * Unprivileged tasks are only allowed to map pareas
	 * which are explicitly marked as such.
	 */
	bool priv =
	    ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);

	mem_backend_data_t backend_data;
	backend_data.base = phys;
	backend_data.frames = pages;

	/*
	 * Check if the memory region is explicitly enabled
	 * for mapping by any parea structure.
	 *
	 * NOTE: When a matching parea is found, parea_lock is
	 * deliberately kept held across the "goto map" path and
	 * only released after as_area_create() below, so that
	 * parea->mapped can be set atomically with the mapping.
	 */

	mutex_lock(&parea_lock);
	btree_node_t *nodep;
	parea_t *parea = (parea_t *) btree_search(&parea_btree,
	    (btree_key_t) phys, &nodep);

	if ((parea != NULL) && (parea->frames >= pages)) {
		if ((!priv) && (!parea->unpriv)) {
			mutex_unlock(&parea_lock);
			return EPERM;
		}

		/* parea_lock stays held; see note above. */
		goto map;
	}

	/* No usable parea: the lock is dropped and parea cleared. */
	parea = NULL;
	mutex_unlock(&parea_lock);

	/*
	 * Check if the memory region is part of physical
	 * memory generally enabled for mapping.
	 */

	irq_spinlock_lock(&zones.lock, true);
	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);

	if (znum == (size_t) -1) {
		/*
		 * Frames not found in any zone
		 * -> assume it is a hardware device and allow mapping
		 * for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	if (zones.info[znum].flags & (ZONE_FIRMWARE | ZONE_RESERVED)) {
		/*
		 * Frames are part of firmware or reserved zone
		 * -> allow mapping for privileged tasks.
		 */
		irq_spinlock_unlock(&zones.lock, true);

		if (!priv)
			return EPERM;

		goto map;
	}

	/* Ordinary RAM is never mappable this way. */
	irq_spinlock_unlock(&zones.lock, true);
	return ENOENT;

map:
	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
		/*
		 * The address space area was not created.
		 * We report it using ENOMEM.
		 */

		/* Release parea_lock if we arrived via the parea path. */
		if (parea != NULL)
			mutex_unlock(&parea_lock);

		return ENOMEM;
	}

	/*
	 * Mapping is created on-demand during page fault.
	 */

	if (parea != NULL) {
		parea->mapped = true;
		mutex_unlock(&parea_lock);
	}

	return EOK;
}
209
[fbcdeb8]210NO_TRACE static int physmem_unmap(uintptr_t virt)
211{
212 // TODO: implement unmap
213 return EOK;
214}
215
216/** Wrapper for SYS_PHYSMEM_MAP syscall.
217 *
218 * @param phys Physical base address to map
219 * @param pages Number of pages
220 * @param flags Flags of newly mapped pages
221 * @param virt_ptr Destination virtual address
222 * @param bound Lowest virtual address bound.
223 *
224 * @return 0 on success, otherwise it returns error code found in errno.h
225 *
226 */
227sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
228 void *virt_ptr, uintptr_t bound)
229{
230 uintptr_t virt = (uintptr_t) -1;
231 int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
232 &virt, bound);
233 if (rc != EOK)
234 return rc;
235
236 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
237 if (rc != EOK) {
238 physmem_unmap((uintptr_t) virt);
239 return rc;
240 }
241
242 return EOK;
243}
244
245sysarg_t sys_physmem_unmap(uintptr_t virt)
246{
247 return physmem_unmap(virt);
248}
249
/** Enable range of I/O space for task.
 *
 * @param id     Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size   Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 *
 */
NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	cap_t caps = cap_get(TASK);
	if (!(caps & CAP_IO_MANAGER))
		return EPERM;

	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(id);

	if ((!task) || (!container_check(CONTAINER, task->container))) {
		/*
		 * There is no task with the specified ID
		 * or the task belongs to a different security
		 * context.
		 */
		irq_spinlock_unlock(&tasks_lock, true);
		return ENOENT;
	}

	/*
	 * Lock the task and release the lock protecting tasks_btree.
	 * The exchange keeps the task pinned while tasks_lock is dropped,
	 * so it cannot disappear underneath us.
	 */
	irq_spinlock_exchange(&tasks_lock, &task->lock);
	int rc = ddi_iospace_enable_arch(task, ioaddr, size);
	irq_spinlock_unlock(&task->lock, true);

	return rc;
}
290
291/** Wrapper for SYS_ENABLE_IOSPACE syscall.
292 *
[abbc16e]293 * @param uspace_io_arg User space address of DDI argument structure.
[f52e54da]294 *
295 * @return 0 on success, otherwise it returns error code found in errno.h
[e49e234]296 *
297 */
[96b02eb9]298sysarg_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
[f52e54da]299{
300 ddi_ioarg_t arg;
[e49e234]301 int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
[e3c762cd]302 if (rc != 0)
[96b02eb9]303 return (sysarg_t) rc;
[e49e234]304
[fbcdeb8]305 return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
[f619ec11]306 (uintptr_t) arg.ioaddr, (size_t) arg.size);
[f52e54da]307}
[2bb8648]308
[fbcdeb8]309sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
310{
311 // TODO: implement
312 return ENOTSUP;
313}
314
315NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
[8cbf1c3]316 unsigned int flags, uintptr_t *phys)
[c6ae4c2]317{
318 ASSERT(TASK);
319
[fbcdeb8]320 // TODO: implement locking of non-anonymous mapping
321 return page_find_mapping(virt, phys);
322}
323
324NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
[8cbf1c3]325 unsigned int flags, uintptr_t *phys, uintptr_t *virt, uintptr_t bound)
[fbcdeb8]326{
327 ASSERT(TASK);
328
[e2a0d76]329 size_t frames = SIZE2FRAMES(size);
[fbcdeb8]330 uint8_t order;
331
[e2a0d76]332 /* We need the 2^order >= frames */
333 if (frames == 1)
[fbcdeb8]334 order = 0;
335 else
[e2a0d76]336 order = fnzb(frames - 1) + 1;
[fbcdeb8]337
[8cbf1c3]338 *phys = frame_alloc_noreserve(order, 0, 0);
339 if (*phys == 0)
[fbcdeb8]340 return ENOMEM;
341
342 mem_backend_data_t backend_data;
[8cbf1c3]343 backend_data.base = *phys;
[e2a0d76]344 backend_data.frames = frames;
[fbcdeb8]345
346 if (!as_area_create(TASK->as, map_flags, size,
347 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
[8cbf1c3]348 frame_free_noreserve(*phys);
[fbcdeb8]349 return ENOMEM;
[c6ae4c2]350 }
[fbcdeb8]351
352 return EOK;
[c6ae4c2]353}
354
[fbcdeb8]355NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
[c6ae4c2]356{
357 // TODO: implement unlocking & unmap
358 return EOK;
359}
360
/** Destroy an anonymous DMA area and free its backing frames.
 *
 * @param virt Virtual base address of the area.
 *
 * @return ENOENT if no area is found at @a virt.
 * @return Error code from as_area_destroy() otherwise.
 *
 */
NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
{
	// TODO: This is an ugly hack
	as_t *as = TASK->as;

	mutex_lock(&as->lock);
	/*
	 * NOTE(review): find_locked_area() presumably returns the area
	 * with area->lock already held -- the otherwise unpaired
	 * mutex_unlock(&area->lock) below implies it. Confirm against
	 * its definition.
	 */
	as_area_t *area = find_locked_area(as, virt);
	if (!area) {
		mutex_unlock(&as->lock);
		return ENOENT;
	}
	/*
	 * Free the contiguous frame block and detach it from the area
	 * so that as_area_destroy() does not touch it again.
	 */
	frame_free_noreserve(area->backend_data.base);
	area->backend_data.base = 0;
	area->backend_data.frames = 0;
	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);

	return as_area_destroy(as, virt);
}
380
381sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
382 void *phys_ptr, void *virt_ptr, uintptr_t bound)
383{
384 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
385 /*
386 * Non-anonymous DMA mapping
387 */
388
[8cbf1c3]389 uintptr_t phys;
[fbcdeb8]390 int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
391 flags, &phys);
392
393 if (rc != EOK)
394 return rc;
395
396 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
397 if (rc != EOK) {
398 dmamem_unmap((uintptr_t) virt_ptr, size);
399 return rc;
400 }
401 } else {
402 /*
403 * Anonymous DMA mapping
404 */
405
[8cbf1c3]406 uintptr_t phys;
[fbcdeb8]407 uintptr_t virt = (uintptr_t) -1;
408 int rc = dmamem_map_anonymous(size, map_flags, flags,
409 &phys, &virt, bound);
410 if (rc != EOK)
411 return rc;
412
413 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
414 if (rc != EOK) {
415 dmamem_unmap_anonymous((uintptr_t) virt);
416 return rc;
417 }
418
419 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
420 if (rc != EOK) {
421 dmamem_unmap_anonymous((uintptr_t) virt);
422 return rc;
423 }
[c6ae4c2]424 }
425
426 return EOK;
427}
428
429sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
430{
[fbcdeb8]431 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
432 return dmamem_unmap(virt, size);
433 else
434 return dmamem_unmap_anonymous(virt);
[c6ae4c2]435}
436
[06e1e95]437/** @}
[b45c443]438 */
Note: See TracBrowser for help on using the repository browser.