source: mainline/kernel/generic/src/ddi/ddi.c@ 597fa24

Last change on this file since 597fa24 was 597fa24, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 months ago

Enable static initialization of kernel synchronization primitives

[9a8d91b]1/*
[df4ed85]2 * Copyright (c) 2006 Jakub Jermar
[9a8d91b]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
[b45c443]28
[174156fd]29/** @addtogroup kernel_generic_ddi
[b45c443]30 * @{
31 */
[e49e234]32
[9179d0a]33/**
[b45c443]34 * @file
[e49e234]35 * @brief Device Driver Interface functions.
[9179d0a]36 *
37 * This file contains functions that comprise the Device Driver Interface.
38 * These are the functions for mapping physical memory and enabling I/O
39 * space for tasks.
40 */
[9a8d91b]41
[63e27ef]42#include <assert.h>
[9a8d91b]43#include <ddi/ddi.h>
44#include <proc/task.h>
[719a208]45#include <security/perm.h>
[9a8d91b]46#include <mm/frame.h>
47#include <mm/as.h>
[46e886f]48#include <mm/km.h>
[c6ae4c2]49#include <mm/page.h>
[373acb4]50#include <synch/mutex.h>
[e3c762cd]51#include <syscall/copy.h>
[6f7071b]52#include <adt/odict.h>
[9a8d91b]53#include <arch.h>
54#include <align.h>
55#include <errno.h>
[b169619]56#include <memw.h>
[7a0359b]57#include <trace.h>
[c6ae4c2]58#include <bitops.h>
[46e886f]59#include <arch/asm.h>
[9a8d91b]60
[6f7071b]61/** This lock protects the @c pareas ordered dictionary. */
[597fa24]62static MUTEX_INITIALIZE(pareas_lock, MUTEX_PASSIVE);
[f8ddd17]63
[6f7071b]64/** Ordered dictionary of enabled physical memory areas by base address. */
65static odict_t pareas;
66
67static void *pareas_getkey(odlink_t *);
68static int pareas_cmp(void *, void *);
[ae318d3]69
[da1bafb]70/** Initialize DDI.
71 *
72 */
[f8ddd17]73void ddi_init(void)
74{
[6f7071b]75 odict_initialize(&pareas, pareas_getkey, pareas_cmp);
76}
77
78/** Initialize physical area structure.
79 *
80 * This should always be called first on the parea structure before
81 * filling in fields and calling ddi_parea_register.
82 *
83 * @param parea Pointer to physical area structure.
84 *
85 */
86void ddi_parea_init(parea_t *parea)
87{
88 memset(parea, 0, sizeof(parea_t));
[f8ddd17]89}
90
91/** Enable piece of physical memory for mapping by physmem_map().
92 *
93 * @param parea Pointer to physical area structure.
94 *
95 */
96void ddi_parea_register(parea_t *parea)
97{
[6f7071b]98 mutex_lock(&pareas_lock);
[a35b458]99
[f8ddd17]100 /*
[e49e234]101 * We don't check for overlaps here as the kernel is pretty sane.
[f8ddd17]102 */
[6f7071b]103 odict_insert(&parea->lpareas, &pareas, NULL);
[a35b458]104
[6f7071b]105 mutex_unlock(&pareas_lock);
[f8ddd17]106}
107
[e037cf37]108/** Notify that a physical area has been unmapped.
109 *
110 * @param parea Physical area
111 */
112void ddi_parea_unmap_notify(parea_t *parea)
113{
114 parea->mapped = false;
115 if (parea->mapped_changed != NULL)
116 parea->mapped_changed(parea->arg);
117}
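
/*
 * Illustration only, not part of ddi.c: a minimal sketch of how a kernel
 * driver might publish a physical memory region (for example a framebuffer)
 * so that user space can map it through SYS_PHYSMEM_MAP. All names below
 * (fb_parea, fb_mapping_changed, fb_publish, fb_phys, fb_size) are
 * hypothetical; the fields filled in are the ones consulted by physmem_map()
 * below, and the callback signature is assumed from the call in
 * ddi_parea_unmap_notify() above.
 */
static parea_t fb_parea;

static void fb_mapping_changed(void *arg)
{
	/* Invoked by ddi_parea_unmap_notify() once the area is unmapped. */
	(void) arg;
}

static void fb_publish(uintptr_t fb_phys, size_t fb_size)
{
	ddi_parea_init(&fb_parea);               /* zero the structure first */
	fb_parea.pbase = fb_phys;                /* frame-aligned physical base */
	fb_parea.frames = SIZE2FRAMES(fb_size);  /* length in frames */
	fb_parea.unpriv = false;                 /* only PERM_MEM_MANAGER tasks may map it */
	fb_parea.mapped_changed = fb_mapping_changed;
	fb_parea.arg = NULL;
	ddi_parea_register(&fb_parea);           /* insert into the pareas dictionary */
}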
118
[8da51ad]119/** Map a piece of physical memory into the virtual address space of the current task.
[9a8d91b]120 *
[c6ae4c2]121 * @param phys Physical address of the starting frame.
[9a8d91b]122 * @param pages Number of pages to map.
[6212095]123 * @param flags Address space area flags for the mapping.
[fbcdeb8]124 * @param virt Virtual address of the starting page.
125 * @param bound Lowest virtual address bound.
[9a8d91b]126 *
[c6ae4c2]127 * @return EOK on success.
[719a208]128 * @return EPERM if the caller lacks permissions to use this syscall.
[fbcdeb8]129 * @return EBADMEM if phys is not page aligned.
[c6ae4c2]130 * @return ENOENT if there is no task matching the specified ID or
131 * the physical address space is not enabled for mapping.
132 * @return ENOMEM if there was a problem in creating address space area.
[e49e234]133 *
[9a8d91b]134 */
[8df5f20]135_NO_TRACE static errno_t physmem_map(uintptr_t phys, size_t pages,
[fbcdeb8]136 unsigned int flags, uintptr_t *virt, uintptr_t bound)
[9a8d91b]137{
[63e27ef]138 assert(TASK);
[a35b458]139
[c6ae4c2]140 if ((phys % FRAME_SIZE) != 0)
[d7533c7]141 return EBADMEM;
[a35b458]142
[9a8d91b]143 /*
[d7533c7]144 * Unprivileged tasks are only allowed to map pareas
145 * which are explicitly marked as such.
[9a8d91b]146 */
[d7533c7]147 bool priv =
[719a208]148 ((perm_get(TASK) & PERM_MEM_MANAGER) == PERM_MEM_MANAGER);
[a35b458]149
[e49e234]150 mem_backend_data_t backend_data;
[c6ae4c2]151 backend_data.base = phys;
[e49e234]152 backend_data.frames = pages;
[c101dc0]153 backend_data.anonymous = false;
[a35b458]154
[b366a6f4]155 /*
156 * Check if the memory region is explicitly enabled
157 * for mapping by any parea structure.
158 */
[a35b458]159
[6f7071b]160 mutex_lock(&pareas_lock);
161 odlink_t *odlink = odict_find_eq(&pareas, &phys, NULL);
162 parea_t *parea = odlink != NULL ?
163 odict_get_instance(odlink, parea_t, lpareas) : NULL;
[a35b458]164
[b366a6f4]165 if ((parea != NULL) && (parea->frames >= pages)) {
166 if ((!priv) && (!parea->unpriv)) {
[6f7071b]167 mutex_unlock(&pareas_lock);
[b366a6f4]168 return EPERM;
169 }
[a35b458]170
[b366a6f4]171 goto map;
172 }
[a35b458]173
[b366a6f4]174 parea = NULL;
[6f7071b]175 mutex_unlock(&pareas_lock);
[a35b458]176
[b366a6f4]177 /*
178 * Check if the memory region is part of physical
179 * memory generally enabled for mapping.
180 */
[a35b458]181
[da1bafb]182 irq_spinlock_lock(&zones.lock, true);
[c6ae4c2]183 size_t znum = find_zone(ADDR2PFN(phys), pages, 0);
[a35b458]184
[98000fb]185 if (znum == (size_t) -1) {
[d7533c7]186 /*
187 * Frames not found in any zone
188 * -> assume it is a hardware device and allow mapping
189 * for privileged tasks.
[e49e234]190 */
[da1bafb]191 irq_spinlock_unlock(&zones.lock, true);
[a35b458]192
[d7533c7]193 if (!priv)
194 return EPERM;
[a35b458]195
[e49e234]196 goto map;
[ae318d3]197 }
[a35b458]198
[3164e3b]199 if (zones.info[znum].flags & (ZONE_FIRMWARE | ZONE_RESERVED)) {
[d7533c7]200 /*
[3164e3b]201 * Frames are part of firmware or reserved zone
[d7533c7]202 * -> allow mapping for privileged tasks.
203 */
[da1bafb]204 irq_spinlock_unlock(&zones.lock, true);
[a35b458]205
[d7533c7]206 if (!priv)
207 return EPERM;
[a35b458]208
[e49e234]209 goto map;
210 }
[a35b458]211
[da1bafb]212 irq_spinlock_unlock(&zones.lock, true);
[e49e234]213 return ENOENT;
[a35b458]214
[e49e234]215map:
[e037cf37]216 backend_data.parea = parea;
217
[fbcdeb8]218 if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
219 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
[9a8d91b]220 /*
[b366a6f4]221 * The address space area was not created.
[9a8d91b]222 * We report it using ENOMEM.
223 */
[a35b458]224
[b366a6f4]225 if (parea != NULL)
[6f7071b]226 mutex_unlock(&pareas_lock);
[a35b458]227
[9a8d91b]228 return ENOMEM;
229 }
[a35b458]230
[0ee077ee]231 /*
232 * Mapping is created on-demand during page fault.
233 */
[a35b458]234
[b366a6f4]235 if (parea != NULL) {
236 parea->mapped = true;
[6f7071b]237 mutex_unlock(&pareas_lock);
[b366a6f4]238 }
[a35b458]239
[b366a6f4]240 return EOK;
[9a8d91b]241}
242
[8df5f20]243_NO_TRACE static errno_t physmem_unmap(uintptr_t virt)
[fbcdeb8]244{
[63e27ef]245 assert(TASK);
[8cd680c]246
247 return as_area_destroy(TASK->as, virt);
[fbcdeb8]248}
249
250/** Wrapper for SYS_PHYSMEM_MAP syscall.
251 *
252 * @param phys Physical base address to map
253 * @param pages Number of pages
254 * @param flags Flags of newly mapped pages
255 * @param virt_ptr Destination virtual address
256 * @param bound Lowest virtual address bound.
257 *
258 * @return 0 on success, otherwise an error code from errno.h.
259 *
260 */
[b7fd2a0]261sys_errno_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
[5a5269d]262 uspace_ptr_uintptr_t virt_ptr, uintptr_t bound)
[fbcdeb8]263{
[bf9cb2f]264 uintptr_t virt;
[b7fd2a0]265 errno_t rc = copy_from_uspace(&virt, virt_ptr, sizeof(virt));
[bf9cb2f]266 if (rc != EOK)
267 return rc;
[a35b458]268
[bf9cb2f]269 rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags, &virt,
270 bound);
[fbcdeb8]271 if (rc != EOK)
272 return rc;
[a35b458]273
[fbcdeb8]274 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
275 if (rc != EOK) {
[5a5269d]276 physmem_unmap(virt);
[fbcdeb8]277 return rc;
278 }
[a35b458]279
[fbcdeb8]280 return EOK;
281}
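
/*
 * Illustration only, not part of ddi.c: the in/out contract a user space
 * caller of SYS_PHYSMEM_MAP sees, as implemented by sys_physmem_map() above.
 * The wrapper prototype physmem_map_call() and map_device_registers() are
 * hypothetical placeholders (the real libc interface may differ), and the
 * (uintptr_t) -1 "place it anywhere" value is an assumption. The point is
 * that the caller stores a preferred virtual address at virt_ptr and, on
 * success, the kernel overwrites it with the address it actually chose.
 */
extern errno_t physmem_map_call(uintptr_t phys, size_t pages,
    unsigned int flags, uintptr_t *virt, uintptr_t bound);

static errno_t map_device_registers(uintptr_t dev_phys, uintptr_t *out_virt)
{
	uintptr_t virt = (uintptr_t) -1;   /* assumed "any address" convention */
	errno_t rc = physmem_map_call(dev_phys, 1 /* page */,
	    AS_AREA_READ | AS_AREA_WRITE, &virt, 0 /* bound: no lower limit */);
	if (rc == EOK)
		*out_virt = virt;          /* virtual base chosen by the kernel */

	return rc;
}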
282
[b7fd2a0]283sys_errno_t sys_physmem_unmap(uintptr_t virt)
[fbcdeb8]284{
285 return physmem_unmap(virt);
286}
287
[6f7071b]288/** Get key function for the @c pareas ordered dictionary.
289 *
290 * @param odlink Link
291 * @return Pointer to base address cast as 'void *'
292 */
293static void *pareas_getkey(odlink_t *odlink)
294{
295 parea_t *parea = odict_get_instance(odlink, parea_t, lpareas);
296 return (void *) &parea->pbase;
297}
298
299/** Key comparison function for the @c pareas ordered dictionary.
300 *
301 * @param a Pointer to parea A base
302 * @param b Pointer to parea B base
303 * @return -1, 0, 1 if the base of A is less than, equal to, greater than the base of B, respectively
304 */
305static int pareas_cmp(void *a, void *b)
306{
307 uintptr_t pa = *(uintptr_t *)a;
308 uintptr_t pb = *(uintptr_t *)b;
309
310 if (pa < pb)
311 return -1;
312 else if (pa == pb)
313 return 0;
314 else
315 return +1;
316}
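
/*
 * Illustration only, not part of ddi.c: a hypothetical lookup helper showing
 * how the two callbacks above are consumed. odict_find_eq() extracts keys
 * with pareas_getkey() and orders them with pareas_cmp(), which is exactly
 * the lookup physmem_map() performs under pareas_lock.
 */
static parea_t *pareas_find(uintptr_t pbase)
{
	/* The caller must hold pareas_lock. */
	odlink_t *odlink = odict_find_eq(&pareas, &pbase, NULL);
	if (odlink == NULL)
		return NULL;

	return odict_get_instance(odlink, parea_t, lpareas);
}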
317
[f52e54da]318/** Enable range of I/O space for task.
319 *
[8cd680c]320 * @param id Task ID of the destination task.
[f52e54da]321 * @param ioaddr Starting I/O address.
[8cd680c]322 * @param size Size of the enabled I/O space.
[f52e54da]323 *
[719a208]324 * @return 0 on success, EPERM if the caller lacks permissions to use this
[e49e234]325 * syscall, ENOENT if there is no task matching the specified ID.
326 *
[f52e54da]327 */
[8df5f20]328_NO_TRACE static errno_t iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
[f52e54da]329{
330 /*
331 * Make sure the caller is authorised to make this syscall.
332 */
[719a208]333 perm_t perms = perm_get(TASK);
334 if (!(perms & PERM_IO_MANAGER))
[f52e54da]335 return EPERM;
[a35b458]336
[e49e234]337 task_t *task = task_find_by_id(id);
[a35b458]338
[07d4271]339 if (!task)
[f52e54da]340 return ENOENT;
[a35b458]341
[07d4271]342 errno_t rc = ENOENT;
343
344 irq_spinlock_lock(&task->lock, true);
[8cd680c]345
[07d4271]346 /* Check that the task belongs to the correct security context. */
347 if (container_check(CONTAINER, task->container))
348 rc = ddi_iospace_enable_arch(task, ioaddr, size);
349
350 irq_spinlock_unlock(&task->lock, true);
351 task_release(task);
[8cd680c]352 return rc;
353}
354
355/** Disable range of I/O space for task.
356 *
357 * @param id Task ID of the destination task.
358 * @param ioaddr Starting I/O address.
359 * @param size Size of the I/O space to be disabled.
360 *
[719a208]361 * @return 0 on success, EPERM if the caller lacks permissions to use this
[8cd680c]362 * syscall, ENOENT if there is no task matching the specified ID.
363 *
364 */
[8df5f20]365_NO_TRACE static errno_t iospace_disable(task_id_t id, uintptr_t ioaddr, size_t size)
[8cd680c]366{
367 /*
368 * Make sure the caller is authorised to make this syscall.
369 */
[719a208]370 perm_t perms = perm_get(TASK);
371 if (!(perms & PERM_IO_MANAGER))
[8cd680c]372 return EPERM;
[a35b458]373
[8cd680c]374 task_t *task = task_find_by_id(id);
[a35b458]375
[07d4271]376 if (!task)
[8cd680c]377 return ENOENT;
[a35b458]378
[07d4271]379 errno_t rc = ENOENT;
380
381 irq_spinlock_lock(&task->lock, true);
[a35b458]382
[07d4271]383 /* Check that the task belongs to the correct security context. */
384 if (container_check(CONTAINER, task->container))
385 rc = ddi_iospace_disable_arch(task, ioaddr, size);
386
387 irq_spinlock_unlock(&task->lock, true);
388 task_release(task);
[f52e54da]389 return rc;
390}
391
392/** Wrapper for SYS_ENABLE_IOSPACE syscall.
393 *
[abbc16e]394 * @param uspace_io_arg User space address of DDI argument structure.
[f52e54da]395 *
396 * @return 0 on success, otherwise an error code from errno.h.
[e49e234]397 *
398 */
[5a5269d]399sys_errno_t sys_iospace_enable(uspace_ptr_ddi_ioarg_t uspace_io_arg)
[f52e54da]400{
401 ddi_ioarg_t arg;
[b7fd2a0]402 errno_t rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
[a53ed3a]403 if (rc != EOK)
[b7fd2a0]404 return (sys_errno_t) rc;
[a35b458]405
[b7fd2a0]406 return (sys_errno_t) iospace_enable((task_id_t) arg.task_id,
[f619ec11]407 (uintptr_t) arg.ioaddr, (size_t) arg.size);
[f52e54da]408}
[2bb8648]409
[5a5269d]410sys_errno_t sys_iospace_disable(uspace_ptr_ddi_ioarg_t uspace_io_arg)
[fbcdeb8]411{
[8cd680c]412 ddi_ioarg_t arg;
[b7fd2a0]413 errno_t rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
[a53ed3a]414 if (rc != EOK)
[b7fd2a0]415 return (sys_errno_t) rc;
[8cd680c]416
[b7fd2a0]417 return (sys_errno_t) iospace_disable((task_id_t) arg.task_id,
[8cd680c]418 (uintptr_t) arg.ioaddr, (size_t) arg.size);
[fbcdeb8]419}
420
[8df5f20]421_NO_TRACE static errno_t dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
[8cbf1c3]422 unsigned int flags, uintptr_t *phys)
[c6ae4c2]423{
[63e27ef]424 assert(TASK);
[a35b458]425
[fbcdeb8]426 // TODO: implement locking of non-anonymous mapping
427 return page_find_mapping(virt, phys);
428}
429
[8df5f20]430_NO_TRACE static errno_t dmamem_map_anonymous(size_t size, uintptr_t constraint,
[b0c2075]431 unsigned int map_flags, unsigned int flags, uintptr_t *phys,
432 uintptr_t *virt, uintptr_t bound)
[fbcdeb8]433{
[63e27ef]434 assert(TASK);
[a35b458]435
[e2a0d76]436 size_t frames = SIZE2FRAMES(size);
[14741a0]437 if (frames == 0)
438 return EINVAL;
439
[482f968]440 // FIXME: probably need to ensure that the memory is suitable for DMA
[a17cced]441 *phys = frame_alloc(frames, FRAME_ATOMIC, constraint);
[8cbf1c3]442 if (*phys == 0)
[fbcdeb8]443 return ENOMEM;
[a35b458]444
[fbcdeb8]445 mem_backend_data_t backend_data;
[8cbf1c3]446 backend_data.base = *phys;
[e2a0d76]447 backend_data.frames = frames;
[c101dc0]448 backend_data.anonymous = true;
[7d83c54]449 backend_data.parea = NULL;
[a35b458]450
[fbcdeb8]451 if (!as_area_create(TASK->as, map_flags, size,
452 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
[a17cced]453 frame_free(*phys, frames);
[fbcdeb8]454 return ENOMEM;
[c6ae4c2]455 }
[a35b458]456
[fbcdeb8]457 return EOK;
[c6ae4c2]458}
459
[8df5f20]460_NO_TRACE static errno_t dmamem_unmap(uintptr_t virt, size_t size)
[c6ae4c2]461{
462 // TODO: implement unlocking & unmap
463 return EOK;
464}
465
[8df5f20]466_NO_TRACE static errno_t dmamem_unmap_anonymous(uintptr_t virt)
[c6ae4c2]467{
[c101dc0]468 return as_area_destroy(TASK->as, virt);
[fbcdeb8]469}
470
[b7fd2a0]471sys_errno_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
[5a5269d]472 uspace_ptr_uintptr_t phys_ptr, uspace_ptr_uintptr_t virt_ptr, uintptr_t bound)
[fbcdeb8]473{
474 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
475 /*
476 * Non-anonymous DMA mapping
477 */
[a35b458]478
[8cbf1c3]479 uintptr_t phys;
[5a5269d]480 errno_t rc = dmamem_map(virt_ptr, size, map_flags,
[fbcdeb8]481 flags, &phys);
[a35b458]482
[fbcdeb8]483 if (rc != EOK)
484 return rc;
[a35b458]485
[fbcdeb8]486 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
487 if (rc != EOK) {
[5a5269d]488 dmamem_unmap(virt_ptr, size);
[fbcdeb8]489 return rc;
490 }
491 } else {
492 /*
493 * Anonymous DMA mapping
494 */
[a35b458]495
[b0c2075]496 uintptr_t constraint;
[b7fd2a0]497 errno_t rc = copy_from_uspace(&constraint, phys_ptr,
[b0c2075]498 sizeof(constraint));
499 if (rc != EOK)
500 return rc;
[a35b458]501
[bf9cb2f]502 uintptr_t virt;
503 rc = copy_from_uspace(&virt, virt_ptr, sizeof(virt));
504 if (rc != EOK)
505 return rc;
[a35b458]506
[8cbf1c3]507 uintptr_t phys;
[b0c2075]508 rc = dmamem_map_anonymous(size, constraint, map_flags, flags,
[fbcdeb8]509 &phys, &virt, bound);
510 if (rc != EOK)
511 return rc;
[a35b458]512
[fbcdeb8]513 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
514 if (rc != EOK) {
[5a5269d]515 dmamem_unmap_anonymous(virt);
[fbcdeb8]516 return rc;
517 }
[a35b458]518
[fbcdeb8]519 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
520 if (rc != EOK) {
[5a5269d]521 dmamem_unmap_anonymous(virt);
[fbcdeb8]522 return rc;
523 }
[c6ae4c2]524 }
[a35b458]525
[c6ae4c2]526 return EOK;
527}
528
[b7fd2a0]529sys_errno_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
[c6ae4c2]530{
[fbcdeb8]531 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
532 return dmamem_unmap(virt, size);
533 else
534 return dmamem_unmap_anonymous(virt);
[c6ae4c2]535}

/** Prepare a physical I/O range for access.
 *
 * @param phys Physical base address of the I/O range.
 * @param size Size of the range in bytes.
 *
 * @return Address to be used for access: @a phys itself for port I/O
 *         below IO_SPACE_BOUNDARY, otherwise an uncached kernel virtual
 *         mapping of the range.
 *
 */
[46e886f]536void *pio_map(void *phys, size_t size)
537{
538#ifdef IO_SPACE_BOUNDARY
539 if (phys < IO_SPACE_BOUNDARY)
540 return phys;
541#endif
542 return (void *) km_map((uintptr_t) phys, size, KM_NATURAL_ALIGNMENT,
543 PAGE_READ | PAGE_WRITE | PAGE_NOT_CACHEABLE);
544}
545
546void pio_unmap(void *phys, void *virt, size_t size)
547{
548#ifdef IO_SPACE_BOUNDARY
549 if (phys < IO_SPACE_BOUNDARY)
550 return;
551#endif
552 km_unmap((uintptr_t) virt, size);
553}
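
/*
 * Illustration only, not part of ddi.c: the usual pio_map()/pio_unmap()
 * pattern for a memory-mapped device. The device name, register block
 * address and size below are hypothetical; ioport8_t and pio_read_8() are
 * assumed to be the kernel PIO accessors used elsewhere in the kernel.
 */
#define MY_DEV_REGS_PHYS  ((void *) 0x10000000)
#define MY_DEV_REGS_SIZE  0x1000

static void my_dev_probe(void)
{
	/* Either the port address itself or an uncached kernel mapping. */
	ioport8_t *regs = (ioport8_t *) pio_map(MY_DEV_REGS_PHYS,
	    MY_DEV_REGS_SIZE);

	uint8_t id = pio_read_8(&regs[0]);   /* read a device register */
	(void) id;

	/* Release the mapping once the device is no longer needed. */
	pio_unmap(MY_DEV_REGS_PHYS, regs, MY_DEV_REGS_SIZE);
}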
[c6ae4c2]554
[06e1e95]555/** @}
[b45c443]556 */