source: mainline/uspace/lib/usb/src/dma_buffer.c@ 1b20da0

Last change on this file since 1b20da0 was e0a5d4c, checked in by Ondřej Hlavatý <aearsis@…>, 8 years ago

usb: update copyrights

The data was generated by a script, guided manually. If you feel your
name is missing somewhere, please add it!

The semi-automated process was roughly:

1) Changes per file and author (limited to our team) were counted
2) Trivial numbers were thrown away
3) Authors were sorted by lines added to file
4) All previous copyrights were replaced by the newly generated one
5) Hunks changing only year were discarded

It seems that a lot of my copyrights were added. That is because I both
stick my nose everywhere and am too lazy to update the copyright right
away :)

  • Property mode set to 100644
File size: 6.2 KB
/*
 * Copyright (c) 2018 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup libusb
 * @{
 */
/** @file
 */

#include <align.h>
#include <as.h>
#include <ddi.h>
#include <stddef.h>

#include "usb/dma_buffer.h"

/**
 * Create a DMA policy from flags and chunk size.
 *
 * @param[in] flags      DMA_POLICY_* flags
 * @param[in] chunk_size Power of two no smaller than PAGE_SIZE, or zero
 *                       if the whole buffer is physically contiguous
 */
dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
{
	assert((chunk_size & (chunk_size - 1)) == 0); /* Check if power of 2 */
	assert(chunk_size >= PAGE_SIZE || chunk_size == 0);

	return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
	    | (flags & DMA_POLICY_FLAGS_MASK);
}
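
/*
 * Usage sketch (hypothetical caller; the flag and chunk size are illustrative
 * only): a policy for a buffer that must reside below 4 GiB and is physically
 * contiguous at least in page-sized chunks.
 *
 *	dma_policy_t policy = dma_policy_create(DMA_POLICY_4GiB, PAGE_SIZE);
 */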

/**
 * As a driver typically uses only a few buffers at once, we cache the
 * physical mapping to avoid calling the kernel unnecessarily often. This
 * cache is global for a task.
 *
 * TODO: "few" is currently limited to one.
 */
static struct {
	const void *last;
	uintptr_t phys;
} phys_mapping_cache = { 0 };

static void cache_insert(const void *v, uintptr_t p)
{
	phys_mapping_cache.last = v;
	phys_mapping_cache.phys = p;
}

static void cache_evict(const void *v)
{
	if (phys_mapping_cache.last == v)
		phys_mapping_cache.last = NULL;
}

static bool cache_find(const void *v, uintptr_t *p)
{
	*p = phys_mapping_cache.phys;
	return phys_mapping_cache.last == v;
}

/**
 * Allocate a DMA buffer.
 *
 * @param[in] db dma_buffer_t structure to fill
 * @param[in] size Size of the required memory space
 * @param[in] policy dma_policy_t flags to guide the allocation
 * @return Error code.
 */
errno_t dma_buffer_alloc_policy(dma_buffer_t *db, size_t size, dma_policy_t policy)
{
	assert(db);

	const size_t real_size = ALIGN_UP(size, PAGE_SIZE);
	const bool need_4gib = !!(policy & DMA_POLICY_4GiB);

	const uintptr_t flags = need_4gib ? DMAMEM_4GiB : 0;

	uintptr_t phys;
	void *address = AS_AREA_ANY;

	const errno_t err = dmamem_map_anonymous(real_size,
	    flags, AS_AREA_READ | AS_AREA_WRITE, 0,
	    &phys, &address);
	if (err)
		return err;

	/* Access the pages to force mapping */
	volatile char *buf = address;
	for (size_t i = 0; i < size; i += PAGE_SIZE)
		buf[i] = 0xff;

	db->virt = address;
	db->policy = dma_policy_create(policy, 0);
	cache_insert(db->virt, phys);

	return EOK;
}
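
/*
 * Usage sketch (hypothetical caller; the 16 KiB size is illustrative only):
 * allocating a buffer that must reside below 4 GiB as one contiguous block.
 *
 *	dma_buffer_t db;
 *	const dma_policy_t policy = dma_policy_create(DMA_POLICY_4GiB, 0);
 *	const errno_t rc = dma_buffer_alloc_policy(&db, 16384, policy);
 *	if (rc != EOK)
 *		return rc;
 *	(db.virt now points to a mapped, DMA-capable buffer)
 */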

/**
 * Allocate a DMA buffer using the default policy.
 *
 * @param[in] db dma_buffer_t structure to fill
 * @param[in] size Size of the required memory space
 * @return Error code.
 */
errno_t dma_buffer_alloc(dma_buffer_t *db, size_t size)
{
	return dma_buffer_alloc_policy(db, size, DMA_POLICY_DEFAULT);
}

/**
 * Free a DMA buffer.
 *
 * @param[in] db dma_buffer_t structure whose buffer will be freed
 */
void dma_buffer_free(dma_buffer_t *db)
{
	if (db->virt) {
		dmamem_unmap_anonymous(db->virt);
		db->virt = NULL;
		db->policy = 0;
	}
}
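
/*
 * Lifecycle sketch with the default policy (hypothetical caller; the 4 KiB
 * size and the zero fill are illustrative only):
 *
 *	dma_buffer_t db;
 *	if (dma_buffer_alloc(&db, 4096) != EOK)
 *		return ENOMEM;
 *	memset(db.virt, 0, 4096);
 *	(... run the transfer ...)
 *	dma_buffer_free(&db);
 */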

/**
 * Convert a pointer inside a buffer to a physical address.
 *
 * @param[in] db   Buffer inside which virt points
 * @param[in] virt Pointer somewhere inside db
 * @return Physical address corresponding to virt, or 0 if it cannot be
 *         resolved.
 */
uintptr_t dma_buffer_phys(const dma_buffer_t *db, const void *virt)
{
	const size_t chunk_mask = dma_policy_chunk_mask(db->policy);
	const uintptr_t offset = (virt - db->virt) & chunk_mask;
	const void *chunk_base = virt - offset;

	uintptr_t phys;

	if (!cache_find(chunk_base, &phys)) {
		if (as_get_physical_mapping(chunk_base, &phys))
			return 0;
		cache_insert(chunk_base, phys);
	}

	return phys + offset;
}
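
/*
 * Usage sketch (hypothetical caller; the 64-byte offset is illustrative only):
 * obtaining the physical address of a descriptor placed inside an already
 * allocated buffer db, e.g. to hand it to hardware.
 *
 *	void *desc = db.virt + 64;
 *	const uintptr_t pa = dma_buffer_phys(&db, desc);
 *	if (pa == 0)
 *		return EINVAL;
 */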

/** Check whether every physically contiguous chunk of the buffer lies below
 * the 4 GiB boundary.
 */
static bool dma_buffer_is_4gib(dma_buffer_t *db, size_t size)
{
	/* With pointers at most 32 bits wide, all memory lies below 4 GiB */
	if (sizeof(uintptr_t) * 8 <= 32)
		return true;

	const size_t chunk_size = dma_policy_chunk_mask(db->policy) + 1;
	const size_t chunks = chunk_size ? size / chunk_size : 1;

	for (size_t c = 0; c < chunks; c++) {
		const void *addr = db->virt + (c * chunk_size);
		const uintptr_t phys = dma_buffer_phys(db, addr);

		if ((phys & DMAMEM_4GiB) != 0)
			return false;
	}

	return true;
}

/**
 * Lock an arbitrary buffer for DMA operations, creating a DMA buffer.
 *
 * FIXME: To handle page-unaligned buffers, we need to calculate the base
 * address and lock the whole first page. But as the operation is not yet
 * implemented in the kernel, it doesn't matter.
 *
 * @param[in] db   dma_buffer_t structure to fill
 * @param[in] virt Pointer to the buffer to be locked
 * @param[in] size Size of the buffer
 * @return Error code.
 */
errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
{
	assert(virt);

	uintptr_t phys;

	const errno_t err = dmamem_map(virt, size, 0, 0, &phys);
	if (err)
		return err;

	/* Provisional policy so that dma_buffer_phys() works below */
	db->virt = virt;
	db->policy = dma_policy_create(0, PAGE_SIZE);
	cache_insert(virt, phys);

	unsigned flags = -1U;
	if (!dma_buffer_is_4gib(db, size))
		flags &= ~DMA_POLICY_4GiB;
	db->policy = dma_policy_create(flags, PAGE_SIZE);

	return EOK;
}
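
/*
 * Usage sketch (hypothetical caller; client_buf and client_size stand for a
 * buffer provided by a client of the driver): locking an existing buffer for
 * the duration of a transfer and unlocking it afterwards.
 *
 *	dma_buffer_t db;
 *	const errno_t rc = dma_buffer_lock(&db, client_buf, client_size);
 *	if (rc != EOK)
 *		return rc;
 *	(... run the transfer, using dma_buffer_phys(&db, client_buf) ...)
 *	dma_buffer_unlock(&db, client_size);
 */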

/**
 * Unlock a buffer for DMA operations.
 */
void dma_buffer_unlock(dma_buffer_t *db, size_t size)
{
	if (db->virt) {
		dmamem_unmap(db->virt, size);
		db->virt = NULL;
		db->policy = 0;
	}
}

/**
 * Must be called when a buffer is received over IPC. Clears a potentially
 * stale cache entry left over from a different buffer that was mapped to the
 * same virtual address.
 */
void dma_buffer_acquire(dma_buffer_t *db)
{
	cache_evict(db->virt);
}

/**
 * Counterpart of acquire.
 */
void dma_buffer_release(dma_buffer_t *db)
{
	cache_evict(db->virt);
}
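
/*
 * Usage sketch (hypothetical receiver; db stands for a dma_buffer_t
 * reconstructed on the receiving side of IPC): acquire the buffer before
 * translating addresses so that a stale cache entry for the same virtual
 * address is dropped, and release it when done.
 *
 *	dma_buffer_acquire(&db);
 *	const uintptr_t pa = dma_buffer_phys(&db, db.virt);
 *	(... use pa ...)
 *	dma_buffer_release(&db);
 */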

/**
 * @}
 */