/*
 * Copyright (c) 2018 Ondrej Hlavaty
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup libusb
 * @{
 */
/** @file
 */

#include <align.h>
#include <as.h>
#include <assert.h>
#include <ddi.h>
#include <errno.h>
#include <stddef.h>

#include "usb/dma_buffer.h"

/**
 * Create a DMA policy from flags and a chunk size.
 *
 * @param[in] flags DMA_POLICY_* flags
 * @param[in] chunk_size Size of one physically contiguous chunk. Must be
 *            a power of two no smaller than PAGE_SIZE, or zero when the
 *            whole buffer is to be physically contiguous.
 * @return The packed dma_policy_t value.
 */
dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
{
        assert((chunk_size & (chunk_size - 1)) == 0); /* Check if power of 2 */
        assert(chunk_size >= PAGE_SIZE || chunk_size == 0);

        return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK) |
            (flags & DMA_POLICY_FLAGS_MASK);
}
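
/*
 * Illustrative example (assumes PAGE_SIZE == 4096): a policy for a buffer
 * built from physically contiguous 16 KiB chunks allocated below 4 GiB
 * could be created as
 *
 *     dma_policy_t policy = dma_policy_create(DMA_POLICY_4GiB, 16 * 1024);
 *
 * Since the chunk size is stored as a mask (chunk_size - 1),
 * dma_policy_chunk_mask(policy) then yields 0x3fff.
 */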

/**
 * As a driver typically uses only a few buffers at once, we cache the last
 * physical mapping to avoid calling the kernel unnecessarily often. This
 * cache is global for a task.
 *
 * TODO: "few" is currently limited to one.
 */
static struct {
        const void *last;
        uintptr_t phys;
} phys_mapping_cache = { 0 };

static void cache_insert(const void *v, uintptr_t p)
{
        phys_mapping_cache.last = v;
        phys_mapping_cache.phys = p;
}

static void cache_evict(const void *v)
{
        if (phys_mapping_cache.last == v)
                phys_mapping_cache.last = NULL;
}

/** Look up a cached mapping. The value stored to @p p is valid only when
 * true is returned. */
static bool cache_find(const void *v, uintptr_t *p)
{
        *p = phys_mapping_cache.phys;
        return phys_mapping_cache.last == v;
}

/**
 * Allocate a DMA buffer.
 *
 * @param[in] db dma_buffer_t structure to fill
 * @param[in] size Size of the required memory space
 * @param[in] policy dma_policy_t flags to guide the allocation
 * @return Error code.
 */
errno_t dma_buffer_alloc_policy(dma_buffer_t *db, size_t size, dma_policy_t policy)
{
        assert(db);

        const size_t real_size = ALIGN_UP(size, PAGE_SIZE);
        const bool need_4gib = !!(policy & DMA_POLICY_4GiB);

        const uintptr_t flags = need_4gib ? DMAMEM_4GiB : 0;

        uintptr_t phys;
        void *address = AS_AREA_ANY;

        const errno_t err = dmamem_map_anonymous(real_size,
            flags, AS_AREA_READ | AS_AREA_WRITE, 0,
            &phys, &address);
        if (err)
                return err;

        /* Access the pages to force mapping */
        volatile char *buf = address;
        for (size_t i = 0; i < size; i += PAGE_SIZE)
                buf[i] = 0xff;

        db->virt = address;
        db->policy = dma_policy_create(policy, 0);
        cache_insert(db->virt, phys);

        return EOK;
}

/**
 * Allocate a DMA buffer using the default policy.
 *
 * @param[in] db dma_buffer_t structure to fill
 * @param[in] size Size of the required memory space
 * @return Error code.
 */
errno_t dma_buffer_alloc(dma_buffer_t *db, size_t size)
{
        return dma_buffer_alloc_policy(db, size, DMA_POLICY_DEFAULT);
}
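
/*
 * Usage sketch (illustrative only; error handling shortened): allocate a
 * page-sized buffer, obtain the physical address of its start for a
 * hardware descriptor, and free it afterwards.
 *
 *     dma_buffer_t db;
 *     if (dma_buffer_alloc(&db, PAGE_SIZE) != EOK)
 *             return ENOMEM;
 *     const uintptr_t phys = dma_buffer_phys(&db, db.virt);
 *     // ... program the device with phys ...
 *     dma_buffer_free(&db);
 */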

/**
 * Free a DMA buffer.
 *
 * @param[in] db dma_buffer_t structure whose buffer is to be freed
 */
void dma_buffer_free(dma_buffer_t *db)
{
        if (db->virt) {
                dmamem_unmap_anonymous(db->virt);
                db->virt = NULL;
                db->policy = 0;
        }
}

/**
 * Convert a pointer inside a buffer to a physical address.
 *
 * @param[in] db Buffer at which virt is pointing
 * @param[in] virt Pointer somewhere inside db
 * @return Physical address, or 0 on failure.
 */
uintptr_t dma_buffer_phys(const dma_buffer_t *db, const void *virt)
{
        const size_t chunk_mask = dma_policy_chunk_mask(db->policy);
        const uintptr_t offset = (virt - db->virt) & chunk_mask;
        const void *chunk_base = virt - offset;

        uintptr_t phys;

        if (!cache_find(chunk_base, &phys)) {
                if (as_get_physical_mapping(chunk_base, &phys))
                        return 0;
                cache_insert(chunk_base, phys);
        }

        return phys + offset;
}
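
/*
 * Worked example (illustrative): with a contiguous policy the chunk mask
 * covers the whole buffer, so for a buffer at db->virt == V with physical
 * base P, dma_buffer_phys(db, V + 42) computes offset == 42,
 * chunk_base == V, and returns P + 42. With a 16 KiB chunk policy, a
 * pointer into the second chunk is resolved against the physical base of
 * that chunk instead, since the chunks need not be physically adjacent.
 */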

/** Check that every chunk of the buffer is physically below 4 GiB. */
static bool dma_buffer_is_4gib(dma_buffer_t *db, size_t size)
{
        /* A 32-bit physical address can never exceed 4 GiB */
        if (sizeof(uintptr_t) <= 4)
                return true;

        const size_t chunk_size = dma_policy_chunk_mask(db->policy) + 1;
        const size_t chunks = chunk_size == 0 ? 1 : size / chunk_size;

        for (size_t c = 0; c < chunks; c++) {
                const void *addr = db->virt + (c * chunk_size);
                const uintptr_t phys = dma_buffer_phys(db, addr);

                if ((phys & DMAMEM_4GiB) != 0)
                        return false;
        }

        return true;
}

/**
 * Lock an arbitrary buffer for DMA operations, creating a DMA buffer.
 *
 * FIXME: To handle page-unaligned buffers, we need to calculate the base
 *        address and lock the whole first page. But as the operation is not
 *        yet implemented in the kernel, it doesn't matter.
 */
errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
{
        assert(virt);

        uintptr_t phys;

        const errno_t err = dmamem_map(virt, size, 0, 0, &phys);
        if (err)
                return err;

        db->virt = virt;
        /* Set a preliminary policy so that dma_buffer_phys() works below */
        db->policy = dma_policy_create(0, PAGE_SIZE);
        cache_insert(virt, phys);

        /* Start with all flags set, then drop those the buffer cannot satisfy */
        unsigned flags = -1U;
        if (!dma_buffer_is_4gib(db, size))
                flags &= ~DMA_POLICY_4GiB;
        db->policy = dma_policy_create(flags, PAGE_SIZE);

        return EOK;
}

/**
 * Unlock a buffer for DMA operations.
 */
void dma_buffer_unlock(dma_buffer_t *db, size_t size)
{
        if (db->virt) {
                dmamem_unmap(db->virt, size);
                db->virt = NULL;
                db->policy = 0;
        }
}
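
/*
 * Usage sketch (illustrative only; data and len are hypothetical): lock an
 * existing buffer for a DMA transfer, then unlock it once the hardware is
 * done, passing the same size to both calls.
 *
 *     dma_buffer_t db;
 *     if (dma_buffer_lock(&db, data, len) != EOK)
 *             return EIO;
 *     // ... hand dma_buffer_phys(&db, data) to the device ...
 *     dma_buffer_unlock(&db, len);
 */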

/**
 * Must be called when the buffer is received over IPC. Clears a potentially
 * stale cache entry left over from a different buffer mapped at the same
 * virtual address.
 */
void dma_buffer_acquire(dma_buffer_t *db)
{
        cache_evict(db->virt);
}

/**
 * Counterpart of dma_buffer_acquire().
 */
void dma_buffer_release(dma_buffer_t *db)
{
        cache_evict(db->virt);
}
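
/*
 * Illustrative sequence: a task receiving a shared buffer over IPC brackets
 * its use, so that a mapping cached for a previous buffer at the same
 * virtual address is never reused:
 *
 *     dma_buffer_acquire(&db);
 *     const uintptr_t phys = dma_buffer_phys(&db, db.virt);
 *     // ... perform the transfer ...
 *     dma_buffer_release(&db);
 */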

/**
 * @}
 */