Changeset 1d758fc in mainline for uspace/lib/usb/src/dma_buffer.c
- Timestamp:
- 2018-02-12T10:11:47Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 5fe3f954
- Parents:
- 2f762a7
- git-author:
- Ondřej Hlavatý <aearsis@…> (2018-02-05 03:28:50)
- git-committer:
- Ondřej Hlavatý <aearsis@…> (2018-02-12 10:11:47)
- File:
- 1 edited
Legend:
- Unmodified: context lines, prefixed with a space
- Added: lines prefixed with +
- Removed: lines prefixed with -
uspace/lib/usb/src/dma_buffer.c
--- uspace/lib/usb/src/dma_buffer.c (r2f762a7)
+++ uspace/lib/usb/src/dma_buffer.c (r1d758fc)
 #include "usb/dma_buffer.h"
 
+dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
+{
+    assert((chunk_size & (chunk_size - 1)) == 0); /* Check if power of 2 */
+    assert(chunk_size >= PAGE_SIZE || chunk_size == 0);
+
+    return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
+        | (flags & DMA_POLICY_FLAGS_MASK);
+}
+
+/**
+ * As the driver is typically using only a few buffers at once, we cache the
+ * physical mapping to avoid calling the kernel unnecessarily often. This cache
+ * is global for a task.
+ *
+ * TODO: "few" is currently limited to one.
+ */
+static struct {
+    const void *last;
+    uintptr_t phys;
+} phys_mapping_cache = { 0 };
+
+static void cache_insert(const void *v, uintptr_t p)
+{
+    phys_mapping_cache.last = v;
+    phys_mapping_cache.phys = p;
+}
+
+static void cache_evict(const void *v)
+{
+    if (phys_mapping_cache.last == v)
+        phys_mapping_cache.last = NULL;
+}
+
+static bool cache_find(const void *v, uintptr_t *p)
+{
+    *p = phys_mapping_cache.phys;
+    return phys_mapping_cache.last == v;
+}
+
 /**
  * Allocate a DMA buffer.
- *
- * XXX: Currently cannot make much use of missing constraints, as it always
- * allocates page-aligned contiguous buffer. We rely on it in dma_buffer_phys.
  *
  * @param[in] db dma_buffer_t structure to fill
…
     void *address = AS_AREA_ANY;
 
-    const int ret = dmamem_map_anonymous(real_size,
+    const int err = dmamem_map_anonymous(real_size,
         flags, AS_AREA_READ | AS_AREA_WRITE, 0,
         &phys, &address);
-
-    if (ret == EOK) {
-        /* Access the pages to force mapping */
-        volatile char *buf = address;
-        for (size_t i = 0; i < size; i += PAGE_SIZE)
-            buf[i] = 0xff;
-
-        db->virt = address;
-        db->phys = phys;
-    }
-    return ret;
+    if (err)
+        return err;
+
+    /* Access the pages to force mapping */
+    volatile char *buf = address;
+    for (size_t i = 0; i < size; i += PAGE_SIZE)
+        buf[i] = 0xff;
+
+    db->virt = address;
+    db->policy = dma_policy_create(policy, 0);
+    cache_insert(db->virt, phys);
+
+    return EOK;
 }
…
         dmamem_unmap_anonymous(db->virt);
         db->virt = NULL;
-        db->phys = 0;
+        db->policy = 0;
     }
 }
…
  * @param[in] virt Pointer somewhere inside db
  */
-uintptr_t dma_buffer_phys(const dma_buffer_t *db, void *virt)
-{
-    return db->phys + (virt - db->virt);
-}
-
-/**
- * Check whether a memory area is compatible with a policy.
- *
- * Useful to skip copying when the buffer is already ready to be given to
- * hardware as is.
- *
- * Note that the "as_get_physical_mapping" fails when the page is not mapped
- * yet, and that the caller is responsible for forcing the mapping.
- */
-bool dma_buffer_check_policy(const void *buffer, size_t size, const dma_policy_t policy)
-{
-    uintptr_t addr = (uintptr_t) buffer;
-
-    const bool check_4gib = !!(policy & DMA_POLICY_4GiB);
-    const bool check_crossing = !!(policy & DMA_POLICY_NOT_CROSSING);
-    const bool check_alignment = !!(policy & DMA_POLICY_PAGE_ALIGNED);
-    const bool check_contiguous = !!(policy & DMA_POLICY_CONTIGUOUS);
-
-    /* Check the two conditions that are easy */
-    if (check_crossing && (addr + size - 1) / PAGE_SIZE != addr / PAGE_SIZE)
-        goto violated;
-
-    if (check_alignment && ((uintptr_t) buffer) % PAGE_SIZE)
-        goto violated;
-
-    /*
-     * For these conditions, we need to walk through pages and check
-     * physical address of each one
-     */
-    if (check_contiguous || check_4gib) {
-        const void *virt = buffer;
-        uintptr_t phys;
-
-        /* Get the mapping of the first page */
-        if (as_get_physical_mapping(virt, &phys))
-            goto error;
-
-        /* First page can already break 4GiB condition */
-        if (check_4gib && (phys & DMAMEM_4GiB) != 0)
-            goto violated;
-
-        while (size >= PAGE_SIZE) {
-            /* Move to the next page */
-            virt += PAGE_SIZE;
-            size -= PAGE_SIZE;
-
-            uintptr_t last_phys = phys;
-            if (as_get_physical_mapping(virt, &phys))
-                goto error;
-
-            if (check_contiguous && (phys - last_phys) != PAGE_SIZE)
-                goto violated;
-
-            if (check_4gib && (phys & DMAMEM_4GiB) != 0)
-                goto violated;
-        }
-    }
-
-    /* All checks passed */
+uintptr_t dma_buffer_phys(const dma_buffer_t *db, const void *virt)
+{
+    const size_t chunk_mask = dma_policy_chunk_mask(db->policy);
+    const uintptr_t offset = (virt - db->virt) & chunk_mask;
+    const void *chunk_base = virt - offset;
+
+    uintptr_t phys;
+
+    if (!cache_find(chunk_base, &phys)) {
+        if (as_get_physical_mapping(chunk_base, &phys))
+            return 0;
+        cache_insert(chunk_base, phys);
+    }
+
+    return phys + offset;
+}
+
+static bool dma_buffer_is_4gib(dma_buffer_t *db, size_t size)
+{
+    if (sizeof(uintptr_t) <= 32)
+        return true;
+
+    const size_t chunk_size = dma_policy_chunk_mask(db->policy) + 1;
+    const size_t chunks = chunk_size ? 1 : size / chunk_size;
+
+    for (size_t c = 0; c < chunks; c++) {
+        const void *addr = db->virt + (c * chunk_size);
+        const uintptr_t phys = dma_buffer_phys(db, addr);
+
+        if ((phys & DMAMEM_4GiB) != 0)
+            return false;
+    }
+
     return true;
-
-violated:
-error:
-    return false;
 }
…
 errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
 {
+    assert(virt);
+
+    uintptr_t phys;
+
+    const errno_t err = dmamem_map(virt, size, 0, 0, &phys);
+    if (err)
+        return err;
+
     db->virt = virt;
-    return dmamem_map(db->virt, size, 0, 0, &db->phys);
+    db->policy = dma_policy_create(0, PAGE_SIZE);
+    cache_insert(virt, phys);
+
+    unsigned flags = -1U;
+    if (!dma_buffer_is_4gib(db, size))
+        flags &= ~DMA_POLICY_4GiB;
+    db->policy = dma_policy_create(flags, PAGE_SIZE);
+
+    return EOK;
 }
…
         dmamem_unmap(db->virt, size);
         db->virt = NULL;
-        db->phys = 0;
-    }
+        db->policy = 0;
+    }
+}
+
+/**
+ * Must be called when the buffer is received over IPC. Clears potentially
+ * leftover value from a different buffer mapped to the same virtual address.
+ */
+void dma_buffer_acquire(dma_buffer_t *db)
+{
+    cache_evict(db->virt);
+}
+
+/**
+ * Counterpart of acquire.
+ */
+void dma_buffer_release(dma_buffer_t *db)
+{
+    cache_evict(db->virt);
 }
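The sketches below are editorial additions, not part of the changeset; they assume only the identifiers visible in this diff plus the declarations of usb/dma_buffer.h. The change replaces the cached physical base (db->phys) with a policy word and a per-task physical-mapping cache. dma_policy_create() packs chunk_size - 1 into the low bits and the flag bits above them, so chunk_size == 0 encodes "the whole buffer is one contiguous chunk". A minimal round-trip, assuming PAGE_SIZE == 4096 and that dma_policy_chunk_mask() simply masks out the stored chunk bits:

    #include <assert.h>
    #include <usb/dma_buffer.h>

    static void policy_encoding_example(void)
    {
        /* 32-bit-addressable buffer, contiguous in PAGE_SIZE chunks */
        const dma_policy_t policy =
            dma_policy_create(DMA_POLICY_4GiB, PAGE_SIZE);

        /* chunk_size - 1 is stored directly, so the mask comes back as-is */
        assert(dma_policy_chunk_mask(policy) == PAGE_SIZE - 1);
        assert(policy & DMA_POLICY_4GiB);
    }

Requiring a power-of-two chunk size (the first assert in dma_policy_create) is what lets dma_buffer_phys() reduce an arbitrary pointer to its chunk base with a single mask.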
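dma_buffer_phys() no longer assumes the whole buffer is physically contiguous. It masks the pointer's offset down to the containing chunk base, consults the one-entry cache, falls back to as_get_physical_mapping() on a miss, and adds the in-chunk offset back. A hypothetical helper built on that contract (the helper name and the idea of a descriptor at an offset are illustrative only):

    #include <assert.h>
    #include <usb/dma_buffer.h>

    /* Physical address of a descriptor placed at an offset inside an
     * already-mapped dma_buffer_t. The descriptor must not straddle a
     * chunk boundary, since contiguity holds only within one chunk. */
    static uintptr_t descriptor_phys(dma_buffer_t *db, size_t offset)
    {
        uintptr_t phys = dma_buffer_phys(db, db->virt + offset);

        /* In this version, 0 signals a failed kernel lookup. */
        assert(phys != 0);
        return phys;
    }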
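dma_buffer_lock() now verifies instead of trusting the caller: it maps the given memory, records a PAGE_SIZE-chunk policy, and keeps DMA_POLICY_4GiB only if dma_buffer_is_4gib() found no physical address above 4 GiB. A sketch of the lock/use/unlock lifecycle for memory the driver did not allocate itself, assuming the unlock counterpart is the dma_buffer_unlock(db, size) whose body appears in the last hunk above; the device transfer itself is elided:

    #include <errno.h>
    #include <usb/dma_buffer.h>

    static errno_t dma_to_foreign_buffer(void *virt, size_t size)
    {
        dma_buffer_t db;

        const errno_t err = dma_buffer_lock(&db, virt, size);
        if (err != EOK)
            return err;

        /* db.policy now reflects reality: PAGE_SIZE chunks, and the
         * 4GiB flag only if every chunk tested below 4 GiB. */
        uintptr_t phys = dma_buffer_phys(&db, virt);
        (void) phys; /* ... program the device with phys ... */

        dma_buffer_unlock(&db, size);
        return EOK;
    }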
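Finally, dma_buffer_acquire()/dma_buffer_release() exist because the mapping cache is global to the task and keyed by virtual address alone: a buffer received over IPC may be mapped at a virtual address whose cache entry still describes a different buffer. A sketch of the intended hand-off, with the IPC plumbing omitted:

    #include <usb/dma_buffer.h>

    static void on_buffer_received(dma_buffer_t *db)
    {
        /* Evict any stale cache entry left for this virtual address */
        dma_buffer_acquire(db);

        /* ... dma_buffer_phys(db, ...) now re-queries the kernel ... */

        /* Counterpart: evicts again when handing the buffer back */
        dma_buffer_release(db);
    }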