Changeset fbcdeb8 in mainline for kernel/generic/src
- Timestamp:
- 2011-12-19T17:30:39Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 58f6229
- Parents:
- 24cf31f1
- Location:
- kernel/generic/src
- Files:
- 6 edited
kernel/generic/src/ddi/ddi.c
--- r24cf31f1
+++ rfbcdeb8
  *
  * @param phys Physical address of the starting frame.
- * @param virt Virtual address of the starting page.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
+ * @param virt Virtual address of the starting page.
+ * @param bound Lowest virtual address bound.
  *
  * @return EOK on success.
  * @return EPERM if the caller lacks capabilities to use this syscall.
- * @return EBADMEM if phys or virt is not page aligned.
+ * @return EBADMEM if phys is not page aligned.
  * @return ENOENT if there is no task matching the specified ID or
  *         the physical address space is not enabled for mapping.
…
  *
  */
-NO_TRACE static int ddi_physmem_map(uintptr_t phys, uintptr_t virt, size_t pages,
-    unsigned int flags)
+NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
+    unsigned int flags, uintptr_t *virt, uintptr_t bound)
 {
     ASSERT(TASK);
 
     if ((phys % FRAME_SIZE) != 0)
-        return EBADMEM;
-
-    if ((virt % PAGE_SIZE) != 0)
         return EBADMEM;
 
…
 
 map:
-    if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages), virt,
-        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
+    if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
+        AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
         /*
          * The address space area was not created.
…
 }
 
+NO_TRACE static int physmem_unmap(uintptr_t virt)
+{
+    // TODO: implement unmap
+    return EOK;
+}
+
+/** Wrapper for SYS_PHYSMEM_MAP syscall.
+ *
+ * @param phys Physical base address to map
+ * @param pages Number of pages
+ * @param flags Flags of newly mapped pages
+ * @param virt_ptr Destination virtual address
+ * @param bound Lowest virtual address bound.
+ *
+ * @return 0 on success, otherwise it returns error code found in errno.h
+ *
+ */
+sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
+    void *virt_ptr, uintptr_t bound)
+{
+    uintptr_t virt = (uintptr_t) -1;
+    int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
+        &virt, bound);
+    if (rc != EOK)
+        return rc;
+
+    rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+    if (rc != EOK) {
+        physmem_unmap((uintptr_t) virt);
+        return rc;
+    }
+
+    return EOK;
+}
+
+sysarg_t sys_physmem_unmap(uintptr_t virt)
+{
+    return physmem_unmap(virt);
+}
+
 /** Enable range of I/O space for task.
  *
…
  *
  */
-NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
-    size_t size)
+NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
     /*
…
     /* Lock the task and release the lock protecting tasks_btree. */
     irq_spinlock_exchange(&tasks_lock, &task->lock);
-
     int rc = ddi_iospace_enable_arch(task, ioaddr, size);
-
     irq_spinlock_unlock(&task->lock, true);
 
     return rc;
-}
-
-/** Wrapper for SYS_PHYSMEM_MAP syscall.
- *
- * @param phys Physical base address to map
- * @param virt Destination virtual address
- * @param pages Number of pages
- * @param flags Flags of newly mapped pages
- *
- * @return 0 on success, otherwise it returns error code found in errno.h
- *
- */
-sysarg_t sys_physmem_map(uintptr_t phys, uintptr_t virt,
-    size_t pages, unsigned int flags)
-{
-    return (sysarg_t)
-        ddi_physmem_map(ALIGN_DOWN(phys, FRAME_SIZE),
-        ALIGN_DOWN(virt, PAGE_SIZE), pages, flags);
 }
 
…
         return (sysarg_t) rc;
 
-    return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
+    return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
         (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
-NO_TRACE static int dmamem_map(uintptr_t virt, size_t size,
-    unsigned int map_flags, unsigned int flags, void **phys)
+sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
+{
+    // TODO: implement
+    return ENOTSUP;
+}
+
+NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys)
 {
     ASSERT(TASK);
 
+    // TODO: implement locking of non-anonymous mapping
+    return page_find_mapping(virt, phys);
+}
+
+NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+{
+    ASSERT(TASK);
+
+    size_t pages = SIZE2FRAMES(size);
+    uint8_t order;
+
+    /* We need the 2^order >= pages */
+    if (pages == 1)
+        order = 0;
+    else
+        order = fnzb(pages - 1) + 1;
+
+    *phys = frame_alloc_noreserve(order, 0);
+    if (*phys == NULL)
+        return ENOMEM;
+
+    mem_backend_data_t backend_data;
+    backend_data.base = (uintptr_t) *phys;
+    backend_data.frames = pages;
+
+    if (!as_area_create(TASK->as, map_flags, size,
+        AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
+        frame_free_noreserve((uintptr_t) *phys);
+        return ENOMEM;
+    }
+
+    return EOK;
+}
+
+NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
+{
+    // TODO: implement unlocking & unmap
+    return EOK;
+}
+
+NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
+{
+    // TODO: implement unlocking & unmap
+    return EOK;
+}
+
+sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
+    void *phys_ptr, void *virt_ptr, uintptr_t bound)
+{
     if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
-        // TODO: implement locking of non-anonymous mapping
-        return page_find_mapping(virt, phys);
+        /*
+         * Non-anonymous DMA mapping
+         */
+
+        void *phys;
+        int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
+            flags, &phys);
+
+        if (rc != EOK)
+            return rc;
+
+        rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+        if (rc != EOK) {
+            dmamem_unmap((uintptr_t) virt_ptr, size);
+            return rc;
+        }
     } else {
-        // TODO: implement locking
-
-        if ((virt % PAGE_SIZE) != 0)
-            return EBADMEM;
-
-        size_t pages = SIZE2FRAMES(size);
-        uint8_t order;
-
-        /* We need the 2^order >= pages */
-        if (pages == 1)
-            order = 0;
-        else
-            order = fnzb(pages - 1) + 1;
-
-        *phys = frame_alloc_noreserve(order, 0);
-        if (*phys == NULL)
-            return ENOMEM;
-
-        mem_backend_data_t backend_data;
-        backend_data.base = (uintptr_t) *phys;
-        backend_data.frames = pages;
-
-        if (!as_area_create(TASK->as, map_flags, size, virt,
-            AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
-            frame_free_noreserve((uintptr_t) *phys);
-            return ENOMEM;
+        /*
+         * Anonymous DMA mapping
+         */
+
+        void *phys;
+        uintptr_t virt = (uintptr_t) -1;
+        int rc = dmamem_map_anonymous(size, map_flags, flags,
+            &phys, &virt, bound);
+        if (rc != EOK)
+            return rc;
+
+        rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+        if (rc != EOK) {
+            dmamem_unmap_anonymous((uintptr_t) virt);
+            return rc;
         }
 
-        return EOK;
-    }
-}
-
-NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size,
-    unsigned int flags)
-{
-    // TODO: implement unlocking & unmap
-    return EOK;
-}
-
-sysarg_t sys_dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
-    unsigned int flags, void *phys_ptr)
-{
-    void *phys;
-    int rc = dmamem_map(virt, size, map_flags, flags, &phys);
-    if (rc != EOK)
-        return rc;
-
-    rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
-    if (rc != EOK) {
-        dmamem_unmap(virt, size, flags);
-        return rc;
+        rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+        if (rc != EOK) {
+            dmamem_unmap_anonymous((uintptr_t) virt);
+            return rc;
+        }
     }
 
…
 sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
 {
-    return dmamem_unmap(virt, size, flags);
+    if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
+        return dmamem_unmap(virt, size);
+    else
+        return dmamem_unmap_anonymous(virt);
 }
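The rewritten wrappers above establish the convention used throughout this changeset: userspace no longer dictates virtual addresses. Callers pass an output pointer (virt_ptr) and a lowest acceptable address (bound); the kernel picks the mapping address and hands it back via copy_to_uspace(). Inside dmamem_map_anonymous(), the frame allocator is asked for a power-of-two block, so the page count is rounded up to the smallest order with 2^order >= pages. A standalone sketch of that rounding, with the kernel's fnzb() emulated portably (msb() and frame_order() are illustrative names, not changeset code):

    #include <stdio.h>
    #include <stddef.h>

    static unsigned msb(size_t v)          /* stand-in for the kernel's fnzb() */
    {
        unsigned bit = 0;
        while (v >>= 1)
            bit++;
        return bit;
    }

    /* Smallest order such that 2^order >= pages, as in dmamem_map_anonymous(). */
    static unsigned frame_order(size_t pages)
    {
        if (pages == 1)
            return 0;
        return msb(pages - 1) + 1;         /* round up to the next power of two */
    }

    int main(void)
    {
        for (size_t pages = 1; pages <= 9; pages++)
            printf("%zu page(s) -> order %u (%u frames)\n",
                pages, frame_order(pages), 1u << frame_order(pages));
        return 0;
    }

For instance, a 5-page request yields order 3, i.e. an 8-frame block.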
kernel/generic/src/ipc/sysipc.c
--- r24cf31f1
+++ rfbcdeb8
         irq_spinlock_unlock(&answer->sender->lock, true);
 
+        uintptr_t dst_base = (uintptr_t) -1;
         int rc = as_area_share(as, IPC_GET_ARG1(*olddata),
-            IPC_GET_ARG2(*olddata), AS,
-            IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
+            IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata),
+            &dst_base, IPC_GET_ARG1(answer->data));
+
+        if (rc == EOK)
+            rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data),
+                &dst_base, sizeof(dst_base));
+
         IPC_SET_RETVAL(answer->data, rc);
         return rc;
…
         irq_spinlock_unlock(&answer->sender->lock, true);
 
+        uintptr_t dst_base = (uintptr_t) -1;
         int rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
-            IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
-            IPC_GET_ARG2(answer->data));
+            IPC_GET_ARG1(*olddata), as, IPC_GET_ARG2(answer->data),
+            &dst_base, IPC_GET_ARG3(answer->data));
+        IPC_SET_ARG4(answer->data, dst_base);
         IPC_SET_RETVAL(answer->data, rc);
     }
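Both answer paths now follow the same out-parameter convention as the ddi.c syscalls: dst_base starts at -1, as_area_share() fills in the kernel-chosen address, and the result travels back to userspace either through copy_to_uspace() (first hunk) or in ARG4 of the answer (second hunk). A toy model of the convention (stub names invented for illustration; not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define EOK 0

    /* Invented stub standing in for as_area_share()/as_area_create(). */
    static int area_share_sim(uintptr_t *dst_base)
    {
        if (*dst_base == (uintptr_t) -1)
            *dst_base = 0x40000000;        /* pretend first-fit result */
        return EOK;
    }

    int main(void)
    {
        uintptr_t dst_base = (uintptr_t) -1;   /* "kernel, pick a hole" */
        if (area_share_sim(&dst_base) == EOK)
            printf("area shared at %#lx\n", (unsigned long) dst_base);
        return 0;
    }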
kernel/generic/src/lib/elf.c
--- r24cf31f1
+++ rfbcdeb8
     size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
 
-    as_area_t *area = as_area_create(as, flags, mem_sz, base,
-        AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
+    as_area_t *area = as_area_create(as, flags, mem_sz,
+        AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
     if (!area)
         return EE_MEMORY;
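The loader keeps dictating the load address: a concrete base goes in through the new pointer parameter, and the trailing 0 (bound) is ignored for fixed-base areas. The unchanged mem_sz line above is worth unpacking: base is the page-aligned start of the segment, so the segment's in-page offset must be added back to p_memsz. A worked example with hypothetical values (not from the changeset):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 0x1000
    #define ALIGN_DOWN(a, sz) ((a) & ~((uintptr_t) (sz) - 1))

    int main(void)
    {
        uintptr_t p_vaddr = 0x8049234;     /* hypothetical segment start */
        size_t p_memsz = 0x1800;           /* hypothetical segment size */

        uintptr_t base = ALIGN_DOWN(p_vaddr, PAGE_SIZE);   /* 0x8049000 */
        size_t mem_sz = p_memsz + (p_vaddr - base);        /* 0x1800 + 0x234 */

        printf("area base %#lx, size %#zx\n", (unsigned long) base, mem_sz);
        return 0;
    }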
kernel/generic/src/mm/as.c
--- r24cf31f1
+++ rfbcdeb8
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * The address space must be already locked when calling
+ * this function.
+ *
+ * @param as Address space.
+ * @param bound Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Address of the beginning of unmapped address space area.
+ * @return -1 if no suitable address space area was found.
+ *
+ */
+NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
+    size_t size)
+{
+    ASSERT(mutex_locked(&as->lock));
+
+    if (size == 0)
+        return (uintptr_t) -1;
+
+    /*
+     * Make sure we allocate from page-aligned
+     * address. Check for possible overflow in
+     * each step.
+     */
+
+    size_t pages = SIZE2FRAMES(size);
+
+    /*
+     * Find the lowest unmapped address aligned on the size
+     * boundary, not smaller than bound and of the required size.
+     */
+
+    /* First check the bound address itself */
+    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
+    if ((addr >= bound) &&
+        (check_area_conflicts(as, addr, pages, NULL)))
+        return addr;
+
+    /* Eventually check the addresses behind each area */
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
+
+        for (btree_key_t i = 0; i < node->keys; i++) {
+            as_area_t *area = (as_area_t *) node->value[i];
+
+            mutex_lock(&area->lock);
+
+            addr =
+                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+            bool avail =
+                ((addr >= bound) && (addr >= area->base) &&
+                (check_area_conflicts(as, addr, pages, area)));
+
+            mutex_unlock(&area->lock);
+
+            if (avail)
+                return addr;
+        }
+    }
+
+    /* No suitable address space area found */
+    return (uintptr_t) -1;
+}
+
 /** Create address space area of common attributes.
  *
…
  * @param flags Flags of the area memory.
  * @param size Size of area.
- * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param base Starting virtual address of the area.
+ *             If set to -1, a suitable mappable area is found.
+ * @param bound Lowest address bound if base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Address space area on success or NULL on failure.
…
  */
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data)
-{
-    if ((base % PAGE_SIZE) != 0)
+    unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
+{
+    if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
         return NULL;
…
     mutex_lock(&as->lock);
 
-    if (!check_area_conflicts(as, base, pages, NULL)) {
+    if (*base == (uintptr_t) -1) {
+        *base = as_get_unmapped_area(as, bound, size);
+        if (*base == (uintptr_t) -1) {
+            mutex_unlock(&as->lock);
+            return NULL;
+        }
+    }
+
+    if (!check_area_conflicts(as, *base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
     area->pages = pages;
     area->resident = 0;
-    area->base = base;
+    area->base = *base;
     area->sh_info = NULL;
     area->backend = backend;
…
 
     btree_create(&area->used_space);
-    btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+    btree_insert(&as->as_area_btree, *base, (void *) area,
+        NULL);
 
     mutex_unlock(&as->lock);
…
  * @param acc_size Expected size of the source area.
  * @param dst_as Pointer to destination address space.
- * @param dst_base Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
+ * @param dst_base Target base address. If set to -1,
+ *                 a suitable mappable area is found.
+ * @param bound Lowest address bound if dst_base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Zero on success.
…
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
+    uintptr_t bound)
 {
     mutex_lock(&src_as->lock);
…
      * to support sharing in less privileged mode.
      */
-    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
-        dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
+        src_size, AS_AREA_ATTR_PARTIAL, src_backend,
+        &src_backend_data, dst_base, bound);
     if (!dst_area) {
         /*
…
  */
 
-/** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
-{
-    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
-        AS_AREA_ATTR_NONE, &anon_backend, NULL))
-        return (sysarg_t) address;
-    else
+sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
+    uintptr_t bound)
+{
+    uintptr_t virt = base;
+    as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+        AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+    if (area == NULL)
         return (sysarg_t) -1;
-}
-
-/** Wrapper for as_area_resize(). */
+
+    return (sysarg_t) virt;
+}
+
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
…
 }
 
-/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
…
 }
 
-/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
     return (sysarg_t) as_area_destroy(AS, address);
-}
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-    if (size == 0)
-        return 0;
-
-    /*
-     * Make sure we allocate from page-aligned
-     * address. Check for possible overflow in
-     * each step.
-     */
-
-    size_t pages = SIZE2FRAMES(size);
-    uintptr_t ret = 0;
-
-    /*
-     * Find the lowest unmapped address aligned on the sz
-     * boundary, not smaller than base and of the required size.
-     */
-
-    mutex_lock(&AS->lock);
-
-    /* First check the base address itself */
-    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-    if ((addr >= base) &&
-        (check_area_conflicts(AS, addr, pages, NULL)))
-        ret = addr;
-
-    /* Eventually check the addresses behind each area */
-    list_foreach(AS->as_area_btree.leaf_list, cur) {
-        if (ret != 0)
-            break;
-
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
-
-        btree_key_t i;
-        for (i = 0; (ret == 0) && (i < node->keys); i++) {
-            uintptr_t addr;
-
-            as_area_t *area = (as_area_t *) node->value[i];
-
-            mutex_lock(&area->lock);
-
-            addr = ALIGN_UP(area->base + P2SZ(area->pages),
-                PAGE_SIZE);
-
-            if ((addr >= base) && (addr >= area->base) &&
-                (check_area_conflicts(AS, addr, pages, area)))
-                ret = addr;
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
-    mutex_unlock(&AS->lock);
-
-    return (sysarg_t) ret;
 }
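This hunk is the heart of the changeset. The first-fit lookup that used to be the separate sys_as_get_unmapped_area() syscall (removed at the end of the hunk) now runs inside as_area_create() whenever a caller passes *base == -1, under the same address-space lock as the conflict check. That closes the window in which another thread could map the returned hole between the two old syscalls. Callers with a fixed address, such as elf.c above and program.c below, pass a concrete *base, and bound is ignored. A standalone sketch of the search policy (a flat, base-sorted array stands in for the B+ tree; the kernel version also takes per-area locks):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_UP(a, sz) (((a) + ((sz) - 1)) & ~((uintptr_t) (sz) - 1))

    typedef struct {
        uintptr_t base;
        size_t size;
    } area_t;

    /* Hypothetical existing mappings, sorted by base. */
    static const area_t areas[] = {
        { 0x10000, 0x4000 },
        { 0x14000, 0x8000 },
        { 0x30000, 0x1000 },
    };
    static const size_t area_count = sizeof(areas) / sizeof(areas[0]);

    static int conflicts(uintptr_t addr, size_t size)
    {
        for (size_t i = 0; i < area_count; i++)
            if ((addr < areas[i].base + areas[i].size) &&
                (areas[i].base < addr + size))
                return 1;
        return 0;
    }

    static uintptr_t find_unmapped(uintptr_t bound, size_t size)
    {
        /* First check the bound address itself. */
        uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
        if ((addr >= bound) && !conflicts(addr, size))
            return addr;

        /* Then the first page-aligned address behind each area. */
        for (size_t i = 0; i < area_count; i++) {
            addr = ALIGN_UP(areas[i].base + areas[i].size, PAGE_SIZE);
            if ((addr >= bound) && !conflicts(addr, size))
                return addr;
        }

        return (uintptr_t) -1;    /* no suitable hole */
    }

    int main(void)
    {
        printf("hole at %#lx\n", (unsigned long) find_unmapped(0x10000, 0x8000));
        return 0;
    }

With these mappings, a request for 0x8000 bytes at or above 0x10000 skips the two occupied candidates and lands at 0x1c000, the gap before the third area.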
kernel/generic/src/proc/program.c
--- r24cf31f1
+++ rfbcdeb8
      * Create the stack address space area.
      */
+    uintptr_t virt = USTACK_ADDRESS;
     as_area_t *area = as_area_create(as,
         AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-        STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE,
-        &anon_backend, NULL);
+        STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
     if (!area)
         return ENOMEM;
kernel/generic/src/syscall/syscall.c
--- r24cf31f1
+++ rfbcdeb8
     (syshandler_t) sys_as_area_change_flags,
     (syshandler_t) sys_as_area_destroy,
-    (syshandler_t) sys_as_get_unmapped_area,
 
     /* Page mapping related syscalls. */
…
     (syshandler_t) sys_device_assign_devno,
     (syshandler_t) sys_physmem_map,
+    (syshandler_t) sys_physmem_unmap,
     (syshandler_t) sys_dmamem_map,
     (syshandler_t) sys_dmamem_unmap,
     (syshandler_t) sys_iospace_enable,
+    (syshandler_t) sys_iospace_disable,
     (syshandler_t) sys_irq_register,
     (syshandler_t) sys_irq_unregister,
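One practical note: the handler table is indexed positionally, so dropping sys_as_get_unmapped_area and inserting sys_physmem_unmap and sys_iospace_disable renumbers every later syscall; userspace stubs must be rebuilt against the matching enum from the same revision. A toy illustration of the dispatch scheme (names invented; not HelenOS code):

    #include <stdio.h>

    typedef int (*syshandler_t)(void);

    static int sys_foo(void) { return 1; }
    static int sys_bar(void) { return 2; }

    /* Deleting an enumerator here silently shifts all later numbers. */
    enum { SYS_FOO, SYS_BAR };

    static const syshandler_t syscall_table[] = {
        [SYS_FOO] = sys_foo,
        [SYS_BAR] = sys_bar,
    };

    int main(void)
    {
        printf("%d\n", syscall_table[SYS_BAR]());   /* dispatch by number */
        return 0;
    }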