Changeset 153cc76a in mainline for kernel/generic/src

- Timestamp: 2011-12-23T16:42:22Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7e1b130
- Parents: 4291215, 2f0dd2a
- Location: kernel/generic/src
- Files: 8 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to either parent individually.
kernel/generic/src/ddi/ddi.c

--- r4291215
+++ r153cc76a
@@ … @@
 #include <mm/frame.h>
 #include <mm/as.h>
+#include <mm/page.h>
 #include <synch/mutex.h>
 #include <syscall/copy.h>
@@ … @@
 #include <errno.h>
 #include <trace.h>
+#include <bitops.h>
 
 /** This lock protects the parea_btree. */
@@ … @@
 /** Map piece of physical memory into virtual address space of current task.
  *
- * @param pf    Physical address of the starting frame.
- * @param vp    Virtual address of the starting page.
+ * @param phys  Physical address of the starting frame.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
- *
- * @return 0 on success, EPERM if the caller lacks capabilities to use this
- *         syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there
- *         is no task matching the specified ID or the physical address space
- *         is not enabled for mapping and ENOMEM if there was a problem in
- *         creating address space area.
- *
- */
-NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages,
-    unsigned int flags)
+ * @param virt  Virtual address of the starting page.
+ * @param bound Lowest virtual address bound.
+ *
+ * @return EOK on success.
+ * @return EPERM if the caller lacks capabilities to use this syscall.
+ * @return EBADMEM if phys is not page aligned.
+ * @return ENOENT if there is no task matching the specified ID or
+ *         the physical address space is not enabled for mapping.
+ * @return ENOMEM if there was a problem in creating address space area.
+ *
+ */
+NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
+    unsigned int flags, uintptr_t *virt, uintptr_t bound)
 {
 	ASSERT(TASK);
 
-	if ((pf % FRAME_SIZE) != 0)
-		return EBADMEM;
-
-	if ((vp % PAGE_SIZE) != 0)
+	if ((phys % FRAME_SIZE) != 0)
 		return EBADMEM;
 
@@ … @@
 	mem_backend_data_t backend_data;
-	backend_data.base = pf;
+	backend_data.base = phys;
 	backend_data.frames = pages;
@@ … @@
 	btree_node_t *nodep;
 	parea_t *parea = (parea_t *) btree_search(&parea_btree,
-	    (btree_key_t) pf, &nodep);
+	    (btree_key_t) phys, &nodep);
 
 	if ((parea != NULL) && (parea->frames >= pages)) {
@@ … @@
 	irq_spinlock_lock(&zones.lock, true);
-	size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
+	size_t znum = find_zone(ADDR2PFN(phys), pages, 0);
 
 	if (znum == (size_t) -1) {
@@ … @@
 map:
-	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
-	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
+	if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
+	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
 		/*
 		 * The address space area was not created.
@@ … @@
 }
 
+NO_TRACE static int physmem_unmap(uintptr_t virt)
+{
+	// TODO: implement unmap
+	return EOK;
+}
+
+/** Wrapper for SYS_PHYSMEM_MAP syscall.
+ *
+ * @param phys     Physical base address to map
+ * @param pages    Number of pages
+ * @param flags    Flags of newly mapped pages
+ * @param virt_ptr Destination virtual address
+ * @param bound    Lowest virtual address bound.
+ *
+ * @return 0 on success, otherwise it returns error code found in errno.h
+ *
+ */
+sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
+    void *virt_ptr, uintptr_t bound)
+{
+	uintptr_t virt = (uintptr_t) -1;
+	int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
+	    &virt, bound);
+	if (rc != EOK)
+		return rc;
+
+	rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+	if (rc != EOK) {
+		physmem_unmap((uintptr_t) virt);
+		return rc;
+	}
+
+	return EOK;
+}
+
+sysarg_t sys_physmem_unmap(uintptr_t virt)
+{
+	return physmem_unmap(virt);
+}
+
 /** Enable range of I/O space for task.
  *
@@ … @@
  *
  */
-NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
-    size_t size)
+NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
 	/*
@@ … @@
 	/* Lock the task and release the lock protecting tasks_btree. */
 	irq_spinlock_exchange(&tasks_lock, &task->lock);
-
 	int rc = ddi_iospace_enable_arch(task, ioaddr, size);
-
 	irq_spinlock_unlock(&task->lock, true);
 
 	return rc;
-}
-
-/** Wrapper for SYS_PHYSMEM_MAP syscall.
- *
- * @param phys_base Physical base address to map
- * @param virt_base Destination virtual address
- * @param pages     Number of pages
- * @param flags     Flags of newly mapped pages
- *
- * @return 0 on success, otherwise it returns error code found in errno.h
- *
- */
-sysarg_t sys_physmem_map(sysarg_t phys_base, sysarg_t virt_base,
-    sysarg_t pages, sysarg_t flags)
-{
-	return (sysarg_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
-	    FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
-	    (size_t) pages, (int) flags);
 }
@@ … @@
 		return (sysarg_t) rc;
 
-	return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
+	return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
 	    (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
+sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
+{
+	// TODO: implement
+	return ENOTSUP;
+}
+
+NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys)
+{
+	ASSERT(TASK);
+
+	// TODO: implement locking of non-anonymous mapping
+	return page_find_mapping(virt, phys);
+}
+
+NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
+    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
+{
+	ASSERT(TASK);
+
+	size_t pages = SIZE2FRAMES(size);
+	uint8_t order;
+
+	/* We need the 2^order >= pages */
+	if (pages == 1)
+		order = 0;
+	else
+		order = fnzb(pages - 1) + 1;
+
+	*phys = frame_alloc_noreserve(order, 0);
+	if (*phys == NULL)
+		return ENOMEM;
+
+	mem_backend_data_t backend_data;
+	backend_data.base = (uintptr_t) *phys;
+	backend_data.frames = pages;
+
+	if (!as_area_create(TASK->as, map_flags, size,
+	    AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
+		frame_free_noreserve((uintptr_t) *phys);
+		return ENOMEM;
+	}
+
+	return EOK;
+}
+
+NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
+{
+	// TODO: implement unlocking & unmap
+	return EOK;
+}
+
+NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
+{
+	// TODO: implement unlocking & unmap
+	return EOK;
+}
+
+sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
+    void *phys_ptr, void *virt_ptr, uintptr_t bound)
+{
+	if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
+		/*
+		 * Non-anonymous DMA mapping
+		 */
+
+		void *phys;
+		int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
+		    flags, &phys);
+
+		if (rc != EOK)
+			return rc;
+
+		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+		if (rc != EOK) {
+			dmamem_unmap((uintptr_t) virt_ptr, size);
+			return rc;
+		}
+	} else {
+		/*
+		 * Anonymous DMA mapping
+		 */
+
+		void *phys;
+		uintptr_t virt = (uintptr_t) -1;
+		int rc = dmamem_map_anonymous(size, map_flags, flags,
+		    &phys, &virt, bound);
+		if (rc != EOK)
+			return rc;
+
+		rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+		if (rc != EOK) {
+			dmamem_unmap_anonymous((uintptr_t) virt);
+			return rc;
+		}
+
+		rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
+		if (rc != EOK) {
+			dmamem_unmap_anonymous((uintptr_t) virt);
+			return rc;
+		}
+	}
+
+	return EOK;
+}
+
+sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
+{
+	if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
+		return dmamem_unmap(virt, size);
+	else
+		return dmamem_unmap_anonymous(virt);
+}
+
 /** @}
  */
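The anonymous dmamem path above rounds every allocation up to a whole power-of-two block of frames: for a request of pages frames it picks the smallest order with 2^order >= pages, computed as fnzb(pages - 1) + 1. A minimal, self-contained sketch of that computation; the fnzb() stand-in assumes the kernel's <bitops.h> version returns the index of the most significant set bit:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's fnzb() from <bitops.h>; assumption:
     * it returns the index of the most significant set bit. */
    static uint8_t fnzb(uint32_t arg)
    {
        uint8_t n = 0;
        while (arg >>= 1)
            n++;
        return n;
    }

    int main(void)
    {
        for (uint32_t pages = 1; pages <= 9; pages++) {
            /* Same rule as dmamem_map_anonymous(): 2^order >= pages. */
            uint8_t order = (pages == 1) ? 0 : fnzb(pages - 1) + 1;
            printf("pages=%u -> order=%u (allocates %u frames)\n",
                pages, order, 1u << order);
        }
        return 0;
    }

So a 5-page request allocates 8 frames; the slack is the cost of frame_alloc_noreserve() accepting only power-of-two orders.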
kernel/generic/src/ipc/irq.c

--- r4291215
+++ r153cc76a
@@ … @@
  *
  * The structure of a notification message is as follows:
- * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
- *   syscall
+ * - IMETHOD: interface and method as registered by
+ *   the SYS_IRQ_REGISTER syscall
  * - ARG1: payload modified by a 'top-half' handler
  * - ARG2: payload modified by a 'top-half' handler
@@ … @@
 		return IRQ_DECLINE;
 
-#define CMD_MEM_READ(target) \
-do { \
-	void *va = code->cmds[i].addr; \
-	if (AS != irq->driver_as) \
-		as_switch(AS, irq->driver_as); \
-	memcpy_from_uspace(&target, va, (sizeof(target))); \
-	if (dstarg) \
-		scratch[dstarg] = target; \
-} while(0)
-
-#define CMD_MEM_WRITE(val) \
-do { \
-	void *va = code->cmds[i].addr; \
-	if (AS != irq->driver_as) \
-		as_switch(AS, irq->driver_as); \
-	memcpy_to_uspace(va, &val, sizeof(val)); \
-} while (0)
-
 	as_t *current_as = AS;
-	size_t i;
-	for (i = 0; i < code->cmdcount; i++) {
+	if (current_as != irq->driver_as)
+		as_switch(AS, irq->driver_as);
+
+	for (size_t i = 0; i < code->cmdcount; i++) {
 		uint32_t dstval;
+		void *va;
+		uint8_t val8;
+		uint16_t val16;
+		uint32_t val32;
+
 		uintptr_t srcarg = code->cmds[i].srcarg;
 		uintptr_t dstarg = code->cmds[i].dstarg;
@@ … @@
 			}
 			break;
-		case CMD_MEM_READ_8: {
-			uint8_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_READ_16: {
-			uint16_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_READ_32: {
-			uint32_t val;
-			CMD_MEM_READ(val);
-			break;
-		}
-		case CMD_MEM_WRITE_8: {
-			uint8_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
-		case CMD_MEM_WRITE_16: {
-			uint16_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
-		case CMD_MEM_WRITE_32: {
-			uint32_t val = code->cmds[i].value;
-			CMD_MEM_WRITE(val);
-			break;
-		}
+		case CMD_MEM_READ_8:
+			va = code->cmds[i].addr;
+			memcpy_from_uspace(&val8, va, sizeof(val8));
+			if (dstarg)
+				scratch[dstarg] = val8;
+			break;
+		case CMD_MEM_READ_16:
+			va = code->cmds[i].addr;
+			memcpy_from_uspace(&val16, va, sizeof(val16));
+			if (dstarg)
+				scratch[dstarg] = val16;
+			break;
+		case CMD_MEM_READ_32:
+			va = code->cmds[i].addr;
+			memcpy_from_uspace(&val32, va, sizeof(val32));
+			if (dstarg)
+				scratch[dstarg] = val32;
+			break;
+		case CMD_MEM_WRITE_8:
+			val8 = code->cmds[i].value;
+			va = code->cmds[i].addr;
+			memcpy_to_uspace(va, &val8, sizeof(val8));
+			break;
+		case CMD_MEM_WRITE_16:
+			val16 = code->cmds[i].value;
+			va = code->cmds[i].addr;
+			memcpy_to_uspace(va, &val16, sizeof(val16));
+			break;
+		case CMD_MEM_WRITE_32:
+			val32 = code->cmds[i].value;
+			va = code->cmds[i].addr;
+			memcpy_to_uspace(va, &val32, sizeof(val32));
+			break;
 		case CMD_MEM_WRITE_A_8:
 			if (srcarg) {
-				uint8_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
+				val8 = scratch[srcarg];
+				va = code->cmds[i].addr;
+				memcpy_to_uspace(va, &val8, sizeof(val8));
 			}
 			break;
 		case CMD_MEM_WRITE_A_16:
 			if (srcarg) {
-				uint16_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
+				val16 = scratch[srcarg];
+				va = code->cmds[i].addr;
+				memcpy_to_uspace(va, &val16, sizeof(val16));
 			}
 			break;
 		case CMD_MEM_WRITE_A_32:
 			if (srcarg) {
-				uint32_t val = scratch[srcarg];
-				CMD_MEM_WRITE(val);
+				val32 = scratch[srcarg];
+				va = code->cmds[i].addr;
+				memcpy_to_uspace(va, &val32,
+				    sizeof(val32));
 			}
 			break;
@@ … @@
 		}
 	}
+
 	if (AS != current_as)
 		as_switch(AS, current_as);
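For context, the commands this loop interprets form the IRQ pseudo-program that a driver hands to the kernel via SYS_IRQ_REGISTER. A sketch of a two-command program follows; the stand-in type definitions are modelled on the kernel's <ipc/irq.h> of this era, so treat the exact field and constant names as an assumption:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in declarations so the sketch compiles on its own; the
     * real definitions live in the kernel's <ipc/irq.h>. */
    typedef enum { CMD_MEM_READ_32, CMD_ACCEPT } irq_cmd_type_t;

    typedef struct {
        irq_cmd_type_t cmd;
        void *addr;        /* driver-space virtual address */
        uint32_t value;
        uintptr_t srcarg;
        uintptr_t dstarg;  /* scratch register index (1..3) */
    } irq_cmd_t;

    typedef struct {
        size_t cmdcount;
        irq_cmd_t *cmds;
    } irq_code_t;

    /* A two-command top-half program: read a 32-bit device word into
     * scratch[2] (visible to the handler as ARG2), then accept the
     * interrupt. */
    static uint32_t device_status;  /* hypothetical register image */

    static irq_cmd_t demo_cmds[] = {
        { .cmd = CMD_MEM_READ_32, .addr = &device_status, .dstarg = 2 },
        { .cmd = CMD_ACCEPT }
    };

    static irq_code_t demo_code = {
        .cmdcount = sizeof(demo_cmds) / sizeof(demo_cmds[0]),
        .cmds = demo_cmds
    };

    int main(void)
    {
        printf("program with %zu commands\n", demo_code.cmdcount);
        return 0;
    }

Because the addresses in such a program point into the driver's address space, the rewritten loop above switches to irq->driver_as once before executing the commands instead of re-checking inside every memory command, as the old macros did.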
kernel/generic/src/ipc/sysipc.c

--- r4291215
+++ r153cc76a
@@ … @@
 		irq_spinlock_unlock(&answer->sender->lock, true);
 
+		uintptr_t dst_base = (uintptr_t) -1;
 		int rc = as_area_share(as, IPC_GET_ARG1(*olddata),
-		    IPC_GET_ARG2(*olddata), AS,
-		    IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
+		    IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata),
+		    &dst_base, IPC_GET_ARG1(answer->data));
+
+		if (rc == EOK)
+			rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data),
+			    &dst_base, sizeof(dst_base));
+
 		IPC_SET_RETVAL(answer->data, rc);
 		return rc;
 	}
 } else if (IPC_GET_IMETHOD(*olddata) == IPC_M_SHARE_IN) {
 	if (!IPC_GET_RETVAL(answer->data)) {
 		irq_spinlock_lock(&answer->sender->lock, true);
 		as_t *as = answer->sender->as;
 		irq_spinlock_unlock(&answer->sender->lock, true);
 
+		uintptr_t dst_base = (uintptr_t) -1;
 		int rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
-		    IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
-		    IPC_GET_ARG2(answer->data));
+		    IPC_GET_ARG1(*olddata), as, IPC_GET_ARG2(answer->data),
+		    &dst_base, IPC_GET_ARG3(answer->data));
+		IPC_SET_ARG4(answer->data, dst_base);
 		IPC_SET_RETVAL(answer->data, rc);
 	}
@@ … @@
  *
  */
-sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
+sysarg_t sys_irq_register(inr_t inr, devno_t devno, sysarg_t imethod,
     irq_code_t *ucode)
 {
@@ … @@
  *
  */
-sysarg_t sys_unregister_irq(inr_t inr, devno_t devno)
+sysarg_t sys_irq_unregister(inr_t inr, devno_t devno)
 {
 	if (!(cap_get(TASK) & CAP_IRQ_REG))
kernel/generic/src/lib/elf.c

--- r4291215
+++ r153cc76a
@@ … @@
 	size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base);
 
-	as_area_t *area = as_area_create(as, flags, mem_sz, base,
-	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data);
+	as_area_t *area = as_area_create(as, flags, mem_sz,
+	    AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0);
 	if (!area)
 		return EE_MEMORY;
kernel/generic/src/mm/as.c

--- r4291215
+++ r153cc76a
@@ … @@
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * The address space must be already locked when calling
+ * this function.
+ *
+ * @param as    Address space.
+ * @param bound Lowest address bound.
+ * @param size  Requested size of the allocation.
+ *
+ * @return Address of the beginning of unmapped address space area.
+ * @return -1 if no suitable address space area was found.
+ *
+ */
+NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
+    size_t size)
+{
+	ASSERT(mutex_locked(&as->lock));
+
+	if (size == 0)
+		return (uintptr_t) -1;
+
+	/*
+	 * Make sure we allocate from page-aligned
+	 * address. Check for possible overflow in
+	 * each step.
+	 */
+
+	size_t pages = SIZE2FRAMES(size);
+
+	/*
+	 * Find the lowest unmapped address aligned on the size
+	 * boundary, not smaller than bound and of the required size.
+	 */
+
+	/* First check the bound address itself */
+	uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
+	if ((addr >= bound) &&
+	    (check_area_conflicts(as, addr, pages, NULL)))
+		return addr;
+
+	/* Eventually check the addresses behind each area */
+	list_foreach(as->as_area_btree.leaf_list, cur) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		for (btree_key_t i = 0; i < node->keys; i++) {
+			as_area_t *area = (as_area_t *) node->value[i];
+
+			mutex_lock(&area->lock);
+
+			addr =
+			    ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+			bool avail =
+			    ((addr >= bound) && (addr >= area->base) &&
+			    (check_area_conflicts(as, addr, pages, area)));
+
+			mutex_unlock(&area->lock);
+
+			if (avail)
+				return addr;
+		}
+	}
+
+	/* No suitable address space area found */
+	return (uintptr_t) -1;
+}
+
 /** Create address space area of common attributes.
  *
@@ … @@
  * @param flags Flags of the area memory.
  * @param size Size of area.
- * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param base Starting virtual address of the area.
+ *             If set to -1, a suitable mappable area is found.
+ * @param bound Lowest address bound if base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Address space area on success or NULL on failure.
@@ … @@
  */
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data)
-{
-	if ((base % PAGE_SIZE) != 0)
+    unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
+{
+	if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
 		return NULL;
 
@@ … @@
 	mutex_lock(&as->lock);
 
-	if (!check_area_conflicts(as, base, pages, NULL)) {
+	if (*base == (uintptr_t) -1) {
+		*base = as_get_unmapped_area(as, bound, size);
+		if (*base == (uintptr_t) -1) {
+			mutex_unlock(&as->lock);
+			return NULL;
+		}
+	}
+
+	if (!check_area_conflicts(as, *base, pages, NULL)) {
 		mutex_unlock(&as->lock);
 		return NULL;
@@ … @@
 	area->pages = pages;
 	area->resident = 0;
-	area->base = base;
+	area->base = *base;
 	area->sh_info = NULL;
 	area->backend = backend;
@@ … @@
 
 	btree_create(&area->used_space);
-	btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+	btree_insert(&as->as_area_btree, *base, (void *) area,
+	    NULL);
 
 	mutex_unlock(&as->lock);
@@ … @@
  * @param acc_size Expected size of the source area.
  * @param dst_as Pointer to destination address space.
- * @param dst_base Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
+ * @param dst_base Target base address. If set to -1,
+ *                 a suitable mappable area is found.
+ * @param bound Lowest address bound if dst_base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Zero on success.
@@ … @@
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
+    uintptr_t bound)
 {
 	mutex_lock(&src_as->lock);
@@ … @@
 	 * to support sharing in less privileged mode.
 	 */
-	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
-	    dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+	as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
+	    src_size, AS_AREA_ATTR_PARTIAL, src_backend,
+	    &src_backend_data, dst_base, bound);
 	if (!dst_area) {
 		/*
@@ … @@
  */
 
-/** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
-{
-	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
-	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
-		return (sysarg_t) address;
-	else
+sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
+    uintptr_t bound)
+{
+	uintptr_t virt = base;
+	as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+	    AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+	if (area == NULL)
 		return (sysarg_t) -1;
-}
-
-/** Wrapper for as_area_resize(). */
+
+	return (sysarg_t) virt;
+}
+
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
@@ … @@
 }
 
-/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
@@ … @@
 }
 
-/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
 	return (sysarg_t) as_area_destroy(AS, address);
-}
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-	if (size == 0)
-		return 0;
-
-	/*
-	 * Make sure we allocate from page-aligned
-	 * address. Check for possible overflow in
-	 * each step.
-	 */
-
-	size_t pages = SIZE2FRAMES(size);
-	uintptr_t ret = 0;
-
-	/*
-	 * Find the lowest unmapped address aligned on the sz
-	 * boundary, not smaller than base and of the required size.
-	 */
-
-	mutex_lock(&AS->lock);
-
-	/* First check the base address itself */
-	uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-	if ((addr >= base) &&
-	    (check_area_conflicts(AS, addr, pages, NULL)))
-		ret = addr;
-
-	/* Eventually check the addresses behind each area */
-	list_foreach(AS->as_area_btree.leaf_list, cur) {
-		if (ret != 0)
-			break;
-
-		btree_node_t *node =
-		    list_get_instance(cur, btree_node_t, leaf_link);
-
-		btree_key_t i;
-		for (i = 0; (ret == 0) && (i < node->keys); i++) {
-			uintptr_t addr;
-
-			as_area_t *area = (as_area_t *) node->value[i];
-
-			mutex_lock(&area->lock);
-
-			addr = ALIGN_UP(area->base + P2SZ(area->pages),
-			    PAGE_SIZE);
-
-			if ((addr >= base) && (addr >= area->base) &&
-			    (check_area_conflicts(AS, addr, pages, area)))
-				ret = addr;
-
-			mutex_unlock(&area->lock);
-		}
-	}
-
-	mutex_unlock(&AS->lock);
-
-	return (sysarg_t) ret;
 }
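The new as_get_unmapped_area() (moved into the kernel from the former sys_as_get_unmapped_area syscall) is a first-fit scan: it first tries the page-aligned bound itself, then the first page-aligned address past each existing area, returning the first candidate that check_area_conflicts() accepts. A self-contained toy model of the same scan, using a flat array instead of the B+tree and no locking:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_UP(a, sz) (((a) + ((sz) - 1)) & ~((uintptr_t) (sz) - 1))

    /* Toy model of a mapped area: [base, base + size). */
    typedef struct {
        uintptr_t base;
        size_t size;
    } area_t;

    /* Does candidate [cand, cand + size) overlap any existing area? */
    static int conflicts(area_t *areas, size_t n, uintptr_t cand, size_t size)
    {
        for (size_t i = 0; i < n; i++)
            if ((cand < areas[i].base + areas[i].size) &&
                (areas[i].base < cand + size))
                return 1;
        return 0;
    }

    /* First-fit: try the aligned bound, then the first page-aligned
     * address past each area (mirroring the kernel's walk over the
     * B+tree leaves). */
    static uintptr_t get_unmapped(area_t *areas, size_t n, uintptr_t bound,
        size_t size)
    {
        uintptr_t cand = ALIGN_UP(bound, PAGE_SIZE);
        if (!conflicts(areas, n, cand, size))
            return cand;

        for (size_t i = 0; i < n; i++) {
            cand = ALIGN_UP(areas[i].base + areas[i].size, PAGE_SIZE);
            if ((cand >= bound) && (!conflicts(areas, n, cand, size)))
                return cand;
        }

        return (uintptr_t) -1;
    }

    int main(void)
    {
        area_t areas[] = {
            { 0x10000, 2 * PAGE_SIZE },  /* 0x10000 .. 0x12000 */
            { 0x12000, PAGE_SIZE }       /* 0x12000 .. 0x13000 */
        };

        /* Expect 0x13000: first free page-aligned spot above 0x10000. */
        uintptr_t addr = get_unmapped(areas, 2, 0x10000, PAGE_SIZE);
        printf("0x%lx\n", (unsigned long) addr);
        return 0;
    }

With this in the kernel, callers of as_area_create() pass *base == -1 plus a bound and read the chosen address back through the pointer, instead of userspace probing for a free range and racing against itself.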
kernel/generic/src/mm/page.c

--- r4291215
+++ r153cc76a
@@ … @@
 * We assume that the other processors are either not using the mapping yet
 * (i.e. during the bootstrap) or are executing the TLB shootdown code. While
- * we don't care much about the former case, the processors in the latter case 
+ * we don't care much about the former case, the processors in the latter case
 * will do an implicit serialization by virtue of running the TLB shootdown
 * interrupt handler.
@@ … @@
 #include <syscall/copy.h>
 #include <errno.h>
+#include <align.h>
 
 /** Virtual operations for page subsystem. */
@@ … @@
 }
 
+int page_find_mapping(uintptr_t virt, void **phys)
+{
+	mutex_lock(&AS->lock);
+
+	pte_t *pte = page_mapping_find(AS, virt, false);
+	if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) {
+		mutex_unlock(&AS->lock);
+		return ENOENT;
+	}
+
+	*phys = (void *) PTE_GET_FRAME(pte) +
+	    (virt - ALIGN_DOWN(virt, PAGE_SIZE));
+
+	mutex_unlock(&AS->lock);
+
+	return EOK;
+}
+
 /** Syscall wrapper for getting mapping of a virtual page.
- *
- * @retval EOK Everything went find, @p uspace_frame and @p uspace_node
- *         contains correct values.
- * @retval ENOENT Virtual address has no mapping.
- */
-sysarg_t sys_page_find_mapping(uintptr_t virt_address,
-    uintptr_t *uspace_frame)
-{
-	mutex_lock(&AS->lock);
-
-	pte_t *pte = page_mapping_find(AS, virt_address, false);
-	if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) {
-		mutex_unlock(&AS->lock);
-
-		return (sysarg_t) ENOENT;
-	}
-
-	uintptr_t phys_address = PTE_GET_FRAME(pte);
-
-	mutex_unlock(&AS->lock);
-
-	int rc = copy_to_uspace(uspace_frame,
-	    &phys_address, sizeof(phys_address));
-	if (rc != EOK) {
-		return (sysarg_t) rc;
-	}
-
-	return EOK;
+ *
+ * @return EOK on success.
+ * @return ENOENT if no virtual address mapping found.
+ *
+ */
+sysarg_t sys_page_find_mapping(uintptr_t virt, void *phys_ptr)
+{
+	void *phys;
+	int rc = page_find_mapping(virt, &phys);
+	if (rc != EOK)
+		return rc;
+
+	rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+	return (sysarg_t) rc;
 }
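Unlike the old syscall, which returned only the frame address, page_find_mapping() now returns a byte-exact physical address: it adds the in-page offset virt - ALIGN_DOWN(virt, PAGE_SIZE) to the frame taken from the PTE, which is what lets dmamem_map() in ddi.c hand drivers a directly usable physical pointer. A worked example of the arithmetic, with a made-up frame address and 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_DOWN(a, sz) ((a) & ~((uintptr_t) (sz) - 1))

    int main(void)
    {
        uintptr_t virt = 0x80041234;   /* queried virtual address */
        uintptr_t frame = 0x00733000;  /* made-up PTE_GET_FRAME() result */

        /* Same arithmetic as page_find_mapping(): keep the offset. */
        uintptr_t offset = virt - ALIGN_DOWN(virt, PAGE_SIZE);
        uintptr_t phys = frame + offset;

        printf("offset=0x%lx phys=0x%lx\n",
            (unsigned long) offset, (unsigned long) phys);
        /* prints: offset=0x234 phys=0x733234 */
        return 0;
    }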
kernel/generic/src/proc/program.c

--- r4291215
+++ r153cc76a
@@ … @@
 	/*
 	 * Create the stack address space area.
 	 */
+	uintptr_t virt = USTACK_ADDRESS;
 	as_area_t *area = as_area_create(as,
 	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-	    STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE,
-	    &anon_backend, NULL);
+	    STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
 	if (!area)
 		return ENOMEM;
kernel/generic/src/syscall/syscall.c

--- r4291215
+++ r153cc76a
@@ … @@
 	(syshandler_t) sys_as_area_change_flags,
 	(syshandler_t) sys_as_area_destroy,
-	(syshandler_t) sys_as_get_unmapped_area,
 
 	/* Page mapping related syscalls. */
@@ … @@
 	(syshandler_t) sys_device_assign_devno,
 	(syshandler_t) sys_physmem_map,
+	(syshandler_t) sys_physmem_unmap,
+	(syshandler_t) sys_dmamem_map,
+	(syshandler_t) sys_dmamem_unmap,
 	(syshandler_t) sys_iospace_enable,
-	(syshandler_t) sys_register_irq,
-	(syshandler_t) sys_unregister_irq,
+	(syshandler_t) sys_iospace_disable,
+	(syshandler_t) sys_irq_register,
+	(syshandler_t) sys_irq_unregister,
 
 	/* Sysinfo syscalls. */