Changeset 49ff5f3 in mainline for kernel/generic/src
- Timestamp:
- 2012-04-18T20:55:21Z (14 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 7769ec9
- Parents:
- e895352 (diff), 63920b0 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src
- Files:
-
- 2 added
- 27 edited
-
console/cmd.c (modified) (1 diff)
-
console/console.c (modified) (1 diff)
-
console/kconsole.c (modified) (1 diff)
-
cpu/cpu.c (modified) (1 diff)
-
ddi/ddi.c (modified) (12 diffs)
-
ipc/irq.c (modified) (13 diffs)
-
ipc/sysipc.c (modified) (3 diffs)
-
lib/elf.c (modified) (1 diff)
-
lib/func.c (modified) (1 diff)
-
lib/ra.c (added)
-
lib/rd.c (modified) (2 diffs)
-
lib/str.c (modified) (5 diffs)
-
main/kinit.c (modified) (4 diffs)
-
main/main.c (modified) (7 diffs)
-
main/version.c (modified) (1 diff)
-
mm/as.c (modified) (12 diffs)
-
mm/backend_anon.c (modified) (7 diffs)
-
mm/backend_elf.c (modified) (12 diffs)
-
mm/frame.c (modified) (22 diffs)
-
mm/km.c (added)
-
mm/page.c (modified) (4 diffs)
-
mm/reserve.c (modified) (5 diffs)
-
printf/printf_core.c (modified) (2 diffs)
-
proc/program.c (modified) (1 diff)
-
proc/thread.c (modified) (1 diff)
-
syscall/copy.c (modified) (2 diffs)
-
syscall/syscall.c (modified) (2 diffs)
-
sysinfo/stats.c (modified) (25 diffs)
-
sysinfo/sysinfo.c (modified) (22 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/console/cmd.c
re895352 r49ff5f3 906 906 ((char *) argv->buffer)[0] <= '9') { 907 907 uint64_t value; 908 rc = str_uint64 ((char *) argv->buffer, NULL, 0, true, &value);908 rc = str_uint64_t((char *) argv->buffer, NULL, 0, true, &value); 909 909 if (rc == EOK) 910 910 addr = (uintptr_t) value; -
kernel/generic/src/console/console.c
re895352 r49ff5f3 57 57 58 58 /** Kernel log cyclic buffer */ 59 static wchar_t klog[KLOG_LENGTH] __attribute__ ((aligned(PAGE_SIZE)));59 wchar_t klog[KLOG_LENGTH] __attribute__((aligned(PAGE_SIZE))); 60 60 61 61 /** Kernel log initialized */ -
kernel/generic/src/console/kconsole.c
re895352 r49ff5f3 472 472 /* It's a number - convert it */ 473 473 uint64_t value; 474 int rc = str_uint64 (text, NULL, 0, true, &value);474 int rc = str_uint64_t(text, NULL, 0, true, &value); 475 475 switch (rc) { 476 476 case EINVAL: -
kernel/generic/src/cpu/cpu.c
re895352 r49ff5f3 74 74 for (i = 0; i < config.cpu_count; i++) { 75 75 cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, 76 FRAME_ KA | FRAME_ATOMIC);76 FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC); 77 77 cpus[i].id = i; 78 78 -
kernel/generic/src/ddi/ddi.c
re895352 r49ff5f3 45 45 #include <mm/frame.h> 46 46 #include <mm/as.h> 47 #include <mm/page.h> 47 48 #include <synch/mutex.h> 48 49 #include <syscall/copy.h> … … 52 53 #include <errno.h> 53 54 #include <trace.h> 55 #include <bitops.h> 54 56 55 57 /** This lock protects the parea_btree. */ … … 87 89 /** Map piece of physical memory into virtual address space of current task. 88 90 * 89 * @param pf Physical address of the starting frame. 90 * @param vp Virtual address of the starting page. 91 * @param phys Physical address of the starting frame. 91 92 * @param pages Number of pages to map. 92 93 * @param flags Address space area flags for the mapping. 93 * 94 * @return 0 on success, EPERM if the caller lacks capabilities to use this 95 * syscall, EBADMEM if pf or vf is not page aligned, ENOENT if there 96 * is no task matching the specified ID or the physical address space 97 * is not enabled for mapping and ENOMEM if there was a problem in 98 * creating address space area. 99 * 100 */ 101 NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, 102 unsigned int flags) 94 * @param virt Virtual address of the starting page. 95 * @param bound Lowest virtual address bound. 96 * 97 * @return EOK on success. 98 * @return EPERM if the caller lacks capabilities to use this syscall. 99 * @return EBADMEM if phys is not page aligned. 100 * @return ENOENT if there is no task matching the specified ID or 101 * the physical address space is not enabled for mapping. 102 * @return ENOMEM if there was a problem in creating address space area. 
103 * 104 */ 105 NO_TRACE static int physmem_map(uintptr_t phys, size_t pages, 106 unsigned int flags, uintptr_t *virt, uintptr_t bound) 103 107 { 104 108 ASSERT(TASK); 105 109 106 if ((pf % FRAME_SIZE) != 0) 107 return EBADMEM; 108 109 if ((vp % PAGE_SIZE) != 0) 110 if ((phys % FRAME_SIZE) != 0) 110 111 return EBADMEM; 111 112 … … 118 119 119 120 mem_backend_data_t backend_data; 120 backend_data.base = p f;121 backend_data.base = phys; 121 122 backend_data.frames = pages; 122 123 … … 129 130 btree_node_t *nodep; 130 131 parea_t *parea = (parea_t *) btree_search(&parea_btree, 131 (btree_key_t) p f, &nodep);132 (btree_key_t) phys, &nodep); 132 133 133 134 if ((parea != NULL) && (parea->frames >= pages)) { … … 149 150 150 151 irq_spinlock_lock(&zones.lock, true); 151 size_t znum = find_zone(ADDR2PFN(p f), pages, 0);152 size_t znum = find_zone(ADDR2PFN(phys), pages, 0); 152 153 153 154 if (znum == (size_t) -1) { … … 165 166 } 166 167 167 if (zones.info[znum].flags & ZONE_FIRMWARE) {168 /* 169 * Frames are part of firmware 168 if (zones.info[znum].flags & (ZONE_FIRMWARE | ZONE_RESERVED)) { 169 /* 170 * Frames are part of firmware or reserved zone 170 171 * -> allow mapping for privileged tasks. 171 172 */ … … 182 183 183 184 map: 184 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,185 AS_AREA_ATTR_NONE, &phys_backend, &backend_data )) {185 if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages), 186 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) { 186 187 /* 187 188 * The address space area was not created. … … 207 208 } 208 209 210 NO_TRACE static int physmem_unmap(uintptr_t virt) 211 { 212 // TODO: implement unmap 213 return EOK; 214 } 215 216 /** Wrapper for SYS_PHYSMEM_MAP syscall. 217 * 218 * @param phys Physical base address to map 219 * @param pages Number of pages 220 * @param flags Flags of newly mapped pages 221 * @param virt_ptr Destination virtual address 222 * @param bound Lowest virtual address bound. 
223 * 224 * @return 0 on success, otherwise it returns error code found in errno.h 225 * 226 */ 227 sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags, 228 void *virt_ptr, uintptr_t bound) 229 { 230 uintptr_t virt = (uintptr_t) -1; 231 int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags, 232 &virt, bound); 233 if (rc != EOK) 234 return rc; 235 236 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt)); 237 if (rc != EOK) { 238 physmem_unmap((uintptr_t) virt); 239 return rc; 240 } 241 242 return EOK; 243 } 244 245 sysarg_t sys_physmem_unmap(uintptr_t virt) 246 { 247 return physmem_unmap(virt); 248 } 249 209 250 /** Enable range of I/O space for task. 210 251 * … … 217 258 * 218 259 */ 219 NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, 220 size_t size) 260 NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) 221 261 { 222 262 /* … … 243 283 /* Lock the task and release the lock protecting tasks_btree. */ 244 284 irq_spinlock_exchange(&tasks_lock, &task->lock); 245 246 285 int rc = ddi_iospace_enable_arch(task, ioaddr, size); 247 248 286 irq_spinlock_unlock(&task->lock, true); 249 287 250 288 return rc; 251 }252 253 /** Wrapper for SYS_PHYSMEM_MAP syscall.254 *255 * @param phys_base Physical base address to map256 * @param virt_base Destination virtual address257 * @param pages Number of pages258 * @param flags Flags of newly mapped pages259 *260 * @return 0 on success, otherwise it returns error code found in errno.h261 *262 */263 sysarg_t sys_physmem_map(sysarg_t phys_base, sysarg_t virt_base,264 sysarg_t pages, sysarg_t flags)265 {266 return (sysarg_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,267 FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),268 (size_t) pages, (int) flags);269 289 } 270 290 … … 283 303 return (sysarg_t) rc; 284 304 285 return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,305 return (sysarg_t) iospace_enable((task_id_t) arg.task_id, 286 306 
(uintptr_t) arg.ioaddr, (size_t) arg.size); 287 307 } 288 308 309 sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg) 310 { 311 // TODO: implement 312 return ENOTSUP; 313 } 314 315 NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags, 316 unsigned int flags, void **phys) 317 { 318 ASSERT(TASK); 319 320 // TODO: implement locking of non-anonymous mapping 321 return page_find_mapping(virt, phys); 322 } 323 324 NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags, 325 unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound) 326 { 327 ASSERT(TASK); 328 329 size_t pages = SIZE2FRAMES(size); 330 uint8_t order; 331 332 /* We need the 2^order >= pages */ 333 if (pages == 1) 334 order = 0; 335 else 336 order = fnzb(pages - 1) + 1; 337 338 *phys = frame_alloc_noreserve(order, 0); 339 if (*phys == NULL) 340 return ENOMEM; 341 342 mem_backend_data_t backend_data; 343 backend_data.base = (uintptr_t) *phys; 344 backend_data.frames = pages; 345 346 if (!as_area_create(TASK->as, map_flags, size, 347 AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) { 348 frame_free_noreserve((uintptr_t) *phys); 349 return ENOMEM; 350 } 351 352 return EOK; 353 } 354 355 NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size) 356 { 357 // TODO: implement unlocking & unmap 358 return EOK; 359 } 360 361 NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt) 362 { 363 // TODO: implement unlocking & unmap 364 return EOK; 365 } 366 367 sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags, 368 void *phys_ptr, void *virt_ptr, uintptr_t bound) 369 { 370 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) { 371 /* 372 * Non-anonymous DMA mapping 373 */ 374 375 void *phys; 376 int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags, 377 flags, &phys); 378 379 if (rc != EOK) 380 return rc; 381 382 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 383 if (rc != EOK) { 384 dmamem_unmap((uintptr_t) 
virt_ptr, size); 385 return rc; 386 } 387 } else { 388 /* 389 * Anonymous DMA mapping 390 */ 391 392 void *phys; 393 uintptr_t virt = (uintptr_t) -1; 394 int rc = dmamem_map_anonymous(size, map_flags, flags, 395 &phys, &virt, bound); 396 if (rc != EOK) 397 return rc; 398 399 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 400 if (rc != EOK) { 401 dmamem_unmap_anonymous((uintptr_t) virt); 402 return rc; 403 } 404 405 rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt)); 406 if (rc != EOK) { 407 dmamem_unmap_anonymous((uintptr_t) virt); 408 return rc; 409 } 410 } 411 412 return EOK; 413 } 414 415 sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags) 416 { 417 if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) 418 return dmamem_unmap(virt, size); 419 else 420 return dmamem_unmap_anonymous(virt); 421 } 422 289 423 /** @} 290 424 */ -
kernel/generic/src/ipc/irq.c
re895352 r49ff5f3 42 42 * 43 43 * The structure of a notification message is as follows: 44 * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ45 * syscall44 * - IMETHOD: interface and method as registered by 45 * the SYS_IRQ_REGISTER syscall 46 46 * - ARG1: payload modified by a 'top-half' handler 47 47 * - ARG2: payload modified by a 'top-half' handler … … 74 74 #include <arch.h> 75 75 #include <mm/slab.h> 76 #include <mm/page.h> 77 #include <mm/km.h> 76 78 #include <errno.h> 77 79 #include <ddi/irq.h> … … 81 83 #include <console/console.h> 82 84 #include <print.h> 85 #include <macros.h> 86 87 static void ranges_unmap(irq_pio_range_t *ranges, size_t rangecount) 88 { 89 size_t i; 90 91 for (i = 0; i < rangecount; i++) { 92 #ifdef IO_SPACE_BOUNDARY 93 if ((void *) ranges[i].base >= IO_SPACE_BOUNDARY) 94 #endif 95 km_unmap(ranges[i].base, ranges[i].size); 96 } 97 } 98 99 static int ranges_map_and_apply(irq_pio_range_t *ranges, size_t rangecount, 100 irq_cmd_t *cmds, size_t cmdcount) 101 { 102 uintptr_t *pbase; 103 size_t i, j; 104 105 /* Copy the physical base addresses aside. */ 106 pbase = malloc(rangecount * sizeof(uintptr_t), 0); 107 for (i = 0; i < rangecount; i++) 108 pbase[i] = ranges[i].base; 109 110 /* Map the PIO ranges into the kernel virtual address space. */ 111 for (i = 0; i < rangecount; i++) { 112 #ifdef IO_SPACE_BOUNDARY 113 if ((void *) ranges[i].base < IO_SPACE_BOUNDARY) 114 continue; 115 #endif 116 ranges[i].base = km_map(pbase[i], ranges[i].size, 117 PAGE_READ | PAGE_WRITE | PAGE_KERNEL | PAGE_NOT_CACHEABLE); 118 if (!ranges[i].base) { 119 ranges_unmap(ranges, i); 120 free(pbase); 121 return ENOMEM; 122 } 123 } 124 125 /* Rewrite the pseudocode addresses from physical to kernel virtual. */ 126 for (i = 0; i < cmdcount; i++) { 127 uintptr_t addr; 128 size_t size; 129 130 /* Process only commands that use an address. 
*/ 131 switch (cmds[i].cmd) { 132 case CMD_PIO_READ_8: 133 case CMD_PIO_WRITE_8: 134 case CMD_PIO_WRITE_A_8: 135 size = 1; 136 break; 137 case CMD_PIO_READ_16: 138 case CMD_PIO_WRITE_16: 139 case CMD_PIO_WRITE_A_16: 140 size = 2; 141 break; 142 case CMD_PIO_READ_32: 143 case CMD_PIO_WRITE_32: 144 case CMD_PIO_WRITE_A_32: 145 size = 4; 146 break; 147 default: 148 /* Move onto the next command. */ 149 continue; 150 } 151 152 addr = (uintptr_t) cmds[i].addr; 153 154 for (j = 0; j < rangecount; j++) { 155 156 /* Find the matching range. */ 157 if (!iswithin(pbase[j], ranges[j].size, addr, size)) 158 continue; 159 160 /* Switch the command to a kernel virtual address. */ 161 addr -= pbase[j]; 162 addr += ranges[j].base; 163 164 cmds[i].addr = (void *) addr; 165 break; 166 } 167 168 if (j == rangecount) { 169 /* 170 * The address used in this command is outside of all 171 * defined ranges. 172 */ 173 ranges_unmap(ranges, rangecount); 174 free(pbase); 175 return EINVAL; 176 } 177 } 178 179 free(pbase); 180 return EOK; 181 } 83 182 84 183 /** Free the top-half pseudocode. 
… … 90 189 { 91 190 if (code) { 191 ranges_unmap(code->ranges, code->rangecount); 192 free(code->ranges); 92 193 free(code->cmds); 93 194 free(code); … … 104 205 static irq_code_t *code_from_uspace(irq_code_t *ucode) 105 206 { 207 irq_pio_range_t *ranges = NULL; 208 irq_cmd_t *cmds = NULL; 209 106 210 irq_code_t *code = malloc(sizeof(*code), 0); 107 211 int rc = copy_from_uspace(code, ucode, sizeof(*code)); 108 if (rc != 0) { 109 free(code); 110 return NULL; 111 } 112 113 if (code->cmdcount > IRQ_MAX_PROG_SIZE) { 114 free(code); 115 return NULL; 116 } 117 118 irq_cmd_t *ucmds = code->cmds; 119 code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); 120 rc = copy_from_uspace(code->cmds, ucmds, 212 if (rc != EOK) 213 goto error; 214 215 if ((code->rangecount > IRQ_MAX_RANGE_COUNT) || 216 (code->cmdcount > IRQ_MAX_PROG_SIZE)) 217 goto error; 218 219 ranges = malloc(sizeof(code->ranges[0]) * code->rangecount, 0); 220 rc = copy_from_uspace(ranges, code->ranges, 221 sizeof(code->ranges[0]) * code->rangecount); 222 if (rc != EOK) 223 goto error; 224 225 cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); 226 rc = copy_from_uspace(cmds, code->cmds, 121 227 sizeof(code->cmds[0]) * code->cmdcount); 122 if (rc != 0) { 123 free(code->cmds); 124 free(code); 125 return NULL; 126 } 127 228 if (rc != EOK) 229 goto error; 230 231 rc = ranges_map_and_apply(ranges, code->rangecount, cmds, 232 code->cmdcount); 233 if (rc != EOK) 234 goto error; 235 236 code->ranges = ranges; 237 code->cmds = cmds; 238 128 239 return code; 240 241 error: 242 if (cmds) 243 free(cmds); 244 if (ranges) 245 free(ranges); 246 free(code); 247 return NULL; 129 248 } 130 249 … … 174 293 irq->notif_cfg.code = code; 175 294 irq->notif_cfg.counter = 0; 176 irq->driver_as = AS;177 295 178 296 /* … … 239 357 240 358 ASSERT(irq->notif_cfg.answerbox == box); 241 242 /* Free up the pseudo code and associated structures. 
*/243 code_free(irq->notif_cfg.code);244 359 245 360 /* Remove the IRQ from the answerbox's list. */ … … 260 375 irq_spinlock_unlock(&box->irq_lock, false); 261 376 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 377 378 /* Free up the pseudo code and associated structures. */ 379 code_free(irq->notif_cfg.code); 262 380 263 381 /* Free up the IRQ structure. */ … … 307 425 list_remove(&irq->notif_cfg.link); 308 426 309 /* Free up the pseudo code and associated structures. */310 code_free(irq->notif_cfg.code);311 312 427 /* 313 428 * We need to drop the IRQ lock now because hash_table_remove() … … 321 436 /* Remove from the hash table. */ 322 437 hash_table_remove(&irq_uspace_hash_table, key, 2); 323 438 439 /* 440 * Release both locks so that we can free the pseudo code. 441 */ 442 irq_spinlock_unlock(&box->irq_lock, false); 443 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 444 445 code_free(irq->notif_cfg.code); 324 446 free(irq); 447 448 /* Reacquire both locks before taking another round. 
*/ 449 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 450 irq_spinlock_lock(&box->irq_lock, false); 325 451 } 326 452 … … 365 491 return IRQ_DECLINE; 366 492 367 #define CMD_MEM_READ(target) \ 368 do { \ 369 void *va = code->cmds[i].addr; \ 370 if (AS != irq->driver_as) \ 371 as_switch(AS, irq->driver_as); \ 372 memcpy_from_uspace(&target, va, (sizeof(target))); \ 373 if (dstarg) \ 374 scratch[dstarg] = target; \ 375 } while(0) 376 377 #define CMD_MEM_WRITE(val) \ 378 do { \ 379 void *va = code->cmds[i].addr; \ 380 if (AS != irq->driver_as) \ 381 as_switch(AS, irq->driver_as); \ 382 memcpy_to_uspace(va, &val, sizeof(val)); \ 383 } while (0) 384 385 as_t *current_as = AS; 386 size_t i; 387 for (i = 0; i < code->cmdcount; i++) { 493 for (size_t i = 0; i < code->cmdcount; i++) { 388 494 uint32_t dstval; 495 389 496 uintptr_t srcarg = code->cmds[i].srcarg; 390 497 uintptr_t dstarg = code->cmds[i].dstarg; … … 442 549 } 443 550 break; 444 case CMD_MEM_READ_8: {445 uint8_t val;446 CMD_MEM_READ(val);447 break;448 }449 case CMD_MEM_READ_16: {450 uint16_t val;451 CMD_MEM_READ(val);452 break;453 }454 case CMD_MEM_READ_32: {455 uint32_t val;456 CMD_MEM_READ(val);457 break;458 }459 case CMD_MEM_WRITE_8: {460 uint8_t val = code->cmds[i].value;461 CMD_MEM_WRITE(val);462 break;463 }464 case CMD_MEM_WRITE_16: {465 uint16_t val = code->cmds[i].value;466 CMD_MEM_WRITE(val);467 break;468 }469 case CMD_MEM_WRITE_32: {470 uint32_t val = code->cmds[i].value;471 CMD_MEM_WRITE(val);472 break;473 }474 case CMD_MEM_WRITE_A_8:475 if (srcarg) {476 uint8_t val = scratch[srcarg];477 CMD_MEM_WRITE(val);478 }479 break;480 case CMD_MEM_WRITE_A_16:481 if (srcarg) {482 uint16_t val = scratch[srcarg];483 CMD_MEM_WRITE(val);484 }485 break;486 case CMD_MEM_WRITE_A_32:487 if (srcarg) {488 uint32_t val = scratch[srcarg];489 CMD_MEM_WRITE(val);490 }491 break;492 551 case CMD_BTEST: 493 552 if ((srcarg) && (dstarg)) { … … 503 562 break; 504 563 case CMD_ACCEPT: 505 if (AS != current_as)506 
as_switch(AS, current_as);507 564 return IRQ_ACCEPT; 508 565 case CMD_DECLINE: 509 566 default: 510 if (AS != current_as)511 as_switch(AS, current_as);512 567 return IRQ_DECLINE; 513 568 } 514 569 } 515 if (AS != current_as)516 as_switch(AS, current_as);517 570 518 571 return IRQ_DECLINE; -
kernel/generic/src/ipc/sysipc.c
re895352 r49ff5f3 271 271 irq_spinlock_unlock(&answer->sender->lock, true); 272 272 273 uintptr_t dst_base = (uintptr_t) -1; 273 274 int rc = as_area_share(as, IPC_GET_ARG1(*olddata), 274 IPC_GET_ARG2(*olddata), AS, 275 IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata)); 275 IPC_GET_ARG2(*olddata), AS, IPC_GET_ARG3(*olddata), 276 &dst_base, IPC_GET_ARG1(answer->data)); 277 278 if (rc == EOK) 279 rc = copy_to_uspace((void *) IPC_GET_ARG2(answer->data), 280 &dst_base, sizeof(dst_base)); 281 276 282 IPC_SET_RETVAL(answer->data, rc); 277 283 return rc; 278 284 } 279 285 } else if (IPC_GET_IMETHOD(*olddata) == IPC_M_SHARE_IN) { 280 if (!IPC_GET_RETVAL(answer->data)) { 286 if (!IPC_GET_RETVAL(answer->data)) { 281 287 irq_spinlock_lock(&answer->sender->lock, true); 282 288 as_t *as = answer->sender->as; 283 289 irq_spinlock_unlock(&answer->sender->lock, true); 284 290 291 uintptr_t dst_base = (uintptr_t) -1; 285 292 int rc = as_area_share(AS, IPC_GET_ARG1(answer->data), 286 IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata), 287 IPC_GET_ARG2(answer->data)); 293 IPC_GET_ARG1(*olddata), as, IPC_GET_ARG2(answer->data), 294 &dst_base, IPC_GET_ARG3(answer->data)); 295 IPC_SET_ARG4(answer->data, dst_base); 288 296 IPC_SET_RETVAL(answer->data, rc); 289 297 } … … 1185 1193 * 1186 1194 */ 1187 sysarg_t sys_ register_irq(inr_t inr, devno_t devno, sysarg_t imethod,1195 sysarg_t sys_irq_register(inr_t inr, devno_t devno, sysarg_t imethod, 1188 1196 irq_code_t *ucode) 1189 1197 { … … 1202 1210 * 1203 1211 */ 1204 sysarg_t sys_ unregister_irq(inr_t inr, devno_t devno)1212 sysarg_t sys_irq_unregister(inr_t inr, devno_t devno) 1205 1213 { 1206 1214 if (!(cap_get(TASK) & CAP_IRQ_REG)) -
kernel/generic/src/lib/elf.c
re895352 r49ff5f3 226 226 size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base); 227 227 228 as_area_t *area = as_area_create(as, flags, mem_sz, base,229 AS_AREA_ATTR_NONE, &elf_backend, &backend_data );228 as_area_t *area = as_area_create(as, flags, mem_sz, 229 AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base, 0); 230 230 if (!area) 231 231 return EE_MEMORY; -
kernel/generic/src/lib/func.c
re895352 r49ff5f3 53 53 void halt() 54 54 { 55 #if def CONFIG_DEBUG55 #if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE)) 56 56 bool rundebugger = false; 57 57 -
kernel/generic/src/lib/rd.c
re895352 r49ff5f3 33 33 /** 34 34 * @file 35 * @brief RAM disk support.35 * @brief RAM disk support. 36 36 * 37 37 * Support for RAM disk images. … … 39 39 40 40 #include <lib/rd.h> 41 #include <byteorder.h>42 41 #include <mm/frame.h> 43 42 #include <sysinfo/sysinfo.h> 44 43 #include <ddi/ddi.h> 45 #include <align.h>46 44 47 static parea_t rd_parea; /**< Physical memory area for rd. */ 45 /** Physical memory area for RAM disk. */ 46 static parea_t rd_parea; 48 47 49 /** 50 * RAM disk initialization routine. At this point, the RAM disk memory is shared 51 * and information about the share is provided as sysinfo values to the 52 * userspace tasks. 53 */ 54 int init_rd(rd_header_t *header, size_t size) 48 /** RAM disk initialization routine 49 * 50 * The information about the RAM disk is provided as sysinfo 51 * values to the uspace tasks. 52 * 53 */ 54 void init_rd(void *data, size_t size) 55 55 { 56 /* Identify RAM disk */ 57 if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || 58 (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) 59 return RE_INVALID; 56 uintptr_t base = (uintptr_t) data; 57 ASSERT((base % FRAME_SIZE) == 0); 60 58 61 /* Identify version */ 62 if (header->version != RD_VERSION) 63 return RE_UNSUPPORTED; 64 65 uint32_t hsize; 66 uint64_t dsize; 67 switch (header->data_type) { 68 case RD_DATA_LSB: 69 hsize = uint32_t_le2host(header->header_size); 70 dsize = uint64_t_le2host(header->data_size); 71 break; 72 case RD_DATA_MSB: 73 hsize = uint32_t_be2host(header->header_size); 74 dsize = uint64_t_be2host(header->data_size); 75 break; 76 default: 77 return RE_UNSUPPORTED; 78 } 79 80 if ((hsize % FRAME_SIZE) || (dsize % FRAME_SIZE)) 81 return RE_UNSUPPORTED; 82 83 if (hsize > size) 84 return RE_INVALID; 85 86 if ((uint64_t) hsize + dsize > size) 87 dsize = size - hsize; 88 89 rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize), 90 FRAME_SIZE); 91 rd_parea.frames = SIZE2FRAMES(dsize); 59 rd_parea.pbase = base; 
60 rd_parea.frames = SIZE2FRAMES(size); 92 61 rd_parea.unpriv = false; 93 62 rd_parea.mapped = false; 94 63 ddi_parea_register(&rd_parea); 95 64 96 65 sysinfo_set_item_val("rd", NULL, true); 97 sysinfo_set_item_val("rd.header_size", NULL, hsize); 98 sysinfo_set_item_val("rd.size", NULL, dsize); 99 sysinfo_set_item_val("rd.address.physical", NULL, 100 (sysarg_t) KA2PA((void *) header + hsize)); 101 102 return RE_OK; 66 sysinfo_set_item_val("rd.size", NULL, size); 67 sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base); 103 68 } 104 69 -
kernel/generic/src/lib/str.c
re895352 r49ff5f3 111 111 #include <debug.h> 112 112 #include <macros.h> 113 114 /** Check the condition if wchar_t is signed */ 115 #ifdef WCHAR_IS_UNSIGNED 116 #define WCHAR_SIGNED_CHECK(cond) (true) 117 #else 118 #define WCHAR_SIGNED_CHECK(cond) (cond) 119 #endif 113 120 114 121 /** Byte mask consisting of lowest @n bits (out of 8) */ … … 206 213 * 207 214 * @return EOK if the character was encoded successfully, EOVERFLOW if there 208 * was not enough space in the output buffer or EINVAL if the character209 * code was invalid.210 */ 211 int chr_encode( wchar_t ch, char *str, size_t *offset, size_t size)215 * was not enough space in the output buffer or EINVAL if the character 216 * code was invalid. 217 */ 218 int chr_encode(const wchar_t ch, char *str, size_t *offset, size_t size) 212 219 { 213 220 if (*offset >= size) … … 427 434 bool ascii_check(wchar_t ch) 428 435 { 429 if ( (ch >= 0) && (ch <= 127))436 if (WCHAR_SIGNED_CHECK(ch >= 0) && (ch <= 127)) 430 437 return true; 431 438 … … 440 447 bool chr_check(wchar_t ch) 441 448 { 442 if ( (ch >= 0) && (ch <= 1114111))449 if (WCHAR_SIGNED_CHECK(ch >= 0) && (ch <= 1114111)) 443 450 return true; 444 451 … … 893 900 * 894 901 */ 895 int str_uint64 (const char *nptr, char **endptr, unsigned int base,902 int str_uint64_t(const char *nptr, char **endptr, unsigned int base, 896 903 bool strict, uint64_t *result) 897 904 { -
kernel/generic/src/main/kinit.c
re895352 r49ff5f3 57 57 #include <mm/as.h> 58 58 #include <mm/frame.h> 59 #include <mm/km.h> 59 60 #include <print.h> 60 61 #include <memstr.h> … … 68 69 #include <str.h> 69 70 #include <sysinfo/stats.h> 71 #include <align.h> 70 72 71 73 #ifdef CONFIG_SMP … … 178 180 179 181 for (i = 0; i < init.cnt; i++) { 180 if (init.tasks[i]. addr % FRAME_SIZE) {181 printf("init[%zu] .addris not frame aligned\n", i);182 if (init.tasks[i].paddr % FRAME_SIZE) { 183 printf("init[%zu]: Address is not frame aligned\n", i); 182 184 programs[i].task = NULL; 183 185 continue; … … 199 201 str_cpy(namebuf + INIT_PREFIX_LEN, 200 202 TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name); 201 202 int rc = program_create_from_image((void *) init.tasks[i].addr, 203 namebuf, &programs[i]); 204 205 if ((rc == 0) && (programs[i].task != NULL)) { 203 204 /* 205 * Create virtual memory mappings for init task images. 206 */ 207 uintptr_t page = km_map(init.tasks[i].paddr, 208 init.tasks[i].size, 209 PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE); 210 ASSERT(page); 211 212 int rc = program_create_from_image((void *) page, namebuf, 213 &programs[i]); 214 215 if (rc == 0) { 216 if (programs[i].task != NULL) { 217 /* 218 * Set capabilities to init userspace tasks. 219 */ 220 cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER | 221 CAP_IO_MANAGER | CAP_IRQ_REG); 222 223 if (!ipc_phone_0) 224 ipc_phone_0 = &programs[i].task->answerbox; 225 } 226 206 227 /* 207 * Set capabilities to init userspace tasks. 228 * If programs[i].task == NULL then it is 229 * the program loader and it was registered 230 * successfully. 
208 231 */ 209 cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER | 210 CAP_IO_MANAGER | CAP_IRQ_REG); 211 212 if (!ipc_phone_0) 213 ipc_phone_0 = &programs[i].task->answerbox; 214 } else if (rc == 0) { 215 /* It was the program loader and was registered */ 216 } else { 217 /* RAM disk image */ 218 int rd = init_rd((rd_header_t *) init.tasks[i].addr, init.tasks[i].size); 219 220 if (rd != RE_OK) 221 printf("Init binary %zu not used (error %d)\n", i, rd); 222 } 232 } else if (i == init.cnt - 1) { 233 /* 234 * Assume the last task is the RAM disk. 235 */ 236 init_rd((void *) init.tasks[i].paddr, init.tasks[i].size); 237 } else 238 printf("init[%zu]: Init binary load failed (error %d)\n", i, rc); 223 239 } 224 240 -
kernel/generic/src/main/main.c
re895352 r49ff5f3 68 68 #include <mm/page.h> 69 69 #include <genarch/mm/page_pt.h> 70 #include <mm/km.h> 70 71 #include <mm/tlb.h> 71 72 #include <mm/as.h> … … 86 87 #include <sysinfo/sysinfo.h> 87 88 #include <sysinfo/stats.h> 89 #include <lib/ra.h> 88 90 89 91 /** Global configuration structure. */ 90 config_t config; 92 config_t config = { 93 .identity_configured = false, 94 .non_identity_configured = false, 95 .physmem_end = 0 96 }; 91 97 92 98 /** Initial user-space tasks */ … … 145 151 size_t i; 146 152 for (i = 0; i < init.cnt; i++) { 147 if (PA_OVERLAPS(config.stack_base, config.stack_size, 148 init.tasks[i].addr, init.tasks[i].size)) 149 config.stack_base = ALIGN_UP(init.tasks[i].addr + 150 init.tasks[i].size, config.stack_size); 153 if (overlaps(KA2PA(config.stack_base), config.stack_size, 154 init.tasks[i].paddr, init.tasks[i].size)) { 155 /* 156 * The init task overlaps with the memory behind the 157 * kernel image so it must be in low memory and we can 158 * use PA2KA on the init task's physical address. 159 */ 160 config.stack_base = ALIGN_UP( 161 PA2KA(init.tasks[i].paddr) + init.tasks[i].size, 162 config.stack_size); 163 } 151 164 } 152 165 … … 205 218 */ 206 219 arch_pre_mm_init(); 220 km_identity_init(); 207 221 frame_init(); 208 209 /* Initialize at least 1 memory segment big enough for slab to work. */210 222 slab_cache_init(); 223 ra_init(); 211 224 sysinfo_init(); 212 225 btree_init(); … … 214 227 page_init(); 215 228 tlb_init(); 229 km_non_identity_init(); 216 230 ddi_init(); 217 231 arch_post_mm_init(); … … 243 257 for (i = 0; i < init.cnt; i++) 244 258 LOG("init[%zu].addr=%p, init[%zu].size=%zu", 245 i, (void *) init.tasks[i]. addr, i, init.tasks[i].size);259 i, (void *) init.tasks[i].paddr, i, init.tasks[i].size); 246 260 } else 247 261 printf("No init binaries found.\n"); … … 262 276 * Create the first thread. 
263 277 */ 264 thread_t *kinit_thread 265 =thread_create(kinit, NULL, kernel, 0, "kinit", true);278 thread_t *kinit_thread = 279 thread_create(kinit, NULL, kernel, 0, "kinit", true); 266 280 if (!kinit_thread) 267 281 panic("Cannot create kinit thread."); -
kernel/generic/src/main/version.c
re895352 r49ff5f3 38 38 39 39 static const char *project = "SPARTAN kernel"; 40 static const char *copyright = "Copyright (c) 2001-2011 HelenOS project";40 static const char *copyright = STRING(COPYRIGHT); 41 41 static const char *release = STRING(RELEASE); 42 42 static const char *name = STRING(NAME); -
kernel/generic/src/mm/as.c
re895352 r49ff5f3 387 387 } 388 388 389 /** Return pointer to unmapped address space area 390 * 391 * The address space must be already locked when calling 392 * this function. 393 * 394 * @param as Address space. 395 * @param bound Lowest address bound. 396 * @param size Requested size of the allocation. 397 * 398 * @return Address of the beginning of unmapped address space area. 399 * @return -1 if no suitable address space area was found. 400 * 401 */ 402 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound, 403 size_t size) 404 { 405 ASSERT(mutex_locked(&as->lock)); 406 407 if (size == 0) 408 return (uintptr_t) -1; 409 410 /* 411 * Make sure we allocate from page-aligned 412 * address. Check for possible overflow in 413 * each step. 414 */ 415 416 size_t pages = SIZE2FRAMES(size); 417 418 /* 419 * Find the lowest unmapped address aligned on the size 420 * boundary, not smaller than bound and of the required size. 421 */ 422 423 /* First check the bound address itself */ 424 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE); 425 if ((addr >= bound) && 426 (check_area_conflicts(as, addr, pages, NULL))) 427 return addr; 428 429 /* Eventually check the addresses behind each area */ 430 list_foreach(as->as_area_btree.leaf_list, cur) { 431 btree_node_t *node = 432 list_get_instance(cur, btree_node_t, leaf_link); 433 434 for (btree_key_t i = 0; i < node->keys; i++) { 435 as_area_t *area = (as_area_t *) node->value[i]; 436 437 mutex_lock(&area->lock); 438 439 addr = 440 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE); 441 bool avail = 442 ((addr >= bound) && (addr >= area->base) && 443 (check_area_conflicts(as, addr, pages, area))); 444 445 mutex_unlock(&area->lock); 446 447 if (avail) 448 return addr; 449 } 450 } 451 452 /* No suitable address space area found */ 453 return (uintptr_t) -1; 454 } 455 389 456 /** Create address space area of common attributes. 390 457 * … … 394 461 * @param flags Flags of the area memory. 
395 462 * @param size Size of area. 396 * @param base Base address of area.397 463 * @param attrs Attributes of the area. 398 464 * @param backend Address space area backend. NULL if no backend is used. 399 465 * @param backend_data NULL or a pointer to an array holding two void *. 466 * @param base Starting virtual address of the area. 467 * If set to -1, a suitable mappable area is found. 468 * @param bound Lowest address bound if base is set to -1. 469 * Otherwise ignored. 400 470 * 401 471 * @return Address space area on success or NULL on failure. … … 403 473 */ 404 474 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 405 u intptr_t base, unsigned int attrs, mem_backend_t *backend,406 mem_backend_data_t *backend_data )407 { 408 if (( base % PAGE_SIZE) != 0)475 unsigned int attrs, mem_backend_t *backend, 476 mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound) 477 { 478 if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0)) 409 479 return NULL; 410 480 … … 420 490 mutex_lock(&as->lock); 421 491 422 if (!check_area_conflicts(as, base, pages, NULL)) { 492 if (*base == (uintptr_t) -1) { 493 *base = as_get_unmapped_area(as, bound, size); 494 if (*base == (uintptr_t) -1) { 495 mutex_unlock(&as->lock); 496 return NULL; 497 } 498 } 499 500 if (!check_area_conflicts(as, *base, pages, NULL)) { 423 501 mutex_unlock(&as->lock); 424 502 return NULL; … … 434 512 area->pages = pages; 435 513 area->resident = 0; 436 area->base = base;514 area->base = *base; 437 515 area->sh_info = NULL; 438 516 area->backend = backend; … … 452 530 453 531 btree_create(&area->used_space); 454 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 532 btree_insert(&as->as_area_btree, *base, (void *) area, 533 NULL); 455 534 456 535 mutex_unlock(&as->lock); … … 860 939 * @param acc_size Expected size of the source area. 861 940 * @param dst_as Pointer to destination address space. 
862 * @param dst_base Target base address.863 941 * @param dst_flags_mask Destination address space area flags mask. 942 * @param dst_base Target base address. If set to -1, 943 * a suitable mappable area is found. 944 * @param bound Lowest address bound if dst_base is set to -1. 945 * Otherwise ignored. 864 946 * 865 947 * @return Zero on success. … … 873 955 */ 874 956 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 875 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 957 as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base, 958 uintptr_t bound) 876 959 { 877 960 mutex_lock(&src_as->lock); … … 945 1028 * to support sharing in less privileged mode. 946 1029 */ 947 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 948 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 1030 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, 1031 src_size, AS_AREA_ATTR_PARTIAL, src_backend, 1032 &src_backend_data, dst_base, bound); 949 1033 if (!dst_area) { 950 1034 /* … … 1955 2039 */ 1956 2040 1957 /** Wrapper for as_area_create(). */ 1958 sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)1959 { 1960 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,1961 AS_AREA_ATTR_NONE, &anon_backend, NULL))1962 return (sysarg_t) address;1963 else2041 sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags, 2042 uintptr_t bound) 2043 { 2044 uintptr_t virt = base; 2045 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size, 2046 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound); 2047 if (area == NULL) 1964 2048 return (sysarg_t) -1; 1965 } 1966 1967 /** Wrapper for as_area_resize(). */ 2049 2050 return (sysarg_t) virt; 2051 } 2052 1968 2053 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1969 2054 { … … 1971 2056 } 1972 2057 1973 /** Wrapper for as_area_change_flags(). 
*/1974 2058 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1975 2059 { … … 1977 2061 } 1978 2062 1979 /** Wrapper for as_area_destroy(). */1980 2063 sysarg_t sys_as_area_destroy(uintptr_t address) 1981 2064 { 1982 2065 return (sysarg_t) as_area_destroy(AS, address); 1983 }1984 1985 /** Return pointer to unmapped address space area1986 *1987 * @param base Lowest address bound.1988 * @param size Requested size of the allocation.1989 *1990 * @return Pointer to the beginning of unmapped address space area.1991 *1992 */1993 sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)1994 {1995 if (size == 0)1996 return 0;1997 1998 /*1999 * Make sure we allocate from page-aligned2000 * address. Check for possible overflow in2001 * each step.2002 */2003 2004 size_t pages = SIZE2FRAMES(size);2005 uintptr_t ret = 0;2006 2007 /*2008 * Find the lowest unmapped address aligned on the sz2009 * boundary, not smaller than base and of the required size.2010 */2011 2012 mutex_lock(&AS->lock);2013 2014 /* First check the base address itself */2015 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);2016 if ((addr >= base) &&2017 (check_area_conflicts(AS, addr, pages, NULL)))2018 ret = addr;2019 2020 /* Eventually check the addresses behind each area */2021 list_foreach(AS->as_area_btree.leaf_list, cur) {2022 if (ret != 0)2023 break;2024 2025 btree_node_t *node =2026 list_get_instance(cur, btree_node_t, leaf_link);2027 2028 btree_key_t i;2029 for (i = 0; (ret == 0) && (i < node->keys); i++) {2030 uintptr_t addr;2031 2032 as_area_t *area = (as_area_t *) node->value[i];2033 2034 mutex_lock(&area->lock);2035 2036 addr = ALIGN_UP(area->base + P2SZ(area->pages),2037 PAGE_SIZE);2038 2039 if ((addr >= base) && (addr >= area->base) &&2040 (check_area_conflicts(AS, addr, pages, area)))2041 ret = addr;2042 2043 mutex_unlock(&area->lock);2044 }2045 }2046 2047 mutex_unlock(&AS->lock);2048 2049 return (sysarg_t) ret;2050 2066 } 2051 2067 -
kernel/generic/src/mm/backend_anon.c
re895352 r49ff5f3 44 44 #include <mm/frame.h> 45 45 #include <mm/slab.h> 46 #include <mm/km.h> 46 47 #include <synch/mutex.h> 47 48 #include <adt/list.h> … … 155 156 int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 156 157 { 158 uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE); 159 uintptr_t kpage; 157 160 uintptr_t frame; 158 161 … … 175 178 mutex_lock(&area->sh_info->lock); 176 179 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 177 ALIGN_DOWN(addr, PAGE_SIZE)- area->base, &leaf);180 upage - area->base, &leaf); 178 181 if (!frame) { 179 182 bool allocate = true; … … 185 188 */ 186 189 for (i = 0; i < leaf->keys; i++) { 187 if (leaf->key[i] == 188 ALIGN_DOWN(addr, PAGE_SIZE) - area->base) { 190 if (leaf->key[i] == upage - area->base) { 189 191 allocate = false; 190 192 break; … … 192 194 } 193 195 if (allocate) { 194 frame = (uintptr_t) frame_alloc_noreserve( 195 ONE_FRAME, 0); 196 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 196 kpage = km_temporary_page_get(&frame, 197 FRAME_NO_RESERVE); 198 memsetb((void *) kpage, PAGE_SIZE, 0); 199 km_temporary_page_put(kpage); 197 200 198 201 /* … … 201 204 */ 202 205 btree_insert(&area->sh_info->pagemap, 203 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, 204 (void *) frame, leaf); 206 upage - area->base, (void *) frame, leaf); 205 207 } 206 208 } … … 223 225 * the different causes 224 226 */ 225 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 226 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 227 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 228 memsetb((void *) kpage, PAGE_SIZE, 0); 229 km_temporary_page_put(kpage); 227 230 } 228 231 229 232 /* 230 * Map ' page' to 'frame'.233 * Map 'upage' to 'frame'. 231 234 * Note that TLB shootdown is not attempted as only new information is 232 235 * being inserted into page tables. 
233 236 */ 234 page_mapping_insert(AS, addr, frame, as_area_get_flags(area));235 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))237 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 238 if (!used_space_insert(area, upage, 1)) 236 239 panic("Cannot insert used space."); 237 240 -
kernel/generic/src/mm/backend_elf.c
re895352 r49ff5f3 44 44 #include <mm/page.h> 45 45 #include <mm/reserve.h> 46 #include <mm/km.h> 46 47 #include <genarch/mm/page_pt.h> 47 48 #include <genarch/mm/page_ht.h> … … 229 230 elf_segment_header_t *entry = area->backend_data.segment; 230 231 btree_node_t *leaf; 231 uintptr_t base, frame, page, start_anon; 232 uintptr_t base; 233 uintptr_t frame; 234 uintptr_t kpage; 235 uintptr_t upage; 236 uintptr_t start_anon; 232 237 size_t i; 233 238 bool dirty = false; … … 249 254 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 250 255 251 /* Virtual address of faulting page */252 page = ALIGN_DOWN(addr, PAGE_SIZE);256 /* Virtual address of faulting page */ 257 upage = ALIGN_DOWN(addr, PAGE_SIZE); 253 258 254 259 /* Virtual address of the end of initialized part of segment */ … … 264 269 mutex_lock(&area->sh_info->lock); 265 270 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 266 page - area->base, &leaf);271 upage - area->base, &leaf); 267 272 if (!frame) { 268 273 unsigned int i; … … 273 278 274 279 for (i = 0; i < leaf->keys; i++) { 275 if (leaf->key[i] == page - area->base) {280 if (leaf->key[i] == upage - area->base) { 276 281 found = true; 277 282 break; … … 281 286 if (frame || found) { 282 287 frame_reference_add(ADDR2PFN(frame)); 283 page_mapping_insert(AS, addr, frame,288 page_mapping_insert(AS, upage, frame, 284 289 as_area_get_flags(area)); 285 if (!used_space_insert(area, page, 1))290 if (!used_space_insert(area, upage, 1)) 286 291 panic("Cannot insert used space."); 287 292 mutex_unlock(&area->sh_info->lock); … … 294 299 * mapping. 295 300 */ 296 if ( page >= entry->p_vaddr &&page + PAGE_SIZE <= start_anon) {301 if (upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) { 297 302 /* 298 303 * Initialized portion of the segment. 
The memory is backed … … 304 309 */ 305 310 if (entry->p_flags & PF_W) { 306 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0);307 memcpy((void *) PA2KA(frame),308 (void *) (base + i * FRAME_SIZE), FRAME_SIZE);311 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 312 memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE), 313 PAGE_SIZE); 309 314 if (entry->p_flags & PF_X) { 310 smc_coherence_block((void *) PA2KA(frame), 311 FRAME_SIZE); 315 smc_coherence_block((void *) kpage, PAGE_SIZE); 312 316 } 317 km_temporary_page_put(kpage); 313 318 dirty = true; 314 319 } else { 315 frame = KA2PA(base + i * FRAME_SIZE); 320 pte_t *pte = page_mapping_find(AS_KERNEL, 321 base + i * FRAME_SIZE, true); 322 323 ASSERT(pte); 324 ASSERT(PTE_PRESENT(pte)); 325 326 frame = PTE_GET_FRAME(pte); 316 327 } 317 } else if ( page >= start_anon) {328 } else if (upage >= start_anon) { 318 329 /* 319 330 * This is the uninitialized portion of the segment. … … 322 333 * and cleared. 323 334 */ 324 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 325 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 335 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 336 memsetb((void *) kpage, PAGE_SIZE, 0); 337 km_temporary_page_put(kpage); 326 338 dirty = true; 327 339 } else { … … 334 346 * (The segment can be and often is shorter than 1 page). 
335 347 */ 336 if ( page < entry->p_vaddr)337 pad_lo = entry->p_vaddr - page;348 if (upage < entry->p_vaddr) 349 pad_lo = entry->p_vaddr - upage; 338 350 else 339 351 pad_lo = 0; 340 352 341 if (start_anon < page + PAGE_SIZE)342 pad_hi = page + PAGE_SIZE - start_anon;353 if (start_anon < upage + PAGE_SIZE) 354 pad_hi = upage + PAGE_SIZE - start_anon; 343 355 else 344 356 pad_hi = 0; 345 357 346 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);347 memcpy((void *) ( PA2KA(frame)+ pad_lo),348 (void *) (base + i * FRAME_SIZE + pad_lo),349 FRAME_SIZE - pad_lo - pad_hi);358 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 359 memcpy((void *) (kpage + pad_lo), 360 (void *) (base + i * PAGE_SIZE + pad_lo), 361 PAGE_SIZE - pad_lo - pad_hi); 350 362 if (entry->p_flags & PF_X) { 351 smc_coherence_block((void *) ( PA2KA(frame)+ pad_lo),352 FRAME_SIZE - pad_lo - pad_hi);363 smc_coherence_block((void *) (kpage + pad_lo), 364 PAGE_SIZE - pad_lo - pad_hi); 353 365 } 354 memsetb((void *) PA2KA(frame), pad_lo, 0);355 memsetb((void *) ( PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,356 0);366 memsetb((void *) kpage, pad_lo, 0); 367 memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0); 368 km_temporary_page_put(kpage); 357 369 dirty = true; 358 370 } … … 360 372 if (dirty && area->sh_info) { 361 373 frame_reference_add(ADDR2PFN(frame)); 362 btree_insert(&area->sh_info->pagemap, page - area->base,374 btree_insert(&area->sh_info->pagemap, upage - area->base, 363 375 (void *) frame, leaf); 364 376 } … … 367 379 mutex_unlock(&area->sh_info->lock); 368 380 369 page_mapping_insert(AS, addr, frame, as_area_get_flags(area));370 if (!used_space_insert(area, page, 1))381 page_mapping_insert(AS, upage, frame, as_area_get_flags(area)); 382 if (!used_space_insert(area, upage, 1)) 371 383 panic("Cannot insert used space."); 372 384 -
kernel/generic/src/mm/frame.c
re895352 r49ff5f3 240 240 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order) 241 241 { 242 return ( zone_flags_available(zone->flags)243 &&buddy_system_can_alloc(zone->buddy_system, order));242 return ((zone->flags & ZONE_AVAILABLE) && 243 buddy_system_can_alloc(zone->buddy_system, order)); 244 244 } 245 245 … … 265 265 * Check whether the zone meets the search criteria. 266 266 */ 267 if ( (zones.info[i].flags & flags) == flags) {267 if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) { 268 268 /* 269 269 * Check if the zone has 2^order frames area available. … … 460 460 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) 461 461 { 462 ASSERT(zone _flags_available(zone->flags));462 ASSERT(zone->flags & ZONE_AVAILABLE); 463 463 464 464 /* Allocate frames from zone buddy system */ … … 490 490 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx) 491 491 { 492 ASSERT(zone _flags_available(zone->flags));492 ASSERT(zone->flags & ZONE_AVAILABLE); 493 493 494 494 frame_t *frame = &zone->frames[frame_idx]; … … 518 518 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx) 519 519 { 520 ASSERT(zone _flags_available(zone->flags));520 ASSERT(zone->flags & ZONE_AVAILABLE); 521 521 522 522 frame_t *frame = zone_get_frame(zone, frame_idx); … … 549 549 buddy_system_t *buddy) 550 550 { 551 ASSERT(zone _flags_available(zones.info[z1].flags));552 ASSERT(zone _flags_available(zones.info[z2].flags));551 ASSERT(zones.info[z1].flags & ZONE_AVAILABLE); 552 ASSERT(zones.info[z2].flags & ZONE_AVAILABLE); 553 553 ASSERT(zones.info[z1].flags == zones.info[z2].flags); 554 554 ASSERT(zones.info[z1].base < zones.info[z2].base); … … 645 645 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count) 646 646 { 647 ASSERT(zone _flags_available(zones.info[znum].flags));647 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 648 648 649 649 size_t cframes = SIZE2FRAMES(zone_conf_size(count)); … … 681 681 size_t count) 
682 682 { 683 ASSERT(zone _flags_available(zones.info[znum].flags));683 ASSERT(zones.info[znum].flags & ZONE_AVAILABLE); 684 684 ASSERT(frame_idx + count < zones.info[znum].count); 685 685 … … 723 723 * set of flags 724 724 */ 725 if ((z1 >= zones.count) || (z2 >= zones.count) 726 || (z2 - z1 != 1) 727 || (!zone_flags_available(zones.info[z1].flags)) 728 || (!zone_flags_available(zones.info[z2].flags)) 729 || (zones.info[z1].flags != zones.info[z2].flags)) { 725 if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) || 726 (zones.info[z1].flags != zones.info[z2].flags)) { 730 727 ret = false; 731 728 goto errout; … … 828 825 zone->buddy_system = buddy; 829 826 830 if ( zone_flags_available(flags)) {827 if (flags & ZONE_AVAILABLE) { 831 828 /* 832 829 * Compute order for buddy system and initialize … … 865 862 { 866 863 return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count))); 864 } 865 866 /** Allocate external configuration frames from low memory. */ 867 pfn_t zone_external_conf_alloc(size_t count) 868 { 869 size_t size = zone_conf_size(count); 870 size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1); 871 872 return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, 873 FRAME_LOWMEM | FRAME_ATOMIC)); 867 874 } 868 875 … … 888 895 irq_spinlock_lock(&zones.lock, true); 889 896 890 if ( zone_flags_available(flags)) { /* Create available zone */897 if (flags & ZONE_AVAILABLE) { /* Create available zone */ 891 898 /* Theoretically we could have NULL here, practically make sure 892 899 * nobody tries to do that. If some platform requires, remove … … 894 901 */ 895 902 ASSERT(confframe != ADDR2PFN((uintptr_t ) NULL)); 903 904 /* Update the known end of physical memory. 
*/ 905 config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count)); 896 906 897 907 /* If confframe is supposed to be inside our zone, then make sure … … 914 924 for (i = 0; i < init.cnt; i++) 915 925 if (overlaps(addr, PFN2ADDR(confcount), 916 KA2PA(init.tasks[i].addr),926 init.tasks[i].paddr, 917 927 init.tasks[i].size)) { 918 928 overlap = true; … … 1232 1242 1233 1243 /* Tell the architecture to create some memory */ 1234 frame_ arch_init();1244 frame_low_arch_init(); 1235 1245 if (config.cpu_active == 1) { 1236 1246 frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), … … 1241 1251 size_t i; 1242 1252 for (i = 0; i < init.cnt; i++) { 1243 pfn_t pfn = ADDR2PFN( KA2PA(init.tasks[i].addr));1253 pfn_t pfn = ADDR2PFN(init.tasks[i].paddr); 1244 1254 frame_mark_unavailable(pfn, 1245 1255 SIZE2FRAMES(init.tasks[i].size)); … … 1255 1265 frame_mark_unavailable(0, 1); 1256 1266 } 1267 frame_high_arch_init(); 1268 } 1269 1270 /** Adjust bounds of physical memory region according to low/high memory split. 1271 * 1272 * @param low[in] If true, the adjustment is performed to make the region 1273 * fit in the low memory. Otherwise the adjustment is 1274 * performed to make the region fit in the high memory. 1275 * @param basep[inout] Pointer to a variable which contains the region's base 1276 * address and which may receive the adjusted base address. 1277 * @param sizep[inout] Pointer to a variable which contains the region's size 1278 * and which may receive the adjusted size. 1279 * @retun True if the region still exists even after the 1280 * adjustment, false otherwise. 
1281 */ 1282 bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep) 1283 { 1284 uintptr_t limit = KA2PA(config.identity_base) + config.identity_size; 1285 1286 if (low) { 1287 if (*basep > limit) 1288 return false; 1289 if (*basep + *sizep > limit) 1290 *sizep = limit - *basep; 1291 } else { 1292 if (*basep + *sizep <= limit) 1293 return false; 1294 if (*basep <= limit) { 1295 *sizep -= limit - *basep; 1296 *basep = limit; 1297 } 1298 } 1299 return true; 1257 1300 } 1258 1301 … … 1293 1336 *total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1294 1337 1295 if (zone _flags_available(zones.info[i].flags)) {1338 if (zones.info[i].flags & ZONE_AVAILABLE) { 1296 1339 *busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count); 1297 1340 *free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count); … … 1344 1387 irq_spinlock_unlock(&zones.lock, true); 1345 1388 1346 bool available = zone_flags_available(flags);1389 bool available = ((flags & ZONE_AVAILABLE) != 0); 1347 1390 1348 1391 printf("%-4zu", i); … … 1356 1399 #endif 1357 1400 1358 printf(" %12zu %c%c%c ", count, 1359 available ? 'A' : ' ', 1360 (flags & ZONE_RESERVED) ? 'R' : ' ', 1361 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1401 printf(" %12zu %c%c%c%c%c ", count, 1402 available ? 'A' : '-', 1403 (flags & ZONE_RESERVED) ? 'R' : '-', 1404 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1405 (flags & ZONE_LOWMEM) ? 'L' : '-', 1406 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1362 1407 1363 1408 if (available) … … 1401 1446 irq_spinlock_unlock(&zones.lock, true); 1402 1447 1403 bool available = zone_flags_available(flags);1448 bool available = ((flags & ZONE_AVAILABLE) != 0); 1404 1449 1405 1450 uint64_t size; … … 1411 1456 printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count, 1412 1457 size, size_suffix); 1413 printf("Zone flags: %c%c%c\n", 1414 available ? 'A' : ' ', 1415 (flags & ZONE_RESERVED) ? 'R' : ' ', 1416 (flags & ZONE_FIRMWARE) ? 'F' : ' '); 1458 printf("Zone flags: %c%c%c%c%c\n", 1459 available ? 
'A' : '-', 1460 (flags & ZONE_RESERVED) ? 'R' : '-', 1461 (flags & ZONE_FIRMWARE) ? 'F' : '-', 1462 (flags & ZONE_LOWMEM) ? 'L' : '-', 1463 (flags & ZONE_HIGHMEM) ? 'H' : '-'); 1417 1464 1418 1465 if (available) { -
kernel/generic/src/mm/page.c
re895352 r49ff5f3 53 53 * We assume that the other processors are either not using the mapping yet 54 54 * (i.e. during the bootstrap) or are executing the TLB shootdown code. While 55 * we don't care much about the former case, the processors in the latter case 55 * we don't care much about the former case, the processors in the latter case 56 56 * will do an implicit serialization by virtue of running the TLB shootdown 57 57 * interrupt handler. … … 74 74 #include <syscall/copy.h> 75 75 #include <errno.h> 76 #include <align.h> 76 77 77 78 /** Virtual operations for page subsystem. */ … … 81 82 { 82 83 page_arch_init(); 83 }84 85 /** Map memory structure86 *87 * Identity-map memory structure88 * considering possible crossings89 * of page boundaries.90 *91 * @param addr Address of the structure.92 * @param size Size of the structure.93 *94 */95 void map_structure(uintptr_t addr, size_t size)96 {97 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1)));98 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);99 100 size_t i;101 for (i = 0; i < cnt; i++)102 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE,103 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);104 105 /* Repel prefetched accesses to the old mapping. */106 memory_barrier();107 84 } 108 85 … … 176 153 } 177 154 155 /** Make the mapping shared by all page tables (not address spaces). 156 * 157 * @param base Starting virtual address of the range that is made global. 158 * @param size Size of the address range that is made global. 
159 */ 160 void page_mapping_make_global(uintptr_t base, size_t size) 161 { 162 ASSERT(page_mapping_operations); 163 ASSERT(page_mapping_operations->mapping_make_global); 164 165 return page_mapping_operations->mapping_make_global(base, size); 166 } 167 168 int page_find_mapping(uintptr_t virt, void **phys) 169 { 170 page_table_lock(AS, true); 171 172 pte_t *pte = page_mapping_find(AS, virt, false); 173 if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) { 174 page_table_unlock(AS, true); 175 return ENOENT; 176 } 177 178 *phys = (void *) PTE_GET_FRAME(pte) + 179 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 180 181 page_table_unlock(AS, true); 182 183 return EOK; 184 } 185 178 186 /** Syscall wrapper for getting mapping of a virtual page. 179 * 180 * @retval EOK Everything went find, @p uspace_frame and @p uspace_node 181 * contains correct values. 182 * @retval ENOENT Virtual address has no mapping. 183 */ 184 sysarg_t sys_page_find_mapping(uintptr_t virt_address, 185 uintptr_t *uspace_frame) 186 { 187 mutex_lock(&AS->lock); 188 189 pte_t *pte = page_mapping_find(AS, virt_address, false); 190 if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) { 191 mutex_unlock(&AS->lock); 192 193 return (sysarg_t) ENOENT; 194 } 195 196 uintptr_t phys_address = PTE_GET_FRAME(pte); 197 198 mutex_unlock(&AS->lock); 199 200 int rc = copy_to_uspace(uspace_frame, 201 &phys_address, sizeof(phys_address)); 202 if (rc != EOK) { 203 return (sysarg_t) rc; 204 } 205 206 return EOK; 187 * 188 * @return EOK on success. 189 * @return ENOENT if no virtual address mapping found. 190 * 191 */ 192 sysarg_t sys_page_find_mapping(uintptr_t virt, void *phys_ptr) 193 { 194 void *phys; 195 int rc = page_find_mapping(virt, &phys); 196 if (rc != EOK) 197 return rc; 198 199 rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys)); 200 return (sysarg_t) rc; 207 201 } 208 202 -
kernel/generic/src/mm/reserve.c
re895352 r49ff5f3 42 42 #include <typedefs.h> 43 43 #include <arch/types.h> 44 #include <debug.h> 45 46 static bool reserve_initialized = false; 44 47 45 48 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock"); … … 54 57 { 55 58 reserve = frame_total_free_get(); 59 reserve_initialized = true; 56 60 } 57 61 … … 67 71 { 68 72 bool reserved = false; 73 74 ASSERT(reserve_initialized); 69 75 70 76 irq_spinlock_lock(&reserve_lock, true); … … 111 117 void reserve_force_alloc(size_t size) 112 118 { 119 if (!reserve_initialized) 120 return; 121 113 122 irq_spinlock_lock(&reserve_lock, true); 114 123 reserve -= size; … … 122 131 void reserve_free(size_t size) 123 132 { 133 if (!reserve_initialized) 134 return; 135 124 136 irq_spinlock_lock(&reserve_lock, true); 125 137 reserve += size; -
kernel/generic/src/printf/printf_core.c
re895352 r49ff5f3 284 284 /* Print leading spaces. */ 285 285 size_t strw = str_length(str); 286 if ( precision == 0)286 if ((precision == 0) || (precision > strw)) 287 287 precision = strw; 288 288 … … 332 332 /* Print leading spaces. */ 333 333 size_t strw = wstr_length(str); 334 if ( precision == 0)334 if ((precision == 0) || (precision > strw)) 335 335 precision = strw; 336 336 -
kernel/generic/src/proc/program.c
re895352 r49ff5f3 87 87 * Create the stack address space area. 88 88 */ 89 uintptr_t virt = USTACK_ADDRESS; 89 90 as_area_t *area = as_area_create(as, 90 91 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 91 STACK_SIZE, USTACK_ADDRESS, AS_AREA_ATTR_NONE, 92 &anon_backend, NULL); 92 STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0); 93 93 if (!area) 94 94 return ENOMEM; -
kernel/generic/src/proc/thread.c
re895352 r49ff5f3 173 173 #endif /* CONFIG_FPU */ 174 174 175 /* 176 * Allocate the kernel stack from the low-memory to prevent an infinite 177 * nesting of TLB-misses when accessing the stack from the part of the 178 * TLB-miss handler written in C. 179 * 180 * Note that low-memory is safe to be used for the stack as it will be 181 * covered by the kernel identity mapping, which guarantees not to 182 * nest TLB-misses infinitely (either via some hardware mechanism or 183 * by the construciton of the assembly-language part of the TLB-miss 184 * handler). 185 * 186 * This restriction can be lifted once each architecture provides 187 * a similar guarantee, for example by locking the kernel stack 188 * in the TLB whenever it is allocated from the high-memory and the 189 * thread is being scheduled to run. 190 */ 191 kmflags |= FRAME_LOWMEM; 192 kmflags &= ~FRAME_HIGHMEM; 193 175 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 176 195 if (!thread->kstack) { -
kernel/generic/src/syscall/copy.c
re895352 r49ff5f3 56 56 * @param size Size of the data to be copied. 57 57 * 58 * @return 0on success or error code from @ref errno.h.58 * @return EOK on success or error code from @ref errno.h. 59 59 */ 60 60 int copy_from_uspace(void *dst, const void *uspace_src, size_t size) … … 94 94 95 95 interrupts_restore(ipl); 96 return !rc ? EPERM : 0;96 return !rc ? EPERM : EOK; 97 97 } 98 98 -
kernel/generic/src/syscall/syscall.c
re895352 r49ff5f3 146 146 (syshandler_t) sys_as_area_change_flags, 147 147 (syshandler_t) sys_as_area_destroy, 148 (syshandler_t) sys_as_get_unmapped_area,149 148 150 149 /* Page mapping related syscalls. */ … … 176 175 (syshandler_t) sys_device_assign_devno, 177 176 (syshandler_t) sys_physmem_map, 177 (syshandler_t) sys_physmem_unmap, 178 (syshandler_t) sys_dmamem_map, 179 (syshandler_t) sys_dmamem_unmap, 178 180 (syshandler_t) sys_iospace_enable, 179 (syshandler_t) sys_register_irq, 180 (syshandler_t) sys_unregister_irq, 181 (syshandler_t) sys_iospace_disable, 182 (syshandler_t) sys_irq_register, 183 (syshandler_t) sys_irq_unregister, 181 184 182 185 /* Sysinfo syscalls. */ 186 (syshandler_t) sys_sysinfo_get_keys_size, 187 (syshandler_t) sys_sysinfo_get_keys, 183 188 (syshandler_t) sys_sysinfo_get_val_type, 184 189 (syshandler_t) sys_sysinfo_get_value, -
kernel/generic/src/sysinfo/stats.c
re895352 r49ff5f3 83 83 * 84 84 * @param item Sysinfo item (unused). 85 * @param data Unused. 85 86 * 86 87 * @return System uptime (in secords). 87 88 * 88 89 */ 89 static sysarg_t get_stats_uptime(struct sysinfo_item *item )90 static sysarg_t get_stats_uptime(struct sysinfo_item *item, void *data) 90 91 { 91 92 /* This doesn't have to be very accurate */ … … 98 99 * @param size Size of the returned data. 99 100 * @param dry_run Do not get the data, just calculate the size. 101 * @param data Unused. 100 102 * 101 103 * @return Data containing several stats_cpu_t structures. … … 104 106 */ 105 107 static void *get_stats_cpus(struct sysinfo_item *item, size_t *size, 106 bool dry_run )108 bool dry_run, void *data) 107 109 { 108 110 *size = sizeof(stats_cpu_t) * config.cpu_count; … … 249 251 ASSERT(interrupts_disabled()); 250 252 ASSERT(irq_spinlock_locked(&task->lock)); 251 253 252 254 stats_task->task_id = task->taskid; 253 255 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name); … … 293 295 * @param size Size of the returned data. 294 296 * @param dry_run Do not get the data, just calculate the size. 297 * @param data Unused. 295 298 * 296 299 * @return Data containing several stats_task_t structures. … … 299 302 */ 300 303 static void *get_stats_tasks(struct sysinfo_item *item, size_t *size, 301 bool dry_run )304 bool dry_run, void *data) 302 305 { 303 306 /* Messing with task structures, avoid deadlock */ … … 350 353 ASSERT(interrupts_disabled()); 351 354 ASSERT(irq_spinlock_locked(&thread->lock)); 352 355 353 356 stats_thread->thread_id = thread->tid; 354 357 stats_thread->task_id = thread->task->taskid; … … 398 401 * @param size Size of the returned data. 399 402 * @param dry_run Do not get the data, just calculate the size. 403 * @param data Unused. 400 404 * 401 405 * @return Data containing several stats_task_t structures. 
… … 404 408 */ 405 409 static void *get_stats_threads(struct sysinfo_item *item, size_t *size, 406 bool dry_run )410 bool dry_run, void *data) 407 411 { 408 412 /* Messing with threads structures, avoid deadlock */ … … 451 455 * @param name Task ID (string-encoded number). 452 456 * @param dry_run Do not get the data, just calculate the size. 457 * @param data Unused. 453 458 * 454 459 * @return Sysinfo return holder. The type of the returned … … 460 465 * 461 466 */ 462 static sysinfo_return_t get_stats_task(const char *name, bool dry_run) 467 static sysinfo_return_t get_stats_task(const char *name, bool dry_run, 468 void *data) 463 469 { 464 470 /* Initially no return value */ … … 468 474 /* Parse the task ID */ 469 475 task_id_t task_id; 470 if (str_uint64 (name, NULL, 0, true, &task_id) != EOK)476 if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK) 471 477 return ret; 472 478 … … 520 526 * @param name Thread ID (string-encoded number). 521 527 * @param dry_run Do not get the data, just calculate the size. 528 * @param data Unused. 522 529 * 523 530 * @return Sysinfo return holder. The type of the returned … … 529 536 * 530 537 */ 531 static sysinfo_return_t get_stats_thread(const char *name, bool dry_run) 538 static sysinfo_return_t get_stats_thread(const char *name, bool dry_run, 539 void *data) 532 540 { 533 541 /* Initially no return value */ … … 537 545 /* Parse the thread ID */ 538 546 thread_id_t thread_id; 539 if (str_uint64 (name, NULL, 0, true, &thread_id) != EOK)547 if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK) 540 548 return ret; 541 549 … … 586 594 * @param size Size of the returned data. 587 595 * @param dry_run Do not get the data, just calculate the size. 596 * @param data Unused. 588 597 * 589 598 * @return Data containing several stats_exc_t structures. 
… … 592 601 */ 593 602 static void *get_stats_exceptions(struct sysinfo_item *item, size_t *size, 594 bool dry_run )603 bool dry_run, void *data) 595 604 { 596 605 *size = sizeof(stats_exc_t) * IVT_ITEMS; … … 634 643 * @param name Exception number (string-encoded number). 635 644 * @param dry_run Do not get the data, just calculate the size. 645 * @param data Unused. 636 646 * 637 647 * @return Sysinfo return holder. The type of the returned … … 643 653 * 644 654 */ 645 static sysinfo_return_t get_stats_exception(const char *name, bool dry_run) 655 static sysinfo_return_t get_stats_exception(const char *name, bool dry_run, 656 void *data) 646 657 { 647 658 /* Initially no return value */ … … 651 662 /* Parse the exception number */ 652 663 uint64_t excn; 653 if (str_uint64 (name, NULL, 0, true, &excn) != EOK)664 if (str_uint64_t(name, NULL, 0, true, &excn) != EOK) 654 665 return ret; 655 666 … … 705 716 * @param size Size of the returned data. 706 717 * @param dry_run Do not get the data, just calculate the size. 718 * @param data Unused. 707 719 * 708 720 * @return Data containing stats_physmem_t. … … 711 723 */ 712 724 static void *get_stats_physmem(struct sysinfo_item *item, size_t *size, 713 bool dry_run )725 bool dry_run, void *data) 714 726 { 715 727 *size = sizeof(stats_physmem_t); … … 735 747 * @param size Size of the returned data. 736 748 * @param dry_run Do not get the data, just calculate the size. 749 * @param data Unused. 737 750 * 738 751 * @return Data several load_t values. 
… … 741 754 */ 742 755 static void *get_stats_load(struct sysinfo_item *item, size_t *size, 743 bool dry_run )756 bool dry_run, void *data) 744 757 { 745 758 *size = sizeof(load_t) * LOAD_STEPS; … … 810 823 mutex_initialize(&load_lock, MUTEX_PASSIVE); 811 824 812 sysinfo_set_item_ fn_val("system.uptime", NULL, get_stats_uptime);813 sysinfo_set_item_ fn_data("system.cpus", NULL, get_stats_cpus);814 sysinfo_set_item_ fn_data("system.physmem", NULL, get_stats_physmem);815 sysinfo_set_item_ fn_data("system.load", NULL, get_stats_load);816 sysinfo_set_item_ fn_data("system.tasks", NULL, get_stats_tasks);817 sysinfo_set_item_ fn_data("system.threads", NULL, get_stats_threads);818 sysinfo_set_item_ fn_data("system.exceptions", NULL, get_stats_exceptions);819 sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task );820 sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread );821 sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception );825 sysinfo_set_item_gen_val("system.uptime", NULL, get_stats_uptime, NULL); 826 sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL); 827 sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL); 828 sysinfo_set_item_gen_data("system.load", NULL, get_stats_load, NULL); 829 sysinfo_set_item_gen_data("system.tasks", NULL, get_stats_tasks, NULL); 830 sysinfo_set_item_gen_data("system.threads", NULL, get_stats_threads, NULL); 831 sysinfo_set_item_gen_data("system.exceptions", NULL, get_stats_exceptions, NULL); 832 sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task, NULL); 833 sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread, NULL); 834 sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception, NULL); 822 835 } 823 836 -
kernel/generic/src/sysinfo/sysinfo.c
re895352 r49ff5f3 1 1 /* 2 2 * Copyright (c) 2006 Jakub Vana 3 * Copyright (c) 2012 Martin Decky 3 4 * All rights reserved. 4 5 * … … 99 100 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor, 100 101 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED); 101 102 102 103 mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE); 103 104 } … … 110 111 * @param subtree Current sysinfo (sub)tree root item. 111 112 * @param ret If the return value is NULL, this argument 112 * can be either also NULL (i.e. no item was113 * can be set either to NULL (i.e. no item was 113 114 * found and no data was generated) or the 114 115 * original pointer is used to store the value … … 125 126 { 126 127 ASSERT(subtree != NULL); 127 ASSERT(ret != NULL);128 128 129 129 sysinfo_item_t *cur = subtree; … … 151 151 case SYSINFO_SUBTREE_FUNCTION: 152 152 /* Get generated data */ 153 **ret = cur->subtree.get_data(name + i + 1, dry_run); 153 if (ret != NULL) 154 **ret = cur->subtree.generator.fn(name + i + 1, 155 dry_run, cur->subtree.generator.data); 156 154 157 return NULL; 155 158 default: 156 159 /* Not found, no data generated */ 157 *ret = NULL; 160 if (ret != NULL) 161 *ret = NULL; 162 158 163 return NULL; 159 164 } … … 164 169 165 170 /* Not found, no data generated */ 166 *ret = NULL; 171 if (ret != NULL) 172 *ret = NULL; 173 167 174 return NULL; 168 175 } … … 352 359 * a new root item (NULL for global sysinfo root). 353 360 * @param fn Numeric value generator function. 354 * 355 */ 356 void sysinfo_set_item_fn_val(const char *name, sysinfo_item_t **root, 357 sysinfo_fn_val_t fn) 361 * @param data Private data. 
362 * 363 */ 364 void sysinfo_set_item_gen_val(const char *name, sysinfo_item_t **root, 365 sysinfo_fn_val_t fn, void *data) 358 366 { 359 367 /* Protect sysinfo tree consistency */ … … 366 374 if (item != NULL) { 367 375 item->val_type = SYSINFO_VAL_FUNCTION_VAL; 368 item->val.fn_val = fn; 376 item->val.gen_val.fn = fn; 377 item->val.gen_val.data = data; 369 378 } 370 379 … … 383 392 * a new root item (NULL for global sysinfo root). 384 393 * @param fn Binary data generator function. 385 * 386 */ 387 void sysinfo_set_item_fn_data(const char *name, sysinfo_item_t **root, 388 sysinfo_fn_data_t fn) 394 * @param data Private data. 395 * 396 */ 397 void sysinfo_set_item_gen_data(const char *name, sysinfo_item_t **root, 398 sysinfo_fn_data_t fn, void *data) 389 399 { 390 400 /* Protect sysinfo tree consistency */ … … 397 407 if (item != NULL) { 398 408 item->val_type = SYSINFO_VAL_FUNCTION_DATA; 399 item->val.fn_data = fn; 409 item->val.gen_data.fn = fn; 410 item->val.gen_data.data = data; 400 411 } 401 412 … … 431 442 * a new root item (NULL for global sysinfo root). 432 443 * @param fn Subtree generator function. 444 * @param data Private data to be passed to the generator. 433 445 * 434 446 */ 435 447 void sysinfo_set_subtree_fn(const char *name, sysinfo_item_t **root, 436 sysinfo_fn_subtree_t fn )448 sysinfo_fn_subtree_t fn, void *data) 437 449 { 438 450 /* Protect sysinfo tree consistency */ … … 448 460 if ((item != NULL) && (item->subtree_type != SYSINFO_SUBTREE_TABLE)) { 449 461 item->subtree_type = SYSINFO_SUBTREE_FUNCTION; 450 item->subtree.get_data = fn; 462 item->subtree.generator.fn = fn; 463 item->subtree.generator.data = data; 451 464 } 452 465 … … 456 469 /** Sysinfo dump indentation helper routine 457 470 * 458 * @param depth Number of indentation characters to print. 
459 * 460 */ 461 NO_TRACE static void sysinfo_indent(unsigned int depth) 462 { 463 unsigned int i; 464 for (i = 0; i < depth; i++) 465 printf(" "); 471 * @param depth Number of spaces to print. 472 * 473 */ 474 NO_TRACE static void sysinfo_indent(size_t spaces) 475 { 476 for (size_t i = 0; i < spaces; i++) 477 printf(" "); 466 478 } 467 479 … … 470 482 * Should be called with sysinfo_lock held. 471 483 * 472 * @param root Root item of the current (sub)tree. 473 * @param depth Current depth in the sysinfo tree. 474 * 475 */ 476 NO_TRACE static void sysinfo_dump_internal(sysinfo_item_t *root, unsigned int depth) 477 { 478 sysinfo_item_t *cur = root; 479 484 * @param root Root item of the current (sub)tree. 485 * @param spaces Current indentation level. 486 * 487 */ 488 NO_TRACE static void sysinfo_dump_internal(sysinfo_item_t *root, size_t spaces) 489 { 480 490 /* Walk all siblings */ 481 while (cur != NULL) { 482 sysinfo_indent(depth); 491 for (sysinfo_item_t *cur = root; cur; cur = cur->next) { 492 size_t length; 493 494 if (spaces == 0) { 495 printf("%s", cur->name); 496 length = str_length(cur->name); 497 } else { 498 sysinfo_indent(spaces); 499 printf(".%s", cur->name); 500 length = str_length(cur->name) + 1; 501 } 483 502 484 503 sysarg_t val; … … 488 507 switch (cur->val_type) { 489 508 case SYSINFO_VAL_UNDEFINED: 490 printf(" + %s\n", cur->name);509 printf(" [undefined]\n"); 491 510 break; 492 511 case SYSINFO_VAL_VAL: 493 printf(" + %s -> %" PRIun" (%#" PRIxn ")\n", cur->name,494 cur->val.val , cur->val.val);512 printf(" -> %" PRIun" (%#" PRIxn ")\n", cur->val.val, 513 cur->val.val); 495 514 break; 496 515 case SYSINFO_VAL_DATA: 497 printf("+ %s (%zu bytes)\n", cur->name, 498 cur->val.data.size); 516 printf(" (%zu bytes)\n", cur->val.data.size); 499 517 break; 500 518 case SYSINFO_VAL_FUNCTION_VAL: 501 val = cur->val. 
fn_val(cur);502 printf(" + %s -> %" PRIun" (%#" PRIxn ") [generated]\n",503 cur->name, val,val);519 val = cur->val.gen_val.fn(cur, cur->val.gen_val.data); 520 printf(" -> %" PRIun" (%#" PRIxn ") [generated]\n", val, 521 val); 504 522 break; 505 523 case SYSINFO_VAL_FUNCTION_DATA: 506 524 /* N.B.: No data was actually returned (only a dry run) */ 507 (void) cur->val. fn_data(cur, &size, true);508 printf("+ %s (%zu bytes) [generated]\n", cur->name,509 size);525 (void) cur->val.gen_data.fn(cur, &size, true, 526 cur->val.gen_data.data); 527 printf(" (%zu bytes) [generated]\n", size); 510 528 break; 511 529 default: … … 518 536 break; 519 537 case SYSINFO_SUBTREE_TABLE: 520 sysinfo_dump_internal(cur->subtree.table, depth + 1);538 sysinfo_dump_internal(cur->subtree.table, spaces + length); 521 539 break; 522 540 case SYSINFO_SUBTREE_FUNCTION: 523 sysinfo_indent( depth + 1);524 printf(" + [generated subtree]\n");541 sysinfo_indent(spaces + length); 542 printf("<generated subtree>\n"); 525 543 break; 526 544 default: 527 sysinfo_indent( depth + 1);528 printf(" + [unknown subtree]\n");545 sysinfo_indent(spaces + length); 546 printf("<unknown subtree>\n"); 529 547 } 530 531 cur = cur->next;532 548 } 533 549 } … … 594 610 break; 595 611 case SYSINFO_VAL_FUNCTION_VAL: 596 ret.val = item->val. fn_val(item);612 ret.val = item->val.gen_val.fn(item, item->val.gen_val.data); 597 613 break; 598 614 case SYSINFO_VAL_FUNCTION_DATA: 599 ret.data.data = item->val. 
fn_data(item, &ret.data.size,600 dry_run );615 ret.data.data = item->val.gen_data.fn(item, &ret.data.size, 616 dry_run, item->val.gen_data.data); 601 617 break; 602 618 } … … 635 651 ASSERT(path); 636 652 637 if ((copy_from_uspace(path, ptr, size + 1) == 0) 638 &&(path[size] == 0)) {653 if ((copy_from_uspace(path, ptr, size + 1) == 0) && 654 (path[size] == 0)) { 639 655 /* 640 656 * Prevent other functions from messing with sysinfo while we … … 645 661 mutex_unlock(&sysinfo_lock); 646 662 } 663 647 664 free(path); 648 665 return ret; 666 } 667 668 /** Return sysinfo keys determined by name 669 * 670 * Should be called with sysinfo_lock held. 671 * 672 * @param name Sysinfo path. 673 * @param root Root item of the sysinfo (sub)tree. 674 * If it is NULL then consider the global 675 * sysinfo tree. 676 * @param dry_run Do not actually get any generated 677 * binary data, just calculate the size. 678 * 679 * @return Item value (constant or generated). 680 * 681 */ 682 NO_TRACE static sysinfo_return_t sysinfo_get_keys(const char *name, 683 sysinfo_item_t **root, bool dry_run) 684 { 685 if (root == NULL) 686 root = &global_root; 687 688 sysinfo_item_t *subtree = NULL; 689 690 if (name[0] != 0) { 691 /* Try to find the item */ 692 sysinfo_item_t *item = 693 sysinfo_find_item(name, *root, NULL, dry_run); 694 if ((item != NULL) && 695 (item->subtree_type == SYSINFO_SUBTREE_TABLE)) 696 subtree = item->subtree.table; 697 } else 698 subtree = *root; 699 700 sysinfo_return_t ret; 701 702 if (subtree != NULL) { 703 /* 704 * Calculate the size of subkeys. 
705 */ 706 size_t size = 0; 707 for (sysinfo_item_t *cur = subtree; cur; cur = cur->next) 708 size += str_size(cur->name) + 1; 709 710 if (dry_run) { 711 ret.tag = SYSINFO_VAL_DATA; 712 ret.data.data = NULL; 713 ret.data.size = size; 714 } else { 715 /* Allocate buffer for subkeys */ 716 char *names = (char *) malloc(size, FRAME_ATOMIC); 717 if (names == NULL) 718 return ret; 719 720 size_t pos = 0; 721 for (sysinfo_item_t *cur = subtree; cur; cur = cur->next) { 722 str_cpy(names + pos, size - pos, cur->name); 723 pos += str_size(cur->name) + 1; 724 } 725 726 /* Correct return value */ 727 ret.tag = SYSINFO_VAL_DATA; 728 ret.data.data = (void *) names; 729 ret.data.size = size; 730 } 731 } else { 732 /* No item in the fixed sysinfo tree */ 733 ret.tag = SYSINFO_VAL_UNDEFINED; 734 } 735 736 return ret; 737 } 738 739 /** Return sysinfo keys determined by name from user space 740 * 741 * The path string passed from the user space has to be properly 742 * null-terminated (the last passed character must be null). 743 * 744 * @param ptr Sysinfo path in the user address space. 745 * @param size Size of the path string. 746 * @param dry_run Do not actually get any generated 747 * binary data, just calculate the size. 748 * 749 */ 750 NO_TRACE static sysinfo_return_t sysinfo_get_keys_uspace(void *ptr, size_t size, 751 bool dry_run) 752 { 753 sysinfo_return_t ret; 754 ret.tag = SYSINFO_VAL_UNDEFINED; 755 756 if (size > SYSINFO_MAX_PATH) 757 return ret; 758 759 char *path = (char *) malloc(size + 1, 0); 760 ASSERT(path); 761 762 if ((copy_from_uspace(path, ptr, size + 1) == 0) && 763 (path[size] == 0)) { 764 /* 765 * Prevent other functions from messing with sysinfo while we 766 * are reading it. 
767 */ 768 mutex_lock(&sysinfo_lock); 769 ret = sysinfo_get_keys(path, NULL, dry_run); 770 mutex_unlock(&sysinfo_lock); 771 } 772 773 free(path); 774 return ret; 775 } 776 777 /** Get the sysinfo keys size (syscall) 778 * 779 * The path string passed from the user space has 780 * to be properly null-terminated (the last passed 781 * character must be null). 782 * 783 * @param path_ptr Sysinfo path in the user address space. 784 * @param path_size Size of the path string. 785 * @param size_ptr User space pointer where to store the 786 * keys size. 787 * 788 * @return Error code (EOK in case of no error). 789 * 790 */ 791 sysarg_t sys_sysinfo_get_keys_size(void *path_ptr, size_t path_size, 792 void *size_ptr) 793 { 794 int rc; 795 796 /* 797 * Get the keys. 798 * 799 * N.B.: There is no need to free any potential keys 800 * since we request a dry run. 801 */ 802 sysinfo_return_t ret = 803 sysinfo_get_keys_uspace(path_ptr, path_size, true); 804 805 /* Check return data tag */ 806 if (ret.tag == SYSINFO_VAL_DATA) 807 rc = copy_to_uspace(size_ptr, &ret.data.size, 808 sizeof(ret.data.size)); 809 else 810 rc = EINVAL; 811 812 return (sysarg_t) rc; 813 } 814 815 /** Get the sysinfo keys (syscall) 816 * 817 * The path string passed from the user space has 818 * to be properly null-terminated (the last passed 819 * character must be null). 820 * 821 * If the user space buffer size does not equal 822 * the actual size of the returned data, the data 823 * is truncated. 824 * 825 * The actual size of data returned is stored to 826 * size_ptr. 827 * 828 * @param path_ptr Sysinfo path in the user address space. 829 * @param path_size Size of the path string. 830 * @param buffer_ptr User space pointer to the buffer where 831 * to store the binary data. 832 * @param buffer_size User space buffer size. 833 * @param size_ptr User space pointer where to store the 834 * binary data size. 835 * 836 * @return Error code (EOK in case of no error). 
837 * 838 */ 839 sysarg_t sys_sysinfo_get_keys(void *path_ptr, size_t path_size, 840 void *buffer_ptr, size_t buffer_size, size_t *size_ptr) 841 { 842 int rc; 843 844 /* Get the keys */ 845 sysinfo_return_t ret = sysinfo_get_keys_uspace(path_ptr, path_size, 846 false); 847 848 /* Check return data tag */ 849 if (ret.tag == SYSINFO_VAL_DATA) { 850 size_t size = min(ret.data.size, buffer_size); 851 rc = copy_to_uspace(buffer_ptr, ret.data.data, size); 852 if (rc == EOK) 853 rc = copy_to_uspace(size_ptr, &size, sizeof(size)); 854 855 free(ret.data.data); 856 } else 857 rc = EINVAL; 858 859 return (sysarg_t) rc; 649 860 } 650 861 … … 672 883 673 884 /* 674 * Map generated value types to constant types (user space does not care675 * whether the value is constant or generated).885 * Map generated value types to constant types (user space does 886 * not care whether the value is constant or generated). 676 887 */ 677 888 if (ret.tag == SYSINFO_VAL_FUNCTION_VAL) … … 701 912 { 702 913 int rc; 703 914 704 915 /* 705 916 * Get the item. 706 917 * 707 * N.B.: There is no need to free any potential generated binary data708 * since we request a dry run.918 * N.B.: There is no need to free any potential generated binary 919 * data since we request a dry run. 709 920 */ 710 921 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true); … … 741 952 * Get the item. 742 953 * 743 * N.B.: There is no need to free any potential generated binary data744 * since we request a dry run.954 * N.B.: There is no need to free any potential generated binary 955 * data since we request a dry run. 745 956 */ 746 957 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, true);
Note:
See TracChangeset
for help on using the changeset viewer.
