Changeset ad7a6c9 in mainline for kernel/generic/src
- Timestamp: 2011-03-30T13:10:24Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 4ae90f9
- Parents: 6e50466 (diff), d6b81941 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 25 edited, 1 moved
- adt/avl.c (modified) (1 diff)
- console/cmd.c (modified) (37 diffs)
- console/console.c (modified) (1 diff)
- ddi/ddi.c (modified) (6 diffs)
- ddi/irq.c (modified) (2 diffs)
- interrupt/interrupt.c (modified) (2 diffs)
- ipc/ipc.c (modified) (5 diffs)
- ipc/irq.c (modified) (6 diffs)
- ipc/sysipc.c (modified) (3 diffs)
- lib/elf.c (modified) (2 diffs)
- lib/memfnc.c (moved from uspace/srv/net/il/ip/ip_module.c) (2 diffs)
- lib/memstr.c (modified) (2 diffs)
- lib/rd.c (modified) (1 diff)
- main/main.c (modified) (2 diffs)
- mm/as.c (modified) (94 diffs)
- mm/backend_phys.c (modified) (1 diff)
- mm/slab.c (modified) (2 diffs)
- proc/program.c (modified) (2 diffs)
- proc/scheduler.c (modified) (16 diffs)
- proc/task.c (modified) (4 diffs)
- proc/thread.c (modified) (10 diffs)
- synch/waitq.c (modified) (6 diffs)
- syscall/syscall.c (modified) (8 diffs)
- sysinfo/stats.c (modified) (4 diffs)
- sysinfo/sysinfo.c (modified) (4 diffs)
- time/clock.c (modified) (2 diffs)
Legend:
- Unmodified (prefixed with ' ')
- Added (prefixed with '+')
- Removed (prefixed with '-')
kernel/generic/src/adt/avl.c
r6e50466 → rad7a6c9:

 void avltree_walk(avltree_t *t, avltree_walker_t walker, void *arg)
 {
-    _avltree_walk(t->root, walker, arg);
+    if (t->root)
+        _avltree_walk(t->root, walker, arg);
 }
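The guard makes walking an empty tree a well-defined no-op; previously _avltree_walk() would have dereferenced a NULL root. A minimal standalone sketch of the same pattern (the simplified node and walker types here are illustrative assumptions, not the kernel's actual definitions):

    #include <stddef.h>

    typedef struct avl_node {
        struct avl_node *lft;    /* left child or NULL */
        struct avl_node *rgt;    /* right child or NULL */
        void *data;
    } avl_node_t;

    typedef void (*walker_t)(avl_node_t *node, void *arg);

    /* In-order walk; assumes cur is non-NULL. */
    static void walk(avl_node_t *cur, walker_t walker, void *arg)
    {
        if (cur->lft)
            walk(cur->lft, walker, arg);
        walker(cur, arg);
        if (cur->rgt)
            walk(cur->rgt, walker, arg);
    }

    /* Public entry point: tolerate an empty tree, as the fix above does. */
    void tree_walk(avl_node_t *root, walker_t walker, void *arg)
    {
        if (root)
            walk(root, walker, arg);
    }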
kernel/generic/src/console/cmd.c
r6e50466 rad7a6c9 78 78 static cmd_info_t help_info = { 79 79 .name = "help", 80 .description = "List ofsupported commands.",80 .description = "List supported commands.", 81 81 .func = cmd_help, 82 82 .argc = 0 83 83 }; 84 84 85 /* Data and methods for 'reboot' command. */ 85 86 static int cmd_reboot(cmd_arg_t *argv); 86 87 static cmd_info_t reboot_info = { 87 88 .name = "reboot", 88 .description = "Reboot .",89 .description = "Reboot system.", 89 90 .func = cmd_reboot, 90 91 .argc = 0 91 92 }; 92 93 94 /* Data and methods for 'uptime' command. */ 93 95 static int cmd_uptime(cmd_arg_t *argv); 94 96 static cmd_info_t uptime_info = { 95 97 .name = "uptime", 96 .description = " Print uptime information.",98 .description = "Show system uptime.", 97 99 .func = cmd_uptime, 98 100 .argc = 0 99 101 }; 100 102 103 /* Data and methods for 'continue' command. */ 101 104 static int cmd_continue(cmd_arg_t *argv); 102 105 static cmd_info_t continue_info = { … … 108 111 109 112 #ifdef CONFIG_TEST 113 114 /* Data and methods for 'test' command. */ 110 115 static char test_buf[MAX_CMDLINE + 1]; 111 116 static int cmd_test(cmd_arg_t *argv); … … 119 124 static cmd_info_t test_info = { 120 125 .name = "test", 121 .description = " Print list ofkernel tests or run a test.",126 .description = "<test> List kernel tests or run a test.", 122 127 .func = cmd_test, 123 128 .argc = 1, … … 125 130 }; 126 131 132 /* Data and methods for 'bench' command. */ 127 133 static int cmd_bench(cmd_arg_t *argv); 128 134 static cmd_arg_t bench_argv[] = { … … 138 144 static cmd_info_t bench_info = { 139 145 .name = "bench", 140 .description = " Run kernel test as benchmark.",146 .description = "<test> <count> Run kernel test as benchmark.", 141 147 .func = cmd_bench, 142 148 .argc = 2, 143 149 .argv = bench_argv 144 150 }; 145 #endif 151 152 #endif /* CONFIG_TEST */ 146 153 147 154 /* Data and methods for 'description' command. */ 148 155 static int cmd_desc(cmd_arg_t *argv); 149 156 static void desc_help(void); 150 static char desc_buf[MAX_CMDLINE +1];157 static char desc_buf[MAX_CMDLINE + 1]; 151 158 static cmd_arg_t desc_argv = { 152 159 .type = ARG_TYPE_STRING, … … 156 163 static cmd_info_t desc_info = { 157 164 .name = "describe", 158 .description = " Describe specified command.",165 .description = "<command> Describe specified command.", 159 166 .help = desc_help, 160 167 .func = cmd_desc, … … 165 172 /* Data and methods for 'symaddr' command. */ 166 173 static int cmd_symaddr(cmd_arg_t *argv); 167 static char symaddr_buf[MAX_CMDLINE +1];174 static char symaddr_buf[MAX_CMDLINE + 1]; 168 175 static cmd_arg_t symaddr_argv = { 169 176 .type = ARG_TYPE_STRING, … … 173 180 static cmd_info_t symaddr_info = { 174 181 .name = "symaddr", 175 .description = " Return symbol address.",182 .description = "<symbol> Return symbol address.", 176 183 .func = cmd_symaddr, 177 184 .argc = 1, … … 179 186 }; 180 187 181 static char set_buf[MAX_CMDLINE+1]; 188 /* Data and methods for 'set4' command. 
*/ 189 static char set_buf[MAX_CMDLINE + 1]; 182 190 static int cmd_set4(cmd_arg_t *argv); 183 191 static cmd_arg_t set4_argv[] = { … … 193 201 static cmd_info_t set4_info = { 194 202 .name = "set4", 195 .description = " set <dest_addr> <value> - 4byte version",203 .description = "<addr> <value> Set 4B memory location to a value.", 196 204 .func = cmd_set4, 197 205 .argc = 2, … … 213 221 static cmd_info_t call0_info = { 214 222 .name = "call0", 215 .description = " call0 <function> -> call function().",223 .description = "<function> Call function().", 216 224 .func = cmd_call0, 217 225 .argc = 1, … … 228 236 static cmd_info_t mcall0_info = { 229 237 .name = "mcall0", 230 .description = " mcall0 <function> -> call function() on each CPU.",238 .description = "<function> Call function() on each CPU.", 231 239 .func = cmd_mcall0, 232 240 .argc = 1, … … 250 258 static cmd_info_t call1_info = { 251 259 .name = "call1", 252 .description = " call1 <function> <arg1> -> call function(arg1).",260 .description = "<function> <arg1> Call function(arg1).", 253 261 .func = cmd_call1, 254 262 .argc = 2, … … 277 285 static cmd_info_t call2_info = { 278 286 .name = "call2", 279 .description = " call2 <function> <arg1> <arg2> -> call function(arg1,arg2).",287 .description = "<function> <arg1> <arg2> Call function(arg1, arg2).", 280 288 .func = cmd_call2, 281 289 .argc = 3, … … 310 318 static cmd_info_t call3_info = { 311 319 .name = "call3", 312 .description = " call3 <function> <arg1> <arg2> <arg3> -> call function(arg1,arg2,arg3).",320 .description = "<function> <arg1> <arg2> <arg3> Call function(arg1, arg2, arg3).", 313 321 .func = cmd_call3, 314 322 .argc = 4, … … 340 348 cmd_info_t tlb_info = { 341 349 .name = "tlb", 342 .description = "Print TLB of current processor.",350 .description = "Print TLB of the current CPU.", 343 351 .help = NULL, 344 352 .func = cmd_tlb, … … 377 385 }; 378 386 387 #ifdef CONFIG_UDEBUG 388 389 /* Data and methods for 'btrace' command */ 390 static int cmd_btrace(cmd_arg_t *argv); 391 static cmd_arg_t btrace_argv = { 392 .type = ARG_TYPE_INT, 393 }; 394 static cmd_info_t btrace_info = { 395 .name = "btrace", 396 .description = "<threadid> Show thread stack trace.", 397 .func = cmd_btrace, 398 .argc = 1, 399 .argv = &btrace_argv 400 }; 401 402 #endif /* CONFIG_UDEBUG */ 379 403 380 404 static int cmd_sched(cmd_arg_t *argv); 381 405 static cmd_info_t sched_info = { 382 406 .name = "scheduler", 383 .description = " List allscheduler information.",407 .description = "Show scheduler information.", 384 408 .func = cmd_sched, 385 409 .argc = 0 … … 406 430 static cmd_info_t zones_info = { 407 431 .name = "zones", 408 .description = "List ofmemory zones.",432 .description = "List memory zones.", 409 433 .func = cmd_zones, 410 434 .argc = 0 435 }; 436 437 /* Data and methods for 'zone' command */ 438 static int cmd_zone(cmd_arg_t *argv); 439 static cmd_arg_t zone_argv = { 440 .type = ARG_TYPE_INT, 441 }; 442 443 static cmd_info_t zone_info = { 444 .name = "zone", 445 .description = "<zone> Show memory zone structure.", 446 .func = cmd_zone, 447 .argc = 1, 448 .argv = &zone_argv 411 449 }; 412 450 … … 418 456 static cmd_info_t ipc_info = { 419 457 .name = "ipc", 420 .description = " ipc <taskid> Show IPC information of giventask.",458 .description = "<taskid> Show IPC information of a task.", 421 459 .func = cmd_ipc, 422 460 .argc = 1, … … 431 469 static cmd_info_t kill_info = { 432 470 .name = "kill", 433 .description = " kill<taskid> Kill a task.",471 .description = "<taskid> Kill a 
task.", 434 472 .func = cmd_kill, 435 473 .argc = 1, 436 474 .argv = &kill_argv 437 };438 439 /* Data and methods for 'zone' command */440 static int cmd_zone(cmd_arg_t *argv);441 static cmd_arg_t zone_argv = {442 .type = ARG_TYPE_INT,443 };444 445 static cmd_info_t zone_info = {446 .name = "zone",447 .description = "Show memory zone structure.",448 .func = cmd_zone,449 .argc = 1,450 .argv = &zone_argv451 475 }; 452 476 … … 482 506 &cpus_info, 483 507 &desc_info, 484 &reboot_info,485 &uptime_info,486 508 &halt_info, 487 509 &help_info, 488 510 &ipc_info, 489 511 &kill_info, 512 &physmem_info, 513 &reboot_info, 514 &sched_info, 490 515 &set4_info, 491 516 &slabs_info, 517 &symaddr_info, 492 518 &sysinfo_info, 493 &symaddr_info, 494 &sched_info, 519 &tasks_info, 495 520 &threads_info, 496 &tasks_info,497 &physmem_info,498 521 &tlb_info, 522 &uptime_info, 499 523 &version_info, 500 524 &zones_info, … … 504 528 &bench_info, 505 529 #endif 530 #ifdef CONFIG_UDEBUG 531 &btrace_info, 532 #endif 506 533 NULL 507 534 }; … … 526 553 for (i = 0; basic_commands[i]; i++) { 527 554 cmd_initialize(basic_commands[i]); 528 if (!cmd_register(basic_commands[i])) 529 printf("Cannot register command %s\n", basic_commands[i]->name); 530 } 531 } 532 555 } 556 557 for (i = 0; basic_commands[i]; i++) { 558 if (!cmd_register(basic_commands[i])) { 559 printf("Cannot register command %s\n", 560 basic_commands[i]->name); 561 } 562 } 563 } 533 564 534 565 /** List supported commands. … … 574 605 } 575 606 576 577 607 /** Reboot the system. 578 608 * … … 588 618 return 1; 589 619 } 590 591 620 592 621 /** Print system uptime information. … … 824 853 } 825 854 826 827 855 /** Print detailed description of 'describe' command. */ 828 856 void desc_help(void) … … 911 939 * @return Always 1 912 940 */ 913 int cmd_slabs(cmd_arg_t * argv)941 int cmd_slabs(cmd_arg_t *argv) 914 942 { 915 943 slab_print_list(); … … 923 951 * @return Always 1 924 952 */ 925 int cmd_sysinfo(cmd_arg_t * argv)953 int cmd_sysinfo(cmd_arg_t *argv) 926 954 { 927 955 sysinfo_dump(NULL); … … 929 957 } 930 958 931 932 /** Command for listings Thread information 959 /** Command for listing thread information 933 960 * 934 961 * @param argv Ignored … … 948 975 } 949 976 950 /** Command for listing s Task information977 /** Command for listing task information 951 978 * 952 979 * @param argv Ignored … … 966 993 } 967 994 968 /** Command for listings Thread information 995 #ifdef CONFIG_UDEBUG 996 997 /** Command for printing thread stack trace 998 * 999 * @param argv Integer argument from cmdline expected 1000 * 1001 * return Always 1 1002 * 1003 */ 1004 int cmd_btrace(cmd_arg_t *argv) 1005 { 1006 thread_stack_trace(argv[0].intval); 1007 return 1; 1008 } 1009 1010 #endif /* CONFIG_UDEBUG */ 1011 1012 /** Command for printing scheduler information 969 1013 * 970 1014 * @param argv Ignores … … 972 1016 * @return Always 1 973 1017 */ 974 int cmd_sched(cmd_arg_t * argv)1018 int cmd_sched(cmd_arg_t *argv) 975 1019 { 976 1020 sched_print_list(); … … 984 1028 * return Always 1 985 1029 */ 986 int cmd_zones(cmd_arg_t * argv)1030 int cmd_zones(cmd_arg_t *argv) 987 1031 { 988 1032 zones_print_list(); … … 996 1040 * return Always 1 997 1041 */ 998 int cmd_zone(cmd_arg_t * argv)1042 int cmd_zone(cmd_arg_t *argv) 999 1043 { 1000 1044 zone_print_one(argv[0].intval); … … 1002 1046 } 1003 1047 1004 /** Command for printing task ipcdetails1048 /** Command for printing task IPC details 1005 1049 * 1006 1050 * @param argv Integer argument from cmdline expected … … 1008 1052 * 
return Always 1 1009 1053 */ 1010 int cmd_ipc(cmd_arg_t * argv)1054 int cmd_ipc(cmd_arg_t *argv) 1011 1055 { 1012 1056 ipc_print_task(argv[0].intval); … … 1020 1064 * return 0 on failure, 1 on success. 1021 1065 */ 1022 int cmd_kill(cmd_arg_t * argv)1066 int cmd_kill(cmd_arg_t *argv) 1023 1067 { 1024 1068 if (task_kill(argv[0].intval) != EOK) -
kernel/generic/src/console/console.c
r6e50466 → rad7a6c9:

     klog_parea.pbase = (uintptr_t) faddr;
     klog_parea.frames = SIZE2FRAMES(sizeof(klog));
+    klog_parea.unpriv = false;
     ddi_parea_register(&klog_parea);
kernel/generic/src/ddi/ddi.c
r6e50466 → rad7a6c9:

 {
     ASSERT(TASK);
-    ASSERT((pf % FRAME_SIZE) == 0);
-    ASSERT((vp % PAGE_SIZE) == 0);
-
-    /*
-     * Make sure the caller is authorised to make this syscall.
-     */
-    cap_t caps = cap_get(TASK);
-    if (!(caps & CAP_MEM_MANAGER))
-        return EPERM;
+
+    if ((pf % FRAME_SIZE) != 0)
+        return EBADMEM;
+
+    if ((vp % PAGE_SIZE) != 0)
+        return EBADMEM;
+
+    /*
+     * Unprivileged tasks are only allowed to map pareas
+     * which are explicitly marked as such.
+     */
+    bool priv =
+        ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);
 
     mem_backend_data_t backend_data;
…
     if (znum == (size_t) -1) {
-        /* Frames not found in any zones
-         * -> assume it is hardware device and allow mapping
+        /*
+         * Frames not found in any zone
+         * -> assume it is a hardware device and allow mapping
+         * for privileged tasks.
          */
         irq_spinlock_unlock(&zones.lock, true);
+
+        if (!priv)
+            return EPERM;
+
         goto map;
     }
 
     if (zones.info[znum].flags & ZONE_FIRMWARE) {
-        /* Frames are part of firmware */
+        /*
+         * Frames are part of firmware
+         * -> allow mapping for privileged tasks.
+         */
         irq_spinlock_unlock(&zones.lock, true);
+
+        if (!priv)
+            return EPERM;
+
         goto map;
     }
…
     if (zone_flags_available(zones.info[znum].flags)) {
         /*
-         * Frames are part of physical memory, check if the memory
-         * region is enabled for mapping.
+         * Frames are part of physical memory, check
+         * if the memory region is enabled for mapping.
          */
         irq_spinlock_unlock(&zones.lock, true);
…
         if ((!parea) || (parea->frames < pages)) {
             mutex_unlock(&parea_lock);
-            goto err;
+            return ENOENT;
+        }
+
+        if (!priv) {
+            if (!parea->unpriv) {
+                mutex_unlock(&parea_lock);
+                return EPERM;
+            }
         }
 
…
     irq_spinlock_unlock(&zones.lock, true);
-
-err:
     return ENOENT;
…
 }
 
-/** Disable or enable specified interrupts.
- *
- * @param irq the interrupt to be enabled/disabled.
- * @param enable if true enable the interrupt, disable otherwise.
- *
- * @retutn Zero on success, error code otherwise.
- */
-sysarg_t sys_interrupt_enable(int irq, int enable)
-{
-    /* FIXME: this needs to be generic code, or better not be in kernel at all. */
-#if 0
-    cap_t task_cap = cap_get(TASK);
-    if (!(task_cap & CAP_IRQ_REG))
-        return EPERM;
-
-    if (irq < 0 || irq > 16) {
-        return EINVAL;
-    }
-
-    uint16_t irq_mask = (uint16_t)(1 << irq);
-    if (enable) {
-        trap_virtual_enable_irqs(irq_mask);
-    } else {
-        trap_virtual_disable_irqs(irq_mask);
-    }
-
-#endif
-    return 0;
-}
-
 /** @}
  */
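Together with the new parea_t field seen in console.c above and rd.c below (klog_parea.unpriv = false, rd_parea.unpriv = false), this turns the old all-or-nothing CAP_MEM_MANAGER check into a per-area policy: unprivileged tasks may map only those physical areas whose owner explicitly opted in. A hedged sketch of how a kernel subsystem might expose such an area (the function name and frame-buffer scenario are invented for illustration; pbase, frames, unpriv and ddi_parea_register() come from the diffs in this changeset, the headers are assumptions):

    #include <ddi/ddi.h>     /* parea_t, ddi_parea_register(); assumed header */
    #include <mm/frame.h>    /* SIZE2FRAMES; assumed header */

    static parea_t fb_parea;    /* hypothetical frame-buffer physical area */

    void fb_expose(uintptr_t paddr, size_t size)
    {
        fb_parea.pbase = paddr;
        fb_parea.frames = SIZE2FRAMES(size);
        /* Unlike klog/rd, opt in to mapping by unprivileged tasks. */
        fb_parea.unpriv = true;
        ddi_parea_register(&fb_parea);
    }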
kernel/generic/src/ddi/irq.c
r6e50466 → rad7a6c9:

 static size_t buckets;
 
+/** Last valid INR. */
+inr_t last_inr = 0;
+
 /** Initialize IRQ subsystem.
  *
…
 {
     buckets = chains;
+    last_inr = inrs - 1;
+
     /*
      * Be smart about the choice of the hash table operations.
kernel/generic/src/interrupt/interrupt.c
r6e50466 → rad7a6c9:

 #include <console/console.h>
 #include <console/cmd.h>
-#include <ipc/event.h>
 #include <synch/mutex.h>
 #include <time/delay.h>
…
     printf("\n");
 
+    task_kill_self(true);
+}
+
+/** Get istate structure of a thread.
+ *
+ * Get pointer to the istate structure at the bottom of the kernel stack.
+ *
+ * This function can be called in interrupt or user context. In interrupt
+ * context the istate structure is created by the low-level exception
+ * handler. In user context the istate structure is created by the
+ * low-level syscall handler.
+ */
+istate_t *istate_get(thread_t *thread)
+{
     /*
-     * Userspace can subscribe for FAULT events to take action
-     * whenever a thread faults. (E.g. take a dump, run a debugger).
-     * The notification is always available, but unless Udebug is enabled,
-     * that's all you get.
+     * The istate structure should be right at the bottom of the kernel
+     * stack.
      */
-    if (event_is_subscribed(EVENT_FAULT)) {
-        /* Notify the subscriber that a fault occurred. */
-        event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
-            UPPER32(TASK->taskid), (sysarg_t) THREAD);
-
-#ifdef CONFIG_UDEBUG
-        /* Wait for a debugging session. */
-        udebug_thread_fault();
-#endif
-    }
-
-    task_kill(TASK->taskid);
-    thread_exit();
+    return (istate_t *) ((uint8_t *) thread->kstack + THREAD_STACK_SIZE -
+        sizeof(istate_t));
 }
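istate_get() relies on the convention that the saved register state sits at the very end of the thread's kernel stack. A hosted sanity-check of the same address arithmetic (the stack size and istate_t layout are assumed example values, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_STACK_SIZE (8 * 1024)    /* assumed example size */

    typedef struct { uint64_t regs[32]; } istate_t;    /* stand-in layout */

    int main(void)
    {
        static uint8_t kstack[THREAD_STACK_SIZE];
        /* Same computation as istate_get(): istate_t at the stack's end. */
        istate_t *istate = (istate_t *)
            (kstack + THREAD_STACK_SIZE - sizeof(istate_t));
        printf("kstack=%p istate=%p\n", (void *) kstack, (void *) istate);
        return 0;
    }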
kernel/generic/src/ipc/ipc.c
r6e50466 rad7a6c9 295 295 atomic_inc(&phone->active_calls); 296 296 call->data.phone = phone; 297 call->data.task = TASK; 297 298 } 298 299 … … 406 407 call->caller_phone = call->data.phone; 407 408 call->data.phone = newphone; 409 call->data.task = TASK; 408 410 } 409 411 … … 688 690 irq_spinlock_exchange(&tasks_lock, &task->lock); 689 691 690 /* Print opened phones & details */ 691 printf("PHONE:\n"); 692 printf("[phone id] [calls] [state\n"); 692 693 693 694 size_t i; 694 695 for (i = 0; i < IPC_MAX_PHONES; i++) { 695 696 if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { 696 printf("% zu: mutex busy\n", i);697 printf("%-10zu (mutex busy)\n", i); 697 698 continue; 698 699 } 699 700 700 701 if (task->phones[i].state != IPC_PHONE_FREE) { 701 printf("%zu: ", i); 702 printf("%-10zu %7" PRIun " ", i, 703 atomic_get(&task->phones[i].active_calls)); 702 704 703 705 switch (task->phones[i].state) { 704 706 case IPC_PHONE_CONNECTING: 705 printf("connecting ");707 printf("connecting"); 706 708 break; 707 709 case IPC_PHONE_CONNECTED: 708 printf("connected to: %p ", 709 task->phones[i].callee); 710 printf("connected to %" PRIu64 " (%s)", 711 task->phones[i].callee->task->taskid, 712 task->phones[i].callee->task->name); 710 713 break; 711 714 case IPC_PHONE_SLAMMED: 712 printf("slammed by : %p ",715 printf("slammed by %p", 713 716 task->phones[i].callee); 714 717 break; 715 718 case IPC_PHONE_HUNGUP: 716 printf("hung up - was: %p ",719 printf("hung up by %p", 717 720 task->phones[i].callee); 718 721 break; … … 721 724 } 722 725 723 printf("active: %" PRIun "\n", 724 atomic_get(&task->phones[i].active_calls)); 726 printf("\n"); 725 727 } 726 728 … … 730 732 irq_spinlock_lock(&task->answerbox.lock, false); 731 733 734 #ifdef __32_BITS__ 735 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4] [arg5]" 736 " [flags] [sender\n"); 737 #endif 738 739 #ifdef __64_BITS__ 740 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4]" 741 " [arg5] [flags] [sender\n"); 742 #endif 743 732 744 link_t *cur; 733 745 734 /* Print answerbox - calls */ 735 printf("ABOX - CALLS:\n"); 746 printf(" --- incomming calls ---\n"); 736 747 for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls; 737 748 cur = cur->next) { 738 749 call_t *call = list_get_instance(cur, call_t, link); 739 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 740 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 741 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 742 call->sender->taskid, 750 751 #ifdef __32_BITS__ 752 printf("%10p ", call); 753 #endif 754 755 #ifdef __64_BITS__ 756 printf("%18p ", call); 757 #endif 758 759 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 760 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 743 761 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 744 762 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 745 763 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 746 call->flags); 747 } 748 749 /* Print answerbox - dispatched calls */ 750 printf("ABOX - DISPATCHED CALLS:\n"); 764 call->flags, call->sender->taskid, call->sender->name); 765 } 766 767 printf(" --- dispatched calls ---\n"); 751 768 for (cur = task->answerbox.dispatched_calls.next; 752 769 cur != &task->answerbox.dispatched_calls; 753 770 cur = cur->next) { 754 771 call_t *call = list_get_instance(cur, call_t, link); 755 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 756 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 757 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 758 call->sender->taskid, 772 773 #ifdef __32_BITS__ 
774 printf("%10p ", call); 775 #endif 776 777 #ifdef __64_BITS__ 778 printf("%18p ", call); 779 #endif 780 781 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 782 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 759 783 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 760 784 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 761 785 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 762 call->flags); 763 } 764 765 /* Print answerbox - answers */ 766 printf("ABOX - ANSWERS:\n"); 786 call->flags, call->sender->taskid, call->sender->name); 787 } 788 789 printf(" --- incoming answers ---\n"); 767 790 for (cur = task->answerbox.answers.next; 768 791 cur != &task->answerbox.answers; 769 792 cur = cur->next) { 770 793 call_t *call = list_get_instance(cur, call_t, link); 771 printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun 772 " A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", 773 call, IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 794 795 #ifdef __32_BITS__ 796 printf("%10p ", call); 797 #endif 798 799 #ifdef __64_BITS__ 800 printf("%18p ", call); 801 #endif 802 803 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 804 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 805 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 774 806 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 775 807 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 776 call->flags );808 call->flags, call->sender->taskid, call->sender->name); 777 809 } 778 810 -
kernel/generic/src/ipc/irq.c
r6e50466 → rad7a6c9:

  *
  * The structure of a notification message is as follows:
- * - IMETHOD: interface and method as registered by the SYS_IPC_REGISTER_IRQ
+ * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ
  *   syscall
  * - ARG1: payload modified by a 'top-half' handler
…
 /** Register an answerbox as a receiving end for IRQ notifications.
  *
- * @param box Receiving answerbox.
- * @param inr IRQ number.
- * @param devno Device number.
- * @param imethod Interface and method to be associated
- *                with the notification.
- * @param ucode Uspace pointer to top-half pseudocode.
- *
- * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success.
+ * @param box Receiving answerbox.
+ * @param inr IRQ number.
+ * @param devno Device number.
+ * @param imethod Interface and method to be associated with the
+ *                notification.
+ * @param ucode Uspace pointer to top-half pseudocode.
+ * @return EOK on success or a negative error code.
  *
  */
…
         (sysarg_t) devno
     };
+
+    if ((inr < 0) || (inr > last_inr))
+        return ELIMIT;
 
     irq_code_t *code;
…
 /** Unregister task from IRQ notification.
  *
- * @param box Answerbox associated with the notification.
- * @param inr IRQ number.
- * @param devno Device number.
- *
+ * @param box Answerbox associated with the notification.
+ * @param inr IRQ number.
+ * @param devno Device number.
+ * @return EOK on success or a negative error code.
  */
 int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno)
…
         (sysarg_t) devno
     };
+
+    if ((inr < 0) || (inr > last_inr))
+        return ELIMIT;
 
     irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
…
                 (uint32_t) code->cmds[i].value);
             break;
+        case CMD_PIO_WRITE_A_8:
+            if (srcarg) {
+                pio_write_8((ioport8_t *) code->cmds[i].addr,
+                    (uint8_t) scratch[srcarg]);
+            }
+            break;
+        case CMD_PIO_WRITE_A_16:
+            if (srcarg) {
+                pio_write_16((ioport16_t *) code->cmds[i].addr,
+                    (uint16_t) scratch[srcarg]);
+            }
+            break;
+        case CMD_PIO_WRITE_A_32:
+            if (srcarg) {
+                pio_write_32((ioport32_t *) code->cmds[i].addr,
+                    (uint32_t) scratch[srcarg]);
+            }
+            break;
         case CMD_BTEST:
             if ((srcarg) && (dstarg)) {
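The new CMD_PIO_WRITE_A_{8,16,32} commands write a value taken from a scratch register (filled by an earlier command) rather than a compile-time constant, so top-half pseudocode can, for example, read a device status register and write it back to acknowledge the interrupt. A hedged sketch of such a program (the port address is invented; CMD_PIO_READ_8, CMD_ACCEPT and the irq_cmd_t/irq_code_t field names are assumptions based on the surrounding interface, only CMD_PIO_WRITE_A_8 itself is taken from the diff above):

    static irq_cmd_t ack_cmds[] = {
        {
            /* Read the status register into scratch register 1. */
            .cmd = CMD_PIO_READ_8,
            .addr = (void *) 0x1234,    /* invented port address */
            .dstarg = 1
        },
        {
            /* Write the saved status back, using the new command. */
            .cmd = CMD_PIO_WRITE_A_8,
            .addr = (void *) 0x1234,
            .srcarg = 1
        },
        {
            /* Accept the interrupt and send the notification. */
            .cmd = CMD_ACCEPT
        }
    };

    static irq_code_t ack_code = {
        .cmdcount = sizeof(ack_cmds) / sizeof(irq_cmd_t),
        .cmds = ack_cmds
    };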
kernel/generic/src/ipc/sysipc.c
r6e50466 → rad7a6c9:

         /* The connection was accepted */
         phone_connect(phoneid, &answer->sender->answerbox);
+        /* Set 'task hash' as arg4 of response */
+        IPC_SET_ARG4(answer->data, (sysarg_t) TASK);
         /* Set 'phone hash' as arg5 of response */
         IPC_SET_ARG5(answer->data,
…
  *
  */
-sysarg_t sys_ipc_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
+sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,
     irq_code_t *ucode)
 {
…
  *
  */
-sysarg_t sys_ipc_unregister_irq(inr_t inr, devno_t devno)
+sysarg_t sys_unregister_irq(inr_t inr, devno_t devno)
 {
     if (!(cap_get(TASK) & CAP_IRQ_REG))
kernel/generic/src/lib/elf.c
r6e50466 → rad7a6c9:

     case PT_NULL:
     case PT_PHDR:
+    case PT_NOTE:
         break;
     case PT_LOAD:
…
         break;
     case PT_SHLIB:
-    case PT_NOTE:
     case PT_LOPROC:
     case PT_HIPROC:
kernel/generic/src/lib/memfnc.c
r6e50466 → rad7a6c9:

 /*
- * Copyright (c) 2009 Lukas Mejdrech
+ * Copyright (c) 2011 Martin Decky
  * All rights reserved.
  *
…
  */
 
-/** @addtogroup ip
+/** @addtogroup generic
  * @{
  */
 
-/** @file
- * IP standalone module implementation.
- * Contains skeleton module functions mapping.
- * The functions are used by the module skeleton as module specific entry
- * points.
+/**
+ * @file
+ * @brief Memory string functions.
+ *
+ * This file provides architecture independent functions to manipulate blocks
+ * of memory. These functions are optimized as much as generic functions of
+ * this type can be.
  *
- * @see module.c
  */
 
-#include <async.h>
-#include <stdio.h>
-#include <ipc/ipc.h>
-#include <ipc/services.h>
-#include <errno.h>
-
-#include <net/modules.h>
-#include <net_interface.h>
-#include <net/packet.h>
-#include <il_local.h>
-
-#include "ip.h"
-#include "ip_module.h"
-
-/** IP module global data. */
-extern ip_globals_t ip_globals;
-
-int
-il_module_message_standalone(ipc_callid_t callid, ipc_call_t *call,
-    ipc_call_t *answer, int *answer_count)
+#include <lib/memfnc.h>
+#include <typedefs.h>
+
+/** Fill block of memory.
+ *
+ * Fill cnt bytes at dst address with the value val.
+ *
+ * @param dst Destination address to fill.
+ * @param val Value to fill.
+ * @param cnt Number of bytes to fill.
+ *
+ * @return Destination address.
+ *
+ */
+void *memset(void *dst, int val, size_t cnt)
 {
-    return ip_message_standalone(callid, call, answer, answer_count);
+    size_t i;
+    uint8_t *ptr = (uint8_t *) dst;
+
+    for (i = 0; i < cnt; i++)
+        ptr[i] = val;
+
+    return dst;
 }
 
-int il_module_start_standalone(async_client_conn_t client_connection)
+/** Move memory block without overlapping.
+ *
+ * Copy cnt bytes from src address to dst address. The source
+ * and destination memory areas cannot overlap.
+ *
+ * @param dst Destination address to copy to.
+ * @param src Source address to copy from.
+ * @param cnt Number of bytes to copy.
+ *
+ * @return Destination address.
+ *
+ */
+void *memcpy(void *dst, const void *src, size_t cnt)
 {
-    sysarg_t phonehash;
-    int rc;
+    uint8_t *dp = (uint8_t *) dst;
+    const uint8_t *sp = (uint8_t *) src;
 
-    async_set_client_connection(client_connection);
-    ip_globals.net_phone = net_connect_module();
-
-    rc = pm_init();
-    if (rc != EOK)
-        return rc;
+    while (cnt-- != 0)
+        *dp++ = *sp++;
 
-    rc = ip_initialize(client_connection);
-    if (rc != EOK)
-        goto out;
-
-    rc = ipc_connect_to_me(PHONE_NS, SERVICE_IP, 0, 0, &phonehash);
-    if (rc != EOK)
-        goto out;
-
-    async_manager();
-
-out:
-    pm_destroy();
-    return rc;
+    return dst;
 }
kernel/generic/src/lib/memstr.c
r6e50466 → rad7a6c9:

  */
 
 /** @addtogroup generic
  * @{
  */
…
 /**
  * @file
  * @brief Memory string operations.
  *
- * This file provides architecture independent functions to manipulate blocks of
- * memory. These functions are optimized as much as generic functions of this
- * type can be. However, architectures are free to provide even more optimized
- * versions of these functions.
+ * This file provides architecture independent functions to manipulate blocks
+ * of memory. These functions are optimized as much as generic functions of
+ * this type can be.
  */
 
 #include <memstr.h>
 #include <typedefs.h>
-#include <align.h>
 
-/** Copy block of memory.
+/** Fill block of memory.
  *
- * Copy cnt bytes from src address to dst address. The copying is done
- * word-by-word and then byte-by-byte. The source and destination memory areas
- * cannot overlap.
+ * Fill cnt bytes at dst address with the value val.
  *
- * @param src Source address to copy from.
- * @param dst Destination address to copy to.
- * @param cnt Number of bytes to copy.
+ * @param dst Destination address to fill.
+ * @param cnt Number of bytes to fill.
+ * @param val Value to fill.
  *
- * @return Destination address.
  */
-void *_memcpy(void *dst, const void *src, size_t cnt)
+void memsetb(void *dst, size_t cnt, uint8_t val)
 {
-    unsigned int i, j;
+    memset(dst, val, cnt);
+}
+
+/** Fill block of memory.
+ *
+ * Fill cnt words at dst address with the value val. The filling
+ * is done word-by-word.
+ *
+ * @param dst Destination address to fill.
+ * @param cnt Number of words to fill.
+ * @param val Value to fill.
+ *
+ */
+void memsetw(void *dst, size_t cnt, uint16_t val)
+{
+    size_t i;
+    uint16_t *ptr = (uint16_t *) dst;
 
-    if (ALIGN_UP((uintptr_t) src, sizeof(sysarg_t)) != (uintptr_t) src ||
-        ALIGN_UP((uintptr_t) dst, sizeof(sysarg_t)) != (uintptr_t) dst) {
-        for (i = 0; i < cnt; i++)
-            ((uint8_t *) dst)[i] = ((uint8_t *) src)[i];
-    } else {
-        for (i = 0; i < cnt / sizeof(sysarg_t); i++)
-            ((sysarg_t *) dst)[i] = ((sysarg_t *) src)[i];
-
-        for (j = 0; j < cnt % sizeof(sysarg_t); j++)
-            ((uint8_t *)(((sysarg_t *) dst) + i))[j] =
-                ((uint8_t *)(((sysarg_t *) src) + i))[j];
-    }
-
-    return (char *) dst;
+    for (i = 0; i < cnt; i++)
+        ptr[i] = val;
 }
 
 /** Move memory block with possible overlapping.
  *
- * Copy cnt bytes from src address to dst address. The source and destination
- * memory areas may overlap.
+ * Copy cnt bytes from src address to dst address. The source
+ * and destination memory areas may overlap.
  *
- * @param src Source address to copy from.
- * @param dst Destination address to copy to.
- * @param cnt Number of bytes to copy.
+ * @param dst Destination address to copy to.
+ * @param src Source address to copy from.
+ * @param cnt Number of bytes to copy.
  *
- * @return Destination address.
+ * @return Destination address.
+ *
  */
-void *memmove(void *dst, const void *src, size_t n)
+void *memmove(void *dst, const void *src, size_t cnt)
 {
-    const uint8_t *sp;
-    uint8_t *dp;
-
     /* Nothing to do? */
     if (src == dst)
         return dst;
 
     /* Non-overlapping? */
-    if (dst >= src + n || src >= dst + n) {
-        return memcpy(dst, src, n);
-    }
-
+    if ((dst >= src + cnt) || (src >= dst + cnt))
+        return memcpy(dst, src, cnt);
+
+    uint8_t *dp;
+    const uint8_t *sp;
+
     /* Which direction? */
     if (src > dst) {
         /* Forwards. */
+        dp = dst;
         sp = src;
-        dp = dst;
-
-        while (n-- != 0)
+
+        while (cnt-- != 0)
             *dp++ = *sp++;
     } else {
         /* Backwards. */
-        sp = src + (n - 1);
-        dp = dst + (n - 1);
-
-        while (n-- != 0)
+        dp = dst + (cnt - 1);
+        sp = src + (cnt - 1);
+
+        while (cnt-- != 0)
             *dp-- = *sp--;
     }
 
     return dst;
-}
-
-/** Fill block of memory
- *
- * Fill cnt bytes at dst address with the value x. The filling is done
- * byte-by-byte.
- *
- * @param dst Destination address to fill.
- * @param cnt Number of bytes to fill.
- * @param x Value to fill.
- *
- */
-void _memsetb(void *dst, size_t cnt, uint8_t x)
-{
-    unsigned int i;
-    uint8_t *p = (uint8_t *) dst;
-
-    for (i = 0; i < cnt; i++)
-        p[i] = x;
-}
-
-/** Fill block of memory.
- *
- * Fill cnt words at dst address with the value x. The filling is done
- * word-by-word.
- *
- * @param dst Destination address to fill.
- * @param cnt Number of words to fill.
- * @param x Value to fill.
- *
- */
-void _memsetw(void *dst, size_t cnt, uint16_t x)
-{
-    unsigned int i;
-    uint16_t *p = (uint16_t *) dst;
-
-    for (i = 0; i < cnt; i++)
-        p[i] = x;
 }
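The rewritten memmove() picks the copy direction from the relative position of the buffers: forwards when the source lies above the destination, backwards otherwise, so no byte is overwritten before it has been read. A small hosted check of the overlapping case (this exercises the standard C memmove() with the same semantics, not the kernel routine itself):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16] = "abcdef";

        /* Overlapping shift right by two: must copy backwards. */
        memmove(buf + 2, buf, 6);
        buf[8] = '\0';
        printf("%s\n", buf);    /* prints "ababcdef" */
        return 0;
    }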
kernel/generic/src/lib/rd.c
r6e50466 → rad7a6c9:

         FRAME_SIZE);
     rd_parea.frames = SIZE2FRAMES(dsize);
+    rd_parea.unpriv = false;
     ddi_parea_register(&rd_parea);
kernel/generic/src/main/main.c
r6e50466 → rad7a6c9:

     LOG("\nconfig.base=%p config.kernel_size=%zu"
         "\nconfig.stack_base=%p config.stack_size=%zu",
-        config.base, config.kernel_size, config.stack_base,
-        config.stack_size);
+        (void *) config.base, config.kernel_size,
+        (void *) config.stack_base, config.stack_size);
…
         for (i = 0; i < init.cnt; i++)
             LOG("init[%zu].addr=%p, init[%zu].size=%zu",
-                i, init.tasks[i].addr, i, init.tasks[i].size);
+                i, (void *) init.tasks[i].addr, i, init.tasks[i].size);
     } else
         printf("No init binaries found.\n");
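The LOG() fixes above cast integer addresses to (void *) before printing them: in standard C, %p requires a void * argument, and passing a uintptr_t directly is undefined behavior even where the sizes happen to match. A one-line hosted illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t base = 0x80000000;
        printf("config.base=%p\n", (void *) base);    /* cast required for %p */
        return 0;
    }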
kernel/generic/src/mm/as.c
r6e50466 rad7a6c9 71 71 #include <memstr.h> 72 72 #include <macros.h> 73 #include <bitops.h> 73 74 #include <arch.h> 74 75 #include <errno.h> … … 86 87 * Each architecture decides what functions will be used to carry out 87 88 * address space operations such as creating or locking page tables. 88 *89 89 */ 90 90 as_operations_t *as_operations = NULL; 91 91 92 /** 93 * Slab for as_t objects. 92 /** Slab for as_t objects. 94 93 * 95 94 */ 96 95 static slab_cache_t *as_slab; 97 96 98 /** 99 * This lock serializes access to the ASID subsystem.100 * Itprotects:97 /** ASID subsystem lock. 98 * 99 * This lock protects: 101 100 * - inactive_as_with_asid_head list 102 101 * - as->asid for each as of the as_t type … … 107 106 108 107 /** 109 * This list contains address spaces that are not active on any 110 * processor and that have valid ASID. 111 * 108 * Inactive address spaces (on all processors) 109 * that have valid ASID. 112 110 */ 113 111 LIST_INITIALIZE(inactive_as_with_asid_head); … … 123 121 mutex_initialize(&as->lock, MUTEX_PASSIVE); 124 122 125 int rc = as_constructor_arch(as, flags); 126 127 return rc; 123 return as_constructor_arch(as, flags); 128 124 } 129 125 130 126 NO_TRACE static size_t as_destructor(void *obj) 131 127 { 132 as_t *as = (as_t *) obj; 133 return as_destructor_arch(as); 128 return as_destructor_arch((as_t *) obj); 134 129 } 135 130 … … 146 141 panic("Cannot create kernel address space."); 147 142 148 /* Make sure the kernel address space 143 /* 144 * Make sure the kernel address space 149 145 * reference count never drops to zero. 150 146 */ … … 195 191 { 196 192 DEADLOCK_PROBE_INIT(p_asidlock); 197 193 198 194 ASSERT(as != AS); 199 195 ASSERT(atomic_get(&as->refcount) == 0); … … 203 199 * lock its mutex. 204 200 */ 205 201 206 202 /* 207 203 * We need to avoid deadlock between TLB shootdown and asidlock. … … 210 206 * disabled to prevent nested context switches. We also depend on the 211 207 * fact that so far no spinlocks are held. 212 *213 208 */ 214 209 preemption_disable(); … … 235 230 spinlock_unlock(&asidlock); 236 231 interrupts_restore(ipl); 237 232 238 233 239 234 /* … … 241 236 * The B+tree must be walked carefully because it is 242 237 * also being destroyed. 243 *244 238 */ 245 239 bool cond = true; … … 268 262 /** Hold a reference to an address space. 269 263 * 270 * Holding a reference to an address space prevents destruction of that address271 * space.264 * Holding a reference to an address space prevents destruction 265 * of that address space. 272 266 * 273 267 * @param as Address space to be held. … … 281 275 /** Release a reference to an address space. 282 276 * 283 * The last one to release a reference to an address space destroys the address284 * space.277 * The last one to release a reference to an address space 278 * destroys the address space. 285 279 * 286 280 * @param asAddress space to be released. … … 295 289 /** Check area conflicts with other areas. 296 290 * 297 * @param as Address space.298 * @param vaStarting virtual address of the area being tested.299 * @param size Size ofthe area being tested.300 * @param avoid _areaDo not touch this area.291 * @param as Address space. 292 * @param addr Starting virtual address of the area being tested. 293 * @param count Number of pages in the area being tested. 294 * @param avoid Do not touch this area. 301 295 * 302 296 * @return True if there is no conflict, false otherwise. 
303 297 * 304 298 */ 305 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 306 as_area_t *avoid_area) 307 { 299 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 300 size_t count, as_area_t *avoid) 301 { 302 ASSERT((addr % PAGE_SIZE) == 0); 308 303 ASSERT(mutex_locked(&as->lock)); 309 304 310 305 /* 311 306 * We don't want any area to have conflicts with NULL page. 312 * 313 */ 314 if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE)) 307 */ 308 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE)) 315 309 return false; 316 310 … … 321 315 * record in the left neighbour, the leftmost record in the right 322 316 * neighbour and all records in the leaf node itself. 323 *324 317 */ 325 318 btree_node_t *leaf; 326 319 as_area_t *area = 327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);320 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf); 328 321 if (area) { 329 if (area != avoid _area)322 if (area != avoid) 330 323 return false; 331 324 } … … 337 330 area = (as_area_t *) node->value[node->keys - 1]; 338 331 339 mutex_lock(&area->lock); 340 341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 332 if (area != avoid) { 333 mutex_lock(&area->lock); 334 335 if (overlaps(addr, count << PAGE_WIDTH, 336 area->base, area->pages << PAGE_WIDTH)) { 337 mutex_unlock(&area->lock); 338 return false; 339 } 340 342 341 mutex_unlock(&area->lock); 343 return false; 344 } 345 346 mutex_unlock(&area->lock); 342 } 347 343 } 348 344 … … 351 347 area = (as_area_t *) node->value[0]; 352 348 353 mutex_lock(&area->lock); 354 355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 349 if (area != avoid) { 350 mutex_lock(&area->lock); 351 352 if (overlaps(addr, count << PAGE_WIDTH, 353 area->base, area->pages << PAGE_WIDTH)) { 354 mutex_unlock(&area->lock); 355 return false; 356 } 357 356 358 mutex_unlock(&area->lock); 357 return false; 358 } 359 360 mutex_unlock(&area->lock); 359 } 361 360 } 362 361 … … 366 365 area = (as_area_t *) leaf->value[i]; 367 366 368 if (area == avoid _area)367 if (area == avoid) 369 368 continue; 370 369 371 370 mutex_lock(&area->lock); 372 371 373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 372 if (overlaps(addr, count << PAGE_WIDTH, 373 area->base, area->pages << PAGE_WIDTH)) { 374 374 mutex_unlock(&area->lock); 375 375 return false; … … 382 382 * So far, the area does not conflict with other areas. 383 383 * Check if it doesn't conflict with kernel address space. 384 *385 384 */ 386 385 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 387 return !overlaps( va, size,386 return !overlaps(addr, count << PAGE_WIDTH, 388 387 KERNEL_ADDRESS_SPACE_START, 389 388 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); … … 412 411 mem_backend_data_t *backend_data) 413 412 { 414 if ( base % PAGE_SIZE)413 if ((base % PAGE_SIZE) != 0) 415 414 return NULL; 416 415 417 if ( !size)416 if (size == 0) 418 417 return NULL; 418 419 size_t pages = SIZE2FRAMES(size); 419 420 420 421 /* Writeable executable areas are not supported. 
*/ … … 424 425 mutex_lock(&as->lock); 425 426 426 if (!check_area_conflicts(as, base, size, NULL)) {427 if (!check_area_conflicts(as, base, pages, NULL)) { 427 428 mutex_unlock(&as->lock); 428 429 return NULL; … … 436 437 area->flags = flags; 437 438 area->attributes = attrs; 438 area->pages = SIZE2FRAMES(size); 439 area->pages = pages; 440 area->resident = 0; 439 441 area->base = base; 440 442 area->sh_info = NULL; … … 479 481 * to find out whether this is a miss or va belongs to an address 480 482 * space area found there. 481 *482 483 */ 483 484 … … 490 491 mutex_lock(&area->lock); 491 492 492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 493 if ((area->base <= va) && 494 (va < area->base + (area->pages << PAGE_WIDTH))) 493 495 return area; 494 496 … … 499 501 * Second, locate the left neighbour and test its last record. 500 502 * Because of its position in the B+tree, it must have base < va. 501 *502 503 */ 503 504 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); … … 507 508 mutex_lock(&area->lock); 508 509 509 if (va < area->base + area->pages * PAGE_SIZE)510 if (va < area->base + (area->pages << PAGE_WIDTH)) 510 511 return area; 511 512 … … 534 535 /* 535 536 * Locate the area. 536 *537 537 */ 538 538 as_area_t *area = find_area_and_lock(as, address); … … 546 546 * Remapping of address space areas associated 547 547 * with memory mapped devices is not supported. 548 *549 548 */ 550 549 mutex_unlock(&area->lock); … … 557 556 * Remapping of shared address space areas 558 557 * is not supported. 559 *560 558 */ 561 559 mutex_unlock(&area->lock); … … 568 566 /* 569 567 * Zero size address space areas are not allowed. 570 *571 568 */ 572 569 mutex_unlock(&area->lock); … … 576 573 577 574 if (pages < area->pages) { 578 uintptr_t start_free = area->base + pages * PAGE_SIZE;575 uintptr_t start_free = area->base + (pages << PAGE_WIDTH); 579 576 580 577 /* 581 578 * Shrinking the area. 582 579 * No need to check for overlaps. 583 *584 580 */ 585 581 … … 588 584 /* 589 585 * Start TLB shootdown sequence. 590 *591 586 */ 592 587 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 593 area->base + pages * PAGE_SIZE, area->pages - pages);588 area->base + (pages << PAGE_WIDTH), area->pages - pages); 594 589 595 590 /* … … 599 594 * is also the right way to remove part of the used_space 600 595 * B+tree leaf list. 601 *602 596 */ 603 597 bool cond = true; … … 615 609 size_t i = 0; 616 610 617 if (overlaps(ptr, size * PAGE_SIZE, area->base,618 pages * PAGE_SIZE)) {611 if (overlaps(ptr, size << PAGE_WIDTH, area->base, 612 pages << PAGE_WIDTH)) { 619 613 620 if (ptr + size * PAGE_SIZE<= start_free) {614 if (ptr + (size << PAGE_WIDTH) <= start_free) { 621 615 /* 622 616 * The whole interval fits 623 617 * completely in the resized 624 618 * address space area. 625 *626 619 */ 627 620 break; … … 632 625 * to b and c overlaps with the resized 633 626 * address space area. 634 *635 627 */ 636 628 … … 652 644 for (; i < size; i++) { 653 645 pte_t *pte = page_mapping_find(as, ptr + 654 i * PAGE_SIZE);646 (i << PAGE_WIDTH)); 655 647 656 648 ASSERT(pte); … … 661 653 (area->backend->frame_free)) { 662 654 area->backend->frame_free(area, 663 ptr + i * PAGE_SIZE,655 ptr + (i << PAGE_WIDTH), 664 656 PTE_GET_FRAME(pte)); 665 657 } 666 658 667 659 page_mapping_remove(as, ptr + 668 i * PAGE_SIZE);660 (i << PAGE_WIDTH)); 669 661 } 670 662 } … … 673 665 /* 674 666 * Finish TLB shootdown sequence. 
675 * 676 */ 677 678 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 667 */ 668 669 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH), 679 670 area->pages - pages); 680 671 681 672 /* 682 673 * Invalidate software translation caches (e.g. TSB on sparc64). 683 *684 674 */ 685 675 as_invalidate_translation_cache(as, area->base + 686 pages * PAGE_SIZE, area->pages - pages);676 (pages << PAGE_WIDTH), area->pages - pages); 687 677 tlb_shootdown_finalize(ipl); 688 678 … … 692 682 * Growing the area. 693 683 * Check for overlaps with other address space areas. 694 * 695 */ 696 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 697 area)) { 684 */ 685 if (!check_area_conflicts(as, address, pages, area)) { 698 686 mutex_unlock(&area->lock); 699 687 mutex_unlock(&as->lock); … … 794 782 795 783 for (size = 0; size < (size_t) node->value[i]; size++) { 796 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 784 pte_t *pte = 785 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 797 786 798 787 ASSERT(pte); … … 803 792 (area->backend->frame_free)) { 804 793 area->backend->frame_free(area, 805 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));794 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 806 795 } 807 796 808 page_mapping_remove(as, ptr + size * PAGE_SIZE);797 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 809 798 } 810 799 } … … 813 802 /* 814 803 * Finish TLB shootdown sequence. 815 *816 804 */ 817 805 … … 821 809 * Invalidate potential software translation caches (e.g. TSB on 822 810 * sparc64). 823 *824 811 */ 825 812 as_invalidate_translation_cache(as, area->base, area->pages); … … 839 826 /* 840 827 * Remove the empty area from address space. 841 *842 828 */ 843 829 btree_remove(&as->as_area_btree, base, NULL); … … 881 867 /* 882 868 * Could not find the source address space area. 883 *884 869 */ 885 870 mutex_unlock(&src_as->lock); … … 891 876 * There is no backend or the backend does not 892 877 * know how to share the area. 893 *894 878 */ 895 879 mutex_unlock(&src_area->lock); … … 898 882 } 899 883 900 size_t src_size = src_area->pages * PAGE_SIZE;884 size_t src_size = src_area->pages << PAGE_WIDTH; 901 885 unsigned int src_flags = src_area->flags; 902 886 mem_backend_t *src_backend = src_area->backend; … … 918 902 * First, prepare the area for sharing. 919 903 * Then it will be safe to unlock it. 920 *921 904 */ 922 905 share_info_t *sh_info = src_area->sh_info; … … 930 913 /* 931 914 * Call the backend to setup sharing. 932 *933 915 */ 934 916 src_area->backend->share(src_area); … … 949 931 * The flags of the source area are masked against dst_flags_mask 950 932 * to support sharing in less privileged mode. 951 *952 933 */ 953 934 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, … … 966 947 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 967 948 * attribute and set the sh_info. 968 *969 949 */ 970 950 mutex_lock(&dst_as->lock); … … 989 969 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 990 970 { 971 ASSERT(mutex_locked(&area->lock)); 972 991 973 int flagmap[] = { 992 974 [PF_ACCESS_READ] = AS_AREA_READ, … … 994 976 [PF_ACCESS_EXEC] = AS_AREA_EXEC 995 977 }; 996 997 ASSERT(mutex_locked(&area->lock));998 978 999 979 if (!(area->flags & flagmap[access])) … … 1066 1046 /* 1067 1047 * Compute total number of used pages in the used_space B+tree 1068 *1069 1048 */ 1070 1049 size_t used_pages = 0; … … 1088 1067 /* 1089 1068 * Start TLB shootdown sequence. 
1090 *1091 1069 */ 1092 1070 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, … … 1096 1074 * Remove used pages from page tables and remember their frame 1097 1075 * numbers. 1098 *1099 1076 */ 1100 1077 size_t frame_idx = 0; … … 1111 1088 1112 1089 for (size = 0; size < (size_t) node->value[i]; size++) { 1113 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1090 pte_t *pte = 1091 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 1114 1092 1115 1093 ASSERT(pte); … … 1120 1098 1121 1099 /* Remove old mapping */ 1122 page_mapping_remove(as, ptr + size * PAGE_SIZE);1100 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 1123 1101 } 1124 1102 } … … 1127 1105 /* 1128 1106 * Finish TLB shootdown sequence. 1129 *1130 1107 */ 1131 1108 … … 1135 1112 * Invalidate potential software translation caches (e.g. TSB on 1136 1113 * sparc64). 1137 *1138 1114 */ 1139 1115 as_invalidate_translation_cache(as, area->base, area->pages); … … 1168 1144 1169 1145 /* Insert the new mapping */ 1170 page_mapping_insert(as, ptr + size * PAGE_SIZE,1146 page_mapping_insert(as, ptr + (size << PAGE_WIDTH), 1171 1147 old_frame[frame_idx++], page_flags); 1172 1148 … … 1217 1193 * No area contained mapping for 'page'. 1218 1194 * Signal page fault to low-level handler. 1219 *1220 1195 */ 1221 1196 mutex_unlock(&AS->lock); … … 1237 1212 * The address space area is not backed by any backend 1238 1213 * or the backend cannot handle page faults. 1239 *1240 1214 */ 1241 1215 mutex_unlock(&area->lock); … … 1249 1223 * To avoid race condition between two page faults on the same address, 1250 1224 * we need to make sure the mapping has not been already inserted. 1251 *1252 1225 */ 1253 1226 pte_t *pte; … … 1267 1240 /* 1268 1241 * Resort to the backend page fault handler. 1269 *1270 1242 */ 1271 1243 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1322 1294 * preemption is disabled. We should not be 1323 1295 * holding any other lock. 1324 *1325 1296 */ 1326 1297 (void) interrupts_enable(); … … 1342 1313 * list of inactive address spaces with assigned 1343 1314 * ASID. 1344 *1345 1315 */ 1346 1316 ASSERT(old_as->asid != ASID_INVALID); … … 1353 1323 * Perform architecture-specific tasks when the address space 1354 1324 * is being removed from the CPU. 1355 *1356 1325 */ 1357 1326 as_deinstall_arch(old_as); … … 1360 1329 /* 1361 1330 * Second, prepare the new address space. 1362 *1363 1331 */ 1364 1332 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1376 1344 * Perform architecture-specific steps. 1377 1345 * (e.g. write ASID to hardware register etc.) 1378 *1379 1346 */ 1380 1347 as_install_arch(new_as); … … 1395 1362 { 1396 1363 ASSERT(mutex_locked(&area->lock)); 1397 1364 1398 1365 return area_flags_to_page_flags(area->flags); 1399 1366 } … … 1499 1466 1500 1467 if (src_area) { 1501 size = src_area->pages * PAGE_SIZE;1468 size = src_area->pages << PAGE_WIDTH; 1502 1469 mutex_unlock(&src_area->lock); 1503 1470 } else … … 1516 1483 * @param count Number of page to be marked. 1517 1484 * 1518 * @return Zero on failure and non-zeroon success.1519 * 1520 */ 1521 intused_space_insert(as_area_t *area, uintptr_t page, size_t count)1485 * @return False on failure or true on success. 1486 * 1487 */ 1488 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1522 1489 { 1523 1490 ASSERT(mutex_locked(&area->lock)); … … 1530 1497 /* 1531 1498 * We hit the beginning of some used space. 
1532 * 1533 */ 1534 return 0; 1499 */ 1500 return false; 1535 1501 } 1536 1502 1537 1503 if (!leaf->keys) { 1538 1504 btree_insert(&area->used_space, page, (void *) count, leaf); 1539 return 1;1505 goto success; 1540 1506 } 1541 1507 … … 1551 1517 * somewhere between the rightmost interval of 1552 1518 * the left neigbour and the first interval of the leaf. 1553 *1554 1519 */ 1555 1520 1556 1521 if (page >= right_pg) { 1557 1522 /* Do nothing. */ 1558 } else if (overlaps(page, count * PAGE_SIZE, left_pg,1559 left_cnt * PAGE_SIZE)) {1523 } else if (overlaps(page, count << PAGE_WIDTH, left_pg, 1524 left_cnt << PAGE_WIDTH)) { 1560 1525 /* The interval intersects with the left interval. */ 1561 return 0;1562 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1563 right_cnt * PAGE_SIZE)) {1526 return false; 1527 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1528 right_cnt << PAGE_WIDTH)) { 1564 1529 /* The interval intersects with the right interval. */ 1565 return 0;1566 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1567 (page + count * PAGE_SIZE== right_pg)) {1530 return false; 1531 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1532 (page + (count << PAGE_WIDTH) == right_pg)) { 1568 1533 /* 1569 1534 * The interval can be added by merging the two already 1570 1535 * present intervals. 1571 *1572 1536 */ 1573 1537 node->value[node->keys - 1] += count + right_cnt; 1574 1538 btree_remove(&area->used_space, right_pg, leaf); 1575 return 1;1576 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1539 goto success; 1540 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1577 1541 /* 1578 1542 * The interval can be added by simply growing the left 1579 1543 * interval. 1580 *1581 1544 */ 1582 1545 node->value[node->keys - 1] += count; 1583 return 1;1584 } else if (page + count * PAGE_SIZE== right_pg) {1546 goto success; 1547 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1585 1548 /* 1586 1549 * The interval can be addded by simply moving base of 1587 1550 * the right interval down and increasing its size 1588 1551 * accordingly. 1589 *1590 1552 */ 1591 1553 leaf->value[0] += count; 1592 1554 leaf->key[0] = page; 1593 return 1;1555 goto success; 1594 1556 } else { 1595 1557 /* 1596 1558 * The interval is between both neigbouring intervals, 1597 1559 * but cannot be merged with any of them. 1598 *1599 1560 */ 1600 1561 btree_insert(&area->used_space, page, (void *) count, 1601 1562 leaf); 1602 return 1;1563 goto success; 1603 1564 } 1604 1565 } else if (page < leaf->key[0]) { … … 1609 1570 * Investigate the border case in which the left neighbour does 1610 1571 * not exist but the interval fits from the left. 1611 * 1612 */ 1613 1614 if (overlaps(page, count * PAGE_SIZE, right_pg, 1615 right_cnt * PAGE_SIZE)) { 1572 */ 1573 1574 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1575 right_cnt << PAGE_WIDTH)) { 1616 1576 /* The interval intersects with the right interval. */ 1617 return 0;1618 } else if (page + count * PAGE_SIZE== right_pg) {1577 return false; 1578 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1619 1579 /* 1620 1580 * The interval can be added by moving the base of the 1621 1581 * right interval down and increasing its size 1622 1582 * accordingly. 1623 *1624 1583 */ 1625 1584 leaf->key[0] = page; 1626 1585 leaf->value[0] += count; 1627 return 1;1586 goto success; 1628 1587 } else { 1629 1588 /* 1630 1589 * The interval doesn't adjoin with the right interval. 1631 1590 * It must be added individually. 
1632 *1633 1591 */ 1634 1592 btree_insert(&area->used_space, page, (void *) count, 1635 1593 leaf); 1636 return 1;1594 goto success; 1637 1595 } 1638 1596 } … … 1649 1607 * somewhere between the leftmost interval of 1650 1608 * the right neigbour and the last interval of the leaf. 1651 *1652 1609 */ 1653 1610 1654 1611 if (page < left_pg) { 1655 1612 /* Do nothing. */ 1656 } else if (overlaps(page, count * PAGE_SIZE, left_pg,1657 left_cnt * PAGE_SIZE)) {1613 } else if (overlaps(page, count << PAGE_WIDTH, left_pg, 1614 left_cnt << PAGE_WIDTH)) { 1658 1615 /* The interval intersects with the left interval. */ 1659 return 0;1660 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1661 right_cnt * PAGE_SIZE)) {1616 return false; 1617 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1618 right_cnt << PAGE_WIDTH)) { 1662 1619 /* The interval intersects with the right interval. */ 1663 return 0;1664 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1665 (page + count * PAGE_SIZE== right_pg)) {1620 return false; 1621 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1622 (page + (count << PAGE_WIDTH) == right_pg)) { 1666 1623 /* 1667 1624 * The interval can be added by merging the two already 1668 1625 * present intervals. 1669 *1670 1626 */ 1671 1627 leaf->value[leaf->keys - 1] += count + right_cnt; 1672 1628 btree_remove(&area->used_space, right_pg, node); 1673 return 1;1674 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1629 goto success; 1630 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1675 1631 /* 1676 1632 * The interval can be added by simply growing the left 1677 1633 * interval. 1678 *1679 1634 */ 1680 leaf->value[leaf->keys - 1] += count;1681 return 1;1682 } else if (page + count * PAGE_SIZE== right_pg) {1635 leaf->value[leaf->keys - 1] += count; 1636 goto success; 1637 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1683 1638 /* 1684 1639 * The interval can be addded by simply moving base of 1685 1640 * the right interval down and increasing its size 1686 1641 * accordingly. 1687 *1688 1642 */ 1689 1643 node->value[0] += count; 1690 1644 node->key[0] = page; 1691 return 1;1645 goto success; 1692 1646 } else { 1693 1647 /* 1694 1648 * The interval is between both neigbouring intervals, 1695 1649 * but cannot be merged with any of them. 1696 *1697 1650 */ 1698 1651 btree_insert(&area->used_space, page, (void *) count, 1699 1652 leaf); 1700 return 1;1653 goto success; 1701 1654 } 1702 1655 } else if (page >= leaf->key[leaf->keys - 1]) { … … 1707 1660 * Investigate the border case in which the right neighbour 1708 1661 * does not exist but the interval fits from the right. 1709 * 1710 */ 1711 1712 if (overlaps(page, count * PAGE_SIZE, left_pg, 1713 left_cnt * PAGE_SIZE)) { 1662 */ 1663 1664 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1665 left_cnt << PAGE_WIDTH)) { 1714 1666 /* The interval intersects with the left interval. */ 1715 return 0;1716 } else if (left_pg + left_cnt * PAGE_SIZE== page) {1667 return false; 1668 } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) { 1717 1669 /* 1718 1670 * The interval can be added by growing the left 1719 1671 * interval. 1720 *1721 1672 */ 1722 1673 leaf->value[leaf->keys - 1] += count; 1723 return 1;1674 goto success; 1724 1675 } else { 1725 1676 /* 1726 1677 * The interval doesn't adjoin with the left interval. 1727 1678 * It must be added individually. 
1728 *1729 1679 */ 1730 1680 btree_insert(&area->used_space, page, (void *) count, 1731 1681 leaf); 1732 return 1;1682 goto success; 1733 1683 } 1734 1684 } … … 1738 1688 * only between two other intervals of the leaf. The two border cases 1739 1689 * were already resolved. 1740 *1741 1690 */ 1742 1691 btree_key_t i; … … 1750 1699 /* 1751 1700 * The interval fits between left_pg and right_pg. 1752 *1753 1701 */ 1754 1702 1755 if (overlaps(page, count * PAGE_SIZE, left_pg,1756 left_cnt * PAGE_SIZE)) {1703 if (overlaps(page, count << PAGE_WIDTH, left_pg, 1704 left_cnt << PAGE_WIDTH)) { 1757 1705 /* 1758 1706 * The interval intersects with the left 1759 1707 * interval. 1760 *1761 1708 */ 1762 return 0;1763 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1764 right_cnt * PAGE_SIZE)) {1709 return false; 1710 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1711 right_cnt << PAGE_WIDTH)) { 1765 1712 /* 1766 1713 * The interval intersects with the right 1767 1714 * interval. 1768 *1769 1715 */ 1770 return 0;1771 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1772 (page + count * PAGE_SIZE== right_pg)) {1716 return false; 1717 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1718 (page + (count << PAGE_WIDTH) == right_pg)) { 1773 1719 /* 1774 1720 * The interval can be added by merging the two 1775 1721 * already present intervals. 1776 *1777 1722 */ 1778 1723 leaf->value[i - 1] += count + right_cnt; 1779 1724 btree_remove(&area->used_space, right_pg, leaf); 1780 return 1;1781 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1725 goto success; 1726 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1782 1727 /* 1783 1728 * The interval can be added by simply growing 1784 1729 * the left interval. 1785 *1786 1730 */ 1787 1731 leaf->value[i - 1] += count; 1788 return 1;1789 } else if (page + count * PAGE_SIZE== right_pg) {1732 goto success; 1733 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1790 1734 /* 1791 1735 * The interval can be addded by simply moving 1792 1736 * base of the right interval down and 1793 1737 * increasing its size accordingly. 1794 *1795 1738 */ 1796 1739 leaf->value[i] += count; 1797 1740 leaf->key[i] = page; 1798 return 1;1741 goto success; 1799 1742 } else { 1800 1743 /* … … 1802 1745 * intervals, but cannot be merged with any of 1803 1746 * them. 1804 *1805 1747 */ 1806 1748 btree_insert(&area->used_space, page, 1807 1749 (void *) count, leaf); 1808 return 1;1750 goto success; 1809 1751 } 1810 1752 } … … 1813 1755 panic("Inconsistency detected while adding %zu pages of used " 1814 1756 "space at %p.", count, (void *) page); 1757 1758 success: 1759 area->resident += count; 1760 return true; 1815 1761 } 1816 1762 … … 1823 1769 * @param count Number of page to be marked. 1824 1770 * 1825 * @return Zero on failure and non-zeroon success.1826 * 1827 */ 1828 intused_space_remove(as_area_t *area, uintptr_t page, size_t count)1771 * @return False on failure or true on success. 1772 * 1773 */ 1774 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1829 1775 { 1830 1776 ASSERT(mutex_locked(&area->lock)); … … 1837 1783 /* 1838 1784 * We are lucky, page is the beginning of some interval. 1839 *1840 1785 */ 1841 1786 if (count > pages) { 1842 return 0;1787 return false; 1843 1788 } else if (count == pages) { 1844 1789 btree_remove(&area->used_space, page, leaf); 1845 return 1;1790 goto success; 1846 1791 } else { 1847 1792 /* 1848 1793 * Find the respective interval. 
1849 1794 * Decrease its size and relocate its start address. 1850 *1851 1795 */ 1852 1796 btree_key_t i; 1853 1797 for (i = 0; i < leaf->keys; i++) { 1854 1798 if (leaf->key[i] == page) { 1855 leaf->key[i] += count * PAGE_SIZE;1799 leaf->key[i] += count << PAGE_WIDTH; 1856 1800 leaf->value[i] -= count; 1857 return 1;1801 goto success; 1858 1802 } 1859 1803 } 1804 1860 1805 goto error; 1861 1806 } … … 1867 1812 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1868 1813 1869 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1870 count * PAGE_SIZE)) {1871 if (page + count * PAGE_SIZE==1872 left_pg + left_cnt * PAGE_SIZE) {1814 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1815 count << PAGE_WIDTH)) { 1816 if (page + (count << PAGE_WIDTH) == 1817 left_pg + (left_cnt << PAGE_WIDTH)) { 1873 1818 /* 1874 1819 * The interval is contained in the rightmost … … 1876 1821 * removed by updating the size of the bigger 1877 1822 * interval. 1878 *1879 1823 */ 1880 1824 node->value[node->keys - 1] -= count; 1881 return 1;1882 } else if (page + count * PAGE_SIZE<1883 left_pg + left_cnt*PAGE_SIZE) {1825 goto success; 1826 } else if (page + (count << PAGE_WIDTH) < 1827 left_pg + (left_cnt << PAGE_WIDTH)) { 1884 1828 /* 1885 1829 * The interval is contained in the rightmost … … 1888 1832 * the original interval and also inserting a 1889 1833 * new interval. 1890 *1891 1834 */ 1892 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1893 (page + count*PAGE_SIZE)) >> PAGE_WIDTH;1835 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1836 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1894 1837 node->value[node->keys - 1] -= count + new_cnt; 1895 1838 btree_insert(&area->used_space, page + 1896 count * PAGE_SIZE, (void *) new_cnt, leaf);1897 return 1;1839 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1840 goto success; 1898 1841 } 1899 1842 } 1900 return 0; 1843 1844 return false; 1901 1845 } else if (page < leaf->key[0]) 1902 return 0;1846 return false; 1903 1847 1904 1848 if (page > leaf->key[leaf->keys - 1]) { … … 1906 1850 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1907 1851 1908 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1909 count * PAGE_SIZE)) {1910 if (page + count * PAGE_SIZE==1911 left_pg + left_cnt * PAGE_SIZE) {1852 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1853 count << PAGE_WIDTH)) { 1854 if (page + (count << PAGE_WIDTH) == 1855 left_pg + (left_cnt << PAGE_WIDTH)) { 1912 1856 /* 1913 1857 * The interval is contained in the rightmost 1914 1858 * interval of the leaf and can be removed by 1915 1859 * updating the size of the bigger interval. 1916 *1917 1860 */ 1918 1861 leaf->value[leaf->keys - 1] -= count; 1919 return 1;1920 } else if (page + count * PAGE_SIZE< left_pg +1921 left_cnt * PAGE_SIZE) {1862 goto success; 1863 } else if (page + (count << PAGE_WIDTH) < left_pg + 1864 (left_cnt << PAGE_WIDTH)) { 1922 1865 /* 1923 1866 * The interval is contained in the rightmost … … 1926 1869 * original interval and also inserting a new 1927 1870 * interval. 
1928 *1929 1871 */ 1930 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1931 (page + count * PAGE_SIZE)) >> PAGE_WIDTH;1872 size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) - 1873 (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH; 1932 1874 leaf->value[leaf->keys - 1] -= count + new_cnt; 1933 1875 btree_insert(&area->used_space, page + 1934 count * PAGE_SIZE, (void *) new_cnt, leaf);1935 return 1;1876 (count << PAGE_WIDTH), (void *) new_cnt, leaf); 1877 goto success; 1936 1878 } 1937 1879 } 1938 return 0; 1880 1881 return false; 1939 1882 } 1940 1883 1941 1884 /* 1942 1885 * The border cases have been already resolved. 1943 * Now the interval can be only between intervals of the leaf. 1886 * Now the interval can be only between intervals of the leaf. 1944 1887 */ 1945 1888 btree_key_t i; … … 1953 1896 * to (i - 1) and i. 1954 1897 */ 1955 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,1956 count * PAGE_SIZE)) {1957 if (page + count * PAGE_SIZE==1958 left_pg + left_cnt*PAGE_SIZE) {1898 if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page, 1899 count << PAGE_WIDTH)) { 1900 if (page + (count << PAGE_WIDTH) == 1901 left_pg + (left_cnt << PAGE_WIDTH)) { 1959 1902 /* 1960 1903 * The interval is contained in the … … 1962 1905 * be removed by updating the size of 1963 1906 * the bigger interval. 1964 *1965 1907 */ 1966 1908 leaf->value[i - 1] -= count; 1967 return 1;1968 } else if (page + count * PAGE_SIZE<1969 left_pg + left_cnt * PAGE_SIZE) {1909 goto success; 1910 } else if (page + (count << PAGE_WIDTH) < 1911 left_pg + (left_cnt << PAGE_WIDTH)) { 1970 1912 /* 1971 1913 * The interval is contained in the … … 1976 1918 */ 1977 1919 size_t new_cnt = ((left_pg + 1978 left_cnt * PAGE_SIZE) -1979 (page + count * PAGE_SIZE)) >>1920 (left_cnt << PAGE_WIDTH)) - 1921 (page + (count << PAGE_WIDTH))) >> 1980 1922 PAGE_WIDTH; 1981 1923 leaf->value[i - 1] -= count + new_cnt; 1982 1924 btree_insert(&area->used_space, page + 1983 count * PAGE_SIZE, (void *) new_cnt,1925 (count << PAGE_WIDTH), (void *) new_cnt, 1984 1926 leaf); 1985 return 1;1927 goto success; 1986 1928 } 1987 1929 } 1988 return 0; 1930 1931 return false; 1989 1932 } 1990 1933 } … … 1993 1936 panic("Inconsistency detected while removing %zu pages of used " 1994 1937 "space from %p.", count, (void *) page); 1938 1939 success: 1940 area->resident -= count; 1941 return true; 1995 1942 } 1996 1943 … … 2027 1974 } 2028 1975 1976 /** Return pointer to unmapped address space area 1977 * 1978 * @param base Lowest address bound. 1979 * @param size Requested size of the allocation. 1980 * 1981 * @return Pointer to the beginning of unmapped address space area. 1982 * 1983 */ 1984 sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size) 1985 { 1986 if (size == 0) 1987 return 0; 1988 1989 /* 1990 * Make sure we allocate from page-aligned 1991 * address. Check for possible overflow in 1992 * each step. 1993 */ 1994 1995 size_t pages = SIZE2FRAMES(size); 1996 uintptr_t ret = 0; 1997 1998 /* 1999 * Find the lowest unmapped address aligned on the sz 2000 * boundary, not smaller than base and of the required size. 
2001 */ 2002 2003 mutex_lock(&AS->lock); 2004 2005 /* First check the base address itself */ 2006 uintptr_t addr = ALIGN_UP(base, PAGE_SIZE); 2007 if ((addr >= base) && 2008 (check_area_conflicts(AS, addr, pages, NULL))) 2009 ret = addr; 2010 2011 /* Eventually check the addresses behind each area */ 2012 link_t *cur; 2013 for (cur = AS->as_area_btree.leaf_head.next; 2014 (ret == 0) && (cur != &AS->as_area_btree.leaf_head); 2015 cur = cur->next) { 2016 btree_node_t *node = 2017 list_get_instance(cur, btree_node_t, leaf_link); 2018 2019 btree_key_t i; 2020 for (i = 0; (ret == 0) && (i < node->keys); i++) { 2021 as_area_t *area = (as_area_t *) node->value[i]; 2022 2023 mutex_lock(&area->lock); 2024 2025 uintptr_t addr = 2026 ALIGN_UP(area->base + (area->pages << PAGE_WIDTH), 2027 PAGE_SIZE); 2028 2029 if ((addr >= base) && (addr >= area->base) && 2030 (check_area_conflicts(AS, addr, pages, area))) 2031 ret = addr; 2032 2033 mutex_unlock(&area->lock); 2034 } 2035 } 2036 2037 mutex_unlock(&AS->lock); 2038 2039 return (sysarg_t) ret; 2040 } 2041 2029 2042 /** Get list of adress space areas. 2030 2043 * … … 2093 2106 mutex_lock(&as->lock); 2094 2107 2095 /* print out info about address space areas */2108 /* Print out info about address space areas */ 2096 2109 link_t *cur; 2097 2110 for (cur = as->as_area_btree.leaf_head.next; -
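The used_space_insert()/used_space_remove() hunks above do three things at once: the functions now return bool instead of int, the PAGE_SIZE multiplications become PAGE_WIDTH shifts, and every success path funnels through a label that updates the new area->resident counter exactly once. The underlying technique, tracking used space as disjoint (page, count) intervals and merging an insertion with its adjacent neighbours, is easier to see without the B+ tree. Below is a minimal standalone sketch over a sorted array; all names are hypothetical and PAGE_WIDTH assumes 4 KiB pages.

/* Minimal sketch of the interval-merge logic of used_space_insert(),
 * over a sorted array instead of the kernel's B+ tree. All names here
 * are hypothetical; PAGE_WIDTH = 12 mirrors a 4 KiB page size. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_WIDTH  12
#define MAX_IVALS   16

typedef struct {
	uintptr_t page;   /* base address of the interval */
	size_t count;     /* number of pages */
} ival_t;

static ival_t ivals[MAX_IVALS];
static size_t nivals;
static size_t resident;   /* mirrors as_area_t->resident */

static bool overlaps(uintptr_t a, size_t sa, uintptr_t b, size_t sb)
{
	return (a < b + sb) && (b < a + sa);
}

static bool ival_insert(uintptr_t page, size_t count)
{
	size_t i = 0;
	while ((i < nivals) && (ivals[i].page < page))
		i++;

	/* Reject any overlap with the left or right neighbour. */
	if ((i > 0) && overlaps(page, count << PAGE_WIDTH,
	    ivals[i - 1].page, ivals[i - 1].count << PAGE_WIDTH))
		return false;
	if ((i < nivals) && overlaps(page, count << PAGE_WIDTH,
	    ivals[i].page, ivals[i].count << PAGE_WIDTH))
		return false;

	bool left = (i > 0) &&
	    (ivals[i - 1].page + (ivals[i - 1].count << PAGE_WIDTH) == page);
	bool right = (i < nivals) &&
	    (page + (count << PAGE_WIDTH) == ivals[i].page);

	if (left && right) {
		/* Bridge the gap: merge both neighbours into one interval. */
		ivals[i - 1].count += count + ivals[i].count;
		memmove(&ivals[i], &ivals[i + 1],
		    (nivals - i - 1) * sizeof(ival_t));
		nivals--;
	} else if (left) {
		ivals[i - 1].count += count;  /* grow the left interval */
	} else if (right) {
		ivals[i].page = page;         /* grow the right interval down */
		ivals[i].count += count;
	} else {
		if (nivals == MAX_IVALS)
			return false;
		memmove(&ivals[i + 1], &ivals[i], (nivals - i) * sizeof(ival_t));
		ivals[i] = (ival_t) { page, count };
		nivals++;
	}

	resident += count;   /* every success path updates the counter once */
	return true;
}

int main(void)
{
	ival_insert(0x1000, 1);
	ival_insert(0x3000, 1);
	ival_insert(0x2000, 1);   /* merges all three into one interval */
	printf("%zu interval(s), %zu resident page(s)\n", nivals, resident);
	return 0;
}

The kernel version distinguishes many more border cases because the neighbours may live in adjacent B+ tree leaves, but the merge decisions are the same four: overlap (fail), grow left, grow right, or bridge both.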
kernel/generic/src/mm/backend_phys.c
r6e50466 rad7a6c9 81 81 page_mapping_insert(AS, addr, base + (addr - area->base), 82 82 as_area_get_flags(area)); 83 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 84 panic("Cannot insert used space."); 83 84 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 85 panic("Cannot insert used space."); 85 86 86 87 return AS_PF_OK; -
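The physical memory backend above services a page fault by mapping the faulting page at a fixed offset from the area's physical base and then recording that page as used, panicking if the bookkeeping fails. A hedged sketch of the address arithmetic only; the constants and names are illustrative:

/* Sketch of the linear fault-to-frame translation used by the phys
 * backend. frame_base stands in for the parea's physical base; all
 * values are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define ALIGN_DOWN(addr, align)  ((addr) & ~((uintptr_t) (align) - 1))

int main(void)
{
	uintptr_t area_base = 0x40000000;   /* virtual base of the area */
	uintptr_t frame_base = 0x000fd000;  /* physical base backing it */
	uintptr_t fault = 0x40001234;       /* faulting virtual address */

	uintptr_t page = ALIGN_DOWN(fault, PAGE_SIZE);
	uintptr_t frame = frame_base + (page - area_base);

	/* A real handler would now call page_mapping_insert() with this
	 * pair and used_space_insert(area, page, 1), panicking on failure. */
	printf("map %#lx -> %#lx\n", (unsigned long) page,
	    (unsigned long) frame);
	return 0;
}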
kernel/generic/src/mm/slab.c
r6e50466 rad7a6c9 806 806 } 807 807 808 /** Go through all caches and reclaim what is possible 809 * 810 * Interrupts must be disabled before calling this function, 811 * otherwise memory allocation from interrupts can deadlock. 812 * 813 */ 808 /** Go through all caches and reclaim what is possible */ 814 809 size_t slab_reclaim(unsigned int flags) 815 810 { 816 irq_spinlock_lock(&slab_cache_lock, false);811 irq_spinlock_lock(&slab_cache_lock, true); 817 812 818 813 size_t frames = 0; … … 824 819 } 825 820 826 irq_spinlock_unlock(&slab_cache_lock, false);821 irq_spinlock_unlock(&slab_cache_lock, true); 827 822 828 823 return frames; -
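The slab_reclaim() hunk flips the irq_spinlock_lock()/unlock() flag from false to true, so the lock now disables and restores interrupts itself; the old comment requiring callers to disable interrupts beforehand is dropped accordingly. A hedged sketch of that locking pattern, with stand-ins for the real interrupt primitives:

/* Sketch of the irq_spinlock pattern implied by the slab_reclaim()
 * change: when irq_dis is true the lock disables interrupts itself and
 * the unlock restores them, so callers need no interrupt discipline of
 * their own. The interrupt primitives are hypothetical stand-ins. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag lock;
	bool saved_ipl;   /* interrupt state captured at lock time */
} irq_spinlock_t;

static bool interrupts_disable(void) { return true; }
static void interrupts_restore(bool ipl) { (void) ipl; }

static void irq_spinlock_lock(irq_spinlock_t *sl, bool irq_dis)
{
	bool ipl = irq_dis ? interrupts_disable() : false;
	while (atomic_flag_test_and_set_explicit(&sl->lock,
	    memory_order_acquire))
		;  /* spin */
	if (irq_dis)
		sl->saved_ipl = ipl;
}

static void irq_spinlock_unlock(irq_spinlock_t *sl, bool irq_dis)
{
	bool ipl = irq_dis ? sl->saved_ipl : false;
	atomic_flag_clear_explicit(&sl->lock, memory_order_release);
	if (irq_dis)
		interrupts_restore(ipl);
}

int main(void)
{
	irq_spinlock_t sl = { ATOMIC_FLAG_INIT, false };
	irq_spinlock_lock(&sl, true);
	irq_spinlock_unlock(&sl, true);
	return 0;
}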
kernel/generic/src/proc/program.c
r6e50466 rad7a6c9 171 171 void *loader = program_loader; 172 172 if (!loader) { 173 as_destroy(as); 173 174 printf("Cannot spawn loader as none was registered\n"); 174 175 return ENOENT; … … 179 180 if (rc != EE_OK) { 180 181 as_destroy(as); 182 printf("Cannot spawn loader (%s)\n", elf_error(rc)); 181 183 return ENOENT; 182 184 } -
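The program.c hunk makes the loader error paths release the newly created address space (and print a diagnostic) before returning ENOENT, instead of leaking it. A sketch of that cleanup-on-error discipline; every function here is a hypothetical stand-in:

/* Sketch of the discipline the program.c hunk enforces: every failure
 * path after as_create() must release the address space. */
#include <stdio.h>
#include <stdlib.h>

#define EOK     0
#define ENOENT  (-1)

typedef struct { int dummy; } as_t;

static as_t *as_create(void) { return calloc(1, sizeof(as_t)); }
static void as_destroy(as_t *as) { free(as); }
static int load_image(as_t *as) { (void) as; return ENOENT; /* simulate */ }

static int program_create(void)
{
	as_t *as = as_create();
	if (as == NULL)
		return ENOENT;

	int rc = load_image(as);
	if (rc != EOK) {
		/* Undo the partial creation before reporting the error. */
		as_destroy(as);
		printf("Cannot spawn loader\n");
		return ENOENT;
	}

	return EOK;
}

int main(void)
{
	return (program_create() == EOK) ? 0 : 1;
}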
kernel/generic/src/proc/scheduler.c
r6e50466 rad7a6c9 62 62 #include <print.h> 63 63 #include <debug.h> 64 65 static void before_task_runs(void); 66 static void before_thread_runs(void); 67 static void after_thread_ran(void); 64 #include <stacktrace.h> 65 68 66 static void scheduler_separated_stack(void); 69 67 … … 71 69 72 70 /** Carry out actions before new task runs. */ 73 void before_task_runs(void)71 static void before_task_runs(void) 74 72 { 75 73 before_task_runs_arch(); … … 80 78 * Perform actions that need to be 81 79 * taken before the newly selected 82 * t read is passed control.80 * thread is passed control. 83 81 * 84 82 * THREAD->lock is locked on entry 85 83 * 86 84 */ 87 void before_thread_runs(void)85 static void before_thread_runs(void) 88 86 { 89 87 before_thread_runs_arch(); 88 90 89 #ifdef CONFIG_FPU_LAZY 91 if (THREAD == CPU->fpu_owner)90 if (THREAD == CPU->fpu_owner) 92 91 fpu_enable(); 93 92 else … … 102 101 } 103 102 #endif 103 104 #ifdef CONFIG_UDEBUG 105 if (THREAD->btrace) { 106 istate_t *istate = THREAD->udebug.uspace_state; 107 if (istate != NULL) { 108 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 109 stack_trace_istate(istate); 110 } 111 112 THREAD->btrace = false; 113 } 114 #endif 104 115 } 105 116 … … 113 124 * 114 125 */ 115 void after_thread_ran(void)126 static void after_thread_ran(void) 116 127 { 117 128 after_thread_ran_arch(); … … 391 402 * possible destruction should thread_destroy() be called on this or any 392 403 * other processor while the scheduler is still using them. 393 *394 404 */ 395 405 if (old_task) … … 417 427 * The thread structure is kept allocated until 418 428 * somebody calls thread_detach() on it. 419 *420 429 */ 421 430 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) { 422 431 /* 423 432 * Avoid deadlock. 424 *425 433 */ 426 434 irq_spinlock_unlock(&THREAD->lock, false); … … 443 451 /* 444 452 * Prefer the thread after it's woken up. 445 *446 453 */ 447 454 THREAD->priority = -1; … … 451 458 * waitq_sleep(). Address of wq->lock is kept in 452 459 * THREAD->sleep_queue. 453 *454 460 */ 455 461 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); … … 461 467 /* 462 468 * Entering state is unexpected. 463 *464 469 */ 465 470 panic("tid%" PRIu64 ": unexpected state %s.", … … 480 485 481 486 /* 482 * If both the old and the new task are the same, lots of work is 483 * avoided. 484 * 487 * If both the old and the new task are the same, 488 * lots of work is avoided. 485 489 */ 486 490 if (TASK != THREAD->task) { … … 488 492 489 493 /* 490 * Note that it is possible for two tasks to share one address 491 * space. 492 ( 494 * Note that it is possible for two tasks 495 * to share one address space. 493 496 */ 494 497 if (old_as != new_as) { … … 496 499 * Both tasks and address spaces are different. 497 500 * Replace the old one with the new one. 498 *499 501 */ 500 502 as_switch(old_as, new_as); … … 527 529 * necessary, is to be mapped in before_thread_runs(). This 528 530 * function must be executed before the switch to the new stack. 529 *530 531 */ 531 532 before_thread_runs(); … … 534 535 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 535 536 * thread's stack. 536 *537 537 */ 538 538 the_copy(THE, (the_t *) THREAD->kstack); … … 658 658 /* 659 659 * Ready thread on local CPU 660 *661 660 */ 662 661 -
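The scheduler hunk adds a one-shot btrace request: whoever wants a stack trace of a thread sets the flag, and before_thread_runs() prints the trace just before the thread is dispatched, when its saved user-space state is coherent. A minimal sketch of the flag-and-consume pattern, with illustrative names:

/* Sketch of the deferred-request pattern behind the new btrace flag: a
 * requester sets a per-thread flag and the scheduler consumes it at the
 * next dispatch. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	const char *name;
	bool btrace;   /* one-shot request flag */
} thread_t;

static void stack_trace(thread_t *t)
{
	printf("stack trace of %s\n", t->name);
}

/* Called with the thread locked, just before it is given the CPU. */
static void before_thread_runs(thread_t *t)
{
	if (t->btrace) {
		stack_trace(t);
		t->btrace = false;  /* consume the one-shot request */
	}
}

int main(void)
{
	thread_t t = { "worker", false };
	t.btrace = true;        /* e.g. requested from a kconsole command */
	before_thread_runs(&t); /* trace printed exactly once */
	before_thread_runs(&t); /* no output: flag already consumed */
	return 0;
}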
kernel/generic/src/proc/task.c
r6e50466 rad7a6c9 342 342 sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len) 343 343 { 344 int rc;345 344 char namebuf[TASK_NAME_BUFLEN]; 346 345 347 346 /* Cap length of name and copy it from userspace. */ 348 349 347 if (name_len > TASK_NAME_BUFLEN - 1) 350 348 name_len = TASK_NAME_BUFLEN - 1; 351 349 352 rc = copy_from_uspace(namebuf, uspace_name, name_len);350 int rc = copy_from_uspace(namebuf, uspace_name, name_len); 353 351 if (rc != 0) 354 352 return (sysarg_t) rc; 355 353 356 354 namebuf[name_len] = '\0'; 355 356 /* 357 * As the task name is referenced also from the 358 * threads, lock the threads' lock for the course 359 * of the update. 360 */ 361 362 irq_spinlock_lock(&tasks_lock, true); 363 irq_spinlock_lock(&TASK->lock, false); 364 irq_spinlock_lock(&threads_lock, false); 365 366 /* Set task name */ 357 367 str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf); 358 368 369 irq_spinlock_unlock(&threads_lock, false); 370 irq_spinlock_unlock(&TASK->lock, false); 371 irq_spinlock_unlock(&tasks_lock, true); 372 359 373 return EOK; 374 } 375 376 /** Syscall to forcefully terminate a task 377 * 378 * @param uspace_taskid Pointer to task ID in user space. 379 * 380 * @return 0 on success or an error code from @ref errno.h. 381 * 382 */ 383 sysarg_t sys_task_kill(task_id_t *uspace_taskid) 384 { 385 task_id_t taskid; 386 int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid)); 387 if (rc != 0) 388 return (sysarg_t) rc; 389 390 return (sysarg_t) task_kill(taskid); 360 391 } 361 392 … … 430 461 static void task_kill_internal(task_t *task) 431 462 { 463 irq_spinlock_lock(&task->lock, false); 464 irq_spinlock_lock(&threads_lock, false); 465 466 /* 467 * Interrupt all threads. 468 */ 469 432 470 link_t *cur; 433 434 /*435 * Interrupt all threads.436 */437 irq_spinlock_lock(&task->lock, false);438 471 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 439 472 thread_t *thread = list_get_instance(cur, thread_t, th_link); … … 452 485 } 453 486 487 irq_spinlock_unlock(&threads_lock, false); 454 488 irq_spinlock_unlock(&task->lock, false); 455 489 } … … 481 515 irq_spinlock_unlock(&tasks_lock, true); 482 516 517 return EOK; 518 } 519 520 /** Kill the currently running task. 521 * 522 * @param notify Send out fault notifications. 523 * 524 * @return Zero on success or an error code from errno.h. 525 * 526 */ 527 void task_kill_self(bool notify) 528 { 529 /* 530 * User space can subscribe for FAULT events to take action 531 * whenever a task faults (to take a dump, run a debugger, etc.). 532 * The notification is always available, but unless udebug is enabled, 533 * that's all you get. 534 */ 535 if (notify) { 536 if (event_is_subscribed(EVENT_FAULT)) { 537 /* Notify the subscriber that a fault occurred. */ 538 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 539 UPPER32(TASK->taskid), (sysarg_t) THREAD); 540 541 #ifdef CONFIG_UDEBUG 542 /* Wait for a debugging session. */ 543 udebug_thread_fault(); 544 #endif 545 } 546 } 547 548 irq_spinlock_lock(&tasks_lock, true); 549 task_kill_internal(TASK); 550 irq_spinlock_unlock(&tasks_lock, true); 551 552 thread_exit(); 553 } 554 555 /** Process syscall to terminate the current task. 556 * 557 * @param notify Send out fault notifications. 558 * 559 */ 560 sysarg_t sys_task_exit(sysarg_t notify) 561 { 562 task_kill_self(notify); 563 564 /* Unreachable */ 483 565 return EOK; 484 566 } -
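The rewritten sys_task_set_name() takes tasks_lock, then TASK->lock, then threads_lock, and releases them in reverse, because the task name is also referenced from the threads. A sketch of that fixed lock-ordering discipline, using plain mutexes in place of the kernel's irq spinlocks:

/* Sketch of the lock-ordering discipline visible in sys_task_set_name():
 * outer locks are always taken before inner ones and released in
 * reverse, which rules out lock-order deadlocks. */
#include <pthread.h>
#include <string.h>

#define TASK_NAME_BUFLEN 32

static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t threads_lock = PTHREAD_MUTEX_INITIALIZER;

static char task_name[TASK_NAME_BUFLEN];

static void task_set_name(const char *name)
{
	/* Take locks strictly outermost-first... */
	pthread_mutex_lock(&tasks_lock);
	pthread_mutex_lock(&task_lock);
	pthread_mutex_lock(&threads_lock);

	strncpy(task_name, name, TASK_NAME_BUFLEN - 1);
	task_name[TASK_NAME_BUFLEN - 1] = '\0';

	/* ...and release them in exactly the reverse order. */
	pthread_mutex_unlock(&threads_lock);
	pthread_mutex_unlock(&task_lock);
	pthread_mutex_unlock(&tasks_lock);
}

int main(void)
{
	task_set_name("renamed");
	return 0;
}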
kernel/generic/src/proc/thread.c
r6e50466 rad7a6c9 239 239 * Switch thread to the ready state. 240 240 * 241 * @param t Thread to make ready.241 * @param thread Thread to make ready. 242 242 * 243 243 */ … … 246 246 irq_spinlock_lock(&thread->lock, true); 247 247 248 ASSERT( !(thread->state == Ready));248 ASSERT(thread->state != Ready); 249 249 250 250 int i = (thread->priority < RQ_COUNT - 1) … … 350 350 351 351 #ifdef CONFIG_UDEBUG 352 /* Init debugging stuff */ 352 /* Initialize debugging stuff */ 353 thread->btrace = false; 353 354 udebug_thread_initialize(&thread->udebug); 354 355 #endif … … 535 536 /** Detach thread. 536 537 * 537 * Mark the thread as detached , if the thread is already in the Lingering538 * state, deallocate its resources.538 * Mark the thread as detached. If the thread is already 539 * in the Lingering state, deallocate its resources. 539 540 * 540 541 * @param thread Thread to be detached. … … 590 591 order_suffix(thread->kcycles, &kcycles, &ksuffix); 591 592 593 char *name; 594 if (str_cmp(thread->name, "uinit") == 0) 595 name = thread->task->name; 596 else 597 name = thread->name; 598 592 599 #ifdef __32_BITS__ 593 600 if (*additional) 594 printf("%-8" PRIu64 "%10p %9" PRIu64 "%c %9" PRIu64 "%c ",595 thread->tid, thread-> kstack, ucycles, usuffix,596 kcycles, ksuffix);601 printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ", 602 thread->tid, thread->thread_code, thread->kstack, 603 ucycles, usuffix, kcycles, ksuffix); 597 604 else 598 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",599 thread->tid, thread->name, thread, thread_states[thread->state],600 thread->task, thread->task->context , thread->thread_code);605 printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n", 606 thread->tid, name, thread, thread_states[thread->state], 607 thread->task, thread->task->context); 601 608 #endif 602 609 603 610 #ifdef __64_BITS__ 604 611 if (*additional) 605 printf("%-8" PRIu64 " %18p %18p\n"612 printf("%-8" PRIu64 " %18p %18p\n" 606 613 " %9" PRIu64 "%c %9" PRIu64 "%c ", 607 614 thread->tid, thread->thread_code, thread->kstack, 608 615 ucycles, usuffix, kcycles, ksuffix); 609 616 else 610 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",611 thread->tid, thread->name, thread, thread_states[thread->state],617 printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n", 618 thread->tid, name, thread, thread_states[thread->state], 612 619 thread->task, thread->task->context); 613 620 #endif … … 647 654 #ifdef __32_BITS__ 648 655 if (additional) 649 printf("[id ] [ stack ] [ucycles ] [kcycles ] [cpu]"650 " [ waitqueue]\n");656 printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]" 657 " [cpu] [waitqueue]\n"); 651 658 else 652 659 printf("[id ] [name ] [address ] [state ] [task ]" 653 " [ctx] [code ]\n");660 " [ctx]\n"); 654 661 #endif 655 662 … … 740 747 ASSERT(interrupts_disabled()); 741 748 ASSERT(irq_spinlock_locked(&threads_lock)); 742 749 743 750 thread_iterator_t iterator; 744 751 … … 751 758 } 752 759 760 #ifdef CONFIG_UDEBUG 761 762 void thread_stack_trace(thread_id_t thread_id) 763 { 764 irq_spinlock_lock(&threads_lock, true); 765 766 thread_t *thread = thread_find_by_id(thread_id); 767 if (thread == NULL) { 768 printf("No such thread.\n"); 769 irq_spinlock_unlock(&threads_lock, true); 770 return; 771 } 772 773 irq_spinlock_lock(&thread->lock, false); 774 775 /* 776 * Schedule a stack trace to be printed 777 * just before the thread is scheduled next. 778 * 779 * If the thread is sleeping then try to interrupt 780 * the sleep. 
Any request for printing an uspace stack 781 * trace from within the kernel should be always 782 * considered a last resort debugging means, therefore 783 * forcing the thread's sleep to be interrupted 784 * is probably justifiable. 785 */ 786 787 bool sleeping = false; 788 istate_t *istate = thread->udebug.uspace_state; 789 if (istate != NULL) { 790 printf("Scheduling thread stack trace.\n"); 791 thread->btrace = true; 792 if (thread->state == Sleeping) 793 sleeping = true; 794 } else 795 printf("Thread interrupt state not available.\n"); 796 797 irq_spinlock_unlock(&thread->lock, false); 798 799 if (sleeping) 800 waitq_interrupt_sleep(thread); 801 802 irq_spinlock_unlock(&threads_lock, true); 803 } 804 805 #endif /* CONFIG_UDEBUG */ 753 806 754 807 /** Process syscall to create new thread. … … 793 846 * has already been created. We need to undo its 794 847 * creation now. 795 *796 848 */ 797 849 … … 815 867 * THREAD_B events for threads that already existed 816 868 * and could be detected with THREAD_READ before. 817 *818 869 */ 819 870 udebug_thread_b_event_attach(thread, TASK); -
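thread_stack_trace() above is the requester side of the btrace mechanism: it looks the thread up, sets the one-shot flag, and, as a last-resort debugging measure, interrupts the thread's sleep so the trace appears promptly. A hedged single-threaded sketch without the real locking; the data model is hypothetical:

/* Sketch of the requester side of thread_stack_trace(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { Running, Sleeping } state_t;

typedef struct {
	uint64_t tid;
	state_t state;
	bool btrace;
} thread_t;

static thread_t threads[] = {
	{ 1, Running, false },
	{ 2, Sleeping, false },
};

static thread_t *thread_find_by_id(uint64_t tid)
{
	for (size_t i = 0; i < sizeof(threads) / sizeof(threads[0]); i++)
		if (threads[i].tid == tid)
			return &threads[i];
	return NULL;
}

static void waitq_interrupt_sleep(thread_t *t)
{
	t->state = Running;   /* stand-in for the real wakeup */
}

static void thread_stack_trace(uint64_t tid)
{
	thread_t *t = thread_find_by_id(tid);
	if (t == NULL) {
		printf("No such thread.\n");
		return;
	}

	t->btrace = true;   /* consumed in before_thread_runs() */
	if (t->state == Sleeping)
		waitq_interrupt_sleep(t);   /* last resort, so waking is OK */
}

int main(void)
{
	thread_stack_trace(2);
	printf("thread 2 state: %d, btrace: %d\n",
	    threads[1].state, threads[1].btrace);
	return 0;
}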
kernel/generic/src/synch/waitq.c
r6e50466 rad7a6c9 127 127 /** Interrupt sleeping thread. 128 128 * 129 * This routine attempts to interrupt a thread from its sleep in a waitqueue. 130 * If the thread is not found sleeping, no action is taken. 129 * This routine attempts to interrupt a thread from its sleep in 130 * a waitqueue. If the thread is not found sleeping, no action 131 * is taken. 132 * 133 * The threads_lock must be already held and interrupts must be 134 * disabled upon calling this function. 131 135 * 132 136 * @param thread Thread to be interrupted. … … 138 142 DEADLOCK_PROBE_INIT(p_wqlock); 139 143 140 irq_spinlock_lock(&threads_lock, true); 141 if (!thread_exists(thread)) 142 goto out; 144 /* 145 * The thread is quaranteed to exist because 146 * threads_lock is held. 147 */ 143 148 144 149 grab_locks: … … 150 155 /* 151 156 * The sleep cannot be interrupted. 152 *153 157 */ 154 158 irq_spinlock_unlock(&thread->lock, false); 155 goto out;159 return; 156 160 } 157 161 158 162 if (!irq_spinlock_trylock(&wq->lock)) { 163 /* Avoid deadlock */ 159 164 irq_spinlock_unlock(&thread->lock, false); 160 165 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 161 /* Avoid deadlock */162 166 goto grab_locks; 163 167 } … … 173 177 irq_spinlock_unlock(&wq->lock, false); 174 178 } 179 175 180 irq_spinlock_unlock(&thread->lock, false); 176 181 177 182 if (do_wakeup) 178 183 thread_ready(thread); 179 180 out:181 irq_spinlock_unlock(&threads_lock, true);182 184 } 183 185 … … 370 372 * If the thread was already interrupted, 371 373 * don't go to sleep at all. 372 *373 374 */ 374 375 if (THREAD->interrupted) { … … 381 382 * Set context that will be restored if the sleep 382 383 * of this thread is ever interrupted. 383 *384 384 */ 385 385 THREAD->sleep_interruptible = true; -
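The waitq_interrupt_sleep() hunk shifts responsibility for threads_lock to the caller and keeps the trylock retry loop that prevents deadlock: the wait queue lock is only tried while the thread lock is held, and on failure both are dropped and the sequence restarts, so no cycle of held locks can form. A sketch of that retry pattern with plain mutexes:

/* Sketch of the deadlock-avoidance retry in waitq_interrupt_sleep(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;

static void interrupt_sleep(void)
{
grab_locks:
	pthread_mutex_lock(&thread_lock);

	if (pthread_mutex_trylock(&wq_lock) != 0) {
		/* Somebody holds the queue lock; avoid deadlock by
		 * retrying from scratch instead of blocking while
		 * thread_lock is held. */
		pthread_mutex_unlock(&thread_lock);
		goto grab_locks;
	}

	/* ... remove the thread from the queue here ... */

	pthread_mutex_unlock(&wq_lock);
	pthread_mutex_unlock(&thread_lock);
}

int main(void)
{
	interrupt_sleep();
	printf("done\n");
	return 0;
}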
kernel/generic/src/syscall/syscall.c
r6e50466 rad7a6c9 45 45 #include <debug.h> 46 46 #include <ddi/device.h> 47 #include <interrupt.h> 47 48 #include <ipc/sysipc.h> 48 49 #include <synch/futex.h> … … 66 67 #ifdef CONFIG_UDEBUG 67 68 /* 69 * An istate_t-compatible record was created on the stack by the 70 * low-level syscall handler. This is the userspace space state 71 * structure. 72 */ 73 THREAD->udebug.uspace_state = istate_get(THREAD); 74 75 /* 68 76 * Early check for undebugged tasks. We do not lock anything as this 69 77 * test need not be precise in either direction. 70 *71 78 */ 72 79 if (THREAD->udebug.active) … … 79 86 } else { 80 87 printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id); 81 task_kill(TASK->taskid); 82 thread_exit(); 88 task_kill_self(true); 83 89 } 84 90 … … 98 104 udebug_stoppable_end(); 99 105 } 106 107 /* Clear userspace state pointer */ 108 THREAD->udebug.uspace_state = NULL; 100 109 #endif 101 110 … … 120 129 (syshandler_t) sys_task_get_id, 121 130 (syshandler_t) sys_task_set_name, 131 (syshandler_t) sys_task_kill, 132 (syshandler_t) sys_task_exit, 122 133 (syshandler_t) sys_program_spawn_loader, 123 134 … … 132 143 (syshandler_t) sys_as_area_change_flags, 133 144 (syshandler_t) sys_as_area_destroy, 145 (syshandler_t) sys_as_get_unmapped_area, 134 146 135 147 /* IPC related syscalls. */ … … 145 157 (syshandler_t) sys_ipc_poke, 146 158 (syshandler_t) sys_ipc_hangup, 147 (syshandler_t) sys_ipc_register_irq,148 (syshandler_t) sys_ipc_unregister_irq,149 159 (syshandler_t) sys_ipc_connect_kbox, 150 160 … … 160 170 (syshandler_t) sys_physmem_map, 161 171 (syshandler_t) sys_iospace_enable, 162 (syshandler_t) sys_interrupt_enable, 172 (syshandler_t) sys_register_irq, 173 (syshandler_t) sys_unregister_irq, 163 174 164 175 /* Sysinfo syscalls */ -
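syscall_handler() dispatches through syscall_table, and with this change an out-of-range id is fatal for the caller via task_kill_self(true) rather than the old task_kill() plus thread_exit() pair. A sketch of the bounds-checked table dispatch; the handler and the exit() stand-in are hypothetical:

/* Sketch of the table dispatch in syscall_handler(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef intptr_t sysarg_t;
typedef sysarg_t (*syshandler_t)(sysarg_t, sysarg_t);

static sysarg_t sys_klog(sysarg_t a1, sysarg_t a2)
{
	printf("klog(%ld, %ld)\n", (long) a1, (long) a2);
	return 0;
}

static const syshandler_t syscall_table[] = {
	sys_klog,
};

#define SYSCALL_END  (sizeof(syscall_table) / sizeof(syscall_table[0]))

static sysarg_t syscall_handler(sysarg_t a1, sysarg_t a2, unsigned int id)
{
	if (id < SYSCALL_END)
		return syscall_table[id](a1, a2);

	/* Unknown syscall: in the kernel this kills the offending task. */
	fprintf(stderr, "Unknown syscall %u\n", id);
	exit(1);
}

int main(void)
{
	return (int) syscall_handler(1, 2, 0);
}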
kernel/generic/src/sysinfo/stats.c
r6e50466 rad7a6c9 160 160 static size_t get_task_virtmem(as_t *as) 161 161 { 162 size_t result = 0;163 164 162 /* 165 * We are holding some spinlocks here and therefore are not allowed to 166 * block. Only attempt to lock the address space and address space area 167 * mutexes conditionally. If it is not possible to lock either object, 168 * allow the statistics to be inexact by skipping the respective object. 169 * 170 * Note that it may be infinitely better to let the address space 171 * management code compute these statistics as it proceeds instead of 172 * having them calculated here over and over again here. 163 * We are holding spinlocks here and therefore are not allowed to 164 * block. Only attempt to lock the address space and address space 165 * area mutexes conditionally. If it is not possible to lock either 166 * object, return inexact statistics by skipping the respective object. 173 167 */ 174 168 175 169 if (SYNCH_FAILED(mutex_trylock(&as->lock))) 176 return result * PAGE_SIZE; 170 return 0; 171 172 size_t pages = 0; 177 173 178 174 /* Walk the B+ tree and count pages */ … … 189 185 if (SYNCH_FAILED(mutex_trylock(&area->lock))) 190 186 continue; 191 result += area->pages; 187 188 pages += area->pages; 192 189 mutex_unlock(&area->lock); 193 190 } … … 196 193 mutex_unlock(&as->lock); 197 194 198 return result * PAGE_SIZE; 195 return (pages << PAGE_WIDTH); 196 } 197 198 /** Get the resident (used) size of a virtual address space 199 * 200 * @param as Address space. 201 * 202 * @return Size of the resident (used) virtual address space (bytes). 203 * 204 */ 205 static size_t get_task_resmem(as_t *as) 206 { 207 /* 208 * We are holding spinlocks here and therefore are not allowed to 209 * block. Only attempt to lock the address space and address space 210 * area mutexes conditionally. If it is not possible to lock either 211 * object, return inexact statistics by skipping the respective object. 212 */ 213 214 if (SYNCH_FAILED(mutex_trylock(&as->lock))) 215 return 0; 216 217 size_t pages = 0; 218 219 /* Walk the B+ tree and count pages */ 220 link_t *cur; 221 for (cur = as->as_area_btree.leaf_head.next; 222 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 223 btree_node_t *node = 224 list_get_instance(cur, btree_node_t, leaf_link); 225 226 unsigned int i; 227 for (i = 0; i < node->keys; i++) { 228 as_area_t *area = node->value[i]; 229 230 if (SYNCH_FAILED(mutex_trylock(&area->lock))) 231 continue; 232 233 pages += area->resident; 234 mutex_unlock(&area->lock); 235 } 236 } 237 238 mutex_unlock(&as->lock); 239 240 return (pages << PAGE_WIDTH); 199 241 } 200 242 … … 215 257 str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name); 216 258 stats_task->virtmem = get_task_virtmem(task->as); 259 stats_task->resmem = get_task_resmem(task->as); 217 260 stats_task->threads = atomic_get(&task->refcount); 218 261 task_get_accounting(task, &(stats_task->ucycles), -
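get_task_resmem() mirrors get_task_virtmem(): because the caller already holds spinlocks and must not block, each address space and area mutex is only trylocked and busy objects are skipped, so the statistics are best-effort rather than exact. A sketch of that pattern over a plain array of areas:

/* Sketch of the best-effort accounting walk used by the stats code. */
#include <pthread.h>
#include <stdio.h>

#define PAGE_WIDTH 12

typedef struct {
	pthread_mutex_t lock;
	size_t resident;   /* pages, maintained by used_space_*() */
} area_t;

static size_t get_resmem(area_t *areas, size_t count)
{
	size_t pages = 0;

	for (size_t i = 0; i < count; i++) {
		/* Never block: skip areas that are busy right now. */
		if (pthread_mutex_trylock(&areas[i].lock) != 0)
			continue;
		pages += areas[i].resident;
		pthread_mutex_unlock(&areas[i].lock);
	}

	return pages << PAGE_WIDTH;   /* convert pages to bytes */
}

int main(void)
{
	area_t areas[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, 3 },
		{ PTHREAD_MUTEX_INITIALIZER, 5 },
	};
	printf("resident: %zu bytes\n", get_resmem(areas, 2));
	return 0;
}

Note the pay-off of the new area->resident counter maintained by used_space_insert()/used_space_remove(): the resident size is read in O(1) per area instead of walking the used_space tree here over and over again.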
kernel/generic/src/sysinfo/sysinfo.c
r6e50466 rad7a6c9 40 40 #include <arch/asm.h> 41 41 #include <errno.h> 42 #include <macros.h> 42 43 43 44 /** Maximal sysinfo path length */ … … 761 762 * character must be null). 762 763 * 763 * The user space buffer must be sized exactly according 764 * to the size of the binary data, otherwise the request 765 * fails. 764 * If the user space buffer size does not equal 765 * the actual size of the returned data, the data 766 * is truncated. Whether this is actually a fatal 767 * error or the data can be still interpreted as valid 768 * depends on the nature of the data and has to be 769 * decided by the user space. 770 * 771 * The actual size of data returned is stored to 772 * size_ptr. 766 773 * 767 774 * @param path_ptr Sysinfo path in the user address space. … … 770 777 * to store the binary data. 771 778 * @param buffer_size User space buffer size. 779 * @param size_ptr User space pointer where to store the 780 * binary data size. 772 781 * 773 782 * @return Error code (EOK in case of no error). … … 775 784 */ 776 785 sysarg_t sys_sysinfo_get_data(void *path_ptr, size_t path_size, 777 void *buffer_ptr, size_t buffer_size )786 void *buffer_ptr, size_t buffer_size, size_t *size_ptr) 778 787 { 779 788 int rc; 780 789 781 790 /* Get the item */ 782 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, false); 783 791 sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, 792 false); 793 784 794 /* Only constant or generated binary data is considered */ 785 if ((ret.tag == SYSINFO_VAL_DATA) || (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) { 786 /* Check destination buffer size */ 787 if (ret.data.size == buffer_size) 788 rc = copy_to_uspace(buffer_ptr, ret.data.data, 789 ret.data.size); 790 else 791 rc = ENOMEM; 795 if ((ret.tag == SYSINFO_VAL_DATA) || 796 (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) { 797 size_t size = min(ret.data.size, buffer_size); 798 rc = copy_to_uspace(buffer_ptr, ret.data.data, size); 799 if (rc == EOK) 800 rc = copy_to_uspace(size_ptr, &size, sizeof(size)); 792 801 } else 793 802 rc = EINVAL; -
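sys_sysinfo_get_data() now copies min(actual, buffer) bytes and reports the copied size through size_ptr, leaving it to user space to decide whether truncated data is still usable. A sketch of that contract, with memcpy standing in for copy_to_uspace():

/* Sketch of the relaxed copy-out contract of sys_sysinfo_get_data(). */
#include <stdio.h>
#include <string.h>

#define min(a, b)  (((a) < (b)) ? (a) : (b))

static int get_data(const void *data, size_t data_size,
    void *buffer, size_t buffer_size, size_t *size_ptr)
{
	size_t size = min(data_size, buffer_size);
	memcpy(buffer, data, size);   /* may be a truncated copy */
	*size_ptr = size;             /* tell the caller how much it got */
	return 0;
}

int main(void)
{
	const char data[] = "0123456789";
	char buf[4];
	size_t got;

	get_data(data, sizeof(data), buf, sizeof(buf), &got);
	printf("copied %zu of %zu bytes\n", got, sizeof(data));
	return 0;
}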
kernel/generic/src/time/clock.c
r6e50466 rad7a6c9 93 93 clock_parea.pbase = (uintptr_t) faddr; 94 94 clock_parea.frames = 1; 95 clock_parea.unpriv = true; 95 96 ddi_parea_register(&clock_parea); 96 97 … … 100 101 * 101 102 */ 102 sysinfo_set_item_val("clock.cacheable", NULL, (sysarg_t) true);103 103 sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr); 104 104 }
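The clock.c hunk marks the exported clock frame with the new unpriv flag, so mapping the uptime page no longer requires special privilege, and drops the clock.cacheable sysinfo item while keeping clock.faddr. A sketch of the physical-area export pattern; the types, the frame address, and the registration function are illustrative stand-ins:

/* Sketch of the parea export changed above: the kernel fills in a
 * descriptor for the frame holding the uptime structure and lets any
 * task map it. Everything here is hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t pbase;   /* physical base of the exported area */
	size_t frames;     /* length in frames */
	bool unpriv;       /* mappable by unprivileged tasks */
} parea_t;

static void ddi_parea_register(parea_t *parea)
{
	printf("exported %zu frame(s) at %#lx (unpriv: %d)\n",
	    parea->frames, (unsigned long) parea->pbase, parea->unpriv);
}

int main(void)
{
	parea_t clock_parea = {
		.pbase = 0x000fd000,   /* illustrative frame address */
		.frames = 1,
		.unpriv = true,   /* the new bit: no privilege needed */
	};
	ddi_parea_register(&clock_parea);
	return 0;
}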