Changeset 8b655705 in mainline for kernel/generic/src
- Timestamp:
- 2011-04-15T19:38:07Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 9dd730d1
- Parents:
- 6b9e85b (diff), b2fb47f (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- kernel/generic/src
- Files:
- 24 edited
- 1 moved
kernel/generic/src/adt/avl.c
r6b9e85b r8b655705 723 723 void avltree_walk(avltree_t *t, avltree_walker_t walker, void *arg) 724 724 { 725 _avltree_walk(t->root, walker, arg); 725 if (t->root) 726 _avltree_walk(t->root, walker, arg); 726 727 } 727 728 -
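Note: the avl.c hunk above guards avltree_walk() against an empty tree, turning a walk of a NULL root into a no-op instead of a NULL dereference. A minimal sketch of the pattern, using hypothetical stand-in types rather than the kernel's avltree_t:

    #include <stddef.h>

    typedef struct node {
        struct node *lft;
        struct node *rgt;
    } node_t;

    typedef void (*walker_t)(node_t *, void *);

    /* Recursive in-order walk; assumes a non-NULL subtree root. */
    static void _walk(node_t *node, walker_t walker, void *arg)
    {
        if (node->lft)
            _walk(node->lft, walker, arg);
        walker(node, arg);
        if (node->rgt)
            _walk(node->rgt, walker, arg);
    }

    /* Entry point: the added guard makes walking an empty tree safe. */
    void tree_walk(node_t *root, walker_t walker, void *arg)
    {
        if (root)
            _walk(root, walker, arg);
    }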
kernel/generic/src/console/cmd.c
r6b9e85b r8b655705 78 78 static cmd_info_t help_info = { 79 79 .name = "help", 80 .description = "List ofsupported commands.",80 .description = "List supported commands.", 81 81 .func = cmd_help, 82 82 .argc = 0 83 83 }; 84 84 85 /* Data and methods for 'reboot' command. */ 85 86 static int cmd_reboot(cmd_arg_t *argv); 86 87 static cmd_info_t reboot_info = { 87 88 .name = "reboot", 88 .description = "Reboot .",89 .description = "Reboot system.", 89 90 .func = cmd_reboot, 90 91 .argc = 0 91 92 }; 92 93 94 /* Data and methods for 'uptime' command. */ 93 95 static int cmd_uptime(cmd_arg_t *argv); 94 96 static cmd_info_t uptime_info = { 95 97 .name = "uptime", 96 .description = " Print uptime information.",98 .description = "Show system uptime.", 97 99 .func = cmd_uptime, 98 100 .argc = 0 99 101 }; 100 102 103 /* Data and methods for 'continue' command. */ 101 104 static int cmd_continue(cmd_arg_t *argv); 102 105 static cmd_info_t continue_info = { … … 108 111 109 112 #ifdef CONFIG_TEST 113 114 /* Data and methods for 'test' command. */ 110 115 static char test_buf[MAX_CMDLINE + 1]; 111 116 static int cmd_test(cmd_arg_t *argv); … … 119 124 static cmd_info_t test_info = { 120 125 .name = "test", 121 .description = " Print list ofkernel tests or run a test.",126 .description = "<test> List kernel tests or run a test.", 122 127 .func = cmd_test, 123 128 .argc = 1, … … 125 130 }; 126 131 132 /* Data and methods for 'bench' command. */ 127 133 static int cmd_bench(cmd_arg_t *argv); 128 134 static cmd_arg_t bench_argv[] = { … … 138 144 static cmd_info_t bench_info = { 139 145 .name = "bench", 140 .description = " Run kernel test as benchmark.",146 .description = "<test> <count> Run kernel test as benchmark.", 141 147 .func = cmd_bench, 142 148 .argc = 2, 143 149 .argv = bench_argv 144 150 }; 145 #endif 151 152 #endif /* CONFIG_TEST */ 146 153 147 154 /* Data and methods for 'description' command. */ 148 155 static int cmd_desc(cmd_arg_t *argv); 149 156 static void desc_help(void); 150 static char desc_buf[MAX_CMDLINE +1];157 static char desc_buf[MAX_CMDLINE + 1]; 151 158 static cmd_arg_t desc_argv = { 152 159 .type = ARG_TYPE_STRING, … … 156 163 static cmd_info_t desc_info = { 157 164 .name = "describe", 158 .description = " Describe specified command.",165 .description = "<command> Describe specified command.", 159 166 .help = desc_help, 160 167 .func = cmd_desc, … … 165 172 /* Data and methods for 'symaddr' command. */ 166 173 static int cmd_symaddr(cmd_arg_t *argv); 167 static char symaddr_buf[MAX_CMDLINE +1];174 static char symaddr_buf[MAX_CMDLINE + 1]; 168 175 static cmd_arg_t symaddr_argv = { 169 176 .type = ARG_TYPE_STRING, … … 173 180 static cmd_info_t symaddr_info = { 174 181 .name = "symaddr", 175 .description = " Return symbol address.",182 .description = "<symbol> Return symbol address.", 176 183 .func = cmd_symaddr, 177 184 .argc = 1, … … 179 186 }; 180 187 181 static char set_buf[MAX_CMDLINE+1]; 188 /* Data and methods for 'set4' command. 
*/ 189 static char set_buf[MAX_CMDLINE + 1]; 182 190 static int cmd_set4(cmd_arg_t *argv); 183 191 static cmd_arg_t set4_argv[] = { … … 193 201 static cmd_info_t set4_info = { 194 202 .name = "set4", 195 .description = " set <dest_addr> <value> - 4byte version",203 .description = "<addr> <value> Set 4B memory location to a value.", 196 204 .func = cmd_set4, 197 205 .argc = 2, … … 213 221 static cmd_info_t call0_info = { 214 222 .name = "call0", 215 .description = " call0 <function> -> call function().",223 .description = "<function> Call function().", 216 224 .func = cmd_call0, 217 225 .argc = 1, … … 228 236 static cmd_info_t mcall0_info = { 229 237 .name = "mcall0", 230 .description = " mcall0 <function> -> call function() on each CPU.",238 .description = "<function> Call function() on each CPU.", 231 239 .func = cmd_mcall0, 232 240 .argc = 1, … … 250 258 static cmd_info_t call1_info = { 251 259 .name = "call1", 252 .description = " call1 <function> <arg1> -> call function(arg1).",260 .description = "<function> <arg1> Call function(arg1).", 253 261 .func = cmd_call1, 254 262 .argc = 2, … … 277 285 static cmd_info_t call2_info = { 278 286 .name = "call2", 279 .description = " call2 <function> <arg1> <arg2> -> call function(arg1,arg2).",287 .description = "<function> <arg1> <arg2> Call function(arg1, arg2).", 280 288 .func = cmd_call2, 281 289 .argc = 3, … … 310 318 static cmd_info_t call3_info = { 311 319 .name = "call3", 312 .description = " call3 <function> <arg1> <arg2> <arg3> -> call function(arg1,arg2,arg3).",320 .description = "<function> <arg1> <arg2> <arg3> Call function(arg1, arg2, arg3).", 313 321 .func = cmd_call3, 314 322 .argc = 4, … … 340 348 cmd_info_t tlb_info = { 341 349 .name = "tlb", 342 .description = "Print TLB of current processor.",350 .description = "Print TLB of the current CPU.", 343 351 .help = NULL, 344 352 .func = cmd_tlb, … … 377 385 }; 378 386 387 #ifdef CONFIG_UDEBUG 388 389 /* Data and methods for 'btrace' command */ 390 static int cmd_btrace(cmd_arg_t *argv); 391 static cmd_arg_t btrace_argv = { 392 .type = ARG_TYPE_INT, 393 }; 394 static cmd_info_t btrace_info = { 395 .name = "btrace", 396 .description = "<threadid> Show thread stack trace.", 397 .func = cmd_btrace, 398 .argc = 1, 399 .argv = &btrace_argv 400 }; 401 402 #endif /* CONFIG_UDEBUG */ 379 403 380 404 static int cmd_sched(cmd_arg_t *argv); 381 405 static cmd_info_t sched_info = { 382 406 .name = "scheduler", 383 .description = " List allscheduler information.",407 .description = "Show scheduler information.", 384 408 .func = cmd_sched, 385 409 .argc = 0 … … 406 430 static cmd_info_t zones_info = { 407 431 .name = "zones", 408 .description = "List ofmemory zones.",432 .description = "List memory zones.", 409 433 .func = cmd_zones, 410 434 .argc = 0 435 }; 436 437 /* Data and methods for 'zone' command */ 438 static int cmd_zone(cmd_arg_t *argv); 439 static cmd_arg_t zone_argv = { 440 .type = ARG_TYPE_INT, 441 }; 442 443 static cmd_info_t zone_info = { 444 .name = "zone", 445 .description = "<zone> Show memory zone structure.", 446 .func = cmd_zone, 447 .argc = 1, 448 .argv = &zone_argv 411 449 }; 412 450 … … 418 456 static cmd_info_t ipc_info = { 419 457 .name = "ipc", 420 .description = " ipc <taskid> Show IPC information of giventask.",458 .description = "<taskid> Show IPC information of a task.", 421 459 .func = cmd_ipc, 422 460 .argc = 1, … … 431 469 static cmd_info_t kill_info = { 432 470 .name = "kill", 433 .description = " kill<taskid> Kill a task.",471 .description = "<taskid> Kill a 
task.", 434 472 .func = cmd_kill, 435 473 .argc = 1, 436 474 .argv = &kill_argv 437 };438 439 /* Data and methods for 'zone' command */440 static int cmd_zone(cmd_arg_t *argv);441 static cmd_arg_t zone_argv = {442 .type = ARG_TYPE_INT,443 };444 445 static cmd_info_t zone_info = {446 .name = "zone",447 .description = "Show memory zone structure.",448 .func = cmd_zone,449 .argc = 1,450 .argv = &zone_argv451 475 }; 452 476 … … 482 506 &cpus_info, 483 507 &desc_info, 484 &reboot_info,485 &uptime_info,486 508 &halt_info, 487 509 &help_info, 488 510 &ipc_info, 489 511 &kill_info, 512 &physmem_info, 513 &reboot_info, 514 &sched_info, 490 515 &set4_info, 491 516 &slabs_info, 517 &symaddr_info, 492 518 &sysinfo_info, 493 &symaddr_info, 494 &sched_info, 519 &tasks_info, 495 520 &threads_info, 496 &tasks_info,497 &physmem_info,498 521 &tlb_info, 522 &uptime_info, 499 523 &version_info, 500 524 &zones_info, … … 504 528 &bench_info, 505 529 #endif 530 #ifdef CONFIG_UDEBUG 531 &btrace_info, 532 #endif 506 533 NULL 507 534 }; … … 526 553 for (i = 0; basic_commands[i]; i++) { 527 554 cmd_initialize(basic_commands[i]); 528 if (!cmd_register(basic_commands[i])) 529 printf("Cannot register command %s\n", basic_commands[i]->name); 530 } 531 } 532 555 } 556 557 for (i = 0; basic_commands[i]; i++) { 558 if (!cmd_register(basic_commands[i])) { 559 printf("Cannot register command %s\n", 560 basic_commands[i]->name); 561 } 562 } 563 } 533 564 534 565 /** List supported commands. … … 574 605 } 575 606 576 577 607 /** Reboot the system. 578 608 * … … 588 618 return 1; 589 619 } 590 591 620 592 621 /** Print system uptime information. … … 824 853 } 825 854 826 827 855 /** Print detailed description of 'describe' command. */ 828 856 void desc_help(void) … … 911 939 * @return Always 1 912 940 */ 913 int cmd_slabs(cmd_arg_t * 941 int cmd_slabs(cmd_arg_t *argv) 914 942 { 915 943 slab_print_list(); … … 923 951 * @return Always 1 924 952 */ 925 int cmd_sysinfo(cmd_arg_t * 953 int cmd_sysinfo(cmd_arg_t *argv) 926 954 { 927 955 sysinfo_dump(NULL); … … 929 957 } 930 958 931 932 /** Command for listings Thread information 959 /** Command for listing thread information 933 960 * 934 961 * @param argv Ignored … … 948 975 } 949 976 950 /** Command for listing s Task information977 /** Command for listing task information 951 978 * 952 979 * @param argv Ignored … … 966 993 } 967 994 968 /** Command for listings Thread information 995 #ifdef CONFIG_UDEBUG 996 997 /** Command for printing thread stack trace 998 * 999 * @param argv Integer argument from cmdline expected 1000 * 1001 * return Always 1 1002 * 1003 */ 1004 int cmd_btrace(cmd_arg_t *argv) 1005 { 1006 thread_stack_trace(argv[0].intval); 1007 return 1; 1008 } 1009 1010 #endif /* CONFIG_UDEBUG */ 1011 1012 /** Command for printing scheduler information 969 1013 * 970 1014 * @param argv Ignores … … 972 1016 * @return Always 1 973 1017 */ 974 int cmd_sched(cmd_arg_t * 1018 int cmd_sched(cmd_arg_t *argv) 975 1019 { 976 1020 sched_print_list(); … … 984 1028 * return Always 1 985 1029 */ 986 int cmd_zones(cmd_arg_t * 1030 int cmd_zones(cmd_arg_t *argv) 987 1031 { 988 1032 zones_print_list(); … … 996 1040 * return Always 1 997 1041 */ 998 int cmd_zone(cmd_arg_t * 1042 int cmd_zone(cmd_arg_t *argv) 999 1043 { 1000 1044 zone_print_one(argv[0].intval); … … 1002 1046 } 1003 1047 1004 /** Command for printing task ipcdetails1048 /** Command for printing task IPC details 1005 1049 * 1006 1050 * @param argv Integer argument from cmdline expected … … 1008 1052 * return Always 1 1009 1053 
*/ 1010 int cmd_ipc(cmd_arg_t * 1054 int cmd_ipc(cmd_arg_t *argv) 1011 1055 { 1012 1056 ipc_print_task(argv[0].intval); … … 1020 1064 * return 0 on failure, 1 on success. 1021 1065 */ 1022 int cmd_kill(cmd_arg_t * 1066 int cmd_kill(cmd_arg_t *argv) 1023 1067 { 1024 1068 if (task_kill(argv[0].intval) != EOK) -
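Note: the cmd.c hunk normalizes command descriptions to a "<arg> Description." convention, sorts the basic_commands table, and adds the btrace and zone commands. A simplified sketch of the descriptor pattern the file uses, with stand-in types instead of the kernel's headers:

    #include <stdio.h>

    typedef struct {
        long intval;
    } cmd_arg_t;

    typedef struct {
        const char *name;
        const char *description;
        int (*func)(cmd_arg_t *);
        unsigned int argc;
        cmd_arg_t *argv;
    } cmd_info_t;

    static int cmd_btrace(cmd_arg_t *argv)
    {
        printf("stack trace of thread %ld requested\n", argv[0].intval);
        return 1;
    }

    static cmd_arg_t btrace_argv = {
        .intval = 0
    };

    /* One descriptor per command; the "<threadid> ..." prefix documents
     * the expected argument, following the convention introduced here. */
    static cmd_info_t btrace_info = {
        .name = "btrace",
        .description = "<threadid> Show thread stack trace.",
        .func = cmd_btrace,
        .argc = 1,
        .argv = &btrace_argv
    };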
kernel/generic/src/console/console.c
r6b9e85b r8b655705 160 160 klog_parea.pbase = (uintptr_t) faddr; 161 161 klog_parea.frames = SIZE2FRAMES(sizeof(klog)); 162 klog_parea.unpriv = false; 162 163 ddi_parea_register(&klog_parea); 163 164 -
kernel/generic/src/ddi/ddi.c
r6b9e85b r8b655705 104 104 { 105 105 ASSERT(TASK); 106 ASSERT((pf % FRAME_SIZE) == 0); 107 ASSERT((vp % PAGE_SIZE) == 0); 108 109 /* 110 * Make sure the caller is authorised to make this syscall. 111 */ 112 cap_t caps = cap_get(TASK); 113 if (!(caps & CAP_MEM_MANAGER)) 114 return EPERM; 106 107 if ((pf % FRAME_SIZE) != 0) 108 return EBADMEM; 109 110 if ((vp % PAGE_SIZE) != 0) 111 return EBADMEM; 112 113 /* 114 * Unprivileged tasks are only allowed to map pareas 115 * which are explicitly marked as such. 116 */ 117 bool priv = 118 ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER); 115 119 116 120 mem_backend_data_t backend_data; … … 123 127 124 128 if (znum == (size_t) -1) { 125 /* Frames not found in any zones 126 * -> assume it is hardware device and allow mapping 129 /* 130 * Frames not found in any zone 131 * -> assume it is a hardware device and allow mapping 132 * for privileged tasks. 127 133 */ 128 134 irq_spinlock_unlock(&zones.lock, true); 135 136 if (!priv) 137 return EPERM; 138 129 139 goto map; 130 140 } 131 141 132 142 if (zones.info[znum].flags & ZONE_FIRMWARE) { 133 /* Frames are part of firmware */ 143 /* 144 * Frames are part of firmware 145 * -> allow mapping for privileged tasks. 146 */ 134 147 irq_spinlock_unlock(&zones.lock, true); 148 149 if (!priv) 150 return EPERM; 151 135 152 goto map; 136 153 } … … 138 155 if (zone_flags_available(zones.info[znum].flags)) { 139 156 /* 140 * Frames are part of physical memory, check if the memory141 * region is enabled for mapping.157 * Frames are part of physical memory, check 158 * if the memory region is enabled for mapping. 142 159 */ 143 160 irq_spinlock_unlock(&zones.lock, true); … … 150 167 if ((!parea) || (parea->frames < pages)) { 151 168 mutex_unlock(&parea_lock); 152 goto err; 169 return ENOENT; 170 } 171 172 if (!priv) { 173 if (!parea->unpriv) { 174 mutex_unlock(&parea_lock); 175 return EPERM; 176 } 153 177 } 154 178 … … 158 182 159 183 irq_spinlock_unlock(&zones.lock, true); 160 161 err:162 184 return ENOENT; 163 185 -
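Note: the ddi.c hunk replaces the blanket CAP_MEM_MANAGER check in sys_physmem_map() with a per-case policy: frames of hardware devices and firmware remain mappable by privileged tasks only, while ordinary registered pareas may opt in to unprivileged mapping through the new unpriv flag. A compact, hypothetical model of the parea decision (simplified, not the kernel's code):

    #include <stdbool.h>
    #include <stddef.h>

    enum { EOK, EPERM, ENOENT };

    typedef struct {
        size_t frames;
        bool unpriv;  /* parea explicitly mappable by unprivileged tasks */
    } parea_t;

    /* priv reflects CAP_MEM_MANAGER; parea is NULL if nothing is registered. */
    static int physmem_map_check(bool priv, parea_t *parea, size_t pages)
    {
        if ((!parea) || (parea->frames < pages))
            return ENOENT;

        /* Unprivileged tasks may map only explicitly marked pareas. */
        if ((!priv) && (!parea->unpriv))
            return EPERM;

        return EOK;
    }

This is also why the console.c hunk above and the rd.c hunk below initialize klog_parea.unpriv and rd_parea.unpriv to false: existing pareas keep requiring privilege.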
kernel/generic/src/interrupt/interrupt.c
r6b9e85b r8b655705 45 45 #include <console/console.h> 46 46 #include <console/cmd.h> 47 #include <ipc/event.h>48 47 #include <synch/mutex.h> 49 48 #include <time/delay.h> … … 188 187 printf("\n"); 189 188 190 /* 191 * Userspace can subscribe for FAULT events to take action 192 * whenever a thread faults. (E.g. take a dump, run a debugger). 193 * The notification is always available, but unless Udebug is enabled, 194 * that's all you get. 195 */ 196 if (event_is_subscribed(EVENT_FAULT)) { 197 /* Notify the subscriber that a fault occurred. */ 198 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 199 UPPER32(TASK->taskid), (sysarg_t) THREAD); 200 201 #ifdef CONFIG_UDEBUG 202 /* Wait for a debugging session. */ 203 udebug_thread_fault(); 204 #endif 205 } 206 207 task_kill(TASK->taskid); 208 thread_exit(); 189 task_kill_self(true); 209 190 } 210 191 -
kernel/generic/src/ipc/ipc.c
r6b9e85b r8b655705 295 295 atomic_inc(&phone->active_calls); 296 296 call->data.phone = phone; 297 call->data.task = TASK; 297 298 } 298 299 … … 406 407 call->caller_phone = call->data.phone; 407 408 call->data.phone = newphone; 409 call->data.task = TASK; 408 410 } 409 411 … … 688 690 irq_spinlock_exchange(&tasks_lock, &task->lock); 689 691 690 /* Print opened phones & details */ 691 printf("PHONE:\n"); 692 printf("[phone id] [calls] [state\n"); 692 693 693 694 size_t i; 694 695 for (i = 0; i < IPC_MAX_PHONES; i++) { 695 696 if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { 696 printf("% zu: mutex busy\n", i);697 printf("%-10zu (mutex busy)\n", i); 697 698 continue; 698 699 } 699 700 700 701 if (task->phones[i].state != IPC_PHONE_FREE) { 701 printf("%zu: ", i); 702 printf("%-10zu %7" PRIun " ", i, 703 atomic_get(&task->phones[i].active_calls)); 702 704 703 705 switch (task->phones[i].state) { 704 706 case IPC_PHONE_CONNECTING: 705 printf("connecting 707 printf("connecting"); 706 708 break; 707 709 case IPC_PHONE_CONNECTED: 708 printf("connected to : %p (%" PRIu64 ")",709 task->phones[i].callee ,710 task->phones[i].callee->task-> taskid);710 printf("connected to %" PRIu64 " (%s)", 711 task->phones[i].callee->task->taskid, 712 task->phones[i].callee->task->name); 711 713 break; 712 714 case IPC_PHONE_SLAMMED: 713 printf("slammed by : %p ",715 printf("slammed by %p", 714 716 task->phones[i].callee); 715 717 break; 716 718 case IPC_PHONE_HUNGUP: 717 printf("hung up - was: %p ",719 printf("hung up by %p", 718 720 task->phones[i].callee); 719 721 break; … … 722 724 } 723 725 724 printf("active: %" PRIun "\n", 725 atomic_get(&task->phones[i].active_calls)); 726 printf("\n"); 726 727 } 727 728 … … 731 732 irq_spinlock_lock(&task->answerbox.lock, false); 732 733 734 #ifdef __32_BITS__ 735 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4] [arg5]" 736 " [flags] [sender\n"); 737 #endif 738 739 #ifdef __64_BITS__ 740 printf("[call id ] [method] [arg1] [arg2] [arg3] [arg4]" 741 " [arg5] [flags] [sender\n"); 742 #endif 743 733 744 link_t *cur; 734 745 735 /* Print answerbox - calls */ 736 printf("ABOX - CALLS:\n"); 746 printf(" --- incomming calls ---\n"); 737 747 for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls; 738 748 cur = cur->next) { 739 749 call_t *call = list_get_instance(cur, call_t, link); 740 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 741 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 742 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 743 call->sender->taskid, 750 751 #ifdef __32_BITS__ 752 printf("%10p ", call); 753 #endif 754 755 #ifdef __64_BITS__ 756 printf("%18p ", call); 757 #endif 758 759 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 760 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 744 761 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 745 762 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 746 763 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 747 call->flags); 748 } 749 750 /* Print answerbox - dispatched calls */ 751 printf("ABOX - DISPATCHED CALLS:\n"); 764 call->flags, call->sender->taskid, call->sender->name); 765 } 766 767 printf(" --- dispatched calls ---\n"); 752 768 for (cur = task->answerbox.dispatched_calls.next; 753 769 cur != &task->answerbox.dispatched_calls; 754 770 cur = cur->next) { 755 771 call_t *call = list_get_instance(cur, call_t, link); 756 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 757 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun 758 " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, 
759 call->sender->taskid, 772 773 #ifdef __32_BITS__ 774 printf("%10p ", call); 775 #endif 776 777 #ifdef __64_BITS__ 778 printf("%18p ", call); 779 #endif 780 781 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 782 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 760 783 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 761 784 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 762 785 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 763 call->flags); 764 } 765 766 /* Print answerbox - answers */ 767 printf("ABOX - ANSWERS:\n"); 786 call->flags, call->sender->taskid, call->sender->name); 787 } 788 789 printf(" --- incoming answers ---\n"); 768 790 for (cur = task->answerbox.answers.next; 769 791 cur != &task->answerbox.answers; 770 792 cur = cur->next) { 771 793 call_t *call = list_get_instance(cur, call_t, link); 772 printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun 773 " A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", 774 call, IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 794 795 #ifdef __32_BITS__ 796 printf("%10p ", call); 797 #endif 798 799 #ifdef __64_BITS__ 800 printf("%18p ", call); 801 #endif 802 803 printf("%-8" PRIun " %-6" PRIun " %-6" PRIun " %-6" PRIun 804 " %-6" PRIun " %-6" PRIun " %-7x %" PRIu64 " (%s)\n", 805 IPC_GET_IMETHOD(call->data), IPC_GET_ARG1(call->data), 775 806 IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), 776 807 IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), 777 call->flags );808 call->flags, call->sender->taskid, call->sender->name); 778 809 } 779 810 -
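Note: the ipc.c hunk reworks ipc_print_task() into fixed-width tables with headers. A tiny sketch of the pointer-width trick it relies on, where the call id column is sized by the kernel's __32_BITS__/__64_BITS__ build macros (modeled here with a fallback):

    #include <stdio.h>

    static void print_call_id(void *call)
    {
    #ifdef __32_BITS__
        printf("%10p ", call);   /* 32-bit pointers: narrow column */
    #else
        printf("%18p ", call);   /* assume 64-bit pointers otherwise */
    #endif
    }

    int main(void)
    {
        int dummy;
        print_call_id((void *) &dummy);
        printf("\n");
        return 0;
    }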
kernel/generic/src/ipc/irq.c
r6b9e85b r8b655705 42 42 * 43 43 * The structure of a notification message is as follows: 44 * - IMETHOD: interface and method as registered by the SYS_ IPC_REGISTER_IRQ44 * - IMETHOD: interface and method as registered by the SYS_REGISTER_IRQ 45 45 * syscall 46 46 * - ARG1: payload modified by a 'top-half' handler -
kernel/generic/src/ipc/sysipc.c
r6b9e85b r8b655705 248 248 /* The connection was accepted */ 249 249 phone_connect(phoneid, &answer->sender->answerbox); 250 /* Set 'task hash' as arg4 of response */ 251 IPC_SET_ARG4(answer->data, (sysarg_t) TASK); 250 252 /* Set 'phone hash' as arg5 of response */ 251 253 IPC_SET_ARG5(answer->data, … … 424 426 case IPC_M_DATA_READ: { 425 427 size_t size = IPC_GET_ARG2(call->data); 426 if ( (size <= 0 || (size > DATA_XFER_LIMIT)))428 if (size <= 0) 427 429 return ELIMIT; 428 430 if (size > DATA_XFER_LIMIT) { 431 int flags = IPC_GET_ARG3(call->data); 432 if (flags & IPC_XF_RESTRICT) 433 IPC_SET_ARG2(call->data, DATA_XFER_LIMIT); 434 else 435 return ELIMIT; 436 } 429 437 break; 430 438 } … … 433 441 size_t size = IPC_GET_ARG2(call->data); 434 442 435 if (size > DATA_XFER_LIMIT) 436 return ELIMIT; 443 if (size > DATA_XFER_LIMIT) { 444 int flags = IPC_GET_ARG3(call->data); 445 if (flags & IPC_XF_RESTRICT) { 446 size = DATA_XFER_LIMIT; 447 IPC_SET_ARG2(call->data, size); 448 } else 449 return ELIMIT; 450 } 437 451 438 452 call->buffer = (uint8_t *) malloc(size, 0); … … 1103 1117 * 1104 1118 */ 1105 sysarg_t sys_ ipc_register_irq(inr_t inr, devno_t devno, sysarg_t imethod,1119 sysarg_t sys_register_irq(inr_t inr, devno_t devno, sysarg_t imethod, 1106 1120 irq_code_t *ucode) 1107 1121 { … … 1120 1134 * 1121 1135 */ 1122 sysarg_t sys_ ipc_unregister_irq(inr_t inr, devno_t devno)1136 sysarg_t sys_unregister_irq(inr_t inr, devno_t devno) 1123 1137 { 1124 1138 if (!(cap_get(TASK) & CAP_IRQ_REG)) -
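Note: the sysipc.c hunk introduces IPC_XF_RESTRICT handling for IPC_M_DATA_READ and IPC_M_DATA_WRITE: an oversized transfer with the flag set is clamped to DATA_XFER_LIMIT (with ARG2 updated accordingly) instead of failing with ELIMIT. A self-contained model of the clamping rule, with placeholder constants rather than the kernel's values:

    #include <stddef.h>

    #define DATA_XFER_LIMIT  (64 * 1024)  /* placeholder value */
    #define IPC_XF_RESTRICT  0x1          /* placeholder flag bit */

    enum { EOK, ELIMIT };

    static int xfer_size_check(size_t *size, int flags)
    {
        if (*size == 0)
            return ELIMIT;

        if (*size > DATA_XFER_LIMIT) {
            if (!(flags & IPC_XF_RESTRICT))
                return ELIMIT;
            /* Cooperative peer: restrict the transfer instead of failing. */
            *size = DATA_XFER_LIMIT;
        }

        return EOK;
    }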
kernel/generic/src/lib/elf.c
r6b9e85b r8b655705 157 157 case PT_NULL: 158 158 case PT_PHDR: 159 case PT_NOTE: 159 160 break; 160 161 case PT_LOAD: … … 173 174 break; 174 175 case PT_SHLIB: 175 case PT_NOTE:176 176 case PT_LOPROC: 177 177 case PT_HIPROC: -
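Note: the elf.c hunk moves PT_NOTE from the unsupported program-header group to the ignored one, so ELF images carrying note segments now load instead of being rejected. A self-contained sketch of the dispatch, with stand-in enum values for the real ELF constants:

    #include <stdbool.h>

    enum p_type {
        PT_NULL, PT_LOAD, PT_NOTE, PT_SHLIB, PT_PHDR, PT_LOPROC, PT_HIPROC
    };

    static bool segment_acceptable(enum p_type type)
    {
        switch (type) {
        case PT_NULL:
        case PT_PHDR:
        case PT_NOTE:  /* ignored since this changeset, not rejected */
        case PT_LOAD:
            return true;
        default:
            return false;  /* PT_SHLIB, PT_LOPROC, PT_HIPROC, ... */
        }
    }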
kernel/generic/src/lib/memfnc.c
r6b9e85b r8b655705 1 /* 2 * Copyright (c) 2007 Jan Hudecek 3 * Copyright (c) 2008 Martin Decky 1 /* 2 * Copyright (c) 2011 Martin Decky 4 3 * All rights reserved. 5 4 * … … 28 27 */ 29 28 30 /** @addtogroup generic proc29 /** @addtogroup generic 31 30 * @{ 32 31 */ 33 /** @file tasklet.h 34 * @brief Tasklets declarations 32 33 /** 34 * @file 35 * @brief Memory string functions. 36 * 37 * This file provides architecture independent functions to manipulate blocks 38 * of memory. These functions are optimized as much as generic functions of 39 * this type can be. 35 40 */ 36 41 37 #i fndef KERN_TASKLET_H_38 # define KERN_TASKLET_H_42 #include <lib/memfnc.h> 43 #include <typedefs.h> 39 44 40 #include <adt/list.h> 45 /** Fill block of memory. 46 * 47 * Fill cnt bytes at dst address with the value val. 48 * 49 * @param dst Destination address to fill. 50 * @param val Value to fill. 51 * @param cnt Number of bytes to fill. 52 * 53 * @return Destination address. 54 * 55 */ 56 void *memset(void *dst, int val, size_t cnt) 57 { 58 size_t i; 59 uint8_t *ptr = (uint8_t *) dst; 60 61 for (i = 0; i < cnt; i++) 62 ptr[i] = val; 63 64 return dst; 65 } 41 66 42 /** Tasklet callback type */ 43 typedef void (* tasklet_callback_t)(void *arg); 44 45 /** Tasklet state */ 46 typedef enum { 47 NotActive, 48 Scheduled, 49 InProgress, 50 Disabled 51 } tasklet_state_t; 52 53 /** Structure describing a tasklet */ 54 typedef struct tasklet_descriptor { 55 link_t link; 67 /** Move memory block without overlapping. 68 * 69 * Copy cnt bytes from src address to dst address. The source 70 * and destination memory areas cannot overlap. 71 * 72 * @param dst Destination address to copy to. 73 * @param src Source address to copy from. 74 * @param cnt Number of bytes to copy. 75 * 76 * @return Destination address. 77 * 78 */ 79 void *memcpy(void *dst, const void *src, size_t cnt) 80 { 81 uint8_t *dp = (uint8_t *) dst; 82 const uint8_t *sp = (uint8_t *) src; 56 83 57 /** Callback to call */58 tasklet_callback_t callback;84 while (cnt-- != 0) 85 *dp++ = *sp++; 59 86 60 /** Argument passed to the callback */ 61 void *arg; 62 63 /** State of the tasklet */ 64 tasklet_state_t state; 65 } tasklet_descriptor_t; 66 67 68 extern void tasklet_init(void); 69 70 #endif 87 return dst; 88 } 71 89 72 90 /** @} -
kernel/generic/src/lib/memstr.c
r6b9e85b r8b655705 28 28 */ 29 29 30 /** @addtogroup generic 30 /** @addtogroup generic 31 31 * @{ 32 32 */ … … 34 34 /** 35 35 * @file 36 * @brief 36 * @brief Memory string operations. 37 37 * 38 * This file provides architecture independent functions to manipulate blocks of 39 * memory. These functions are optimized as much as generic functions of this 40 * type can be. However, architectures are free to provide even more optimized 41 * versions of these functions. 38 * This file provides architecture independent functions to manipulate blocks 39 * of memory. These functions are optimized as much as generic functions of 40 * this type can be. 42 41 */ 43 42 44 43 #include <memstr.h> 45 44 #include <typedefs.h> 46 #include <align.h>47 45 48 /** Copyblock of memory.46 /** Fill block of memory. 49 47 * 50 * Copy cnt bytes from src address to dst address. The copying is done 51 * word-by-word and then byte-by-byte. The source and destination memory areas 52 * cannot overlap. 48 * Fill cnt bytes at dst address with the value val. 53 49 * 54 * @param src Source address to copy from.55 * @param dst Destination address to copy to.56 * @param cnt Number of bytes to copy.50 * @param dst Destination address to fill. 51 * @param cnt Number of bytes to fill. 52 * @param val Value to fill. 57 53 * 58 * @return Destination address.59 54 */ 60 void *_memcpy(void *dst, const void *src, size_t cnt)55 void memsetb(void *dst, size_t cnt, uint8_t val) 61 56 { 62 unsigned int i, j; 57 memset(dst, val, cnt); 58 } 59 60 /** Fill block of memory. 61 * 62 * Fill cnt words at dst address with the value val. The filling 63 * is done word-by-word. 64 * 65 * @param dst Destination address to fill. 66 * @param cnt Number of words to fill. 67 * @param val Value to fill. 68 * 69 */ 70 void memsetw(void *dst, size_t cnt, uint16_t val) 71 { 72 size_t i; 73 uint16_t *ptr = (uint16_t *) dst; 63 74 64 if (ALIGN_UP((uintptr_t) src, sizeof(sysarg_t)) != (uintptr_t) src || 65 ALIGN_UP((uintptr_t) dst, sizeof(sysarg_t)) != (uintptr_t) dst) { 66 for (i = 0; i < cnt; i++) 67 ((uint8_t *) dst)[i] = ((uint8_t *) src)[i]; 68 } else { 69 for (i = 0; i < cnt / sizeof(sysarg_t); i++) 70 ((sysarg_t *) dst)[i] = ((sysarg_t *) src)[i]; 71 72 for (j = 0; j < cnt % sizeof(sysarg_t); j++) 73 ((uint8_t *)(((sysarg_t *) dst) + i))[j] = 74 ((uint8_t *)(((sysarg_t *) src) + i))[j]; 75 } 76 77 return (char *) dst; 75 for (i = 0; i < cnt; i++) 76 ptr[i] = val; 78 77 } 79 78 80 79 /** Move memory block with possible overlapping. 81 80 * 82 * Copy cnt bytes from src address to dst address. The source and destination83 * memory areas may overlap.81 * Copy cnt bytes from src address to dst address. The source 82 * and destination memory areas may overlap. 84 83 * 85 * @param src Source address to copy from.86 * @param dst Destination address to copy to.87 * @param cnt 84 * @param dst Destination address to copy to. 85 * @param src Source address to copy from. 86 * @param cnt Number of bytes to copy. 88 87 * 89 * @return Destination address. 88 * @return Destination address. 89 * 90 90 */ 91 void *memmove(void *dst, const void *src, size_t n)91 void *memmove(void *dst, const void *src, size_t cnt) 92 92 { 93 const uint8_t *sp;94 uint8_t *dp;95 96 93 /* Nothing to do? */ 97 94 if (src == dst) 98 95 return dst; 99 96 100 97 /* Non-overlapping? 
*/ 101 if (dst >= src + n || src >= dst + n) { 102 return memcpy(dst, src, n); 103 } 104 98 if ((dst >= src + cnt) || (src >= dst + cnt)) 99 return memcpy(dst, src, cnt); 100 101 uint8_t *dp; 102 const uint8_t *sp; 103 105 104 /* Which direction? */ 106 105 if (src > dst) { 107 106 /* Forwards. */ 107 dp = dst; 108 108 sp = src; 109 dp = dst; 110 111 while (n-- != 0) 109 110 while (cnt-- != 0) 112 111 *dp++ = *sp++; 113 112 } else { 114 113 /* Backwards. */ 115 sp = src + (n- 1);116 dp = dst + (n- 1);117 118 while ( n-- != 0)114 dp = dst + (cnt - 1); 115 sp = src + (cnt - 1); 116 117 while (cnt-- != 0) 119 118 *dp-- = *sp--; 120 119 } 121 120 122 121 return dst; 123 }124 125 /** Fill block of memory126 *127 * Fill cnt bytes at dst address with the value x. The filling is done128 * byte-by-byte.129 *130 * @param dst Destination address to fill.131 * @param cnt Number of bytes to fill.132 * @param x Value to fill.133 *134 */135 void _memsetb(void *dst, size_t cnt, uint8_t x)136 {137 unsigned int i;138 uint8_t *p = (uint8_t *) dst;139 140 for (i = 0; i < cnt; i++)141 p[i] = x;142 }143 144 /** Fill block of memory.145 *146 * Fill cnt words at dst address with the value x. The filling is done147 * word-by-word.148 *149 * @param dst Destination address to fill.150 * @param cnt Number of words to fill.151 * @param x Value to fill.152 *153 */154 void _memsetw(void *dst, size_t cnt, uint16_t x)155 {156 unsigned int i;157 uint16_t *p = (uint16_t *) dst;158 159 for (i = 0; i < cnt; i++)160 p[i] = x;161 122 } 162 123 -
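Note: the memstr.c hunk rewrites memmove() to pick a copy direction from the operand order, which is what makes overlapping moves safe; non-overlapping cases are delegated to memcpy(). A short illustration of the overlap case using the standard C library function:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[16] = "abcdef";

        /* dst > src and the ranges overlap, so the copy must run
         * backwards, or source bytes would be clobbered before use. */
        memmove(buf + 2, buf, 6);
        buf[8] = '\0';

        puts(buf);  /* prints "ababcdef" */
        return 0;
    }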
kernel/generic/src/lib/rd.c
r6b9e85b r8b655705 90 90 FRAME_SIZE); 91 91 rd_parea.frames = SIZE2FRAMES(dsize); 92 rd_parea.unpriv = false; 92 93 ddi_parea_register(&rd_parea); 93 94 -
kernel/generic/src/main/main.c
r6b9e85b r8b655705 58 58 #include <proc/thread.h> 59 59 #include <proc/task.h> 60 #include <proc/tasklet.h>61 60 #include <main/kinit.h> 62 61 #include <main/version.h> … … 217 216 tlb_init(); 218 217 ddi_init(); 219 tasklet_init();220 218 arch_post_mm_init(); 221 219 arch_pre_smp_init(); -
kernel/generic/src/mm/as.c
r6b9e85b r8b655705 71 71 #include <memstr.h> 72 72 #include <macros.h> 73 #include <bitops.h> 73 74 #include <arch.h> 74 75 #include <errno.h> … … 82 83 * Each architecture decides what functions will be used to carry out 83 84 * address space operations such as creating or locking page tables. 84 *85 85 */ 86 86 as_operations_t *as_operations = NULL; 87 87 88 /** 89 * Slab for as_t objects. 88 /** Slab for as_t objects. 90 89 * 91 90 */ 92 91 static slab_cache_t *as_slab; 93 92 94 /** 95 * This lock serializes access to the ASID subsystem.96 * Itprotects:93 /** ASID subsystem lock. 94 * 95 * This lock protects: 97 96 * - inactive_as_with_asid_head list 98 97 * - as->asid for each as of the as_t type … … 103 102 104 103 /** 105 * This list contains address spaces that are not active on any 106 * processor and that have valid ASID. 107 * 104 * Inactive address spaces (on all processors) 105 * that have valid ASID. 108 106 */ 109 107 LIST_INITIALIZE(inactive_as_with_asid_head); … … 119 117 mutex_initialize(&as->lock, MUTEX_PASSIVE); 120 118 121 int rc = as_constructor_arch(as, flags); 122 123 return rc; 119 return as_constructor_arch(as, flags); 124 120 } 125 121 126 122 NO_TRACE static size_t as_destructor(void *obj) 127 123 { 128 as_t *as = (as_t *) obj; 129 return as_destructor_arch(as); 124 return as_destructor_arch((as_t *) obj); 130 125 } 131 126 … … 142 137 panic("Cannot create kernel address space."); 143 138 144 /* Make sure the kernel address space 139 /* 140 * Make sure the kernel address space 145 141 * reference count never drops to zero. 146 142 */ … … 191 187 { 192 188 DEADLOCK_PROBE_INIT(p_asidlock); 193 189 194 190 ASSERT(as != AS); 195 191 ASSERT(atomic_get(&as->refcount) == 0); … … 199 195 * lock its mutex. 200 196 */ 201 197 202 198 /* 203 199 * We need to avoid deadlock between TLB shootdown and asidlock. … … 206 202 * disabled to prevent nested context switches. We also depend on the 207 203 * fact that so far no spinlocks are held. 208 *209 204 */ 210 205 preemption_disable(); … … 231 226 spinlock_unlock(&asidlock); 232 227 interrupts_restore(ipl); 233 228 234 229 235 230 /* … … 237 232 * The B+tree must be walked carefully because it is 238 233 * also being destroyed. 239 *240 234 */ 241 235 bool cond = true; … … 264 258 /** Hold a reference to an address space. 265 259 * 266 * Holding a reference to an address space prevents destruction of that address267 * space.260 * Holding a reference to an address space prevents destruction 261 * of that address space. 268 262 * 269 263 * @param as Address space to be held. … … 277 271 /** Release a reference to an address space. 278 272 * 279 * The last one to release a reference to an address space destroys the address280 * space.273 * The last one to release a reference to an address space 274 * destroys the address space. 281 275 * 282 276 * @param asAddress space to be released. … … 291 285 /** Check area conflicts with other areas. 292 286 * 293 * @param as 294 * @param vaStarting virtual address of the area being tested.295 * @param size Size ofthe area being tested.296 * @param avoid _areaDo not touch this area.287 * @param as Address space. 288 * @param addr Starting virtual address of the area being tested. 289 * @param count Number of pages in the area being tested. 290 * @param avoid Do not touch this area. 297 291 * 298 292 * @return True if there is no conflict, false otherwise. 
299 293 * 300 294 */ 301 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 302 as_area_t *avoid_area) 303 { 295 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 296 size_t count, as_area_t *avoid) 297 { 298 ASSERT((addr % PAGE_SIZE) == 0); 304 299 ASSERT(mutex_locked(&as->lock)); 305 300 306 301 /* 307 302 * We don't want any area to have conflicts with NULL page. 308 * 309 */ 310 if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE)) 303 */ 304 if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE)) 311 305 return false; 312 306 … … 317 311 * record in the left neighbour, the leftmost record in the right 318 312 * neighbour and all records in the leaf node itself. 319 *320 313 */ 321 314 btree_node_t *leaf; 322 315 as_area_t *area = 323 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);316 (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf); 324 317 if (area) { 325 if (area != avoid _area)318 if (area != avoid) 326 319 return false; 327 320 } … … 333 326 area = (as_area_t *) node->value[node->keys - 1]; 334 327 335 mutex_lock(&area->lock); 336 337 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 328 if (area != avoid) { 329 mutex_lock(&area->lock); 330 331 if (overlaps(addr, count << PAGE_WIDTH, 332 area->base, area->pages << PAGE_WIDTH)) { 333 mutex_unlock(&area->lock); 334 return false; 335 } 336 338 337 mutex_unlock(&area->lock); 339 return false; 340 } 341 342 mutex_unlock(&area->lock); 338 } 343 339 } 344 340 … … 347 343 area = (as_area_t *) node->value[0]; 348 344 349 mutex_lock(&area->lock); 350 351 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 345 if (area != avoid) { 346 mutex_lock(&area->lock); 347 348 if (overlaps(addr, count << PAGE_WIDTH, 349 area->base, area->pages << PAGE_WIDTH)) { 350 mutex_unlock(&area->lock); 351 return false; 352 } 353 352 354 mutex_unlock(&area->lock); 353 return false; 354 } 355 356 mutex_unlock(&area->lock); 355 } 357 356 } 358 357 … … 362 361 area = (as_area_t *) leaf->value[i]; 363 362 364 if (area == avoid _area)363 if (area == avoid) 365 364 continue; 366 365 367 366 mutex_lock(&area->lock); 368 367 369 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 368 if (overlaps(addr, count << PAGE_WIDTH, 369 area->base, area->pages << PAGE_WIDTH)) { 370 370 mutex_unlock(&area->lock); 371 371 return false; … … 378 378 * So far, the area does not conflict with other areas. 379 379 * Check if it doesn't conflict with kernel address space. 380 *381 380 */ 382 381 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 383 return !overlaps( va, size,382 return !overlaps(addr, count << PAGE_WIDTH, 384 383 KERNEL_ADDRESS_SPACE_START, 385 384 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); … … 408 407 mem_backend_data_t *backend_data) 409 408 { 410 if ( base % PAGE_SIZE)409 if ((base % PAGE_SIZE) != 0) 411 410 return NULL; 412 411 413 if ( !size)412 if (size == 0) 414 413 return NULL; 414 415 size_t pages = SIZE2FRAMES(size); 415 416 416 417 /* Writeable executable areas are not supported. 
*/ … … 420 421 mutex_lock(&as->lock); 421 422 422 if (!check_area_conflicts(as, base, size, NULL)) {423 if (!check_area_conflicts(as, base, pages, NULL)) { 423 424 mutex_unlock(&as->lock); 424 425 return NULL; … … 432 433 area->flags = flags; 433 434 area->attributes = attrs; 434 area->pages = SIZE2FRAMES(size); 435 area->pages = pages; 436 area->resident = 0; 435 437 area->base = base; 436 438 area->sh_info = NULL; … … 475 477 * to find out whether this is a miss or va belongs to an address 476 478 * space area found there. 477 *478 479 */ 479 480 … … 486 487 mutex_lock(&area->lock); 487 488 488 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 489 if ((area->base <= va) && 490 (va < area->base + (area->pages << PAGE_WIDTH))) 489 491 return area; 490 492 … … 495 497 * Second, locate the left neighbour and test its last record. 496 498 * Because of its position in the B+tree, it must have base < va. 497 *498 499 */ 499 500 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); … … 503 504 mutex_lock(&area->lock); 504 505 505 if (va < area->base + area->pages * PAGE_SIZE)506 if (va < area->base + (area->pages << PAGE_WIDTH)) 506 507 return area; 507 508 … … 530 531 /* 531 532 * Locate the area. 532 *533 533 */ 534 534 as_area_t *area = find_area_and_lock(as, address); … … 542 542 * Remapping of address space areas associated 543 543 * with memory mapped devices is not supported. 544 *545 544 */ 546 545 mutex_unlock(&area->lock); … … 553 552 * Remapping of shared address space areas 554 553 * is not supported. 555 *556 554 */ 557 555 mutex_unlock(&area->lock); … … 564 562 /* 565 563 * Zero size address space areas are not allowed. 566 *567 564 */ 568 565 mutex_unlock(&area->lock); … … 572 569 573 570 if (pages < area->pages) { 574 uintptr_t start_free = area->base + pages * PAGE_SIZE;571 uintptr_t start_free = area->base + (pages << PAGE_WIDTH); 575 572 576 573 /* 577 574 * Shrinking the area. 578 575 * No need to check for overlaps. 579 *580 576 */ 581 577 … … 584 580 /* 585 581 * Start TLB shootdown sequence. 586 *587 582 */ 588 583 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 589 area->base + pages * PAGE_SIZE, area->pages - pages);584 area->base + (pages << PAGE_WIDTH), area->pages - pages); 590 585 591 586 /* … … 595 590 * is also the right way to remove part of the used_space 596 591 * B+tree leaf list. 597 *598 592 */ 599 593 bool cond = true; … … 611 605 size_t i = 0; 612 606 613 if (overlaps(ptr, size * PAGE_SIZE, area->base,614 pages * PAGE_SIZE)) {607 if (overlaps(ptr, size << PAGE_WIDTH, area->base, 608 pages << PAGE_WIDTH)) { 615 609 616 if (ptr + size * PAGE_SIZE<= start_free) {610 if (ptr + (size << PAGE_WIDTH) <= start_free) { 617 611 /* 618 612 * The whole interval fits 619 613 * completely in the resized 620 614 * address space area. 621 *622 615 */ 623 616 break; … … 628 621 * to b and c overlaps with the resized 629 622 * address space area. 630 *631 623 */ 632 624 … … 648 640 for (; i < size; i++) { 649 641 pte_t *pte = page_mapping_find(as, ptr + 650 i * PAGE_SIZE);642 (i << PAGE_WIDTH)); 651 643 652 644 ASSERT(pte); … … 657 649 (area->backend->frame_free)) { 658 650 area->backend->frame_free(area, 659 ptr + i * PAGE_SIZE,651 ptr + (i << PAGE_WIDTH), 660 652 PTE_GET_FRAME(pte)); 661 653 } 662 654 663 655 page_mapping_remove(as, ptr + 664 i * PAGE_SIZE);656 (i << PAGE_WIDTH)); 665 657 } 666 658 } … … 669 661 /* 670 662 * Finish TLB shootdown sequence. 
671 * 672 */ 673 674 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 663 */ 664 665 tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH), 675 666 area->pages - pages); 676 667 677 668 /* 678 669 * Invalidate software translation caches (e.g. TSB on sparc64). 679 *680 670 */ 681 671 as_invalidate_translation_cache(as, area->base + 682 pages * PAGE_SIZE, area->pages - pages);672 (pages << PAGE_WIDTH), area->pages - pages); 683 673 tlb_shootdown_finalize(ipl); 684 674 … … 688 678 * Growing the area. 689 679 * Check for overlaps with other address space areas. 690 * 691 */ 692 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 693 area)) { 680 */ 681 if (!check_area_conflicts(as, address, pages, area)) { 694 682 mutex_unlock(&area->lock); 695 683 mutex_unlock(&as->lock); … … 790 778 791 779 for (size = 0; size < (size_t) node->value[i]; size++) { 792 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 780 pte_t *pte = 781 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 793 782 794 783 ASSERT(pte); … … 799 788 (area->backend->frame_free)) { 800 789 area->backend->frame_free(area, 801 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));790 ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte)); 802 791 } 803 792 804 page_mapping_remove(as, ptr + size * PAGE_SIZE);793 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 805 794 } 806 795 } … … 809 798 /* 810 799 * Finish TLB shootdown sequence. 811 *812 800 */ 813 801 … … 817 805 * Invalidate potential software translation caches (e.g. TSB on 818 806 * sparc64). 819 *820 807 */ 821 808 as_invalidate_translation_cache(as, area->base, area->pages); … … 835 822 /* 836 823 * Remove the empty area from address space. 837 *838 824 */ 839 825 btree_remove(&as->as_area_btree, base, NULL); … … 877 863 /* 878 864 * Could not find the source address space area. 879 *880 865 */ 881 866 mutex_unlock(&src_as->lock); … … 887 872 * There is no backend or the backend does not 888 873 * know how to share the area. 889 *890 874 */ 891 875 mutex_unlock(&src_area->lock); … … 894 878 } 895 879 896 size_t src_size = src_area->pages * PAGE_SIZE;880 size_t src_size = src_area->pages << PAGE_WIDTH; 897 881 unsigned int src_flags = src_area->flags; 898 882 mem_backend_t *src_backend = src_area->backend; … … 914 898 * First, prepare the area for sharing. 915 899 * Then it will be safe to unlock it. 916 *917 900 */ 918 901 share_info_t *sh_info = src_area->sh_info; … … 926 909 /* 927 910 * Call the backend to setup sharing. 928 *929 911 */ 930 912 src_area->backend->share(src_area); … … 945 927 * The flags of the source area are masked against dst_flags_mask 946 928 * to support sharing in less privileged mode. 947 *948 929 */ 949 930 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, … … 962 943 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 963 944 * attribute and set the sh_info. 964 *965 945 */ 966 946 mutex_lock(&dst_as->lock); … … 985 965 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 986 966 { 967 ASSERT(mutex_locked(&area->lock)); 968 987 969 int flagmap[] = { 988 970 [PF_ACCESS_READ] = AS_AREA_READ, … … 990 972 [PF_ACCESS_EXEC] = AS_AREA_EXEC 991 973 }; 992 993 ASSERT(mutex_locked(&area->lock));994 974 995 975 if (!(area->flags & flagmap[access])) … … 1062 1042 /* 1063 1043 * Compute total number of used pages in the used_space B+tree 1064 *1065 1044 */ 1066 1045 size_t used_pages = 0; … … 1084 1063 /* 1085 1064 * Start TLB shootdown sequence. 
1086 *1087 1065 */ 1088 1066 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, … … 1092 1070 * Remove used pages from page tables and remember their frame 1093 1071 * numbers. 1094 *1095 1072 */ 1096 1073 size_t frame_idx = 0; … … 1107 1084 1108 1085 for (size = 0; size < (size_t) node->value[i]; size++) { 1109 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1086 pte_t *pte = 1087 page_mapping_find(as, ptr + (size << PAGE_WIDTH)); 1110 1088 1111 1089 ASSERT(pte); … … 1116 1094 1117 1095 /* Remove old mapping */ 1118 page_mapping_remove(as, ptr + size * PAGE_SIZE);1096 page_mapping_remove(as, ptr + (size << PAGE_WIDTH)); 1119 1097 } 1120 1098 } … … 1123 1101 /* 1124 1102 * Finish TLB shootdown sequence. 1125 *1126 1103 */ 1127 1104 … … 1131 1108 * Invalidate potential software translation caches (e.g. TSB on 1132 1109 * sparc64). 1133 *1134 1110 */ 1135 1111 as_invalidate_translation_cache(as, area->base, area->pages); … … 1164 1140 1165 1141 /* Insert the new mapping */ 1166 page_mapping_insert(as, ptr + size * PAGE_SIZE,1142 page_mapping_insert(as, ptr + (size << PAGE_WIDTH), 1167 1143 old_frame[frame_idx++], page_flags); 1168 1144 … … 1213 1189 * No area contained mapping for 'page'. 1214 1190 * Signal page fault to low-level handler. 1215 *1216 1191 */ 1217 1192 mutex_unlock(&AS->lock); … … 1233 1208 * The address space area is not backed by any backend 1234 1209 * or the backend cannot handle page faults. 1235 *1236 1210 */ 1237 1211 mutex_unlock(&area->lock); … … 1245 1219 * To avoid race condition between two page faults on the same address, 1246 1220 * we need to make sure the mapping has not been already inserted. 1247 *1248 1221 */ 1249 1222 pte_t *pte; … … 1263 1236 /* 1264 1237 * Resort to the backend page fault handler. 1265 *1266 1238 */ 1267 1239 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1318 1290 * preemption is disabled. We should not be 1319 1291 * holding any other lock. 1320 *1321 1292 */ 1322 1293 (void) interrupts_enable(); … … 1338 1309 * list of inactive address spaces with assigned 1339 1310 * ASID. 1340 *1341 1311 */ 1342 1312 ASSERT(old_as->asid != ASID_INVALID); … … 1349 1319 * Perform architecture-specific tasks when the address space 1350 1320 * is being removed from the CPU. 1351 *1352 1321 */ 1353 1322 as_deinstall_arch(old_as); … … 1356 1325 /* 1357 1326 * Second, prepare the new address space. 1358 *1359 1327 */ 1360 1328 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1372 1340 * Perform architecture-specific steps. 1373 1341 * (e.g. write ASID to hardware register etc.) 1374 *1375 1342 */ 1376 1343 as_install_arch(new_as); … … 1391 1358 { 1392 1359 ASSERT(mutex_locked(&area->lock)); 1393 1360 1394 1361 return area_flags_to_page_flags(area->flags); 1395 1362 } … … 1495 1462 1496 1463 if (src_area) { 1497 size = src_area->pages * PAGE_SIZE;1464 size = src_area->pages << PAGE_WIDTH; 1498 1465 mutex_unlock(&src_area->lock); 1499 1466 } else … … 1512 1479 * @param count Number of page to be marked. 1513 1480 * 1514 * @return Zero on failure and non-zeroon success.1515 * 1516 */ 1517 intused_space_insert(as_area_t *area, uintptr_t page, size_t count)1481 * @return False on failure or true on success. 1482 * 1483 */ 1484 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1518 1485 { 1519 1486 ASSERT(mutex_locked(&area->lock)); … … 1526 1493 /* 1527 1494 * We hit the beginning of some used space. 
1528 * 1529 */ 1530 return 0; 1495 */ 1496 return false; 1531 1497 } 1532 1498 1533 1499 if (!leaf->keys) { 1534 1500 btree_insert(&area->used_space, page, (void *) count, leaf); 1535 return 1;1501 goto success; 1536 1502 } 1537 1503 … … 1547 1513 * somewhere between the rightmost interval of 1548 1514 * the left neigbour and the first interval of the leaf. 1549 *1550 1515 */ 1551 1516 1552 1517 if (page >= right_pg) { 1553 1518 /* Do nothing. */ 1554 } else if (overlaps(page, count * PAGE_SIZE, left_pg,1555 left_cnt * PAGE_SIZE)) {1519 } else if (overlaps(page, count << PAGE_WIDTH, left_pg, 1520 left_cnt << PAGE_WIDTH)) { 1556 1521 /* The interval intersects with the left interval. */ 1557 return 0;1558 } else if (overlaps(page, count * PAGE_SIZE, right_pg,1559 right_cnt * PAGE_SIZE)) {1522 return false; 1523 } else if (overlaps(page, count << PAGE_WIDTH, right_pg, 1524 right_cnt << PAGE_WIDTH)) { 1560 1525 /* The interval intersects with the right interval. */ 1561 return 0;1562 } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&1563 (page + count * PAGE_SIZE== right_pg)) {1526 return false; 1527 } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) && 1528 (page + (count << PAGE_WIDTH) == right_pg)) { 1564 1529 /* 1565 1530 * The interval can be added by merging the two already 1566 1531 * present intervals. 1567 *1568 1532 */ 1569 1533 node->value[node->keys - 1] += count + right_cnt; 1570 1534 btree_remove(&area->used_space, right_pg, leaf); 1571 return 1;1572 } else if (page == left_pg + left_cnt * PAGE_SIZE) {1535 goto success; 1536 } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) { 1573 1537 /* 1574 1538 * The interval can be added by simply growing the left 1575 1539 * interval. 1576 *1577 1540 */ 1578 1541 node->value[node->keys - 1] += count; 1579 return 1;1580 } else if (page + count * PAGE_SIZE== right_pg) {1542 goto success; 1543 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1581 1544 /* 1582 1545 * The interval can be addded by simply moving base of 1583 1546 * the right interval down and increasing its size 1584 1547 * accordingly. 1585 *1586 1548 */ 1587 1549 leaf->value[0] += count; 1588 1550 leaf->key[0] = page; 1589 return 1;1551 goto success; 1590 1552 } else { 1591 1553 /* 1592 1554 * The interval is between both neigbouring intervals, 1593 1555 * but cannot be merged with any of them. 1594 *1595 1556 */ 1596 1557 btree_insert(&area->used_space, page, (void *) count, 1597 1558 leaf); 1598 return 1;1559 goto success; 1599 1560 } 1600 1561 } else if (page < leaf->key[0]) { … … 1605 1566 * Investigate the border case in which the left neighbour does 1606 1567 * not exist but the interval fits from the left. 1607 * 1608 */ 1609 1610 if (overlaps(page, count * PAGE_SIZE, right_pg, 1611 right_cnt * PAGE_SIZE)) { 1568 */ 1569 1570 if (overlaps(page, count << PAGE_WIDTH, right_pg, 1571 right_cnt << PAGE_WIDTH)) { 1612 1572 /* The interval intersects with the right interval. */ 1613 return 0;1614 } else if (page + count * PAGE_SIZE== right_pg) {1573 return false; 1574 } else if (page + (count << PAGE_WIDTH) == right_pg) { 1615 1575 /* 1616 1576 * The interval can be added by moving the base of the 1617 1577 * right interval down and increasing its size 1618 1578 * accordingly. 1619 *1620 1579 */ 1621 1580 leaf->key[0] = page; 1622 1581 leaf->value[0] += count; 1623 return 1;1582 goto success; 1624 1583 } else { 1625 1584 /* 1626 1585 * The interval doesn't adjoin with the right interval. 1627 1586 * It must be added individually. 
-		 *
 		 */
 		btree_insert(&area->used_space, page, (void *) count,
 		    leaf);
-		return 1;
+		goto success;
 	}
 }
…
 	 * somewhere between the leftmost interval of
 	 * the right neigbour and the last interval of the leaf.
-	 *
 	 */
 
 	if (page < left_pg) {
 		/* Do nothing. */
-	} else if (overlaps(page, count * PAGE_SIZE, left_pg,
-	    left_cnt * PAGE_SIZE)) {
+	} else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+	    left_cnt << PAGE_WIDTH)) {
 		/* The interval intersects with the left interval. */
-		return 0;
-	} else if (overlaps(page, count * PAGE_SIZE, right_pg,
-	    right_cnt * PAGE_SIZE)) {
+		return false;
+	} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+	    right_cnt << PAGE_WIDTH)) {
 		/* The interval intersects with the right interval. */
-		return 0;
-	} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-	    (page + count * PAGE_SIZE == right_pg)) {
+		return false;
+	} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+	    (page + (count << PAGE_WIDTH) == right_pg)) {
 		/*
 		 * The interval can be added by merging the two already
 		 * present intervals.
-		 *
 		 */
 		leaf->value[leaf->keys - 1] += count + right_cnt;
 		btree_remove(&area->used_space, right_pg, node);
-		return 1;
-	} else if (page == left_pg + left_cnt * PAGE_SIZE) {
+		goto success;
+	} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 		/*
 		 * The interval can be added by simply growing the left
 		 * interval.
-		 *
 		 */
 		leaf->value[leaf->keys - 1] += count;
-		return 1;
-	} else if (page + count * PAGE_SIZE == right_pg) {
+		goto success;
+	} else if (page + (count << PAGE_WIDTH) == right_pg) {
 		/*
 		 * The interval can be addded by simply moving base of
 		 * the right interval down and increasing its size
 		 * accordingly.
-		 *
 		 */
 		node->value[0] += count;
 		node->key[0] = page;
-		return 1;
+		goto success;
 	} else {
 		/*
 		 * The interval is between both neigbouring intervals,
 		 * but cannot be merged with any of them.
-		 *
 		 */
 		btree_insert(&area->used_space, page, (void *) count,
 		    leaf);
-		return 1;
+		goto success;
 	}
 } else if (page >= leaf->key[leaf->keys - 1]) {
…
 	 * Investigate the border case in which the right neighbour
 	 * does not exist but the interval fits from the right.
-	 *
 	 */
 
-	if (overlaps(page, count * PAGE_SIZE, left_pg,
-	    left_cnt * PAGE_SIZE)) {
+	if (overlaps(page, count << PAGE_WIDTH, left_pg,
+	    left_cnt << PAGE_WIDTH)) {
 		/* The interval intersects with the left interval. */
-		return 0;
-	} else if (left_pg + left_cnt * PAGE_SIZE == page) {
+		return false;
+	} else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
 		/*
 		 * The interval can be added by growing the left
 		 * interval.
-		 *
 		 */
 		leaf->value[leaf->keys - 1] += count;
-		return 1;
+		goto success;
 	} else {
 		/*
 		 * The interval doesn't adjoin with the left interval.
 		 * It must be added individually.
-		 *
 		 */
 		btree_insert(&area->used_space, page, (void *) count,
 		    leaf);
-		return 1;
+		goto success;
 	}
 }
…
 	 * only between two other intervals of the leaf. The two border cases
 	 * were already resolved.
-	 *
 	 */
 	btree_key_t i;
…
 		/*
 		 * The interval fits between left_pg and right_pg.
-		 *
 		 */
 
-		if (overlaps(page, count * PAGE_SIZE, left_pg,
-		    left_cnt * PAGE_SIZE)) {
+		if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval intersects with the left
 			 * interval.
-			 *
 			 */
-			return 0;
-		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
-		    right_cnt * PAGE_SIZE)) {
+			return false;
+		} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval intersects with the right
 			 * interval.
-			 *
 			 */
-			return 0;
-		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-		    (page + count * PAGE_SIZE == right_pg)) {
+			return false;
+		} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+		    (page + (count << PAGE_WIDTH) == right_pg)) {
 			/*
 			 * The interval can be added by merging the two
 			 * already present intervals.
-			 *
 			 */
 			leaf->value[i - 1] += count + right_cnt;
 			btree_remove(&area->used_space, right_pg, leaf);
-			return 1;
-		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
+			goto success;
+		} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval can be added by simply growing
 			 * the left interval.
-			 *
 			 */
 			leaf->value[i - 1] += count;
-			return 1;
-		} else if (page + count * PAGE_SIZE == right_pg) {
+			goto success;
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be addded by simply moving
 			 * base of the right interval down and
 			 * increasing its size accordingly.
-			 *
 			 */
 			leaf->value[i] += count;
 			leaf->key[i] = page;
-			return 1;
+			goto success;
 		} else {
 			/*
…
 			 * intervals, but cannot be merged with any of
 			 * them.
-			 *
 			 */
 			btree_insert(&area->used_space, page,
 			    (void *) count, leaf);
-			return 1;
+			goto success;
 		}
 	}
…
 	panic("Inconsistency detected while adding %zu pages of used "
 	    "space at %p.", count, (void *) page);
+
+success:
+	area->resident += count;
+	return true;
 }
…
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
+ * @return False on failure or true on success.
  *
  */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
 	ASSERT(mutex_locked(&area->lock));
…
 		/*
 		 * We are lucky, page is the beginning of some interval.
-		 *
 		 */
 		if (count > pages) {
-			return 0;
+			return false;
 		} else if (count == pages) {
 			btree_remove(&area->used_space, page, leaf);
-			return 1;
+			goto success;
 		} else {
 			/*
 			 * Find the respective interval.
 			 * Decrease its size and relocate its start address.
-			 *
 			 */
 			btree_key_t i;
 			for (i = 0; i < leaf->keys; i++) {
 				if (leaf->key[i] == page) {
-					leaf->key[i] += count * PAGE_SIZE;
+					leaf->key[i] += count << PAGE_WIDTH;
 					leaf->value[i] -= count;
-					return 1;
+					goto success;
 				}
 			}
+
 			goto error;
 		}
…
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-		    count * PAGE_SIZE)) {
-			if (page + count * PAGE_SIZE ==
-			    left_pg + left_cnt * PAGE_SIZE) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				 * removed by updating the size of the bigger
 				 * interval.
-				 *
 				 */
 				node->value[node->keys - 1] -= count;
-				return 1;
-			} else if (page + count * PAGE_SIZE <
-			    left_pg + left_cnt*PAGE_SIZE) {
+				goto success;
+			} else if (page + (count << PAGE_WIDTH) <
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				 * the original interval and also inserting a
 				 * new interval.
-				 *
 				 */
-				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				node->value[node->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	} else if (page < leaf->key[0])
-		return 0;
+		return false;
 
 	if (page > leaf->key[leaf->keys - 1]) {
…
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-		    count * PAGE_SIZE)) {
-			if (page + count * PAGE_SIZE ==
-			    left_pg + left_cnt * PAGE_SIZE) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
 				 * interval of the leaf and can be removed by
 				 * updating the size of the bigger interval.
-				 *
 				 */
 				leaf->value[leaf->keys - 1] -= count;
-				return 1;
-			} else if (page + count * PAGE_SIZE < left_pg +
-			    left_cnt * PAGE_SIZE) {
+				goto success;
+			} else if (page + (count << PAGE_WIDTH) < left_pg +
+			    (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				 * original interval and also inserting a new
 				 * interval.
-				 *
 				 */
-				size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				leaf->value[leaf->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    count * PAGE_SIZE, (void *) new_cnt, leaf);
-				return 1;
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+				goto success;
 			}
 		}
-		return 0;
+
+		return false;
 	}
 
 	/*
 	 * The border cases have been already resolved.
-	 * Now the interval can be only between intervals of the leaf. 
+	 * Now the interval can be only between intervals of the leaf.
 	 */
 	btree_key_t i;
…
 			 * to (i - 1) and i.
 			 */
-			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-			    count * PAGE_SIZE)) {
-				if (page + count * PAGE_SIZE ==
-				    left_pg + left_cnt*PAGE_SIZE) {
+			if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+			    count << PAGE_WIDTH)) {
+				if (page + (count << PAGE_WIDTH) ==
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
…
 					 * be removed by updating the size of
 					 * the bigger interval.
-					 *
 					 */
 					leaf->value[i - 1] -= count;
-					return 1;
-				} else if (page + count * PAGE_SIZE <
-				    left_pg + left_cnt * PAGE_SIZE) {
+					goto success;
+				} else if (page + (count << PAGE_WIDTH) <
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
…
 					 */
 					size_t new_cnt = ((left_pg +
-					    left_cnt * PAGE_SIZE) -
-					    (page + count * PAGE_SIZE)) >>
+					    (left_cnt << PAGE_WIDTH)) -
+					    (page + (count << PAGE_WIDTH))) >>
 					    PAGE_WIDTH;
 					leaf->value[i - 1] -= count + new_cnt;
 					btree_insert(&area->used_space, page +
-					    count * PAGE_SIZE, (void *) new_cnt,
+					    (count << PAGE_WIDTH), (void *) new_cnt,
 					    leaf);
-					return 1;
+					goto success;
 				}
 			}
-			return 0;
+
+			return false;
 		}
 	}
…
 	panic("Inconsistency detected while removing %zu pages of used "
 	    "space from %p.", count, (void *) page);
+
+success:
+	area->resident -= count;
+	return true;
 }
…
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+	if (size == 0)
+		return 0;
+
+	/*
+	 * Make sure we allocate from page-aligned
+	 * address. Check for possible overflow in
+	 * each step.
+	 */
+
+	size_t pages = SIZE2FRAMES(size);
+	uintptr_t ret = 0;
+
+	/*
+	 * Find the lowest unmapped address aligned on the sz
+	 * boundary, not smaller than base and of the required size.
+	 */
+
+	mutex_lock(&AS->lock);
+
+	/* First check the base address itself */
+	uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+	if ((addr >= base) &&
+	    (check_area_conflicts(AS, addr, pages, NULL)))
+		ret = addr;
+
+	/* Eventually check the addresses behind each area */
+	link_t *cur;
+	for (cur = AS->as_area_btree.leaf_head.next;
+	    (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
+	    cur = cur->next) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		btree_key_t i;
+		for (i = 0; (ret == 0) && (i < node->keys); i++) {
+			as_area_t *area = (as_area_t *) node->value[i];
+
+			mutex_lock(&area->lock);
+
+			uintptr_t addr =
+			    ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
+			    PAGE_SIZE);
+
+			if ((addr >= base) && (addr >= area->base) &&
+			    (check_area_conflicts(AS, addr, pages, area)))
+				ret = addr;
+
+			mutex_unlock(&area->lock);
+		}
+	}
+
+	mutex_unlock(&AS->lock);
+
+	return (sysarg_t) ret;
+}
+
 /** Get list of adress space areas.
…
 	mutex_lock(&as->lock);
 
-	/* print out info about address space areas */
+	/* Print out info about address space areas */
 	link_t *cur;
 	for (cur = as->as_area_btree.leaf_head.next;
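Two patterns recur throughout these as.c hunks: the byte arithmetic changes from count * PAGE_SIZE to count << PAGE_WIDTH, and every successful exit now funnels through the new success: label, so the new as_area_t.resident counter is maintained in exactly one place per function. Assuming the usual definition PAGE_SIZE == 1 << PAGE_WIDTH, the shift form is a pure strength reduction; a minimal standalone check (the constants below are placeholders, not the kernel's arch headers):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Placeholder constants; the kernel takes these from its arch headers. */
#define PAGE_WIDTH  12
#define PAGE_SIZE   (1 << PAGE_WIDTH)

int main(void)
{
	size_t count = 37;

	/* Both forms of the pages-to-bytes conversion agree. */
	assert((count << PAGE_WIDTH) == count * PAGE_SIZE);

	/* Shifting right by PAGE_WIDTH converts bytes back to pages,
	   matching the >> PAGE_WIDTH expressions in the hunks above. */
	uintptr_t bytes = (uintptr_t) count << PAGE_WIDTH;
	assert(bytes >> PAGE_WIDTH == count);

	return 0;
}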
- kernel/generic/src/mm/backend_elf.c
r6b9e85b r8b655705
 	if (!as_area_check_access(area, access))
 		return AS_PF_FAULT;
 
-	ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
-	    (addr < entry->p_vaddr + entry->p_memsz));
+	if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+		return AS_PF_FAULT;
+
+	if (addr >= entry->p_vaddr + entry->p_memsz)
+		return AS_PF_FAULT;
+
 	i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
 	base = (uintptr_t)
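The removed ASSERT turned an out-of-range fault address into a kernel panic; the replacement checks let the backend report AS_PF_FAULT so the offending task can be handled gracefully. A simplified sketch of the same bounds test, with placeholder macros standing in for the kernel's headers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Placeholders; the kernel gets these from its own headers. */
#define PAGE_SIZE 4096
#define ALIGN_DOWN(addr, align) ((addr) & ~((uintptr_t) (align) - 1))

/* True when addr falls inside the pages backing a segment loaded at
 * vaddr with memsz bytes: the condition the removed ASSERT enforced
 * and the new code checks without panicking. */
static bool segment_covers(uintptr_t addr, uintptr_t vaddr, size_t memsz)
{
	if (addr < ALIGN_DOWN(vaddr, PAGE_SIZE))
		return false;

	if (addr >= vaddr + memsz)
		return false;

	return true;
}

int main(void)
{
	/* 0x5000 lies inside [0x4000, 0x5800) once 0x4800 is aligned down. */
	return segment_covers(0x5000, 0x4800, 0x1000) ? 0 : 1;
}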
- kernel/generic/src/mm/backend_phys.c
r6b9e85b r8b655705
 	page_mapping_insert(AS, addr, base + (addr - area->base),
 	    as_area_get_flags(area));
-	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-		panic("Cannot insert used space.");
+
+	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+		panic("Cannot insert used space.");
 
 	return AS_PF_OK;
- kernel/generic/src/proc/program.c
r6b9e85b r8b655705
 	void *loader = program_loader;
 	if (!loader) {
+		as_destroy(as);
 		printf("Cannot spawn loader as none was registered\n");
 		return ENOENT;
…
 	if (rc != EE_OK) {
 		as_destroy(as);
+		printf("Cannot spawn loader (%s)\n", elf_error(rc));
 		return ENOENT;
 	}
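Both fixes harden the same error path: the early "no loader registered" return previously leaked the freshly created address space, and the ELF failure branch now also says why the load failed. The underlying rule, that every exit taken after a resource is acquired must release it, can be sketched as follows (the resource type and step function are hypothetical, not the kernel's API):

#include <errno.h>
#include <stdlib.h>

/* Hypothetical resource standing in for an address space. */
typedef struct resource { int dummy; } resource_t;

static resource_t *resource_create(void) { return calloc(1, sizeof(resource_t)); }
static void resource_destroy(resource_t *res) { free(res); }

static int do_step(resource_t *res) { (void) res; return 0; /* 0 = success */ }

int spawn(void)
{
	resource_t *res = resource_create();
	if (res == NULL)
		return ENOMEM;

	/* Every failure path after the allocation must release it;
	 * a single cleanup label keeps the paths from diverging. */
	if (do_step(res) != 0)
		goto error;

	return 0;

error:
	resource_destroy(res);
	return ENOENT;
}

int main(void)
{
	return spawn();
}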
- kernel/generic/src/proc/scheduler.c
r6b9e85b r8b655705
 #include <print.h>
 #include <debug.h>
-
-static void before_task_runs(void);
-static void before_thread_runs(void);
-static void after_thread_ran(void);
+#include <stacktrace.h>
+
 static void scheduler_separated_stack(void);
…
 /** Carry out actions before new task runs. */
-void before_task_runs(void)
+static void before_task_runs(void)
 {
 	before_task_runs_arch();
…
  * Perform actions that need to be
  * taken before the newly selected
- * tread is passed control.
+ * thread is passed control.
  *
  * THREAD->lock is locked on entry
  *
  */
-void before_thread_runs(void)
+static void before_thread_runs(void)
 {
 	before_thread_runs_arch();
+
 #ifdef CONFIG_FPU_LAZY
-	if(THREAD == CPU->fpu_owner)
+	if (THREAD == CPU->fpu_owner)
 		fpu_enable();
 	else
…
 	}
 #endif
+
+#ifdef CONFIG_UDEBUG
+	if (THREAD->btrace) {
+		istate_t *istate = THREAD->udebug.uspace_state;
+		if (istate != NULL) {
+			printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+			stack_trace_istate(istate);
+		}
+
+		THREAD->btrace = false;
+	}
+#endif
 }
…
  *
  */
-void after_thread_ran(void)
+static void after_thread_ran(void)
 {
 	after_thread_ran_arch();
…
 	 * possible destruction should thread_destroy() be called on this or any
 	 * other processor while the scheduler is still using them.
-	 *
 	 */
 	if (old_task)
…
 			 * The thread structure is kept allocated until
 			 * somebody calls thread_detach() on it.
-			 *
 			 */
 			if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
 				/*
 				 * Avoid deadlock.
-				 *
 				 */
 				irq_spinlock_unlock(&THREAD->lock, false);
…
 			/*
 			 * Prefer the thread after it's woken up.
-			 *
 			 */
 			THREAD->priority = -1;
…
 			 * waitq_sleep(). Address of wq->lock is kept in
 			 * THREAD->sleep_queue.
-			 *
 			 */
 			irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
…
 			/*
 			 * Entering state is unexpected.
-			 *
 			 */
 			panic("tid%" PRIu64 ": unexpected state %s.",
…
 	/*
-	 * If both the old and the new task are the same, lots of work is
-	 * avoided.
-	 *
+	 * If both the old and the new task are the same,
+	 * lots of work is avoided.
 	 */
 	if (TASK != THREAD->task) {
…
 		/*
-		 * Note that it is possible for two tasks to share one address
-		 * space.
-		 (
+		 * Note that it is possible for two tasks
+		 * to share one address space.
 		 */
 		if (old_as != new_as) {
…
 			 * Both tasks and address spaces are different.
 			 * Replace the old one with the new one.
-			 *
 			 */
 			as_switch(old_as, new_as);
…
 	 * necessary, is to be mapped in before_thread_runs(). This
 	 * function must be executed before the switch to the new stack.
-	 *
 	 */
 	before_thread_runs();
…
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
 	 * thread's stack.
-	 *
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
…
 	/*
 	 * Ready thread on local CPU
-	 *
 	 */
 
- kernel/generic/src/proc/task.c
r6b9e85b r8b655705
 sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len)
 {
-	int rc;
 	char namebuf[TASK_NAME_BUFLEN];
 
 	/* Cap length of name and copy it from userspace. */
-
 	if (name_len > TASK_NAME_BUFLEN - 1)
 		name_len = TASK_NAME_BUFLEN - 1;
 
-	rc = copy_from_uspace(namebuf, uspace_name, name_len);
+	int rc = copy_from_uspace(namebuf, uspace_name, name_len);
 	if (rc != 0)
 		return (sysarg_t) rc;
 
 	namebuf[name_len] = '\0';
+
+	/*
+	 * As the task name is referenced also from the
+	 * threads, lock the threads' lock for the course
+	 * of the update.
+	 */
+
+	irq_spinlock_lock(&tasks_lock, true);
+	irq_spinlock_lock(&TASK->lock, false);
+	irq_spinlock_lock(&threads_lock, false);
+
+	/* Set task name */
 	str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
 
+	irq_spinlock_unlock(&threads_lock, false);
+	irq_spinlock_unlock(&TASK->lock, false);
+	irq_spinlock_unlock(&tasks_lock, true);
+
 	return EOK;
 }
…
 {
 	task_id_t taskid;
-	int rc;
-
-	rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
+	int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
 	if (rc != 0)
 		return (sysarg_t) rc;
 
 	return (sysarg_t) task_kill(taskid);
 }
…
 static void task_kill_internal(task_t *task)
 {
+	irq_spinlock_lock(&task->lock, false);
+	irq_spinlock_lock(&threads_lock, false);
+
+	/*
+	 * Interrupt all threads.
+	 */
+
 	link_t *cur;
-
-	/*
-	 * Interrupt all threads.
-	 */
-	irq_spinlock_lock(&task->lock, false);
 	for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
 		thread_t *thread = list_get_instance(cur, thread_t, th_link);
…
 	}
 
+	irq_spinlock_unlock(&threads_lock, false);
 	irq_spinlock_unlock(&task->lock, false);
 }
…
 	irq_spinlock_unlock(&tasks_lock, true);
 
+	return EOK;
+}
+
+/** Kill the currently running task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ * @return Zero on success or an error code from errno.h.
+ *
+ */
+void task_kill_self(bool notify)
+{
+	/*
+	 * User space can subscribe for FAULT events to take action
+	 * whenever a task faults (to take a dump, run a debugger, etc.).
+	 * The notification is always available, but unless udebug is enabled,
+	 * that's all you get.
+	 */
+	if (notify) {
+		if (event_is_subscribed(EVENT_FAULT)) {
+			/* Notify the subscriber that a fault occurred. */
+			event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
+			    UPPER32(TASK->taskid), (sysarg_t) THREAD);
+
+#ifdef CONFIG_UDEBUG
+			/* Wait for a debugging session. */
+			udebug_thread_fault();
+#endif
+		}
+	}
+
+	irq_spinlock_lock(&tasks_lock, true);
+	task_kill_internal(TASK);
+	irq_spinlock_unlock(&tasks_lock, true);
+
+	thread_exit();
+}
+
+/** Process syscall to terminate the current task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ */
+sysarg_t sys_task_exit(sysarg_t notify)
+{
+	task_kill_self(notify);
+
+	/* Unreachable */
 	return EOK;
 }
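The new sys_task_exit()/task_kill_self() pair gives a task a clean way to terminate itself, optionally raising a FAULT event first so a debugger or dump service can attach. A user-space wrapper would presumably look something like the sketch below; the syscall number and the trap stub are illustrative only, not the actual HelenOS libc:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the arch-specific syscall trap. */
static long __syscall1(long id, long arg)
{
	printf("trap: syscall %ld(%ld)\n", id, arg);
	exit(0);
}

/* Illustrative number only; the real index lives in the syscall table. */
#define SYS_TASK_EXIT 13

/* Terminate the calling task; notify = true also raises the
 * FAULT event before the task is killed. */
static void task_exit(bool notify)
{
	__syscall1(SYS_TASK_EXIT, (long) notify);
	/* Unreachable, mirroring the kernel-side comment. */
	for (;;);
}

int main(void)
{
	task_exit(false);
}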
- kernel/generic/src/proc/thread.c
r6b9e85b r8b655705
  * Switch thread to the ready state.
  *
- * @param t Thread to make ready.
+ * @param thread Thread to make ready.
  *
  */
…
 	irq_spinlock_lock(&thread->lock, true);
 
-	ASSERT(!(thread->state == Ready));
+	ASSERT(thread->state != Ready);
 
 	int i = (thread->priority < RQ_COUNT - 1)
…
 #ifdef CONFIG_UDEBUG
-	/* Init debugging stuff */
+	/* Initialize debugging stuff */
+	thread->btrace = false;
 	udebug_thread_initialize(&thread->udebug);
 #endif
…
 /** Detach thread.
  *
- * Mark the thread as detached, if the thread is already in the Lingering
- * state, deallocate its resources.
+ * Mark the thread as detached. If the thread is already
+ * in the Lingering state, deallocate its resources.
  *
  * @param thread Thread to be detached.
…
 	order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
+	char *name;
+	if (str_cmp(thread->name, "uinit") == 0)
+		name = thread->task->name;
+	else
+		name = thread->name;
+
 #ifdef __32_BITS__
 	if (*additional)
-		printf("%-8" PRIu64 " %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
-		    thread->tid, thread->kstack, ucycles, usuffix,
-		    kcycles, ksuffix);
+		printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
+		    thread->tid, thread->thread_code, thread->kstack,
+		    ucycles, usuffix, kcycles, ksuffix);
 	else
-		printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",
-		    thread->tid, thread->name, thread, thread_states[thread->state],
-		    thread->task, thread->task->context, thread->thread_code);
+		printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
+		    thread->tid, name, thread, thread_states[thread->state],
+		    thread->task, thread->task->context);
 #endif
 
 #ifdef __64_BITS__
 	if (*additional)
-		printf("%-8" PRIu64 " %18p %18p\n"
+		printf("%-8" PRIu64 " %18p %18p\n"
 		    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
 		    thread->tid, thread->thread_code, thread->kstack,
 		    ucycles, usuffix, kcycles, ksuffix);
 	else
-		printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
-		    thread->tid, thread->name, thread, thread_states[thread->state],
+		printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
+		    thread->tid, name, thread, thread_states[thread->state],
 		    thread->task, thread->task->context);
 #endif
…
 #ifdef __32_BITS__
 	if (additional)
-		printf("[id    ] [stack   ] [ucycles ] [kcycles ] [cpu]"
-		    " [waitqueue]\n");
+		printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
+		    " [cpu] [waitqueue]\n");
 	else
 		printf("[id    ] [name        ] [address ] [state ] [task    ]"
-		    " [ctx] [code    ]\n");
+		    " [ctx]\n");
 #endif
…
 	ASSERT(interrupts_disabled());
 	ASSERT(irq_spinlock_locked(&threads_lock));
 
 	thread_iterator_t iterator;
…
 }
 
+#ifdef CONFIG_UDEBUG
+
+void thread_stack_trace(thread_id_t thread_id)
+{
+	irq_spinlock_lock(&threads_lock, true);
+
+	thread_t *thread = thread_find_by_id(thread_id);
+	if (thread == NULL) {
+		printf("No such thread.\n");
+		irq_spinlock_unlock(&threads_lock, true);
+		return;
+	}
+
+	irq_spinlock_lock(&thread->lock, false);
+
+	/*
+	 * Schedule a stack trace to be printed
+	 * just before the thread is scheduled next.
+	 *
+	 * If the thread is sleeping then try to interrupt
+	 * the sleep. Any request for printing an uspace stack
+	 * trace from within the kernel should be always
+	 * considered a last resort debugging means, therefore
+	 * forcing the thread's sleep to be interrupted
+	 * is probably justifiable.
+	 */
+
+	bool sleeping = false;
+	istate_t *istate = thread->udebug.uspace_state;
+	if (istate != NULL) {
+		printf("Scheduling thread stack trace.\n");
+		thread->btrace = true;
+		if (thread->state == Sleeping)
+			sleeping = true;
+	} else
+		printf("Thread interrupt state not available.\n");
+
+	irq_spinlock_unlock(&thread->lock, false);
+
+	if (sleeping)
+		waitq_interrupt_sleep(thread);
+
+	irq_spinlock_unlock(&threads_lock, true);
+}
+
+#endif /* CONFIG_UDEBUG */
 
 /** Process syscall to create new thread.
…
 	 * has already been created. We need to undo its
 	 * creation now.
-	 *
 	 */
…
 	 * THREAD_B events for threads that already existed
 	 * and could be detected with THREAD_READ before.
-	 *
 	 */
 	udebug_thread_b_event_attach(thread, TASK);
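The new thread_stack_trace() only posts a request: it sets thread->btrace under the proper locks, and the trace itself is printed by before_thread_runs() in scheduler.c (see above) the next time the thread is dispatched, when its user-space register state is coherent. The same "ask the thread to do the work itself at a safe point" pattern reduces to a runnable user-space analogue with an atomic flag; this illustrates the idea only and is not the kernel's code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool btrace_requested = false;

/* Runs inside the target thread at a known-safe point,
 * analogous to before_thread_runs() in the scheduler. */
static void safe_point(void)
{
	if (atomic_exchange(&btrace_requested, false))
		printf("worker: dumping my own state here\n");
}

static void *worker(void *arg)
{
	(void) arg;
	for (int i = 0; i < 5; i++) {
		safe_point();
		usleep(100000);  /* Pretend to do work. */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);

	/* Another thread merely posts the request, like thread_stack_trace(). */
	atomic_store(&btrace_requested, true);

	pthread_join(tid, NULL);
	return 0;
}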
- kernel/generic/src/synch/waitq.c
r6b9e85b r8b655705
 /** Interrupt sleeping thread.
  *
- * This routine attempts to interrupt a thread from its sleep in a waitqueue.
- * If the thread is not found sleeping, no action is taken.
+ * This routine attempts to interrupt a thread from its sleep in
+ * a waitqueue. If the thread is not found sleeping, no action
+ * is taken.
+ *
+ * The threads_lock must be already held and interrupts must be
+ * disabled upon calling this function.
  *
  * @param thread Thread to be interrupted.
…
 	DEADLOCK_PROBE_INIT(p_wqlock);
 
-	irq_spinlock_lock(&threads_lock, true);
-	if (!thread_exists(thread))
-		goto out;
+	/*
+	 * The thread is quaranteed to exist because
+	 * threads_lock is held.
+	 */
 
 grab_locks:
…
 		/*
 		 * The sleep cannot be interrupted.
-		 *
 		 */
 		irq_spinlock_unlock(&thread->lock, false);
-		goto out;
+		return;
 	}
 
 	if (!irq_spinlock_trylock(&wq->lock)) {
+		/* Avoid deadlock */
 		irq_spinlock_unlock(&thread->lock, false);
 		DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-		/* Avoid deadlock */
 		goto grab_locks;
 	}
…
 		irq_spinlock_unlock(&wq->lock, false);
 	}
+
 	irq_spinlock_unlock(&thread->lock, false);
 
 	if (do_wakeup)
 		thread_ready(thread);
-
-out:
-	irq_spinlock_unlock(&threads_lock, true);
 }
…
 	 * If the thread was already interrupted,
 	 * don't go to sleep at all.
-	 *
 	 */
 	if (THREAD->interrupted) {
…
 	 * Set context that will be restored if the sleep
 	 * of this thread is ever interrupted.
-	 *
 	 */
 	THREAD->sleep_interruptible = true;
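The locking contract moves to the caller: waitq_interrupt_sleep() no longer takes threads_lock itself and instead relies on the caller, such as thread_stack_trace() above, to pin the thread in existence. The shape of that contract, reduced to a runnable user-space analogue with a pthread mutex guarding a registry:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static int registry[4] = { 1, 2, 3, 4 };

/* Must be called with registry_lock held: the entry is then
 * guaranteed to stay valid for the duration of the call, just
 * as the thread is under threads_lock in the hunk above. */
static void poke_entry(int *entry)
{
	printf("poking %d\n", *entry);
}

int main(void)
{
	pthread_mutex_lock(&registry_lock);
	poke_entry(&registry[2]);
	pthread_mutex_unlock(&registry_lock);
	return 0;
}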
- kernel/generic/src/syscall/syscall.c
r6b9e85b r8b655705
 	} else {
 		printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id);
-		task_kill(TASK->taskid);
-		thread_exit();
+		task_kill_self(true);
 	}
 
…
 	(syshandler_t) sys_task_set_name,
 	(syshandler_t) sys_task_kill,
+	(syshandler_t) sys_task_exit,
 	(syshandler_t) sys_program_spawn_loader,
 
…
 	(syshandler_t) sys_as_area_change_flags,
 	(syshandler_t) sys_as_area_destroy,
+	(syshandler_t) sys_as_get_unmapped_area,
 
 	/* IPC related syscalls. */
…
 	(syshandler_t) sys_ipc_poke,
 	(syshandler_t) sys_ipc_hangup,
-	(syshandler_t) sys_ipc_register_irq,
-	(syshandler_t) sys_ipc_unregister_irq,
 	(syshandler_t) sys_ipc_connect_kbox,
 
…
 	(syshandler_t) sys_physmem_map,
 	(syshandler_t) sys_iospace_enable,
+	(syshandler_t) sys_register_irq,
+	(syshandler_t) sys_unregister_irq,
 
 	/* Sysinfo syscalls */
- kernel/generic/src/sysinfo/stats.c
r6b9e85b r8b655705
 static size_t get_task_virtmem(as_t *as)
 {
-	size_t result = 0;
-
 	/*
-	 * We are holding some spinlocks here and therefore are not allowed to
-	 * block. Only attempt to lock the address space and address space area
-	 * mutexes conditionally. If it is not possible to lock either object,
-	 * allow the statistics to be inexact by skipping the respective object.
-	 *
-	 * Note that it may be infinitely better to let the address space
-	 * management code compute these statistics as it proceeds instead of
-	 * having them calculated here over and over again here.
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
 	 */
 
 	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
-		return result * PAGE_SIZE;
+		return 0;
+
+	size_t pages = 0;
 
 	/* Walk the B+ tree and count pages */
…
 			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
 				continue;
-			result += area->pages;
+
+			pages += area->pages;
 			mutex_unlock(&area->lock);
 		}
…
 	mutex_unlock(&as->lock);
 
-	return result * PAGE_SIZE;
+	return (pages << PAGE_WIDTH);
+}
+
+/** Get the resident (used) size of a virtual address space
+ *
+ * @param as Address space.
+ *
+ * @return Size of the resident (used) virtual address space (bytes).
+ *
+ */
+static size_t get_task_resmem(as_t *as)
+{
+	/*
+	 * We are holding spinlocks here and therefore are not allowed to
+	 * block. Only attempt to lock the address space and address space
+	 * area mutexes conditionally. If it is not possible to lock either
+	 * object, return inexact statistics by skipping the respective object.
+	 */
+
+	if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+		return 0;
+
+	size_t pages = 0;
+
+	/* Walk the B+ tree and count pages */
+	link_t *cur;
+	for (cur = as->as_area_btree.leaf_head.next;
+	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+		btree_node_t *node =
+		    list_get_instance(cur, btree_node_t, leaf_link);
+
+		unsigned int i;
+		for (i = 0; i < node->keys; i++) {
+			as_area_t *area = node->value[i];
+
+			if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+				continue;
+
+			pages += area->resident;
+			mutex_unlock(&area->lock);
+		}
+	}
+
+	mutex_unlock(&as->lock);
+
+	return (pages << PAGE_WIDTH);
 }
…
 	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
 	stats_task->virtmem = get_task_virtmem(task->as);
+	stats_task->resmem = get_task_resmem(task->as);
 	stats_task->threads = atomic_get(&task->refcount);
 	task_get_accounting(task, &(stats_task->ucycles),
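Both accounting walks use trylock so that a statistics reader, which runs under spinlocks and must not block, simply skips any contended area and accepts an approximate total. The idiom in a runnable user-space form (pthreads here; the kernel uses its own mutex primitives and the SYNCH_FAILED wrapper):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define AREAS 4

static pthread_mutex_t area_lock[AREAS];
static size_t area_pages[AREAS] = { 16, 32, 8, 64 };

/* Sum page counts without ever blocking: contended entries are
 * skipped, so the total may be an undercount, which the caller
 * accepts in exchange for never stalling. */
static size_t count_pages_nonblocking(void)
{
	size_t pages = 0;

	for (int i = 0; i < AREAS; i++) {
		if (pthread_mutex_trylock(&area_lock[i]) != 0)
			continue;  /* Contended: accept inexact stats. */

		pages += area_pages[i];
		pthread_mutex_unlock(&area_lock[i]);
	}

	return pages;
}

int main(void)
{
	for (int i = 0; i < AREAS; i++)
		pthread_mutex_init(&area_lock[i], NULL);

	printf("approx pages: %zu\n", count_pages_nonblocking());
	return 0;
}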
- kernel/generic/src/sysinfo/sysinfo.c
r6b9e85b r8b655705
 #include <arch/asm.h>
 #include <errno.h>
+#include <macros.h>
 
 /** Maximal sysinfo path length */
…
  * character must be null).
  *
- * The user space buffer must be sized exactly according
- * to the size of the binary data, otherwise the request
- * fails.
+ * If the user space buffer size does not equal
+ * the actual size of the returned data, the data
+ * is truncated. Whether this is actually a fatal
+ * error or the data can be still interpreted as valid
+ * depends on the nature of the data and has to be
+ * decided by the user space.
+ *
+ * The actual size of data returned is stored to
+ * size_ptr.
  *
  * @param path_ptr    Sysinfo path in the user address space.
…
  *                    to store the binary data.
  * @param buffer_size User space buffer size.
+ * @param size_ptr    User space pointer where to store the
+ *                    binary data size.
  *
  * @return Error code (EOK in case of no error).
…
 sysarg_t sys_sysinfo_get_data(void *path_ptr, size_t path_size,
-    void *buffer_ptr, size_t buffer_size)
+    void *buffer_ptr, size_t buffer_size, size_t *size_ptr)
 {
 	int rc;
 
 	/* Get the item */
-	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size, false);
-
+	sysinfo_return_t ret = sysinfo_get_item_uspace(path_ptr, path_size,
+	    false);
+
 	/* Only constant or generated binary data is considered */
-	if ((ret.tag == SYSINFO_VAL_DATA) || (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) {
-		/* Check destination buffer size */
-		if (ret.data.size == buffer_size)
-			rc = copy_to_uspace(buffer_ptr, ret.data.data,
-			    ret.data.size);
-		else
-			rc = ENOMEM;
+	if ((ret.tag == SYSINFO_VAL_DATA) ||
+	    (ret.tag == SYSINFO_VAL_FUNCTION_DATA)) {
+		size_t size = min(ret.data.size, buffer_size);
+		rc = copy_to_uspace(buffer_ptr, ret.data.data, size);
+		if (rc == EOK)
+			rc = copy_to_uspace(size_ptr, &size, sizeof(size));
 	} else
 		rc = EINVAL;
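Instead of failing with ENOMEM on any size mismatch, the syscall now copies min(data size, buffer size) bytes and reports the copied amount through size_ptr, leaving the truncation decision to user space. A hedged sketch of a client coping with that contract; the stub below only mimics the kernel behaviour described in the hunk and is not the real interface:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the real syscall: copies at most buffer_size bytes
 * and reports the copied size, mirroring the new kernel behaviour. */
static int sysinfo_get_data_stub(const char *path, void *buffer,
    size_t buffer_size, size_t *size)
{
	static const char data[] = "example-binary-data";
	(void) path;

	*size = sizeof(data) < buffer_size ? sizeof(data) : buffer_size;
	memcpy(buffer, data, *size);
	return 0;
}

int main(void)
{
	char buf[8];
	size_t size;

	if (sysinfo_get_data_stub("some.path", buf, sizeof(buf), &size) != 0)
		return 1;

	/* A completely filled buffer means the data may have been
	 * truncated; whether that is fatal depends on the item, as
	 * the new doc comment explains. */
	if (size == sizeof(buf))
		printf("possibly truncated to %zu bytes\n", size);
	else
		printf("got %zu bytes\n", size);

	return 0;
}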
- kernel/generic/src/time/clock.c
r6b9e85b r8b655705
 	clock_parea.pbase = (uintptr_t) faddr;
 	clock_parea.frames = 1;
+	clock_parea.unpriv = true;
 	ddi_parea_register(&clock_parea);
 
…
 	 *
 	 */
-	sysinfo_set_item_val("clock.cacheable", NULL, (sysarg_t) true);
 	sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
 }