Changeset 9e87562 in mainline
- Timestamp:
- 2017-09-18T20:52:12Z (7 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 6abfd250
- Parents:
- e5f5ce0
- Location:
- kernel/generic
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/cap/cap.h
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 38 / new 38 @@
 #include <typedefs.h>
 #include <ipc/ipc.h>
+#include <adt/list.h>
+#include <synch/mutex.h>
 
 #define MAX_CAPS 64
-
-#define for_each_cap(task, cap, type) \
-	for (int i = 0, l = 1; i < MAX_CAPS && l; i++) \
-		for (cap_t *(cap) = cap_get((task), i, (type)); \
-		    (cap) && !(l = 0); (cap) = NULL, l = 1)
-
-#define for_each_cap_current(cap, type) \
-	for_each_cap(TASK, (cap), (type))
 
 typedef enum {
@@ old 53 / new 47 @@
 	CAP_TYPE_ALLOCATED,
 	CAP_TYPE_PHONE,
-	CAP_TYPE_IRQ
+	CAP_TYPE_IRQ,
+	CAP_TYPE_MAX
 } cap_type_t;
@@ old 62 / new 57 @@
 	bool (* can_reclaim)(struct cap *);
 
+	/* Link to the task's capabilities of the same type. */
+	link_t link;
+
 	/* The underlying kernel object. */
 	void *kobject;
 } cap_t;
 
+typedef struct cap_info {
+	mutex_t lock;
+
+	list_t type_list[CAP_TYPE_MAX];
+
+	cap_t *caps;
+} cap_info_t;
+
 struct task;
 
-void caps_task_alloc(struct task *);
-void caps_task_free(struct task *);
-void caps_task_init(struct task *);
+extern void caps_task_alloc(struct task *);
+extern void caps_task_free(struct task *);
+extern void caps_task_init(struct task *);
+extern bool caps_apply_to_all(struct task *, cap_type_t,
+    bool (*)(cap_t *, void *), void *);
+extern void caps_lock(struct task *);
+extern void caps_unlock(struct task *);
 
 extern void cap_initialize(cap_t *, int);
 extern cap_t *cap_get(struct task *, int, cap_type_t);
-extern cap_t *cap_get_current(int, cap_type_t);
 extern int cap_alloc(struct task *);
+extern void cap_publish(struct task *, int, cap_type_t, void *);
+extern cap_t *cap_unpublish(struct task *, int, cap_type_t);
 extern void cap_free(struct task *, int);
kernel/generic/include/proc/task.h
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 60 / new 60 @@
 #include <abi/sysinfo.h>
 #include <arch.h>
+#include <cap/cap.h>
 
 #define TASK THE->task
@@ old 98 / new 99 @@
 
 	/** Capabilities */
-	struct cap *caps;
+	cap_info_t *cap_info;
 
 	/* IPC stuff */
kernel/generic/src/cap/cap.c
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 35 / new 35 @@
 #include <cap/cap.h>
 #include <proc/task.h>
-#include <synch/spinlock.h>
+#include <synch/mutex.h>
 #include <abi/errno.h>
 #include <mm/slab.h>
+#include <adt/list.h>
 
 void cap_initialize(cap_t *cap, int handle)
@@ old 44 / new 45 @@
 	cap->handle = handle;
 	cap->can_reclaim = NULL;
+	link_initialize(&cap->link);
 }
 
 void caps_task_alloc(task_t *task)
 {
-	task->caps = malloc(sizeof(cap_t) * MAX_CAPS, 0);
+	task->cap_info = (cap_info_t *) malloc(sizeof(cap_info_t), 0);
+	task->cap_info->caps = malloc(sizeof(cap_t) * MAX_CAPS, 0);
 }
 
 void caps_task_init(task_t *task)
 {
+	mutex_initialize(&task->cap_info->lock, MUTEX_PASSIVE);
+
+	for (int i = 0; i < CAP_TYPE_MAX; i++)
+		list_initialize(&task->cap_info->type_list[i]);
+
 	for (int i = 0; i < MAX_CAPS; i++)
-		cap_initialize(&task->caps[i], i);
+		cap_initialize(&task->cap_info->caps[i], i);
 }
 
 void caps_task_free(task_t *task)
 {
-	free(task->caps);
+	free(task->cap_info->caps);
+	free(task->cap_info);
+}
+
+bool caps_apply_to_all(task_t *task, cap_type_t type,
+    bool (*cb)(cap_t *, void *), void *arg)
+{
+	bool done = true;
+
+	mutex_lock(&task->cap_info->lock);
+	list_foreach_safe(task->cap_info->type_list[type], cur, next) {
+		cap_t *cap = list_get_instance(cur, cap_t, link);
+		done = cb(cap, arg);
+		if (!done)
+			break;
+	}
+	mutex_unlock(&task->cap_info->lock);
+
+	return done;
+}
+
+void caps_lock(task_t *task)
+{
+	mutex_lock(&task->cap_info->lock);
+}
+
+void caps_unlock(task_t *task)
+{
+	mutex_unlock(&task->cap_info->lock);
 }
 
 cap_t *cap_get(task_t *task, int handle, cap_type_t type)
 {
+	assert(mutex_locked(&task->cap_info->lock));
+
 	if ((handle < 0) || (handle >= MAX_CAPS))
 		return NULL;
-	if (task->caps[handle].type != type)
+	if (task->cap_info->caps[handle].type != type)
 		return NULL;
-	return &task->caps[handle];
-}
-
-cap_t *cap_get_current(int handle, cap_type_t type)
-{
-	return cap_get(TASK, handle, type);
+	return &task->cap_info->caps[handle];
 }
@@ old 80 / new 113 @@
 	int handle;
 
-	irq_spinlock_lock(&task->lock, true);
+	mutex_lock(&task->cap_info->lock);
 	for (handle = 0; handle < MAX_CAPS; handle++) {
-		cap_t *cap = &task->caps[handle];
+		cap_t *cap = &task->cap_info->caps[handle];
 		if (cap->type > CAP_TYPE_ALLOCATED) {
 			if (cap->can_reclaim && cap->can_reclaim(cap))
 […]
 		if (cap->type == CAP_TYPE_INVALID) {
 			cap->type = CAP_TYPE_ALLOCATED;
-			irq_spinlock_unlock(&task->lock, true);
+			mutex_unlock(&task->cap_info->lock);
 			return handle;
 		}
 	}
-	irq_spinlock_unlock(&task->lock, true);
+	mutex_unlock(&task->cap_info->lock);
 
 	return ELIMIT;
+}
+
+void cap_publish(task_t *task, int handle, cap_type_t type, void *kobject)
+{
+	mutex_lock(&task->cap_info->lock);
+	cap_t *cap = cap_get(task, handle, CAP_TYPE_ALLOCATED);
+	assert(cap);
+	cap->type = type;
+	cap->kobject = kobject;
+	list_append(&cap->link, &task->cap_info->type_list[type]);
+	mutex_unlock(&task->cap_info->lock);
+}
+
+cap_t *cap_unpublish(task_t *task, int handle, cap_type_t type)
+{
+	cap_t *cap;
+
+	mutex_lock(&task->cap_info->lock);
+	cap = cap_get(task, handle, type);
+	if (cap) {
+		list_remove(&cap->link);
+		cap->type = CAP_TYPE_ALLOCATED;
+	}
+	mutex_unlock(&task->cap_info->lock);
+
+	return cap;
 }
@@ old 102 / new 161 @@
 	assert(handle >= 0);
 	assert(handle < MAX_CAPS);
-	assert(task->caps[handle].type == CAP_TYPE_ALLOCATED);
+	assert(task->cap_info->caps[handle].type == CAP_TYPE_ALLOCATED);
 
-	irq_spinlock_lock(&task->lock, true);
-	cap_initialize(&task->caps[handle], handle);
-	irq_spinlock_unlock(&task->lock, true);
+	mutex_lock(&task->cap_info->lock);
+	cap_initialize(&task->cap_info->caps[handle], handle);
+	mutex_unlock(&task->cap_info->lock);
 }
kernel/generic/src/ipc/ipc.c
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 710 / new 710 @@ (trailing whitespace removed, two places)
 	 * Nota bene: there may still be answers waiting for pick up.
 	 */
-	spinlock_unlock(&TASK->active_calls_lock); 
+	spinlock_unlock(&TASK->active_calls_lock);
 	return;
 }
@@ old 723 / new 723 @@
 	 * call on the list.
 	 */
-	spinlock_unlock(&TASK->active_calls_lock); 
+	spinlock_unlock(&TASK->active_calls_lock);
 	goto restart;
 }
@@ old 730 / new 730 @@
 	goto restart;
+}
+
+static bool phone_cap_wait_cb(cap_t *cap, void *arg)
+{
+	phone_t *phone = (phone_t *) cap->kobject;
+	bool *restart = (bool *) arg;
+
+	mutex_lock(&phone->lock);
+	if ((phone->state == IPC_PHONE_HUNGUP) &&
+	    (atomic_get(&phone->active_calls) == 0)) {
+		phone->state = IPC_PHONE_FREE;
+		phone->callee = NULL;
+	}
+
+	/*
+	 * We might have had some IPC_PHONE_CONNECTING phones at the beginning
+	 * of ipc_cleanup(). Depending on whether these were forgotten or
+	 * answered, they will eventually enter the IPC_PHONE_FREE or
+	 * IPC_PHONE_CONNECTED states, respectively. In the latter case, the
+	 * other side may slam the open phones at any time, in which case we
+	 * will get an IPC_PHONE_SLAMMED phone.
+	 */
+	if ((phone->state == IPC_PHONE_CONNECTED) ||
+	    (phone->state == IPC_PHONE_SLAMMED)) {
+		mutex_unlock(&phone->lock);
+		ipc_phone_hangup(phone);
+		/*
+		 * Now there may be one extra active call, which needs to be
+		 * forgotten.
+		 */
+		ipc_forget_all_active_calls();
+		*restart = true;
+		return false;
+	}
+
+	/*
+	 * If the hangup succeeded, it has sent a HANGUP message, the IPC is now
+	 * in HUNGUP state, we wait for the reply to come
+	 */
+	if (phone->state != IPC_PHONE_FREE) {
+		mutex_unlock(&phone->lock);
+		return false;
+	}
+
+	mutex_unlock(&phone->lock);
+	return true;
 }
@@ old 736 / new 782 @@
 {
 	call_t *call;
-	bool all_clean;
+	bool restart;
 
 restart:
@@ old 743 / new 789 @@
 	 * Locking is needed as there may be connection handshakes in progress.
 	 */
-	all_clean = true;
-	for_each_cap_current(cap, CAP_TYPE_PHONE) {
-		phone_t *phone = (phone_t *) cap->kobject;
-
-		mutex_lock(&phone->lock);
-		if ((phone->state == IPC_PHONE_HUNGUP) &&
-		    (atomic_get(&phone->active_calls) == 0)) {
-			phone->state = IPC_PHONE_FREE;
-			phone->callee = NULL;
-		}
-
-		/*
-		 * We might have had some IPC_PHONE_CONNECTING phones at the
-		 * beginning of ipc_cleanup(). Depending on whether these were
-		 * forgotten or answered, they will eventually enter the
-		 * IPC_PHONE_FREE or IPC_PHONE_CONNECTED states, respectively.
-		 * In the latter case, the other side may slam the open phones
-		 * at any time, in which case we will get an IPC_PHONE_SLAMMED
-		 * phone.
-		 */
-		if ((phone->state == IPC_PHONE_CONNECTED) ||
-		    (phone->state == IPC_PHONE_SLAMMED)) {
-			mutex_unlock(&phone->lock);
-			ipc_phone_hangup(phone);
-			/*
-			 * Now there may be one extra active call, which needs
-			 * to be forgotten.
-			 */
-			ipc_forget_all_active_calls();
-			goto restart;
-		}
-
-		/*
-		 * If the hangup succeeded, it has sent a HANGUP message, the
-		 * IPC is now in HUNGUP state, we wait for the reply to come
-		 */
-		if (phone->state != IPC_PHONE_FREE) {
-			mutex_unlock(&phone->lock);
-			all_clean = false;
-			break;
-		}
-
-		mutex_unlock(&phone->lock);
-	}
-
-	/* Got into cleanup */
-	if (all_clean)
+	restart = false;
+	if (caps_apply_to_all(TASK, CAP_TYPE_PHONE, phone_cap_wait_cb,
+	    &restart)) {
+		/* Got into cleanup */
 		return;
+	}
+	if (restart)
+		goto restart;
+
 	call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT,
 	    SYNCH_FLAGS_NONE);
@@ old 800 / new 806 @@
 	ipc_call_free(call);
 	goto restart;
+}
+
+static bool phone_cap_cleanup_cb(cap_t *cap, void *arg)
+{
+	phone_t *phone = (phone_t *) cap->kobject;
+	ipc_phone_hangup(phone);
+	return true;
+}
+
+static bool irq_cap_cleanup_cb(cap_t *cap, void *arg)
+{
+	ipc_irq_unsubscribe(&TASK->answerbox, cap->handle);
+	return true;
 }
@@ old 821 / new 840 @@
 	/* Disconnect all our phones ('ipc_phone_hangup') */
-	for_each_cap_current(cap, CAP_TYPE_PHONE) {
-		phone_t *phone = (phone_t *) cap->kobject;
-		ipc_phone_hangup(phone);
-	}
+	caps_apply_to_all(TASK, CAP_TYPE_PHONE, phone_cap_cleanup_cb, NULL);
 
 	/* Unsubscribe from any event notifications. */
@@ old 831 / new 847 @@
 	/* Disconnect all connected IRQs */
-	for_each_cap_current(cap, CAP_TYPE_IRQ) {
-		ipc_irq_unsubscribe(&TASK->answerbox, cap->handle);
-	}
+	caps_apply_to_all(TASK, CAP_TYPE_IRQ, irq_cap_cleanup_cb, NULL);
 
 	/* Disconnect all phones connected to our regular answerbox */
@@ old 896 / new 910 @@
 }
 
+static bool print_task_phone_cb(cap_t *cap, void *arg)
+{
+	phone_t *phone = (phone_t *) cap->kobject;
+
+	mutex_lock(&phone->lock);
+	if (phone->state != IPC_PHONE_FREE) {
+		printf("%-11d %7" PRIun " ", cap->handle,
+		    atomic_get(&phone->active_calls));
+
+		switch (phone->state) {
+		case IPC_PHONE_CONNECTING:
+			printf("connecting");
+			break;
+		case IPC_PHONE_CONNECTED:
+			printf("connected to %" PRIu64 " (%s)",
+			    phone->callee->task->taskid,
+			    phone->callee->task->name);
+			break;
+		case IPC_PHONE_SLAMMED:
+			printf("slammed by %p", phone->callee);
+			break;
+		case IPC_PHONE_HUNGUP:
+			printf("hung up by %p", phone->callee);
+			break;
+		default:
+			break;
+		}
+
+		printf("\n");
+	}
+	mutex_unlock(&phone->lock);
+
+	return true;
+}
+
 /** List answerbox contents.
@@ old 905 / new 954 @@
 	irq_spinlock_lock(&tasks_lock, true);
 	task_t *task = task_find_by_id(taskid);
-
 	if (!task) {
 		irq_spinlock_unlock(&tasks_lock, true);
 		return;
 	}
-
-	/* Hand-over-hand locking */
-	irq_spinlock_exchange(&tasks_lock, &task->lock);
+	task_hold(task);
+	irq_spinlock_unlock(&tasks_lock, true);
 
 	printf("[phone cap] [calls] [state\n");
 
-	for_each_cap(task, cap, CAP_TYPE_PHONE) {
-		phone_t *phone = (phone_t *) cap->kobject;
-
-		if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
-			printf("%-11d (mutex busy)\n", cap->handle);
-			continue;
-		}
-
-		if (phone->state != IPC_PHONE_FREE) {
-			printf("%-11d %7" PRIun " ", cap->handle,
-			    atomic_get(&phone->active_calls));
-
-			switch (phone->state) {
-			case IPC_PHONE_CONNECTING:
-				printf("connecting");
-				break;
-			case IPC_PHONE_CONNECTED:
-				printf("connected to %" PRIu64 " (%s)",
-				    phone->callee->task->taskid,
-				    phone->callee->task->name);
-				break;
-			case IPC_PHONE_SLAMMED:
-				printf("slammed by %p", phone->callee);
-				break;
-			case IPC_PHONE_HUNGUP:
-				printf("hung up by %p", phone->callee);
-				break;
-			default:
-				break;
-			}
-
-			printf("\n");
-		}
-
-		mutex_unlock(&phone->lock);
-	}
-
+	caps_apply_to_all(task, CAP_TYPE_PHONE, print_task_phone_cb, NULL);
+
+	irq_spinlock_lock(&task->lock, true);
 	irq_spinlock_lock(&task->answerbox.lock, false);
@@ old 974 / new 987 @@
 	irq_spinlock_unlock(&task->answerbox.lock, false);
 	irq_spinlock_unlock(&task->lock, true);
+
+	task_release(task);
 }
kernel/generic/src/ipc/ipcrsc.c
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 175 / new 175 @@
 phone_t *phone_get(task_t *task, int handle)
 {
+	phone_t *phone;
+
+	caps_lock(task);
 	cap_t *cap = cap_get(task, handle, CAP_TYPE_PHONE);
+	phone = (phone_t *) cap->kobject;
+	caps_unlock(task);
 	if (!cap)
 		return NULL;
 
-	return (phone_t *) cap->kobject;
+	return phone;
 }
@@ old 217 / new 222 @@
 	phone->state = IPC_PHONE_CONNECTING;
 
-	irq_spinlock_lock(&task->lock, true);
+	// FIXME: phase this out eventually
+	mutex_lock(&task->cap_info->lock);
 	cap_t *cap = cap_get(task, handle, CAP_TYPE_ALLOCATED);
-	cap->type = CAP_TYPE_PHONE;
-	cap->kobject = (void *) phone;
 	cap->can_reclaim = phone_can_reclaim;
-	irq_spinlock_unlock(&task->lock, true);
+	mutex_unlock(&task->cap_info->lock);
+
+	cap_publish(task, handle, CAP_TYPE_PHONE, phone);
 }
@@ old 237 / new 243 @@
 void phone_dealloc(int handle)
 {
-	irq_spinlock_lock(&TASK->lock, true);
-	cap_t *cap = cap_get_current(handle, CAP_TYPE_PHONE);
+	cap_t *cap = cap_unpublish(TASK, handle, CAP_TYPE_PHONE);
 	assert(cap);
-	cap->type = CAP_TYPE_ALLOCATED;
-	irq_spinlock_unlock(&TASK->lock, true);
 
 	phone_t *phone = (phone_t *) cap->kobject;
kernel/generic/src/ipc/irq.c
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 314 / new 314 @@
 	}
 
-	cap_t *cap = cap_get_current(handle, CAP_TYPE_ALLOCATED);
-	assert(cap);
-
 	irq_initialize(irq);
 	irq->inr = inr;
@@ old 327 / new 324 @@
 	irq->notif_cfg.counter = 0;
 
-	cap->kobject = (void *) irq;
-
 	/*
-	 * Insert the IRQ structure into the uspace IRQ hash table and retype
-	 * the capability. By retyping the capability inside the critical
-	 * section, we make sure another thread cannot attempt to unregister the
-	 * IRQ before it is inserted into the hash table.
+	 * Insert the IRQ structure into the uspace IRQ hash table.
 	 */
 	irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
 	irq_spinlock_lock(&irq->lock, false);
 
-	cap->type = CAP_TYPE_IRQ;
 	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
 
 	irq_spinlock_unlock(&irq->lock, false);
 	irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);
+
+	cap_publish(TASK, handle, CAP_TYPE_IRQ, irq);
 
 	return handle;
@@ old 357 / new 350 @@
 int ipc_irq_unsubscribe(answerbox_t *box, int handle)
 {
-	irq_spinlock_lock(&TASK->lock, true);
-	cap_t *cap = cap_get_current(handle, CAP_TYPE_IRQ);
-	if (!cap) {
-		irq_spinlock_unlock(&TASK->lock, true);
+	cap_t *cap = cap_unpublish(TASK, handle, CAP_TYPE_IRQ);
+	if (!cap)
 		return ENOENT;
-	}
-	/* Make sure only one thread can win the race to unsubscribe. */
-	cap->type = CAP_TYPE_ALLOCATED;
-	irq_spinlock_unlock(&TASK->lock, true);
 
 	irq_t *irq = (irq_t *) cap->kobject;
kernel/generic/src/proc/task.c
re5f5ce0 → r9e87562 (reconstructed unified diff)

@@ old 616 / new 616 @@
 	if (*additional)
 		printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
-		    "%9" PRIua, task->taskid, ucycles, usuffix, kcycles,
+		    "%9" PRIua "\n", task->taskid, ucycles, usuffix, kcycles,
 		    ksuffix, atomic_get(&task->refcount));
 	else
@@ old 622 / new 622 @@
 	    task->taskid, task->name, task->container, task, task->as);
 #endif
-
-	if (*additional) {
-		for_each_cap(task, cap, CAP_TYPE_PHONE) {
-			phone_t *phone = (phone_t *) cap->kobject;
-			if (phone->callee)
-				printf(" %d:%p", cap->handle, phone->callee);
-		}
-		printf("\n");
-	}
 
 	irq_spinlock_unlock(&task->lock, false);
Note:
See TracChangeset
for help on using the changeset viewer.