Changeset da1bafb in mainline for kernel/generic/src
- Timestamp: 2010-05-24T18:57:31Z (15 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- Location: kernel/generic/src
- Files: 36 edited
kernel/generic/src/console/chardev.c
r666f492 rda1bafb 52 52 indev->name = name; 53 53 waitq_initialize(&indev->wq); 54 spinlock_initialize(&indev->lock, "indev");54 irq_spinlock_initialize(&indev->lock, "chardev.indev.lock"); 55 55 indev->counter = 0; 56 56 indev->index = 0; … … 68 68 ASSERT(indev); 69 69 70 spinlock_lock(&indev->lock);70 irq_spinlock_lock(&indev->lock, true); 71 71 if (indev->counter == INDEV_BUFLEN - 1) { 72 72 /* Buffer full */ 73 spinlock_unlock(&indev->lock);73 irq_spinlock_unlock(&indev->lock, true); 74 74 return; 75 75 } … … 81 81 indev->index = indev->index % INDEV_BUFLEN; 82 82 waitq_wakeup(&indev->wq, WAKEUP_FIRST); 83 spinlock_unlock(&indev->lock);83 irq_spinlock_unlock(&indev->lock, true); 84 84 } 85 85 … … 114 114 115 115 waitq_sleep(&indev->wq); 116 ipl_t ipl = interrupts_disable(); 117 spinlock_lock(&indev->lock); 116 irq_spinlock_lock(&indev->lock, true); 118 117 wchar_t ch = indev->buffer[(indev->index - indev->counter) % INDEV_BUFLEN]; 119 118 indev->counter--; 120 spinlock_unlock(&indev->lock); 121 interrupts_restore(ipl); 119 irq_spinlock_unlock(&indev->lock, true); 122 120 123 121 return ch; … … 134 132 { 135 133 outdev->name = name; 136 spinlock_initialize(&outdev->lock, " outdev");134 spinlock_initialize(&outdev->lock, "chardev.outdev.lock"); 137 135 link_initialize(&outdev->link); 138 136 list_initialize(&outdev->list); -
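The chardev.c hunks show the pattern that runs through this whole changeset: an explicit interrupts_disable()/interrupts_restore() pair wrapped around a plain spinlock is collapsed into a single irq_spinlock whose lock and unlock calls take a boolean saying whether to disable and restore interrupts. A minimal sketch of the old and new idioms, assuming the irq_spinlock API exactly as used in these hunks (the header locations mentioned in the comment are assumptions, and the counter is purely illustrative):

/*
 * Sketch only: relies on the kernel's own headers (something like
 * <synch/spinlock.h> and <arch.h>); not buildable outside the tree.
 */
SPINLOCK_INITIALIZE(demo_old_lock);
IRQ_SPINLOCK_INITIALIZE(demo_new_lock);

static unsigned int demo_counter;

/* Old idiom: the caller manages the interrupt level by hand. */
static void bump_old(void)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&demo_old_lock);
	demo_counter++;
	spinlock_unlock(&demo_old_lock);
	interrupts_restore(ipl);
}

/* New idiom: the boolean tells irq_spinlock_lock()/_unlock() to disable
 * interrupts on lock and restore them on unlock. */
static void bump_new(void)
{
	irq_spinlock_lock(&demo_new_lock, true);
	demo_counter++;
	irq_spinlock_unlock(&demo_new_lock, true);
}

Passing false instead, as later hunks do, means "interrupts are already off, just take the lock", which is how nested locks avoid saving the interrupt state twice.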
kernel/generic/src/console/cmd.c
r666f492 rda1bafb 510 510 void cmd_initialize(cmd_info_t *cmd) 511 511 { 512 spinlock_initialize(&cmd->lock, "cmd ");512 spinlock_initialize(&cmd->lock, "cmd.lock"); 513 513 link_initialize(&cmd->link); 514 514 } … … 681 681 continue; 682 682 683 thread_t *t; 684 if ((t = thread_create((void (*)(void *)) cmd_call0, (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { 685 spinlock_lock(&t->lock); 686 t->cpu = &cpus[i]; 687 spinlock_unlock(&t->lock); 688 printf("cpu%u: ", i); 689 thread_ready(t); 690 thread_join(t); 691 thread_detach(t); 683 thread_t *thread; 684 if ((thread = thread_create((void (*)(void *)) cmd_call0, 685 (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { 686 irq_spinlock_lock(&thread->lock, true); 687 thread->cpu = &cpus[i]; 688 irq_spinlock_unlock(&thread->lock, true); 689 690 printf("cpu%" PRIs ": ", i); 691 692 thread_ready(thread); 693 thread_join(thread); 694 thread_detach(thread); 692 695 } else 693 printf("Unable to create thread for cpu% u\n", i);696 printf("Unable to create thread for cpu%" PRIs "\n", i); 694 697 } 695 698 … … 1049 1052 /* Update and read thread accounting 1050 1053 for benchmarking */ 1051 ipl_t ipl = interrupts_disable(); 1052 spinlock_lock(&TASK->lock); 1054 irq_spinlock_lock(&TASK->lock, true); 1053 1055 uint64_t ucycles0, kcycles0; 1054 1056 task_get_accounting(TASK, &ucycles0, &kcycles0); 1055 spinlock_unlock(&TASK->lock); 1056 interrupts_restore(ipl); 1057 irq_spinlock_unlock(&TASK->lock, true); 1057 1058 1058 1059 /* Execute the test */ … … 1061 1062 1062 1063 /* Update and read thread accounting */ 1063 uint64_t ucycles1, kcycles1; 1064 ipl = interrupts_disable(); 1065 spinlock_lock(&TASK->lock); 1064 uint64_t ucycles1, kcycles1; 1065 irq_spinlock_lock(&TASK->lock, true); 1066 1066 task_get_accounting(TASK, &ucycles1, &kcycles1); 1067 spinlock_unlock(&TASK->lock); 1068 interrupts_restore(ipl); 1067 irq_spinlock_unlock(&TASK->lock, true); 1069 1068 1070 1069 uint64_t ucycles, kcycles; … … 1072 1071 order_suffix(ucycles1 - ucycles0, &ucycles, &usuffix); 1073 1072 order_suffix(kcycles1 - kcycles0, &kcycles, &ksuffix); 1074 1073 1075 1074 printf("Time: %" PRIu64 "%c user cycles, %" PRIu64 "%c kernel cycles\n", 1076 1075 ucycles, usuffix, kcycles, ksuffix); 1077 1076 1078 1077 if (ret == NULL) { … … 1080 1079 return true; 1081 1080 } 1082 1081 1083 1082 printf("%s\n", ret); 1084 1083 return false; … … 1106 1105 /* Update and read thread accounting 1107 1106 for benchmarking */ 1108 ipl_t ipl = interrupts_disable(); 1109 spinlock_lock(&TASK->lock); 1107 irq_spinlock_lock(&TASK->lock, true); 1110 1108 uint64_t ucycles0, kcycles0; 1111 1109 task_get_accounting(TASK, &ucycles0, &kcycles0); 1112 spinlock_unlock(&TASK->lock); 1113 interrupts_restore(ipl); 1110 irq_spinlock_unlock(&TASK->lock, true); 1114 1111 1115 1112 /* Execute the test */ … … 1118 1115 1119 1116 /* Update and read thread accounting */ 1120 ipl = interrupts_disable(); 1121 spinlock_lock(&TASK->lock); 1117 irq_spinlock_lock(&TASK->lock, true); 1122 1118 uint64_t ucycles1, kcycles1; 1123 1119 task_get_accounting(TASK, &ucycles1, &kcycles1); 1124 spinlock_unlock(&TASK->lock); 1125 interrupts_restore(ipl); 1126 1120 irq_spinlock_unlock(&TASK->lock, true); 1121 1127 1122 if (ret != NULL) { 1128 1123 printf("%s\n", ret); … … 1135 1130 order_suffix(kcycles1 - kcycles0, &kcycles, &ksuffix); 1136 1131 printf("OK (%" PRIu64 "%c user cycles, %" PRIu64 "%c kernel cycles)\n", 1137 1132 ucycles, usuffix, kcycles, ksuffix); 1138 1133 } 1139 1134 -
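The benchmarking hunks in cmd.c repeat one measurement pattern: take TASK->lock, snapshot the task's user and kernel cycle counters with task_get_accounting(), run the test, snapshot again, and print the difference. A condensed sketch of that loop using only the calls visible above (the test-function signature is an assumption based on how the return value is checked):

/* Sketch: run a test function and report its cycle cost, mirroring
 * the cmd.c benchmark hunks. */
static void measure(const char *(*test_func)(void))
{
	uint64_t ucycles0, kcycles0;
	irq_spinlock_lock(&TASK->lock, true);
	task_get_accounting(TASK, &ucycles0, &kcycles0);
	irq_spinlock_unlock(&TASK->lock, true);

	const char *ret = test_func();    /* the code being measured */

	uint64_t ucycles1, kcycles1;
	irq_spinlock_lock(&TASK->lock, true);
	task_get_accounting(TASK, &ucycles1, &kcycles1);
	irq_spinlock_unlock(&TASK->lock, true);

	printf("Time: %" PRIu64 " user cycles, %" PRIu64 " kernel cycles\n",
	    ucycles1 - ucycles0, kcycles1 - kcycles0);

	if (ret != NULL)
		printf("%s\n", ret);
}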
kernel/generic/src/console/console.c
r666f492 rda1bafb 62 62 /** Kernel log initialized */ 63 63 static bool klog_inited = false; 64 64 65 /** First kernel log characters */ 65 66 static size_t klog_start = 0; 67 66 68 /** Number of valid kernel log characters */ 67 69 static size_t klog_len = 0; 70 68 71 /** Number of stored (not printed) kernel log characters */ 69 72 static size_t klog_stored = 0; 73 70 74 /** Number of stored kernel log characters for uspace */ 71 75 static size_t klog_uspace = 0; … … 84 88 }; 85 89 86 static void stdout_write(outdev_t * dev, wchar_t ch, bool silent);87 static void stdout_redraw(outdev_t * dev);90 static void stdout_write(outdev_t *, wchar_t, bool); 91 static void stdout_redraw(outdev_t *); 88 92 89 93 static outdev_operations_t stdout_ops = { … … 174 178 stdout->op->redraw(stdout); 175 179 176 /* Force the console to print the prompt */ 177 if ((stdin) && (prev)) 180 if ((stdin) && (prev)) { 181 /* 182 * Force the console to print the prompt. 183 */ 178 184 indev_push_character(stdin, '\n'); 185 } 179 186 } 180 187 -
kernel/generic/src/cpu/cpu.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief CPU subsystem initialization and listing. 36 36 */ 37 37 38 38 #include <cpu.h> 39 39 #include <arch.h> … … 58 58 */ 59 59 void cpu_init(void) { 60 unsigned int i, j;61 62 60 #ifdef CONFIG_SMP 63 61 if (config.cpu_active == 1) { 64 62 #endif /* CONFIG_SMP */ 63 65 64 cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count, 66 65 FRAME_ATOMIC); 67 66 if (!cpus) 68 67 panic("Cannot allocate CPU structures."); 69 70 /* initialize everything */68 69 /* Initialize everything */ 71 70 memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0); 72 71 72 size_t i; 73 73 for (i = 0; i < config.cpu_count; i++) { 74 cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_ATOMIC);75 74 cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, 75 FRAME_KA | FRAME_ATOMIC); 76 76 cpus[i].id = i; 77 77 78 spinlock_initialize(&cpus[i].lock, "cpu_t.lock"); 79 78 irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock"); 79 80 unsigned int j; 80 81 for (j = 0; j < RQ_COUNT; j++) { 81 spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock");82 irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock"); 82 83 list_initialize(&cpus[i].rq[j].rq_head); 83 84 } … … 87 88 } 88 89 #endif /* CONFIG_SMP */ 89 90 90 91 CPU = &cpus[config.cpu_active - 1]; 91 92 92 CPU->active = 1;93 CPU->tlb_active = 1;93 CPU->active = true; 94 CPU->tlb_active = true; 94 95 95 96 cpu_identify(); … … 100 101 void cpu_list(void) 101 102 { 102 unsigned int i;103 103 size_t i; 104 104 105 for (i = 0; i < config.cpu_count; i++) { 105 106 if (cpus[i].active) -
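cpu.c shows the runtime counterpart of the static lock initializers used elsewhere in this changeset: structures allocated during boot get their locks set up with irq_spinlock_initialize() plus a descriptive name such as "cpus[].rq[].lock". A sketch of the same idiom for a hypothetical dynamically allocated array (the structure, its fields, and the irq_spinlock_t type name are assumptions):

/* Sketch: allocate an array at boot and initialize an irq spinlock in
 * each element, mirroring the cpu_init() hunk. Names are illustrative. */
typedef struct {
	irq_spinlock_t lock;    /* assumed type name of an irq spinlock */
	unsigned int value;
} demo_t;

static demo_t *demo;

static void demo_init(size_t count)
{
	demo = (demo_t *) malloc(sizeof(demo_t) * count, FRAME_ATOMIC);
	if (!demo)
		panic("Cannot allocate demo structures.");

	memsetb(demo, sizeof(demo_t) * count, 0);

	size_t i;
	for (i = 0; i < count; i++)
		irq_spinlock_initialize(&demo[i].lock, "demo[].lock");
}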
kernel/generic/src/ddi/ddi.c
r666f492 rda1bafb 59 59 static btree_t parea_btree; 60 60 61 /** Initialize DDI. */ 61 /** Initialize DDI. 62 * 63 */ 62 64 void ddi_init(void) 63 65 { … … 97 99 * 98 100 */ 99 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags) 101 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, 102 unsigned int flags) 100 103 { 101 104 ASSERT(TASK); … … 114 117 backend_data.frames = pages; 115 118 116 ipl_t ipl = interrupts_disable();117 118 119 /* Find the zone of the physical memory */ 119 spinlock_lock(&zones.lock);120 irq_spinlock_lock(&zones.lock, true); 120 121 size_t znum = find_zone(ADDR2PFN(pf), pages, 0); 121 122 … … 124 125 * -> assume it is hardware device and allow mapping 125 126 */ 126 spinlock_unlock(&zones.lock);127 irq_spinlock_unlock(&zones.lock, true); 127 128 goto map; 128 129 } … … 130 131 if (zones.info[znum].flags & ZONE_FIRMWARE) { 131 132 /* Frames are part of firmware */ 132 spinlock_unlock(&zones.lock);133 irq_spinlock_unlock(&zones.lock, true); 133 134 goto map; 134 135 } 135 136 136 137 if (zone_flags_available(zones.info[znum].flags)) { 137 /* Frames are part of physical memory, check if the memory 138 /* 139 * Frames are part of physical memory, check if the memory 138 140 * region is enabled for mapping. 139 141 */ 140 spinlock_unlock(&zones.lock);142 irq_spinlock_unlock(&zones.lock, true); 141 143 142 144 mutex_lock(&parea_lock); … … 154 156 } 155 157 156 spinlock_unlock(&zones.lock); 158 irq_spinlock_unlock(&zones.lock, true); 159 157 160 err: 158 interrupts_restore(ipl);159 161 return ENOENT; 160 162 161 163 map: 162 interrupts_restore(ipl);163 164 164 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, 165 165 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { … … 196 196 return EPERM; 197 197 198 ipl_t ipl = interrupts_disable(); 199 spinlock_lock(&tasks_lock); 198 irq_spinlock_lock(&tasks_lock, true); 200 199 201 200 task_t *task = task_find_by_id(id); … … 207 206 * context. 208 207 */ 209 spinlock_unlock(&tasks_lock); 210 interrupts_restore(ipl); 208 irq_spinlock_unlock(&tasks_lock, true); 211 209 return ENOENT; 212 210 } 213 211 214 212 /* Lock the task and release the lock protecting tasks_btree. */ 215 spinlock_lock(&task->lock); 216 spinlock_unlock(&tasks_lock); 213 irq_spinlock_exchange(&tasks_lock, &task->lock); 217 214 218 215 int rc = ddi_iospace_enable_arch(task, ioaddr, size); 219 216 220 spinlock_unlock(&task->lock); 221 interrupts_restore(ipl); 217 irq_spinlock_unlock(&task->lock, true); 222 218 223 219 return rc; -
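The ddi_iospace_enable() hunk replaces the old sequence "lock tasks_lock, lock the task, unlock tasks_lock" with a single irq_spinlock_exchange() call, keeping interrupts disabled across the hand-over. A sketch of that lookup idiom as it appears above (the surrounding function is hypothetical):

/* Sketch: find a task and return with only task->lock held, following
 * the hand-over-hand pattern used in ddi.c and ipc.c. */
static int with_task_locked(task_id_t id)
{
	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(id);
	if (!task) {
		irq_spinlock_unlock(&tasks_lock, true);
		return ENOENT;
	}

	/* Take task->lock and release tasks_lock in one step. */
	irq_spinlock_exchange(&tasks_lock, &task->lock);

	/* ... operate on the locked task ... */

	irq_spinlock_unlock(&task->lock, true);
	return EOK;
}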
kernel/generic/src/ddi/irq.c
r666f492 rda1bafb 32 32 /** 33 33 * @file 34 * @brief 34 * @brief IRQ dispatcher. 35 35 * 36 36 * This file provides means of connecting IRQs with particular … … 78 78 #include <arch.h> 79 79 80 #define KEY_INR 81 #define KEY_DEVNO 82 83 /** 84 * Spinlock protecting the kernel IRQ hash table.80 #define KEY_INR 0 81 #define KEY_DEVNO 1 82 83 /** Spinlock protecting the kernel IRQ hash table. 84 * 85 85 * This lock must be taken only when interrupts are disabled. 86 */ 87 SPINLOCK_INITIALIZE(irq_kernel_hash_table_lock); 86 * 87 */ 88 IRQ_SPINLOCK_STATIC_INITIALIZE(irq_kernel_hash_table_lock); 89 88 90 /** The kernel IRQ hash table. */ 89 91 static hash_table_t irq_kernel_hash_table; 90 92 91 /** 92 * Spinlock protecting the uspace IRQ hash table.93 /** Spinlock protecting the uspace IRQ hash table. 94 * 93 95 * This lock must be taken only when interrupts are disabled. 94 */ 95 SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock); 96 * 97 */ 98 IRQ_SPINLOCK_INITIALIZE(irq_uspace_hash_table_lock); 99 96 100 /** The uspace IRQ hash table. */ 97 101 hash_table_t irq_uspace_hash_table; … … 100 104 * Hash table operations for cases when we know that 101 105 * there will be collisions between different keys. 106 * 102 107 */ 103 108 static size_t irq_ht_hash(unative_t *key); … … 116 121 * However, there might be still collisions among 117 122 * elements with single key (sharing of one IRQ). 123 * 118 124 */ 119 125 static size_t irq_lin_hash(unative_t *key); … … 132 138 /** Initialize IRQ subsystem. 133 139 * 134 * @param inrs Numbers of unique IRQ numbers or INRs.140 * @param inrs Numbers of unique IRQ numbers or INRs. 135 141 * @param chains Number of chains in the hash table. 142 * 136 143 */ 137 144 void irq_init(size_t inrs, size_t chains) … … 166 173 memsetb(irq, sizeof(irq_t), 0); 167 174 link_initialize(&irq->link); 168 spinlock_initialize(&irq->lock, "irq.lock");175 irq_spinlock_initialize(&irq->lock, "irq.lock"); 169 176 link_initialize(&irq->notif_cfg.link); 170 177 irq->inr = -1; 171 178 irq->devno = -1; 172 179 173 180 irq_initialize_arch(irq); 174 181 } … … 180 187 * function pointer and handler() function pointer. 181 188 * 182 * @param irq IRQ structure belonging to a device. 183 * @return True on success, false on failure. 189 * @param irq IRQ structure belonging to a device. 190 * 191 * @return True on success, false on failure. 
192 * 184 193 */ 185 194 void irq_register(irq_t *irq) 186 195 { 187 ipl_t ipl;188 196 unative_t key[] = { 189 197 (unative_t) irq->inr, … … 191 199 }; 192 200 193 ipl = interrupts_disable(); 194 spinlock_lock(&irq_kernel_hash_table_lock); 195 spinlock_lock(&irq->lock); 201 irq_spinlock_lock(&irq_kernel_hash_table_lock, true); 202 irq_spinlock_lock(&irq->lock, false); 196 203 hash_table_insert(&irq_kernel_hash_table, key, &irq->link); 197 spinlock_unlock(&irq->lock); 198 spinlock_unlock(&irq_kernel_hash_table_lock); 199 interrupts_restore(ipl); 204 irq_spinlock_unlock(&irq->lock, false); 205 irq_spinlock_unlock(&irq_kernel_hash_table_lock, true); 200 206 } 201 207 … … 208 214 unative_t key[] = { 209 215 (unative_t) inr, 210 (unative_t) -1 /* search will use claim() instead of devno */216 (unative_t) -1 /* Search will use claim() instead of devno */ 211 217 }; 212 218 213 spinlock_lock(&irq_uspace_hash_table_lock);219 irq_spinlock_lock(&irq_uspace_hash_table_lock, false); 214 220 lnk = hash_table_find(&irq_uspace_hash_table, key); 215 221 if (lnk) { 216 irq_t *irq; 217 218 irq = hash_table_get_instance(lnk, irq_t, link); 219 spinlock_unlock(&irq_uspace_hash_table_lock); 222 irq_t *irq = hash_table_get_instance(lnk, irq_t, link); 223 irq_spinlock_unlock(&irq_uspace_hash_table_lock, false); 220 224 return irq; 221 225 } 222 spinlock_unlock(&irq_uspace_hash_table_lock);226 irq_spinlock_unlock(&irq_uspace_hash_table_lock, false); 223 227 224 228 return NULL; … … 233 237 unative_t key[] = { 234 238 (unative_t) inr, 235 (unative_t) -1 /* search will use claim() instead of devno */239 (unative_t) -1 /* Search will use claim() instead of devno */ 236 240 }; 237 241 238 spinlock_lock(&irq_kernel_hash_table_lock);242 irq_spinlock_lock(&irq_kernel_hash_table_lock, false); 239 243 lnk = hash_table_find(&irq_kernel_hash_table, key); 240 244 if (lnk) { 241 irq_t *irq; 242 243 irq = hash_table_get_instance(lnk, irq_t, link); 244 spinlock_unlock(&irq_kernel_hash_table_lock); 245 irq_t *irq = hash_table_get_instance(lnk, irq_t, link); 246 irq_spinlock_unlock(&irq_kernel_hash_table_lock, false); 245 247 return irq; 246 248 } 247 spinlock_unlock(&irq_kernel_hash_table_lock);249 irq_spinlock_unlock(&irq_kernel_hash_table_lock, false); 248 250 249 251 return NULL; … … 263 265 * 264 266 * @return IRQ structure of the respective device or NULL. 267 * 265 268 */ 266 269 irq_t *irq_dispatch_and_lock(inr_t inr) 267 270 { 268 irq_t *irq;269 270 271 /* 271 272 * If the kernel console is silenced, … … 277 278 */ 278 279 if (silent) { 279 irq = irq_dispatch_and_lock_uspace(inr);280 irq_t *irq = irq_dispatch_and_lock_uspace(inr); 280 281 if (irq) 281 282 return irq; 283 282 284 return irq_dispatch_and_lock_kernel(inr); 283 285 } 284 286 285 irq = irq_dispatch_and_lock_kernel(inr);287 irq_t *irq = irq_dispatch_and_lock_kernel(inr); 286 288 if (irq) 287 289 return irq; 290 288 291 return irq_dispatch_and_lock_uspace(inr); 289 292 } … … 301 304 * 302 305 * @return Index into the hash table. 306 * 303 307 */ 304 308 size_t irq_ht_hash(unative_t key[]) … … 322 326 * This function assumes interrupts are already disabled. 323 327 * 324 * @param key Keys (i.e. inr and devno).328 * @param key Keys (i.e. inr and devno). 325 329 * @param keys This is 2. 326 330 * @param item The item to compare the key with. 327 331 * 328 332 * @return True on match or false otherwise. 
333 * 329 334 */ 330 335 bool irq_ht_compare(unative_t key[], size_t keys, link_t *item) … … 333 338 inr_t inr = (inr_t) key[KEY_INR]; 334 339 devno_t devno = (devno_t) key[KEY_DEVNO]; 335 340 336 341 bool rv; 337 342 338 spinlock_lock(&irq->lock);343 irq_spinlock_lock(&irq->lock, false); 339 344 if (devno == -1) { 340 345 /* Invoked by irq_dispatch_and_lock(). */ … … 348 353 /* unlock only on non-match */ 349 354 if (!rv) 350 spinlock_unlock(&irq->lock);351 355 irq_spinlock_unlock(&irq->lock, false); 356 352 357 return rv; 353 358 } … … 361 366 irq_t *irq __attribute__((unused)) 362 367 = hash_table_get_instance(lnk, irq_t, link); 363 spinlock_unlock(&irq->lock);368 irq_spinlock_unlock(&irq->lock, false); 364 369 } 365 370 … … 374 379 * 375 380 * @return Index into the hash table. 381 * 376 382 */ 377 383 size_t irq_lin_hash(unative_t key[]) … … 395 401 * This function assumes interrupts are already disabled. 396 402 * 397 * @param key Keys (i.e. inr and devno).403 * @param key Keys (i.e. inr and devno). 398 404 * @param keys This is 2. 399 405 * @param item The item to compare the key with. 400 406 * 401 407 * @return True on match or false otherwise. 408 * 402 409 */ 403 410 bool irq_lin_compare(unative_t key[], size_t keys, link_t *item) … … 407 414 bool rv; 408 415 409 spinlock_lock(&irq->lock);416 irq_spinlock_lock(&irq->lock, false); 410 417 if (devno == -1) { 411 418 /* Invoked by irq_dispatch_and_lock() */ … … 418 425 /* unlock only on non-match */ 419 426 if (!rv) 420 spinlock_unlock(&irq->lock);427 irq_spinlock_unlock(&irq->lock, false); 421 428 422 429 return rv; … … 425 432 /** Unlock IRQ structure after hash_table_remove(). 426 433 * 427 * @param lnk Link in the removed and locked IRQ structure. 434 * @param lnk Link in the removed and locked IRQ structure. 435 * 428 436 */ 429 437 void irq_lin_remove(link_t *lnk) … … 431 439 irq_t *irq __attribute__((unused)) 432 440 = hash_table_get_instance(lnk, irq_t, link); 433 spinlock_unlock(&irq->lock);441 irq_spinlock_unlock(&irq->lock, false); 434 442 } 435 443 -
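Two details of the IRQ dispatcher hunks are worth noting: statically allocated locks are now declared with IRQ_SPINLOCK_INITIALIZE or IRQ_SPINLOCK_STATIC_INITIALIZE, and a lock nested inside a region where an outer irq spinlock already disabled interrupts is taken and released with false, so the interrupt state is saved and restored only once. A minimal sketch of that nesting rule (the lock names are illustrative):

/* Sketch: the outer lock manages the interrupt state (true); the inner
 * lock is taken with false because interrupts are already disabled,
 * exactly as irq_register() does with the hash table lock and irq->lock. */
IRQ_SPINLOCK_STATIC_INITIALIZE(outer_lock);
IRQ_SPINLOCK_STATIC_INITIALIZE(inner_lock);

static void nested_update(void)
{
	irq_spinlock_lock(&outer_lock, true);
	irq_spinlock_lock(&inner_lock, false);

	/* ... critical section under both locks ... */

	irq_spinlock_unlock(&inner_lock, false);
	irq_spinlock_unlock(&outer_lock, true);
}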
kernel/generic/src/interrupt/interrupt.c
r666f492 rda1bafb 32 32 /** 33 33 * @file 34 * @brief 34 * @brief Interrupt redirector. 35 35 * 36 36 * This file provides means of registering interrupt handlers 37 37 * by kernel functions and calling the handlers when interrupts 38 38 * occur. 39 * 39 40 */ 40 41 … … 61 62 62 63 /** Register exception handler 63 * 64 * @param n Exception number 65 * @param name Description 66 * @param f Exception handler 67 */ 68 iroutine exc_register(int n, const char *name, iroutine f) 64 * 65 * @param n Exception number 66 * @param name Description 67 * @param handler Exception handler 68 * 69 */ 70 iroutine exc_register(int n, const char *name, iroutine handler) 69 71 { 70 72 ASSERT(n < IVT_ITEMS); 71 73 72 iroutine old;73 74 74 spinlock_lock(&exctbl_lock); 75 75 76 old = exc_table[n].f;77 exc_table[n].f = f;76 iroutine old = exc_table[n].f; 77 exc_table[n].f = handler; 78 78 exc_table[n].name = name; 79 79 … … 87 87 * Called directly from the assembler code. 88 88 * CPU is interrupts_disable()'d. 89 * 89 90 */ 90 91 void exc_dispatch(int n, istate_t *istate) 91 92 { 92 93 ASSERT(n < IVT_ITEMS); 93 94 94 95 /* Account user cycles */ 95 96 if (THREAD) { 96 spinlock_lock(&THREAD->lock);97 irq_spinlock_lock(&THREAD->lock, false); 97 98 thread_update_accounting(true); 98 spinlock_unlock(&THREAD->lock);99 } 100 99 irq_spinlock_unlock(&THREAD->lock, false); 100 } 101 101 102 #ifdef CONFIG_UDEBUG 102 if (THREAD) THREAD->udebug.uspace_state = istate; 103 if (THREAD) 104 THREAD->udebug.uspace_state = istate; 103 105 #endif 104 106 105 107 exc_table[n].f(n + IVT_FIRST, istate); 106 108 107 109 #ifdef CONFIG_UDEBUG 108 if (THREAD) THREAD->udebug.uspace_state = NULL; 109 #endif 110 110 if (THREAD) 111 THREAD->udebug.uspace_state = NULL; 112 #endif 113 111 114 /* This is a safe place to exit exiting thread */ 112 if ( THREAD && THREAD->interrupted && istate_from_uspace(istate))115 if ((THREAD) && (THREAD->interrupted) && (istate_from_uspace(istate))) 113 116 thread_exit(); 114 117 115 118 if (THREAD) { 116 spinlock_lock(&THREAD->lock);119 irq_spinlock_lock(&THREAD->lock, false); 117 120 thread_update_accounting(false); 118 spinlock_unlock(&THREAD->lock); 119 } 120 } 121 122 /** Default 'null' exception handler */ 121 irq_spinlock_unlock(&THREAD->lock, false); 122 } 123 } 124 125 /** Default 'null' exception handler 126 * 127 */ 123 128 static void exc_undef(int n, istate_t *istate) 124 129 { … … 127 132 } 128 133 129 /** Terminate thread and task if exception came from userspace. */ 134 /** Terminate thread and task if exception came from userspace. 135 * 136 */ 130 137 void fault_if_from_uspace(istate_t *istate, const char *fmt, ...) 131 138 { 132 task_t *task = TASK;133 va_list args;134 135 139 if (!istate_from_uspace(istate)) 136 140 return; 137 141 138 142 printf("Task %s (%" PRIu64 ") killed due to an exception at " 139 "program counter %p.\n", task->name, task->taskid,143 "program counter %p.\n", TASK->name, TASK->taskid, 140 144 istate_get_pc(istate)); 141 145 142 146 stack_trace_istate(istate); 143 147 144 148 printf("Kill message: "); 149 150 va_list args; 145 151 va_start(args, fmt); 146 152 vprintf(fmt, args); 147 153 va_end(args); 148 154 printf("\n"); 149 155 150 156 /* 151 157 * Userspace can subscribe for FAULT events to take action … … 158 164 event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid), 159 165 UPPER32(TASK->taskid), (unative_t) THREAD); 160 166 161 167 #ifdef CONFIG_UDEBUG 162 168 /* Wait for a debugging session. 
*/ … … 164 170 #endif 165 171 } 166 167 task_kill( task->taskid);172 173 task_kill(TASK->taskid); 168 174 thread_exit(); 169 175 } … … 171 177 #ifdef CONFIG_KCONSOLE 172 178 173 /** kconsole cmd - print all exceptions */ 179 /** Print all exceptions 180 * 181 */ 174 182 static int cmd_exc_print(cmd_arg_t *argv) 175 183 { 176 184 #if (IVT_ITEMS > 0) 177 185 unsigned int i; 178 186 179 187 spinlock_lock(&exctbl_lock); 180 188 181 189 #ifdef __32_BITS__ 182 190 printf("Exc Description Handler Symbol\n"); 183 191 printf("--- -------------------- ---------- --------\n"); 184 192 #endif 185 193 186 194 #ifdef __64_BITS__ 187 195 printf("Exc Description Handler Symbol\n"); … … 191 199 for (i = 0; i < IVT_ITEMS; i++) { 192 200 const char *symbol = symtab_fmt_name_lookup((unative_t) exc_table[i].f); 193 201 194 202 #ifdef __32_BITS__ 195 203 printf("%-3u %-20s %10p %s\n", i + IVT_FIRST, exc_table[i].name, 196 204 exc_table[i].f, symbol); 197 205 #endif 198 206 199 207 #ifdef __64_BITS__ 200 208 printf("%-3u %-20s %18p %s\n", i + IVT_FIRST, exc_table[i].name, … … 216 224 return 1; 217 225 } 218 219 226 220 227 static cmd_info_t exc_info = { … … 227 234 }; 228 235 229 #endif 230 231 /** Initialize generic exception handling support */ 236 #endif /* CONFIG_KCONSOLE */ 237 238 /** Initialize generic exception handling support 239 * 240 */ 232 241 void exc_init(void) 233 242 { 234 int i; 235 243 (void) exc_undef; 244 245 #if (IVT_ITEMS > 0) 246 unsigned int i; 247 236 248 for (i = 0; i < IVT_ITEMS; i++) 237 249 exc_register(i, "undef", (iroutine) exc_undef); 238 250 #endif 251 239 252 #ifdef CONFIG_KCONSOLE 240 253 cmd_initialize(&exc_info); -
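The interrupt.c hunk renames exc_register()'s handler parameter and makes exc_init() register the default handler only when IVT_ITEMS is non-zero. Because exc_register() returns the previously installed routine, a caller can chain to it; a hypothetical example (the handler name and body are illustrative, and the exact iroutine signature is assumed from the call site in exc_dispatch()):

/* Sketch: install a custom handler for exception n, remembering the
 * old routine so it can still be invoked. */
static iroutine old_handler;

static void my_handler(int n, istate_t *istate)
{
	/* ... inspect or log the exception state ... */

	if (old_handler)
		old_handler(n, istate);   /* fall through to the previous handler */
}

static void install_handler(int n)
{
	old_handler = exc_register(n, "my_handler", (iroutine) my_handler);
}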
kernel/generic/src/ipc/event.c
r666f492 rda1bafb 137 137 IPC_SET_ARG5(call->data, a5); 138 138 139 ipl_t ipl = interrupts_disable(); 140 spinlock_lock(&events[evno].answerbox->irq_lock); 139 irq_spinlock_lock(&events[evno].answerbox->irq_lock, true); 141 140 list_append(&call->link, &events[evno].answerbox->irq_notifs); 142 spinlock_unlock(&events[evno].answerbox->irq_lock); 143 interrupts_restore(ipl); 141 irq_spinlock_unlock(&events[evno].answerbox->irq_lock, true); 144 142 145 143 waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST); -
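The event.c hunk is the notification-delivery pattern in miniature: append the call to the answerbox's irq_notifs list under its irq_lock, then wake one waiter. A sketch using only the fields and calls visible above (the wrapper function itself is hypothetical):

/* Sketch: queue a notification call on an answerbox and wake a waiter. */
static void post_notification(answerbox_t *box, call_t *call)
{
	irq_spinlock_lock(&box->irq_lock, true);
	list_append(&call->link, &box->irq_notifs);
	irq_spinlock_unlock(&box->irq_lock, true);

	waitq_wakeup(&box->wq, WAKEUP_FIRST);
}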
kernel/generic/src/ipc/ipc.c
r666f492 rda1bafb 66 66 /** Initialize a call structure. 67 67 * 68 * @param call Call structure to be initialized. 68 * @param call Call structure to be initialized. 69 * 69 70 */ 70 71 static void _ipc_call_init(call_t *call) … … 77 78 78 79 /** Allocate and initialize a call structure. 79 * 80 * 80 81 * The call is initialized, so that the reply will be directed to 81 82 * TASK->answerbox. 82 83 * 83 * @param flags Parameters for slab_alloc (e.g FRAME_ATOMIC). 84 * 85 * @return If flags permit it, return NULL, or initialized kernel 86 * call structure. 87 */ 88 call_t *ipc_call_alloc(int flags) 89 { 90 call_t *call; 91 92 call = slab_alloc(ipc_call_slab, flags); 84 * @param flags Parameters for slab_alloc (e.g FRAME_ATOMIC). 85 * 86 * @return If flags permit it, return NULL, or initialized kernel 87 * call structure. 88 * 89 */ 90 call_t *ipc_call_alloc(unsigned int flags) 91 { 92 call_t *call = slab_alloc(ipc_call_slab, flags); 93 93 if (call) 94 94 _ipc_call_init(call); 95 95 96 96 return call; 97 97 } … … 99 99 /** Deallocate a call structure. 100 100 * 101 * @param call Call structure to be freed. 101 * @param call Call structure to be freed. 102 * 102 103 */ 103 104 void ipc_call_free(call_t *call) … … 111 112 /** Initialize an answerbox structure. 112 113 * 113 * @param box Answerbox structure to be initialized. 114 * @param task Task to which the answerbox belongs. 114 * @param box Answerbox structure to be initialized. 115 * @param task Task to which the answerbox belongs. 116 * 115 117 */ 116 118 void ipc_answerbox_init(answerbox_t *box, task_t *task) 117 119 { 118 spinlock_initialize(&box->lock, "ipc_box_lock");119 spinlock_initialize(&box->irq_lock, "ipc_box_irqlock");120 irq_spinlock_initialize(&box->lock, "ipc.box.lock"); 121 irq_spinlock_initialize(&box->irq_lock, "ipc.box.irqlock"); 120 122 waitq_initialize(&box->wq); 121 123 link_initialize(&box->sync_box_link); … … 131 133 /** Connect a phone to an answerbox. 132 134 * 133 * @param phone Initialized phone structure. 134 * @param box Initialized answerbox structure. 135 * @param phone Initialized phone structure. 136 * @param box Initialized answerbox structure. 137 * 135 138 */ 136 139 void ipc_phone_connect(phone_t *phone, answerbox_t *box) 137 140 { 138 141 mutex_lock(&phone->lock); 139 142 140 143 phone->state = IPC_PHONE_CONNECTED; 141 144 phone->callee = box; 142 143 spinlock_lock(&box->lock);145 146 irq_spinlock_lock(&box->lock, true); 144 147 list_append(&phone->link, &box->connected_phones); 145 spinlock_unlock(&box->lock);146 148 irq_spinlock_unlock(&box->lock, true); 149 147 150 mutex_unlock(&phone->lock); 148 151 } … … 150 153 /** Initialize a phone structure. 151 154 * 152 * @param phone Phone structure to be initialized. 155 * @param phone Phone structure to be initialized. 156 * 153 157 */ 154 158 void ipc_phone_init(phone_t *phone) … … 162 166 /** Helper function to facilitate synchronous calls. 163 167 * 164 * @param phone Destination kernel phone structure. 165 * @param request Call structure with request. 166 * 167 * @return EOK on success or EINTR if the sleep was interrupted. 168 * @param phone Destination kernel phone structure. 169 * @param request Call structure with request. 170 * 171 * @return EOK on success or EINTR if the sleep was interrupted. 
172 * 168 173 */ 169 174 int ipc_call_sync(phone_t *phone, call_t *request) 170 175 { 171 answerbox_t *sync_box; 172 ipl_t ipl; 173 174 sync_box = slab_alloc(ipc_answerbox_slab, 0); 176 answerbox_t *sync_box = slab_alloc(ipc_answerbox_slab, 0); 175 177 ipc_answerbox_init(sync_box, TASK); 176 178 177 179 /* 178 180 * Put the answerbox on the TASK's list of synchronous answerboxes so 179 181 * that it can be cleaned up if the call is interrupted. 180 182 */ 181 ipl = interrupts_disable(); 182 spinlock_lock(&TASK->lock); 183 irq_spinlock_lock(&TASK->lock, true); 183 184 list_append(&sync_box->sync_box_link, &TASK->sync_box_head); 184 spinlock_unlock(&TASK->lock); 185 interrupts_restore(ipl); 186 185 irq_spinlock_unlock(&TASK->lock, true); 186 187 187 /* We will receive data in a special box. */ 188 188 request->callerbox = sync_box; 189 189 190 190 ipc_call(phone, request); 191 191 if (!ipc_wait_for_call(sync_box, SYNCH_NO_TIMEOUT, 192 192 SYNCH_FLAGS_INTERRUPTIBLE)) { 193 193 /* The answerbox and the call will be freed by ipc_cleanup(). */ 194 194 return EINTR; 195 195 } 196 196 197 197 /* 198 198 * The answer arrived without interruption so we can remove the 199 199 * answerbox from the TASK's list of synchronous answerboxes. 200 200 */ 201 (void) interrupts_disable(); 202 spinlock_lock(&TASK->lock); 201 irq_spinlock_lock(&TASK->lock, true); 203 202 list_remove(&sync_box->sync_box_link); 204 spinlock_unlock(&TASK->lock); 205 interrupts_restore(ipl); 206 203 irq_spinlock_unlock(&TASK->lock, true); 204 207 205 slab_free(ipc_answerbox_slab, sync_box); 208 206 return EOK; … … 211 209 /** Answer a message which was not dispatched and is not listed in any queue. 212 210 * 213 * @param call Call structure to be answered. 214 * @param selflocked If true, then TASK->answebox is locked. 211 * @param call Call structure to be answered. 212 * @param selflocked If true, then TASK->answebox is locked. 213 * 215 214 */ 216 215 static void _ipc_answer_free_call(call_t *call, bool selflocked) … … 218 217 answerbox_t *callerbox = call->callerbox; 219 218 bool do_lock = ((!selflocked) || callerbox != (&TASK->answerbox)); 220 ipl_t ipl; 221 219 222 220 /* Count sent answer */ 223 ipl = interrupts_disable(); 224 spinlock_lock(&TASK->lock); 221 irq_spinlock_lock(&TASK->lock, true); 225 222 TASK->ipc_info.answer_sent++; 226 spinlock_unlock(&TASK->lock); 227 interrupts_restore(ipl); 228 223 irq_spinlock_unlock(&TASK->lock, true); 224 229 225 call->flags |= IPC_CALL_ANSWERED; 230 226 231 227 if (call->flags & IPC_CALL_FORWARDED) { 232 228 if (call->caller_phone) { … … 235 231 } 236 232 } 237 233 238 234 if (do_lock) 239 spinlock_lock(&callerbox->lock); 235 irq_spinlock_lock(&callerbox->lock, true); 236 240 237 list_append(&call->link, &callerbox->answers); 238 241 239 if (do_lock) 242 spinlock_unlock(&callerbox->lock); 240 irq_spinlock_unlock(&callerbox->lock, true); 241 243 242 waitq_wakeup(&callerbox->wq, WAKEUP_FIRST); 244 243 } … … 246 245 /** Answer a message which is in a callee queue. 247 246 * 248 * @param box Answerbox that is answering the message. 249 * @param call Modified request that is being sent back. 247 * @param box Answerbox that is answering the message. 248 * @param call Modified request that is being sent back. 
249 * 250 250 */ 251 251 void ipc_answer(answerbox_t *box, call_t *call) 252 252 { 253 253 /* Remove from active box */ 254 spinlock_lock(&box->lock);254 irq_spinlock_lock(&box->lock, true); 255 255 list_remove(&call->link); 256 spinlock_unlock(&box->lock); 256 irq_spinlock_unlock(&box->lock, true); 257 257 258 /* Send back answer */ 258 259 _ipc_answer_free_call(call, false); … … 264 265 * message and sending it as a normal answer. 265 266 * 266 * @param phone Phone structure the call should appear to come from. 267 * @param call Call structure to be answered. 268 * @param err Return value to be used for the answer. 267 * @param phone Phone structure the call should appear to come from. 268 * @param call Call structure to be answered. 269 * @param err Return value to be used for the answer. 270 * 269 271 */ 270 272 void ipc_backsend_err(phone_t *phone, call_t *call, unative_t err) … … 278 280 /** Unsafe unchecking version of ipc_call. 279 281 * 280 * @param phone Phone structure the call comes from. 281 * @param box Destination answerbox structure. 282 * @param call Call structure with request. 282 * @param phone Phone structure the call comes from. 283 * @param box Destination answerbox structure. 284 * @param call Call structure with request. 285 * 283 286 */ 284 287 static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call) 285 288 { 286 ipl_t ipl;287 288 289 /* Count sent ipc call */ 289 ipl = interrupts_disable(); 290 spinlock_lock(&TASK->lock); 290 irq_spinlock_lock(&TASK->lock, true); 291 291 TASK->ipc_info.call_sent++; 292 spinlock_unlock(&TASK->lock); 293 interrupts_restore(ipl); 294 292 irq_spinlock_unlock(&TASK->lock, true); 293 295 294 if (!(call->flags & IPC_CALL_FORWARDED)) { 296 295 atomic_inc(&phone->active_calls); 297 296 call->data.phone = phone; 298 297 } 299 300 spinlock_lock(&box->lock);298 299 irq_spinlock_lock(&box->lock, true); 301 300 list_append(&call->link, &box->calls); 302 spinlock_unlock(&box->lock); 301 irq_spinlock_unlock(&box->lock, true); 302 303 303 waitq_wakeup(&box->wq, WAKEUP_FIRST); 304 304 } … … 306 306 /** Send an asynchronous request using a phone to an answerbox. 307 307 * 308 * @param phone Phone structure the call comes from and which is 309 * connected to the destination answerbox. 310 * @param call Call structure with request. 311 * 312 * @return Return 0 on success, ENOENT on error. 308 * @param phone Phone structure the call comes from and which is 309 * connected to the destination answerbox. 310 * @param call Call structure with request. 311 * 312 * @return Return 0 on success, ENOENT on error. 313 * 313 314 */ 314 315 int ipc_call(phone_t *phone, call_t *call) 315 316 { 316 answerbox_t *box;317 318 317 mutex_lock(&phone->lock); 319 318 if (phone->state != IPC_PHONE_CONNECTED) { … … 328 327 ipc_backsend_err(phone, call, ENOENT); 329 328 } 329 330 330 return ENOENT; 331 331 } 332 box = phone->callee; 332 333 answerbox_t *box = phone->callee; 333 334 _ipc_call(phone, box, call); 334 335 … … 342 343 * lazily later. 343 344 * 344 * @param phone Phone structure to be hung up. 345 * 346 * @return Return 0 if the phone is disconnected. 347 * Return -1 if the phone was already disconnected. 345 * @param phone Phone structure to be hung up. 346 * 347 * @return 0 if the phone is disconnected. 348 * @return -1 if the phone was already disconnected. 
349 * 348 350 */ 349 351 int ipc_phone_hangup(phone_t *phone) 350 352 { 351 answerbox_t *box;352 call_t *call;353 354 353 mutex_lock(&phone->lock); 355 354 if (phone->state == IPC_PHONE_FREE || … … 359 358 return -1; 360 359 } 361 box = phone->callee; 360 361 answerbox_t *box = phone->callee; 362 362 if (phone->state != IPC_PHONE_SLAMMED) { 363 363 /* Remove myself from answerbox */ 364 spinlock_lock(&box->lock);364 irq_spinlock_lock(&box->lock, true); 365 365 list_remove(&phone->link); 366 spinlock_unlock(&box->lock);367 368 call = ipc_call_alloc(0);366 irq_spinlock_unlock(&box->lock, true); 367 368 call_t *call = ipc_call_alloc(0); 369 369 IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP); 370 370 call->flags |= IPC_CALL_DISCARD_ANSWER; 371 371 _ipc_call(phone, box, call); 372 372 } 373 373 374 374 phone->state = IPC_PHONE_HUNGUP; 375 375 mutex_unlock(&phone->lock); 376 376 377 377 return 0; 378 378 } … … 380 380 /** Forwards call from one answerbox to another one. 381 381 * 382 * @param call 383 * @param newphone 384 * @param oldbox 385 * @param mode 386 * 387 * @return Return0 if forwarding succeeded or an error code if388 * there waserror.389 * 382 * @param call Call structure to be redirected. 383 * @param newphone Phone structure to target answerbox. 384 * @param oldbox Old answerbox structure. 385 * @param mode Flags that specify mode of the forward operation. 386 * 387 * @return 0 if forwarding succeeded or an error code if 388 * there was an error. 389 * 390 390 * The return value serves only as an information for the forwarder, 391 391 * the original caller is notified automatically with EFORWARD. 392 * /393 int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox, int mode) 394 { 395 ipl_t ipl; 396 392 * 393 */ 394 int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox, 395 unsigned int mode) 396 { 397 397 /* Count forwarded calls */ 398 ipl = interrupts_disable(); 399 spinlock_lock(&TASK->lock); 398 irq_spinlock_lock(&TASK->lock, true); 400 399 TASK->ipc_info.forwarded++; 401 spinlock_unlock(&TASK->lock); 402 interrupts_restore(ipl); 403 404 spinlock_lock(&oldbox->lock); 400 irq_spinlock_pass(&TASK->lock, &oldbox->lock); 405 401 list_remove(&call->link); 406 spinlock_unlock(&oldbox->lock);407 402 irq_spinlock_unlock(&oldbox->lock, true); 403 408 404 if (mode & IPC_FF_ROUTE_FROM_ME) { 409 405 if (!call->caller_phone) … … 411 407 call->data.phone = newphone; 412 408 } 413 409 414 410 return ipc_call(newphone, call); 415 411 } … … 418 414 /** Wait for a phone call. 419 415 * 420 * @param box Answerbox expecting the call. 421 * @param usec Timeout in microseconds. See documentation for 422 * waitq_sleep_timeout() for decription of its special 423 * meaning. 424 * @param flags Select mode of sleep operation. See documentation for 425 * waitq_sleep_timeout() for description of its special 426 * meaning. 427 * @return Recived call structure or NULL. 428 * 416 * @param box Answerbox expecting the call. 417 * @param usec Timeout in microseconds. See documentation for 418 * waitq_sleep_timeout() for decription of its special 419 * meaning. 420 * @param flags Select mode of sleep operation. See documentation for 421 * waitq_sleep_timeout() for description of its special 422 * meaning. 423 * 424 * @return Recived call structure or NULL. 425 * 429 426 * To distinguish between a call and an answer, have a look at call->flags. 
430 */ 431 call_t *ipc_wait_for_call(answerbox_t *box, uint32_t usec, int flags) 427 * 428 */ 429 call_t *ipc_wait_for_call(answerbox_t *box, uint32_t usec, unsigned int flags) 432 430 { 433 431 call_t *request; 434 ipl_t ipl;435 432 uint64_t irq_cnt = 0; 436 433 uint64_t answer_cnt = 0; 437 434 uint64_t call_cnt = 0; 438 435 int rc; 439 436 440 437 restart: 441 438 rc = waitq_sleep_timeout(&box->wq, usec, flags); … … 443 440 return NULL; 444 441 445 spinlock_lock(&box->lock);442 irq_spinlock_lock(&box->lock, true); 446 443 if (!list_empty(&box->irq_notifs)) { 447 444 /* Count recieved IRQ notification */ 448 irq_cnt++; 449 450 ipl = interrupts_disable(); 451 spinlock_lock(&box->irq_lock); 452 445 irq_cnt++; 446 447 irq_spinlock_lock(&box->irq_lock, false); 448 453 449 request = list_get_instance(box->irq_notifs.next, call_t, link); 454 450 list_remove(&request->link); 455 456 spinlock_unlock(&box->irq_lock); 457 interrupts_restore(ipl); 451 452 irq_spinlock_unlock(&box->irq_lock, false); 458 453 } else if (!list_empty(&box->answers)) { 459 454 /* Count recieved answer */ 460 455 answer_cnt++; 461 456 462 457 /* Handle asynchronous answers */ 463 458 request = list_get_instance(box->answers.next, call_t, link); … … 467 462 /* Count recieved call */ 468 463 call_cnt++; 469 464 470 465 /* Handle requests */ 471 466 request = list_get_instance(box->calls.next, call_t, link); 472 467 list_remove(&request->link); 468 473 469 /* Append request to dispatch queue */ 474 470 list_append(&request->link, &box->dispatched_calls); 475 471 } else { 476 472 /* This can happen regularly after ipc_cleanup */ 477 spinlock_unlock(&box->lock);473 irq_spinlock_unlock(&box->lock, true); 478 474 goto restart; 479 475 } 480 spinlock_unlock(&box->lock); 481 482 ipl = interrupts_disable(); 483 spinlock_lock(&TASK->lock); 476 477 irq_spinlock_pass(&box->lock, &TASK->lock); 478 484 479 TASK->ipc_info.irq_notif_recieved += irq_cnt; 485 480 TASK->ipc_info.answer_recieved += answer_cnt; 486 481 TASK->ipc_info.call_recieved += call_cnt; 487 spinlock_unlock(&TASK->lock);488 i nterrupts_restore(ipl);489 482 483 irq_spinlock_unlock(&TASK->lock, true); 484 490 485 return request; 491 486 } … … 493 488 /** Answer all calls from list with EHANGUP answer. 494 489 * 495 * @param lst Head of the list to be cleaned up. 490 * @param lst Head of the list to be cleaned up. 491 * 496 492 */ 497 493 void ipc_cleanup_call_list(link_t *lst) 498 494 { 499 call_t *call;500 501 495 while (!list_empty(lst)) { 502 call = list_get_instance(lst->next, call_t, link);496 call_t *call = list_get_instance(lst->next, call_t, link); 503 497 if (call->buffer) 504 498 free(call->buffer); 499 505 500 list_remove(&call->link); 506 501 507 502 IPC_SET_RETVAL(call->data, EHANGUP); 508 503 _ipc_answer_free_call(call, true); … … 512 507 /** Disconnects all phones connected to an answerbox. 513 508 * 514 * @param box Answerbox to disconnect phones from. 515 * @param notify_box If true, the answerbox will get a hangup message for 516 * each disconnected phone. 509 * @param box Answerbox to disconnect phones from. 510 * @param notify_box If true, the answerbox will get a hangup message for 511 * each disconnected phone. 512 * 517 513 */ 518 514 void ipc_answerbox_slam_phones(answerbox_t *box, bool notify_box) … … 520 516 phone_t *phone; 521 517 DEADLOCK_PROBE_INIT(p_phonelck); 522 ipl_t ipl; 523 call_t *call; 524 525 call = notify_box ? ipc_call_alloc(0) : NULL; 526 518 519 call_t *call = notify_box ? 
ipc_call_alloc(0) : NULL; 520 527 521 /* Disconnect all phones connected to our answerbox */ 528 522 restart_phones: 529 ipl = interrupts_disable(); 530 spinlock_lock(&box->lock); 523 irq_spinlock_lock(&box->lock, true); 531 524 while (!list_empty(&box->connected_phones)) { 532 525 phone = list_get_instance(box->connected_phones.next, 533 526 phone_t, link); 534 527 if (SYNCH_FAILED(mutex_trylock(&phone->lock))) { 535 spinlock_unlock(&box->lock); 536 interrupts_restore(ipl); 528 irq_spinlock_unlock(&box->lock, true); 537 529 DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD); 538 530 goto restart_phones; … … 541 533 /* Disconnect phone */ 542 534 ASSERT(phone->state == IPC_PHONE_CONNECTED); 543 535 544 536 list_remove(&phone->link); 545 537 phone->state = IPC_PHONE_SLAMMED; 546 538 547 539 if (notify_box) { 548 540 mutex_unlock(&phone->lock); 549 spinlock_unlock(&box->lock); 550 interrupts_restore(ipl); 551 541 irq_spinlock_unlock(&box->lock, true); 542 552 543 /* 553 544 * Send one message to the answerbox for each … … 559 550 call->flags |= IPC_CALL_DISCARD_ANSWER; 560 551 _ipc_call(phone, box, call); 561 552 562 553 /* Allocate another call in advance */ 563 554 call = ipc_call_alloc(0); 564 555 565 556 /* Must start again */ 566 557 goto restart_phones; 567 558 } 568 559 569 560 mutex_unlock(&phone->lock); 570 561 } 571 572 spinlock_unlock(&box->lock); 573 interrupts_restore(ipl); 574 562 563 irq_spinlock_unlock(&box->lock, true); 564 575 565 /* Free unused call */ 576 566 if (call) … … 578 568 } 579 569 580 /** Clean sup all IPC communication of the current task.570 /** Clean up all IPC communication of the current task. 581 571 * 582 572 * Note: ipc_hangup sets returning answerbox to TASK->answerbox, you 583 573 * have to change it as well if you want to cleanup other tasks than TASK. 574 * 584 575 */ 585 576 void ipc_cleanup(void) 586 577 { 587 int i;588 call_t *call;589 ipl_t ipl;590 591 578 /* Disconnect all our phones ('ipc_phone_hangup') */ 579 size_t i; 592 580 for (i = 0; i < IPC_MAX_PHONES; i++) 593 581 ipc_phone_hangup(&TASK->phones[i]); 594 582 595 583 /* Unsubscribe from any event notifications. 
*/ 596 584 event_cleanup_answerbox(&TASK->answerbox); 597 585 598 586 /* Disconnect all connected irqs */ 599 587 ipc_irq_cleanup(&TASK->answerbox); 600 588 601 589 /* Disconnect all phones connected to our regular answerbox */ 602 590 ipc_answerbox_slam_phones(&TASK->answerbox, false); 603 591 604 592 #ifdef CONFIG_UDEBUG 605 593 /* Clean up kbox thread and communications */ 606 594 ipc_kbox_cleanup(); 607 595 #endif 608 596 609 597 /* Answer all messages in 'calls' and 'dispatched_calls' queues */ 610 spinlock_lock(&TASK->answerbox.lock);598 irq_spinlock_lock(&TASK->answerbox.lock, true); 611 599 ipc_cleanup_call_list(&TASK->answerbox.dispatched_calls); 612 600 ipc_cleanup_call_list(&TASK->answerbox.calls); 613 spinlock_unlock(&TASK->answerbox.lock);601 irq_spinlock_unlock(&TASK->answerbox.lock, true); 614 602 615 603 /* Wait for all answers to interrupted synchronous calls to arrive */ 616 ipl = interrupts_disable();604 ipl_t ipl = interrupts_disable(); 617 605 while (!list_empty(&TASK->sync_box_head)) { 618 606 answerbox_t *box = list_get_instance(TASK->sync_box_head.next, 619 607 answerbox_t, sync_box_link); 620 608 621 609 list_remove(&box->sync_box_link); 622 call = ipc_wait_for_call(box, SYNCH_NO_TIMEOUT,610 call_t *call = ipc_wait_for_call(box, SYNCH_NO_TIMEOUT, 623 611 SYNCH_FLAGS_NONE); 624 612 ipc_call_free(call); … … 626 614 } 627 615 interrupts_restore(ipl); 628 616 629 617 /* Wait for all answers to asynchronous calls to arrive */ 630 while (1) { 631 /* Go through all phones, until all are FREE... */ 632 /* Locking not needed, no one else should modify 633 * it, when we are in cleanup */ 618 while (true) { 619 /* 620 * Go through all phones, until they are all FREE 621 * Locking is not needed, no one else should modify 622 * it when we are in cleanup 623 */ 634 624 for (i = 0; i < IPC_MAX_PHONES; i++) { 635 625 if (TASK->phones[i].state == IPC_PHONE_HUNGUP && … … 639 629 } 640 630 641 /* Just for sure, we might have had some 642 * IPC_PHONE_CONNECTING phones */ 631 /* 632 * Just for sure, we might have had some 633 * IPC_PHONE_CONNECTING phones 634 */ 643 635 if (TASK->phones[i].state == IPC_PHONE_CONNECTED) 644 636 ipc_phone_hangup(&TASK->phones[i]); 645 /* If the hangup succeeded, it has sent a HANGUP 637 638 /* 639 * If the hangup succeeded, it has sent a HANGUP 646 640 * message, the IPC is now in HUNGUP state, we 647 * wait for the reply to come */ 641 * wait for the reply to come 642 */ 648 643 649 644 if (TASK->phones[i].state != IPC_PHONE_FREE) 650 645 break; 651 646 } 652 /* Voila, got into cleanup */ 647 648 /* Got into cleanup */ 653 649 if (i == IPC_MAX_PHONES) 654 650 break; 655 651 656 call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT,652 call_t *call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT, 657 653 SYNCH_FLAGS_NONE); 658 654 ASSERT((call->flags & IPC_CALL_ANSWERED) || … … 666 662 if (!(call->flags & IPC_CALL_DISCARD_ANSWER)) 667 663 atomic_dec(&TASK->active_calls); 664 668 665 ipc_call_free(call); 669 666 } 670 667 } 671 668 672 673 /** Initilize IPC subsystem */ 669 /** Initilize IPC subsystem 670 * 671 */ 674 672 void ipc_init(void) 675 673 { … … 680 678 } 681 679 682 683 680 /** List answerbox contents. 684 681 * 685 * @param taskid Task ID. 682 * @param taskid Task ID. 
683 * 686 684 */ 687 685 void ipc_print_task(task_id_t taskid) 688 686 { 689 task_t *task; 690 int i; 691 call_t *call; 692 link_t *tmp; 693 ipl_t ipl; 694 695 ipl = interrupts_disable(); 696 spinlock_lock(&tasks_lock); 697 task = task_find_by_id(taskid); 698 if (task) 699 spinlock_lock(&task->lock); 700 spinlock_unlock(&tasks_lock); 687 irq_spinlock_lock(&tasks_lock, true); 688 task_t *task = task_find_by_id(taskid); 689 701 690 if (!task) { 702 i nterrupts_restore(ipl);691 irq_spinlock_unlock(&tasks_lock, true); 703 692 return; 704 693 } 705 694 695 /* Hand-over-hand locking */ 696 irq_spinlock_exchange(&tasks_lock, &task->lock); 697 706 698 /* Print opened phones & details */ 707 699 printf("PHONE:\n"); 700 701 size_t i; 708 702 for (i = 0; i < IPC_MAX_PHONES; i++) { 709 703 if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { … … 711 705 continue; 712 706 } 707 713 708 if (task->phones[i].state != IPC_PHONE_FREE) { 714 printf("%d: ", i); 709 printf("%" PRIs ": ", i); 710 715 711 switch (task->phones[i].state) { 716 712 case IPC_PHONE_CONNECTING: … … 718 714 break; 719 715 case IPC_PHONE_CONNECTED: 720 printf("connected to: %p ", 721 716 printf("connected to: %p ", 717 task->phones[i].callee); 722 718 break; 723 719 case IPC_PHONE_SLAMMED: 724 720 printf("slammed by: %p ", 725 721 task->phones[i].callee); 726 722 break; 727 723 case IPC_PHONE_HUNGUP: 728 724 printf("hung up - was: %p ", 729 725 task->phones[i].callee); 730 726 break; 731 727 default: 732 728 break; 733 729 } 734 printf("active: %ld\n", 730 731 printf("active: %" PRIun "\n", 735 732 atomic_get(&task->phones[i].active_calls)); 736 733 } 734 737 735 mutex_unlock(&task->phones[i].lock); 738 736 } 739 740 737 738 irq_spinlock_lock(&task->answerbox.lock, false); 739 740 link_t *cur; 741 741 742 /* Print answerbox - calls */ 742 spinlock_lock(&task->answerbox.lock);743 743 printf("ABOX - CALLS:\n"); 744 for ( tmp = task->answerbox.calls.next; tmp!= &task->answerbox.calls;745 tmp = tmp->next) {746 call = list_get_instance(tmp, call_t, link);744 for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls; 745 cur = cur->next) { 746 call_t *call = list_get_instance(cur, call_t, link); 747 747 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 748 748 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun … … 754 754 call->flags); 755 755 } 756 /* Print answerbox - calls */ 756 757 /* Print answerbox - dispatched calls */ 757 758 printf("ABOX - DISPATCHED CALLS:\n"); 758 for ( tmp= task->answerbox.dispatched_calls.next;759 tmp != &task->answerbox.dispatched_calls;760 tmp = tmp->next) {761 call = list_get_instance(tmp, call_t, link);759 for (cur = task->answerbox.dispatched_calls.next; 760 cur != &task->answerbox.dispatched_calls; 761 cur = cur->next) { 762 call_t *call = list_get_instance(cur, call_t, link); 762 763 printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun 763 764 " A1:%" PRIun " A2:%" PRIun " A3:%" PRIun … … 769 770 call->flags); 770 771 } 771 /* Print answerbox - calls */ 772 773 /* Print answerbox - answers */ 772 774 printf("ABOX - ANSWERS:\n"); 773 for ( tmp= task->answerbox.answers.next;774 tmp!= &task->answerbox.answers;775 tmp = tmp->next) {776 call = list_get_instance(tmp, call_t, link);775 for (cur = task->answerbox.answers.next; 776 cur != &task->answerbox.answers; 777 cur = cur->next) { 778 call_t *call = list_get_instance(cur, call_t, link); 777 779 printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun 778 780 " A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", … … 782 784 call->flags); 783 785 } 784 
785 spinlock_unlock(&task->answerbox.lock); 786 spinlock_unlock(&task->lock); 787 interrupts_restore(ipl); 786 787 irq_spinlock_unlock(&task->answerbox.lock, false); 788 irq_spinlock_unlock(&task->lock, true); 788 789 } 789 790 -
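ipc.c introduces irq_spinlock_pass(), used when work under one lock is immediately followed by work under another: the statistics counter is bumped under TASK->lock, then the lock is passed to the answerbox lock without re-enabling interrupts in between. A sketch of the ipc_forward() prologue as shown above (the wrapper function is hypothetical):

/* Sketch: update a per-task IPC statistic, then continue under the old
 * answerbox's lock, keeping interrupts disabled across the transition. */
static void count_and_remove(answerbox_t *oldbox, call_t *call)
{
	irq_spinlock_lock(&TASK->lock, true);
	TASK->ipc_info.forwarded++;
	irq_spinlock_pass(&TASK->lock, &oldbox->lock);

	list_remove(&call->link);
	irq_spinlock_unlock(&oldbox->lock, true);
}

Judging from the usage, irq_spinlock_pass() drops the first lock before taking the second, while irq_spinlock_exchange() (the "hand-over-hand" call in ipc_print_task()) acquires the new lock before releasing the old one.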
kernel/generic/src/ipc/ipcrsc.c
r666f492 rda1bafb 45 45 * - hangup phone (the caller has hung up) 46 46 * - hangup phone (the answerbox is exiting) 47 * 47 * 48 48 * Locking strategy 49 49 * … … 85 85 * 86 86 * Phone hangup 87 * 87 * 88 88 * *** The caller hangs up (sys_ipc_hangup) *** 89 89 * - The phone is disconnected (no more messages can be sent over this phone), … … 99 99 * 100 100 * Call forwarding 101 * 101 * 102 102 * The call can be forwarded, so that the answer to call is passed directly 103 103 * to the original sender. However, this poses special problems regarding … … 114 114 * 115 115 * Cleanup strategy 116 * 116 * 117 117 * 1) Disconnect all our phones ('ipc_phone_hangup'). 118 118 * … … 123 123 * 124 124 * 4) Wait for all async answers to arrive and dispose of them. 125 * 125 * 126 126 */ 127 127 … … 137 137 * @todo Some speedup (hash table?) 138 138 * 139 * @param callid Userspace hash of the call. Currently it is the call 140 * structure kernel address. 141 * 142 * @return NULL on not found, otherwise pointer to the call 143 * structure. 139 * @param callid Userspace hash of the call. Currently it is the call 140 * structure kernel address. 141 * 142 * @return NULL on not found, otherwise pointer to the call 143 * structure. 144 * 144 145 */ 145 146 call_t *get_call(unative_t callid) 146 147 { 147 148 link_t *lst; 148 call_t * call, *result = NULL;149 150 spinlock_lock(&TASK->answerbox.lock);149 call_t *result = NULL; 150 151 irq_spinlock_lock(&TASK->answerbox.lock, true); 151 152 for (lst = TASK->answerbox.dispatched_calls.next; 152 153 lst != &TASK->answerbox.dispatched_calls; lst = lst->next) { 153 call = list_get_instance(lst, call_t, link);154 call_t *call = list_get_instance(lst, call_t, link); 154 155 if ((unative_t) call == callid) { 155 156 result = call; … … 157 158 } 158 159 } 159 spinlock_unlock(&TASK->answerbox.lock); 160 161 irq_spinlock_unlock(&TASK->answerbox.lock, true); 160 162 return result; 161 163 } … … 163 165 /** Allocate new phone slot in the specified task. 164 166 * 165 * @param t Task for which to allocate a new phone. 166 * 167 * @return New phone handle or -1 if the phone handle limit is 168 * exceeded. 169 */ 170 int phone_alloc(task_t *t) 171 { 172 int i; 173 174 spinlock_lock(&t->lock); 167 * @param task Task for which to allocate a new phone. 168 * 169 * @return New phone handle or -1 if the phone handle limit is 170 * exceeded. 171 * 172 */ 173 int phone_alloc(task_t *task) 174 { 175 irq_spinlock_lock(&task->lock, true); 176 177 size_t i; 175 178 for (i = 0; i < IPC_MAX_PHONES; i++) { 176 if ( t->phones[i].state == IPC_PHONE_HUNGUP&&177 atomic_get(&t->phones[i].active_calls) == 0)178 t ->phones[i].state = IPC_PHONE_FREE;179 180 if (t ->phones[i].state == IPC_PHONE_FREE) {181 t ->phones[i].state = IPC_PHONE_CONNECTING;179 if ((task->phones[i].state == IPC_PHONE_HUNGUP) && 180 (atomic_get(&task->phones[i].active_calls) == 0)) 181 task->phones[i].state = IPC_PHONE_FREE; 182 183 if (task->phones[i].state == IPC_PHONE_FREE) { 184 task->phones[i].state = IPC_PHONE_CONNECTING; 182 185 break; 183 186 } 184 187 } 185 spinlock_unlock(&t->lock); 186 188 189 irq_spinlock_unlock(&task->lock, true); 190 187 191 if (i == IPC_MAX_PHONES) 188 192 return -1; 189 193 190 194 return i; 191 195 } … … 193 197 /** Mark a phone structure free. 194 198 * 195 * @param phone Phone structure to be marked free. 199 * @param phone Phone structure to be marked free. 
200 * 196 201 */ 197 202 static void phone_deallocp(phone_t *phone) … … 199 204 ASSERT(phone->state == IPC_PHONE_CONNECTING); 200 205 201 /* atomic operation */206 /* Atomic operation */ 202 207 phone->state = IPC_PHONE_FREE; 203 208 } … … 207 212 * All already sent messages will be correctly processed. 208 213 * 209 * @param phoneid Phone handle of the phone to be freed. 214 * @param phoneid Phone handle of the phone to be freed. 215 * 210 216 */ 211 217 void phone_dealloc(int phoneid) … … 216 222 /** Connect phone to a given answerbox. 217 223 * 218 * @param phoneid 219 * @param box 224 * @param phoneid Phone handle to be connected. 225 * @param box Answerbox to which to connect the phone handle. 220 226 * 221 227 * The procedure _enforces_ that the user first marks the phone 222 228 * busy (e.g. via phone_alloc) and then connects the phone, otherwise 223 229 * race condition may appear. 230 * 224 231 */ 225 232 void phone_connect(int phoneid, answerbox_t *box) -
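The ipcrsc.c hunks keep phone_alloc()'s contract: it returns a small phone handle, or -1 when all IPC_MAX_PHONES slots are in use, and the comments insist that a slot be allocated (marked busy) before phone_connect() is called on it. A hypothetical caller observing that order:

/* Sketch: allocate a phone slot for the current task and connect it to
 * an answerbox, in the alloc-then-connect order required above. */
static int connect_new_phone(answerbox_t *box)
{
	int phoneid = phone_alloc(TASK);
	if (phoneid < 0)
		return -1;    /* all IPC_MAX_PHONES slots are taken */

	phone_connect(phoneid, box);
	return phoneid;
}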
kernel/generic/src/ipc/irq.c
r666f492 rda1bafb 31 31 * @{ 32 32 */ 33 33 34 /** 34 35 * @file … … 67 68 * structure are finished. Because we hold the hash table lock, we prevent new 68 69 * IRQs from taking new references to the IRQ structure. 70 * 69 71 */ 70 72 … … 81 83 /** Free the top-half pseudocode. 82 84 * 83 * @param code Pointer to the top-half pseudocode. 85 * @param code Pointer to the top-half pseudocode. 86 * 84 87 */ 85 88 static void code_free(irq_code_t *code) … … 93 96 /** Copy the top-half pseudocode from userspace into the kernel. 94 97 * 95 * @param ucode Userspace address of the top-half pseudocode. 96 * 97 * @return Kernel address of the copied pseudocode. 98 * @param ucode Userspace address of the top-half pseudocode. 99 * 100 * @return Kernel address of the copied pseudocode. 101 * 98 102 */ 99 103 static irq_code_t *code_from_uspace(irq_code_t *ucode) 100 104 { 101 irq_code_t *code; 102 irq_cmd_t *ucmds; 103 int rc; 104 105 code = malloc(sizeof(*code), 0); 106 rc = copy_from_uspace(code, ucode, sizeof(*code)); 105 irq_code_t *code = malloc(sizeof(*code), 0); 106 int rc = copy_from_uspace(code, ucode, sizeof(*code)); 107 107 if (rc != 0) { 108 108 free(code); … … 114 114 return NULL; 115 115 } 116 ucmds = code->cmds; 116 117 irq_cmd_t *ucmds = code->cmds; 117 118 code->cmds = malloc(sizeof(code->cmds[0]) * code->cmdcount, 0); 118 119 rc = copy_from_uspace(code->cmds, ucmds, … … 123 124 return NULL; 124 125 } 125 126 126 127 return code; 127 128 } … … 141 142 unative_t method, irq_code_t *ucode) 142 143 { 143 ipl_t ipl;144 irq_code_t *code;145 irq_t *irq;146 link_t *hlp;147 144 unative_t key[] = { 148 145 (unative_t) inr, … … 150 147 }; 151 148 149 irq_code_t *code; 152 150 if (ucode) { 153 151 code = code_from_uspace(ucode); 154 152 if (!code) 155 153 return EBADMEM; 156 } else {154 } else 157 155 code = NULL; 158 }159 156 160 157 /* 161 158 * Allocate and populate the IRQ structure. 162 159 */ 163 irq = malloc(sizeof(irq_t), 0); 160 irq_t *irq = malloc(sizeof(irq_t), 0); 161 164 162 irq_initialize(irq); 165 163 irq->devno = devno; … … 177 175 * answerbox's list. 
178 176 */ 179 i pl = interrupts_disable();180 spinlock_lock(&irq_uspace_hash_table_lock);181 hlp = hash_table_find(&irq_uspace_hash_table, key);177 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 178 179 link_t *hlp = hash_table_find(&irq_uspace_hash_table, key); 182 180 if (hlp) { 183 irq_t *hirq __attribute__((unused)) 184 = hash_table_get_instance(hlp, irq_t, link); 181 irq_t *hirq = hash_table_get_instance(hlp, irq_t, link); 185 182 186 183 /* hirq is locked */ 187 spinlock_unlock(&hirq->lock);184 irq_spinlock_unlock(&hirq->lock, false); 188 185 code_free(code); 189 spinlock_unlock(&irq_uspace_hash_table_lock); 186 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 187 190 188 free(irq); 191 interrupts_restore(ipl);192 189 return EEXISTS; 193 190 } 194 191 195 spinlock_lock(&irq->lock); /* Not really necessary, but paranoid */ 196 spinlock_lock(&box->irq_lock); 192 /* Locking is not really necessary, but paranoid */ 193 irq_spinlock_lock(&irq->lock, false); 194 irq_spinlock_lock(&box->irq_lock, false); 195 197 196 hash_table_insert(&irq_uspace_hash_table, key, &irq->link); 198 197 list_append(&irq->notif_cfg.link, &box->irq_head); 199 spinlock_unlock(&box->irq_lock);200 spinlock_unlock(&irq->lock);201 spinlock_unlock(&irq_uspace_hash_table_lock);202 203 interrupts_restore(ipl);198 199 irq_spinlock_unlock(&box->irq_lock, false); 200 irq_spinlock_unlock(&irq->lock, false); 201 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 202 204 203 return EOK; 205 204 } … … 207 206 /** Unregister task from IRQ notification. 208 207 * 209 * @param box Answerbox associated with the notification. 210 * @param inr IRQ number. 211 * @param devno Device number. 208 * @param box Answerbox associated with the notification. 209 * @param inr IRQ number. 210 * @param devno Device number. 211 * 212 212 */ 213 213 int ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno) 214 214 { 215 ipl_t ipl;216 215 unative_t key[] = { 217 216 (unative_t) inr, 218 217 (unative_t) devno 219 218 }; 220 link_t *lnk; 221 irq_t *irq; 222 223 ipl = interrupts_disable(); 224 spinlock_lock(&irq_uspace_hash_table_lock); 225 lnk = hash_table_find(&irq_uspace_hash_table, key); 219 220 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 221 link_t *lnk = hash_table_find(&irq_uspace_hash_table, key); 226 222 if (!lnk) { 227 spinlock_unlock(&irq_uspace_hash_table_lock); 228 interrupts_restore(ipl); 223 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 229 224 return ENOENT; 230 225 } 231 irq = hash_table_get_instance(lnk, irq_t, link); 226 227 irq_t *irq = hash_table_get_instance(lnk, irq_t, link); 228 232 229 /* irq is locked */ 233 spinlock_lock(&box->irq_lock);230 irq_spinlock_lock(&box->irq_lock, false); 234 231 235 232 ASSERT(irq->notif_cfg.answerbox == box); … … 237 234 /* Free up the pseudo code and associated structures. */ 238 235 code_free(irq->notif_cfg.code); 239 240 /* Remove the IRQ from the answerbox's list. */ 236 237 /* Remove the IRQ from the answerbox's list. */ 241 238 list_remove(&irq->notif_cfg.link); 242 239 243 240 /* 244 241 * We need to drop the IRQ lock now because hash_table_remove() will try … … 248 245 * the meantime. 249 246 */ 250 spinlock_unlock(&irq->lock);251 247 irq_spinlock_unlock(&irq->lock, false); 248 252 249 /* Remove the IRQ from the uspace IRQ hash table. 
*/ 253 250 hash_table_remove(&irq_uspace_hash_table, key, 2); 254 251 255 spinlock_unlock(&irq_uspace_hash_table_lock);256 spinlock_unlock(&box->irq_lock);252 irq_spinlock_unlock(&box->irq_lock, false); 253 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 257 254 258 255 /* Free up the IRQ structure. */ 259 256 free(irq); 260 257 261 interrupts_restore(ipl);262 258 return EOK; 263 259 } 264 265 260 266 261 /** Disconnect all IRQ notifications from an answerbox. … … 270 265 * send notifications to it. 271 266 * 272 * @param box Answerbox for which we want to carry out the cleanup. 267 * @param box Answerbox for which we want to carry out the cleanup. 268 * 273 269 */ 274 270 void ipc_irq_cleanup(answerbox_t *box) 275 271 { 276 ipl_t ipl;277 278 272 loop: 279 ipl = interrupts_disable(); 280 spinlock_lock(&irq_uspace_hash_table_lock); 281 spinlock_lock(&box->irq_lock); 273 irq_spinlock_lock(&irq_uspace_hash_table_lock, true); 274 irq_spinlock_lock(&box->irq_lock, false); 282 275 283 276 while (box->irq_head.next != &box->irq_head) { 284 link_t *cur = box->irq_head.next;285 irq_t *irq;286 277 DEADLOCK_PROBE_INIT(p_irqlock); 287 unative_t key[2]; 288 289 irq = list_get_instance(cur, irq_t, notif_cfg.link); 290 if (!spinlock_trylock(&irq->lock)) { 278 279 irq_t *irq = list_get_instance(box->irq_head.next, irq_t, 280 notif_cfg.link); 281 282 if (!irq_spinlock_trylock(&irq->lock)) { 291 283 /* 292 284 * Avoid deadlock by trying again. 293 285 */ 294 spinlock_unlock(&box->irq_lock); 295 spinlock_unlock(&irq_uspace_hash_table_lock); 296 interrupts_restore(ipl); 286 irq_spinlock_unlock(&box->irq_lock, false); 287 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 297 288 DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD); 298 289 goto loop; 299 290 } 291 292 unative_t key[2]; 300 293 key[0] = irq->inr; 301 294 key[1] = irq->devno; 302 303 295 304 296 ASSERT(irq->notif_cfg.answerbox == box); … … 317 309 * didn't drop the hash table lock in the meantime. 318 310 */ 319 spinlock_unlock(&irq->lock);311 irq_spinlock_unlock(&irq->lock, false); 320 312 321 313 /* Remove from the hash table. */ … … 325 317 } 326 318 327 spinlock_unlock(&box->irq_lock); 328 spinlock_unlock(&irq_uspace_hash_table_lock); 329 interrupts_restore(ipl); 319 irq_spinlock_unlock(&box->irq_lock, false); 320 irq_spinlock_unlock(&irq_uspace_hash_table_lock, true); 330 321 } 331 322 332 323 /** Add a call to the proper answerbox queue. 333 324 * 334 * Assume irq->lock is locked. 335 * 336 * @param irq IRQ structure referencing the target answerbox. 337 * @param call IRQ notification call. 325 * Assume irq->lock is locked and interrupts disabled. 326 * 327 * @param irq IRQ structure referencing the target answerbox. 328 * @param call IRQ notification call. 329 * 338 330 */ 339 331 static void send_call(irq_t *irq, call_t *call) 340 332 { 341 spinlock_lock(&irq->notif_cfg.answerbox->irq_lock);333 irq_spinlock_lock(&irq->notif_cfg.answerbox->irq_lock, false); 342 334 list_append(&call->link, &irq->notif_cfg.answerbox->irq_notifs); 343 spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock);344 335 irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false); 336 345 337 waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST); 346 338 } … … 348 340 /** Apply the top-half pseudo code to find out whether to accept the IRQ or not. 349 341 * 350 * @param irq IRQ structure. 351 * 352 * @return IRQ_ACCEPT if the interrupt is accepted by the 353 * pseudocode. IRQ_DECLINE otherwise. 342 * @param irq IRQ structure. 
343 * 344 * @return IRQ_ACCEPT if the interrupt is accepted by the 345 * pseudocode, IRQ_DECLINE otherwise. 346 * 354 347 */ 355 348 irq_ownership_t ipc_irq_top_half_claim(irq_t *irq) 356 349 { 357 unsigned int i;358 unative_t dstval;359 350 irq_code_t *code = irq->notif_cfg.code; 360 unative_t *scratch = irq->notif_cfg.scratch; 361 351 uint32_t *scratch = irq->notif_cfg.scratch; 362 352 363 353 if (!irq->notif_cfg.notify) … … 367 357 return IRQ_DECLINE; 368 358 359 size_t i; 369 360 for (i = 0; i < code->cmdcount; i++) { 370 unsigned int srcarg = code->cmds[i].srcarg; 371 unsigned int dstarg = code->cmds[i].dstarg; 361 uint32_t dstval; 362 uintptr_t srcarg = code->cmds[i].srcarg; 363 uintptr_t dstarg = code->cmds[i].dstarg; 372 364 373 365 if (srcarg >= IPC_CALL_LEN) 374 366 break; 367 375 368 if (dstarg >= IPC_CALL_LEN) 376 369 break; … … 405 398 break; 406 399 case CMD_BTEST: 407 if ( srcarg && dstarg) {400 if ((srcarg) && (dstarg)) { 408 401 dstval = scratch[srcarg] & code->cmds[i].value; 409 402 scratch[dstarg] = dstval; … … 411 404 break; 412 405 case CMD_PREDICATE: 413 if ( srcarg && !scratch[srcarg]) {406 if ((srcarg) && (!scratch[srcarg])) { 414 407 i += code->cmds[i].value; 415 408 continue; … … 427 420 } 428 421 429 430 422 /* IRQ top-half handler. 431 423 * 432 424 * We expect interrupts to be disabled and the irq->lock already held. 433 425 * 434 * @param irq IRQ structure. 426 * @param irq IRQ structure. 427 * 435 428 */ 436 429 void ipc_irq_top_half_handler(irq_t *irq) 437 430 { 438 431 ASSERT(irq); 439 432 440 433 if (irq->notif_cfg.answerbox) { 441 call_t *call; 442 443 call = ipc_call_alloc(FRAME_ATOMIC); 434 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 444 435 if (!call) 445 436 return; … … 448 439 /* Put a counter to the message */ 449 440 call->priv = ++irq->notif_cfg.counter; 450 441 451 442 /* Set up args */ 452 443 IPC_SET_METHOD(call->data, irq->notif_cfg.method); … … 456 447 IPC_SET_ARG4(call->data, irq->notif_cfg.scratch[4]); 457 448 IPC_SET_ARG5(call->data, irq->notif_cfg.scratch[5]); 458 449 459 450 send_call(irq, call); 460 451 } … … 463 454 /** Send notification message. 464 455 * 465 * @param irq IRQ structure. 466 * @param a1 Driver-specific payload argument. 467 * @param a2 Driver-specific payload argument. 468 * @param a3 Driver-specific payload argument. 469 * @param a4 Driver-specific payload argument. 470 * @param a5 Driver-specific payload argument. 456 * @param irq IRQ structure. 457 * @param a1 Driver-specific payload argument. 458 * @param a2 Driver-specific payload argument. 459 * @param a3 Driver-specific payload argument. 460 * @param a4 Driver-specific payload argument. 461 * @param a5 Driver-specific payload argument. 
462 * 471 463 */ 472 464 void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3, 473 465 unative_t a4, unative_t a5) 474 466 { 475 call_t *call; 476 477 spinlock_lock(&irq->lock); 478 467 irq_spinlock_lock(&irq->lock, true); 468 479 469 if (irq->notif_cfg.answerbox) { 480 call = ipc_call_alloc(FRAME_ATOMIC);470 call_t *call = ipc_call_alloc(FRAME_ATOMIC); 481 471 if (!call) { 482 spinlock_unlock(&irq->lock);472 irq_spinlock_unlock(&irq->lock, true); 483 473 return; 484 474 } 475 485 476 call->flags |= IPC_CALL_NOTIF; 486 477 /* Put a counter to the message */ 487 478 call->priv = ++irq->notif_cfg.counter; 488 479 489 480 IPC_SET_METHOD(call->data, irq->notif_cfg.method); 490 481 IPC_SET_ARG1(call->data, a1); … … 496 487 send_call(irq, call); 497 488 } 498 spinlock_unlock(&irq->lock); 489 490 irq_spinlock_unlock(&irq->lock, true); 499 491 } 500 492 -
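For reference, the conversion applied throughout ipc/irq.c above follows a single pattern: the hand-rolled ipl_t save/restore around a plain spinlock becomes one irq_spinlock call whose second argument says whether the interrupt state should be changed. A minimal before/after sketch using the hash-table lock from the hunks above — only the lock calls themselves are taken from the changeset, the comments and the elided critical section are illustrative:

    /* Old style: interrupt state saved and restored by hand. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&irq_uspace_hash_table_lock);
    /* ... critical section ... */
    spinlock_unlock(&irq_uspace_hash_table_lock);
    interrupts_restore(ipl);

    /* New style: true asks irq_spinlock_lock() to disable interrupts and
     * remember the previous state; the matching unlock with true restores it. */
    irq_spinlock_lock(&irq_uspace_hash_table_lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&irq_uspace_hash_table_lock, true);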
kernel/generic/src/ipc/kbox.c
r666f492 rda1bafb 47 47 void ipc_kbox_cleanup(void) 48 48 { 49 bool have_kb_thread; 50 51 /* 49 /* 52 50 * Only hold kb.cleanup_lock while setting kb.finished - 53 51 * this is enough. … … 56 54 TASK->kb.finished = true; 57 55 mutex_unlock(&TASK->kb.cleanup_lock); 58 59 have_kb_thread = (TASK->kb.thread != NULL);60 56 57 bool have_kb_thread = (TASK->kb.thread != NULL); 58 61 59 /* 62 60 * From now on nobody will try to connect phones or attach 63 61 * kbox threads 64 62 */ 65 63 66 64 /* 67 65 * Disconnect all phones connected to our kbox. Passing true for … … 71 69 */ 72 70 ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread); 73 74 /* 71 72 /* 75 73 * If the task was being debugged, clean up debugging session. 76 74 * This is necessarry as slamming the phones won't force … … 80 78 udebug_task_cleanup(TASK); 81 79 mutex_unlock(&TASK->udebug.lock); 82 80 83 81 if (have_kb_thread) { 84 82 LOG("Join kb.thread."); … … 88 86 TASK->kb.thread = NULL; 89 87 } 90 88 91 89 /* Answer all messages in 'calls' and 'dispatched_calls' queues. */ 92 spinlock_lock(&TASK->kb.box.lock);90 irq_spinlock_lock(&TASK->kb.box.lock, true); 93 91 ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls); 94 92 ipc_cleanup_call_list(&TASK->kb.box.calls); 95 spinlock_unlock(&TASK->kb.box.lock);93 irq_spinlock_unlock(&TASK->kb.box.lock, true); 96 94 } 97 95 98 96 /** Handle hangup message in kbox. 99 97 * 100 * @param call The IPC_M_PHONE_HUNGUP call structure. 101 * @param last Output, the function stores @c true here if 102 * this was the last phone, @c false otherwise. 103 **/ 98 * @param call The IPC_M_PHONE_HUNGUP call structure. 99 * @param last Output, the function stores @c true here if 100 * this was the last phone, @c false otherwise. 101 * 102 */ 104 103 static void kbox_proc_phone_hungup(call_t *call, bool *last) 105 104 { 106 ipl_t ipl;107 108 105 /* Was it our debugger, who hung up? */ 109 106 if (call->sender == TASK->udebug.debugger) { 110 107 /* Terminate debugging session (if any). */ 111 108 LOG("Terminate debugging session."); 112 ipl = interrupts_disable(); 113 spinlock_lock(&TASK->lock); 109 irq_spinlock_lock(&TASK->lock, true); 114 110 udebug_task_cleanup(TASK); 115 spinlock_unlock(&TASK->lock); 116 interrupts_restore(ipl); 111 irq_spinlock_unlock(&TASK->lock, true); 117 112 } else { 118 113 LOG("Was not debugger."); 119 114 } 120 115 121 116 LOG("Continue with hangup message."); 122 117 IPC_SET_RETVAL(call->data, 0); 123 118 ipc_answer(&TASK->kb.box, call); 124 119 125 120 mutex_lock(&TASK->kb.cleanup_lock); 126 127 ipl = interrupts_disable(); 128 spinlock_lock(&TASK->lock); 129 spinlock_lock(&TASK->kb.box.lock); 121 122 irq_spinlock_lock(&TASK->lock, true); 123 irq_spinlock_lock(&TASK->kb.box.lock, false); 130 124 if (list_empty(&TASK->kb.box.connected_phones)) { 131 125 /* … … 133 127 * gets freed and signal to the caller. 134 128 */ 135 129 136 130 /* Only detach kbox thread unless already terminating. */ 137 131 if (TASK->kb.finished == false) { … … 140 134 TASK->kb.thread = NULL; 141 135 } 142 136 143 137 LOG("Phone list is empty."); 144 138 *last = true; 145 } else {139 } else 146 140 *last = false; 147 } 148 149 spinlock_unlock(&TASK->kb.box.lock); 150 spinlock_unlock(&TASK->lock); 151 interrupts_restore(ipl); 152 141 142 irq_spinlock_unlock(&TASK->kb.box.lock, true); 143 irq_spinlock_unlock(&TASK->lock, false); 144 153 145 mutex_unlock(&TASK->kb.cleanup_lock); 154 146 } … … 159 151 * when all phones are disconnected from the kbox. 160 152 * 161 * @param arg Ignored. 
153 * @param arg Ignored. 154 * 162 155 */ 163 156 static void kbox_thread_proc(void *arg) 164 157 { 165 call_t *call; 166 bool done; 167 168 (void)arg; 158 (void) arg; 169 159 LOG("Starting."); 170 done = false;171 160 bool done = false; 161 172 162 while (!done) { 173 call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,163 call_t *call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT, 174 164 SYNCH_FLAGS_NONE); 175 165 176 166 if (call == NULL) 177 continue; 178 167 continue; /* Try again. */ 168 179 169 switch (IPC_GET_METHOD(call->data)) { 180 170 181 171 case IPC_M_DEBUG_ALL: 182 172 /* Handle debug call. */ 183 173 udebug_call_receive(call); 184 174 break; 185 175 186 176 case IPC_M_PHONE_HUNGUP: 187 177 /* … … 192 182 kbox_proc_phone_hungup(call, &done); 193 183 break; 194 184 195 185 default: 196 186 /* Ignore */ … … 198 188 } 199 189 } 200 190 201 191 LOG("Exiting."); 202 192 } 203 193 204 194 205 /** 206 * Connect phone to a task kernel-box specified by id. 195 /** Connect phone to a task kernel-box specified by id. 207 196 * 208 197 * Note that this is not completely atomic. For optimisation reasons, the task … … 211 200 * cleanup code. 212 201 * 213 * @return Phone id on success, or negative error code. 202 * @return Phone id on success, or negative error code. 203 * 214 204 */ 215 205 int ipc_connect_kbox(task_id_t taskid) 216 206 { 217 int newphid; 218 task_t *ta; 219 thread_t *kb_thread; 220 ipl_t ipl; 221 222 ipl = interrupts_disable(); 223 spinlock_lock(&tasks_lock); 224 225 ta = task_find_by_id(taskid); 226 if (ta == NULL) { 227 spinlock_unlock(&tasks_lock); 228 interrupts_restore(ipl); 207 irq_spinlock_lock(&tasks_lock, true); 208 209 task_t *task = task_find_by_id(taskid); 210 if (task == NULL) { 211 irq_spinlock_unlock(&tasks_lock, true); 229 212 return ENOENT; 230 213 } 231 232 atomic_inc(&ta->refcount); 233 234 spinlock_unlock(&tasks_lock); 235 interrupts_restore(ipl); 236 237 mutex_lock(&ta->kb.cleanup_lock); 238 239 if (atomic_predec(&ta->refcount) == 0) { 240 mutex_unlock(&ta->kb.cleanup_lock); 241 task_destroy(ta); 214 215 atomic_inc(&task->refcount); 216 217 irq_spinlock_unlock(&tasks_lock, true); 218 219 mutex_lock(&task->kb.cleanup_lock); 220 221 if (atomic_predec(&task->refcount) == 0) { 222 mutex_unlock(&task->kb.cleanup_lock); 223 task_destroy(task); 242 224 return ENOENT; 243 225 } 244 245 if (ta ->kb.finished != false) {246 mutex_unlock(&ta ->kb.cleanup_lock);226 227 if (task->kb.finished != false) { 228 mutex_unlock(&task->kb.cleanup_lock); 247 229 return EINVAL; 248 230 } 249 250 newphid = phone_alloc(TASK);231 232 int newphid = phone_alloc(TASK); 251 233 if (newphid < 0) { 252 mutex_unlock(&ta ->kb.cleanup_lock);234 mutex_unlock(&task->kb.cleanup_lock); 253 235 return ELIMIT; 254 236 } 255 237 256 238 /* Connect the newly allocated phone to the kbox */ 257 ipc_phone_connect(&TASK->phones[newphid], &ta ->kb.box);258 259 if (ta ->kb.thread != NULL) {260 mutex_unlock(&ta ->kb.cleanup_lock);239 ipc_phone_connect(&TASK->phones[newphid], &task->kb.box); 240 241 if (task->kb.thread != NULL) { 242 mutex_unlock(&task->kb.cleanup_lock); 261 243 return newphid; 262 244 } 263 245 264 246 /* Create a kbox thread */ 265 kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0,247 thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 0, 266 248 "kbox", false); 267 249 if (!kb_thread) { 268 mutex_unlock(&ta ->kb.cleanup_lock);250 mutex_unlock(&task->kb.cleanup_lock); 269 251 return ENOMEM; 270 252 } 271 272 ta ->kb.thread = kb_thread;253 254 
task->kb.thread = kb_thread; 273 255 thread_ready(kb_thread); 274 275 mutex_unlock(&ta ->kb.cleanup_lock);276 256 257 mutex_unlock(&task->kb.cleanup_lock); 258 277 259 return newphid; 278 260 } -
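The kbox hunks rely on the companion convention for nested locks: only the outermost acquisition toggles the interrupt state, inner acquisitions pass false. A condensed sketch of that convention, modelled on kbox_proc_phone_hungup() above; the strictly balanced release order shown here is illustrative, while the lock and field names are those from the diff:

    irq_spinlock_lock(&TASK->lock, true);           /* outermost: disables interrupts */
    irq_spinlock_lock(&TASK->kb.box.lock, false);   /* nested: state already saved */

    /* ... inspect TASK->kb.box.connected_phones, detach the kbox thread ... */

    irq_spinlock_unlock(&TASK->kb.box.lock, false); /* nested: leave state alone */
    irq_spinlock_unlock(&TASK->lock, true);         /* outermost: restores interrupts */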
kernel/generic/src/ipc/sysipc.c
r666f492 rda1bafb 56 56 * requests. 57 57 */ 58 #define DATA_XFER_LIMIT (64 * 1024) 58 #define DATA_XFER_LIMIT (64 * 1024) 59 60 #define STRUCT_TO_USPACE(dst, src) copy_to_uspace((dst), (src), sizeof(*(src))) 59 61 60 62 /** Get phone from the current task by ID. 61 63 * 62 * @param phoneid Phone ID. 63 * @param phone Place to store pointer to phone. 64 * @return EOK on success, EINVAL if ID is invalid. 64 * @param phoneid Phone ID. 65 * @param phone Place to store pointer to phone. 66 * 67 * @return EOK on success, EINVAL if ID is invalid. 68 * 65 69 */ 66 70 static int phone_get(unative_t phoneid, phone_t **phone) … … 68 72 if (phoneid >= IPC_MAX_PHONES) 69 73 return EINVAL; 70 74 71 75 *phone = &TASK->phones[phoneid]; 72 76 return EOK; 73 77 } 74 78 75 #define STRUCT_TO_USPACE(dst, src) copy_to_uspace(dst, src, sizeof(*(src)))76 77 79 /** Decide if the method is a system method. 78 80 * 79 * @param method 80 * 81 * @return Return 1if the method is a system method.82 * Otherwise return 0.83 */ 84 static inline intmethod_is_system(unative_t method)81 * @param method Method to be decided. 82 * 83 * @return true if the method is a system method. 84 * 85 */ 86 static inline bool method_is_system(unative_t method) 85 87 { 86 88 if (method <= IPC_M_LAST_SYSTEM) 87 return 1; 88 return 0; 89 return true; 90 91 return false; 89 92 } 90 93 … … 94 97 * it is useless 95 98 * 96 * @param method 97 * 98 * @return Return 1if the method is forwardable.99 * Otherwise return 0.100 */ 101 static inline intmethod_is_forwardable(unative_t method)99 * @param method Method to be decided. 100 * 101 * @return true if the method is forwardable. 102 * 103 */ 104 static inline bool method_is_forwardable(unative_t method) 102 105 { 103 106 switch (method) { … … 106 109 case IPC_M_PHONE_HUNGUP: 107 110 /* This message is meant only for the original recipient. */ 108 return 0;111 return false; 109 112 default: 110 return 1;113 return true; 111 114 } 112 115 } … … 116 119 * - some system messages may be forwarded but their content cannot be altered 117 120 * 118 * @param method 119 * 120 * @return Return 1if the method is immutable on forward.121 * Otherwise return 0.122 */ 123 static inline intmethod_is_immutable(unative_t method)121 * @param method Method to be decided. 122 * 123 * @return true if the method is immutable on forward. 124 * 125 */ 126 static inline bool method_is_immutable(unative_t method) 124 127 { 125 128 switch (method) { … … 128 131 case IPC_M_DATA_WRITE: 129 132 case IPC_M_DATA_READ: 130 return 1;133 return true; 131 134 default: 132 return 0;135 return false; 133 136 } 134 137 } … … 142 145 * for answer_preprocess(). 143 146 * 144 * @param call 145 * 146 * @return Return 1if the old call contents should be saved.147 * Return 0 otherwise.148 */ 149 static inline intanswer_need_old(call_t *call)147 * @param call Call structure to be decided. 148 * 149 * @return true if the old call contents should be saved. 150 * 151 */ 152 static inline bool answer_need_old(call_t *call) 150 153 { 151 154 switch (IPC_GET_METHOD(call->data)) { … … 158 161 case IPC_M_DATA_WRITE: 159 162 case IPC_M_DATA_READ: 160 return 1;163 return true; 161 164 default: 162 return 0;165 return false; 163 166 } 164 167 } … … 168 171 * This function is called directly after sys_ipc_answer(). 169 172 * 170 * @param answer Call structure with the answer. 171 * @param olddata Saved data of the request. 172 * 173 * @return Return 0 on success or an error code. 173 * @param answer Call structure with the answer. 
174 * @param olddata Saved data of the request. 175 * 176 * @return Return 0 on success or an error code. 177 * 174 178 */ 175 179 static inline int answer_preprocess(call_t *answer, ipc_data_t *olddata) 176 180 { 177 int phoneid;178 179 181 if ((native_t) IPC_GET_RETVAL(answer->data) == EHANGUP) { 180 182 /* In case of forward, hangup the forwared phone, … … 182 184 */ 183 185 mutex_lock(&answer->data.phone->lock); 184 spinlock_lock(&TASK->answerbox.lock);186 irq_spinlock_lock(&TASK->answerbox.lock, true); 185 187 if (answer->data.phone->state == IPC_PHONE_CONNECTED) { 186 188 list_remove(&answer->data.phone->link); 187 189 answer->data.phone->state = IPC_PHONE_SLAMMED; 188 190 } 189 spinlock_unlock(&TASK->answerbox.lock);191 irq_spinlock_unlock(&TASK->answerbox.lock, true); 190 192 mutex_unlock(&answer->data.phone->lock); 191 193 } 192 194 193 195 if (!olddata) 194 196 return 0; 195 197 196 198 if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECTION_CLONE) { 197 phoneid = IPC_GET_ARG1(*olddata); 198 phone_t *phone = &TASK->phones[phoneid]; 199 int phoneid = IPC_GET_ARG1(*olddata); 200 phone_t *phone = &TASK->phones[phoneid]; 201 199 202 if (IPC_GET_RETVAL(answer->data) != EOK) { 200 203 /* … … 208 211 mutex_lock(&phone->lock); 209 212 if (phone->state == IPC_PHONE_CONNECTED) { 210 spinlock_lock(&phone->callee->lock);213 irq_spinlock_lock(&phone->callee->lock, true); 211 214 list_remove(&phone->link); 212 215 phone->state = IPC_PHONE_SLAMMED; 213 spinlock_unlock(&phone->callee->lock);216 irq_spinlock_unlock(&phone->callee->lock, true); 214 217 } 215 218 mutex_unlock(&phone->lock); 216 219 } 217 220 } else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME) { 218 phone_t *phone = (phone_t *)IPC_GET_ARG5(*olddata); 221 phone_t *phone = (phone_t *) IPC_GET_ARG5(*olddata); 222 219 223 if (IPC_GET_RETVAL(answer->data) != EOK) { 220 224 /* … … 226 230 mutex_lock(&phone->lock); 227 231 if (phone->state == IPC_PHONE_CONNECTED) { 228 spinlock_lock(&phone->callee->lock);232 irq_spinlock_lock(&phone->callee->lock, true); 229 233 list_remove(&phone->link); 230 234 phone->state = IPC_PHONE_SLAMMED; 231 spinlock_unlock(&phone->callee->lock);235 irq_spinlock_unlock(&phone->callee->lock, true); 232 236 } 233 237 mutex_unlock(&phone->lock); 234 238 } 235 239 } else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_TO_ME) { 236 phoneid = IPC_GET_ARG5(*olddata); 240 int phoneid = IPC_GET_ARG5(*olddata); 241 237 242 if (IPC_GET_RETVAL(answer->data) != EOK) { 238 243 /* The connection was not accepted */ … … 254 259 if (!IPC_GET_RETVAL(answer->data)) { 255 260 /* Accepted, handle as_area receipt */ 256 ipl_t ipl;257 int rc;258 as_t *as;259 261 260 ipl = interrupts_disable(); 261 spinlock_lock(&answer->sender->lock); 262 as = answer->sender->as; 263 spinlock_unlock(&answer->sender->lock); 264 interrupts_restore(ipl); 262 irq_spinlock_lock(&answer->sender->lock, true); 263 as_t *as = answer->sender->as; 264 irq_spinlock_unlock(&answer->sender->lock, true); 265 265 266 rc = as_area_share(as, IPC_GET_ARG1(*olddata),266 int rc = as_area_share(as, IPC_GET_ARG1(*olddata), 267 267 IPC_GET_ARG2(*olddata), AS, 268 268 IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata)); … … 272 272 } else if (IPC_GET_METHOD(*olddata) == IPC_M_SHARE_IN) { 273 273 if (!IPC_GET_RETVAL(answer->data)) { 274 i pl_t ipl;275 as_t *as ;276 i nt rc;274 irq_spinlock_lock(&answer->sender->lock, true); 275 as_t *as = answer->sender->as; 276 irq_spinlock_unlock(&answer->sender->lock, true); 277 277 278 ipl = interrupts_disable(); 279 
spinlock_lock(&answer->sender->lock); 280 as = answer->sender->as; 281 spinlock_unlock(&answer->sender->lock); 282 interrupts_restore(ipl); 283 284 rc = as_area_share(AS, IPC_GET_ARG1(answer->data), 278 int rc = as_area_share(AS, IPC_GET_ARG1(answer->data), 285 279 IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata), 286 280 IPC_GET_ARG2(answer->data)); … … 301 295 */ 302 296 IPC_SET_ARG1(answer->data, dst); 303 297 304 298 answer->buffer = malloc(size, 0); 305 299 int rc = copy_from_uspace(answer->buffer, … … 320 314 if (!IPC_GET_RETVAL(answer->data)) { 321 315 /* The recipient agreed to receive data. */ 322 int rc; 323 uintptr_t dst; 324 size_t size; 325 size_t max_size; 326 327 dst = (uintptr_t)IPC_GET_ARG1(answer->data); 328 size = (size_t)IPC_GET_ARG2(answer->data); 329 max_size = (size_t)IPC_GET_ARG2(*olddata); 330 316 uintptr_t dst = (uintptr_t)IPC_GET_ARG1(answer->data); 317 size_t size = (size_t)IPC_GET_ARG2(answer->data); 318 size_t max_size = (size_t)IPC_GET_ARG2(*olddata); 319 331 320 if (size <= max_size) { 332 rc = copy_to_uspace((void *) dst,321 int rc = copy_to_uspace((void *) dst, 333 322 answer->buffer, size); 334 323 if (rc) … … 341 330 answer->buffer = NULL; 342 331 } 332 343 333 return 0; 344 334 } … … 352 342 mutex_lock(&p2->lock); 353 343 mutex_lock(&p1->lock); 354 } else {344 } else 355 345 mutex_lock(&p1->lock); 356 }357 346 } 358 347 … … 366 355 /** Called before the request is sent. 367 356 * 368 * @param call Call structure with the request. 369 * @param phone Phone that the call will be sent through. 370 * 371 * @return Return 0 on success, ELIMIT or EPERM on error. 357 * @param call Call structure with the request. 358 * @param phone Phone that the call will be sent through. 359 * 360 * @return Return 0 on success, ELIMIT or EPERM on error. 361 * 372 362 */ 373 363 static int request_preprocess(call_t *call, phone_t *phone) 374 364 { 375 int newphid;376 size_t size;377 uintptr_t src;378 int rc;379 380 365 switch (IPC_GET_METHOD(call->data)) { 381 366 case IPC_M_CONNECTION_CLONE: { 382 367 phone_t *cloned_phone; 383 384 368 if (phone_get(IPC_GET_ARG1(call->data), &cloned_phone) != EOK) 385 369 return ENOENT; 370 386 371 phones_lock(cloned_phone, phone); 387 372 388 373 if ((cloned_phone->state != IPC_PHONE_CONNECTED) || 389 374 phone->state != IPC_PHONE_CONNECTED) { … … 391 376 return EINVAL; 392 377 } 378 393 379 /* 394 380 * We can be pretty sure now that both tasks exist and we are … … 396 382 * we are effectively preventing them from finishing their 397 383 * potential cleanup. 384 * 398 385 */ 399 newphid = phone_alloc(phone->callee->task);386 int newphid = phone_alloc(phone->callee->task); 400 387 if (newphid < 0) { 401 388 phones_unlock(cloned_phone, phone); 402 389 return ELIMIT; 403 390 } 391 404 392 ipc_phone_connect(&phone->callee->task->phones[newphid], 405 393 cloned_phone->callee); 406 394 phones_unlock(cloned_phone, phone); 395 407 396 /* Set the new phone for the callee. 
*/ 408 397 IPC_SET_ARG1(call->data, newphid); … … 412 401 IPC_SET_ARG5(call->data, (unative_t) phone); 413 402 break; 414 case IPC_M_CONNECT_ME_TO: 415 newphid = phone_alloc(TASK);403 case IPC_M_CONNECT_ME_TO: { 404 int newphid = phone_alloc(TASK); 416 405 if (newphid < 0) 417 406 return ELIMIT; 407 418 408 /* Set arg5 for server */ 419 409 IPC_SET_ARG5(call->data, (unative_t) &TASK->phones[newphid]); … … 421 411 call->priv = newphid; 422 412 break; 423 case IPC_M_SHARE_OUT: 424 size = as_area_get_size(IPC_GET_ARG1(call->data)); 413 } 414 case IPC_M_SHARE_OUT: { 415 size_t size = as_area_get_size(IPC_GET_ARG1(call->data)); 425 416 if (!size) 426 417 return EPERM; 418 427 419 IPC_SET_ARG2(call->data, size); 428 420 break; 429 case IPC_M_DATA_READ: 430 size = IPC_GET_ARG2(call->data); 421 } 422 case IPC_M_DATA_READ: { 423 size_t size = IPC_GET_ARG2(call->data); 431 424 if ((size <= 0 || (size > DATA_XFER_LIMIT))) 432 425 return ELIMIT; 426 433 427 break; 434 case IPC_M_DATA_WRITE: 435 src = IPC_GET_ARG1(call->data); 436 size = IPC_GET_ARG2(call->data); 428 } 429 case IPC_M_DATA_WRITE: { 430 uintptr_t src = IPC_GET_ARG1(call->data); 431 size_t size = IPC_GET_ARG2(call->data); 437 432 438 433 if (size > DATA_XFER_LIMIT) … … 440 435 441 436 call->buffer = (uint8_t *) malloc(size, 0); 442 rc = copy_from_uspace(call->buffer, (void *) src, size);437 int rc = copy_from_uspace(call->buffer, (void *) src, size); 443 438 if (rc != 0) { 444 439 free(call->buffer); 445 440 return rc; 446 441 } 442 447 443 break; 444 } 448 445 #ifdef CONFIG_UDEBUG 449 446 case IPC_M_DEBUG_ALL: … … 453 450 break; 454 451 } 452 455 453 return 0; 456 454 } … … 462 460 /** Do basic kernel processing of received call answer. 463 461 * 464 * @param call Call structure with the answer. 462 * @param call Call structure with the answer. 463 * 465 464 */ 466 465 static void process_answer(call_t *call) … … 469 468 (call->flags & IPC_CALL_FORWARDED)) 470 469 IPC_SET_RETVAL(call->data, EFORWARD); 471 470 472 471 if (call->flags & IPC_CALL_CONN_ME_TO) { 473 472 if (IPC_GET_RETVAL(call->data)) … … 476 475 IPC_SET_ARG5(call->data, call->priv); 477 476 } 478 477 479 478 if (call->buffer) { 480 /* This must be an affirmative answer to IPC_M_DATA_READ. */ 481 /* or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ... */ 479 /* 480 * This must be an affirmative answer to IPC_M_DATA_READ 481 * or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ... 482 * 483 */ 482 484 uintptr_t dst = IPC_GET_ARG1(call->data); 483 485 size_t size = IPC_GET_ARG2(call->data); … … 492 494 /** Do basic kernel processing of received call request. 493 495 * 494 * @param box Destination answerbox structure. 495 * @param call Call structure with the request. 496 * 497 * @return Return 0 if the call should be passed to userspace. 498 * Return -1 if the call should be ignored. 496 * @param box Destination answerbox structure. 497 * @param call Call structure with the request. 498 * 499 * @return 0 if the call should be passed to userspace. 500 * @return -1 if the call should be ignored. 
501 * 499 502 */ 500 503 static int process_request(answerbox_t *box, call_t *call) 501 504 { 502 int phoneid;503 504 505 if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) { 505 phoneid = phone_alloc(TASK);506 int phoneid = phone_alloc(TASK); 506 507 if (phoneid < 0) { /* Failed to allocate phone */ 507 508 IPC_SET_RETVAL(call->data, ELIMIT); … … 509 510 return -1; 510 511 } 512 511 513 IPC_SET_ARG5(call->data, phoneid); 512 514 } 515 513 516 switch (IPC_GET_METHOD(call->data)) { 514 517 case IPC_M_DEBUG_ALL: … … 517 520 break; 518 521 } 522 519 523 return 0; 520 524 } … … 525 529 * the generic function (i.e. sys_ipc_call_sync_slow()). 526 530 * 527 * @param phoneid Phone handle for the call. 528 * @param method Method of the call. 529 * @param arg1 Service-defined payload argument. 530 * @param arg2 Service-defined payload argument. 531 * @param arg3 Service-defined payload argument. 532 * @param data Address of userspace structure where the reply call will 533 * be stored. 534 * 535 * @return Returns 0 on success. 536 * Return ENOENT if there is no such phone handle. 531 * @param phoneid Phone handle for the call. 532 * @param method Method of the call. 533 * @param arg1 Service-defined payload argument. 534 * @param arg2 Service-defined payload argument. 535 * @param arg3 Service-defined payload argument. 536 * @param data Address of userspace structure where the reply call will 537 * be stored. 538 * 539 * @return 0 on success. 540 * @return ENOENT if there is no such phone handle. 541 * 537 542 */ 538 543 unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method, 539 544 unative_t arg1, unative_t arg2, unative_t arg3, ipc_data_t *data) 540 545 { 541 call_t *call;542 546 phone_t *phone; 543 int res;544 int rc;545 546 547 if (phone_get(phoneid, &phone) != EOK) 547 548 return ENOENT; 548 549 549 call = ipc_call_alloc(0);550 call_t *call = ipc_call_alloc(0); 550 551 IPC_SET_METHOD(call->data, method); 551 552 IPC_SET_ARG1(call->data, arg1); 552 553 IPC_SET_ARG2(call->data, arg2); 553 554 IPC_SET_ARG3(call->data, arg3); 555 554 556 /* 555 557 * To achieve deterministic behavior, zero out arguments that are beyond … … 558 560 IPC_SET_ARG4(call->data, 0); 559 561 IPC_SET_ARG5(call->data, 0); 560 561 if (!(res = request_preprocess(call, phone))) { 562 563 int res = request_preprocess(call, phone); 564 int rc; 565 566 if (!res) { 562 567 #ifdef CONFIG_UDEBUG 563 568 udebug_stoppable_begin(); … … 567 572 udebug_stoppable_end(); 568 573 #endif 574 569 575 if (rc != EOK) { 570 576 /* The call will be freed by ipc_cleanup(). */ 571 577 return rc; 572 578 } 579 573 580 process_answer(call); 574 575 } else {581 582 } else 576 583 IPC_SET_RETVAL(call->data, res); 577 }584 578 585 rc = STRUCT_TO_USPACE(&data->args, &call->data.args); 579 586 ipc_call_free(call); 580 587 if (rc != 0) 581 588 return rc; 582 589 583 590 return 0; 584 591 } … … 586 593 /** Make a synchronous IPC call allowing to transmit the entire payload. 587 594 * 588 * @param phoneid Phone handle for the call. 589 * @param question Userspace address of call data with the request. 590 * @param reply Userspace address of call data where to store the 591 * answer. 592 * 593 * @return Zero on success or an error code. 595 * @param phoneid Phone handle for the call. 596 * @param question Userspace address of call data with the request. 597 * @param reply Userspace address of call data where to store the 598 * answer. 599 * 600 * @return Zero on success or an error code. 
601 * 594 602 */ 595 603 unative_t sys_ipc_call_sync_slow(unative_t phoneid, ipc_data_t *question, 596 604 ipc_data_t *reply) 597 605 { 598 call_t *call;599 606 phone_t *phone; 600 int res;601 int rc;602 603 607 if (phone_get(phoneid, &phone) != EOK) 604 608 return ENOENT; 605 609 606 call = ipc_call_alloc(0);607 rc = copy_from_uspace(&call->data.args, &question->args,610 call_t *call = ipc_call_alloc(0); 611 int rc = copy_from_uspace(&call->data.args, &question->args, 608 612 sizeof(call->data.args)); 609 613 if (rc != 0) { … … 611 615 return (unative_t) rc; 612 616 } 613 614 615 if (!(res = request_preprocess(call, phone))) { 617 618 int res = request_preprocess(call, phone); 619 620 if (!res) { 616 621 #ifdef CONFIG_UDEBUG 617 622 udebug_stoppable_begin(); … … 621 626 udebug_stoppable_end(); 622 627 #endif 628 623 629 if (rc != EOK) { 624 630 /* The call will be freed by ipc_cleanup(). */ 625 631 return rc; 626 632 } 633 627 634 process_answer(call); 628 } else 635 } else 629 636 IPC_SET_RETVAL(call->data, res); 630 637 631 638 rc = STRUCT_TO_USPACE(&reply->args, &call->data.args); 632 639 ipc_call_free(call); 633 640 if (rc != 0) 634 641 return rc; 635 642 636 643 return 0; 637 644 } … … 639 646 /** Check that the task did not exceed the allowed limit of asynchronous calls. 640 647 * 641 * @return Return 0 if limit not reached or -1 if limit exceeded. 648 * @return 0 if limit not reached or -1 if limit exceeded. 649 * 642 650 */ 643 651 static int check_call_limit(void) … … 647 655 return -1; 648 656 } 657 649 658 return 0; 650 659 } … … 655 664 * the generic function sys_ipc_call_async_slow(). 656 665 * 657 * @param phoneid Phone handle for the call. 658 * @param method Method of the call. 659 * @param arg1 Service-defined payload argument. 660 * @param arg2 Service-defined payload argument. 661 * @param arg3 Service-defined payload argument. 662 * @param arg4 Service-defined payload argument. 663 * 664 * @return Return call hash on success. 665 * Return IPC_CALLRET_FATAL in case of a fatal error and 666 * IPC_CALLRET_TEMPORARY if there are too many pending 667 * asynchronous requests; answers should be handled first. 666 * @param phoneid Phone handle for the call. 667 * @param method Method of the call. 668 * @param arg1 Service-defined payload argument. 669 * @param arg2 Service-defined payload argument. 670 * @param arg3 Service-defined payload argument. 671 * @param arg4 Service-defined payload argument. 672 * 673 * @return Call hash on success. 674 * @return IPC_CALLRET_FATAL in case of a fatal error. 675 * @return IPC_CALLRET_TEMPORARY if there are too many pending 676 * asynchronous requests; answers should be handled first. 
677 * 668 678 */ 669 679 unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method, 670 680 unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4) 671 681 { 672 call_t *call;673 phone_t *phone;674 int res;675 676 682 if (check_call_limit()) 677 683 return IPC_CALLRET_TEMPORARY; 678 684 685 phone_t *phone; 679 686 if (phone_get(phoneid, &phone) != EOK) 680 687 return IPC_CALLRET_FATAL; 681 682 call = ipc_call_alloc(0);688 689 call_t *call = ipc_call_alloc(0); 683 690 IPC_SET_METHOD(call->data, method); 684 691 IPC_SET_ARG1(call->data, arg1); … … 686 693 IPC_SET_ARG3(call->data, arg3); 687 694 IPC_SET_ARG4(call->data, arg4); 695 688 696 /* 689 697 * To achieve deterministic behavior, zero out arguments that are beyond … … 691 699 */ 692 700 IPC_SET_ARG5(call->data, 0); 693 694 if (!(res = request_preprocess(call, phone))) 701 702 int res = request_preprocess(call, phone); 703 704 if (!res) 695 705 ipc_call(phone, call); 696 706 else 697 707 ipc_backsend_err(phone, call, res); 698 708 699 709 return (unative_t) call; 700 710 } … … 702 712 /** Make an asynchronous IPC call allowing to transmit the entire payload. 703 713 * 704 * @param phoneid Phone handle for the call. 705 * @param data Userspace address of call data with the request. 706 * 707 * @return See sys_ipc_call_async_fast(). 714 * @param phoneid Phone handle for the call. 715 * @param data Userspace address of call data with the request. 716 * 717 * @return See sys_ipc_call_async_fast(). 718 * 708 719 */ 709 720 unative_t sys_ipc_call_async_slow(unative_t phoneid, ipc_data_t *data) 710 721 { 711 call_t *call;712 phone_t *phone;713 int res;714 int rc;715 716 722 if (check_call_limit()) 717 723 return IPC_CALLRET_TEMPORARY; 718 724 725 phone_t *phone; 719 726 if (phone_get(phoneid, &phone) != EOK) 720 727 return IPC_CALLRET_FATAL; 721 728 722 call = ipc_call_alloc(0);723 rc = copy_from_uspace(&call->data.args, &data->args,729 call_t *call = ipc_call_alloc(0); 730 int rc = copy_from_uspace(&call->data.args, &data->args, 724 731 sizeof(call->data.args)); 725 732 if (rc != 0) { … … 727 734 return (unative_t) rc; 728 735 } 729 if (!(res = request_preprocess(call, phone))) 736 737 int res = request_preprocess(call, phone); 738 739 if (!res) 730 740 ipc_call(phone, call); 731 741 else 732 742 ipc_backsend_err(phone, call, res); 733 743 734 744 return (unative_t) call; 735 745 } 736 746 737 /** Forward a received call to another destination - common code for both the 738 * fast and the slow version. 739 * 740 * @param callid Hash of the call to forward. 741 * @param phoneid Phone handle to use for forwarding. 742 * @param method New method to use for the forwarded call. 743 * @param arg1 New value of the first argument for the forwarded call. 744 * @param arg2 New value of the second argument for the forwarded call. 745 * @param arg3 New value of the third argument for the forwarded call. 746 * @param arg4 New value of the fourth argument for the forwarded call. 747 * @param arg5 New value of the fifth argument for the forwarded call. 748 * @param mode Flags that specify mode of the forward operation. 749 * @param slow If true, arg3, arg4 and arg5 are considered. Otherwise 750 * the function considers only the fast version arguments: 751 * i.e. arg1 and arg2. 752 * 753 * @return Return 0 on succes, otherwise return an error code. 
754 * 755 * Warning: Make sure that ARG5 is not rewritten for certain system IPC 747 /** Forward a received call to another destination 748 * 749 * Common code for both the fast and the slow version. 750 * 751 * @param callid Hash of the call to forward. 752 * @param phoneid Phone handle to use for forwarding. 753 * @param method New method to use for the forwarded call. 754 * @param arg1 New value of the first argument for the forwarded call. 755 * @param arg2 New value of the second argument for the forwarded call. 756 * @param arg3 New value of the third argument for the forwarded call. 757 * @param arg4 New value of the fourth argument for the forwarded call. 758 * @param arg5 New value of the fifth argument for the forwarded call. 759 * @param mode Flags that specify mode of the forward operation. 760 * @param slow If true, arg3, arg4 and arg5 are considered. Otherwise 761 * the function considers only the fast version arguments: 762 * i.e. arg1 and arg2. 763 * 764 * @return 0 on succes, otherwise an error code. 765 * 766 * Warning: Make sure that ARG5 is not rewritten for certain system IPC 767 * 756 768 */ 757 769 static unative_t sys_ipc_forward_common(unative_t callid, unative_t phoneid, 758 770 unative_t method, unative_t arg1, unative_t arg2, unative_t arg3, 759 unative_t arg4, unative_t arg5, int mode, bool slow) 760 { 761 call_t *call; 762 phone_t *phone; 763 764 call = get_call(callid); 771 unative_t arg4, unative_t arg5, unsigned int mode, bool slow) 772 { 773 call_t *call = get_call(callid); 765 774 if (!call) 766 775 return ENOENT; 767 776 768 777 call->flags |= IPC_CALL_FORWARDED; 769 778 779 phone_t *phone; 770 780 if (phone_get(phoneid, &phone) != EOK) { 771 781 IPC_SET_RETVAL(call->data, EFORWARD); … … 773 783 return ENOENT; 774 784 } 775 785 776 786 if (!method_is_forwardable(IPC_GET_METHOD(call->data))) { 777 787 IPC_SET_RETVAL(call->data, EFORWARD); … … 779 789 return EPERM; 780 790 } 781 791 782 792 /* 783 793 * Userspace is not allowed to change method of system methods on … … 790 800 if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) 791 801 phone_dealloc(IPC_GET_ARG5(call->data)); 792 802 793 803 IPC_SET_ARG1(call->data, method); 794 804 IPC_SET_ARG2(call->data, arg1); 795 805 IPC_SET_ARG3(call->data, arg2); 806 796 807 if (slow) { 797 808 IPC_SET_ARG4(call->data, arg3); … … 812 823 } 813 824 } 814 825 815 826 return ipc_forward(call, phone, &TASK->answerbox, mode); 816 827 } 817 828 818 829 /** Forward a received call to another destination - fast version. 819 *820 * @param callid Hash of the call to forward.821 * @param phoneid Phone handle to use for forwarding.822 * @param method New method to use for the forwarded call.823 * @param arg1 New value of the first argument for the forwarded call.824 * @param arg2 New value of the second argument for the forwarded call.825 * @param mode Flags that specify mode of the forward operation.826 *827 * @return Return 0 on succes, otherwise return an error code.828 830 * 829 831 * In case the original method is a system method, ARG1, ARG2 and ARG3 are … … 833 835 * is a set of immutable methods, for which the new method and arguments are not 834 836 * set and these values are ignored. 837 * 838 * @param callid Hash of the call to forward. 839 * @param phoneid Phone handle to use for forwarding. 840 * @param method New method to use for the forwarded call. 841 * @param arg1 New value of the first argument for the forwarded call. 842 * @param arg2 New value of the second argument for the forwarded call. 
843 * @param mode Flags that specify mode of the forward operation. 844 * 845 * @return 0 on succes, otherwise an error code. 846 * 835 847 */ 836 848 unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid, 837 unative_t method, unative_t arg1, unative_t arg2, int mode)849 unative_t method, unative_t arg1, unative_t arg2, unsigned int mode) 838 850 { 839 851 return sys_ipc_forward_common(callid, phoneid, method, arg1, arg2, 0, 0, … … 842 854 843 855 /** Forward a received call to another destination - slow version. 844 *845 * @param callid Hash of the call to forward.846 * @param phoneid Phone handle to use for forwarding.847 * @param data Userspace address of the new IPC data.848 * @param mode Flags that specify mode of the forward operation.849 *850 * @return Return 0 on succes, otherwise return an error code.851 856 * 852 857 * This function is the slow verision of the sys_ipc_forward_fast interface. … … 856 861 * methods, it additionally stores the new value of arg3, arg4 and arg5, 857 862 * respectively, to ARG3, ARG4 and ARG5, respectively. 863 * 864 * @param callid Hash of the call to forward. 865 * @param phoneid Phone handle to use for forwarding. 866 * @param data Userspace address of the new IPC data. 867 * @param mode Flags that specify mode of the forward operation. 868 * 869 * @return 0 on succes, otherwise an error code. 870 * 858 871 */ 859 872 unative_t sys_ipc_forward_slow(unative_t callid, unative_t phoneid, 860 ipc_data_t *data, int mode)873 ipc_data_t *data, unsigned int mode) 861 874 { 862 875 ipc_data_t newdata; 863 int rc; 864 865 rc = copy_from_uspace(&newdata.args, &data->args, 876 int rc = copy_from_uspace(&newdata.args, &data->args, 866 877 sizeof(newdata.args)); 867 if (rc != 0) 878 if (rc != 0) 868 879 return (unative_t) rc; 869 880 870 881 return sys_ipc_forward_common(callid, phoneid, 871 882 IPC_GET_METHOD(newdata), IPC_GET_ARG1(newdata), … … 879 890 * than the generic sys_ipc_answer(). 880 891 * 881 * @param callid Hash of the call to be answered. 882 * @param retval Return value of the answer. 883 * @param arg1 Service-defined return value. 884 * @param arg2 Service-defined return value. 885 * @param arg3 Service-defined return value. 886 * @param arg4 Service-defined return value. 887 * 888 * @return Return 0 on success, otherwise return an error code. 892 * @param callid Hash of the call to be answered. 893 * @param retval Return value of the answer. 894 * @param arg1 Service-defined return value. 895 * @param arg2 Service-defined return value. 896 * @param arg3 Service-defined return value. 897 * @param arg4 Service-defined return value. 898 * 899 * @return 0 on success, otherwise an error code. 
900 * 889 901 */ 890 902 unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval, 891 903 unative_t arg1, unative_t arg2, unative_t arg3, unative_t arg4) 892 904 { 893 call_t *call;894 ipc_data_t saved_data;895 int saveddata = 0;896 int rc;897 898 905 /* Do not answer notification callids */ 899 906 if (callid & IPC_CALLID_NOTIFICATION) 900 907 return 0; 901 902 call = get_call(callid);908 909 call_t *call = get_call(callid); 903 910 if (!call) 904 911 return ENOENT; 905 912 913 ipc_data_t saved_data; 914 bool saved; 915 906 916 if (answer_need_old(call)) { 907 917 memcpy(&saved_data, &call->data, sizeof(call->data)); 908 saveddata = 1; 909 } 910 918 saved = true; 919 } else 920 saved = false; 921 911 922 IPC_SET_RETVAL(call->data, retval); 912 923 IPC_SET_ARG1(call->data, arg1); … … 914 925 IPC_SET_ARG3(call->data, arg3); 915 926 IPC_SET_ARG4(call->data, arg4); 927 916 928 /* 917 929 * To achieve deterministic behavior, zero out arguments that are beyond … … 919 931 */ 920 932 IPC_SET_ARG5(call->data, 0); 921 rc = answer_preprocess(call, saveddata? &saved_data : NULL);922 933 int rc = answer_preprocess(call, saved ? &saved_data : NULL); 934 923 935 ipc_answer(&TASK->answerbox, call); 924 936 return rc; … … 927 939 /** Answer an IPC call. 928 940 * 929 * @param callid Hash of the call to be answered. 930 * @param data Userspace address of call data with the answer. 931 * 932 * @return Return 0 on success, otherwise return an error code. 941 * @param callid Hash of the call to be answered. 942 * @param data Userspace address of call data with the answer. 943 * 944 * @return 0 on success, otherwise an error code. 945 * 933 946 */ 934 947 unative_t sys_ipc_answer_slow(unative_t callid, ipc_data_t *data) 935 948 { 936 call_t *call;937 ipc_data_t saved_data;938 int saveddata = 0;939 int rc;940 941 949 /* Do not answer notification callids */ 942 950 if (callid & IPC_CALLID_NOTIFICATION) 943 951 return 0; 944 945 call = get_call(callid);952 953 call_t *call = get_call(callid); 946 954 if (!call) 947 955 return ENOENT; 948 956 957 ipc_data_t saved_data; 958 bool saved; 959 949 960 if (answer_need_old(call)) { 950 961 memcpy(&saved_data, &call->data, sizeof(call->data)); 951 saveddata = 1; 952 } 953 rc = copy_from_uspace(&call->data.args, &data->args, 962 saved = true; 963 } else 964 saved = false; 965 966 int rc = copy_from_uspace(&call->data.args, &data->args, 954 967 sizeof(call->data.args)); 955 968 if (rc != 0) 956 969 return rc; 957 958 rc = answer_preprocess(call, saved data? &saved_data : NULL);970 971 rc = answer_preprocess(call, saved ? &saved_data : NULL); 959 972 960 973 ipc_answer(&TASK->answerbox, call); 961 962 974 return rc; 963 975 } … … 965 977 /** Hang up a phone. 966 978 * 967 * @param Phone handle of the phone to be hung up. 968 * 969 * @return Return 0 on success or an error code. 979 * @param Phone handle of the phone to be hung up. 980 * 981 * @return 0 on success or an error code. 982 * 970 983 */ 971 984 unative_t sys_ipc_hangup(unative_t phoneid) 972 985 { 973 986 phone_t *phone; 974 987 975 988 if (phone_get(phoneid, &phone) != EOK) 976 989 return ENOENT; 977 990 978 991 if (ipc_phone_hangup(phone)) 979 992 return -1; 980 993 981 994 return 0; 982 995 } … … 984 997 /** Wait for an incoming IPC call or an answer. 985 998 * 986 * @param calldata Pointer to buffer where the call/answer data is stored. 987 * @param usec Timeout. See waitq_sleep_timeout() for explanation. 988 * @param flags Select mode of sleep operation. 
See waitq_sleep_timeout() 989 * for explanation. 990 * 991 * @return Hash of the call. 992 * If IPC_CALLID_NOTIFICATION bit is set in the hash, the 993 * call is a notification. IPC_CALLID_ANSWERED denotes an 994 * answer. 995 */ 996 unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec, int flags) 999 * @param calldata Pointer to buffer where the call/answer data is stored. 1000 * @param usec Timeout. See waitq_sleep_timeout() for explanation. 1001 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() 1002 * for explanation. 1003 * 1004 * @return Hash of the call. 1005 * If IPC_CALLID_NOTIFICATION bit is set in the hash, the 1006 * call is a notification. IPC_CALLID_ANSWERED denotes an 1007 * answer. 1008 * 1009 */ 1010 unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec, 1011 unsigned int flags) 997 1012 { 998 1013 call_t *call; 999 1014 1000 1015 restart: 1001 1016 1002 1017 #ifdef CONFIG_UDEBUG 1003 1018 udebug_stoppable_begin(); 1004 #endif 1019 #endif 1020 1005 1021 call = ipc_wait_for_call(&TASK->answerbox, usec, 1006 1022 flags | SYNCH_FLAGS_INTERRUPTIBLE); 1007 1023 1008 1024 #ifdef CONFIG_UDEBUG 1009 1025 udebug_stoppable_end(); 1010 1026 #endif 1027 1011 1028 if (!call) 1012 1029 return 0; 1013 1030 1014 1031 if (call->flags & IPC_CALL_NOTIF) { 1015 1032 /* Set in_phone_hash to the interrupt counter */ … … 1017 1034 1018 1035 STRUCT_TO_USPACE(calldata, &call->data); 1019 1036 1020 1037 ipc_call_free(call); 1021 1038 1022 1039 return ((unative_t) call) | IPC_CALLID_NOTIFICATION; 1023 1040 } 1024 1041 1025 1042 if (call->flags & IPC_CALL_ANSWERED) { 1026 1043 process_answer(call); 1027 1044 1028 1045 if (call->flags & IPC_CALL_DISCARD_ANSWER) { 1029 1046 ipc_call_free(call); … … 1037 1054 atomic_dec(&TASK->active_calls); 1038 1055 } 1039 1056 1040 1057 STRUCT_TO_USPACE(&calldata->args, &call->data.args); 1041 1058 ipc_call_free(call); 1042 1059 1043 1060 return ((unative_t) call) | IPC_CALLID_ANSWERED; 1044 1061 } 1045 1062 1046 1063 if (process_request(&TASK->answerbox, call)) 1047 1064 goto restart; 1048 1065 1049 1066 /* Include phone address('id') of the caller in the request, 1050 1067 * copy whole call->data, not only call->data.args */ … … 1055 1072 */ 1056 1073 ipc_data_t saved_data; 1057 int saveddata = 0;1058 1074 bool saved; 1075 1059 1076 if (answer_need_old(call)) { 1060 1077 memcpy(&saved_data, &call->data, sizeof(call->data)); 1061 saveddata = 1; 1062 } 1078 saved = true; 1079 } else 1080 saved = false; 1063 1081 1064 1082 IPC_SET_RETVAL(call->data, EPARTY); 1065 (void) answer_preprocess(call, saved data? &saved_data : NULL);1083 (void) answer_preprocess(call, saved ? &saved_data : NULL); 1066 1084 ipc_answer(&TASK->answerbox, call); 1067 1085 return 0; 1068 1086 } 1069 return (unative_t)call; 1070 } 1071 1072 /** Interrupt one thread from sys_ipc_wait_for_call(). */ 1087 1088 return (unative_t) call; 1089 } 1090 1091 /** Interrupt one thread from sys_ipc_wait_for_call(). 1092 * 1093 */ 1073 1094 unative_t sys_ipc_poke(void) 1074 1095 { 1075 waitq_unsleep(&TASK->answerbox.wq); 1096 waitq_unsleep(&TASK->answerbox.wq); 1076 1097 return EOK; 1077 1098 } … … 1079 1100 /** Connect an IRQ handler to a task. 1080 1101 * 1081 * @param inr IRQ number. 1082 * @param devno Device number. 1083 * @param method Method to be associated with the notification. 1084 * @param ucode Uspace pointer to the top-half pseudocode. 1085 * 1086 * @return EPERM or a return code returned by ipc_irq_register(). 1102 * @param inr IRQ number. 
1103 * @param devno Device number. 1104 * @param method Method to be associated with the notification. 1105 * @param ucode Uspace pointer to the top-half pseudocode. 1106 * 1107 * @return EPERM or a return code returned by ipc_irq_register(). 1108 * 1087 1109 */ 1088 1110 unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method, … … 1091 1113 if (!(cap_get(TASK) & CAP_IRQ_REG)) 1092 1114 return EPERM; 1093 1115 1094 1116 return ipc_irq_register(&TASK->answerbox, inr, devno, method, ucode); 1095 1117 } … … 1097 1119 /** Disconnect an IRQ handler from a task. 1098 1120 * 1099 * @param inr IRQ number. 1100 * @param devno Device number. 1101 * 1102 * @return Zero on success or EPERM on error.. 1121 * @param inr IRQ number. 1122 * @param devno Device number. 1123 * 1124 * @return Zero on success or EPERM on error. 1125 * 1103 1126 */ 1104 1127 unative_t sys_ipc_unregister_irq(inr_t inr, devno_t devno) … … 1106 1129 if (!(cap_get(TASK) & CAP_IRQ_REG)) 1107 1130 return EPERM; 1108 1131 1109 1132 ipc_irq_unregister(&TASK->answerbox, inr, devno); 1110 1133 1111 1134 return 0; 1112 1135 } … … 1114 1137 #include <console/console.h> 1115 1138 1116 /** 1117 * Syscall connect to a task by id.1118 * 1119 * @return Phone id on success, or negative error code.1139 /** Syscall connect to a task by id. 1140 * 1141 * @return Phone id on success, or negative error code. 1142 * 1120 1143 */ 1121 1144 unative_t sys_ipc_connect_kbox(sysarg64_t *uspace_taskid_arg) … … 1123 1146 #ifdef CONFIG_UDEBUG 1124 1147 sysarg64_t taskid_arg; 1125 int rc; 1126 1127 rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 1148 int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 1128 1149 if (rc != 0) 1129 1150 return (unative_t) rc; 1130 1151 1131 1152 LOG("sys_ipc_connect_kbox(%" PRIu64 ")\n", taskid_arg.value); 1132 1153 1133 1154 return ipc_connect_kbox(taskid_arg.value); 1134 1155 #else -
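The answer paths in sysipc.c above share one piece of bookkeeping that is easy to miss in the diff: when answer_need_old() reports that the original request still matters, a copy of it is taken before the answer overwrites call->data, and answer_preprocess() is then handed either that copy or NULL. Condensed from sys_ipc_answer_fast()/sys_ipc_answer_slow() for reference (the elided middle step is illustrative):

    ipc_data_t saved_data;
    bool saved;

    if (answer_need_old(call)) {
        /* Preserve the request; the answer is about to overwrite call->data. */
        memcpy(&saved_data, &call->data, sizeof(call->data));
        saved = true;
    } else
        saved = false;

    /* ... store the answer (retval, arguments) into call->data ... */

    int rc = answer_preprocess(call, saved ? &saved_data : NULL);
    ipc_answer(&TASK->answerbox, call);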
kernel/generic/src/lib/elf.c
r666f492 rda1bafb 28 28 */ 29 29 30 /** @addtogroup generic 30 /** @addtogroup generic 31 31 * @{ 32 32 */ … … 34 34 /** 35 35 * @file 36 * @brief 36 * @brief Kernel ELF loader. 37 37 */ 38 38 … … 57 57 }; 58 58 59 static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, 60 as_t *as, int flags); 61 static int section_header(elf_section_header_t *entry, elf_header_t *elf, 62 as_t *as); 63 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, 64 as_t *as); 59 static int segment_header(elf_segment_header_t *, elf_header_t *, as_t *, 60 unsigned int); 61 static int section_header(elf_section_header_t *, elf_header_t *, as_t *); 62 static int load_segment(elf_segment_header_t *, elf_header_t *, as_t *); 65 63 66 64 /** ELF loader 67 65 * 68 66 * @param header Pointer to ELF header in memory 69 * @param as Created and properly mapped address space 70 * @param flags A combination of ELD_F_* 67 * @param as Created and properly mapped address space 68 * @param flags A combination of ELD_F_* 69 * 71 70 * @return EE_OK on success 72 */ 73 unsigned int elf_load(elf_header_t *header, as_t * as, int flags) 74 { 75 int i, rc; 76 71 * 72 */ 73 unsigned int elf_load(elf_header_t *header, as_t *as, unsigned int flags) 74 { 77 75 /* Identify ELF */ 78 if ( header->e_ident[EI_MAG0] != ELFMAG0||79 header->e_ident[EI_MAG1] != ELFMAG1 ||80 header->e_ident[EI_MAG2] != ELFMAG2||81 header->e_ident[EI_MAG3] != ELFMAG3) {76 if ((header->e_ident[EI_MAG0] != ELFMAG0) || 77 (header->e_ident[EI_MAG1] != ELFMAG1) || 78 (header->e_ident[EI_MAG2] != ELFMAG2) || 79 (header->e_ident[EI_MAG3] != ELFMAG3)) 82 80 return EE_INVALID; 83 }84 81 85 82 /* Identify ELF compatibility */ 86 if ( header->e_ident[EI_DATA] != ELF_DATA_ENCODING||87 header->e_machine != ELF_MACHINE ||88 header->e_ident[EI_VERSION] != EV_CURRENT||89 header->e_version != EV_CURRENT||90 header->e_ident[EI_CLASS] != ELF_CLASS) {83 if ((header->e_ident[EI_DATA] != ELF_DATA_ENCODING) || 84 (header->e_machine != ELF_MACHINE) || 85 (header->e_ident[EI_VERSION] != EV_CURRENT) || 86 (header->e_version != EV_CURRENT) || 87 (header->e_ident[EI_CLASS] != ELF_CLASS)) 91 88 return EE_INCOMPATIBLE; 92 } 93 89 94 90 if (header->e_phentsize != sizeof(elf_segment_header_t)) 95 91 return EE_INCOMPATIBLE; 96 92 97 93 if (header->e_shentsize != sizeof(elf_section_header_t)) 98 94 return EE_INCOMPATIBLE; 99 95 100 96 /* Check if the object type is supported. */ 101 97 if (header->e_type != ET_EXEC) 102 98 return EE_UNSUPPORTED; 103 99 104 100 /* Check if the ELF image starts on a page boundary */ 105 if (ALIGN_UP((uintptr_t) header, PAGE_SIZE) != (uintptr_t)header)101 if (ALIGN_UP((uintptr_t) header, PAGE_SIZE) != (uintptr_t) header) 106 102 return EE_UNSUPPORTED; 107 103 108 104 /* Walk through all segment headers and process them. */ 105 elf_half i; 109 106 for (i = 0; i < header->e_phnum; i++) { 110 elf_segment_header_t *seghdr; 111 112 seghdr = &((elf_segment_header_t *)(((uint8_t *) header) + 107 elf_segment_header_t *seghdr = 108 &((elf_segment_header_t *)(((uint8_t *) header) + 113 109 header->e_phoff))[i]; 114 rc = segment_header(seghdr, header, as, flags); 110 111 int rc = segment_header(seghdr, header, as, flags); 115 112 if (rc != EE_OK) 116 113 return rc; 117 114 } 118 115 119 116 /* Inspect all section headers and proccess them. 
*/ 120 117 for (i = 0; i < header->e_shnum; i++) { 121 elf_section_header_t *sechdr; 122 123 sechdr = &((elf_section_header_t *)(((uint8_t *) header) + 118 elf_section_header_t *sechdr = 119 &((elf_section_header_t *)(((uint8_t *) header) + 124 120 header->e_shoff))[i]; 125 rc = section_header(sechdr, header, as); 121 122 int rc = section_header(sechdr, header, as); 126 123 if (rc != EE_OK) 127 124 return rc; 128 125 } 129 126 130 127 return EE_OK; 131 128 } … … 136 133 * 137 134 * @return NULL terminated description of error. 135 * 138 136 */ 139 137 const char *elf_error(unsigned int rc) 140 138 { 141 139 ASSERT(rc < sizeof(error_codes) / sizeof(char *)); 142 140 143 141 return error_codes[rc]; 144 142 } … … 147 145 * 148 146 * @param entry Segment header. 149 * @param elf ELF header.150 * @param as Address space into wich the ELF is being loaded.147 * @param elf ELF header. 148 * @param as Address space into wich the ELF is being loaded. 151 149 * 152 150 * @return EE_OK on success, error code otherwise. 151 * 153 152 */ 154 153 static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, 155 as_t *as, int flags)154 as_t *as, unsigned int flags) 156 155 { 157 156 switch (entry->p_type) { … … 170 169 return EE_UNSUPPORTED; 171 170 } */ 172 if ((flags & ELD_F_LOADER) == 0) {171 if ((flags & ELD_F_LOADER) == 0) 173 172 return EE_LOADER; 174 }175 173 break; 176 174 case PT_SHLIB: … … 187 185 * 188 186 * @param entry Program header entry describing segment to be loaded. 189 * @param elf ELF header.190 * @param as Address space into wich the ELF is being loaded.187 * @param elf ELF header. 188 * @param as Address space into wich the ELF is being loaded. 191 189 * 192 190 * @return EE_OK on success, error code otherwise. 191 * 193 192 */ 194 193 int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as) 195 194 { 196 as_area_t *a;197 int flags = 0;198 195 mem_backend_data_t backend_data; 199 uintptr_t base;200 size_t mem_sz;201 202 196 backend_data.elf = elf; 203 197 backend_data.segment = entry; 204 198 205 199 if (entry->p_align > 1) { 206 200 if ((entry->p_offset % entry->p_align) != 207 (entry->p_vaddr % entry->p_align)) {201 (entry->p_vaddr % entry->p_align)) 208 202 return EE_INVALID; 209 } 210 } 211 203 } 204 205 unsigned int flags = 0; 206 212 207 if (entry->p_flags & PF_X) 213 208 flags |= AS_AREA_EXEC; 209 214 210 if (entry->p_flags & PF_W) 215 211 flags |= AS_AREA_WRITE; 212 216 213 if (entry->p_flags & PF_R) 217 214 flags |= AS_AREA_READ; 215 218 216 flags |= AS_AREA_CACHEABLE; 219 220 /* 217 218 /* 221 219 * Align vaddr down, inserting a little "gap" at the beginning. 222 220 * Adjust area size, so that its end remains in place. 221 * 223 222 */ 224 base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE);225 mem_sz = entry->p_memsz + (entry->p_vaddr - base);226 227 a = as_area_create(as, flags, mem_sz, base,223 uintptr_t base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE); 224 size_t mem_sz = entry->p_memsz + (entry->p_vaddr - base); 225 226 as_area_t *area = as_area_create(as, flags, mem_sz, base, 228 227 AS_AREA_ATTR_NONE, &elf_backend, &backend_data); 229 if (!a )228 if (!area) 230 229 return EE_MEMORY; 231 230 232 231 /* 233 232 * The segment will be mapped on demand by elf_page_fault(). 233 * 234 234 */ 235 235 236 236 return EE_OK; 237 237 } … … 240 240 * 241 241 * @param entry Segment header. 242 * @param elf ELF header.243 * @param as Address space into wich the ELF is being loaded.242 * @param elf ELF header. 
243 * @param as Address space into wich the ELF is being loaded. 244 244 * 245 245 * @return EE_OK on success, error code otherwise. 246 * 246 247 */ 247 248 static int section_header(elf_section_header_t *entry, elf_header_t *elf, -
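
The elf.c hunk above is mostly a type clean-up: the flag parameters change from int to unsigned int and the validation and segment/section walking code is reflowed without changing behaviour. As a quick orientation aid, here is a minimal, hypothetical sketch of how the elf_load()/elf_error() pair with the new signatures would be called; the wrapper function load_program_image() and the zero flags value are assumptions for illustration, not code from this revision -- only the two calls and the EE_* return convention come from the diff.

    /*
     * Illustrative only, not part of the changeset. Assumes it lives in a
     * kernel source file that already includes the ELF and address space
     * headers, as elf.c above does.
     */
    static unsigned int load_program_image(elf_header_t *image, as_t *as)
    {
    	/* flags is a combination of ELD_F_*; 0 requests a regular (non-loader) load */
    	unsigned int rc = elf_load(image, as, 0);
    	if (rc != EE_OK)
    		printf("Cannot load ELF image: %s\n", elf_error(rc));
    
    	return rc;
    }
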
kernel/generic/src/main/kinit.c
r666f492 rda1bafb 115 115 thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true); 116 116 if (thread != NULL) { 117 spinlock_lock(&thread->lock);117 irq_spinlock_lock(&thread->lock, false); 118 118 thread->cpu = &cpus[0]; 119 spinlock_unlock(&thread->lock);119 irq_spinlock_unlock(&thread->lock, false); 120 120 thread_ready(thread); 121 121 } else … … 135 135 thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true); 136 136 if (thread != NULL) { 137 spinlock_lock(&thread->lock);137 irq_spinlock_lock(&thread->lock, false); 138 138 thread->cpu = &cpus[i]; 139 spinlock_unlock(&thread->lock);139 irq_spinlock_unlock(&thread->lock, false); 140 140 thread_ready(thread); 141 141 } else … … 199 199 str_cpy(namebuf + INIT_PREFIX_LEN, 200 200 TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name); 201 201 202 202 int rc = program_create_from_image((void *) init.tasks[i].addr, 203 203 namebuf, &programs[i]); … … 222 222 } 223 223 } 224 224 225 225 /* 226 226 * Run user tasks. … … 230 230 program_ready(&programs[i]); 231 231 } 232 232 233 233 #ifdef CONFIG_KCONSOLE 234 234 if (!stdin) { -
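
The kinit.c hunk repeats the central pattern of this changeset: thread_t::lock is now an IRQ spinlock, so every caller states explicitly whether the locking code should disable interrupts itself. kinit() already runs with interrupts disabled, hence the false argument when wiring the kmp and kcpulb threads to their CPUs. The helper below is only an illustrative condensation of that pattern under the same assumption; it is not a function introduced by the changeset.

    /*
     * Sketch of the wiring pattern used in kinit() above. Assumes the caller
     * runs with interrupts already disabled, which is why 'false' is passed
     * to irq_spinlock_lock()/irq_spinlock_unlock(). Callers running with
     * interrupts enabled would pass 'true' instead.
     */
    static void wire_thread_to_cpu(thread_t *thread, cpu_t *cpu)
    {
    	irq_spinlock_lock(&thread->lock, false);
    	thread->cpu = cpu;
    	irq_spinlock_unlock(&thread->lock, false);
    
    	/* Only after the CPU is wired is the thread made ready to run. */
    	thread_ready(thread);
    }
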
kernel/generic/src/mm/as.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 86 86 * Each architecture decides what functions will be used to carry out 87 87 * address space operations such as creating or locking page tables. 88 * 88 89 */ 89 90 as_operations_t *as_operations = NULL; … … 91 92 /** 92 93 * Slab for as_t objects. 94 * 93 95 */ 94 96 static slab_cache_t *as_slab; … … 100 102 * - as->asid for each as of the as_t type 101 103 * - asids_allocated counter 104 * 102 105 */ 103 106 SPINLOCK_INITIALIZE(asidlock); … … 106 109 * This list contains address spaces that are not active on any 107 110 * processor and that have valid ASID. 111 * 108 112 */ 109 113 LIST_INITIALIZE(inactive_as_with_asid_head); … … 112 116 as_t *AS_KERNEL = NULL; 113 117 114 static int area_flags_to_page_flags(int);118 static unsigned int area_flags_to_page_flags(unsigned int); 115 119 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 116 120 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 117 121 static void sh_info_remove_reference(share_info_t *); 118 122 119 static int as_constructor(void *obj, int flags)123 static int as_constructor(void *obj, unsigned int flags) 120 124 { 121 125 as_t *as = (as_t *) obj; 122 int rc; 123 126 124 127 link_initialize(&as->inactive_as_with_asid_link); 125 128 mutex_initialize(&as->lock, MUTEX_PASSIVE); 126 129 127 rc = as_constructor_arch(as, flags);130 int rc = as_constructor_arch(as, flags); 128 131 129 132 return rc; 130 133 } 131 134 132 static int as_destructor(void *obj)135 static size_t as_destructor(void *obj) 133 136 { 134 137 as_t *as = (as_t *) obj; 135 136 138 return as_destructor_arch(as); 137 139 } … … 141 143 { 142 144 as_arch_init(); 143 145 144 146 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 145 147 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 157 159 /** Create address space. 158 160 * 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 161 * @param flags Flags that influence the way in wich the address 162 * space is created. 163 * 164 */ 165 as_t *as_create(unsigned int flags) 166 { 167 as_t *as = (as_t *) slab_alloc(as_slab, 0); 167 168 (void) as_create_arch(as, 0); 168 169 … … 176 177 atomic_set(&as->refcount, 0); 177 178 as->cpu_refcount = 0; 179 178 180 #ifdef AS_PAGE_TABLE 179 181 as->genarch.page_table = page_table_create(flags); … … 192 194 * We know that we don't hold any spinlock. 193 195 * 194 * @param as Address space to be destroyed. 196 * @param as Address space to be destroyed. 197 * 195 198 */ 196 199 void as_destroy(as_t *as) 197 200 { 198 ipl_t ipl;199 bool cond;200 201 DEADLOCK_PROBE_INIT(p_asidlock); 201 202 … … 214 215 * disabled to prevent nested context switches. We also depend on the 215 216 * fact that so far no spinlocks are held. 
217 * 216 218 */ 217 219 preemption_disable(); 218 ipl = interrupts_read(); 220 ipl_t ipl = interrupts_read(); 221 219 222 retry: 220 223 interrupts_disable(); … … 224 227 goto retry; 225 228 } 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 229 230 /* Interrupts disabled, enable preemption */ 231 preemption_enable(); 232 233 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 228 234 if (as->cpu_refcount == 0) 229 235 list_remove(&as->inactive_as_with_asid_link); 236 230 237 asid_put(as->asid); 231 238 } 239 232 240 spinlock_unlock(&asidlock); 233 241 234 242 /* 235 243 * Destroy address space areas of the address space. 236 244 * The B+tree must be walked carefully because it is 237 245 * also being destroyed. 238 * /239 for (cond = true; cond; ) {240 btree_node_t *node;241 246 * 247 */ 248 bool cond = true; 249 while (cond) { 242 250 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 243 node = list_get_instance(as->as_area_btree.leaf_head.next, 251 252 btree_node_t *node = 253 list_get_instance(as->as_area_btree.leaf_head.next, 244 254 btree_node_t, leaf_link); 245 246 if ((cond = node->keys)) {255 256 if ((cond = node->keys)) 247 257 as_area_destroy(as, node->key[0]); 248 } 249 } 250 258 } 259 251 260 btree_destroy(&as->as_area_btree); 261 252 262 #ifdef AS_PAGE_TABLE 253 263 page_table_destroy(as->genarch.page_table); … … 255 265 page_table_destroy(NULL); 256 266 #endif 257 267 258 268 interrupts_restore(ipl); 259 269 260 270 slab_free(as_slab, as); 261 271 } … … 266 276 * space. 267 277 * 268 * @param a Address space to be held. 278 * @param as Address space to be held. 279 * 269 280 */ 270 281 void as_hold(as_t *as) … … 278 289 * space. 279 290 * 280 * @param a Address space to be released. 291 * @param asAddress space to be released. 292 * 281 293 */ 282 294 void as_release(as_t *as) … … 290 302 * The created address space area is added to the target address space. 291 303 * 292 * @param as Target address space. 293 * @param flags Flags of the area memory. 294 * @param size Size of area. 295 * @param base Base address of area. 296 * @param attrs Attributes of the area. 297 * @param backend Address space area backend. NULL if no backend is used. 298 * @param backend_data NULL or a pointer to an array holding two void *. 299 * 300 * @return Address space area on success or NULL on failure. 301 */ 302 as_area_t * 303 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 304 mem_backend_t *backend, mem_backend_data_t *backend_data) 305 { 306 ipl_t ipl; 307 as_area_t *a; 308 304 * @param as Target address space. 305 * @param flags Flags of the area memory. 306 * @param size Size of area. 307 * @param base Base address of area. 308 * @param attrs Attributes of the area. 309 * @param backend Address space area backend. NULL if no backend is used. 310 * @param backend_data NULL or a pointer to an array holding two void *. 311 * 312 * @return Address space area on success or NULL on failure. 313 * 314 */ 315 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 316 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 317 mem_backend_data_t *backend_data) 318 { 309 319 if (base % PAGE_SIZE) 310 320 return NULL; 311 321 312 322 if (!size) 313 323 return NULL; 314 324 315 325 /* Writeable executable areas are not supported. 
*/ 316 326 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 317 327 return NULL; 318 328 319 ipl = interrupts_disable();329 ipl_t ipl = interrupts_disable(); 320 330 mutex_lock(&as->lock); 321 331 … … 326 336 } 327 337 328 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 329 330 mutex_initialize(&a->lock, MUTEX_PASSIVE); 331 332 a->as = as; 333 a->flags = flags; 334 a->attributes = attrs; 335 a->pages = SIZE2FRAMES(size); 336 a->base = base; 337 a->sh_info = NULL; 338 a->backend = backend; 338 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 339 340 mutex_initialize(&area->lock, MUTEX_PASSIVE); 341 342 area->as = as; 343 area->flags = flags; 344 area->attributes = attrs; 345 area->pages = SIZE2FRAMES(size); 346 area->base = base; 347 area->sh_info = NULL; 348 area->backend = backend; 349 339 350 if (backend_data) 340 a ->backend_data = *backend_data;351 area->backend_data = *backend_data; 341 352 else 342 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 343 344 btree_create(&a->used_space); 345 346 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 347 353 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 354 355 btree_create(&area->used_space); 356 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 357 348 358 mutex_unlock(&as->lock); 349 359 interrupts_restore(ipl); 350 351 return a ;360 361 return area; 352 362 } 353 363 354 364 /** Find address space area and change it. 355 365 * 356 * @param as Address space. 357 * @param address Virtual address belonging to the area to be changed. 358 * Must be page-aligned. 359 * @param size New size of the virtual memory block starting at 360 * address. 361 * @param flags Flags influencing the remap operation. Currently unused. 362 * 363 * @return Zero on success or a value from @ref errno.h otherwise. 364 */ 365 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 366 { 367 as_area_t *area; 368 ipl_t ipl; 369 size_t pages; 370 371 ipl = interrupts_disable(); 366 * @param as Address space. 367 * @param address Virtual address belonging to the area to be changed. 368 * Must be page-aligned. 369 * @param size New size of the virtual memory block starting at 370 * address. 371 * @param flags Flags influencing the remap operation. Currently unused. 372 * 373 * @return Zero on success or a value from @ref errno.h otherwise. 374 * 375 */ 376 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 377 { 378 ipl_t ipl = interrupts_disable(); 372 379 mutex_lock(&as->lock); 373 380 374 381 /* 375 382 * Locate the area. 376 */ 377 area = find_area_and_lock(as, address); 383 * 384 */ 385 as_area_t *area = find_area_and_lock(as, address); 378 386 if (!area) { 379 387 mutex_unlock(&as->lock); … … 381 389 return ENOENT; 382 390 } 383 391 384 392 if (area->backend == &phys_backend) { 385 393 /* 386 394 * Remapping of address space areas associated 387 395 * with memory mapped devices is not supported. 396 * 388 397 */ 389 398 mutex_unlock(&area->lock); … … 392 401 return ENOTSUP; 393 402 } 403 394 404 if (area->sh_info) { 395 405 /* 396 * Remapping of shared address space areas 406 * Remapping of shared address space areas 397 407 * is not supported. 408 * 398 409 */ 399 410 mutex_unlock(&area->lock); … … 402 413 return ENOTSUP; 403 414 } 404 405 pages = SIZE2FRAMES((address - area->base) + size);415 416 size_t pages = SIZE2FRAMES((address - area->base) + size); 406 417 if (!pages) { 407 418 /* 408 419 * Zero size address space areas are not allowed. 
420 * 409 421 */ 410 422 mutex_unlock(&area->lock); … … 415 427 416 428 if (pages < area->pages) { 417 bool cond;418 429 uintptr_t start_free = area->base + pages * PAGE_SIZE; 419 430 420 431 /* 421 432 * Shrinking the area. 422 433 * No need to check for overlaps. 423 */ 424 434 * 435 */ 436 425 437 page_table_lock(as, false); 426 438 427 439 /* 428 440 * Start TLB shootdown sequence. 441 * 429 442 */ 430 443 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + 431 444 pages * PAGE_SIZE, area->pages - pages); 432 445 433 446 /* 434 447 * Remove frames belonging to used space starting from … … 437 450 * is also the right way to remove part of the used_space 438 451 * B+tree leaf list. 439 * /440 for (cond = true; cond;) {441 btree_node_t *node;442 452 * 453 */ 454 bool cond = true; 455 while (cond) { 443 456 ASSERT(!list_empty(&area->used_space.leaf_head)); 444 node = 457 458 btree_node_t *node = 445 459 list_get_instance(area->used_space.leaf_head.prev, 446 460 btree_node_t, leaf_link); 461 447 462 if ((cond = (bool) node->keys)) { 448 uintptr_t b= node->key[node->keys - 1];449 size_t c=463 uintptr_t ptr = node->key[node->keys - 1]; 464 size_t size = 450 465 (size_t) node->value[node->keys - 1]; 451 unsigned int i = 0;452 453 if (overlaps( b, c* PAGE_SIZE, area->base,466 size_t i = 0; 467 468 if (overlaps(ptr, size * PAGE_SIZE, area->base, 454 469 pages * PAGE_SIZE)) { 455 470 456 if ( b + c* PAGE_SIZE <= start_free) {471 if (ptr + size * PAGE_SIZE <= start_free) { 457 472 /* 458 473 * The whole interval fits 459 474 * completely in the resized 460 475 * address space area. 476 * 461 477 */ 462 478 break; 463 479 } 464 480 465 481 /* 466 482 * Part of the interval corresponding 467 483 * to b and c overlaps with the resized 468 484 * address space area. 485 * 469 486 */ 470 471 cond = false; /* we are almost done */ 472 i = (start_free - b) >> PAGE_WIDTH; 487 488 /* We are almost done */ 489 cond = false; 490 i = (start_free - ptr) >> PAGE_WIDTH; 473 491 if (!used_space_remove(area, start_free, 474 c - i)) 475 panic("Cannot remove used " 476 "space."); 492 size - i)) 493 panic("Cannot remove used space."); 477 494 } else { 478 495 /* … … 480 497 * completely removed. 481 498 */ 482 if (!used_space_remove(area, b, c)) 483 panic("Cannot remove used " 484 "space."); 499 if (!used_space_remove(area, ptr, size)) 500 panic("Cannot remove used space."); 485 501 } 486 487 for (; i < c; i++) { 488 pte_t *pte; 489 490 pte = page_mapping_find(as, b + 502 503 for (; i < size; i++) { 504 pte_t *pte = page_mapping_find(as, ptr + 491 505 i * PAGE_SIZE); 492 ASSERT(pte && PTE_VALID(pte) && 493 PTE_PRESENT(pte)); 494 if (area->backend && 495 area->backend->frame_free) { 506 507 ASSERT(pte); 508 ASSERT(PTE_VALID(pte)); 509 ASSERT(PTE_PRESENT(pte)); 510 511 if ((area->backend) && 512 (area->backend->frame_free)) { 496 513 area->backend->frame_free(area, 497 b+ i * PAGE_SIZE,514 ptr + i * PAGE_SIZE, 498 515 PTE_GET_FRAME(pte)); 499 516 } 500 page_mapping_remove(as, b + 517 518 page_mapping_remove(as, ptr + 501 519 i * PAGE_SIZE); 502 520 } 503 521 } 504 522 } 505 523 506 524 /* 507 525 * Finish TLB shootdown sequence. 508 */ 509 526 * 527 */ 528 510 529 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 511 530 area->pages - pages); 512 531 513 532 /* 514 533 * Invalidate software translation caches (e.g. TSB on sparc64). 
534 * 515 535 */ 516 536 as_invalidate_translation_cache(as, area->base + 517 537 pages * PAGE_SIZE, area->pages - pages); 518 538 tlb_shootdown_finalize(); 519 539 520 540 page_table_unlock(as, false); 521 522 541 } else { 523 542 /* 524 543 * Growing the area. 525 544 * Check for overlaps with other address space areas. 545 * 526 546 */ 527 547 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 528 548 area)) { 529 549 mutex_unlock(&area->lock); 530 mutex_unlock(&as->lock); 550 mutex_unlock(&as->lock); 531 551 interrupts_restore(ipl); 532 552 return EADDRNOTAVAIL; 533 553 } 534 } 535 554 } 555 536 556 area->pages = pages; 537 557 … … 539 559 mutex_unlock(&as->lock); 540 560 interrupts_restore(ipl); 541 561 542 562 return 0; 543 563 } … … 545 565 /** Destroy address space area. 546 566 * 547 * @param as Address space. 548 * @param address Address within the area to be deleted. 549 * 550 * @return Zero on success or a value from @ref errno.h on failure. 567 * @param as Address space. 568 * @param address Address within the area to be deleted. 569 * 570 * @return Zero on success or a value from @ref errno.h on failure. 571 * 551 572 */ 552 573 int as_area_destroy(as_t *as, uintptr_t address) 553 574 { 554 as_area_t *area; 555 uintptr_t base; 556 link_t *cur; 557 ipl_t ipl; 558 559 ipl = interrupts_disable(); 575 ipl_t ipl = interrupts_disable(); 560 576 mutex_lock(&as->lock); 561 562 a rea = find_area_and_lock(as, address);577 578 as_area_t *area = find_area_and_lock(as, address); 563 579 if (!area) { 564 580 mutex_unlock(&as->lock); … … 566 582 return ENOENT; 567 583 } 568 569 base = area->base;570 584 585 uintptr_t base = area->base; 586 571 587 page_table_lock(as, false); 572 588 573 589 /* 574 590 * Start TLB shootdown sequence. 575 591 */ 576 592 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 577 593 578 594 /* 579 595 * Visit only the pages mapped by used_space B+tree. 580 596 */ 597 link_t *cur; 581 598 for (cur = area->used_space.leaf_head.next; 582 599 cur != &area->used_space.leaf_head; cur = cur->next) { 583 600 btree_node_t *node; 584 unsigned int i;601 btree_key_t i; 585 602 586 603 node = list_get_instance(cur, btree_node_t, leaf_link); 587 604 for (i = 0; i < node->keys; i++) { 588 uintptr_t b = node->key[i]; 589 size_t j; 590 pte_t *pte; 605 uintptr_t ptr = node->key[i]; 606 size_t size; 591 607 592 for (j = 0; j < (size_t) node->value[i]; j++) { 593 pte = page_mapping_find(as, b + j * PAGE_SIZE); 594 ASSERT(pte && PTE_VALID(pte) && 595 PTE_PRESENT(pte)); 596 if (area->backend && 597 area->backend->frame_free) { 598 area->backend->frame_free(area, b + 599 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 608 for (size = 0; size < (size_t) node->value[i]; size++) { 609 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 610 611 ASSERT(pte); 612 ASSERT(PTE_VALID(pte)); 613 ASSERT(PTE_PRESENT(pte)); 614 615 if ((area->backend) && 616 (area->backend->frame_free)) { 617 area->backend->frame_free(area, 618 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 600 619 } 601 page_mapping_remove(as, b + j * PAGE_SIZE); 620 621 page_mapping_remove(as, ptr + size * PAGE_SIZE); 602 622 } 603 623 } 604 624 } 605 625 606 626 /* 607 627 * Finish TLB shootdown sequence. 608 */ 609 628 * 629 */ 630 610 631 tlb_invalidate_pages(as->asid, area->base, area->pages); 611 632 612 633 /* 613 634 * Invalidate potential software translation caches (e.g. TSB on 614 635 * sparc64). 
636 * 615 637 */ 616 638 as_invalidate_translation_cache(as, area->base, area->pages); 617 639 tlb_shootdown_finalize(); 618 640 619 641 page_table_unlock(as, false); 620 642 621 643 btree_destroy(&area->used_space); 622 644 623 645 area->attributes |= AS_AREA_ATTR_PARTIAL; 624 646 625 647 if (area->sh_info) 626 648 sh_info_remove_reference(area->sh_info); 627 649 628 650 mutex_unlock(&area->lock); 629 651 630 652 /* 631 653 * Remove the empty area from address space. 654 * 632 655 */ 633 656 btree_remove(&as->as_area_btree, base, NULL); … … 647 670 * sh_info of the source area. The process of duplicating the 648 671 * mapping is done through the backend share function. 649 * 650 * @param src_as 651 * @param src_base 652 * @param acc_size 653 * @param dst_as 654 * @param dst_base 672 * 673 * @param src_as Pointer to source address space. 674 * @param src_base Base address of the source address space area. 675 * @param acc_size Expected size of the source area. 676 * @param dst_as Pointer to destination address space. 677 * @param dst_base Target base address. 655 678 * @param dst_flags_mask Destination address space area flags mask. 656 679 * 657 * @return Zero on success or ENOENT if there is no such task or if 658 * there is no such address space area, EPERM if there was 659 * a problem in accepting the area or ENOMEM if there was a 660 * problem in allocating destination address space area. 661 * ENOTSUP is returned if the address space area backend 662 * does not support sharing. 680 * @return Zero on success. 681 * @return ENOENT if there is no such task or such address space. 682 * @return EPERM if there was a problem in accepting the area. 683 * @return ENOMEM if there was a problem in allocating destination 684 * address space area. 685 * @return ENOTSUP if the address space area backend does not support 686 * sharing. 687 * 663 688 */ 664 689 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 665 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 666 { 667 ipl_t ipl; 668 int src_flags; 669 size_t src_size; 670 as_area_t *src_area, *dst_area; 671 share_info_t *sh_info; 672 mem_backend_t *src_backend; 673 mem_backend_data_t src_backend_data; 674 675 ipl = interrupts_disable(); 690 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 691 { 692 ipl_t ipl = interrupts_disable(); 676 693 mutex_lock(&src_as->lock); 677 src_area = find_area_and_lock(src_as, src_base);694 as_area_t *src_area = find_area_and_lock(src_as, src_base); 678 695 if (!src_area) { 679 696 /* 680 697 * Could not find the source address space area. 698 * 681 699 */ 682 700 mutex_unlock(&src_as->lock); … … 684 702 return ENOENT; 685 703 } 686 687 if ( !src_area->backend || !src_area->backend->share) {704 705 if ((!src_area->backend) || (!src_area->backend->share)) { 688 706 /* 689 707 * There is no backend or the backend does not 690 708 * know how to share the area. 
709 * 691 710 */ 692 711 mutex_unlock(&src_area->lock); … … 696 715 } 697 716 698 s rc_size = src_area->pages * PAGE_SIZE;699 src_flags = src_area->flags;700 src_backend = src_area->backend;701 src_backend_data = src_area->backend_data;702 717 size_t src_size = src_area->pages * PAGE_SIZE; 718 unsigned int src_flags = src_area->flags; 719 mem_backend_t *src_backend = src_area->backend; 720 mem_backend_data_t src_backend_data = src_area->backend_data; 721 703 722 /* Share the cacheable flag from the original mapping */ 704 723 if (src_flags & AS_AREA_CACHEABLE) 705 724 dst_flags_mask |= AS_AREA_CACHEABLE; 706 707 if ( src_size != acc_size||708 ( src_flags & dst_flags_mask) != dst_flags_mask) {725 726 if ((src_size != acc_size) || 727 ((src_flags & dst_flags_mask) != dst_flags_mask)) { 709 728 mutex_unlock(&src_area->lock); 710 729 mutex_unlock(&src_as->lock); … … 712 731 return EPERM; 713 732 } 714 733 715 734 /* 716 735 * Now we are committed to sharing the area. 717 736 * First, prepare the area for sharing. 718 737 * Then it will be safe to unlock it. 719 */ 720 sh_info = src_area->sh_info; 738 * 739 */ 740 share_info_t *sh_info = src_area->sh_info; 721 741 if (!sh_info) { 722 742 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 725 745 btree_create(&sh_info->pagemap); 726 746 src_area->sh_info = sh_info; 747 727 748 /* 728 749 * Call the backend to setup sharing. 750 * 729 751 */ 730 752 src_area->backend->share(src_area); … … 734 756 mutex_unlock(&sh_info->lock); 735 757 } 736 758 737 759 mutex_unlock(&src_area->lock); 738 760 mutex_unlock(&src_as->lock); 739 761 740 762 /* 741 763 * Create copy of the source address space area. … … 745 767 * The flags of the source area are masked against dst_flags_mask 746 768 * to support sharing in less privileged mode. 747 */ 748 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, 749 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 769 * 770 */ 771 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 772 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 750 773 if (!dst_area) { 751 774 /* … … 757 780 return ENOMEM; 758 781 } 759 782 760 783 /* 761 784 * Now the destination address space area has been 762 785 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 763 786 * attribute and set the sh_info. 764 */ 765 mutex_lock(&dst_as->lock); 787 * 788 */ 789 mutex_lock(&dst_as->lock); 766 790 mutex_lock(&dst_area->lock); 767 791 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 768 792 dst_area->sh_info = sh_info; 769 793 mutex_unlock(&dst_area->lock); 770 mutex_unlock(&dst_as->lock); 771 794 mutex_unlock(&dst_as->lock); 795 772 796 interrupts_restore(ipl); 773 797 … … 779 803 * The address space area must be locked prior to this call. 780 804 * 781 * @param area Address space area. 782 * @param access Access mode. 783 * 784 * @return False if access violates area's permissions, true 785 * otherwise. 805 * @param area Address space area. 806 * @param access Access mode. 807 * 808 * @return False if access violates area's permissions, true 809 * otherwise. 
810 * 786 811 */ 787 812 bool as_area_check_access(as_area_t *area, pf_access_t access) … … 792 817 [PF_ACCESS_EXEC] = AS_AREA_EXEC 793 818 }; 794 819 795 820 if (!(area->flags & flagmap[access])) 796 821 return false; … … 813 838 * 814 839 */ 815 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 816 { 817 as_area_t *area; 818 link_t *cur; 819 ipl_t ipl; 820 int page_flags; 821 uintptr_t *old_frame; 822 size_t frame_idx; 823 size_t used_pages; 824 840 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 841 { 825 842 /* Flags for the new memory mapping */ 826 page_flags = area_flags_to_page_flags(flags);827 828 ipl = interrupts_disable();843 unsigned int page_flags = area_flags_to_page_flags(flags); 844 845 ipl_t ipl = interrupts_disable(); 829 846 mutex_lock(&as->lock); 830 831 a rea = find_area_and_lock(as, address);847 848 as_area_t *area = find_area_and_lock(as, address); 832 849 if (!area) { 833 850 mutex_unlock(&as->lock); … … 835 852 return ENOENT; 836 853 } 837 854 838 855 if ((area->sh_info) || (area->backend != &anon_backend)) { 839 856 /* Copying shared areas not supported yet */ … … 844 861 return ENOTSUP; 845 862 } 846 863 847 864 /* 848 865 * Compute total number of used pages in the used_space B+tree 849 */ 850 used_pages = 0; 851 866 * 867 */ 868 size_t used_pages = 0; 869 link_t *cur; 870 852 871 for (cur = area->used_space.leaf_head.next; 853 872 cur != &area->used_space.leaf_head; cur = cur->next) { 854 btree_node_t *node ;855 unsigned int i;856 857 node = list_get_instance(cur, btree_node_t, leaf_link);858 for (i = 0; i < node->keys; i++) {873 btree_node_t *node 874 = list_get_instance(cur, btree_node_t, leaf_link); 875 btree_key_t i; 876 877 for (i = 0; i < node->keys; i++) 859 878 used_pages += (size_t) node->value[i]; 860 } 861 } 862 879 } 880 863 881 /* An array for storing frame numbers */ 864 old_frame = malloc(used_pages * sizeof(uintptr_t), 0);865 882 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 883 866 884 page_table_lock(as, false); 867 885 868 886 /* 869 887 * Start TLB shootdown sequence. 888 * 870 889 */ 871 890 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 872 891 873 892 /* 874 893 * Remove used pages from page tables and remember their frame 875 894 * numbers. 876 */ 877 frame_idx = 0; 878 895 * 896 */ 897 size_t frame_idx = 0; 898 879 899 for (cur = area->used_space.leaf_head.next; 880 900 cur != &area->used_space.leaf_head; cur = cur->next) { 881 btree_node_t *node ;882 unsigned int i;883 884 node = list_get_instance(cur, btree_node_t, leaf_link);901 btree_node_t *node 902 = list_get_instance(cur, btree_node_t, leaf_link); 903 btree_key_t i; 904 885 905 for (i = 0; i < node->keys; i++) { 886 uintptr_t b = node->key[i]; 887 size_t j; 888 pte_t *pte; 906 uintptr_t ptr = node->key[i]; 907 size_t size; 889 908 890 for (j = 0; j < (size_t) node->value[i]; j++) { 891 pte = page_mapping_find(as, b + j * PAGE_SIZE); 892 ASSERT(pte && PTE_VALID(pte) && 893 PTE_PRESENT(pte)); 909 for (size = 0; size < (size_t) node->value[i]; size++) { 910 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 911 912 ASSERT(pte); 913 ASSERT(PTE_VALID(pte)); 914 ASSERT(PTE_PRESENT(pte)); 915 894 916 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 895 917 896 918 /* Remove old mapping */ 897 page_mapping_remove(as, b + j* PAGE_SIZE);919 page_mapping_remove(as, ptr + size * PAGE_SIZE); 898 920 } 899 921 } 900 922 } 901 923 902 924 /* 903 925 * Finish TLB shootdown sequence. 
904 */ 905 926 * 927 */ 928 906 929 tlb_invalidate_pages(as->asid, area->base, area->pages); 907 930 … … 909 932 * Invalidate potential software translation caches (e.g. TSB on 910 933 * sparc64). 934 * 911 935 */ 912 936 as_invalidate_translation_cache(as, area->base, area->pages); 913 937 tlb_shootdown_finalize(); 914 938 915 939 page_table_unlock(as, false); 916 940 917 941 /* 918 942 * Set the new flags. 919 943 */ 920 944 area->flags = flags; 921 945 922 946 /* 923 947 * Map pages back in with new flags. This step is kept separate … … 926 950 */ 927 951 frame_idx = 0; 928 952 929 953 for (cur = area->used_space.leaf_head.next; 930 954 cur != &area->used_space.leaf_head; cur = cur->next) { 931 btree_node_t *node ;932 unsigned int i;933 934 node = list_get_instance(cur, btree_node_t, leaf_link);955 btree_node_t *node 956 = list_get_instance(cur, btree_node_t, leaf_link); 957 btree_key_t i; 958 935 959 for (i = 0; i < node->keys; i++) { 936 uintptr_t b= node->key[i];937 size_t j;960 uintptr_t ptr = node->key[i]; 961 size_t size; 938 962 939 for ( j = 0; j < (size_t) node->value[i]; j++) {963 for (size = 0; size < (size_t) node->value[i]; size++) { 940 964 page_table_lock(as, false); 941 965 942 966 /* Insert the new mapping */ 943 page_mapping_insert(as, b + j* PAGE_SIZE,967 page_mapping_insert(as, ptr + size * PAGE_SIZE, 944 968 old_frame[frame_idx++], page_flags); 945 969 946 970 page_table_unlock(as, false); 947 971 } 948 972 } 949 973 } 950 974 951 975 free(old_frame); 952 976 953 977 mutex_unlock(&area->lock); 954 978 mutex_unlock(&as->lock); 955 979 interrupts_restore(ipl); 956 980 957 981 return 0; 958 982 } 959 960 983 961 984 /** Handle page fault within the current address space. … … 967 990 * Interrupts are assumed disabled. 968 991 * 969 * @param page Faulting page. 970 * @param access Access mode that caused the page fault (i.e. 971 * read/write/exec). 972 * @param istate Pointer to the interrupted state. 973 * 974 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 975 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 976 * or copy_from_uspace(). 992 * @param page Faulting page. 993 * @param access Access mode that caused the page fault (i.e. 994 * read/write/exec). 995 * @param istate Pointer to the interrupted state. 996 * 997 * @return AS_PF_FAULT on page fault. 998 * @return AS_PF_OK on success. 999 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1000 * or copy_from_uspace(). 1001 * 977 1002 */ 978 1003 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 979 1004 { 980 pte_t *pte;981 as_area_t *area;982 983 1005 if (!THREAD) 984 1006 return AS_PF_FAULT; … … 988 1010 989 1011 mutex_lock(&AS->lock); 990 a rea = find_area_and_lock(AS, page);1012 as_area_t *area = find_area_and_lock(AS, page); 991 1013 if (!area) { 992 1014 /* 993 1015 * No area contained mapping for 'page'. 994 1016 * Signal page fault to low-level handler. 1017 * 995 1018 */ 996 1019 mutex_unlock(&AS->lock); 997 1020 goto page_fault; 998 1021 } 999 1022 1000 1023 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 1001 1024 /* … … 1005 1028 mutex_unlock(&area->lock); 1006 1029 mutex_unlock(&AS->lock); 1007 goto page_fault; 1008 } 1009 1010 if ( !area->backend || !area->backend->page_fault) {1030 goto page_fault; 1031 } 1032 1033 if ((!area->backend) || (!area->backend->page_fault)) { 1011 1034 /* 1012 1035 * The address space area is not backed by any backend 1013 1036 * or the backend cannot handle page faults. 
1037 * 1014 1038 */ 1015 1039 mutex_unlock(&area->lock); 1016 1040 mutex_unlock(&AS->lock); 1017 goto page_fault; 1018 } 1019 1041 goto page_fault; 1042 } 1043 1020 1044 page_table_lock(AS, false); 1021 1045 … … 1023 1047 * To avoid race condition between two page faults on the same address, 1024 1048 * we need to make sure the mapping has not been already inserted. 1025 */ 1049 * 1050 */ 1051 pte_t *pte; 1026 1052 if ((pte = page_mapping_find(AS, page))) { 1027 1053 if (PTE_PRESENT(pte)) { … … 1039 1065 /* 1040 1066 * Resort to the backend page fault handler. 1067 * 1041 1068 */ 1042 1069 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1051 1078 mutex_unlock(&AS->lock); 1052 1079 return AS_PF_OK; 1053 1080 1054 1081 page_fault: 1055 1082 if (THREAD->in_copy_from_uspace) { … … 1064 1091 return AS_PF_FAULT; 1065 1092 } 1066 1093 1067 1094 return AS_PF_DEFER; 1068 1095 } … … 1076 1103 * When this function is enetered, no spinlocks may be held. 1077 1104 * 1078 * @param old Old address space or NULL. 1079 * @param new New address space. 1105 * @param old Old address space or NULL. 1106 * @param new New address space. 1107 * 1080 1108 */ 1081 1109 void as_switch(as_t *old_as, as_t *new_as) … … 1083 1111 DEADLOCK_PROBE_INIT(p_asidlock); 1084 1112 preemption_disable(); 1113 1085 1114 retry: 1086 1115 (void) interrupts_disable(); 1087 1116 if (!spinlock_trylock(&asidlock)) { 1088 /* 1117 /* 1089 1118 * Avoid deadlock with TLB shootdown. 1090 1119 * We can enable interrupts here because 1091 1120 * preemption is disabled. We should not be 1092 1121 * holding any other lock. 1122 * 1093 1123 */ 1094 1124 (void) interrupts_enable(); … … 1097 1127 } 1098 1128 preemption_enable(); 1099 1129 1100 1130 /* 1101 1131 * First, take care of the old address space. 1102 */ 1132 */ 1103 1133 if (old_as) { 1104 1134 ASSERT(old_as->cpu_refcount); 1105 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1135 1136 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1106 1137 /* 1107 1138 * The old address space is no longer active on … … 1109 1140 * list of inactive address spaces with assigned 1110 1141 * ASID. 1142 * 1111 1143 */ 1112 1144 ASSERT(old_as->asid != ASID_INVALID); 1145 1113 1146 list_append(&old_as->inactive_as_with_asid_link, 1114 1147 &inactive_as_with_asid_head); 1115 1148 } 1116 1149 1117 1150 /* 1118 1151 * Perform architecture-specific tasks when the address space 1119 1152 * is being removed from the CPU. 1153 * 1120 1154 */ 1121 1155 as_deinstall_arch(old_as); 1122 1156 } 1123 1157 1124 1158 /* 1125 1159 * Second, prepare the new address space. 1160 * 1126 1161 */ 1127 1162 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1131 1166 new_as->asid = asid_get(); 1132 1167 } 1168 1133 1169 #ifdef AS_PAGE_TABLE 1134 1170 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1138 1174 * Perform architecture-specific steps. 1139 1175 * (e.g. write ASID to hardware register etc.) 1176 * 1140 1177 */ 1141 1178 as_install_arch(new_as); 1142 1179 1143 1180 spinlock_unlock(&asidlock); 1144 1181 … … 1148 1185 /** Convert address space area flags to page flags. 1149 1186 * 1150 * @param aflags Flags of some address space area. 1151 * 1152 * @return Flags to be passed to page_mapping_insert(). 1153 */ 1154 int area_flags_to_page_flags(int aflags) 1155 { 1156 int flags; 1157 1158 flags = PAGE_USER | PAGE_PRESENT; 1187 * @param aflags Flags of some address space area. 1188 * 1189 * @return Flags to be passed to page_mapping_insert(). 
1190 * 1191 */ 1192 unsigned int area_flags_to_page_flags(unsigned int aflags) 1193 { 1194 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1159 1195 1160 1196 if (aflags & AS_AREA_READ) … … 1169 1205 if (aflags & AS_AREA_CACHEABLE) 1170 1206 flags |= PAGE_CACHEABLE; 1171 1207 1172 1208 return flags; 1173 1209 } … … 1178 1214 * Interrupts must be disabled. 1179 1215 * 1180 * @param a Address space area. 1181 * 1182 * @return Flags to be used in page_mapping_insert(). 1183 */ 1184 int as_area_get_flags(as_area_t *a) 1185 { 1186 return area_flags_to_page_flags(a->flags); 1216 * @param area Address space area. 1217 * 1218 * @return Flags to be used in page_mapping_insert(). 1219 * 1220 */ 1221 unsigned int as_area_get_flags(as_area_t *area) 1222 { 1223 return area_flags_to_page_flags(area->flags); 1187 1224 } 1188 1225 … … 1192 1229 * table. 1193 1230 * 1194 * @param flags Flags saying whether the page table is for the kernel 1195 * address space. 1196 * 1197 * @return First entry of the page table. 1198 */ 1199 pte_t *page_table_create(int flags) 1231 * @param flags Flags saying whether the page table is for the kernel 1232 * address space. 1233 * 1234 * @return First entry of the page table. 1235 * 1236 */ 1237 pte_t *page_table_create(unsigned int flags) 1200 1238 { 1201 1239 ASSERT(as_operations); … … 1209 1247 * Destroy page table in architecture specific way. 1210 1248 * 1211 * @param page_table Physical address of PTL0. 1249 * @param page_table Physical address of PTL0. 1250 * 1212 1251 */ 1213 1252 void page_table_destroy(pte_t *page_table) … … 1223 1262 * This function should be called before any page_mapping_insert(), 1224 1263 * page_mapping_remove() and page_mapping_find(). 1225 * 1264 * 1226 1265 * Locking order is such that address space areas must be locked 1227 1266 * prior to this call. Address space can be locked prior to this 1228 1267 * call in which case the lock argument is false. 1229 1268 * 1230 * @param as Address space. 1231 * @param lock If false, do not attempt to lock as->lock. 1269 * @param as Address space. 1270 * @param lock If false, do not attempt to lock as->lock. 1271 * 1232 1272 */ 1233 1273 void page_table_lock(as_t *as, bool lock) … … 1241 1281 /** Unlock page table. 1242 1282 * 1243 * @param as Address space. 1244 * @param unlock If false, do not attempt to unlock as->lock. 1283 * @param as Address space. 1284 * @param unlock If false, do not attempt to unlock as->lock. 1285 * 1245 1286 */ 1246 1287 void page_table_unlock(as_t *as, bool unlock) … … 1257 1298 * The address space must be locked and interrupts must be disabled. 1258 1299 * 1259 * @param as Address space. 1260 * @param va Virtual address. 1261 * 1262 * @return Locked address space area containing va on success or 1263 * NULL on failure. 1300 * @param as Address space. 1301 * @param va Virtual address. 1302 * 1303 * @return Locked address space area containing va on success or 1304 * NULL on failure. 
1305 * 1264 1306 */ 1265 1307 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1266 1308 { 1267 as_area_t *a; 1268 btree_node_t *leaf, *lnode; 1269 unsigned int i; 1270 1271 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1272 if (a) { 1309 btree_node_t *leaf; 1310 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1311 if (area) { 1273 1312 /* va is the base address of an address space area */ 1274 mutex_lock(&a ->lock);1275 return a ;1313 mutex_lock(&area->lock); 1314 return area; 1276 1315 } 1277 1316 … … 1280 1319 * to find out whether this is a miss or va belongs to an address 1281 1320 * space area found there. 1321 * 1282 1322 */ 1283 1323 1284 1324 /* First, search the leaf node itself. */ 1325 btree_key_t i; 1326 1285 1327 for (i = 0; i < leaf->keys; i++) { 1286 a = (as_area_t *) leaf->value[i]; 1287 mutex_lock(&a->lock); 1288 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1289 return a; 1290 } 1291 mutex_unlock(&a->lock); 1292 } 1293 1328 area = (as_area_t *) leaf->value[i]; 1329 1330 mutex_lock(&area->lock); 1331 1332 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 1333 return area; 1334 1335 mutex_unlock(&area->lock); 1336 } 1337 1294 1338 /* 1295 1339 * Second, locate the left neighbour and test its last record. 1296 1340 * Because of its position in the B+tree, it must have base < va. 1297 */ 1298 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1341 * 1342 */ 1343 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1299 1344 if (lnode) { 1300 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1301 mutex_lock(&a->lock); 1302 if (va < a->base + a->pages * PAGE_SIZE) { 1303 return a; 1304 } 1305 mutex_unlock(&a->lock); 1306 } 1307 1345 area = (as_area_t *) lnode->value[lnode->keys - 1]; 1346 1347 mutex_lock(&area->lock); 1348 1349 if (va < area->base + area->pages * PAGE_SIZE) 1350 return area; 1351 1352 mutex_unlock(&area->lock); 1353 } 1354 1308 1355 return NULL; 1309 1356 } … … 1313 1360 * The address space must be locked and interrupts must be disabled. 1314 1361 * 1315 * @param as Address space. 1316 * @param va Starting virtual address of the area being tested. 1317 * @param size Size of the area being tested. 1318 * @param avoid_area Do not touch this area. 1319 * 1320 * @return True if there is no conflict, false otherwise. 1321 */ 1322 bool 1323 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1324 { 1325 as_area_t *a; 1326 btree_node_t *leaf, *node; 1327 unsigned int i; 1328 1362 * @param as Address space. 1363 * @param va Starting virtual address of the area being tested. 1364 * @param size Size of the area being tested. 1365 * @param avoid_area Do not touch this area. 1366 * 1367 * @return True if there is no conflict, false otherwise. 1368 * 1369 */ 1370 bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 1371 as_area_t *avoid_area) 1372 { 1329 1373 /* 1330 1374 * We don't want any area to have conflicts with NULL page. 1375 * 1331 1376 */ 1332 1377 if (overlaps(va, size, NULL, PAGE_SIZE)) … … 1339 1384 * record in the left neighbour, the leftmost record in the right 1340 1385 * neighbour and all records in the leaf node itself. 
1341 */ 1342 1343 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1344 if (a != avoid_area) 1386 * 1387 */ 1388 btree_node_t *leaf; 1389 as_area_t *area = 1390 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1391 if (area) { 1392 if (area != avoid_area) 1345 1393 return false; 1346 1394 } 1347 1395 1348 1396 /* First, check the two border cases. */ 1349 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1350 a = (as_area_t *) node->value[node->keys - 1]; 1351 mutex_lock(&a->lock); 1352 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1353 mutex_unlock(&a->lock); 1397 btree_node_t *node = 1398 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1399 if (node) { 1400 area = (as_area_t *) node->value[node->keys - 1]; 1401 1402 mutex_lock(&area->lock); 1403 1404 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1405 mutex_unlock(&area->lock); 1354 1406 return false; 1355 1407 } 1356 mutex_unlock(&a->lock); 1357 } 1408 1409 mutex_unlock(&area->lock); 1410 } 1411 1358 1412 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1359 1413 if (node) { 1360 a = (as_area_t *) node->value[0]; 1361 mutex_lock(&a->lock); 1362 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1363 mutex_unlock(&a->lock); 1414 area = (as_area_t *) node->value[0]; 1415 1416 mutex_lock(&area->lock); 1417 1418 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1419 mutex_unlock(&area->lock); 1364 1420 return false; 1365 1421 } 1366 mutex_unlock(&a->lock); 1422 1423 mutex_unlock(&area->lock); 1367 1424 } 1368 1425 1369 1426 /* Second, check the leaf node. */ 1427 btree_key_t i; 1370 1428 for (i = 0; i < leaf->keys; i++) { 1371 a = (as_area_t *) leaf->value[i];1372 1373 if (a == avoid_area)1429 area = (as_area_t *) leaf->value[i]; 1430 1431 if (area == avoid_area) 1374 1432 continue; 1375 1376 mutex_lock(&a->lock); 1377 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1378 mutex_unlock(&a->lock); 1433 1434 mutex_lock(&area->lock); 1435 1436 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1437 mutex_unlock(&area->lock); 1379 1438 return false; 1380 1439 } 1381 mutex_unlock(&a->lock); 1382 } 1383 1440 1441 mutex_unlock(&area->lock); 1442 } 1443 1384 1444 /* 1385 1445 * So far, the area does not conflict with other areas. 1386 1446 * Check if it doesn't conflict with kernel address space. 1387 */ 1447 * 1448 */ 1388 1449 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1389 return !overlaps(va, size, 1450 return !overlaps(va, size, 1390 1451 KERNEL_ADDRESS_SPACE_START, 1391 1452 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1392 1453 } 1393 1454 1394 1455 return true; 1395 1456 } … … 1397 1458 /** Return size of the address space area with given base. 1398 1459 * 1399 * @param base Arbitrary address insede the address space area. 1400 * 1401 * @return Size of the address space area in bytes or zero if it 1402 * does not exist. 1460 * @param base Arbitrary address insede the address space area. 1461 * 1462 * @return Size of the address space area in bytes or zero if it 1463 * does not exist. 
1464 * 1403 1465 */ 1404 1466 size_t as_area_get_size(uintptr_t base) 1405 1467 { 1406 ipl_t ipl;1407 as_area_t *src_area;1408 1468 size_t size; 1409 1410 ipl = interrupts_disable(); 1411 src_area = find_area_and_lock(AS, base); 1469 1470 ipl_t ipl = interrupts_disable(); 1471 as_area_t *src_area = find_area_and_lock(AS, base); 1472 1412 1473 if (src_area) { 1413 1474 size = src_area->pages * PAGE_SIZE; 1414 1475 mutex_unlock(&src_area->lock); 1415 } else {1476 } else 1416 1477 size = 0; 1417 }1478 1418 1479 interrupts_restore(ipl); 1419 1480 return size; … … 1424 1485 * The address space area must be already locked. 1425 1486 * 1426 * @param a Address space area. 1427 * @param page First page to be marked. 1428 * @param count Number of page to be marked. 1429 * 1430 * @return Zero on failure and non-zero on success. 1431 */ 1432 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1433 { 1434 btree_node_t *leaf, *node; 1435 size_t pages; 1436 unsigned int i; 1437 1487 * @param area Address space area. 1488 * @param page First page to be marked. 1489 * @param count Number of page to be marked. 1490 * 1491 * @return Zero on failure and non-zero on success. 1492 * 1493 */ 1494 int used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1495 { 1438 1496 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1439 1497 ASSERT(count); 1440 1441 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1498 1499 btree_node_t *leaf; 1500 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1442 1501 if (pages) { 1443 1502 /* 1444 1503 * We hit the beginning of some used space. 1504 * 1445 1505 */ 1446 1506 return 0; 1447 1507 } 1448 1508 1449 1509 if (!leaf->keys) { 1450 btree_insert(&a ->used_space, page, (void *) count, leaf);1510 btree_insert(&area->used_space, page, (void *) count, leaf); 1451 1511 return 1; 1452 1512 } 1453 1454 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1513 1514 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1455 1515 if (node) { 1456 1516 uintptr_t left_pg = node->key[node->keys - 1]; … … 1463 1523 * somewhere between the rightmost interval of 1464 1524 * the left neigbour and the first interval of the leaf. 1465 */ 1466 1525 * 1526 */ 1527 1467 1528 if (page >= right_pg) { 1468 1529 /* Do nothing. */ … … 1474 1535 right_cnt * PAGE_SIZE)) { 1475 1536 /* The interval intersects with the right interval. */ 1476 return 0; 1537 return 0; 1477 1538 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1478 1539 (page + count * PAGE_SIZE == right_pg)) { … … 1480 1541 * The interval can be added by merging the two already 1481 1542 * present intervals. 1543 * 1482 1544 */ 1483 1545 node->value[node->keys - 1] += count + right_cnt; 1484 btree_remove(&a ->used_space, right_pg, leaf);1485 return 1; 1546 btree_remove(&area->used_space, right_pg, leaf); 1547 return 1; 1486 1548 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1487 /* 1549 /* 1488 1550 * The interval can be added by simply growing the left 1489 1551 * interval. 1552 * 1490 1553 */ 1491 1554 node->value[node->keys - 1] += count; … … 1496 1559 * the right interval down and increasing its size 1497 1560 * accordingly. 1561 * 1498 1562 */ 1499 1563 leaf->value[0] += count; … … 1504 1568 * The interval is between both neigbouring intervals, 1505 1569 * but cannot be merged with any of them. 
1570 * 1506 1571 */ 1507 btree_insert(&a ->used_space, page, (void *) count,1572 btree_insert(&area->used_space, page, (void *) count, 1508 1573 leaf); 1509 1574 return 1; … … 1512 1577 uintptr_t right_pg = leaf->key[0]; 1513 1578 size_t right_cnt = (size_t) leaf->value[0]; 1514 1579 1515 1580 /* 1516 1581 * Investigate the border case in which the left neighbour does 1517 1582 * not exist but the interval fits from the left. 1518 */ 1519 1583 * 1584 */ 1585 1520 1586 if (overlaps(page, count * PAGE_SIZE, right_pg, 1521 1587 right_cnt * PAGE_SIZE)) { … … 1527 1593 * right interval down and increasing its size 1528 1594 * accordingly. 1595 * 1529 1596 */ 1530 1597 leaf->key[0] = page; … … 1535 1602 * The interval doesn't adjoin with the right interval. 1536 1603 * It must be added individually. 1604 * 1537 1605 */ 1538 btree_insert(&a ->used_space, page, (void *) count,1606 btree_insert(&area->used_space, page, (void *) count, 1539 1607 leaf); 1540 1608 return 1; 1541 1609 } 1542 1610 } 1543 1544 node = btree_leaf_node_right_neighbour(&a ->used_space, leaf);1611 1612 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1545 1613 if (node) { 1546 1614 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1553 1621 * somewhere between the leftmost interval of 1554 1622 * the right neigbour and the last interval of the leaf. 1555 */ 1556 1623 * 1624 */ 1625 1557 1626 if (page < left_pg) { 1558 1627 /* Do nothing. */ … … 1564 1633 right_cnt * PAGE_SIZE)) { 1565 1634 /* The interval intersects with the right interval. */ 1566 return 0; 1635 return 0; 1567 1636 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1568 1637 (page + count * PAGE_SIZE == right_pg)) { … … 1570 1639 * The interval can be added by merging the two already 1571 1640 * present intervals. 1572 * */ 1641 * 1642 */ 1573 1643 leaf->value[leaf->keys - 1] += count + right_cnt; 1574 btree_remove(&a ->used_space, right_pg, node);1575 return 1; 1644 btree_remove(&area->used_space, right_pg, node); 1645 return 1; 1576 1646 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1577 1647 /* 1578 1648 * The interval can be added by simply growing the left 1579 1649 * interval. 1580 * */ 1650 * 1651 */ 1581 1652 leaf->value[leaf->keys - 1] += count; 1582 1653 return 1; … … 1586 1657 * the right interval down and increasing its size 1587 1658 * accordingly. 1659 * 1588 1660 */ 1589 1661 node->value[0] += count; … … 1594 1666 * The interval is between both neigbouring intervals, 1595 1667 * but cannot be merged with any of them. 1668 * 1596 1669 */ 1597 btree_insert(&a ->used_space, page, (void *) count,1670 btree_insert(&area->used_space, page, (void *) count, 1598 1671 leaf); 1599 1672 return 1; … … 1602 1675 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1603 1676 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1604 1677 1605 1678 /* 1606 1679 * Investigate the border case in which the right neighbour 1607 1680 * does not exist but the interval fits from the right. 1608 */ 1609 1681 * 1682 */ 1683 1610 1684 if (overlaps(page, count * PAGE_SIZE, left_pg, 1611 1685 left_cnt * PAGE_SIZE)) { … … 1616 1690 * The interval can be added by growing the left 1617 1691 * interval. 1692 * 1618 1693 */ 1619 1694 leaf->value[leaf->keys - 1] += count; … … 1623 1698 * The interval doesn't adjoin with the left interval. 1624 1699 * It must be added individually. 
1700 * 1625 1701 */ 1626 btree_insert(&a ->used_space, page, (void *) count,1702 btree_insert(&area->used_space, page, (void *) count, 1627 1703 leaf); 1628 1704 return 1; … … 1634 1710 * only between two other intervals of the leaf. The two border cases 1635 1711 * were already resolved. 1636 */ 1712 * 1713 */ 1714 btree_key_t i; 1637 1715 for (i = 1; i < leaf->keys; i++) { 1638 1716 if (page < leaf->key[i]) { … … 1641 1719 size_t left_cnt = (size_t) leaf->value[i - 1]; 1642 1720 size_t right_cnt = (size_t) leaf->value[i]; 1643 1721 1644 1722 /* 1645 1723 * The interval fits between left_pg and right_pg. 1724 * 1646 1725 */ 1647 1726 1648 1727 if (overlaps(page, count * PAGE_SIZE, left_pg, 1649 1728 left_cnt * PAGE_SIZE)) { … … 1651 1730 * The interval intersects with the left 1652 1731 * interval. 1732 * 1653 1733 */ 1654 1734 return 0; … … 1658 1738 * The interval intersects with the right 1659 1739 * interval. 1740 * 1660 1741 */ 1661 return 0; 1742 return 0; 1662 1743 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1663 1744 (page + count * PAGE_SIZE == right_pg)) { … … 1665 1746 * The interval can be added by merging the two 1666 1747 * already present intervals. 1748 * 1667 1749 */ 1668 1750 leaf->value[i - 1] += count + right_cnt; 1669 btree_remove(&a ->used_space, right_pg, leaf);1670 return 1; 1751 btree_remove(&area->used_space, right_pg, leaf); 1752 return 1; 1671 1753 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1672 1754 /* 1673 1755 * The interval can be added by simply growing 1674 1756 * the left interval. 1757 * 1675 1758 */ 1676 1759 leaf->value[i - 1] += count; … … 1678 1761 } else if (page + count * PAGE_SIZE == right_pg) { 1679 1762 /* 1680 1763 * The interval can be addded by simply moving 1681 1764 * base of the right interval down and 1682 1765 * increasing its size accordingly. 1683 */ 1766 * 1767 */ 1684 1768 leaf->value[i] += count; 1685 1769 leaf->key[i] = page; … … 1690 1774 * intervals, but cannot be merged with any of 1691 1775 * them. 1776 * 1692 1777 */ 1693 btree_insert(&a ->used_space, page,1778 btree_insert(&area->used_space, page, 1694 1779 (void *) count, leaf); 1695 1780 return 1; … … 1697 1782 } 1698 1783 } 1699 1784 1700 1785 panic("Inconsistency detected while adding %" PRIs " pages of used " 1701 1786 "space at %p.", count, page); … … 1706 1791 * The address space area must be already locked. 1707 1792 * 1708 * @param a Address space area. 1709 * @param page First page to be marked. 1710 * @param count Number of page to be marked. 1711 * 1712 * @return Zero on failure and non-zero on success. 1713 */ 1714 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1715 { 1716 btree_node_t *leaf, *node; 1717 size_t pages; 1718 unsigned int i; 1719 1793 * @param area Address space area. 1794 * @param page First page to be marked. 1795 * @param count Number of page to be marked. 1796 * 1797 * @return Zero on failure and non-zero on success. 1798 * 1799 */ 1800 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1801 { 1720 1802 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1721 1803 ASSERT(count); 1722 1723 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1804 1805 btree_node_t *leaf; 1806 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1724 1807 if (pages) { 1725 1808 /* 1726 1809 * We are lucky, page is the beginning of some interval. 
1810 * 1727 1811 */ 1728 1812 if (count > pages) { 1729 1813 return 0; 1730 1814 } else if (count == pages) { 1731 btree_remove(&a ->used_space, page, leaf);1815 btree_remove(&area->used_space, page, leaf); 1732 1816 return 1; 1733 1817 } else { … … 1735 1819 * Find the respective interval. 1736 1820 * Decrease its size and relocate its start address. 1821 * 1737 1822 */ 1823 btree_key_t i; 1738 1824 for (i = 0; i < leaf->keys; i++) { 1739 1825 if (leaf->key[i] == page) { … … 1746 1832 } 1747 1833 } 1748 1749 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1750 if ( node && page < leaf->key[0]) {1834 1835 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1836 if ((node) && (page < leaf->key[0])) { 1751 1837 uintptr_t left_pg = node->key[node->keys - 1]; 1752 1838 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1753 1839 1754 1840 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1755 1841 count * PAGE_SIZE)) { … … 1761 1847 * removed by updating the size of the bigger 1762 1848 * interval. 1849 * 1763 1850 */ 1764 1851 node->value[node->keys - 1] -= count; … … 1766 1853 } else if (page + count * PAGE_SIZE < 1767 1854 left_pg + left_cnt*PAGE_SIZE) { 1768 size_t new_cnt;1769 1770 1855 /* 1771 1856 * The interval is contained in the rightmost … … 1774 1859 * the original interval and also inserting a 1775 1860 * new interval. 1861 * 1776 1862 */ 1777 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1863 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1778 1864 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1779 1865 node->value[node->keys - 1] -= count + new_cnt; 1780 btree_insert(&a ->used_space, page +1866 btree_insert(&area->used_space, page + 1781 1867 count * PAGE_SIZE, (void *) new_cnt, leaf); 1782 1868 return 1; … … 1784 1870 } 1785 1871 return 0; 1786 } else if (page < leaf->key[0]) {1872 } else if (page < leaf->key[0]) 1787 1873 return 0; 1788 }1789 1874 1790 1875 if (page > leaf->key[leaf->keys - 1]) { 1791 1876 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1792 1877 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1793 1878 1794 1879 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1795 1880 count * PAGE_SIZE)) { 1796 if (page + count * PAGE_SIZE == 1881 if (page + count * PAGE_SIZE == 1797 1882 left_pg + left_cnt * PAGE_SIZE) { 1798 1883 /* … … 1800 1885 * interval of the leaf and can be removed by 1801 1886 * updating the size of the bigger interval. 1887 * 1802 1888 */ 1803 1889 leaf->value[leaf->keys - 1] -= count; … … 1805 1891 } else if (page + count * PAGE_SIZE < left_pg + 1806 1892 left_cnt * PAGE_SIZE) { 1807 size_t new_cnt;1808 1809 1893 /* 1810 1894 * The interval is contained in the rightmost … … 1813 1897 * original interval and also inserting a new 1814 1898 * interval. 1899 * 1815 1900 */ 1816 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1901 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1817 1902 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1818 1903 leaf->value[leaf->keys - 1] -= count + new_cnt; 1819 btree_insert(&a ->used_space, page +1904 btree_insert(&area->used_space, page + 1820 1905 count * PAGE_SIZE, (void *) new_cnt, leaf); 1821 1906 return 1; … … 1823 1908 } 1824 1909 return 0; 1825 } 1910 } 1826 1911 1827 1912 /* … … 1829 1914 * Now the interval can be only between intervals of the leaf. 
1830 1915 */ 1916 btree_key_t i; 1831 1917 for (i = 1; i < leaf->keys - 1; i++) { 1832 1918 if (page < leaf->key[i]) { 1833 1919 uintptr_t left_pg = leaf->key[i - 1]; 1834 1920 size_t left_cnt = (size_t) leaf->value[i - 1]; 1835 1921 1836 1922 /* 1837 1923 * Now the interval is between intervals corresponding … … 1847 1933 * be removed by updating the size of 1848 1934 * the bigger interval. 1935 * 1849 1936 */ 1850 1937 leaf->value[i - 1] -= count; … … 1852 1939 } else if (page + count * PAGE_SIZE < 1853 1940 left_pg + left_cnt * PAGE_SIZE) { 1854 size_t new_cnt;1855 1856 1941 /* 1857 1942 * The interval is contained in the … … 1861 1946 * also inserting a new interval. 1862 1947 */ 1863 new_cnt = ((left_pg +1948 size_t new_cnt = ((left_pg + 1864 1949 left_cnt * PAGE_SIZE) - 1865 1950 (page + count * PAGE_SIZE)) >> 1866 1951 PAGE_WIDTH; 1867 1952 leaf->value[i - 1] -= count + new_cnt; 1868 btree_insert(&a ->used_space, page +1953 btree_insert(&area->used_space, page + 1869 1954 count * PAGE_SIZE, (void *) new_cnt, 1870 1955 leaf); … … 1875 1960 } 1876 1961 } 1877 1962 1878 1963 error: 1879 1964 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1885 1970 * If the reference count drops to 0, the sh_info is deallocated. 1886 1971 * 1887 * @param sh_info Pointer to address space area share info. 1972 * @param sh_info Pointer to address space area share info. 1973 * 1888 1974 */ 1889 1975 void sh_info_remove_reference(share_info_t *sh_info) 1890 1976 { 1891 1977 bool dealloc = false; 1892 1978 1893 1979 mutex_lock(&sh_info->lock); 1894 1980 ASSERT(sh_info->refcount); 1981 1895 1982 if (--sh_info->refcount == 0) { 1896 1983 dealloc = true; … … 1903 1990 for (cur = sh_info->pagemap.leaf_head.next; 1904 1991 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1905 btree_node_t *node; 1906 unsigned int i; 1992 btree_node_t *node 1993 = list_get_instance(cur, btree_node_t, leaf_link); 1994 btree_key_t i; 1907 1995 1908 node = list_get_instance(cur, btree_node_t, leaf_link); 1909 for (i = 0; i < node->keys; i++) 1996 for (i = 0; i < node->keys; i++) 1910 1997 frame_free((uintptr_t) node->value[i]); 1911 1998 } … … 1925 2012 1926 2013 /** Wrapper for as_area_create(). */ 1927 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)2014 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1928 2015 { 1929 2016 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 1935 2022 1936 2023 /** Wrapper for as_area_resize(). */ 1937 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)2024 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1938 2025 { 1939 2026 return (unative_t) as_area_resize(AS, address, size, 0); … … 1941 2028 1942 2029 /** Wrapper for as_area_change_flags(). */ 1943 unative_t sys_as_area_change_flags(uintptr_t address, int flags)2030 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1944 2031 { 1945 2032 return (unative_t) as_area_change_flags(AS, flags, address); … … 1954 2041 /** Get list of adress space areas. 1955 2042 * 1956 * @param as Address space. 1957 * @param obuf Place to save pointer to returned buffer. 1958 * @param osize Place to save size of returned buffer. 2043 * @param as Address space. 2044 * @param obuf Place to save pointer to returned buffer. 2045 * @param osize Place to save size of returned buffer. 
2046 * 1959 2047 */ 1960 2048 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 1961 2049 { 1962 ipl_t ipl; 1963 size_t area_cnt, area_idx, i; 2050 ipl_t ipl = interrupts_disable(); 2051 mutex_lock(&as->lock); 2052 2053 /* First pass, count number of areas. */ 2054 2055 size_t area_cnt = 0; 1964 2056 link_t *cur; 1965 1966 as_area_info_t *info; 1967 size_t isize; 1968 1969 ipl = interrupts_disable(); 1970 mutex_lock(&as->lock); 1971 1972 /* First pass, count number of areas. */ 1973 1974 area_cnt = 0; 1975 2057 1976 2058 for (cur = as->as_area_btree.leaf_head.next; 1977 2059 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1978 btree_node_t *node; 1979 1980 node = list_get_instance(cur, btree_node_t, leaf_link); 2060 btree_node_t *node = 2061 list_get_instance(cur, btree_node_t, leaf_link); 1981 2062 area_cnt += node->keys; 1982 2063 } 1983 1984 1985 info = malloc(isize, 0);1986 2064 2065 size_t isize = area_cnt * sizeof(as_area_info_t); 2066 as_area_info_t *info = malloc(isize, 0); 2067 1987 2068 /* Second pass, record data. */ 1988 1989 area_idx = 0;1990 2069 2070 size_t area_idx = 0; 2071 1991 2072 for (cur = as->as_area_btree.leaf_head.next; 1992 2073 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1993 btree_node_t *node ;1994 1995 node = list_get_instance(cur, btree_node_t, leaf_link);1996 2074 btree_node_t *node = 2075 list_get_instance(cur, btree_node_t, leaf_link); 2076 btree_key_t i; 2077 1997 2078 for (i = 0; i < node->keys; i++) { 1998 2079 as_area_t *area = node->value[i]; 1999 2080 2000 2081 ASSERT(area_idx < area_cnt); 2001 2082 mutex_lock(&area->lock); 2002 2083 2003 2084 info[area_idx].start_addr = area->base; 2004 2085 info[area_idx].size = FRAMES2SIZE(area->pages); 2005 2086 info[area_idx].flags = area->flags; 2006 2087 ++area_idx; 2007 2088 2008 2089 mutex_unlock(&area->lock); 2009 2090 } 2010 2091 } 2011 2092 2012 2093 mutex_unlock(&as->lock); 2013 2094 interrupts_restore(ipl); 2014 2095 2015 2096 *obuf = info; 2016 2097 *osize = isize; 2017 2098 } 2018 2099 2019 2020 2100 /** Print out information about address space. 2021 2101 * 2022 * @param as Address space. 2102 * @param as Address space. 2103 * 2023 2104 */ 2024 2105 void as_print(as_t *as) 2025 2106 { 2026 ipl_t ipl; 2027 2028 ipl = interrupts_disable(); 2107 ipl_t ipl = interrupts_disable(); 2029 2108 mutex_lock(&as->lock); 2030 2109 … … 2033 2112 for (cur = as->as_area_btree.leaf_head.next; 2034 2113 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2035 btree_node_t *node; 2036 2037 node = list_get_instance(cur, btree_node_t, leaf_link); 2038 2039 unsigned int i; 2114 btree_node_t *node 2115 = list_get_instance(cur, btree_node_t, leaf_link); 2116 btree_key_t i; 2117 2040 2118 for (i = 0; i < node->keys; i++) { 2041 2119 as_area_t *area = node->value[i]; 2042 2120 2043 2121 mutex_lock(&area->lock); 2044 2122 printf("as_area: %p, base=%p, pages=%" PRIs -
kernel/generic/src/mm/frame.c
r666f492 rda1bafb 66 66 * available. 67 67 */ 68 mutex_t mem_avail_mtx;69 condvar_t mem_avail_cv;70 s ize_t mem_avail_req = 0; /**< Number of frames requested. */71 s ize_t mem_avail_gen = 0; /**< Generation counter. */68 static mutex_t mem_avail_mtx; 69 static condvar_t mem_avail_cv; 70 static size_t mem_avail_req = 0; /**< Number of frames requested. */ 71 static size_t mem_avail_gen = 0; /**< Generation counter. */ 72 72 73 73 /********************/ … … 171 171 return total; 172 172 } 173 #endif 173 #endif /* CONFIG_DEBUG */ 174 174 175 175 /** Find a zone with a given frames. … … 199 199 if (i >= zones.count) 200 200 i = 0; 201 201 202 } while (i != hint); 202 203 … … 242 243 if (i >= zones.count) 243 244 i = 0; 245 244 246 } while (i != hint); 245 247 … … 296 298 index = (frame_index(zone, frame)) + 297 299 (1 << frame->buddy_order); 298 } else { 300 } else { /* is_right */ 299 301 index = (frame_index(zone, frame)) - 300 302 (1 << frame->buddy_order); … … 673 675 bool zone_merge(size_t z1, size_t z2) 674 676 { 675 ipl_t ipl = interrupts_disable(); 676 spinlock_lock(&zones.lock); 677 irq_spinlock_lock(&zones.lock, true); 677 678 678 679 bool ret = true; … … 744 745 745 746 errout: 746 spinlock_unlock(&zones.lock); 747 interrupts_restore(ipl); 747 irq_spinlock_unlock(&zones.lock, true); 748 748 749 749 return ret; … … 777 777 * 778 778 */ 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, size_t count, zone_flags_t flags) 779 static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, 780 size_t count, zone_flags_t flags) 780 781 { 781 782 zone->base = start; … … 841 842 * 842 843 */ 843 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, zone_flags_t flags)844 { 845 ipl_t ipl = interrupts_disable(); 846 spinlock_lock(&zones.lock);844 size_t zone_create(pfn_t start, size_t count, pfn_t confframe, 845 zone_flags_t flags) 846 { 847 irq_spinlock_lock(&zones.lock, true); 847 848 848 849 if (zone_flags_available(flags)) { /* Create available zone */ … … 889 890 size_t znum = zones_insert_zone(start, count); 890 891 if (znum == (size_t) -1) { 891 spinlock_unlock(&zones.lock); 892 interrupts_restore(ipl); 892 irq_spinlock_unlock(&zones.lock, true); 893 893 return (size_t) -1; 894 894 } … … 905 905 } 906 906 907 spinlock_unlock(&zones.lock); 908 interrupts_restore(ipl); 907 irq_spinlock_unlock(&zones.lock, true); 909 908 910 909 return znum; … … 914 913 size_t znum = zones_insert_zone(start, count); 915 914 if (znum == (size_t) -1) { 916 spinlock_unlock(&zones.lock); 917 interrupts_restore(ipl); 915 irq_spinlock_unlock(&zones.lock, true); 918 916 return (size_t) -1; 919 917 } 920 918 zone_construct(&zones.info[znum], NULL, start, count, flags); 921 919 922 spinlock_unlock(&zones.lock); 923 interrupts_restore(ipl); 920 irq_spinlock_unlock(&zones.lock, true); 924 921 925 922 return znum; … … 933 930 void frame_set_parent(pfn_t pfn, void *data, size_t hint) 934 931 { 935 ipl_t ipl = interrupts_disable(); 936 spinlock_lock(&zones.lock); 932 irq_spinlock_lock(&zones.lock, true); 937 933 938 934 size_t znum = find_zone(pfn, 1, hint); … … 943 939 pfn - zones.info[znum].base)->parent = data; 944 940 945 spinlock_unlock(&zones.lock); 946 interrupts_restore(ipl); 941 irq_spinlock_unlock(&zones.lock, true); 947 942 } 948 943 949 944 void *frame_get_parent(pfn_t pfn, size_t hint) 950 945 { 951 ipl_t ipl = interrupts_disable(); 952 spinlock_lock(&zones.lock); 946 irq_spinlock_lock(&zones.lock, true); 953 947 954 948 size_t znum = find_zone(pfn, 1, 
hint); … … 959 953 pfn - zones.info[znum].base)->parent; 960 954 961 spinlock_unlock(&zones.lock); 962 interrupts_restore(ipl); 955 irq_spinlock_unlock(&zones.lock, true); 963 956 964 957 return res; … … 977 970 { 978 971 size_t size = ((size_t) 1) << order; 979 ipl_t ipl;980 972 size_t hint = pzone ? (*pzone) : 0; 981 973 982 974 loop: 983 ipl = interrupts_disable(); 984 spinlock_lock(&zones.lock); 975 irq_spinlock_lock(&zones.lock, true); 985 976 986 977 /* … … 993 984 if it does not help, reclaim all */ 994 985 if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) { 995 spinlock_unlock(&zones.lock); 996 interrupts_restore(ipl); 997 986 irq_spinlock_unlock(&zones.lock, true); 998 987 size_t freed = slab_reclaim(0); 999 1000 ipl = interrupts_disable(); 1001 spinlock_lock(&zones.lock); 988 irq_spinlock_lock(&zones.lock, true); 1002 989 1003 990 if (freed > 0) … … 1006 993 1007 994 if (znum == (size_t) -1) { 1008 spinlock_unlock(&zones.lock); 1009 interrupts_restore(ipl); 1010 995 irq_spinlock_unlock(&zones.lock, true); 1011 996 freed = slab_reclaim(SLAB_RECLAIM_ALL); 1012 1013 ipl = interrupts_disable(); 1014 spinlock_lock(&zones.lock); 997 irq_spinlock_lock(&zones.lock, true); 1015 998 1016 999 if (freed > 0) … … 1022 1005 if (znum == (size_t) -1) { 1023 1006 if (flags & FRAME_ATOMIC) { 1024 spinlock_unlock(&zones.lock); 1025 interrupts_restore(ipl); 1007 irq_spinlock_unlock(&zones.lock, true); 1026 1008 return NULL; 1027 1009 } … … 1031 1013 #endif 1032 1014 1033 spinlock_unlock(&zones.lock); 1034 interrupts_restore(ipl); 1035 1015 irq_spinlock_unlock(&zones.lock, true); 1016 1036 1017 if (!THREAD) 1037 1018 panic("Cannot wait for memory to become available."); … … 1069 1050 + zones.info[znum].base; 1070 1051 1071 spinlock_unlock(&zones.lock); 1072 interrupts_restore(ipl); 1052 irq_spinlock_unlock(&zones.lock, true); 1073 1053 1074 1054 if (pzone) … … 1092 1072 void frame_free(uintptr_t frame) 1093 1073 { 1094 ipl_t ipl = interrupts_disable(); 1095 spinlock_lock(&zones.lock); 1074 irq_spinlock_lock(&zones.lock, true); 1096 1075 1097 1076 /* … … 1105 1084 zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base); 1106 1085 1107 spinlock_unlock(&zones.lock); 1108 interrupts_restore(ipl); 1086 irq_spinlock_unlock(&zones.lock, true); 1109 1087 1110 1088 /* … … 1132 1110 void frame_reference_add(pfn_t pfn) 1133 1111 { 1134 ipl_t ipl = interrupts_disable(); 1135 spinlock_lock(&zones.lock); 1112 irq_spinlock_lock(&zones.lock, true); 1136 1113 1137 1114 /* … … 1144 1121 zones.info[znum].frames[pfn - zones.info[znum].base].refcount++; 1145 1122 1146 spinlock_unlock(&zones.lock); 1147 interrupts_restore(ipl); 1148 } 1149 1150 /** Mark given range unavailable in frame zones. */ 1123 irq_spinlock_unlock(&zones.lock, true); 1124 } 1125 1126 /** Mark given range unavailable in frame zones. 1127 * 1128 */ 1151 1129 void frame_mark_unavailable(pfn_t start, size_t count) 1152 1130 { 1153 ipl_t ipl = interrupts_disable(); 1154 spinlock_lock(&zones.lock); 1131 irq_spinlock_lock(&zones.lock, true); 1155 1132 1156 1133 size_t i; … … 1164 1141 } 1165 1142 1166 spinlock_unlock(&zones.lock); 1167 interrupts_restore(ipl); 1168 } 1169 1170 /** Initialize physical memory management. */ 1143 irq_spinlock_unlock(&zones.lock, true); 1144 } 1145 1146 /** Initialize physical memory management. 
1147 * 1148 */ 1171 1149 void frame_init(void) 1172 1150 { 1173 1151 if (config.cpu_active == 1) { 1174 1152 zones.count = 0; 1175 spinlock_initialize(&zones.lock, "zones.lock");1153 irq_spinlock_initialize(&zones.lock, "frame.zones.lock"); 1176 1154 mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE); 1177 1155 condvar_initialize(&mem_avail_cv); … … 1204 1182 } 1205 1183 1206 /** Return total size of all zones. */ 1184 /** Return total size of all zones. 1185 * 1186 */ 1207 1187 uint64_t zones_total_size(void) 1208 1188 { 1209 ipl_t ipl = interrupts_disable(); 1210 spinlock_lock(&zones.lock); 1189 irq_spinlock_lock(&zones.lock, true); 1211 1190 1212 1191 uint64_t total = 0; … … 1215 1194 total += (uint64_t) FRAMES2SIZE(zones.info[i].count); 1216 1195 1217 spinlock_unlock(&zones.lock); 1218 interrupts_restore(ipl); 1196 irq_spinlock_unlock(&zones.lock, true); 1219 1197 1220 1198 return total; … … 1229 1207 ASSERT(free != NULL); 1230 1208 1231 ipl_t ipl = interrupts_disable(); 1232 spinlock_lock(&zones.lock); 1209 irq_spinlock_lock(&zones.lock, true); 1233 1210 1234 1211 *total = 0; … … 1248 1225 } 1249 1226 1250 spinlock_unlock(&zones.lock); 1251 interrupts_restore(ipl); 1252 } 1253 1254 /** Prints list of zones. */ 1227 irq_spinlock_unlock(&zones.lock, true); 1228 } 1229 1230 /** Prints list of zones. 1231 * 1232 */ 1255 1233 void zones_print_list(void) 1256 1234 { … … 1278 1256 size_t i; 1279 1257 for (i = 0;; i++) { 1280 ipl_t ipl = interrupts_disable(); 1281 spinlock_lock(&zones.lock); 1258 irq_spinlock_lock(&zones.lock, true); 1282 1259 1283 1260 if (i >= zones.count) { 1284 spinlock_unlock(&zones.lock); 1285 interrupts_restore(ipl); 1261 irq_spinlock_unlock(&zones.lock, true); 1286 1262 break; 1287 1263 } … … 1293 1269 size_t busy_count = zones.info[i].busy_count; 1294 1270 1295 spinlock_unlock(&zones.lock); 1296 interrupts_restore(ipl); 1271 irq_spinlock_unlock(&zones.lock, true); 1297 1272 1298 1273 bool available = zone_flags_available(flags); … … 1328 1303 void zone_print_one(size_t num) 1329 1304 { 1330 ipl_t ipl = interrupts_disable(); 1331 spinlock_lock(&zones.lock); 1305 irq_spinlock_lock(&zones.lock, true); 1332 1306 size_t znum = (size_t) -1; 1333 1307 … … 1341 1315 1342 1316 if (znum == (size_t) -1) { 1343 spinlock_unlock(&zones.lock); 1344 interrupts_restore(ipl); 1317 irq_spinlock_unlock(&zones.lock, true); 1345 1318 printf("Zone not found.\n"); 1346 1319 return; … … 1353 1326 size_t busy_count = zones.info[i].busy_count; 1354 1327 1355 spinlock_unlock(&zones.lock); 1356 interrupts_restore(ipl); 1328 irq_spinlock_unlock(&zones.lock, true); 1357 1329 1358 1330 bool available = zone_flags_available(flags); -
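Nearly every frame.c hunk above applies the same mechanical conversion: the hand-written interrupts_disable()/interrupts_restore() pair around zones.lock is folded into the irq_spinlock primitives, with the boolean argument saying whether the lock should also manage the interrupt level. An illustrative before/after (restated here, not additional changeset content):

/* before: the caller juggles the interrupt level itself */
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
/* ... walk or modify the zone list ... */
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);

/* after: the lock keeps the saved interrupt level for us */
irq_spinlock_lock(&zones.lock, true);
/* ... walk or modify the zone list ... */
irq_spinlock_unlock(&zones.lock, true);

Passing false instead (as the reworked slab_reclaim() further down does with slab_cache_lock) leaves the interrupt level untouched; the caller is then responsible for having interrupts disabled already, as that function's updated comment spells out.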
kernel/generic/src/mm/page.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Virtual Address Translation subsystem. 36 36 * 37 37 * This file contains code for creating, destroying and searching … … 39 39 * Functions here are mere wrappers that call the real implementation. 40 40 * They however, define the single interface. 41 * 41 42 */ 42 43 … … 55 56 * will do an implicit serialization by virtue of running the TLB shootdown 56 57 * interrupt handler. 58 * 57 59 */ 58 60 … … 83 85 * of page boundaries. 84 86 * 85 * @param s Address of the structure. 86 * @param size Size of the structure. 87 * @param addr Address of the structure. 88 * @param size Size of the structure. 89 * 87 90 */ 88 void map_structure(uintptr_t s, size_t size)91 void map_structure(uintptr_t addr, size_t size) 89 92 { 90 int i, cnt, length; 91 92 length = size + (s - (s & ~(PAGE_SIZE - 1))); 93 cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 94 93 size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1))); 94 size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); 95 96 size_t i; 95 97 for (i = 0; i < cnt; i++) 96 page_mapping_insert(AS_KERNEL, s+ i * PAGE_SIZE,97 s+ i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);98 98 page_mapping_insert(AS_KERNEL, addr + i * PAGE_SIZE, 99 addr + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); 100 99 101 /* Repel prefetched accesses to the old mapping. */ 100 102 memory_barrier(); … … 108 110 * The page table must be locked and interrupts must be disabled. 109 111 * 110 * @param as Address space to wich page belongs. 111 * @param page Virtual address of the page to be mapped. 112 * @param frame Physical address of memory frame to which the mapping is 113 * done. 114 * @param flags Flags to be used for mapping. 112 * @param as Address space to wich page belongs. 113 * @param page Virtual address of the page to be mapped. 114 * @param frame Physical address of memory frame to which the mapping is 115 * done. 116 * @param flags Flags to be used for mapping. 117 * 115 118 */ 116 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) 119 void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, 120 unsigned int flags) 117 121 { 118 122 ASSERT(page_mapping_operations); … … 133 137 * The page table must be locked and interrupts must be disabled. 134 138 * 135 * @param as Address space to wich page belongs. 136 * @param page Virtual address of the page to be demapped. 139 * @param as Address space to wich page belongs. 140 * @param page Virtual address of the page to be demapped. 141 * 137 142 */ 138 143 void page_mapping_remove(as_t *as, uintptr_t page) … … 142 147 143 148 page_mapping_operations->mapping_remove(as, page); 144 149 145 150 /* Repel prefetched accesses to the old mapping. */ 146 151 memory_barrier(); … … 153 158 * The page table must be locked and interrupts must be disabled. 154 159 * 155 * @param as 156 * @param page 160 * @param as Address space to wich page belongs. 161 * @param page Virtual page. 157 162 * 158 * @return NULL if there is no such mapping; requested mapping 159 * otherwise. 163 * @return NULL if there is no such mapping; requested mapping 164 * otherwise. 165 * 160 166 */ 161 167 pte_t *page_mapping_find(as_t *as, uintptr_t page) … … 163 169 ASSERT(page_mapping_operations); 164 170 ASSERT(page_mapping_operations->mapping_find); 165 171 166 172 return page_mapping_operations->mapping_find(as, page); 167 173 } -
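The rewritten map_structure() derives how many pages a structure spans even when it is not page-aligned: the offset within the first page is added to the size, and a partial trailing page still counts as a whole page. A worked example with hypothetical values, assuming a 4 KiB PAGE_SIZE:

/* Example values only (not from the changeset). */
uintptr_t addr = 0x1ff0;   /* structure starts near the end of a page */
size_t size = 0x100;       /* 256 bytes long */

size_t length = size + (addr - (addr & ~(PAGE_SIZE - 1)));   /* 0x10f0 */
size_t cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);  /* 2 pages */

So a 256-byte structure at 0x1ff0 crosses one page boundary and gets two pages mapped, each with PAGE_NOT_CACHEABLE | PAGE_WRITE.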
kernel/generic/src/mm/slab.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Slab allocator. 36 36 * 37 37 * The slab allocator is closely modelled after OpenSolaris slab allocator. … … 50 50 * 51 51 * The slab allocator supports per-CPU caches ('magazines') to facilitate 52 * good SMP scaling. 52 * good SMP scaling. 53 53 * 54 54 * When a new object is being allocated, it is first checked, if it is … … 65 65 * thrashing when somebody is allocating/deallocating 1 item at the magazine 66 66 * size boundary. LIFO order is enforced, which should avoid fragmentation 67 * as much as possible. 68 * 67 * as much as possible. 68 * 69 69 * Every cache contains list of full slabs and list of partially full slabs. 70 70 * Empty slabs are immediately freed (thrashing will be avoided because 71 * of magazines). 71 * of magazines). 72 72 * 73 73 * The slab information structure is kept inside the data area, if possible. … … 95 95 * 96 96 * @todo 97 * it might be good to add granularity of locks even to slab level,97 * It might be good to add granularity of locks even to slab level, 98 98 * we could then try_spinlock over all partial slabs and thus improve 99 * scalability even on slab level 99 * scalability even on slab level. 100 * 100 101 */ 101 102 … … 114 115 #include <macros.h> 115 116 116 SPINLOCK_INITIALIZE(slab_cache_lock);117 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); 117 118 static LIST_INITIALIZE(slab_cache_list); 118 119 119 120 /** Magazine cache */ 120 121 static slab_cache_t mag_cache; 122 121 123 /** Cache for cache descriptors */ 122 124 static slab_cache_t slab_cache_cache; 125 123 126 /** Cache for external slab descriptors 124 127 * This time we want per-cpu cache, so do not make it static … … 128 131 */ 129 132 static slab_cache_t *slab_extern_cache; 133 130 134 /** Caches for malloc */ 131 135 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1]; 136 132 137 static const char *malloc_names[] = { 133 138 "malloc-16", … … 154 159 /** Slab descriptor */ 155 160 typedef struct { 156 slab_cache_t *cache; 157 link_t link; 158 void *start; 159 size_t available; 160 size_t nextavail; 161 slab_cache_t *cache; /**< Pointer to parent cache. */ 162 link_t link; /**< List of full/partial slabs. */ 163 void *start; /**< Start address of first available item. */ 164 size_t available; /**< Count of available items in this slab. */ 165 size_t nextavail; /**< The index of next available item. 
*/ 161 166 } slab_t; 162 167 163 168 #ifdef CONFIG_DEBUG 164 static int _slab_initialized = 0;169 static unsigned int _slab_initialized = 0; 165 170 #endif 166 171 167 172 /**************************************/ 168 173 /* Slab allocation functions */ 169 170 /** 171 * Allocate frames for slab space and initialize 172 * 173 */ 174 static slab_t *slab_space_alloc(slab_cache_t *cache, int flags) 175 { 176 void *data; 174 /**************************************/ 175 176 /** Allocate frames for slab space and initialize 177 * 178 */ 179 static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags) 180 { 181 182 183 size_t zone = 0; 184 185 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 186 if (!data) { 187 return NULL; 188 } 189 177 190 slab_t *slab; 178 191 size_t fsize; 179 unsigned int i; 180 size_t zone = 0; 181 182 data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 183 if (!data) { 184 return NULL; 185 } 192 186 193 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { 187 194 slab = slab_alloc(slab_extern_cache, flags); … … 196 203 197 204 /* Fill in slab structures */ 198 for (i = 0; i < ((unsigned int) 1 << cache->order); i++) 205 size_t i; 206 for (i = 0; i < ((size_t) 1 << cache->order); i++) 199 207 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 200 208 201 209 slab->start = data; 202 210 slab->available = cache->objects; 203 211 slab->nextavail = 0; 204 212 slab->cache = cache; 205 213 206 214 for (i = 0; i < cache->objects; i++) 207 *(( int *) (slab->start + i*cache->size)) = i + 1;208 215 *((size_t *) (slab->start + i * cache->size)) = i + 1; 216 209 217 atomic_inc(&cache->allocated_slabs); 210 218 return slab; 211 219 } 212 220 213 /** 214 * Deallocate space associated with slab 221 /** Deallocate space associated with slab 215 222 * 216 223 * @return number of freed frames 224 * 217 225 */ 218 226 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 219 227 { 220 228 frame_free(KA2PA(slab->start)); 221 if (! 
229 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 222 230 slab_free(slab_extern_cache, slab); 223 231 224 232 atomic_dec(&cache->allocated_slabs); 225 233 226 return 1 << cache->order;234 return (1 << cache->order); 227 235 } 228 236 229 237 /** Map object to slab structure */ 230 static slab_t * 238 static slab_t *obj2slab(void *obj) 231 239 { 232 240 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); 233 241 } 234 242 235 /****************** ********************/243 /******************/ 236 244 /* Slab functions */ 237 238 239 /** 240 * Return object to slab and call a destructor 245 /******************/ 246 247 /** Return object to slab and call a destructor 241 248 * 242 249 * @param slab If the caller knows directly slab of the object, otherwise NULL 243 250 * 244 251 * @return Number of freed pages 252 * 245 253 */ 246 254 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) 247 255 { 248 int freed = 0;249 250 256 if (!slab) 251 257 slab = obj2slab(obj); 252 258 253 259 ASSERT(slab->cache == cache); 254 260 261 size_t freed = 0; 262 255 263 if (cache->destructor) 256 264 freed = cache->destructor(obj); … … 258 266 spinlock_lock(&cache->slablock); 259 267 ASSERT(slab->available < cache->objects); 260 261 *(( int *)obj) = slab->nextavail;268 269 *((size_t *) obj) = slab->nextavail; 262 270 slab->nextavail = (obj - slab->start) / cache->size; 263 271 slab->available++; 264 272 265 273 /* Move it to correct list */ 266 274 if (slab->available == cache->objects) { … … 268 276 list_remove(&slab->link); 269 277 spinlock_unlock(&cache->slablock); 270 278 271 279 return freed + slab_space_free(cache, slab); 272 273 280 } else if (slab->available == 1) { 274 281 /* It was in full, move to partial */ … … 276 283 list_prepend(&slab->link, &cache->partial_slabs); 277 284 } 285 278 286 spinlock_unlock(&cache->slablock); 279 287 return freed; 280 288 } 281 289 282 /** 283 * Take new object from slab or create new if needed 290 /** Take new object from slab or create new if needed 284 291 * 285 292 * @return Object address or null 293 * 286 294 */ 287 295 static void *slab_obj_create(slab_cache_t *cache, int flags) 288 296 { 297 spinlock_lock(&cache->slablock); 298 289 299 slab_t *slab; 290 void *obj; 291 292 spinlock_lock(&cache->slablock); 293 300 294 301 if (list_empty(&cache->partial_slabs)) { 295 /* Allow recursion and reclaiming 302 /* 303 * Allow recursion and reclaiming 296 304 * - this should work, as the slab control structures 297 305 * are small and do not need to allocate with anything 298 306 * other than frame_alloc when they are allocating, 299 307 * that's why we should get recursion at most 1-level deep 308 * 300 309 */ 301 310 spinlock_unlock(&cache->slablock); … … 303 312 if (!slab) 304 313 return NULL; 314 305 315 spinlock_lock(&cache->slablock); 306 316 } else { … … 309 319 list_remove(&slab->link); 310 320 } 311 obj = slab->start + slab->nextavail * cache->size; 312 slab->nextavail = *((int *)obj); 321 322 void *obj = slab->start + slab->nextavail * cache->size; 323 slab->nextavail = *((size_t *) obj); 313 324 slab->available--; 314 325 315 326 if (!slab->available) 316 327 list_prepend(&slab->link, &cache->full_slabs); 317 328 else 318 329 list_prepend(&slab->link, &cache->partial_slabs); 319 330 320 331 spinlock_unlock(&cache->slablock); 321 322 if ( cache->constructor && cache->constructor(obj, flags)) {332 333 if ((cache->constructor) && (cache->constructor(obj, flags))) { 323 334 /* Bad, bad, construction failed */ 324 335 
slab_obj_destroy(cache, obj, slab); 325 336 return NULL; 326 337 } 338 327 339 return obj; 328 340 } 329 341 330 /**************************** **********/342 /****************************/ 331 343 /* CPU-Cache slab functions */ 332 333 /** 334 * Finds a full magazine in cache, takes it from list335 * and returns it336 * 337 * @param first If true, return first, else last mag338 */ 339 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, intfirst)344 /****************************/ 345 346 /** Find a full magazine in cache, take it from list and return it 347 * 348 * @param first If true, return first, else last mag. 349 * 350 */ 351 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first) 340 352 { 341 353 slab_magazine_t *mag = NULL; 342 354 link_t *cur; 343 355 344 356 spinlock_lock(&cache->maglock); 345 357 if (!list_empty(&cache->magazines)) { … … 348 360 else 349 361 cur = cache->magazines.prev; 362 350 363 mag = list_get_instance(cur, slab_magazine_t, link); 351 364 list_remove(&mag->link); 352 365 atomic_dec(&cache->magazine_counter); 353 366 } 367 354 368 spinlock_unlock(&cache->maglock); 355 369 return mag; 356 370 } 357 371 358 /** Prepend magazine to magazine list in cache */ 372 /** Prepend magazine to magazine list in cache 373 * 374 */ 359 375 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) 360 376 { 361 377 spinlock_lock(&cache->maglock); 362 378 363 379 list_prepend(&mag->link, &cache->magazines); 364 380 atomic_inc(&cache->magazine_counter); … … 367 383 } 368 384 369 /** 370 * Free all objects in magazine and free memory associated with magazine 385 /** Free all objects in magazine and free memory associated with magazine 371 386 * 372 387 * @return Number of freed pages 388 * 373 389 */ 374 390 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) 375 391 { 376 unsigned int i;392 size_t i; 377 393 size_t frames = 0; 378 394 379 395 for (i = 0; i < mag->busy; i++) { 380 396 frames += slab_obj_destroy(cache, mag->objs[i], NULL); … … 383 399 384 400 slab_free(&mag_cache, mag); 385 401 386 402 return frames; 387 403 } 388 404 389 /** 390 * Find full magazine, set it as current and return it 405 /** Find full magazine, set it as current and return it 391 406 * 392 407 * Assume cpu_magazine lock is held 408 * 393 409 */ 394 410 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 395 411 { 396 slab_magazine_t *cmag, *lastmag, *newmag; 397 398 cmag = cache->mag_cache[CPU->id].current; 399 lastmag = cache->mag_cache[CPU->id].last; 412 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 413 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 414 400 415 if (cmag) { /* First try local CPU magazines */ 401 416 if (cmag->busy) 402 417 return cmag; 403 404 if ( lastmag && lastmag->busy) {418 419 if ((lastmag) && (lastmag->busy)) { 405 420 cache->mag_cache[CPU->id].current = lastmag; 406 421 cache->mag_cache[CPU->id].last = cmag; … … 408 423 } 409 424 } 425 410 426 /* Local magazines are empty, import one from magazine list */ 411 newmag = get_mag_from_cache(cache, 1);427 slab_magazine_t *newmag = get_mag_from_cache(cache, 1); 412 428 if (!newmag) 413 429 return NULL; 414 430 415 431 if (lastmag) 416 432 magazine_destroy(cache, lastmag); 417 433 418 434 cache->mag_cache[CPU->id].last = cmag; 419 435 cache->mag_cache[CPU->id].current = newmag; 436 420 437 return newmag; 421 438 } 422 439 423 /** 424 * Try to find object in CPU-cache magazines 440 /** Try to find object in CPU-cache 
magazines 425 441 * 426 442 * @return Pointer to object or NULL if not available 443 * 427 444 */ 428 445 static void *magazine_obj_get(slab_cache_t *cache) 429 446 { 430 slab_magazine_t *mag;431 void *obj;432 433 447 if (!CPU) 434 448 return NULL; 435 449 436 450 spinlock_lock(&cache->mag_cache[CPU->id].lock); 437 438 mag = get_full_current_mag(cache);451 452 slab_magazine_t *mag = get_full_current_mag(cache); 439 453 if (!mag) { 440 454 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 441 455 return NULL; 442 456 } 443 obj = mag->objs[--mag->busy]; 457 458 void *obj = mag->objs[--mag->busy]; 444 459 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 460 445 461 atomic_dec(&cache->cached_objs); 446 462 … … 448 464 } 449 465 450 /** 451 * Assure that the current magazine is empty, return pointer to it, or NULL if 452 * no empty magazine is available and cannot be allocated 466 /** Assure that the current magazine is empty, return pointer to it, 467 * or NULL if no empty magazine is available and cannot be allocated 453 468 * 454 469 * Assume mag_cache[CPU->id].lock is held 455 470 * 456 * We have 2 magazines bound to processor. 457 * First try the current. 458 * If full, try the last. 459 * If full, put to magazines list. 460 * allocate new, exchange last & current 471 * We have 2 magazines bound to processor. 472 * First try the current. 473 * If full, try the last. 474 * If full, put to magazines list. 461 475 * 462 476 */ 463 477 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 464 478 { 465 slab_magazine_t *cmag,*lastmag,*newmag; 466 467 cmag = cache->mag_cache[CPU->id].current; 468 lastmag = cache->mag_cache[CPU->id].last; 469 479 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 480 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 481 470 482 if (cmag) { 471 483 if (cmag->busy < cmag->size) 472 484 return cmag; 473 if (lastmag && lastmag->busy < lastmag->size) { 485 486 if ((lastmag) && (lastmag->busy < lastmag->size)) { 474 487 cache->mag_cache[CPU->id].last = cmag; 475 488 cache->mag_cache[CPU->id].current = lastmag; … … 477 490 } 478 491 } 492 479 493 /* current | last are full | nonexistent, allocate new */ 480 /* We do not want to sleep just because of caching */ 481 /* Especially we do not want reclaiming to start, as 482 * this would deadlock */ 483 newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); 494 495 /* 496 * We do not want to sleep just because of caching, 497 * especially we do not want reclaiming to start, as 498 * this would deadlock. 
499 * 500 */ 501 slab_magazine_t *newmag = slab_alloc(&mag_cache, 502 FRAME_ATOMIC | FRAME_NO_RECLAIM); 484 503 if (!newmag) 485 504 return NULL; 505 486 506 newmag->size = SLAB_MAG_SIZE; 487 507 newmag->busy = 0; 488 508 489 509 /* Flush last to magazine list */ 490 510 if (lastmag) 491 511 put_mag_to_cache(cache, lastmag); 492 512 493 513 /* Move current as last, save new as current */ 494 cache->mag_cache[CPU->id].last = cmag; 495 cache->mag_cache[CPU->id].current = newmag; 496 514 cache->mag_cache[CPU->id].last = cmag; 515 cache->mag_cache[CPU->id].current = newmag; 516 497 517 return newmag; 498 518 } 499 519 500 /** 501 * Put object into CPU-cache magazine502 * 503 * @return 0 - success, -1 - could not get memory520 /** Put object into CPU-cache magazine 521 * 522 * @return 0 on success, -1 on no memory 523 * 504 524 */ 505 525 static int magazine_obj_put(slab_cache_t *cache, void *obj) 506 526 { 507 slab_magazine_t *mag;508 509 527 if (!CPU) 510 528 return -1; 511 529 512 530 spinlock_lock(&cache->mag_cache[CPU->id].lock); 513 514 mag = make_empty_current_mag(cache);531 532 slab_magazine_t *mag = make_empty_current_mag(cache); 515 533 if (!mag) { 516 534 spinlock_unlock(&cache->mag_cache[CPU->id].lock); … … 519 537 520 538 mag->objs[mag->busy++] = obj; 521 539 522 540 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 541 523 542 atomic_inc(&cache->cached_objs); 543 524 544 return 0; 525 545 } 526 546 527 528 /**************************************/ 547 /************************/ 529 548 /* Slab cache functions */ 530 531 /** Return number of objects that fit in certain cache size */ 532 static unsigned int comp_objects(slab_cache_t *cache) 549 /************************/ 550 551 /** Return number of objects that fit in certain cache size 552 * 553 */ 554 static size_t comp_objects(slab_cache_t *cache) 533 555 { 534 556 if (cache->flags & SLAB_CACHE_SLINSIDE) 535 return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /536 cache->size;537 else 557 return ((PAGE_SIZE << cache->order) 558 - sizeof(slab_t)) / cache->size; 559 else 538 560 return (PAGE_SIZE << cache->order) / cache->size; 539 561 } 540 562 541 /** Return wasted space in slab */542 static unsigned int badness(slab_cache_t *cache) 543 { 544 unsigned int objects; 545 unsigned int ssize; 546 547 objects = comp_objects(cache);548 ssize = PAGE_SIZE << cache->order;563 /** Return wasted space in slab 564 * 565 */ 566 static size_t badness(slab_cache_t *cache) 567 { 568 size_t objects = comp_objects(cache); 569 size_t ssize = PAGE_SIZE << cache->order; 570 549 571 if (cache->flags & SLAB_CACHE_SLINSIDE) 550 572 ssize -= sizeof(slab_t); 573 551 574 return ssize - objects * cache->size; 552 575 } 553 576 554 /** 555 * Initialize mag_cache structure in slab cache577 /** Initialize mag_cache structure in slab cache 578 * 556 579 */ 557 580 static bool make_magcache(slab_cache_t *cache) 558 581 { 559 unsigned int i;560 561 582 ASSERT(_slab_initialized >= 2); 562 583 563 584 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 564 585 FRAME_ATOMIC); 565 586 if (!cache->mag_cache) 566 587 return false; 567 588 589 size_t i; 568 590 for (i = 0; i < config.cpu_count; i++) { 569 591 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); 570 592 spinlock_initialize(&cache->mag_cache[i].lock, 571 "slab_maglock_cpu"); 572 } 593 "slab.cache.mag_cache[].lock"); 594 } 595 573 596 return true; 574 597 } 575 598 576 /** Initialize allocated memory as a slab cache */ 599 /** Initialize allocated memory as a slab cache 600 * 
601 */ 577 602 static void _slab_cache_create(slab_cache_t *cache, const char *name, 578 size_t size, size_t align, int (*constructor)(void *obj, int kmflag), 579 int (*destructor)(void *obj), int flags) 580 { 581 int pages; 582 ipl_t ipl; 583 603 size_t size, size_t align, int (*constructor)(void *obj, 604 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) 605 { 584 606 memsetb(cache, sizeof(*cache), 0); 585 607 cache->name = name; 586 608 587 609 if (align < sizeof(unative_t)) 588 610 align = sizeof(unative_t); 611 589 612 size = ALIGN_UP(size, align); 590 613 591 614 cache->size = size; 592 593 615 cache->constructor = constructor; 594 616 cache->destructor = destructor; 595 617 cache->flags = flags; 596 618 597 619 list_initialize(&cache->full_slabs); 598 620 list_initialize(&cache->partial_slabs); 599 621 list_initialize(&cache->magazines); 600 spinlock_initialize(&cache->slablock, "slab_lock"); 601 spinlock_initialize(&cache->maglock, "slab_maglock"); 622 623 spinlock_initialize(&cache->slablock, "slab.cache.slablock"); 624 spinlock_initialize(&cache->maglock, "slab.cache.maglock"); 625 602 626 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 603 627 (void) make_magcache(cache); 604 628 605 629 /* Compute slab sizes, object counts in slabs etc. */ 606 630 if (cache->size < SLAB_INSIDE_SIZE) 607 631 cache->flags |= SLAB_CACHE_SLINSIDE; 608 632 609 633 /* Minimum slab order */ 610 pages = SIZE2FRAMES(cache->size); 634 size_t pages = SIZE2FRAMES(cache->size); 635 611 636 /* We need the 2^order >= pages */ 612 637 if (pages == 1) … … 614 639 else 615 640 cache->order = fnzb(pages - 1) + 1; 616 617 while (badness(cache) > SLAB_MAX_BADNESS(cache)) {641 642 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 618 643 cache->order += 1; 619 }644 620 645 cache->objects = comp_objects(cache); 646 621 647 /* If info fits in, put it inside */ 622 648 if (badness(cache) > sizeof(slab_t)) 623 649 cache->flags |= SLAB_CACHE_SLINSIDE; 624 650 625 651 /* Add cache to cache list */ 626 ipl = interrupts_disable(); 627 spinlock_lock(&slab_cache_lock); 628 652 irq_spinlock_lock(&slab_cache_lock, true); 629 653 list_append(&cache->link, &slab_cache_list); 630 631 spinlock_unlock(&slab_cache_lock); 632 interrupts_restore(ipl); 633 } 634 635 /** Create slab cache*/654 irq_spinlock_unlock(&slab_cache_lock, true); 655 } 656 657 /** Create slab cache 658 * 659 */ 636 660 slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align, 637 int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), 638 int flags) 639 { 640 slab_cache_t *cache; 641 642 cache = slab_alloc(&slab_cache_cache, 0); 661 int (*constructor)(void *obj, unsigned int kmflag), 662 size_t (*destructor)(void *obj), unsigned int flags) 663 { 664 slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0); 643 665 _slab_cache_create(cache, name, size, align, constructor, destructor, 644 666 flags); 667 645 668 return cache; 646 669 } 647 670 648 /** 649 * Reclaim space occupied by objects that are already free 671 /** Reclaim space occupied by objects that are already free 650 672 * 651 673 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing 674 * 652 675 * @return Number of freed pages 653 */ 654 static size_t _slab_reclaim(slab_cache_t *cache, int flags) 655 { 656 unsigned int i; 676 * 677 */ 678 static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags) 679 { 680 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 681 return 0; /* Nothing to do */ 682 683 /* 684 * We count up to original 
magazine count to avoid 685 * endless loop 686 */ 687 atomic_count_t magcount = atomic_get(&cache->magazine_counter); 688 657 689 slab_magazine_t *mag; 658 690 size_t frames = 0; 659 int magcount; 660 661 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 662 return 0; /* Nothing to do */ 663 664 /* We count up to original magazine count to avoid 665 * endless loop 666 */ 667 magcount = atomic_get(&cache->magazine_counter); 668 while (magcount-- && (mag=get_mag_from_cache(cache, 0))) { 669 frames += magazine_destroy(cache,mag); 670 if (!(flags & SLAB_RECLAIM_ALL) && frames) 691 692 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) { 693 frames += magazine_destroy(cache, mag); 694 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames)) 671 695 break; 672 696 } … … 675 699 /* Free cpu-bound magazines */ 676 700 /* Destroy CPU magazines */ 701 size_t i; 677 702 for (i = 0; i < config.cpu_count; i++) { 678 703 spinlock_lock(&cache->mag_cache[i].lock); 679 704 680 705 mag = cache->mag_cache[i].current; 681 706 if (mag) … … 687 712 frames += magazine_destroy(cache, mag); 688 713 cache->mag_cache[i].last = NULL; 689 714 690 715 spinlock_unlock(&cache->mag_cache[i].lock); 691 716 } 692 717 } 693 718 694 719 return frames; 695 720 } 696 721 697 /** Check that there are no slabs and remove cache from system */ 722 /** Check that there are no slabs and remove cache from system 723 * 724 */ 698 725 void slab_cache_destroy(slab_cache_t *cache) 699 726 { 700 ipl_t ipl; 701 702 /* First remove cache from link, so that we don't need 727 /* 728 * First remove cache from link, so that we don't need 703 729 * to disable interrupts later 730 * 704 731 */ 705 706 ipl = interrupts_disable(); 707 spinlock_lock(&slab_cache_lock); 708 732 irq_spinlock_lock(&slab_cache_lock, true); 709 733 list_remove(&cache->link); 710 711 spinlock_unlock(&slab_cache_lock); 712 interrupts_restore(ipl); 713 714 /* Do not lock anything, we assume the software is correct and 715 * does not touch the cache when it decides to destroy it */ 734 irq_spinlock_unlock(&slab_cache_lock, true); 735 736 /* 737 * Do not lock anything, we assume the software is correct and 738 * does not touch the cache when it decides to destroy it 739 * 740 */ 716 741 717 742 /* Destroy all magazines */ 718 743 _slab_reclaim(cache, SLAB_RECLAIM_ALL); 719 744 720 745 /* All slabs must be empty */ 721 if ( !list_empty(&cache->full_slabs) ||722 !list_empty(&cache->partial_slabs))746 if ((!list_empty(&cache->full_slabs)) || 747 (!list_empty(&cache->partial_slabs))) 723 748 panic("Destroying cache that is not empty."); 724 749 725 750 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 726 751 free(cache->mag_cache); 752 727 753 slab_free(&slab_cache_cache, cache); 728 754 } 729 755 730 /** Allocate new object from cache - if no flags given, always returns memory */ 731 void *slab_alloc(slab_cache_t *cache, int flags) 732 { 733 ipl_t ipl; 756 /** Allocate new object from cache - if no flags given, always returns memory 757 * 758 */ 759 void *slab_alloc(slab_cache_t *cache, unsigned int flags) 760 { 761 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 762 ipl_t ipl = interrupts_disable(); 763 734 764 void *result = NULL; 735 765 736 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 737 ipl = interrupts_disable(); 738 739 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { 766 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 740 767 result = magazine_obj_get(cache); 741 }768 742 769 if (!result) 743 770 result = slab_obj_create(cache, flags); 744 771 
745 772 interrupts_restore(ipl); 746 773 747 774 if (result) 748 775 atomic_inc(&cache->allocated_objs); 749 776 750 777 return result; 751 778 } 752 779 753 /** Return object to cache, use slab if known */ 780 /** Return object to cache, use slab if known 781 * 782 */ 754 783 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 755 784 { 756 ipl_t ipl; 757 758 ipl = interrupts_disable(); 759 785 ipl_t ipl = interrupts_disable(); 786 760 787 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || 761 magazine_obj_put(cache, obj)) {788 (magazine_obj_put(cache, obj))) 762 789 slab_obj_destroy(cache, obj, slab); 763 764 } 790 765 791 interrupts_restore(ipl); 766 792 atomic_dec(&cache->allocated_objs); 767 793 } 768 794 769 /** Return slab object to cache */ 795 /** Return slab object to cache 796 * 797 */ 770 798 void slab_free(slab_cache_t *cache, void *obj) 771 799 { … … 773 801 } 774 802 775 /* Go through all caches and reclaim what is possible */ 776 size_t slab_reclaim(int flags) 777 { 778 slab_cache_t *cache; 803 /** Go through all caches and reclaim what is possible 804 * 805 * Interrupts must be disabled before calling this function, 806 * otherwise memory allocation from interrupts can deadlock. 807 * 808 */ 809 size_t slab_reclaim(unsigned int flags) 810 { 811 irq_spinlock_lock(&slab_cache_lock, false); 812 813 size_t frames = 0; 779 814 link_t *cur; 780 size_t frames = 0;781 782 spinlock_lock(&slab_cache_lock);783 784 /* TODO: Add assert, that interrupts are disabled, otherwise785 * memory allocation from interrupts can deadlock.786 */787 788 815 for (cur = slab_cache_list.next; cur != &slab_cache_list; 789 816 cur = cur->next) { 790 cache = list_get_instance(cur, slab_cache_t, link);817 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 791 818 frames += _slab_reclaim(cache, flags); 792 819 } 793 794 spinlock_unlock(&slab_cache_lock);795 820 821 irq_spinlock_unlock(&slab_cache_lock, false); 822 796 823 return frames; 797 824 } 798 825 799 800 /* Print list of slabs */ 826 /* Print list of slabs 827 * 828 */ 801 829 void slab_print_list(void) 802 830 { 803 int skip = 0; 804 805 printf("slab name size pages obj/pg slabs cached allocated" 831 printf("slab name size pages obj/pg slabs cached allocated" 806 832 " ctl\n"); 807 printf("---------------- -------- ------ ------ ------ ------ ---------"833 printf("---------------- -------- ------ -------- ------ ------ ---------" 808 834 " ---\n"); 809 835 836 size_t skip = 0; 810 837 while (true) { 811 slab_cache_t *cache;812 link_t *cur;813 ipl_t ipl;814 int i;815 816 838 /* 817 839 * We must not hold the slab_cache_lock spinlock when printing … … 836 858 * statistics. 
837 859 */ 838 839 ipl = interrupts_disable(); 840 spinlock_lock(&slab_cache_lock); 841 860 861 irq_spinlock_lock(&slab_cache_lock, true); 862 863 link_t *cur; 864 size_t i; 842 865 for (i = 0, cur = slab_cache_list.next; 843 i < skip && cur != &slab_cache_list; 844 i++, cur = cur->next) 845 ; 846 866 (i < skip) && (cur != &slab_cache_list); 867 i++, cur = cur->next); 868 847 869 if (cur == &slab_cache_list) { 848 spinlock_unlock(&slab_cache_lock); 849 interrupts_restore(ipl); 870 irq_spinlock_unlock(&slab_cache_lock, true); 850 871 break; 851 872 } 852 873 853 874 skip++; 854 855 cache = list_get_instance(cur, slab_cache_t, link);856 875 876 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 877 857 878 const char *name = cache->name; 858 879 uint8_t order = cache->order; 859 880 size_t size = cache->size; 860 unsigned int objects = cache->objects;881 size_t objects = cache->objects; 861 882 long allocated_slabs = atomic_get(&cache->allocated_slabs); 862 883 long cached_objs = atomic_get(&cache->cached_objs); 863 884 long allocated_objs = atomic_get(&cache->allocated_objs); 864 int flags = cache->flags; 865 866 spinlock_unlock(&slab_cache_lock); 867 interrupts_restore(ipl); 868 869 printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n", 885 unsigned int flags = cache->flags; 886 887 irq_spinlock_unlock(&slab_cache_lock, true); 888 889 printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n", 870 890 name, size, (1 << order), objects, allocated_slabs, 871 891 cached_objs, allocated_objs, … … 876 896 void slab_cache_init(void) 877 897 { 878 int i, size;879 880 898 /* Initialize magazine cache */ 881 899 _slab_cache_create(&mag_cache, "slab_magazine", … … 883 901 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | 884 902 SLAB_CACHE_SLINSIDE); 903 885 904 /* Initialize slab_cache cache */ 886 905 _slab_cache_create(&slab_cache_cache, "slab_cache", 887 906 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 888 907 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 908 889 909 /* Initialize external slab cache */ 890 910 slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0, 891 911 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 892 912 893 913 /* Initialize structures for malloc */ 914 size_t i; 915 size_t size; 916 894 917 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W); 895 918 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1); … … 898 921 NULL, NULL, SLAB_CACHE_MAGDEFERRED); 899 922 } 923 900 924 #ifdef CONFIG_DEBUG 901 925 _slab_initialized = 1; … … 906 930 * 907 931 * Kernel calls this function, when it knows the real number of 908 * processors. 909 * Allocate slab for cpucache and enable it on all existing910 * slabs that are SLAB_CACHE_MAGDEFERRED932 * processors. 
Allocate slab for cpucache and enable it on all 933 * existing slabs that are SLAB_CACHE_MAGDEFERRED 934 * 911 935 */ 912 936 void slab_enable_cpucache(void) 913 937 { 914 link_t *cur;915 slab_cache_t *s;916 917 938 #ifdef CONFIG_DEBUG 918 939 _slab_initialized = 2; 919 940 #endif 920 921 spinlock_lock(&slab_cache_lock); 922 941 942 irq_spinlock_lock(&slab_cache_lock, false); 943 944 link_t *cur; 923 945 for (cur = slab_cache_list.next; cur != &slab_cache_list; 924 cur = cur->next) {925 s = list_get_instance(cur, slab_cache_t, link);926 if ((s ->flags & SLAB_CACHE_MAGDEFERRED) !=946 cur = cur->next) { 947 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 948 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 927 949 SLAB_CACHE_MAGDEFERRED) 928 950 continue; 929 (void) make_magcache(s); 930 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 931 } 932 933 spinlock_unlock(&slab_cache_lock); 934 } 935 936 /**************************************/ 937 /* kalloc/kfree functions */ 938 void *malloc(unsigned int size, int flags) 951 952 (void) make_magcache(slab); 953 slab->flags &= ~SLAB_CACHE_MAGDEFERRED; 954 } 955 956 irq_spinlock_unlock(&slab_cache_lock, false); 957 } 958 959 void *malloc(size_t size, unsigned int flags) 939 960 { 940 961 ASSERT(_slab_initialized); … … 943 964 if (size < (1 << SLAB_MIN_MALLOC_W)) 944 965 size = (1 << SLAB_MIN_MALLOC_W); 945 946 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;947 966 967 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 968 948 969 return slab_alloc(malloc_caches[idx], flags); 949 970 } 950 971 951 void *realloc(void *ptr, unsigned int size,int flags)972 void *realloc(void *ptr, size_t size, unsigned int flags) 952 973 { 953 974 ASSERT(_slab_initialized); … … 959 980 if (size < (1 << SLAB_MIN_MALLOC_W)) 960 981 size = (1 << SLAB_MIN_MALLOC_W); 961 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;982 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 962 983 963 984 new_ptr = slab_alloc(malloc_caches[idx], flags); … … 980 1001 if (!ptr) 981 1002 return; 982 1003 983 1004 slab_t *slab = obj2slab(ptr); 984 1005 _slab_free(slab->cache, ptr, slab); -
kernel/generic/src/mm/tlb.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Generic TLB shootdown algorithm. 36 36 * 37 37 * The algorithm implemented here is based on the CMU TLB shootdown … … 53 53 #include <cpu.h> 54 54 55 /**56 * This lock is used for synchronisation between sender and57 * recipients of TLB shootdown message. It must be acquired58 * before CPU structure lock.59 */60 SPINLOCK_INITIALIZE(tlblock);61 62 55 void tlb_init(void) 63 56 { … … 66 59 67 60 #ifdef CONFIG_SMP 61 62 /** 63 * This lock is used for synchronisation between sender and 64 * recipients of TLB shootdown message. It must be acquired 65 * before CPU structure lock. 66 * 67 */ 68 IRQ_SPINLOCK_STATIC_INITIALIZE(tlblock); 68 69 69 70 /** Send TLB shootdown message. … … 78 79 * @param page Virtual page address, if required by type. 79 80 * @param count Number of pages, if required by type. 81 * 80 82 */ 81 83 void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, 82 84 uintptr_t page, size_t count) 83 85 { 84 unsigned int i; 85 86 CPU->tlb_active = 0; 87 spinlock_lock(&tlblock); 86 CPU->tlb_active = false; 87 irq_spinlock_lock(&tlblock, false); 88 88 89 size_t i; 89 90 for (i = 0; i < config.cpu_count; i++) { 90 91 cpu_t *cpu; … … 92 93 if (i == CPU->id) 93 94 continue; 94 95 95 96 cpu = &cpus[i]; 96 spinlock_lock(&cpu->lock);97 irq_spinlock_lock(&cpu->lock, false); 97 98 if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) { 98 99 /* … … 115 116 cpu->tlb_messages[idx].count = count; 116 117 } 117 spinlock_unlock(&cpu->lock);118 irq_spinlock_unlock(&cpu->lock, false); 118 119 } 119 120 120 121 tlb_shootdown_ipi_send(); 121 122 busy_wait: 122 123 busy_wait: 123 124 for (i = 0; i < config.cpu_count; i++) 124 125 if (cpus[i].tlb_active) … … 126 127 } 127 128 128 /** Finish TLB shootdown sequence. */ 129 /** Finish TLB shootdown sequence. 130 * 131 */ 129 132 void tlb_shootdown_finalize(void) 130 133 { 131 spinlock_unlock(&tlblock);132 CPU->tlb_active = 1;134 irq_spinlock_unlock(&tlblock, false); 135 CPU->tlb_active = true; 133 136 } 134 137 … … 138 141 } 139 142 140 /** Receive TLB shootdown message. */ 143 /** Receive TLB shootdown message. 
144 * 145 */ 141 146 void tlb_shootdown_ipi_recv(void) 142 147 { 143 tlb_invalidate_type_t type;144 asid_t asid;145 uintptr_t page;146 size_t count;147 unsigned int i;148 149 148 ASSERT(CPU); 150 149 151 CPU->tlb_active = 0;152 spinlock_lock(&tlblock);153 spinlock_unlock(&tlblock);150 CPU->tlb_active = false; 151 irq_spinlock_lock(&tlblock, false); 152 irq_spinlock_unlock(&tlblock, false); 154 153 155 spinlock_lock(&CPU->lock);154 irq_spinlock_lock(&CPU->lock, false); 156 155 ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); 157 156 157 size_t i; 158 158 for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) { 159 t ype = CPU->tlb_messages[i].type;160 asid = CPU->tlb_messages[i].asid;161 page = CPU->tlb_messages[i].page;162 count = CPU->tlb_messages[i].count;163 159 tlb_invalidate_type_t type = CPU->tlb_messages[i].type; 160 asid_t asid = CPU->tlb_messages[i].asid; 161 uintptr_t page = CPU->tlb_messages[i].page; 162 size_t count = CPU->tlb_messages[i].count; 163 164 164 switch (type) { 165 165 case TLB_INVL_ALL: … … 170 170 break; 171 171 case TLB_INVL_PAGES: 172 172 ASSERT(count); 173 173 tlb_invalidate_pages(asid, page, count); 174 174 break; … … 177 177 break; 178 178 } 179 179 180 if (type == TLB_INVL_ALL) 180 181 break; 181 182 } 182 183 183 spinlock_unlock(&CPU->lock);184 CPU->tlb_active = 1;184 irq_spinlock_unlock(&CPU->lock, false); 185 CPU->tlb_active = true; 185 186 } 186 187 -
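The tlb.c hunks above are the passive side of the shootdown protocol; the active callers live in the address space management code outside these hunks, so the call shape below is an assumption sketched for illustration only:

/* Invalidate a run of pages in every TLB that may cache them.
 * asid/page/count describe the mapping being torn down. */
tlb_shootdown_start(TLB_INVL_PAGES, asid, page, count);  /* queue messages, IPI, wait */
tlb_invalidate_pages(asid, page, count);                 /* flush the local TLB */
tlb_shootdown_finalize();                                /* release tlblock */

While the sender holds tlblock, every other CPU that took the IPI clears its tlb_active flag and spins in tlb_shootdown_ipi_recv() on that same lock; the cleared flags are exactly what the sender's busy-wait loop in tlb_shootdown_start() is polling for before it proceeds.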
kernel/generic/src/printf/vprintf.c
r666f492 rda1bafb 42 42 #include <str.h> 43 43 44 SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");44 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock"); 45 45 46 46 static int vprintf_str_write(const char *str, size_t size, void *data) … … 93 93 }; 94 94 95 ipl_t ipl = interrupts_disable(); 96 spinlock_lock(&printf_lock); 97 95 irq_spinlock_lock(&printf_lock, true); 98 96 int ret = printf_core(fmt, &ps, ap); 99 100 spinlock_unlock(&printf_lock); 101 interrupts_restore(ipl); 97 irq_spinlock_unlock(&printf_lock, true); 102 98 103 99 return ret; -
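The vprintf.c hunk also shows the two initialization forms the changeset uses for IRQ spinlocks, together with the new dotted lock-naming convention ("file.structure.field") visible in the other files. Both lines below already appear in the hunks above and are merely juxtaposed here for illustration:

/* File-scope lock with a static initializer (vprintf.c): */
IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");

/* Lock embedded in a structure, initialized at runtime (frame_init()): */
irq_spinlock_initialize(&zones.lock, "frame.zones.lock");

There is also an unnamed static form, IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock), used where the default name is sufficient.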
kernel/generic/src/proc/scheduler.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Scheduler and load balancing. 36 36 * 37 37 * This file contains the scheduler and kcpulb kernel thread which … … 68 68 static void scheduler_separated_stack(void); 69 69 70 atomic_t nrdy; 70 atomic_t nrdy; /**< Number of ready threads in the system. */ 71 71 72 72 /** Carry out actions before new task runs. */ … … 89 89 before_thread_runs_arch(); 90 90 #ifdef CONFIG_FPU_LAZY 91 if(THREAD == CPU->fpu_owner) 91 if(THREAD == CPU->fpu_owner) 92 92 fpu_enable(); 93 93 else 94 fpu_disable(); 94 fpu_disable(); 95 95 #else 96 96 fpu_enable(); … … 123 123 restart: 124 124 fpu_enable(); 125 spinlock_lock(&CPU->lock);126 125 irq_spinlock_lock(&CPU->lock, false); 126 127 127 /* Save old context */ 128 if (CPU->fpu_owner != NULL) { 129 spinlock_lock(&CPU->fpu_owner->lock);128 if (CPU->fpu_owner != NULL) { 129 irq_spinlock_lock(&CPU->fpu_owner->lock, false); 130 130 fpu_context_save(CPU->fpu_owner->saved_fpu_context); 131 /* don't prevent migration */ 131 132 /* Don't prevent migration */ 132 133 CPU->fpu_owner->fpu_context_engaged = 0; 133 spinlock_unlock(&CPU->fpu_owner->lock);134 irq_spinlock_unlock(&CPU->fpu_owner->lock, false); 134 135 CPU->fpu_owner = NULL; 135 136 } 136 137 spinlock_lock(&THREAD->lock);137 138 irq_spinlock_lock(&THREAD->lock, false); 138 139 if (THREAD->fpu_context_exists) { 139 140 fpu_context_restore(THREAD->saved_fpu_context); … … 142 143 if (!THREAD->saved_fpu_context) { 143 144 /* Might sleep */ 144 spinlock_unlock(&THREAD->lock);145 spinlock_unlock(&CPU->lock);145 irq_spinlock_unlock(&THREAD->lock, false); 146 irq_spinlock_unlock(&CPU->lock, false); 146 147 THREAD->saved_fpu_context = 147 148 (fpu_context_t *) slab_alloc(fpu_context_slab, 0); 149 148 150 /* We may have switched CPUs during slab_alloc */ 149 goto restart; 151 goto restart; 150 152 } 151 153 fpu_init(); 152 154 THREAD->fpu_context_exists = 1; 153 155 } 156 154 157 CPU->fpu_owner = THREAD; 155 158 THREAD->fpu_context_engaged = 1; 156 spinlock_unlock(&THREAD->lock);157 158 spinlock_unlock(&CPU->lock);159 } 160 #endif 159 irq_spinlock_unlock(&THREAD->lock, false); 160 161 irq_spinlock_unlock(&CPU->lock, false); 162 } 163 #endif /* CONFIG_FPU_LAZY */ 161 164 162 165 /** Initialize scheduler … … 180 183 static thread_t *find_best_thread(void) 181 184 { 182 thread_t *t;183 runq_t *r;184 int i;185 186 185 ASSERT(CPU != NULL); 187 186 188 187 loop: 189 188 … … 194 193 * This improves energy saving and hyperthreading. 195 194 */ 196 195 197 196 /* Mark CPU as it was idle this clock tick */ 198 spinlock_lock(&CPU->lock);199 200 spinlock_unlock(&CPU->lock);201 202 203 197 irq_spinlock_lock(&CPU->lock, false); 198 CPU->idle = true; 199 irq_spinlock_unlock(&CPU->lock, false); 200 201 interrupts_enable(); 202 /* 204 203 * An interrupt might occur right now and wake up a thread. 205 204 * In such case, the CPU will continue to go to sleep 206 205 * even though there is a runnable thread. 207 206 */ 208 cpu_sleep(); 209 interrupts_disable(); 210 goto loop; 211 } 212 207 cpu_sleep(); 208 interrupts_disable(); 209 goto loop; 210 } 211 212 unsigned int i; 213 213 for (i = 0; i < RQ_COUNT; i++) { 214 r = &CPU->rq[i]; 215 spinlock_lock(&r->lock); 216 if (r->n == 0) { 214 irq_spinlock_lock(&(CPU->rq[i].lock), false); 215 if (CPU->rq[i].n == 0) { 217 216 /* 218 217 * If this queue is empty, try a lower-priority queue. 
219 218 */ 220 spinlock_unlock(&r->lock);219 irq_spinlock_unlock(&(CPU->rq[i].lock), false); 221 220 continue; 222 221 } 223 222 224 223 atomic_dec(&CPU->nrdy); 225 224 atomic_dec(&nrdy); 226 r->n--;227 225 CPU->rq[i].n--; 226 228 227 /* 229 228 * Take the first thread from the queue. 230 229 */ 231 t = list_get_instance(r->rq_head.next, thread_t, rq_link); 232 list_remove(&t->rq_link); 233 234 spinlock_unlock(&r->lock); 235 236 spinlock_lock(&t->lock); 237 t->cpu = CPU; 238 239 t->ticks = us2ticks((i + 1) * 10000); 240 t->priority = i; /* correct rq index */ 241 230 thread_t *thread = 231 list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link); 232 list_remove(&thread->rq_link); 233 234 irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock); 235 236 thread->cpu = CPU; 237 thread->ticks = us2ticks((i + 1) * 10000); 238 thread->priority = i; /* Correct rq index */ 239 242 240 /* 243 241 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated 244 242 * when load balancing needs emerge. 245 243 */ 246 t->flags &= ~THREAD_FLAG_STOLEN; 247 spinlock_unlock(&t->lock); 248 249 return t; 250 } 244 thread->flags &= ~THREAD_FLAG_STOLEN; 245 irq_spinlock_unlock(&thread->lock, false); 246 247 return thread; 248 } 249 251 250 goto loop; 252 253 251 } 254 252 … … 267 265 { 268 266 link_t head; 269 runq_t *r; 270 int i, n; 271 267 272 268 list_initialize(&head); 273 spinlock_lock(&CPU->lock); 269 irq_spinlock_lock(&CPU->lock, false); 270 274 271 if (CPU->needs_relink > NEEDS_RELINK_MAX) { 272 int i; 275 273 for (i = start; i < RQ_COUNT - 1; i++) { 276 /* remember and empty rq[i + 1] */277 r = &CPU->rq[i + 1];278 spinlock_lock(&r->lock);279 list_concat(&head, & r->rq_head);280 n = r->n;281 r->n = 0;282 spinlock_unlock(&r->lock);283 284 /* append rq[i + 1] to rq[i] */285 r = &CPU->rq[i];286 spinlock_lock(&r->lock);287 list_concat(& r->rq_head, &head);288 r->n += n;289 spinlock_unlock(&r->lock);274 /* Remember and empty rq[i + 1] */ 275 276 irq_spinlock_lock(&CPU->rq[i + 1].lock, false); 277 list_concat(&head, &CPU->rq[i + 1].rq_head); 278 size_t n = CPU->rq[i + 1].n; 279 CPU->rq[i + 1].n = 0; 280 irq_spinlock_unlock(&CPU->rq[i + 1].lock, false); 281 282 /* Append rq[i + 1] to rq[i] */ 283 284 irq_spinlock_lock(&CPU->rq[i].lock, false); 285 list_concat(&CPU->rq[i].rq_head, &head); 286 CPU->rq[i].n += n; 287 irq_spinlock_unlock(&CPU->rq[i].lock, false); 290 288 } 289 291 290 CPU->needs_relink = 0; 292 291 } 293 spinlock_unlock(&CPU->lock);294 292 293 irq_spinlock_unlock(&CPU->lock, false); 295 294 } 296 295 … … 305 304 { 306 305 volatile ipl_t ipl; 307 306 308 307 ASSERT(CPU != NULL); 309 308 310 309 ipl = interrupts_disable(); 311 310 312 311 if (atomic_get(&haltstate)) 313 312 halt(); 314 313 315 314 if (THREAD) { 316 spinlock_lock(&THREAD->lock);315 irq_spinlock_lock(&THREAD->lock, false); 317 316 318 317 /* Update thread kernel accounting */ … … 330 329 THREAD->last_cycle = get_cycle(); 331 330 332 spinlock_unlock(&THREAD->lock);331 irq_spinlock_unlock(&THREAD->lock, false); 333 332 interrupts_restore(THREAD->saved_context.ipl); 334 333 335 334 return; 336 335 } 337 336 338 337 /* 339 338 * Interrupt priority level of preempted thread is recorded 340 339 * here to facilitate scheduler() invocations from 341 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 340 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 
341 * 342 342 */ 343 343 THREAD->saved_context.ipl = ipl; 344 344 } 345 345 346 346 /* 347 347 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM 348 348 * and preemption counter. At this point THE could be coming either 349 349 * from THREAD's or CPU's stack. 350 * 350 351 */ 351 352 the_copy(THE, (the_t *) CPU->stack); 352 353 353 354 /* 354 355 * We may not keep the old stack. … … 362 363 * Therefore the scheduler() function continues in 363 364 * scheduler_separated_stack(). 365 * 364 366 */ 365 367 context_save(&CPU->saved_context); … … 367 369 (uintptr_t) CPU->stack, CPU_STACK_SIZE); 368 370 context_restore(&CPU->saved_context); 369 /* not reached */ 371 372 /* Not reached */ 370 373 } 371 374 … … 377 380 * 378 381 * Assume THREAD->lock is held. 382 * 379 383 */ 380 384 void scheduler_separated_stack(void) 381 385 { 382 int priority;383 386 DEADLOCK_PROBE_INIT(p_joinwq); 384 387 task_t *old_task = TASK; 385 388 as_t *old_as = AS; 386 389 387 390 ASSERT(CPU != NULL); 388 391 … … 391 394 * possible destruction should thread_destroy() be called on this or any 392 395 * other processor while the scheduler is still using them. 396 * 393 397 */ 394 398 if (old_task) 395 399 task_hold(old_task); 400 396 401 if (old_as) 397 402 as_hold(old_as); 398 403 399 404 if (THREAD) { 400 /* must be run after the switch to scheduler stack */405 /* Must be run after the switch to scheduler stack */ 401 406 after_thread_ran(); 402 407 403 408 switch (THREAD->state) { 404 409 case Running: 405 spinlock_unlock(&THREAD->lock);410 irq_spinlock_unlock(&THREAD->lock, false); 406 411 thread_ready(THREAD); 407 412 break; 408 413 409 414 case Exiting: 410 415 repeat: 411 416 if (THREAD->detached) { 412 thread_destroy(THREAD );417 thread_destroy(THREAD, false); 413 418 } else { 414 419 /* 415 420 * The thread structure is kept allocated until 416 421 * somebody calls thread_detach() on it. 422 * 417 423 */ 418 if (! spinlock_trylock(&THREAD->join_wq.lock)) {424 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) { 419 425 /* 420 426 * Avoid deadlock. 427 * 421 428 */ 422 spinlock_unlock(&THREAD->lock);429 irq_spinlock_unlock(&THREAD->lock, false); 423 430 delay(HZ); 424 spinlock_lock(&THREAD->lock);431 irq_spinlock_lock(&THREAD->lock, false); 425 432 DEADLOCK_PROBE(p_joinwq, 426 433 DEADLOCK_THRESHOLD); … … 429 436 _waitq_wakeup_unsafe(&THREAD->join_wq, 430 437 WAKEUP_FIRST); 431 spinlock_unlock(&THREAD->join_wq.lock);438 irq_spinlock_unlock(&THREAD->join_wq.lock, false); 432 439 433 440 THREAD->state = Lingering; 434 spinlock_unlock(&THREAD->lock);441 irq_spinlock_unlock(&THREAD->lock, false); 435 442 } 436 443 break; … … 439 446 /* 440 447 * Prefer the thread after it's woken up. 448 * 441 449 */ 442 450 THREAD->priority = -1; 443 451 444 452 /* 445 453 * We need to release wq->lock which we locked in 446 454 * waitq_sleep(). Address of wq->lock is kept in 447 455 * THREAD->sleep_queue. 456 * 448 457 */ 449 spinlock_unlock(&THREAD->sleep_queue->lock);450 458 irq_spinlock_unlock(&THREAD->sleep_queue->lock, false); 459 451 460 /* 452 461 * Check for possible requests for out-of-context 453 462 * invocation. 463 * 454 464 */ 455 465 if (THREAD->call_me) { … … 458 468 THREAD->call_me_with = NULL; 459 469 } 460 461 spinlock_unlock(&THREAD->lock);462 470 471 irq_spinlock_unlock(&THREAD->lock, false); 472 463 473 break; 464 474 465 475 default: 466 476 /* 467 477 * Entering state is unexpected. 
478 * 468 479 */ 469 480 panic("tid%" PRIu64 ": unexpected state %s.", … … 471 482 break; 472 483 } 473 484 474 485 THREAD = NULL; 475 486 } 476 487 477 488 THREAD = find_best_thread(); 478 489 479 spinlock_lock(&THREAD->lock);480 priority = THREAD->priority;481 spinlock_unlock(&THREAD->lock);482 483 relink_rq(priority); 484 490 irq_spinlock_lock(&THREAD->lock, false); 491 int priority = THREAD->priority; 492 irq_spinlock_unlock(&THREAD->lock, false); 493 494 relink_rq(priority); 495 485 496 /* 486 497 * If both the old and the new task are the same, lots of work is 487 498 * avoided. 499 * 488 500 */ 489 501 if (TASK != THREAD->task) { … … 493 505 * Note that it is possible for two tasks to share one address 494 506 * space. 507 ( 495 508 */ 496 509 if (old_as != new_as) { … … 498 511 * Both tasks and address spaces are different. 499 512 * Replace the old one with the new one. 513 * 500 514 */ 501 515 as_switch(old_as, new_as); 502 516 } 503 517 504 518 TASK = THREAD->task; 505 519 before_task_runs(); 506 520 } 507 521 508 522 if (old_task) 509 523 task_release(old_task); 524 510 525 if (old_as) 511 526 as_release(old_as); 512 527 513 spinlock_lock(&THREAD->lock);528 irq_spinlock_lock(&THREAD->lock, false); 514 529 THREAD->state = Running; 515 530 516 531 #ifdef SCHEDULER_VERBOSE 517 532 printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 518 533 ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, 519 534 THREAD->ticks, atomic_get(&CPU->nrdy)); 520 #endif 521 535 #endif 536 522 537 /* 523 538 * Some architectures provide late kernel PA2KA(identity) … … 527 542 * necessary, is to be mapped in before_thread_runs(). This 528 543 * function must be executed before the switch to the new stack. 544 * 529 545 */ 530 546 before_thread_runs(); 531 547 532 548 /* 533 549 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to 534 550 * thread's stack. 551 * 535 552 */ 536 553 the_copy(THE, (the_t *) THREAD->kstack); 537 554 538 555 context_restore(&THREAD->saved_context); 539 /* not reached */ 556 557 /* Not reached */ 540 558 } 541 559 … … 551 569 void kcpulb(void *arg) 552 570 { 553 thread_t *t;554 int count;555 571 atomic_count_t average; 556 unsigned int i; 557 int j; 558 int k = 0; 559 ipl_t ipl; 560 572 atomic_count_t rdy; 573 561 574 /* 562 575 * Detach kcpulb as nobody will call thread_join_timeout() on it. … … 569 582 */ 570 583 thread_sleep(1); 571 584 572 585 not_satisfied: 573 586 /* … … 575 588 * other CPU's. Note that situation can have changed between two 576 589 * passes. Each time get the most up to date counts. 590 * 577 591 */ 578 592 average = atomic_get(&nrdy) / config.cpu_active + 1; 579 count = average -atomic_get(&CPU->nrdy);580 581 if ( count <= 0)593 rdy = atomic_get(&CPU->nrdy); 594 595 if (average <= rdy) 582 596 goto satisfied; 583 597 598 atomic_count_t count = average - rdy; 599 584 600 /* 585 601 * Searching least priority queues on all CPU's first and most priority 586 602 * queues on all CPU's last. 587 */ 588 for (j = RQ_COUNT - 1; j >= 0; j--) { 589 for (i = 0; i < config.cpu_active; i++) { 590 link_t *l; 591 runq_t *r; 592 cpu_t *cpu; 593 594 cpu = &cpus[(i + k) % config.cpu_active]; 595 603 * 604 */ 605 size_t acpu; 606 size_t acpu_bias = 0; 607 int rq; 608 609 for (rq = RQ_COUNT - 1; rq >= 0; rq--) { 610 for (acpu = 0; acpu < config.cpu_active; acpu++) { 611 cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active]; 612 596 613 /* 597 614 * Not interested in ourselves. 
598 615 * Doesn't require interrupt disabling for kcpulb has 599 616 * THREAD_FLAG_WIRED. 617 * 600 618 */ 601 619 if (CPU == cpu) 602 620 continue; 621 603 622 if (atomic_get(&cpu->nrdy) <= average) 604 623 continue; 605 606 ipl = interrupts_disable(); 607 r = &cpu->rq[j]; 608 spinlock_lock(&r->lock); 609 if (r->n == 0) { 610 spinlock_unlock(&r->lock); 611 interrupts_restore(ipl); 624 625 irq_spinlock_lock(&(cpu->rq[rq].lock), true); 626 if (cpu->rq[rq].n == 0) { 627 irq_spinlock_unlock(&(cpu->rq[rq].lock), true); 612 628 continue; 613 629 } 614 615 t = NULL; 616 l = r->rq_head.prev; /* search rq from the back */ 617 while (l != &r->rq_head) { 618 t = list_get_instance(l, thread_t, rq_link); 630 631 thread_t *thread = NULL; 632 633 /* Search rq from the back */ 634 link_t *link = cpu->rq[rq].rq_head.prev; 635 636 while (link != &(cpu->rq[rq].rq_head)) { 637 thread = (thread_t *) list_get_instance(link, thread_t, rq_link); 638 619 639 /* 620 640 * We don't want to steal CPU-wired threads … … 624 644 * steal threads whose FPU context is still in 625 645 * CPU. 646 * 626 647 */ 627 spinlock_lock(&t->lock);628 if ((!(t->flags & (THREAD_FLAG_WIRED |629 THREAD_FLAG_STOLEN))) &&630 (!(t->fpu_context_engaged))) {648 irq_spinlock_lock(&thread->lock, false); 649 650 if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) 651 && (!(thread->fpu_context_engaged))) { 631 652 /* 632 * Remove t from r.653 * Remove thread from ready queue. 633 654 */ 634 spinlock_unlock(&t->lock);655 irq_spinlock_unlock(&thread->lock, false); 635 656 636 657 atomic_dec(&cpu->nrdy); 637 658 atomic_dec(&nrdy); 638 639 r->n--;640 list_remove(&t ->rq_link);641 659 660 cpu->rq[rq].n--; 661 list_remove(&thread->rq_link); 662 642 663 break; 643 664 } 644 spinlock_unlock(&t->lock); 645 l = l->prev; 646 t = NULL; 665 666 irq_spinlock_unlock(&thread->lock, false); 667 668 link = link->prev; 669 thread = NULL; 647 670 } 648 spinlock_unlock(&r->lock); 649 650 if (t) { 671 672 if (thread) { 651 673 /* 652 * Ready t on local CPU 674 * Ready thread on local CPU 675 * 653 676 */ 654 spinlock_lock(&t->lock); 677 678 irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock); 679 655 680 #ifdef KCPULB_VERBOSE 656 681 printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, " … … 659 684 atomic_get(&nrdy) / config.cpu_active); 660 685 #endif 661 t->flags |= THREAD_FLAG_STOLEN; 662 t->state = Entering; 663 spinlock_unlock(&t->lock); 664 665 thread_ready(t); 666 667 interrupts_restore(ipl); 668 686 687 thread->flags |= THREAD_FLAG_STOLEN; 688 thread->state = Entering; 689 690 irq_spinlock_unlock(&thread->lock, true); 691 thread_ready(thread); 692 669 693 if (--count == 0) 670 694 goto satisfied; 671 695 672 696 /* 673 697 * We are not satisfied yet, focus on another 674 698 * CPU next time. 699 * 675 700 */ 676 k++;701 acpu_bias++; 677 702 678 703 continue; 679 } 680 interrupts_restore(ipl); 704 } else 705 irq_spinlock_unlock(&(cpu->rq[rq].lock), true); 706 681 707 } 682 708 } 683 709 684 710 if (atomic_get(&CPU->nrdy)) { 685 711 /* 686 712 * Be a little bit light-weight and let migrated threads run. 713 * 687 714 */ 688 715 scheduler(); … … 691 718 * We failed to migrate a single thread. 692 719 * Give up this turn. 
720 * 693 721 */ 694 722 goto loop; 695 723 } 696 724 697 725 goto not_satisfied; 698 726 699 727 satisfied: 700 728 goto loop; 701 729 } 702 703 730 #endif /* CONFIG_SMP */ 704 731 705 706 /** Print information about threads & scheduler queues */ 732 /** Print information about threads & scheduler queues 733 * 734 */ 707 735 void sched_print_list(void) 708 736 { 709 ipl_t ipl; 710 unsigned int cpu, i; 711 runq_t *r; 712 thread_t *t; 713 link_t *cur; 714 715 /* We are going to mess with scheduler structures, 716 * let's not be interrupted */ 717 ipl = interrupts_disable(); 737 size_t cpu; 718 738 for (cpu = 0; cpu < config.cpu_count; cpu++) { 719 720 739 if (!cpus[cpu].active) 721 740 continue; 722 723 spinlock_lock(&cpus[cpu].lock); 741 742 irq_spinlock_lock(&cpus[cpu].lock, true); 743 724 744 printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n", 725 745 cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), 726 746 cpus[cpu].needs_relink); 727 747 748 unsigned int i; 728 749 for (i = 0; i < RQ_COUNT; i++) { 729 r = &cpus[cpu].rq[i]; 730 spinlock_lock(&r->lock); 731 if (!r->n) { 732 spinlock_unlock(&r->lock); 750 irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false); 751 if (cpus[cpu].rq[i].n == 0) { 752 irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false); 733 753 continue; 734 754 } 755 735 756 printf("\trq[%u]: ", i); 736 for (cur = r->rq_head.next; cur != &r->rq_head; 737 cur = cur->next) { 738 t = list_get_instance(cur, thread_t, rq_link); 739 printf("%" PRIu64 "(%s) ", t->tid, 740 thread_states[t->state]); 757 link_t *cur; 758 for (cur = cpus[cpu].rq[i].rq_head.next; 759 cur != &(cpus[cpu].rq[i].rq_head); 760 cur = cur->next) { 761 thread_t *thread = list_get_instance(cur, thread_t, rq_link); 762 printf("%" PRIu64 "(%s) ", thread->tid, 763 thread_states[thread->state]); 741 764 } 742 765 printf("\n"); 743 spinlock_unlock(&r->lock); 766 767 irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false); 744 768 } 745 spinlock_unlock(&cpus[cpu].lock); 746 } 747 748 interrupts_restore(ipl); 769 770 irq_spinlock_unlock(&cpus[cpu].lock, true); 771 } 749 772 } 750 773 -
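Note: find_best_thread() and kcpulb() above also introduce irq_spinlock_pass(), which releases one held lock and acquires another without re-enabling interrupts in between. A condensed sketch of the run-queue handoff as it reads after this change (field and function names taken from the hunks; empty-queue and error paths omitted):

    /* Pop the first thread from rq[i] under the run-queue lock, then
     * hand the held lock over to the thread's own lock in one step. */
    irq_spinlock_lock(&CPU->rq[i].lock, false);

    thread_t *thread =
        list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
    list_remove(&thread->rq_link);

    irq_spinlock_pass(&CPU->rq[i].lock, &thread->lock);

    thread->cpu = CPU;
    thread->priority = i;          /* correct rq index */
    irq_spinlock_unlock(&thread->lock, false);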
kernel/generic/src/proc/task.c
r666f492 rda1bafb 60 60 61 61 /** Spinlock protecting the tasks_tree AVL tree. */ 62 SPINLOCK_INITIALIZE(tasks_lock);62 IRQ_SPINLOCK_INITIALIZE(tasks_lock); 63 63 64 64 /** AVL tree of active tasks. … … 81 81 /* Forward declarations. */ 82 82 static void task_kill_internal(task_t *); 83 static int tsk_constructor(void *, int); 84 85 /** Initialize kernel tasks support. */ 83 static int tsk_constructor(void *, unsigned int); 84 85 /** Initialize kernel tasks support. 86 * 87 */ 86 88 void task_init(void) 87 89 { … … 92 94 } 93 95 94 /* 96 /** Task finish walker. 97 * 95 98 * The idea behind this walker is to kill and count all tasks different from 96 99 * TASK. 100 * 97 101 */ 98 102 static bool task_done_walker(avltree_node_t *node, void *arg) 99 103 { 100 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);101 unsigned *cnt = (unsigned*) arg;102 103 if (t != TASK) {104 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node); 105 size_t *cnt = (size_t *) arg; 106 107 if (task != TASK) { 104 108 (*cnt)++; 109 105 110 #ifdef CONFIG_DEBUG 106 printf("[%"PRIu64"] ", t->taskid); 107 #endif 108 task_kill_internal(t); 111 printf("[%"PRIu64"] ", task->taskid); 112 #endif 113 114 task_kill_internal(task); 109 115 } 110 116 … … 113 119 } 114 120 115 /** Kill all tasks except the current task. */ 121 /** Kill all tasks except the current task. 122 * 123 */ 116 124 void task_done(void) 117 125 { 118 unsignedtasks_left;119 120 do {/* Repeat until there are any tasks except TASK */121 /* Messing with task structures, avoid deadlock */126 size_t tasks_left; 127 128 /* Repeat until there are any tasks except TASK */ 129 do { 122 130 #ifdef CONFIG_DEBUG 123 131 printf("Killing tasks... "); 124 132 #endif 125 ipl_t ipl = interrupts_disable();126 spinlock_lock(&tasks_lock);133 134 irq_spinlock_lock(&tasks_lock, true); 127 135 tasks_left = 0; 128 136 avltree_walk(&tasks_tree, task_done_walker, &tasks_left); 129 spinlock_unlock(&tasks_lock);130 interrupts_restore(ipl);137 irq_spinlock_unlock(&tasks_lock, true); 138 131 139 thread_sleep(1); 140 132 141 #ifdef CONFIG_DEBUG 133 142 printf("\n"); 134 143 #endif 135 } while (tasks_left); 136 } 137 138 int tsk_constructor(void *obj, int kmflags) 139 { 140 task_t *ta = obj; 141 int i; 142 143 atomic_set(&ta->refcount, 0); 144 atomic_set(&ta->lifecount, 0); 145 atomic_set(&ta->active_calls, 0); 146 147 spinlock_initialize(&ta->lock, "task_ta_lock"); 148 mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE); 149 150 list_initialize(&ta->th_head); 151 list_initialize(&ta->sync_box_head); 152 153 ipc_answerbox_init(&ta->answerbox, ta); 144 } while (tasks_left > 0); 145 } 146 147 int tsk_constructor(void *obj, unsigned int kmflags) 148 { 149 task_t *task = (task_t *) obj; 150 151 atomic_set(&task->refcount, 0); 152 atomic_set(&task->lifecount, 0); 153 atomic_set(&task->active_calls, 0); 154 155 irq_spinlock_initialize(&task->lock, "task_t_lock"); 156 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE); 157 158 list_initialize(&task->th_head); 159 list_initialize(&task->sync_box_head); 160 161 ipc_answerbox_init(&task->answerbox, task); 162 163 size_t i; 154 164 for (i = 0; i < IPC_MAX_PHONES; i++) 155 ipc_phone_init(&ta ->phones[i]);165 ipc_phone_init(&task->phones[i]); 156 166 157 167 #ifdef CONFIG_UDEBUG 158 168 /* Init kbox stuff */ 159 ta ->kb.thread = NULL;160 ipc_answerbox_init(&ta ->kb.box, ta);161 mutex_initialize(&ta ->kb.cleanup_lock, MUTEX_PASSIVE);169 task->kb.thread = NULL; 170 ipc_answerbox_init(&task->kb.box, task); 171 
mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE); 162 172 #endif 163 173 … … 175 185 task_t *task_create(as_t *as, const char *name) 176 186 { 177 ipl_t ipl; 178 task_t *ta; 179 180 ta = (task_t *) slab_alloc(task_slab, 0); 181 task_create_arch(ta); 182 ta->as = as; 183 memcpy(ta->name, name, TASK_NAME_BUFLEN); 184 ta->name[TASK_NAME_BUFLEN - 1] = 0; 185 186 ta->context = CONTEXT; 187 ta->capabilities = 0; 188 ta->ucycles = 0; 189 ta->kcycles = 0; 190 191 ta->ipc_info.call_sent = 0; 192 ta->ipc_info.call_recieved = 0; 193 ta->ipc_info.answer_sent = 0; 194 ta->ipc_info.answer_recieved = 0; 195 ta->ipc_info.irq_notif_recieved = 0; 196 ta->ipc_info.forwarded = 0; 197 187 task_t *task = (task_t *) slab_alloc(task_slab, 0); 188 task_create_arch(task); 189 190 task->as = as; 191 str_cpy(task->name, TASK_NAME_BUFLEN, name); 192 193 task->context = CONTEXT; 194 task->capabilities = 0; 195 task->ucycles = 0; 196 task->kcycles = 0; 197 198 task->ipc_info.call_sent = 0; 199 task->ipc_info.call_recieved = 0; 200 task->ipc_info.answer_sent = 0; 201 task->ipc_info.answer_recieved = 0; 202 task->ipc_info.irq_notif_recieved = 0; 203 task->ipc_info.forwarded = 0; 204 198 205 #ifdef CONFIG_UDEBUG 199 206 /* Init debugging stuff */ 200 udebug_task_init(&ta ->udebug);207 udebug_task_init(&task->udebug); 201 208 202 209 /* Init kbox stuff */ 203 ta ->kb.finished = false;210 task->kb.finished = false; 204 211 #endif 205 212 206 213 if ((ipc_phone_0) && 207 (context_check(ipc_phone_0->task->context, ta ->context)))208 ipc_phone_connect(&ta ->phones[0], ipc_phone_0);209 210 btree_create(&ta ->futexes);214 (context_check(ipc_phone_0->task->context, task->context))) 215 ipc_phone_connect(&task->phones[0], ipc_phone_0); 216 217 btree_create(&task->futexes); 211 218 212 219 /* 213 220 * Get a reference to the address space. 214 221 */ 215 as_hold(ta ->as);216 217 i pl = interrupts_disable();218 spinlock_lock(&tasks_lock);219 ta ->taskid = ++task_counter;220 avltree_node_initialize(&ta ->tasks_tree_node);221 ta ->tasks_tree_node.key = ta->taskid;222 avltree_insert(&tasks_tree, &ta ->tasks_tree_node);223 spinlock_unlock(&tasks_lock);224 i nterrupts_restore(ipl);225 226 return ta ;222 as_hold(task->as); 223 224 irq_spinlock_lock(&tasks_lock, true); 225 226 task->taskid = ++task_counter; 227 avltree_node_initialize(&task->tasks_tree_node); 228 task->tasks_tree_node.key = task->taskid; 229 avltree_insert(&tasks_tree, &task->tasks_tree_node); 230 231 irq_spinlock_unlock(&tasks_lock, true); 232 233 return task; 227 234 } 228 235 229 236 /** Destroy task. 230 237 * 231 * @param t Task to be destroyed.232 * 233 */ 234 void task_destroy(task_t *t )238 * @param task Task to be destroyed. 239 * 240 */ 241 void task_destroy(task_t *task) 235 242 { 236 243 /* 237 244 * Remove the task from the task B+tree. 238 245 */ 239 spinlock_lock(&tasks_lock);240 avltree_delete(&tasks_tree, &t ->tasks_tree_node);241 spinlock_unlock(&tasks_lock);246 irq_spinlock_lock(&tasks_lock, true); 247 avltree_delete(&tasks_tree, &task->tasks_tree_node); 248 irq_spinlock_unlock(&tasks_lock, true); 242 249 243 250 /* 244 251 * Perform architecture specific task destruction. 245 252 */ 246 task_destroy_arch(t );253 task_destroy_arch(task); 247 254 248 255 /* 249 256 * Free up dynamically allocated state. 250 257 */ 251 btree_destroy(&t ->futexes);258 btree_destroy(&task->futexes); 252 259 253 260 /* 254 261 * Drop our reference to the address space. 
255 262 */ 256 as_release(t ->as);257 258 slab_free(task_slab, t );263 as_release(task->as); 264 265 slab_free(task_slab, task); 259 266 } 260 267 … … 263 270 * Holding a reference to a task prevents destruction of that task. 264 271 * 265 * @param t Task to be held. 266 */ 267 void task_hold(task_t *t) 268 { 269 atomic_inc(&t->refcount); 272 * @param task Task to be held. 273 * 274 */ 275 void task_hold(task_t *task) 276 { 277 atomic_inc(&task->refcount); 270 278 } 271 279 … … 274 282 * The last one to release a reference to a task destroys the task. 275 283 * 276 * @param t Task to be released. 277 */ 278 void task_release(task_t *t) 279 { 280 if ((atomic_predec(&t->refcount)) == 0) 281 task_destroy(t); 284 * @param task Task to be released. 285 * 286 */ 287 void task_release(task_t *task) 288 { 289 if ((atomic_predec(&task->refcount)) == 0) 290 task_destroy(task); 282 291 } 283 292 … … 346 355 347 356 if (node) 348 return avltree_get_instance(node, task_t, tasks_tree_node); 357 return avltree_get_instance(node, task_t, tasks_tree_node); 349 358 350 359 return NULL; … … 356 365 * already disabled. 357 366 * 358 * @param t Pointer to thread.367 * @param task Pointer to the task. 359 368 * @param ucycles Out pointer to sum of all user cycles. 360 369 * @param kcycles Out pointer to sum of all kernel cycles. 361 370 * 362 371 */ 363 void task_get_accounting(task_t *t , uint64_t *ucycles, uint64_t *kcycles)372 void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles) 364 373 { 365 374 /* Accumulated values of task */ 366 uint64_t uret = t ->ucycles;367 uint64_t kret = t ->kcycles;375 uint64_t uret = task->ucycles; 376 uint64_t kret = task->kcycles; 368 377 369 378 /* Current values of threads */ 370 379 link_t *cur; 371 for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) { 372 thread_t *thr = list_get_instance(cur, thread_t, th_link); 373 374 spinlock_lock(&thr->lock); 380 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 381 thread_t *thread = list_get_instance(cur, thread_t, th_link); 382 383 irq_spinlock_lock(&thread->lock, false); 384 375 385 /* Process only counted threads */ 376 if (!thr ->uncounted) {377 if (thr == THREAD) {386 if (!thread->uncounted) { 387 if (thread == THREAD) { 378 388 /* Update accounting of current thread */ 379 389 thread_update_accounting(false); 380 } 381 uret += thr->ucycles; 382 kret += thr->kcycles; 390 } 391 392 uret += thread->ucycles; 393 kret += thread->kcycles; 383 394 } 384 spinlock_unlock(&thr->lock); 395 396 irq_spinlock_unlock(&thread->lock, false); 385 397 } 386 398 … … 389 401 } 390 402 391 static void task_kill_internal(task_t *ta )403 static void task_kill_internal(task_t *task) 392 404 { 393 405 link_t *cur; … … 396 408 * Interrupt all threads. 
397 409 */ 398 spinlock_lock(&ta->lock);399 for (cur = ta ->th_head.next; cur != &ta->th_head; cur = cur->next) {400 thread_t *thr ;410 irq_spinlock_lock(&task->lock, false); 411 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 412 thread_t *thread = list_get_instance(cur, thread_t, th_link); 401 413 bool sleeping = false; 402 414 403 thr = list_get_instance(cur, thread_t, th_link); 404 405 spinlock_lock(&thr->lock); 406 thr->interrupted = true; 407 if (thr->state == Sleeping) 415 irq_spinlock_lock(&thread->lock, false); 416 417 thread->interrupted = true; 418 if (thread->state == Sleeping) 408 419 sleeping = true; 409 spinlock_unlock(&thr->lock); 420 421 irq_spinlock_unlock(&thread->lock, false); 410 422 411 423 if (sleeping) 412 waitq_interrupt_sleep(thr );424 waitq_interrupt_sleep(thread); 413 425 } 414 spinlock_unlock(&ta->lock); 426 427 irq_spinlock_unlock(&task->lock, false); 415 428 } 416 429 … … 427 440 int task_kill(task_id_t id) 428 441 { 429 ipl_t ipl;430 task_t *ta;431 432 442 if (id == 1) 433 443 return EPERM; 434 444 435 i pl = interrupts_disable();436 spinlock_lock(&tasks_lock);437 if (!(ta = task_find_by_id(id))) {438 spinlock_unlock(&tasks_lock);439 i nterrupts_restore(ipl);445 irq_spinlock_lock(&tasks_lock, true); 446 447 task_t *task = task_find_by_id(id); 448 if (!task) { 449 irq_spinlock_unlock(&tasks_lock, true); 440 450 return ENOENT; 441 451 } 442 task_kill_internal(ta); 443 spinlock_unlock(&tasks_lock); 444 interrupts_restore(ipl); 445 return 0; 452 453 task_kill_internal(task); 454 irq_spinlock_unlock(&tasks_lock, true); 455 456 return EOK; 446 457 } 447 458 448 459 static bool task_print_walker(avltree_node_t *node, void *arg) 449 460 { 450 task_t *t = avltree_get_instance(node, task_t, tasks_tree_node); 451 int j; 452 453 spinlock_lock(&t->lock); 461 task_t *task = avltree_get_instance(node, task_t, tasks_tree_node); 462 irq_spinlock_lock(&task->lock, false); 454 463 455 464 uint64_t ucycles; 456 465 uint64_t kcycles; 457 466 char usuffix, ksuffix; 458 task_get_accounting(t , &ucycles, &kcycles);467 task_get_accounting(task, &ucycles, &kcycles); 459 468 order_suffix(ucycles, &ucycles, &usuffix); 460 469 order_suffix(kcycles, &kcycles, &ksuffix); 461 470 462 #ifdef __32_BITS__ 471 #ifdef __32_BITS__ 463 472 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9" 464 PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,465 ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),466 atomic_get(&t->active_calls));473 PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context, 474 task, task->as, ucycles, usuffix, kcycles, ksuffix, 475 atomic_get(&task->refcount), atomic_get(&task->active_calls)); 467 476 #endif 468 477 469 478 #ifdef __64_BITS__ 470 479 printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9" 471 PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, 472 ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount), 473 atomic_get(&t->active_calls)); 474 #endif 475 476 for (j = 0; j < IPC_MAX_PHONES; j++) { 477 if (t->phones[j].callee) 478 printf(" %d:%p", j, t->phones[j].callee); 480 PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context, 481 task, task->as, ucycles, usuffix, kcycles, ksuffix, 482 atomic_get(&task->refcount), atomic_get(&task->active_calls)); 483 #endif 484 485 size_t i; 486 for (i = 0; i < IPC_MAX_PHONES; i++) { 487 if (task->phones[i].callee) 488 printf(" %" PRIs ":%p", i, task->phones[i].callee); 479 489 } 480 490 printf("\n"); 481 491 482 
spinlock_unlock(&t->lock);492 irq_spinlock_unlock(&task->lock, false); 483 493 return true; 484 494 } … … 487 497 void task_print_list(void) 488 498 { 489 ipl_t ipl;490 491 499 /* Messing with task structures, avoid deadlock */ 492 ipl = interrupts_disable(); 493 spinlock_lock(&tasks_lock); 500 irq_spinlock_lock(&tasks_lock, true); 494 501 495 502 #ifdef __32_BITS__ … … 509 516 avltree_walk(&tasks_tree, task_print_walker, NULL); 510 517 511 spinlock_unlock(&tasks_lock); 512 interrupts_restore(ipl); 518 irq_spinlock_unlock(&tasks_lock, true); 513 519 } 514 520 -
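Note: the task.c hunks establish the locking convention used throughout the changeset: the outermost irq_spinlock call passes true (disable and later restore interrupts), while nested calls pass false because interrupts are already off. task_kill() after the change illustrates the nesting (reproduced from the hunk above, slightly condensed):

    int task_kill(task_id_t id)
    {
        if (id == 1)
            return EPERM;

        irq_spinlock_lock(&tasks_lock, true);    /* outer: disables interrupts */

        task_t *task = task_find_by_id(id);
        if (!task) {
            irq_spinlock_unlock(&tasks_lock, true);
            return ENOENT;
        }

        task_kill_internal(task);                /* inner locks use 'false' */
        irq_spinlock_unlock(&tasks_lock, true);  /* restores interrupts */

        return EOK;
    }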
kernel/generic/src/proc/thread.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Thread management functions. 36 36 */ 37 37 … … 94 94 * 95 95 * For locking rules, see declaration thereof. 96 */ 97 SPINLOCK_INITIALIZE(threads_lock); 96 * 97 */ 98 IRQ_SPINLOCK_INITIALIZE(threads_lock); 98 99 99 100 /** AVL tree of all threads. … … 101 102 * When a thread is found in the threads_tree AVL tree, it is guaranteed to 102 103 * exist as long as the threads_lock is held. 103 */ 104 avltree_t threads_tree; 105 106 SPINLOCK_INITIALIZE(tidlock); 107 thread_id_t last_tid = 0; 104 * 105 */ 106 avltree_t threads_tree; 107 108 IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock); 109 static thread_id_t last_tid = 0; 108 110 109 111 static slab_cache_t *thread_slab; 112 110 113 #ifdef CONFIG_FPU 111 114 slab_cache_t *fpu_context_slab; … … 125 128 void *arg = THREAD->thread_arg; 126 129 THREAD->last_cycle = get_cycle(); 127 130 128 131 /* This is where each thread wakes up after its creation */ 129 spinlock_unlock(&THREAD->lock);132 irq_spinlock_unlock(&THREAD->lock, false); 130 133 interrupts_enable(); 131 134 132 135 f(arg); 133 136 134 137 /* Accumulate accounting to the task */ 135 ipl_t ipl = interrupts_disable(); 136 137 spinlock_lock(&THREAD->lock); 138 irq_spinlock_lock(&THREAD->lock, true); 138 139 if (!THREAD->uncounted) { 139 140 thread_update_accounting(true); … … 142 143 uint64_t kcycles = THREAD->kcycles; 143 144 THREAD->kcycles = 0; 144 145 spinlock_unlock(&THREAD->lock);146 145 147 spinlock_lock(&TASK->lock);146 irq_spinlock_pass(&THREAD->lock, &TASK->lock); 148 147 TASK->ucycles += ucycles; 149 148 TASK->kcycles += kcycles; 150 spinlock_unlock(&TASK->lock);149 irq_spinlock_unlock(&TASK->lock, true); 151 150 } else 152 spinlock_unlock(&THREAD->lock); 153 154 interrupts_restore(ipl); 151 irq_spinlock_unlock(&THREAD->lock, true); 155 152 156 153 thread_exit(); 157 /* not reached */ 158 } 159 160 /** Initialization and allocation for thread_t structure */ 161 static int thr_constructor(void *obj, int kmflags) 162 { 163 thread_t *t = (thread_t *) obj; 164 165 spinlock_initialize(&t->lock, "thread_t_lock"); 166 link_initialize(&t->rq_link); 167 link_initialize(&t->wq_link); 168 link_initialize(&t->th_link); 169 154 155 /* Not reached */ 156 } 157 158 /** Initialization and allocation for thread_t structure 159 * 160 */ 161 static int thr_constructor(void *obj, unsigned int kmflags) 162 { 163 thread_t *thread = (thread_t *) obj; 164 165 irq_spinlock_initialize(&thread->lock, "thread_t_lock"); 166 link_initialize(&thread->rq_link); 167 link_initialize(&thread->wq_link); 168 link_initialize(&thread->th_link); 169 170 170 /* call the architecture-specific part of the constructor */ 171 thr_constructor_arch(t );171 thr_constructor_arch(thread); 172 172 173 173 #ifdef CONFIG_FPU 174 174 #ifdef CONFIG_FPU_LAZY 175 t ->saved_fpu_context = NULL;176 #else 177 t ->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);178 if (!t ->saved_fpu_context)175 thread->saved_fpu_context = NULL; 176 #else /* CONFIG_FPU_LAZY */ 177 thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags); 178 if (!thread->saved_fpu_context) 179 179 return -1; 180 #endif 181 #endif 182 183 t ->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);184 if (!t ->kstack) {180 #endif /* CONFIG_FPU_LAZY */ 181 #endif /* CONFIG_FPU */ 182 183 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 184 if (!thread->kstack) { 185 185 #ifdef CONFIG_FPU 186 if (t ->saved_fpu_context)187 slab_free(fpu_context_slab, t 
->saved_fpu_context);186 if (thread->saved_fpu_context) 187 slab_free(fpu_context_slab, thread->saved_fpu_context); 188 188 #endif 189 189 return -1; 190 190 } 191 191 192 192 #ifdef CONFIG_UDEBUG 193 mutex_initialize(&t ->udebug.lock, MUTEX_PASSIVE);194 #endif 195 193 mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE); 194 #endif 195 196 196 return 0; 197 197 } 198 198 199 199 /** Destruction of thread_t object */ 200 static int thr_destructor(void *obj)201 { 202 thread_t *t = (thread_t *) obj;203 200 static size_t thr_destructor(void *obj) 201 { 202 thread_t *thread = (thread_t *) obj; 203 204 204 /* call the architecture-specific part of the destructor */ 205 thr_destructor_arch(t); 206 207 frame_free(KA2PA(t->kstack)); 205 thr_destructor_arch(thread); 206 207 frame_free(KA2PA(thread->kstack)); 208 208 209 #ifdef CONFIG_FPU 209 if (t->saved_fpu_context) 210 slab_free(fpu_context_slab, t->saved_fpu_context); 211 #endif 212 return 1; /* One page freed */ 210 if (thread->saved_fpu_context) 211 slab_free(fpu_context_slab, thread->saved_fpu_context); 212 #endif 213 214 return 1; /* One page freed */ 213 215 } 214 216 … … 221 223 { 222 224 THREAD = NULL; 225 223 226 atomic_set(&nrdy, 0); 224 227 thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0, 225 228 thr_constructor, thr_destructor, 0); 226 229 227 230 #ifdef CONFIG_FPU 228 231 fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t), 229 232 FPU_CONTEXT_ALIGN, NULL, NULL, 0); 230 233 #endif 231 234 232 235 avltree_create(&threads_tree); 233 236 } … … 235 238 /** Make thread ready 236 239 * 237 * Switch thread t to the ready state.240 * Switch thread to the ready state. 238 241 * 239 242 * @param t Thread to make ready. 240 243 * 241 244 */ 242 void thread_ready(thread_t *t) 243 { 244 cpu_t *cpu; 245 runq_t *r; 246 ipl_t ipl; 247 int i, avg; 248 249 ipl = interrupts_disable(); 250 251 spinlock_lock(&t->lock); 252 253 ASSERT(!(t->state == Ready)); 254 255 i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority; 256 257 cpu = CPU; 258 if (t->flags & THREAD_FLAG_WIRED) { 259 ASSERT(t->cpu != NULL); 260 cpu = t->cpu; 245 void thread_ready(thread_t *thread) 246 { 247 irq_spinlock_lock(&thread->lock, true); 248 249 ASSERT(!(thread->state == Ready)); 250 251 int i = (thread->priority < RQ_COUNT - 1) 252 ? ++thread->priority : thread->priority; 253 254 cpu_t *cpu = CPU; 255 if (thread->flags & THREAD_FLAG_WIRED) { 256 ASSERT(thread->cpu != NULL); 257 cpu = thread->cpu; 261 258 } 262 t->state = Ready; 263 spinlock_unlock(&t->lock); 259 thread->state = Ready; 260 261 irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock)); 264 262 265 263 /* 266 * Append t to respective ready queue on respective processor. 264 * Append thread to respective ready queue 265 * on respective processor. 267 266 */ 268 r = &cpu->rq[i]; 269 spinlock_lock(&r->lock); 270 list_append(&t->rq_link, &r->rq_head); 271 r->n++; 272 spinlock_unlock(&r->lock); 273 267 268 list_append(&thread->rq_link, &cpu->rq[i].rq_head); 269 cpu->rq[i].n++; 270 irq_spinlock_unlock(&(cpu->rq[i].lock), true); 271 274 272 atomic_inc(&nrdy); 275 // FIXME: Why is the avg value n ever read?276 avg = atomic_get(&nrdy) / config.cpu_active;273 // FIXME: Why is the avg value not used 274 // avg = atomic_get(&nrdy) / config.cpu_active; 277 275 atomic_inc(&cpu->nrdy); 278 276 } 277 278 /** Create new thread 279 * 280 * Create a new thread. 281 * 282 * @param func Thread's implementing function. 283 * @param arg Thread's implementing function argument. 
284 * @param task Task to which the thread belongs. The caller must 285 * guarantee that the task won't cease to exist during the 286 * call. The task's lock may not be held. 287 * @param flags Thread flags. 288 * @param name Symbolic name (a copy is made). 289 * @param uncounted Thread's accounting doesn't affect accumulated task 290 * accounting. 291 * 292 * @return New thread's structure on success, NULL on failure. 293 * 294 */ 295 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, 296 unsigned int flags, const char *name, bool uncounted) 297 { 298 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0); 299 if (!thread) 300 return NULL; 301 302 /* Not needed, but good for debugging */ 303 memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0); 304 305 irq_spinlock_lock(&tidlock, true); 306 thread->tid = ++last_tid; 307 irq_spinlock_unlock(&tidlock, true); 308 309 context_save(&thread->saved_context); 310 context_set(&thread->saved_context, FADDR(cushion), 311 (uintptr_t) thread->kstack, THREAD_STACK_SIZE); 312 313 the_initialize((the_t *) thread->kstack); 314 315 ipl_t ipl = interrupts_disable(); 316 thread->saved_context.ipl = interrupts_read(); 279 317 interrupts_restore(ipl); 280 } 281 282 /** Create new thread 283 * 284 * Create a new thread. 285 * 286 * @param func Thread's implementing function. 287 * @param arg Thread's implementing function argument. 288 * @param task Task to which the thread belongs. The caller must 289 * guarantee that the task won't cease to exist during the 290 * call. The task's lock may not be held. 291 * @param flags Thread flags. 292 * @param name Symbolic name (a copy is made). 293 * @param uncounted Thread's accounting doesn't affect accumulated task 294 * accounting. 295 * 296 * @return New thread's structure on success, NULL on failure. 
297 * 298 */ 299 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, 300 int flags, const char *name, bool uncounted) 301 { 302 thread_t *t; 303 ipl_t ipl; 304 305 t = (thread_t *) slab_alloc(thread_slab, 0); 306 if (!t) 307 return NULL; 308 309 /* Not needed, but good for debugging */ 310 memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0); 311 312 ipl = interrupts_disable(); 313 spinlock_lock(&tidlock); 314 t->tid = ++last_tid; 315 spinlock_unlock(&tidlock); 316 interrupts_restore(ipl); 317 318 context_save(&t->saved_context); 319 context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, 320 THREAD_STACK_SIZE); 321 322 the_initialize((the_t *) t->kstack); 323 324 ipl = interrupts_disable(); 325 t->saved_context.ipl = interrupts_read(); 326 interrupts_restore(ipl); 327 328 memcpy(t->name, name, THREAD_NAME_BUFLEN); 329 t->name[THREAD_NAME_BUFLEN - 1] = 0; 330 331 t->thread_code = func; 332 t->thread_arg = arg; 333 t->ticks = -1; 334 t->ucycles = 0; 335 t->kcycles = 0; 336 t->uncounted = uncounted; 337 t->priority = -1; /* start in rq[0] */ 338 t->cpu = NULL; 339 t->flags = flags; 340 t->state = Entering; 341 t->call_me = NULL; 342 t->call_me_with = NULL; 343 344 timeout_initialize(&t->sleep_timeout); 345 t->sleep_interruptible = false; 346 t->sleep_queue = NULL; 347 t->timeout_pending = 0; 348 349 t->in_copy_from_uspace = false; 350 t->in_copy_to_uspace = false; 351 352 t->interrupted = false; 353 t->detached = false; 354 waitq_initialize(&t->join_wq); 355 356 t->rwlock_holder_type = RWLOCK_NONE; 357 358 t->task = task; 359 360 t->fpu_context_exists = 0; 361 t->fpu_context_engaged = 0; 362 363 avltree_node_initialize(&t->threads_tree_node); 364 t->threads_tree_node.key = (uintptr_t) t; 365 318 319 str_cpy(thread->name, THREAD_NAME_BUFLEN, name); 320 321 thread->thread_code = func; 322 thread->thread_arg = arg; 323 thread->ticks = -1; 324 thread->ucycles = 0; 325 thread->kcycles = 0; 326 thread->uncounted = uncounted; 327 thread->priority = -1; /* Start in rq[0] */ 328 thread->cpu = NULL; 329 thread->flags = flags; 330 thread->state = Entering; 331 thread->call_me = NULL; 332 thread->call_me_with = NULL; 333 334 timeout_initialize(&thread->sleep_timeout); 335 thread->sleep_interruptible = false; 336 thread->sleep_queue = NULL; 337 thread->timeout_pending = false; 338 339 thread->in_copy_from_uspace = false; 340 thread->in_copy_to_uspace = false; 341 342 thread->interrupted = false; 343 thread->detached = false; 344 waitq_initialize(&thread->join_wq); 345 346 thread->rwlock_holder_type = RWLOCK_NONE; 347 348 thread->task = task; 349 350 thread->fpu_context_exists = 0; 351 thread->fpu_context_engaged = 0; 352 353 avltree_node_initialize(&thread->threads_tree_node); 354 thread->threads_tree_node.key = (uintptr_t) thread; 355 366 356 #ifdef CONFIG_UDEBUG 367 357 /* Init debugging stuff */ 368 udebug_thread_initialize(&t ->udebug);369 #endif 370 371 /* might depend on previous initialization */372 thread_create_arch(t );373 358 udebug_thread_initialize(&thread->udebug); 359 #endif 360 361 /* Might depend on previous initialization */ 362 thread_create_arch(thread); 363 374 364 if (!(flags & THREAD_FLAG_NOATTACH)) 375 thread_attach(t , task);376 377 return t ;365 thread_attach(thread, task); 366 367 return thread; 378 368 } 379 369 … … 381 371 * 382 372 * Detach thread from all queues, cpus etc. and destroy it. 383 * 384 * Assume thread->lock is held!! 
385 */ 386 void thread_destroy(thread_t *t) 387 { 388 ASSERT(t->state == Exiting || t->state == Lingering); 389 ASSERT(t->task); 390 ASSERT(t->cpu); 391 392 spinlock_lock(&t->cpu->lock); 393 if (t->cpu->fpu_owner == t) 394 t->cpu->fpu_owner = NULL; 395 spinlock_unlock(&t->cpu->lock); 396 397 spinlock_unlock(&t->lock); 398 399 spinlock_lock(&threads_lock); 400 avltree_delete(&threads_tree, &t->threads_tree_node); 401 spinlock_unlock(&threads_lock); 402 373 * Assume thread->lock is held! 374 * 375 * @param thread Thread to be destroyed. 376 * @param irq_res Indicate whether it should unlock thread->lock 377 * in interrupts-restore mode. 378 * 379 */ 380 void thread_destroy(thread_t *thread, bool irq_res) 381 { 382 ASSERT((thread->state == Exiting) || (thread->state == Lingering)); 383 ASSERT(thread->task); 384 ASSERT(thread->cpu); 385 386 irq_spinlock_lock(&thread->cpu->lock, false); 387 if (thread->cpu->fpu_owner == thread) 388 thread->cpu->fpu_owner = NULL; 389 irq_spinlock_unlock(&thread->cpu->lock, false); 390 391 irq_spinlock_pass(&thread->lock, &threads_lock); 392 393 avltree_delete(&threads_tree, &thread->threads_tree_node); 394 395 irq_spinlock_pass(&threads_lock, &thread->task->lock); 396 403 397 /* 404 398 * Detach from the containing task. 405 399 */ 406 spinlock_lock(&t->task->lock); 407 list_remove(&t->th_link); 408 spinlock_unlock(&t->task->lock); 409 400 list_remove(&thread->th_link); 401 irq_spinlock_unlock(&thread->task->lock, irq_res); 402 410 403 /* 411 404 * Drop the reference to the containing task. 412 405 */ 413 task_release(t->task); 414 415 slab_free(thread_slab, t); 406 task_release(thread->task); 407 slab_free(thread_slab, thread); 416 408 } 417 409 … … 421 413 * threads_tree. 422 414 * 423 * @param t Thread to be attached to the task. 424 * @param task Task to which the thread is to be attached. 425 */ 426 void thread_attach(thread_t *t, task_t *task) 427 { 428 ipl_t ipl; 429 415 * @param t Thread to be attached to the task. 416 * @param task Task to which the thread is to be attached. 417 * 418 */ 419 void thread_attach(thread_t *thread, task_t *task) 420 { 430 421 /* 431 422 * Attach to the specified task. 432 423 */ 433 ipl = interrupts_disable(); 434 spinlock_lock(&task->lock); 435 424 irq_spinlock_lock(&task->lock, true); 425 436 426 /* Hold a reference to the task. */ 437 427 task_hold(task); 438 428 439 429 /* Must not count kbox thread into lifecount */ 440 if (t ->flags & THREAD_FLAG_USPACE)430 if (thread->flags & THREAD_FLAG_USPACE) 441 431 atomic_inc(&task->lifecount); 442 443 list_append(&t->th_link, &task->th_head); 444 spinlock_unlock(&task->lock); 445 432 433 list_append(&thread->th_link, &task->th_head); 434 435 irq_spinlock_pass(&task->lock, &threads_lock); 436 446 437 /* 447 438 * Register this thread in the system-wide list. 448 439 */ 449 spinlock_lock(&threads_lock); 450 avltree_insert(&threads_tree, &t->threads_tree_node); 451 spinlock_unlock(&threads_lock); 452 453 interrupts_restore(ipl); 440 avltree_insert(&threads_tree, &thread->threads_tree_node); 441 irq_spinlock_unlock(&threads_lock, true); 454 442 } 455 443 456 444 /** Terminate thread. 457 445 * 458 * End current thread execution and switch it to the exiting state. All pending 459 * timeouts are executed. 446 * End current thread execution and switch it to the exiting state. 447 * All pending timeouts are executed. 
448 * 460 449 */ 461 450 void thread_exit(void) 462 451 { 463 ipl_t ipl;464 465 452 if (THREAD->flags & THREAD_FLAG_USPACE) { 466 453 #ifdef CONFIG_UDEBUG … … 475 462 * can only be created by threads of the same task. 476 463 * We are safe to perform cleanup. 464 * 477 465 */ 478 466 ipc_cleanup(); … … 481 469 } 482 470 } 483 471 484 472 restart: 485 ipl = interrupts_disable(); 486 spinlock_lock(&THREAD->lock); 487 if (THREAD->timeout_pending) { 488 /* busy waiting for timeouts in progress */ 489 spinlock_unlock(&THREAD->lock); 490 interrupts_restore(ipl); 473 irq_spinlock_lock(&THREAD->lock, true); 474 if (THREAD->timeout_pending) { 475 /* Busy waiting for timeouts in progress */ 476 irq_spinlock_unlock(&THREAD->lock, true); 491 477 goto restart; 492 478 } 493 479 494 480 THREAD->state = Exiting; 495 spinlock_unlock(&THREAD->lock); 481 irq_spinlock_unlock(&THREAD->lock, true); 482 496 483 scheduler(); 497 484 498 485 /* Not reached */ 499 while (1) 500 ; 501 } 502 486 while (true); 487 } 503 488 504 489 /** Thread sleep … … 515 500 while (sec > 0) { 516 501 uint32_t period = (sec > 1000) ? 1000 : sec; 517 502 518 503 thread_usleep(period * 1000000); 519 504 sec -= period; … … 523 508 /** Wait for another thread to exit. 524 509 * 525 * @param t Thread to join on exit.526 * @param usec Timeout in microseconds.527 * @param flags Mode of operation.510 * @param thread Thread to join on exit. 511 * @param usec Timeout in microseconds. 512 * @param flags Mode of operation. 528 513 * 529 514 * @return An error code from errno.h or an error code from synch.h. 530 */ 531 int thread_join_timeout(thread_t *t, uint32_t usec, int flags) 532 { 533 ipl_t ipl; 534 int rc; 535 536 if (t == THREAD) 515 * 516 */ 517 int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags) 518 { 519 if (thread == THREAD) 537 520 return EINVAL; 538 521 539 522 /* 540 523 * Since thread join can only be called once on an undetached thread, … … 542 525 */ 543 526 544 ipl = interrupts_disable(); 545 spinlock_lock(&t->lock); 546 ASSERT(!t->detached); 547 spinlock_unlock(&t->lock); 548 interrupts_restore(ipl); 549 550 rc = waitq_sleep_timeout(&t->join_wq, usec, flags); 551 552 return rc; 527 irq_spinlock_lock(&thread->lock, true); 528 ASSERT(!thread->detached); 529 irq_spinlock_unlock(&thread->lock, true); 530 531 return waitq_sleep_timeout(&thread->join_wq, usec, flags); 553 532 } 554 533 … … 558 537 * state, deallocate its resources. 559 538 * 560 * @param t Thread to be detached. 561 */ 562 void thread_detach(thread_t *t) 563 { 564 ipl_t ipl; 565 539 * @param thread Thread to be detached. 540 * 541 */ 542 void thread_detach(thread_t *thread) 543 { 566 544 /* 567 545 * Since the thread is expected not to be already detached, 568 546 * pointer to it must be still valid. 569 547 */ 570 ipl = interrupts_disable(); 571 spinlock_lock(&t->lock); 572 ASSERT(!t->detached); 573 if (t->state == Lingering) { 574 thread_destroy(t); /* unlocks &t->lock */ 575 interrupts_restore(ipl); 548 irq_spinlock_lock(&thread->lock, true); 549 ASSERT(!thread->detached); 550 551 if (thread->state == Lingering) { 552 /* 553 * Unlock &thread->lock and restore 554 * interrupts in thread_destroy(). 
555 */ 556 thread_destroy(thread, true); 576 557 return; 577 558 } else { 578 t ->detached = true;559 thread->detached = true; 579 560 } 580 spinlock_unlock(&t->lock);581 i nterrupts_restore(ipl);561 562 irq_spinlock_unlock(&thread->lock, true); 582 563 } 583 564 … … 601 582 * 602 583 * Register a function and its argument to be executed 603 * on next context switch to the current thread. 584 * on next context switch to the current thread. Must 585 * be called with interrupts disabled. 604 586 * 605 587 * @param call_me Out-of-context function. … … 609 591 void thread_register_call_me(void (* call_me)(void *), void *call_me_with) 610 592 { 611 ipl_t ipl; 612 613 ipl = interrupts_disable(); 614 spinlock_lock(&THREAD->lock); 593 irq_spinlock_lock(&THREAD->lock, false); 615 594 THREAD->call_me = call_me; 616 595 THREAD->call_me_with = call_me_with; 617 spinlock_unlock(&THREAD->lock); 618 interrupts_restore(ipl); 596 irq_spinlock_unlock(&THREAD->lock, false); 619 597 } 620 598 621 599 static bool thread_walker(avltree_node_t *node, void *arg) 622 600 { 623 thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);601 thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node); 624 602 625 603 uint64_t ucycles, kcycles; 626 604 char usuffix, ksuffix; 627 order_suffix(t ->ucycles, &ucycles, &usuffix);628 order_suffix(t ->kcycles, &kcycles, &ksuffix);629 605 order_suffix(thread->ucycles, &ucycles, &usuffix); 606 order_suffix(thread->kcycles, &kcycles, &ksuffix); 607 630 608 #ifdef __32_BITS__ 631 609 printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" 632 PRIu64 "%c %9" PRIu64 "%c ", t ->tid, t->name, t,633 thread_states[t ->state], t->task, t->task->context, t->thread_code,634 t ->kstack, ucycles, usuffix, kcycles, ksuffix);635 #endif 636 610 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread, 611 thread_states[thread->state], thread->task, thread->task->context, 612 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix); 613 #endif 614 637 615 #ifdef __64_BITS__ 638 616 printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" 639 PRIu64 "%c %9" PRIu64 "%c ", t ->tid, t->name, t,640 thread_states[t ->state], t->task, t->task->context, t->thread_code,641 t ->kstack, ucycles, usuffix, kcycles, ksuffix);642 #endif 643 644 if (t ->cpu)645 printf("%-4u", t ->cpu->id);617 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread, 618 thread_states[thread->state], thread->task, thread->task->context, 619 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix); 620 #endif 621 622 if (thread->cpu) 623 printf("%-4u", thread->cpu->id); 646 624 else 647 625 printf("none"); 648 649 if (t ->state == Sleeping) {626 627 if (thread->state == Sleeping) { 650 628 #ifdef __32_BITS__ 651 printf(" %10p", t ->sleep_queue);652 #endif 653 629 printf(" %10p", thread->sleep_queue); 630 #endif 631 654 632 #ifdef __64_BITS__ 655 printf(" %18p", t ->sleep_queue);633 printf(" %18p", thread->sleep_queue); 656 634 #endif 657 635 } 658 636 659 637 printf("\n"); 660 638 661 639 return true; 662 640 } 663 641 664 /** Print list of threads debug info */ 642 /** Print list of threads debug info 643 * 644 */ 665 645 void thread_print_list(void) 666 646 { 667 ipl_t ipl;668 669 647 /* Messing with thread structures, avoid deadlock */ 670 ipl = interrupts_disable(); 671 spinlock_lock(&threads_lock); 672 673 #ifdef __32_BITS__ 648 irq_spinlock_lock(&threads_lock, true); 649 650 #ifdef __32_BITS__ 674 651 printf("tid name address 
state task " 675 652 "ctx code stack ucycles kcycles cpu " … … 679 656 "----------\n"); 680 657 #endif 681 658 682 659 #ifdef __64_BITS__ 683 660 printf("tid name address state task " … … 688 665 "------------------\n"); 689 666 #endif 690 667 691 668 avltree_walk(&threads_tree, thread_walker, NULL); 692 693 spinlock_unlock(&threads_lock); 694 interrupts_restore(ipl); 669 670 irq_spinlock_unlock(&threads_lock, true); 695 671 } 696 672 … … 700 676 * interrupts must be already disabled. 701 677 * 702 * @param t Pointer to thread.678 * @param thread Pointer to thread. 703 679 * 704 680 * @return True if thread t is known to the system, false otherwise. 705 * /706 bool thread_exists(thread_t *t) 707 { 708 avltree_node_t *node; 709 710 node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));681 * 682 */ 683 bool thread_exists(thread_t *thread) 684 { 685 avltree_node_t *node = 686 avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread)); 711 687 712 688 return node != NULL; … … 718 694 * interrupts must be already disabled. 719 695 * 720 * @param user True to update user accounting, false for kernel. 696 * @param user True to update user accounting, false for kernel. 697 * 721 698 */ 722 699 void thread_update_accounting(bool user) 723 700 { 724 701 uint64_t time = get_cycle(); 725 if (user) { 702 703 if (user) 726 704 THREAD->ucycles += time - THREAD->last_cycle; 727 } else {705 else 728 706 THREAD->kcycles += time - THREAD->last_cycle; 729 }707 730 708 THREAD->last_cycle = time; 731 709 } … … 774 752 size_t name_len, thread_id_t *uspace_thread_id) 775 753 { 776 thread_t *t;777 char namebuf[THREAD_NAME_BUFLEN];778 uspace_arg_t *kernel_uarg;779 int rc;780 781 754 if (name_len > THREAD_NAME_BUFLEN - 1) 782 755 name_len = THREAD_NAME_BUFLEN - 1; 783 784 rc = copy_from_uspace(namebuf, uspace_name, name_len); 756 757 char namebuf[THREAD_NAME_BUFLEN]; 758 int rc = copy_from_uspace(namebuf, uspace_name, name_len); 785 759 if (rc != 0) 786 760 return (unative_t) rc; 787 761 788 762 namebuf[name_len] = 0; 789 763 790 764 /* 791 765 * In case of failure, kernel_uarg will be deallocated in this function. 792 766 * In case of success, kernel_uarg will be freed in uinit(). 767 * 793 768 */ 794 kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); 769 uspace_arg_t *kernel_uarg = 770 (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); 795 771 796 772 rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t)); … … 799 775 return (unative_t) rc; 800 776 } 801 802 t = thread_create(uinit, kernel_uarg, TASK,777 778 thread_t *thread = thread_create(uinit, kernel_uarg, TASK, 803 779 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false); 804 if (t ) {780 if (thread) { 805 781 if (uspace_thread_id != NULL) { 806 int rc; 807 808 rc = copy_to_uspace(uspace_thread_id, &t->tid, 809 sizeof(t->tid)); 782 rc = copy_to_uspace(uspace_thread_id, &thread->tid, 783 sizeof(thread->tid)); 810 784 if (rc != 0) { 811 785 /* … … 813 787 * has already been created. We need to undo its 814 788 * creation now. 789 * 815 790 */ 816 791 817 792 /* 818 793 * The new thread structure is initialized, but … … 820 795 * We can safely deallocate it. 821 796 */ 822 slab_free(thread_slab, t );823 824 797 slab_free(thread_slab, thread); 798 free(kernel_uarg); 799 825 800 return (unative_t) rc; 826 801 } 827 802 } 803 828 804 #ifdef CONFIG_UDEBUG 829 805 /* … … 833 809 * THREAD_B events for threads that already existed 834 810 * and could be detected with THREAD_READ before. 
811 * 835 812 */ 836 udebug_thread_b_event_attach(t , TASK);813 udebug_thread_b_event_attach(thread, TASK); 837 814 #else 838 thread_attach(t , TASK);839 #endif 840 thread_ready(t );841 815 thread_attach(thread, TASK); 816 #endif 817 thread_ready(thread); 818 842 819 return 0; 843 820 } else 844 821 free(kernel_uarg); 845 822 846 823 return (unative_t) ENOMEM; 847 824 } … … 853 830 { 854 831 thread_exit(); 832 855 833 /* Unreachable */ 856 834 return 0; … … 863 841 * 864 842 * @return 0 on success or an error code from @ref errno.h. 843 * 865 844 */ 866 845 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id) … … 869 848 * No need to acquire lock on THREAD because tid 870 849 * remains constant for the lifespan of the thread. 850 * 871 851 */ 872 852 return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid, -
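Note: thread_destroy() gains an irq_res parameter so that the final unlock can decide whether to restore interrupts, and its body now walks a chain of locks via irq_spinlock_pass() instead of separate unlock/lock pairs. A condensed sketch of the new detach path (taken from the hunk above; the surrounding assertions and the fpu_owner handling are omitted):

    /* Held on entry: thread->lock. Hand it over along the chain
     * thread->lock -> threads_lock -> thread->task->lock. */
    irq_spinlock_pass(&thread->lock, &threads_lock);
    avltree_delete(&threads_tree, &thread->threads_tree_node);

    irq_spinlock_pass(&threads_lock, &thread->task->lock);
    list_remove(&thread->th_link);

    /* irq_res tells the last unlock whether to restore interrupts;
     * for example, thread_detach() calls thread_destroy(thread, true). */
    irq_spinlock_unlock(&thread->task->lock, irq_res);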
kernel/generic/src/security/cap.c
r666f492 rda1bafb 27 27 */ 28 28 29 /** @addtogroup generic 29 /** @addtogroup generic 30 30 * @{ 31 31 */ 32 32 33 33 /** 34 * @file 35 * @brief 34 * @file cap.c 35 * @brief Capabilities control. 36 36 * 37 37 * @see cap.h 38 38 */ 39 39 40 40 #include <security/cap.h> 41 41 #include <proc/task.h> … … 48 48 /** Set capabilities. 49 49 * 50 * @param t Task whose capabilities are to be changed.50 * @param task Task whose capabilities are to be changed. 51 51 * @param caps New set of capabilities. 52 * 52 53 */ 53 void cap_set(task_t *t , cap_t caps)54 void cap_set(task_t *task, cap_t caps) 54 55 { 55 ipl_t ipl; 56 57 ipl = interrupts_disable(); 58 spinlock_lock(&t->lock); 59 60 t->capabilities = caps; 61 62 spinlock_unlock(&t->lock); 63 interrupts_restore(ipl); 56 irq_spinlock_lock(&task->lock, true); 57 task->capabilities = caps; 58 irq_spinlock_unlock(&task->lock, true); 64 59 } 65 60 66 61 /** Get capabilities. 67 62 * 68 * @param t Task whose capabilities are to be returned. 63 * @param task Task whose capabilities are to be returned. 64 * 69 65 * @return Task's capabilities. 66 * 70 67 */ 71 cap_t cap_get(task_t *t )68 cap_t cap_get(task_t *task) 72 69 { 73 ipl_t ipl; 74 cap_t caps; 75 76 ipl = interrupts_disable(); 77 spinlock_lock(&t->lock); 78 79 caps = t->capabilities; 80 81 spinlock_unlock(&t->lock); 82 interrupts_restore(ipl); 70 irq_spinlock_lock(&task->lock, true); 71 cap_t caps = task->capabilities; 72 irq_spinlock_unlock(&task->lock, true); 83 73 84 74 return caps; … … 93 83 * 94 84 * @return Zero on success or an error code from @ref errno.h. 85 * 95 86 */ 96 87 unative_t sys_cap_grant(sysarg64_t *uspace_taskid_arg, cap_t caps) 97 88 { 98 sysarg64_t taskid_arg;99 task_t *t;100 ipl_t ipl;101 int rc;102 103 89 if (!(cap_get(TASK) & CAP_CAP)) 104 90 return (unative_t) EPERM; 105 91 106 rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 92 sysarg64_t taskid_arg; 93 int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 107 94 if (rc != 0) 108 95 return (unative_t) rc; 109 110 ipl = interrupts_disable(); 111 spinlock_lock(&tasks_lock); 112 t = task_find_by_id((task_id_t) taskid_arg.value); 113 if ((!t) || (!context_check(CONTEXT, t->context))) { 114 spinlock_unlock(&tasks_lock); 115 interrupts_restore(ipl); 96 97 irq_spinlock_lock(&tasks_lock, true); 98 task_t *task = task_find_by_id((task_id_t) taskid_arg.value); 99 100 if ((!task) || (!context_check(CONTEXT, task->context))) { 101 irq_spinlock_unlock(&tasks_lock, true); 116 102 return (unative_t) ENOENT; 117 103 } 118 104 119 spinlock_lock(&t->lock);120 cap_set(t, cap_get(t) | caps);121 spinlock_unlock(&t->lock);105 irq_spinlock_lock(&task->lock, false); 106 task->capabilities |= caps; 107 irq_spinlock_unlock(&task->lock, false); 122 108 123 spinlock_unlock(&tasks_lock); 124 interrupts_restore(ipl); 109 irq_spinlock_unlock(&tasks_lock, true); 125 110 return 0; 126 111 } … … 135 120 * 136 121 * @return Zero on success or an error code from @ref errno.h. 
122 * 137 123 */ 138 124 unative_t sys_cap_revoke(sysarg64_t *uspace_taskid_arg, cap_t caps) 139 125 { 140 126 sysarg64_t taskid_arg; 141 task_t *t; 142 ipl_t ipl; 143 int rc; 144 145 rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 127 int rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); 146 128 if (rc != 0) 147 129 return (unative_t) rc; 148 149 ipl = interrupts_disable(); 150 spinlock_lock(&tasks_lock); 151 t = task_find_by_id((task_id_t) taskid_arg.value); 152 if ((!t) || (!context_check(CONTEXT, t->context))) { 153 spinlock_unlock(&tasks_lock); 154 interrupts_restore(ipl); 130 131 irq_spinlock_lock(&tasks_lock, true); 132 133 task_t *task = task_find_by_id((task_id_t) taskid_arg.value); 134 if ((!task) || (!context_check(CONTEXT, task->context))) { 135 irq_spinlock_unlock(&tasks_lock, true); 155 136 return (unative_t) ENOENT; 156 137 } 157 138 158 139 /* 159 140 * Revoking capabilities is different from granting them in that … … 161 142 * doesn't have CAP_CAP. 162 143 */ 163 if (!(cap_get(TASK) & CAP_CAP) || !(t == TASK)) { 164 spinlock_unlock(&tasks_lock); 165 interrupts_restore(ipl); 144 irq_spinlock_unlock(&TASK->lock, false); 145 146 if ((!(TASK->capabilities & CAP_CAP)) || (task != TASK)) { 147 irq_spinlock_unlock(&TASK->lock, false); 148 irq_spinlock_unlock(&tasks_lock, true); 166 149 return (unative_t) EPERM; 167 150 } 168 151 169 spinlock_lock(&t->lock); 170 cap_set(t, cap_get(t) & ~caps); 171 spinlock_unlock(&t->lock); 172 173 spinlock_unlock(&tasks_lock); 174 175 interrupts_restore(ipl); 152 task->capabilities &= ~caps; 153 irq_spinlock_unlock(&TASK->lock, false); 154 155 irq_spinlock_unlock(&tasks_lock, true); 176 156 return 0; 177 157 } … … 179 159 /** @} 180 160 */ 181 -
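As an illustration of the reworked accessors, the sketch below shows kernel code dropping CAP_CAP from the current task. example_drop_cap_cap() is hypothetical and not part of the changeset; it also shows why a plain cap_get()/cap_set() pair is not atomic, which is the reason sys_cap_grant() above updates task->capabilities directly while holding the task lock.

    #include <security/cap.h>     /* cap_t, cap_get(), cap_set(), CAP_CAP */
    #include <arch.h>             /* TASK (assumed location) */

    /* Illustrative only: voluntarily drop CAP_CAP from the current task. */
    static void example_drop_cap_cap(void)
    {
    	cap_t caps = cap_get(TASK);       /* takes TASK->lock internally */

    	if (caps & CAP_CAP) {
    		/*
    		 * Read-modify-write split across two calls: a concurrent
    		 * update between cap_get() and cap_set() could be lost.
    		 * Acceptable for a sketch; code that needs atomicity holds
    		 * task->lock across the update, as sys_cap_grant() does.
    		 */
    		cap_set(TASK, caps & ~CAP_CAP);
    	}
    }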
kernel/generic/src/synch/mutex.c
r666f492 rda1bafb 67 67 *
 68 68 */
 69 int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)69 int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
 70 70 {
 71 71 int rc;
-
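The change above only widens the flags parameter, but for context, this is roughly how the timeout variant is consumed. A minimal sketch, assuming the <synch/mutex.h> and <synch/synch.h> headers; example_mtx and example_locked_work() are hypothetical, while _mutex_lock_timeout(), mutex_unlock(), SYNCH_FAILED() and SYNCH_FLAGS_NONE all appear elsewhere in this changeset.

    #include <synch/mutex.h>
    #include <synch/synch.h>

    /* Assumed to have been set up once with
     * mutex_initialize(&example_mtx, MUTEX_PASSIVE). */
    static mutex_t example_mtx;

    static bool example_locked_work(void)
    {
    	/* Wait at most 100 000 us (100 ms) for the mutex. */
    	int rc = _mutex_lock_timeout(&example_mtx, 100000, SYNCH_FLAGS_NONE);
    	if (SYNCH_FAILED(rc))
    		return false;             /* timed out or interrupted */

    	/* ... critical section ... */

    	mutex_unlock(&example_mtx);
    	return true;
    }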
kernel/generic/src/synch/rwlock.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Reader/Writer locks. 36 36 * 37 37 * A reader/writer lock can be held by multiple readers at a time. … … 57 57 * each thread can block on only one rwlock at a time. 58 58 */ 59 59 60 60 #include <synch/rwlock.h> 61 61 #include <synch/spinlock.h> … … 69 69 #include <panic.h> 70 70 71 #define ALLOW_ALL 0 72 #define ALLOW_READERS_ONLY 1 73 74 static void let_others_in(rwlock_t *rwl, int readers_only); 75 static void release_spinlock(void *arg); 71 #define ALLOW_ALL 0 72 #define ALLOW_READERS_ONLY 1 76 73 77 74 /** Initialize reader/writer lock … … 80 77 * 81 78 * @param rwl Reader/Writer lock. 79 * 82 80 */ 83 81 void rwlock_initialize(rwlock_t *rwl) { 84 spinlock_initialize(&rwl->lock, "rwlock_t");82 irq_spinlock_initialize(&rwl->lock, "rwl.lock"); 85 83 mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE); 86 84 rwl->readers_in = 0; 87 85 } 88 86 87 /** Direct handoff of reader/writer lock ownership. 88 * 89 * Direct handoff of reader/writer lock ownership 90 * to waiting readers or a writer. 91 * 92 * Must be called with rwl->lock locked. 93 * Must be called with interrupts_disable()'d. 94 * 95 * @param rwl Reader/Writer lock. 96 * @param readers_only See the description below. 97 * 98 * If readers_only is false: (unlock scenario) 99 * Let the first sleeper on 'exclusive' mutex in, no matter 100 * whether it is a reader or a writer. If there are more leading 101 * readers in line, let each of them in. 102 * 103 * Otherwise: (timeout scenario) 104 * Let all leading readers in. 105 * 106 */ 107 static void let_others_in(rwlock_t *rwl, int readers_only) 108 { 109 rwlock_type_t type = RWLOCK_NONE; 110 thread_t *thread = NULL; 111 bool one_more = true; 112 113 irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false); 114 115 if (!list_empty(&rwl->exclusive.sem.wq.head)) 116 thread = list_get_instance(rwl->exclusive.sem.wq.head.next, 117 thread_t, wq_link); 118 119 do { 120 if (thread) { 121 irq_spinlock_lock(&thread->lock, false); 122 type = thread->rwlock_holder_type; 123 irq_spinlock_unlock(&thread->lock, false); 124 } 125 126 /* 127 * If readers_only is true, we wake all leading readers 128 * if and only if rwl is locked by another reader. 129 * Assumption: readers_only ==> rwl->readers_in 130 * 131 */ 132 if ((readers_only) && (type != RWLOCK_READER)) 133 break; 134 135 if (type == RWLOCK_READER) { 136 /* 137 * Waking up a reader. 138 * We are responsible for incrementing rwl->readers_in 139 * for it. 140 * 141 */ 142 rwl->readers_in++; 143 } 144 145 /* 146 * Only the last iteration through this loop can increment 147 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding 148 * iterations will wake up a thread. 149 * 150 */ 151 152 /* 153 * We call the internal version of waitq_wakeup, which 154 * relies on the fact that the waitq is already locked. 
155 * 156 */ 157 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); 158 159 thread = NULL; 160 if (!list_empty(&rwl->exclusive.sem.wq.head)) { 161 thread = list_get_instance(rwl->exclusive.sem.wq.head.next, 162 thread_t, wq_link); 163 164 if (thread) { 165 irq_spinlock_lock(&thread->lock, false); 166 if (thread->rwlock_holder_type != RWLOCK_READER) 167 one_more = false; 168 irq_spinlock_unlock(&thread->lock, false); 169 } 170 } 171 } while ((type == RWLOCK_READER) && (thread) && (one_more)); 172 173 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 174 } 175 89 176 /** Acquire reader/writer lock for reading 90 177 * … … 92 179 * Timeout and willingness to block may be specified. 93 180 * 94 * @param rwl Reader/Writer lock.95 * @param usec Timeout in microseconds.181 * @param rwl Reader/Writer lock. 182 * @param usec Timeout in microseconds. 96 183 * @param flags Specify mode of operation. 97 184 * … … 100 187 * 101 188 * @return See comment for waitq_sleep_timeout(). 102 */ 103 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) 104 { 105 ipl_t ipl; 106 int rc; 107 108 ipl = interrupts_disable(); 109 spinlock_lock(&THREAD->lock); 189 * 190 */ 191 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags) 192 { 193 irq_spinlock_lock(&THREAD->lock, true); 110 194 THREAD->rwlock_holder_type = RWLOCK_WRITER; 111 spinlock_unlock(&THREAD->lock); 112 interrupts_restore(ipl); 113 195 irq_spinlock_unlock(&THREAD->lock, true); 196 114 197 /* 115 198 * Writers take the easy part. 116 199 * They just need to acquire the exclusive mutex. 200 * 117 201 */ 118 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);202 int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 119 203 if (SYNCH_FAILED(rc)) { 120 121 204 /* 122 205 * Lock operation timed out or was interrupted. 123 206 * The state of rwl is UNKNOWN at this point. 124 207 * No claims about its holder can be made. 125 * /126 127 i pl = interrupts_disable();128 spinlock_lock(&rwl->lock);208 * 209 */ 210 irq_spinlock_lock(&rwl->lock, true); 211 129 212 /* 130 213 * Now when rwl is locked, we can inspect it again. 131 214 * If it is held by some readers already, we can let 132 215 * readers from the head of the wait queue in. 216 * 133 217 */ 134 218 if (rwl->readers_in) 135 219 let_others_in(rwl, ALLOW_READERS_ONLY); 136 spinlock_unlock(&rwl->lock);137 i nterrupts_restore(ipl);220 221 irq_spinlock_unlock(&rwl->lock, true); 138 222 } 139 223 140 224 return rc; 225 } 226 227 /** Release spinlock callback 228 * 229 * This is a callback function invoked from the scheduler. 230 * The callback is registered in _rwlock_read_lock_timeout(). 231 * 232 * @param arg Spinlock. 233 * 234 */ 235 static void release_spinlock(void *arg) 236 { 237 if (arg != NULL) 238 irq_spinlock_unlock((irq_spinlock_t *) arg, false); 141 239 } 142 240 … … 146 244 * Timeout and willingness to block may be specified. 147 245 * 148 * @param rwl Reader/Writer lock.149 * @param usec Timeout in microseconds.246 * @param rwl Reader/Writer lock. 247 * @param usec Timeout in microseconds. 150 248 * @param flags Select mode of operation. 151 249 * … … 154 252 * 155 253 * @return See comment for waitq_sleep_timeout(). 
156 */ 157 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags) 158 { 159 int rc; 160 ipl_t ipl; 161 162 ipl = interrupts_disable(); 163 spinlock_lock(&THREAD->lock); 254 * 255 */ 256 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags) 257 { 258 /* 259 * Since the locking scenarios get a little bit too 260 * complicated, we do not rely on internal irq_spinlock_t 261 * interrupt disabling logic here and control interrupts 262 * manually. 263 * 264 */ 265 ipl_t ipl = interrupts_disable(); 266 267 irq_spinlock_lock(&THREAD->lock, false); 164 268 THREAD->rwlock_holder_type = RWLOCK_READER; 165 spinlock_unlock(&THREAD->lock); 166 167 spinlock_lock(&rwl->lock); 168 269 irq_spinlock_pass(&THREAD->lock, &rwl->lock); 270 169 271 /* 170 272 * Find out whether we can get what we want without blocking. 273 * 171 274 */ 172 rc = mutex_trylock(&rwl->exclusive);275 int rc = mutex_trylock(&rwl->exclusive); 173 276 if (SYNCH_FAILED(rc)) { 174 175 277 /* 176 278 * 'exclusive' mutex is being held by someone else. … … 178 280 * else waiting for it, we can enter the critical 179 281 * section. 180 */ 181 282 * 283 */ 284 182 285 if (rwl->readers_in) { 183 spinlock_lock(&rwl->exclusive.sem.wq.lock);286 irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false); 184 287 if (list_empty(&rwl->exclusive.sem.wq.head)) { 185 288 /* 186 289 * We can enter. 187 290 */ 188 spinlock_unlock(&rwl->exclusive.sem.wq.lock);291 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 189 292 goto shortcut; 190 293 } 191 spinlock_unlock(&rwl->exclusive.sem.wq.lock);294 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false); 192 295 } 193 296 194 297 /* 195 298 * In order to prevent a race condition when a reader … … 197 300 * we register a function to unlock rwl->lock 198 301 * after this thread is put asleep. 199 */ 200 #ifdef CONFIG_SMP 302 * 303 */ 304 #ifdef CONFIG_SMP 201 305 thread_register_call_me(release_spinlock, &rwl->lock); 202 306 #else 203 307 thread_register_call_me(release_spinlock, NULL); 204 205 308 #endif 309 206 310 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 207 311 switch (rc) { … … 209 313 /* 210 314 * release_spinlock() wasn't called 315 * 211 316 */ 212 317 thread_register_call_me(NULL, NULL); 213 spinlock_unlock(&rwl->lock);318 irq_spinlock_unlock(&rwl->lock, false); 214 319 case ESYNCH_TIMEOUT: 215 320 case ESYNCH_INTERRUPTED: … … 217 322 * The sleep timed out. 218 323 * We just restore interrupt priority level. 324 * 219 325 */ 220 case ESYNCH_OK_BLOCKED: 326 case ESYNCH_OK_BLOCKED: 221 327 /* 222 328 * We were woken with rwl->readers_in already … … 228 334 * 'readers_in' is incremented. Same time means both 229 335 * events happen atomically when rwl->lock is held.) 336 * 230 337 */ 231 338 interrupts_restore(ipl); … … 240 347 return rc; 241 348 } 242 349 243 350 shortcut: 244 245 351 /* 246 352 * We can increment readers_in only if we didn't go to sleep. 247 353 * For sleepers, rwlock_let_others_in() will do the job. 354 * 248 355 */ 249 356 rwl->readers_in++; 250 251 spinlock_unlock(&rwl->lock); 357 irq_spinlock_unlock(&rwl->lock, false); 252 358 interrupts_restore(ipl); 253 359 254 360 return ESYNCH_OK_ATOMIC; 255 361 } … … 262 368 * 263 369 * @param rwl Reader/Writer lock. 
370 * 264 371 */ 265 372 void rwlock_write_unlock(rwlock_t *rwl) 266 373 { 267 ipl_t ipl; 268 269 ipl = interrupts_disable(); 270 spinlock_lock(&rwl->lock); 374 irq_spinlock_lock(&rwl->lock, true); 271 375 let_others_in(rwl, ALLOW_ALL); 272 spinlock_unlock(&rwl->lock); 273 interrupts_restore(ipl); 274 376 irq_spinlock_unlock(&rwl->lock, true); 275 377 } 276 378 … … 283 385 * 284 386 * @param rwl Reader/Writer lock. 387 * 285 388 */ 286 389 void rwlock_read_unlock(rwlock_t *rwl) 287 390 { 288 ipl_t ipl; 289 290 ipl = interrupts_disable(); 291 spinlock_lock(&rwl->lock); 391 irq_spinlock_lock(&rwl->lock, true); 392 292 393 if (!--rwl->readers_in) 293 394 let_others_in(rwl, ALLOW_ALL); 294 spinlock_unlock(&rwl->lock); 295 interrupts_restore(ipl); 296 } 297 298 299 /** Direct handoff of reader/writer lock ownership. 300 * 301 * Direct handoff of reader/writer lock ownership 302 * to waiting readers or a writer. 303 * 304 * Must be called with rwl->lock locked. 305 * Must be called with interrupts_disable()'d. 306 * 307 * @param rwl Reader/Writer lock. 308 * @param readers_only See the description below. 309 * 310 * If readers_only is false: (unlock scenario) 311 * Let the first sleeper on 'exclusive' mutex in, no matter 312 * whether it is a reader or a writer. If there are more leading 313 * readers in line, let each of them in. 314 * 315 * Otherwise: (timeout scenario) 316 * Let all leading readers in. 317 */ 318 void let_others_in(rwlock_t *rwl, int readers_only) 319 { 320 rwlock_type_t type = RWLOCK_NONE; 321 thread_t *t = NULL; 322 bool one_more = true; 323 324 spinlock_lock(&rwl->exclusive.sem.wq.lock); 325 326 if (!list_empty(&rwl->exclusive.sem.wq.head)) 327 t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, 328 wq_link); 329 do { 330 if (t) { 331 spinlock_lock(&t->lock); 332 type = t->rwlock_holder_type; 333 spinlock_unlock(&t->lock); 334 } 335 336 /* 337 * If readers_only is true, we wake all leading readers 338 * if and only if rwl is locked by another reader. 339 * Assumption: readers_only ==> rwl->readers_in 340 */ 341 if (readers_only && (type != RWLOCK_READER)) 342 break; 343 344 345 if (type == RWLOCK_READER) { 346 /* 347 * Waking up a reader. 348 * We are responsible for incrementing rwl->readers_in 349 * for it. 350 */ 351 rwl->readers_in++; 352 } 353 354 /* 355 * Only the last iteration through this loop can increment 356 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding 357 * iterations will wake up a thread. 358 */ 359 /* We call the internal version of waitq_wakeup, which 360 * relies on the fact that the waitq is already locked. 361 */ 362 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST); 363 364 t = NULL; 365 if (!list_empty(&rwl->exclusive.sem.wq.head)) { 366 t = list_get_instance(rwl->exclusive.sem.wq.head.next, 367 thread_t, wq_link); 368 if (t) { 369 spinlock_lock(&t->lock); 370 if (t->rwlock_holder_type != RWLOCK_READER) 371 one_more = false; 372 spinlock_unlock(&t->lock); 373 } 374 } 375 } while ((type == RWLOCK_READER) && t && one_more); 376 377 spinlock_unlock(&rwl->exclusive.sem.wq.lock); 378 } 379 380 /** Release spinlock callback 381 * 382 * This is a callback function invoked from the scheduler. 383 * The callback is registered in _rwlock_read_lock_timeout(). 384 * 385 * @param arg Spinlock. 386 */ 387 void release_spinlock(void *arg) 388 { 389 spinlock_unlock((spinlock_t *) arg); 395 396 irq_spinlock_unlock(&rwl->lock, true); 390 397 } 391 398 -
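A short usage sketch of the reworked reader/writer lock, built only from entry points visible above; the blocking acquisitions are expressed through the _timeout() variants with SYNCH_NO_TIMEOUT. The example_* identifiers are hypothetical and the header paths are assumptions.

    #include <synch/rwlock.h>
    #include <synch/synch.h>

    static rwlock_t example_rwl;          /* rwlock_initialize(&example_rwl)
                                             runs once during init */
    static int example_shared_value;

    static int example_read(void)
    {
    	/* Cannot fail with SYNCH_NO_TIMEOUT and no interruptible flag. */
    	_rwlock_read_lock_timeout(&example_rwl, SYNCH_NO_TIMEOUT,
    	    SYNCH_FLAGS_NONE);

    	int value = example_shared_value; /* several readers may be here */

    	rwlock_read_unlock(&example_rwl);
    	return value;
    }

    static void example_write(int value)
    {
    	_rwlock_write_lock_timeout(&example_rwl, SYNCH_NO_TIMEOUT,
    	    SYNCH_FLAGS_NONE);

    	example_shared_value = value;     /* exclusive access */

    	rwlock_write_unlock(&example_rwl);
    }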
kernel/generic/src/synch/semaphore.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Semaphores. 36 36 */ 37 37 … … 47 47 * Initialize semaphore. 48 48 * 49 * @param s Semaphore.49 * @param sem Semaphore. 50 50 * @param val Maximal number of threads allowed to enter critical section. 51 * 51 52 */ 52 void semaphore_initialize(semaphore_t *s , int val)53 void semaphore_initialize(semaphore_t *sem, int val) 53 54 { 54 ipl_t ipl;55 waitq_initialize(&sem->wq); 55 56 56 waitq_initialize(&s->wq); 57 58 ipl = interrupts_disable(); 59 60 spinlock_lock(&s->wq.lock); 61 s->wq.missed_wakeups = val; 62 spinlock_unlock(&s->wq.lock); 63 64 interrupts_restore(ipl); 57 irq_spinlock_lock(&sem->wq.lock, true); 58 sem->wq.missed_wakeups = val; 59 irq_spinlock_unlock(&sem->wq.lock, true); 65 60 } 66 61 … … 70 65 * Conditional mode and mode with timeout can be requested. 71 66 * 72 * @param s Semaphore.73 * @param usec Timeout in microseconds.67 * @param sem Semaphore. 68 * @param usec Timeout in microseconds. 74 69 * @param flags Select mode of operation. 75 70 * … … 78 73 * 79 74 * @return See comment for waitq_sleep_timeout(). 75 * 80 76 */ 81 int _semaphore_down_timeout(semaphore_t *s , uint32_t usec,int flags)77 int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags) 82 78 { 83 return waitq_sleep_timeout(&s ->wq, usec, flags);79 return waitq_sleep_timeout(&sem->wq, usec, flags); 84 80 } 85 81 … … 89 85 * 90 86 * @param s Semaphore. 87 * 91 88 */ 92 void semaphore_up(semaphore_t *s )89 void semaphore_up(semaphore_t *sem) 93 90 { 94 waitq_wakeup(&s ->wq, WAKEUP_FIRST);91 waitq_wakeup(&sem->wq, WAKEUP_FIRST); 95 92 } 96 93 -
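For completeness, the usual counting-semaphore pattern with the renamed parameter: a bounded pool where at most N users proceed at once. A sketch only; the example_* names are hypothetical, the called functions are the ones declared above.

    #include <synch/semaphore.h>
    #include <synch/synch.h>

    static semaphore_t example_slots;

    static void example_pool_init(void)
    {
    	/* Allow at most four concurrent users of the resource. */
    	semaphore_initialize(&example_slots, 4);
    }

    static void example_pool_use(void)
    {
    	/* Blocks until a slot is free (no timeout, not interruptible). */
    	_semaphore_down_timeout(&example_slots, SYNCH_NO_TIMEOUT,
    	    SYNCH_FLAGS_NONE);

    	/* ... use one resource slot ... */

    	semaphore_up(&example_slots);
    }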
kernel/generic/src/synch/waitq.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Wait queue. 36 36 * 37 37 * Wait queue is the basic synchronization primitive upon which all … … 41 41 * fashion. Conditional operation as well as timeouts and interruptions 42 42 * are supported. 43 * 43 44 */ 44 45 … … 56 57 #include <arch/cycle.h> 57 58 58 static void waitq_sleep_timed_out(void * data);59 static void waitq_sleep_timed_out(void *); 59 60 60 61 /** Initialize wait queue … … 62 63 * Initialize wait queue. 63 64 * 64 * @param wq Pointer to wait queue to be initialized. 65 * @param wq Pointer to wait queue to be initialized. 66 * 65 67 */ 66 68 void waitq_initialize(waitq_t *wq) 67 69 { 68 spinlock_initialize(&wq->lock, "waitq_lock");70 irq_spinlock_initialize(&wq->lock, "wq.lock"); 69 71 list_initialize(&wq->head); 70 72 wq->missed_wakeups = 0; … … 81 83 * timeout at all. 82 84 * 83 * @param data Pointer to the thread that called waitq_sleep_timeout(). 85 * @param data Pointer to the thread that called waitq_sleep_timeout(). 86 * 84 87 */ 85 88 void waitq_sleep_timed_out(void *data) 86 89 { 87 thread_t *t = (thread_t *) data; 88 waitq_t *wq; 90 thread_t *thread = (thread_t *) data; 89 91 bool do_wakeup = false; 90 92 DEADLOCK_PROBE_INIT(p_wqlock); 91 92 spinlock_lock(&threads_lock);93 if (!thread_exists(t ))93 94 irq_spinlock_lock(&threads_lock, false); 95 if (!thread_exists(thread)) 94 96 goto out; 95 97 96 98 grab_locks: 97 spinlock_lock(&t->lock); 98 if ((wq = t->sleep_queue)) { /* assignment */ 99 if (!spinlock_trylock(&wq->lock)) { 100 spinlock_unlock(&t->lock); 99 irq_spinlock_lock(&thread->lock, false); 100 101 waitq_t *wq; 102 if ((wq = thread->sleep_queue)) { /* Assignment */ 103 if (!irq_spinlock_trylock(&wq->lock)) { 104 irq_spinlock_unlock(&thread->lock, false); 101 105 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 102 goto grab_locks; /* avoid deadlock */ 103 } 104 105 list_remove(&t->wq_link); 106 t->saved_context = t->sleep_timeout_context; 106 /* Avoid deadlock */ 107 goto grab_locks; 108 } 109 110 list_remove(&thread->wq_link); 111 thread->saved_context = thread->sleep_timeout_context; 107 112 do_wakeup = true; 108 t ->sleep_queue = NULL;109 spinlock_unlock(&wq->lock);110 } 111 112 t ->timeout_pending = false;113 spinlock_unlock(&t->lock);113 thread->sleep_queue = NULL; 114 irq_spinlock_unlock(&wq->lock, false); 115 } 116 117 thread->timeout_pending = false; 118 irq_spinlock_unlock(&thread->lock, false); 114 119 115 120 if (do_wakeup) 116 thread_ready(t );117 121 thread_ready(thread); 122 118 123 out: 119 spinlock_unlock(&threads_lock);124 irq_spinlock_unlock(&threads_lock, false); 120 125 } 121 126 … … 125 130 * If the thread is not found sleeping, no action is taken. 126 131 * 127 * @param t Thread to be interrupted. 128 */ 129 void waitq_interrupt_sleep(thread_t *t) 130 { 132 * @param thread Thread to be interrupted. 
133 * 134 */ 135 void waitq_interrupt_sleep(thread_t *thread) 136 { 137 bool do_wakeup = false; 138 DEADLOCK_PROBE_INIT(p_wqlock); 139 140 irq_spinlock_lock(&threads_lock, true); 141 if (!thread_exists(thread)) 142 goto out; 143 144 grab_locks: 145 irq_spinlock_lock(&thread->lock, false); 146 131 147 waitq_t *wq; 132 bool do_wakeup = false; 133 ipl_t ipl; 134 DEADLOCK_PROBE_INIT(p_wqlock); 135 136 ipl = interrupts_disable(); 137 spinlock_lock(&threads_lock); 138 if (!thread_exists(t)) 139 goto out; 140 141 grab_locks: 142 spinlock_lock(&t->lock); 143 if ((wq = t->sleep_queue)) { /* assignment */ 144 if (!(t->sleep_interruptible)) { 148 if ((wq = thread->sleep_queue)) { /* Assignment */ 149 if (!(thread->sleep_interruptible)) { 145 150 /* 146 151 * The sleep cannot be interrupted. 152 * 147 153 */ 148 spinlock_unlock(&t->lock);154 irq_spinlock_unlock(&thread->lock, false); 149 155 goto out; 150 156 } 151 152 if (! spinlock_trylock(&wq->lock)) {153 spinlock_unlock(&t->lock);157 158 if (!irq_spinlock_trylock(&wq->lock)) { 159 irq_spinlock_unlock(&thread->lock, false); 154 160 DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD); 155 goto grab_locks; /* avoid deadlock */ 156 } 157 158 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 159 t->timeout_pending = false; 160 161 list_remove(&t->wq_link); 162 t->saved_context = t->sleep_interruption_context; 161 /* Avoid deadlock */ 162 goto grab_locks; 163 } 164 165 if ((thread->timeout_pending) && 166 (timeout_unregister(&thread->sleep_timeout))) 167 thread->timeout_pending = false; 168 169 list_remove(&thread->wq_link); 170 thread->saved_context = thread->sleep_interruption_context; 163 171 do_wakeup = true; 164 t ->sleep_queue = NULL;165 spinlock_unlock(&wq->lock);166 } 167 spinlock_unlock(&t->lock);168 172 thread->sleep_queue = NULL; 173 irq_spinlock_unlock(&wq->lock, false); 174 } 175 irq_spinlock_unlock(&thread->lock, false); 176 169 177 if (do_wakeup) 170 thread_ready(t );171 178 thread_ready(thread); 179 172 180 out: 173 spinlock_unlock(&threads_lock); 174 interrupts_restore(ipl); 181 irq_spinlock_unlock(&threads_lock, true); 175 182 } 176 183 … … 180 187 * is sleeping interruptibly. 181 188 * 182 * @param wq Pointer to wait queue. 189 * @param wq Pointer to wait queue. 
190 * 183 191 */ 184 192 void waitq_unsleep(waitq_t *wq) 185 193 { 186 ipl_t ipl; 187 188 ipl = interrupts_disable(); 189 spinlock_lock(&wq->lock); 190 194 irq_spinlock_lock(&wq->lock, true); 195 191 196 if (!list_empty(&wq->head)) { 192 thread_t *t; 193 194 t = list_get_instance(wq->head.next, thread_t, wq_link); 195 spinlock_lock(&t->lock); 196 ASSERT(t->sleep_interruptible); 197 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 198 t->timeout_pending = false; 199 list_remove(&t->wq_link); 200 t->saved_context = t->sleep_interruption_context; 201 t->sleep_queue = NULL; 202 spinlock_unlock(&t->lock); 203 thread_ready(t); 204 } 205 206 spinlock_unlock(&wq->lock); 207 interrupts_restore(ipl); 197 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 198 199 irq_spinlock_lock(&thread->lock, false); 200 201 ASSERT(thread->sleep_interruptible); 202 203 if ((thread->timeout_pending) && 204 (timeout_unregister(&thread->sleep_timeout))) 205 thread->timeout_pending = false; 206 207 list_remove(&thread->wq_link); 208 thread->saved_context = thread->sleep_interruption_context; 209 thread->sleep_queue = NULL; 210 211 irq_spinlock_unlock(&thread->lock, false); 212 thread_ready(thread); 213 } 214 215 irq_spinlock_unlock(&wq->lock, true); 208 216 } 209 217 … … 221 229 * and all the *_timeout() functions use it. 222 230 * 223 * @param wq 224 * @param usec 225 * @param flags 231 * @param wq Pointer to wait queue. 232 * @param usec Timeout in microseconds. 233 * @param flags Specify mode of the sleep. 226 234 * 227 235 * The sleep can be interrupted only if the 228 236 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 229 * 237 * 230 238 * If usec is greater than zero, regardless of the value of the 231 239 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either 232 * timeout, interruption or wakeup comes. 240 * timeout, interruption or wakeup comes. 233 241 * 234 242 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, … … 238 246 * call will immediately return, reporting either success or failure. 239 247 * 240 * @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, 241 * ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and 242 * ESYNCH_OK_BLOCKED. 243 * 244 * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of 245 * the call there was no pending wakeup. 246 * 247 * @li ESYNCH_TIMEOUT means that the sleep timed out. 248 * 249 * @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. 250 * 251 * @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was 252 * a pending wakeup at the time of the call. The caller was not put 253 * asleep at all. 254 * 255 * @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was 256 * attempted. 257 */ 258 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags) 259 { 260 ipl_t ipl; 261 int rc; 262 248 * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the 249 * time of the call there was no pending wakeup 250 * @return ESYNCH_TIMEOUT, meaning that the sleep timed out. 251 * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping 252 * thread. 253 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there 254 * was a pending wakeup at the time of the call. The caller was not put 255 * asleep at all. 256 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep 257 * was attempted. 
258 * 259 */ 260 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags) 261 { 263 262 ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 264 263 265 ipl = waitq_sleep_prepare(wq);266 rc = waitq_sleep_timeout_unsafe(wq, usec, flags);264 ipl_t ipl = waitq_sleep_prepare(wq); 265 int rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 267 266 waitq_sleep_finish(wq, rc, ipl); 268 267 return rc; … … 274 273 * and interrupts disabled. 275 274 * 276 * @param wq Wait queue. 277 * 278 * @return Interrupt level as it existed on entry to this function. 275 * @param wq Wait queue. 276 * 277 * @return Interrupt level as it existed on entry to this function. 278 * 279 279 */ 280 280 ipl_t waitq_sleep_prepare(waitq_t *wq) … … 284 284 restart: 285 285 ipl = interrupts_disable(); 286 287 if (THREAD) { /* needed during system initiailzation */286 287 if (THREAD) { /* Needed during system initiailzation */ 288 288 /* 289 289 * Busy waiting for a delayed timeout. … … 292 292 * Simply, the thread is not allowed to go to sleep if 293 293 * there are timeouts in progress. 294 * 294 295 */ 295 spinlock_lock(&THREAD->lock); 296 irq_spinlock_lock(&THREAD->lock, false); 297 296 298 if (THREAD->timeout_pending) { 297 spinlock_unlock(&THREAD->lock);299 irq_spinlock_unlock(&THREAD->lock, false); 298 300 interrupts_restore(ipl); 299 301 goto restart; 300 302 } 301 spinlock_unlock(&THREAD->lock); 302 } 303 304 spinlock_lock(&wq->lock); 303 304 irq_spinlock_unlock(&THREAD->lock, false); 305 } 306 307 irq_spinlock_lock(&wq->lock, false); 305 308 return ipl; 306 309 } … … 312 315 * lock is released. 313 316 * 314 * @param wq Wait queue. 315 * @param rc Return code of waitq_sleep_timeout_unsafe(). 316 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 317 * @param wq Wait queue. 318 * @param rc Return code of waitq_sleep_timeout_unsafe(). 319 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 320 * 317 321 */ 318 322 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) … … 321 325 case ESYNCH_WOULD_BLOCK: 322 326 case ESYNCH_OK_ATOMIC: 323 spinlock_unlock(&wq->lock);327 irq_spinlock_unlock(&wq->lock, false); 324 328 break; 325 329 default: 326 330 break; 327 331 } 332 328 333 interrupts_restore(ipl); 329 334 } … … 335 340 * and followed by a call to waitq_sleep_finish(). 336 341 * 337 * @param wq See waitq_sleep_timeout(). 338 * @param usec See waitq_sleep_timeout(). 339 * @param flags See waitq_sleep_timeout(). 340 * 341 * @return See waitq_sleep_timeout(). 342 */ 343 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags) 344 { 345 /* checks whether to go to sleep at all */ 342 * @param wq See waitq_sleep_timeout(). 343 * @param usec See waitq_sleep_timeout(). 344 * @param flags See waitq_sleep_timeout(). 345 * 346 * @return See waitq_sleep_timeout(). 347 * 348 */ 349 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags) 350 { 351 /* Checks whether to go to sleep at all */ 346 352 if (wq->missed_wakeups) { 347 353 wq->missed_wakeups--; 348 354 return ESYNCH_OK_ATOMIC; 349 } 350 else { 355 } else { 351 356 if (PARAM_NON_BLOCKING(flags, usec)) { 352 /* return immediatelly instead of going to sleep */357 /* Return immediatelly instead of going to sleep */ 353 358 return ESYNCH_WOULD_BLOCK; 354 359 } … … 357 362 /* 358 363 * Now we are firmly decided to go to sleep. 
364 * 359 365 */ 360 spinlock_lock(&THREAD->lock);361 366 irq_spinlock_lock(&THREAD->lock, false); 367 362 368 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 363 364 369 /* 365 370 * If the thread was already interrupted, 366 371 * don't go to sleep at all. 372 * 367 373 */ 368 374 if (THREAD->interrupted) { 369 spinlock_unlock(&THREAD->lock);370 spinlock_unlock(&wq->lock);375 irq_spinlock_unlock(&THREAD->lock, false); 376 irq_spinlock_unlock(&wq->lock, false); 371 377 return ESYNCH_INTERRUPTED; 372 378 } 373 379 374 380 /* 375 381 * Set context that will be restored if the sleep 376 382 * of this thread is ever interrupted. 383 * 377 384 */ 378 385 THREAD->sleep_interruptible = true; … … 380 387 /* Short emulation of scheduler() return code. */ 381 388 THREAD->last_cycle = get_cycle(); 382 spinlock_unlock(&THREAD->lock);389 irq_spinlock_unlock(&THREAD->lock, false); 383 390 return ESYNCH_INTERRUPTED; 384 391 } 385 386 } else { 392 } else 387 393 THREAD->sleep_interruptible = false; 388 } 389 394 390 395 if (usec) { 391 396 /* We use the timeout variant. */ … … 393 398 /* Short emulation of scheduler() return code. */ 394 399 THREAD->last_cycle = get_cycle(); 395 spinlock_unlock(&THREAD->lock);400 irq_spinlock_unlock(&THREAD->lock, false); 396 401 return ESYNCH_TIMEOUT; 397 402 } 403 398 404 THREAD->timeout_pending = true; 399 405 timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, 400 406 waitq_sleep_timed_out, THREAD); 401 407 } 402 408 403 409 list_append(&THREAD->wq_link, &wq->head); 404 410 405 411 /* 406 412 * Suspend execution. 413 * 407 414 */ 408 415 THREAD->state = Sleeping; 409 416 THREAD->sleep_queue = wq; 410 411 spinlock_unlock(&THREAD->lock);412 417 418 irq_spinlock_unlock(&THREAD->lock, false); 419 413 420 /* wq->lock is released in scheduler_separated_stack() */ 414 scheduler(); 421 scheduler(); 415 422 416 423 return ESYNCH_OK_BLOCKED; 417 424 } 418 419 425 420 426 /** Wake up first thread sleeping in a wait queue … … 426 432 * timeout. 427 433 * 428 * @param wq Pointer to wait queue. 429 * @param mode Wakeup mode. 434 * @param wq Pointer to wait queue. 435 * @param mode Wakeup mode. 436 * 430 437 */ 431 438 void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) 432 439 { 433 ipl_t ipl; 434 435 ipl = interrupts_disable(); 436 spinlock_lock(&wq->lock); 437 440 irq_spinlock_lock(&wq->lock, true); 438 441 _waitq_wakeup_unsafe(wq, mode); 439 440 spinlock_unlock(&wq->lock); 441 interrupts_restore(ipl); 442 irq_spinlock_unlock(&wq->lock, true); 442 443 } 443 444 … … 447 448 * assumes wq->lock is already locked and interrupts are already disabled. 448 449 * 449 * @param wq Pointer to wait queue. 450 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 451 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 452 * all waiting threads, if any, are woken up. If there are 453 * no waiting threads to be woken up, the missed wakeup is 454 * recorded in the wait queue. 450 * @param wq Pointer to wait queue. 451 * @param mode If mode is WAKEUP_FIRST, then the longest waiting 452 * thread, if any, is woken up. If mode is WAKEUP_ALL, then 453 * all waiting threads, if any, are woken up. If there are 454 * no waiting threads to be woken up, the missed wakeup is 455 * recorded in the wait queue. 
456 * 455 457 */ 456 458 void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) 457 459 { 458 thread_t *t;459 460 size_t count = 0; 460 461 loop: 461 462 loop: 462 463 if (list_empty(&wq->head)) { 463 464 wq->missed_wakeups++; 464 if ( count && mode == WAKEUP_ALL)465 if ((count) && (mode == WAKEUP_ALL)) 465 466 wq->missed_wakeups--; 467 466 468 return; 467 469 } 468 470 469 471 count++; 470 t = list_get_instance(wq->head.next, thread_t, wq_link);472 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 471 473 472 474 /* … … 480 482 * invariant must hold: 481 483 * 482 * t ->sleep_queue != NULL <=> tsleeps in a wait queue484 * thread->sleep_queue != NULL <=> thread sleeps in a wait queue 483 485 * 484 486 * For an observer who locks the thread, the invariant 485 487 * holds only when the lock is held prior to removing 486 488 * it from the wait queue. 489 * 487 490 */ 488 spinlock_lock(&t->lock); 489 list_remove(&t->wq_link); 490 491 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) 492 t->timeout_pending = false; 493 t->sleep_queue = NULL; 494 spinlock_unlock(&t->lock); 495 496 thread_ready(t); 497 491 irq_spinlock_lock(&thread->lock, false); 492 list_remove(&thread->wq_link); 493 494 if ((thread->timeout_pending) && 495 (timeout_unregister(&thread->sleep_timeout))) 496 thread->timeout_pending = false; 497 498 thread->sleep_queue = NULL; 499 irq_spinlock_unlock(&thread->lock, false); 500 501 thread_ready(thread); 502 498 503 if (mode == WAKEUP_ALL) 499 504 goto loop; -
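To make the ESYNCH_* contract documented above concrete, here is a minimal producer/consumer sketch using only calls from this file. The example_* names are hypothetical; the header paths are assumptions.

    #include <synch/waitq.h>
    #include <synch/synch.h>

    static waitq_t example_wq;            /* waitq_initialize(&example_wq)
                                             runs once during init */

    static void example_consumer(void)
    {
    	/* Sleep interruptibly for at most one second. */
    	int rc = waitq_sleep_timeout(&example_wq, 1000000,
    	    SYNCH_FLAGS_INTERRUPTIBLE);

    	switch (rc) {
    	case ESYNCH_OK_ATOMIC:     /* a wakeup was already pending */
    	case ESYNCH_OK_BLOCKED:    /* we slept and were woken up */
    		/* ... consume the event ... */
    		break;
    	case ESYNCH_TIMEOUT:       /* one second elapsed with no wakeup */
    	case ESYNCH_INTERRUPTED:   /* somebody interrupted the sleep */
    	default:
    		break;
    	}
    }

    static void example_producer(void)
    {
    	/* Wake the longest-waiting consumer, or record a missed wakeup. */
    	waitq_wakeup(&example_wq, WAKEUP_FIRST);
    }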
kernel/generic/src/syscall/syscall.c
r666f492 rda1bafb 59 59 unative_t a4, unative_t a5, unative_t a6, unative_t id) 60 60 { 61 unative_t rc;62 ipl_t ipl;63 64 61 /* Do userpace accounting */ 65 ipl = interrupts_disable(); 66 spinlock_lock(&THREAD->lock); 62 irq_spinlock_lock(&THREAD->lock, true); 67 63 thread_update_accounting(true); 68 spinlock_unlock(&THREAD->lock); 69 interrupts_restore(ipl); 70 64 irq_spinlock_unlock(&THREAD->lock, true); 65 71 66 #ifdef CONFIG_UDEBUG 72 67 /* 73 68 * Early check for undebugged tasks. We do not lock anything as this 74 69 * test need not be precise in either direction. 70 * 75 71 */ 76 if (THREAD->udebug.active) {72 if (THREAD->udebug.active) 77 73 udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false); 78 }79 74 #endif 80 75 76 unative_t rc; 81 77 if (id < SYSCALL_END) { 82 78 rc = syscall_table[id](a1, a2, a3, a4, a5, a6); … … 93 89 if (THREAD->udebug.active) { 94 90 udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true); 95 91 96 92 /* 97 93 * Stopping point needed for tasks that only invoke … … 103 99 } 104 100 #endif 105 101 106 102 /* Do kernel accounting */ 107 (void) interrupts_disable(); 108 spinlock_lock(&THREAD->lock); 103 irq_spinlock_lock(&THREAD->lock, true); 109 104 thread_update_accounting(false); 110 spinlock_unlock(&THREAD->lock); 111 interrupts_restore(ipl); 105 irq_spinlock_unlock(&THREAD->lock, true); 112 106 113 107 return rc; -
kernel/generic/src/sysinfo/stats.c
r666f492 rda1bafb 110 110 } 111 111 112 /* Each CPU structure is locked separatelly */113 ipl_t ipl = interrupts_disable();114 115 112 size_t i; 116 113 for (i = 0; i < config.cpu_count; i++) { 117 spinlock_lock(&cpus[i].lock);114 irq_spinlock_lock(&cpus[i].lock, true); 118 115 119 116 stats_cpus[i].id = cpus[i].id; … … 123 120 stats_cpus[i].idle_ticks = cpus[i].idle_ticks; 124 121 125 spinlock_unlock(&cpus[i].lock); 126 } 127 128 interrupts_restore(ipl); 122 irq_spinlock_unlock(&cpus[i].lock, true); 123 } 129 124 130 125 return ((void *) stats_cpus); … … 235 230 236 231 /* Interrupts are already disabled */ 237 spinlock_lock(&(task->lock));232 irq_spinlock_lock(&(task->lock), false); 238 233 239 234 /* Record the statistics and increment the iterator */ … … 241 236 (*iterator)++; 242 237 243 spinlock_unlock(&(task->lock));238 irq_spinlock_unlock(&(task->lock), false); 244 239 245 240 return true; … … 260 255 { 261 256 /* Messing with task structures, avoid deadlock */ 262 ipl_t ipl = interrupts_disable(); 263 spinlock_lock(&tasks_lock); 257 irq_spinlock_lock(&tasks_lock, true); 264 258 265 259 /* First walk the task tree to count the tasks */ … … 269 263 if (count == 0) { 270 264 /* No tasks found (strange) */ 271 spinlock_unlock(&tasks_lock); 272 interrupts_restore(ipl); 273 265 irq_spinlock_unlock(&tasks_lock, true); 274 266 *size = 0; 275 267 return NULL; … … 278 270 *size = sizeof(stats_task_t) * count; 279 271 if (dry_run) { 280 spinlock_unlock(&tasks_lock); 281 interrupts_restore(ipl); 272 irq_spinlock_unlock(&tasks_lock, true); 282 273 return NULL; 283 274 } … … 286 277 if (stats_tasks == NULL) { 287 278 /* No free space for allocation */ 288 spinlock_unlock(&tasks_lock); 289 interrupts_restore(ipl); 290 279 irq_spinlock_unlock(&tasks_lock, true); 291 280 *size = 0; 292 281 return NULL; … … 297 286 avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator); 298 287 299 spinlock_unlock(&tasks_lock); 300 interrupts_restore(ipl); 288 irq_spinlock_unlock(&tasks_lock, true); 301 289 302 290 return ((void *) stats_tasks); … … 346 334 347 335 /* Interrupts are already disabled */ 348 spinlock_lock(&thread->lock);336 irq_spinlock_lock(&thread->lock, false); 349 337 350 338 /* Record the statistics and increment the iterator */ … … 352 340 (*iterator)++; 353 341 354 spinlock_unlock(&thread->lock);342 irq_spinlock_unlock(&thread->lock, false); 355 343 356 344 return true; … … 371 359 { 372 360 /* Messing with threads structures, avoid deadlock */ 373 ipl_t ipl = interrupts_disable(); 374 spinlock_lock(&threads_lock); 361 irq_spinlock_lock(&threads_lock, true); 375 362 376 363 /* First walk the thread tree to count the threads */ … … 380 367 if (count == 0) { 381 368 /* No threads found (strange) */ 382 spinlock_unlock(&threads_lock); 383 interrupts_restore(ipl); 384 369 irq_spinlock_unlock(&threads_lock, true); 385 370 *size = 0; 386 371 return NULL; … … 389 374 *size = sizeof(stats_thread_t) * count; 390 375 if (dry_run) { 391 spinlock_unlock(&threads_lock); 392 interrupts_restore(ipl); 376 irq_spinlock_unlock(&threads_lock, true); 393 377 return NULL; 394 378 } … … 397 381 if (stats_threads == NULL) { 398 382 /* No free space for allocation */ 399 spinlock_unlock(&threads_lock); 400 interrupts_restore(ipl); 401 383 irq_spinlock_unlock(&threads_lock, true); 402 384 *size = 0; 403 385 return NULL; … … 408 390 avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator); 409 391 410 spinlock_unlock(&threads_lock); 411 interrupts_restore(ipl); 392 
irq_spinlock_unlock(&threads_lock, true); 412 393 413 394 return ((void *) stats_threads); … … 443 424 444 425 /* Messing with task structures, avoid deadlock */ 445 ipl_t ipl = interrupts_disable(); 446 spinlock_lock(&tasks_lock); 426 irq_spinlock_lock(&tasks_lock, true); 447 427 448 428 task_t *task = task_find_by_id(task_id); 449 429 if (task == NULL) { 450 430 /* No task with this ID */ 451 spinlock_unlock(&tasks_lock); 452 interrupts_restore(ipl); 431 irq_spinlock_unlock(&tasks_lock, true); 453 432 return ret; 454 433 } … … 459 438 ret.data.size = sizeof(stats_task_t); 460 439 461 spinlock_unlock(&tasks_lock);440 irq_spinlock_unlock(&tasks_lock, true); 462 441 } else { 463 442 /* Allocate stats_task_t structure */ … … 465 444 (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC); 466 445 if (stats_task == NULL) { 467 spinlock_unlock(&tasks_lock); 468 interrupts_restore(ipl); 446 irq_spinlock_unlock(&tasks_lock, true); 469 447 return ret; 470 448 } … … 474 452 ret.data.data = (void *) stats_task; 475 453 ret.data.size = sizeof(stats_task_t); 476 454 477 455 /* Hand-over-hand locking */ 478 spinlock_lock(&task->lock); 479 spinlock_unlock(&tasks_lock); 456 irq_spinlock_exchange(&tasks_lock, &task->lock); 480 457 481 458 produce_stats_task(task, stats_task); 482 459 483 spinlock_unlock(&task->lock); 484 } 485 486 interrupts_restore(ipl); 460 irq_spinlock_unlock(&task->lock, true); 461 } 487 462 488 463 return ret; … … 518 493 519 494 /* Messing with threads structures, avoid deadlock */ 520 ipl_t ipl = interrupts_disable(); 521 spinlock_lock(&threads_lock); 495 irq_spinlock_lock(&threads_lock, true); 522 496 523 497 thread_t *thread = thread_find_by_id(thread_id); 524 498 if (thread == NULL) { 525 499 /* No thread with this ID */ 526 spinlock_unlock(&threads_lock); 527 interrupts_restore(ipl); 500 irq_spinlock_unlock(&threads_lock, true); 528 501 return ret; 529 502 } … … 534 507 ret.data.size = sizeof(stats_thread_t); 535 508 536 spinlock_unlock(&threads_lock);509 irq_spinlock_unlock(&threads_lock, true); 537 510 } else { 538 511 /* Allocate stats_thread_t structure */ … … 540 513 (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC); 541 514 if (stats_thread == NULL) { 542 spinlock_unlock(&threads_lock); 543 interrupts_restore(ipl); 515 irq_spinlock_unlock(&threads_lock, true); 544 516 return ret; 545 517 } … … 551 523 552 524 /* Hand-over-hand locking */ 553 spinlock_lock(&thread->lock); 554 spinlock_unlock(&threads_lock); 525 irq_spinlock_exchange(&threads_lock, &thread->lock); 555 526 556 527 produce_stats_thread(thread, stats_thread); 557 528 558 spinlock_unlock(&thread->lock); 559 } 560 561 interrupts_restore(ipl); 529 irq_spinlock_unlock(&thread->lock, true); 530 } 562 531 563 532 return ret; … … 673 642 { 674 643 mutex_initialize(&load_lock, MUTEX_PASSIVE); 675 644 676 645 sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime); 677 646 sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus); -
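The key new idiom in this file is the hand-over-hand locking via irq_spinlock_exchange(), which trades the global tasks_lock for the per-task lock without a window in which the task could disappear. The sketch below isolates that idiom; example_task_is_privileged() is hypothetical, while every lock call and field it touches appears in this changeset.

    #include <synch/spinlock.h>
    #include <proc/task.h>        /* task_t, tasks_lock, task_find_by_id() */
    #include <security/cap.h>     /* CAP_CAP */

    static bool example_task_is_privileged(task_id_t id)
    {
    	/* 'true': disable interrupts for the whole locked sequence. */
    	irq_spinlock_lock(&tasks_lock, true);

    	task_t *task = task_find_by_id(id);
    	if (task == NULL) {
    		irq_spinlock_unlock(&tasks_lock, true);
    		return false;
    	}

    	/* Hand-over-hand: drop tasks_lock and take task->lock in one step. */
    	irq_spinlock_exchange(&tasks_lock, &task->lock);

    	bool priv = (task->capabilities & CAP_CAP) != 0;

    	/* Unlock with 'true', mirroring the outer lock, as in the code above. */
    	irq_spinlock_unlock(&task->lock, true);
    	return priv;
    }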
kernel/generic/src/sysinfo/sysinfo.c
r666f492 rda1bafb 58 58 *
 59 59 */
 60 static int sysinfo_item_constructor(void *obj, int kmflag)60 static int sysinfo_item_constructor(void *obj, unsigned int kmflag)
 61 61 {
 62 62 sysinfo_item_t *item = (sysinfo_item_t *) obj;
 …
 78 78 *
 79 79 */
 80 static int sysinfo_item_destructor(void *obj)80 static size_t sysinfo_item_destructor(void *obj)
 81 81 {
 82 82 sysinfo_item_t *item = (sysinfo_item_t *) obj;
-
kernel/generic/src/time/clock.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief High-level clock interrupt handler. 36 36 * 37 37 * This file contains the clock() function which is the source 38 38 * of preemption. It is also responsible for executing expired 39 39 * timeouts. 40 */ 41 40 * 41 */ 42 42 43 #include <time/clock.h> 43 44 #include <time/timeout.h> … … 63 64 static parea_t clock_parea; 64 65 65 /* Variable holding fragment of second, so that we would update 66 * seconds correctly 66 /** Fragment of second 67 * 68 * For updating seconds correctly. 69 * 67 70 */ 68 71 static unative_t secfrag = 0; … … 73 76 * information about realtime data. We allocate 1 page with these 74 77 * data and update it periodically. 78 * 75 79 */ 76 80 void clock_counter_init(void) 77 81 { 78 void *faddr; 79 80 faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC); 82 void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC); 81 83 if (!faddr) 82 84 panic("Cannot allocate page for clock."); … … 87 89 uptime->seconds2 = 0; 88 90 uptime->useconds = 0; 89 91 90 92 clock_parea.pbase = (uintptr_t) faddr; 91 93 clock_parea.frames = 1; 92 94 ddi_parea_register(&clock_parea); 93 95 94 96 /* 95 97 * Prepare information for the userspace so that it can successfully 96 98 * physmem_map() the clock_parea. 99 * 97 100 */ 98 101 sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true); … … 100 103 } 101 104 102 103 105 /** Update public counters 104 106 * 105 107 * Update it only on first processor 106 * TODO: Do we really need so many write barriers? 108 * TODO: Do we really need so many write barriers? 109 * 107 110 */ 108 111 static void clock_update_counters(void) … … 131 134 void clock(void) 132 135 { 133 link_t *l;134 timeout_t *h;135 timeout_handler_t f;136 void *arg;137 136 size_t missed_clock_ticks = CPU->missed_clock_ticks; 138 unsigned int i; 139 137 140 138 /* Account lost ticks to CPU usage */ 141 if (CPU->idle) {139 if (CPU->idle) 142 140 CPU->idle_ticks += missed_clock_ticks + 1; 143 } else {141 else 144 142 CPU->busy_ticks += missed_clock_ticks + 1; 145 }143 146 144 CPU->idle = false; 147 145 148 146 /* 149 147 * To avoid lock ordering problems, 150 148 * run all expired timeouts as you visit them. 
149 * 151 150 */ 151 size_t i; 152 152 for (i = 0; i <= missed_clock_ticks; i++) { 153 153 clock_update_counters(); 154 spinlock_lock(&CPU->timeoutlock); 155 while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { 156 h = list_get_instance(l, timeout_t, link); 157 spinlock_lock(&h->lock); 158 if (h->ticks-- != 0) { 159 spinlock_unlock(&h->lock); 154 irq_spinlock_lock(&CPU->timeoutlock, false); 155 156 link_t *cur; 157 while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { 158 timeout_t *timeout = list_get_instance(cur, timeout_t, link); 159 160 irq_spinlock_lock(&timeout->lock, false); 161 if (timeout->ticks-- != 0) { 162 irq_spinlock_unlock(&timeout->lock, false); 160 163 break; 161 164 } 162 list_remove(l); 163 f = h->handler; 164 arg = h->arg; 165 timeout_reinitialize(h); 166 spinlock_unlock(&h->lock); 167 spinlock_unlock(&CPU->timeoutlock); 168 169 f(arg); 170 171 spinlock_lock(&CPU->timeoutlock); 165 166 list_remove(cur); 167 timeout_handler_t handler = timeout->handler; 168 void *arg = timeout->arg; 169 timeout_reinitialize(timeout); 170 171 irq_spinlock_unlock(&timeout->lock, false); 172 irq_spinlock_unlock(&CPU->timeoutlock, false); 173 174 handler(arg); 175 176 irq_spinlock_lock(&CPU->timeoutlock, false); 172 177 } 173 spinlock_unlock(&CPU->timeoutlock); 178 179 irq_spinlock_unlock(&CPU->timeoutlock, false); 174 180 } 175 181 CPU->missed_clock_ticks = 0; 176 182 177 183 /* 178 184 * Do CPU usage accounting and find out whether to preempt THREAD. 185 * 179 186 */ 180 187 181 188 if (THREAD) { 182 189 uint64_t ticks; 183 190 184 spinlock_lock(&CPU->lock);191 irq_spinlock_lock(&CPU->lock, false); 185 192 CPU->needs_relink += 1 + missed_clock_ticks; 186 spinlock_unlock(&CPU->lock);187 188 spinlock_lock(&THREAD->lock);193 irq_spinlock_unlock(&CPU->lock, false); 194 195 irq_spinlock_lock(&THREAD->lock, false); 189 196 if ((ticks = THREAD->ticks)) { 190 197 if (ticks >= 1 + missed_clock_ticks) … … 193 200 THREAD->ticks = 0; 194 201 } 195 spinlock_unlock(&THREAD->lock);202 irq_spinlock_unlock(&THREAD->lock, false); 196 203 197 204 if ((!ticks) && (!PREEMPTION_DISABLED)) { 198 #ifdef CONFIG_UDEBUG199 istate_t *istate;200 #endif201 205 scheduler(); 202 206 #ifdef CONFIG_UDEBUG … … 205 209 * before it begins executing userspace code. 206 210 */ 207 istate = THREAD->udebug.uspace_state;208 if ( istate && istate_from_uspace(istate))211 istate_t *istate = THREAD->udebug.uspace_state; 212 if ((istate) && (istate_from_uspace(istate))) 209 213 udebug_before_thread_runs(); 210 214 #endif 211 215 } 212 216 } 213 214 217 } 215 218 -
kernel/generic/src/time/timeout.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Timeout management functions. 36 36 */ 37 37 … … 53 53 void timeout_init(void) 54 54 { 55 spinlock_initialize(&CPU->timeoutlock, "timeout_lock");55 irq_spinlock_initialize(&CPU->timeoutlock, "cpu.timeoutlock"); 56 56 list_initialize(&CPU->timeout_active_head); 57 57 } 58 58 59 60 /** Reinitialize timeout 59 /** Reinitialize timeout 61 60 * 62 61 * Initialize all members except the lock. 63 62 * 64 * @param t Timeout to be initialized. 65 * 66 */ 67 void timeout_reinitialize(timeout_t *t) 68 { 69 t->cpu = NULL; 70 t->ticks = 0; 71 t->handler = NULL; 72 t->arg = NULL; 73 link_initialize(&t->link); 74 } 75 63 * @param timeout Timeout to be initialized. 64 * 65 */ 66 void timeout_reinitialize(timeout_t *timeout) 67 { 68 timeout->cpu = NULL; 69 timeout->ticks = 0; 70 timeout->handler = NULL; 71 timeout->arg = NULL; 72 link_initialize(&timeout->link); 73 } 76 74 77 75 /** Initialize timeout … … 79 77 * Initialize all members including the lock. 80 78 * 81 * @param t Timeout to be initialized. 82 * 83 */ 84 void timeout_initialize(timeout_t *t) 85 { 86 spinlock_initialize(&t->lock, "timeout_t_lock"); 87 timeout_reinitialize(t); 88 } 89 79 * @param timeout Timeout to be initialized. 80 * 81 */ 82 void timeout_initialize(timeout_t *timeout) 83 { 84 irq_spinlock_initialize(&timeout->lock, "timeout_t_lock"); 85 timeout_reinitialize(timeout); 86 } 90 87 91 88 /** Register timeout … … 95 92 * time microseconds (or slightly more). 96 93 * 97 * @param t Timeout structure. 98 * @param time Number of usec in the future to execute the handler. 99 * @param f Timeout handler function. 100 * @param arg Timeout handler argument. 101 * 102 */ 103 void 104 timeout_register(timeout_t *t, uint64_t time, timeout_handler_t f, void *arg) 105 { 106 timeout_t *hlp = NULL; 107 link_t *l, *m; 108 ipl_t ipl; 109 uint64_t sum; 110 111 ipl = interrupts_disable(); 112 spinlock_lock(&CPU->timeoutlock); 113 spinlock_lock(&t->lock); 114 115 if (t->cpu) 116 panic("Unexpected: t->cpu != 0."); 117 118 t->cpu = CPU; 119 t->ticks = us2ticks(time); 120 121 t->handler = f; 122 t->arg = arg; 123 124 /* 125 * Insert t into the active timeouts list according to t->ticks. 126 */ 127 sum = 0; 128 l = CPU->timeout_active_head.next; 129 while (l != &CPU->timeout_active_head) { 130 hlp = list_get_instance(l, timeout_t, link); 131 spinlock_lock(&hlp->lock); 132 if (t->ticks < sum + hlp->ticks) { 133 spinlock_unlock(&hlp->lock); 94 * @param timeout Timeout structure. 95 * @param time Number of usec in the future to execute the handler. 96 * @param handler Timeout handler function. 97 * @param arg Timeout handler argument. 98 * 99 */ 100 void timeout_register(timeout_t *timeout, uint64_t time, 101 timeout_handler_t handler, void *arg) 102 { 103 irq_spinlock_lock(&CPU->timeoutlock, true); 104 irq_spinlock_lock(&timeout->lock, false); 105 106 if (timeout->cpu) 107 panic("Unexpected: timeout->cpu != 0."); 108 109 timeout->cpu = CPU; 110 timeout->ticks = us2ticks(time); 111 112 timeout->handler = handler; 113 timeout->arg = arg; 114 115 /* 116 * Insert timeout into the active timeouts list according to timeout->ticks. 
117 */ 118 uint64_t sum = 0; 119 timeout_t *target = NULL; 120 link_t *cur; 121 for (cur = CPU->timeout_active_head.next; 122 cur != &CPU->timeout_active_head; cur = cur->next) { 123 target = list_get_instance(cur, timeout_t, link); 124 irq_spinlock_lock(&target->lock, false); 125 126 if (timeout->ticks < sum + target->ticks) { 127 irq_spinlock_unlock(&target->lock, false); 134 128 break; 135 129 } 136 sum += hlp->ticks;137 s pinlock_unlock(&hlp->lock);138 l = l->next;139 } 140 141 m = l->prev;142 li st_prepend(&t->link, m); /* avoid using l->prev */143 144 /*145 * Adjust t->ticks according to ticks accumulated in h's predecessors.146 * /147 t->ticks -= sum;148 149 /*150 * Decrease ticks of t's immediate succesor by t->ticks.151 */152 if (l != &CPU->timeout_active_head) {153 spinlock_lock(&hlp->lock);154 hlp->ticks -= t->ticks;155 spinlock_unlock(&hlp->lock);156 }157 158 spinlock_unlock(&t->lock);159 spinlock_unlock(&CPU->timeoutlock);160 i nterrupts_restore(ipl);161 } 162 130 131 sum += target->ticks; 132 irq_spinlock_unlock(&target->lock, false); 133 } 134 135 /* Avoid using cur->prev directly */ 136 link_t *prev = cur->prev; 137 list_prepend(&timeout->link, prev); 138 139 /* 140 * Adjust timeout->ticks according to ticks 141 * accumulated in target's predecessors. 142 */ 143 timeout->ticks -= sum; 144 145 /* 146 * Decrease ticks of timeout's immediate succesor by timeout->ticks. 147 */ 148 if (cur != &CPU->timeout_active_head) { 149 irq_spinlock_lock(&target->lock, false); 150 target->ticks -= timeout->ticks; 151 irq_spinlock_unlock(&target->lock, false); 152 } 153 154 irq_spinlock_unlock(&timeout->lock, false); 155 irq_spinlock_unlock(&CPU->timeoutlock, true); 156 } 163 157 164 158 /** Unregister timeout … … 166 160 * Remove timeout from timeout list. 167 161 * 168 * @param t Timeout to unregister. 169 * 170 * @return True on success, false on failure. 171 */ 172 bool timeout_unregister(timeout_t *t) 173 { 174 timeout_t *hlp; 175 link_t *l; 176 ipl_t ipl; 162 * @param timeout Timeout to unregister. 163 * 164 * @return True on success, false on failure. 165 * 166 */ 167 bool timeout_unregister(timeout_t *timeout) 168 { 177 169 DEADLOCK_PROBE_INIT(p_tolock); 178 170 179 171 grab_locks: 180 ipl = interrupts_disable(); 181 spinlock_lock(&t->lock); 182 if (!t->cpu) { 183 spinlock_unlock(&t->lock); 184 interrupts_restore(ipl); 172 irq_spinlock_lock(&timeout->lock, true); 173 if (!timeout->cpu) { 174 irq_spinlock_unlock(&timeout->lock, true); 185 175 return false; 186 176 } 187 if (!spinlock_trylock(&t->cpu->timeoutlock)) {188 spinlock_unlock(&t->lock);189 i nterrupts_restore(ipl);177 178 if (!irq_spinlock_trylock(&timeout->cpu->timeoutlock)) { 179 irq_spinlock_unlock(&timeout->lock, true); 190 180 DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD); 191 181 goto grab_locks; … … 193 183 194 184 /* 195 * Now we know for sure that t hasn't been activated yet 196 * and is lurking in t->cpu->timeout_active_head queue. 197 */ 198 199 l = t->link.next; 200 if (l != &t->cpu->timeout_active_head) { 201 hlp = list_get_instance(l, timeout_t, link); 202 spinlock_lock(&hlp->lock); 203 hlp->ticks += t->ticks; 204 spinlock_unlock(&hlp->lock); 205 } 206 207 list_remove(&t->link); 208 spinlock_unlock(&t->cpu->timeoutlock); 209 210 timeout_reinitialize(t); 211 spinlock_unlock(&t->lock); 212 213 interrupts_restore(ipl); 185 * Now we know for sure that timeout hasn't been activated yet 186 * and is lurking in timeout->cpu->timeout_active_head queue. 
187 */ 188 189 link_t *cur = timeout->link.next; 190 if (cur != &timeout->cpu->timeout_active_head) { 191 timeout_t *tmp = list_get_instance(cur, timeout_t, link); 192 irq_spinlock_lock(&tmp->lock, false); 193 tmp->ticks += timeout->ticks; 194 irq_spinlock_unlock(&tmp->lock, false); 195 } 196 197 list_remove(&timeout->link); 198 irq_spinlock_unlock(&timeout->cpu->timeoutlock, false); 199 200 timeout_reinitialize(timeout); 201 irq_spinlock_unlock(&timeout->lock, true); 202 214 203 return true; 215 204 } -
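A usage sketch of the renamed timeout interface: arm a one-shot timeout and optionally cancel it before it fires. The handler runs from clock() in interrupt context on the CPU that armed it. The example_* names are hypothetical and the header path is an assumption; the timeout_* calls are the ones defined above.

    #include <time/timeout.h>

    static timeout_t example_to;
    static volatile bool example_fired = false;

    /* Invoked from clock() in interrupt context; keep it short. */
    static void example_handler(void *arg)
    {
    	*((volatile bool *) arg) = true;
    }

    static void example_arm(void)
    {
    	timeout_initialize(&example_to);

    	/* Fire roughly 500 000 us (500 ms) from now. */
    	timeout_register(&example_to, 500000, example_handler,
    	    (void *) &example_fired);
    }

    static bool example_cancel(void)
    {
    	/* true: removed before firing; false: already fired or not armed. */
    	return timeout_unregister(&example_to);
    }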
kernel/generic/src/udebug/udebug.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Udebug hooks and data structure management. 36 36 * 37 37 * Udebug is an interface that makes userspace debuggers possible. 38 38 */ 39 39 40 40 #include <synch/waitq.h> 41 41 #include <debug.h> … … 45 45 #include <arch.h> 46 46 47 48 47 /** Initialize udebug part of task structure. 49 48 * 50 49 * Called as part of task structure initialization. 51 * @param ut Pointer to the structure to initialize. 50 * @param ut Pointer to the structure to initialize. 51 * 52 52 */ 53 53 void udebug_task_init(udebug_task_t *ut) … … 63 63 * 64 64 * Called as part of thread structure initialization. 65 * @param ut Pointer to the structure to initialize. 65 * 66 * @param ut Pointer to the structure to initialize. 67 * 66 68 */ 67 69 void udebug_thread_initialize(udebug_thread_t *ut) … … 70 72 waitq_initialize(&ut->go_wq); 71 73 condvar_initialize(&ut->active_cv); 72 74 73 75 ut->go_call = NULL; 74 76 ut->uspace_state = NULL; … … 76 78 ut->stoppable = true; 77 79 ut->active = false; 78 ut->cur_event = 0; /* none */80 ut->cur_event = 0; /* None */ 79 81 } 80 82 … … 85 87 * is received. 86 88 * 87 * @param wq The wait queue used by the thread to wait for GO messages. 89 * @param wq The wait queue used by the thread to wait for GO messages. 90 * 88 91 */ 89 92 static void udebug_wait_for_go(waitq_t *wq) 90 93 { 91 int rc; 92 ipl_t ipl; 93 94 ipl = waitq_sleep_prepare(wq); 95 96 wq->missed_wakeups = 0; /* Enforce blocking. */ 97 rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 98 94 ipl_t ipl = waitq_sleep_prepare(wq); 95 96 wq->missed_wakeups = 0; /* Enforce blocking. */ 97 int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 98 99 99 waitq_sleep_finish(wq, rc, ipl); 100 100 } … … 102 102 /** Start of stoppable section. 103 103 * 104 * A stoppable section is a section of code where if the thread can be stoped. In other words, 105 * if a STOP operation is issued, the thread is guaranteed not to execute 106 * any userspace instructions until the thread is resumed. 104 * A stoppable section is a section of code where if the thread can 105 * be stoped. In other words, if a STOP operation is issued, the thread 106 * is guaranteed not to execute any userspace instructions until the 107 * thread is resumed. 107 108 * 108 109 * Having stoppable sections is better than having stopping points, since 109 110 * a thread can be stopped even when it is blocked indefinitely in a system 110 111 * call (whereas it would not reach any stopping point). 112 * 111 113 */ 112 114 void udebug_stoppable_begin(void) 113 115 { 114 int nsc;115 call_t *db_call, *go_call;116 117 116 ASSERT(THREAD); 118 117 ASSERT(TASK); 119 118 120 119 mutex_lock(&TASK->udebug.lock); 121 122 nsc = --TASK->udebug.not_stoppable_count;123 120 121 int nsc = --TASK->udebug.not_stoppable_count; 122 124 123 /* Lock order OK, THREAD->udebug.lock is after TASK->udebug.lock */ 125 124 mutex_lock(&THREAD->udebug.lock); 126 125 ASSERT(THREAD->udebug.stoppable == false); 127 126 THREAD->udebug.stoppable = true; 128 129 if ( TASK->udebug.dt_state == UDEBUG_TS_BEGINNING && nsc == 0) {127 128 if ((TASK->udebug.dt_state == UDEBUG_TS_BEGINNING) && (nsc == 0)) { 130 129 /* 131 130 * This was the last non-stoppable thread. Reply to 132 131 * DEBUG_BEGIN call. 
132 * 133 133 */ 134 135 db_call = TASK->udebug.begin_call;134 135 call_t *db_call = TASK->udebug.begin_call; 136 136 ASSERT(db_call); 137 137 138 138 TASK->udebug.dt_state = UDEBUG_TS_ACTIVE; 139 139 TASK->udebug.begin_call = NULL; 140 140 141 141 IPC_SET_RETVAL(db_call->data, 0); 142 ipc_answer(&TASK->answerbox, db_call); 143 142 ipc_answer(&TASK->answerbox, db_call); 144 143 } else if (TASK->udebug.dt_state == UDEBUG_TS_ACTIVE) { 145 144 /* 146 145 * Active debugging session 147 146 */ 148 147 149 148 if (THREAD->udebug.active == true && 150 149 THREAD->udebug.go == false) { 151 150 /* 152 151 * Thread was requested to stop - answer go call 152 * 153 153 */ 154 154 155 155 /* Make sure nobody takes this call away from us */ 156 go_call = THREAD->udebug.go_call;156 call_t *go_call = THREAD->udebug.go_call; 157 157 THREAD->udebug.go_call = NULL; 158 158 ASSERT(go_call); 159 159 160 160 IPC_SET_RETVAL(go_call->data, 0); 161 161 IPC_SET_ARG1(go_call->data, UDEBUG_EVENT_STOP); 162 162 163 163 THREAD->udebug.cur_event = UDEBUG_EVENT_STOP; 164 165 ipc_answer(&TASK->answerbox, go_call); 164 ipc_answer(&TASK->answerbox, go_call); 166 165 } 167 166 } 168 167 169 168 mutex_unlock(&THREAD->udebug.lock); 170 169 mutex_unlock(&TASK->udebug.lock); … … 174 173 * 175 174 * This is the point where the thread will block if it is stopped. 176 * (As, by definition, a stopped thread must not leave its stoppable section). 175 * (As, by definition, a stopped thread must not leave its stoppable 176 * section). 177 * 177 178 */ 178 179 void udebug_stoppable_end(void) … … 181 182 mutex_lock(&TASK->udebug.lock); 182 183 mutex_lock(&THREAD->udebug.lock); 183 184 if ( THREAD->udebug.active && THREAD->udebug.go == false) {184 185 if ((THREAD->udebug.active) && (THREAD->udebug.go == false)) { 185 186 mutex_unlock(&THREAD->udebug.lock); 186 187 mutex_unlock(&TASK->udebug.lock); 187 188 188 189 udebug_wait_for_go(&THREAD->udebug.go_wq); 189 190 190 191 goto restart; 191 192 /* Must try again - have to lose stoppability atomically. */ … … 194 195 ASSERT(THREAD->udebug.stoppable == true); 195 196 THREAD->udebug.stoppable = false; 196 197 197 198 mutex_unlock(&THREAD->udebug.lock); 198 199 mutex_unlock(&TASK->udebug.lock); … … 203 204 * 204 205 * This function is called from clock(). 206 * 205 207 */ 206 208 void udebug_before_thread_runs(void) … … 215 217 * Must be called before and after servicing a system call. This generates 216 218 * a SYSCALL_B or SYSCALL_E event, depending on the value of @a end_variant. 219 * 217 220 */ 218 221 void udebug_syscall_event(unative_t a1, unative_t a2, unative_t a3, … … 220 223 bool end_variant) 221 224 { 222 call_t *call; 223 udebug_event_t etype; 224 225 etype = end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B; 226 225 udebug_event_t etype = 226 end_variant ? UDEBUG_EVENT_SYSCALL_E : UDEBUG_EVENT_SYSCALL_B; 227 227 228 mutex_lock(&TASK->udebug.lock); 228 229 mutex_lock(&THREAD->udebug.lock); 229 230 230 231 /* Must only generate events when in debugging session and is go. */ 231 232 if (THREAD->udebug.active != true || THREAD->udebug.go == false || … … 235 236 return; 236 237 } 237 238 238 239 /* Fill in the GO response. 
*/ 239 call = THREAD->udebug.go_call;240 call_t *call = THREAD->udebug.go_call; 240 241 THREAD->udebug.go_call = NULL; 241 242 242 243 IPC_SET_RETVAL(call->data, 0); 243 244 IPC_SET_ARG1(call->data, etype); 244 245 IPC_SET_ARG2(call->data, id); 245 246 IPC_SET_ARG3(call->data, rc); 246 247 247 248 THREAD->udebug.syscall_args[0] = a1; 248 249 THREAD->udebug.syscall_args[1] = a2; … … 251 252 THREAD->udebug.syscall_args[4] = a5; 252 253 THREAD->udebug.syscall_args[5] = a6; 253 254 254 255 /* 255 256 * Make sure udebug.go is false when going to sleep 256 257 * in case we get woken up by DEBUG_END. (At which 257 258 * point it must be back to the initial true value). 259 * 258 260 */ 259 261 THREAD->udebug.go = false; 260 262 THREAD->udebug.cur_event = etype; 261 263 262 264 ipc_answer(&TASK->answerbox, call); 263 265 264 266 mutex_unlock(&THREAD->udebug.lock); 265 267 mutex_unlock(&TASK->udebug.lock); 266 268 267 269 udebug_wait_for_go(&THREAD->udebug.go_wq); 268 270 } … … 280 282 * and get a THREAD_B event for them. 281 283 * 282 * @param t Structure of the thread being created. Not locked, as the 283 * thread is not executing yet. 284 * @param ta Task to which the thread should be attached. 285 */ 286 void udebug_thread_b_event_attach(struct thread *t, struct task *ta) 287 { 288 call_t *call; 289 284 * @param thread Structure of the thread being created. Not locked, as the 285 * thread is not executing yet. 286 * @param task Task to which the thread should be attached. 287 * 288 */ 289 void udebug_thread_b_event_attach(struct thread *thread, struct task *task) 290 { 290 291 mutex_lock(&TASK->udebug.lock); 291 292 mutex_lock(&THREAD->udebug.lock); 292 293 thread_attach(t , ta);294 293 294 thread_attach(thread, task); 295 295 296 LOG("Check state"); 296 297 297 298 /* Must only generate events when in debugging session */ 298 299 if (THREAD->udebug.active != true) { 299 300 LOG("udebug.active: %s, udebug.go: %s", 300 THREAD->udebug.active ? "Yes(+)" : "No", 301 THREAD->udebug.go ? "Yes(-)" : "No"); 301 THREAD->udebug.active ? "Yes(+)" : "No", 302 THREAD->udebug.go ? "Yes(-)" : "No"); 303 302 304 mutex_unlock(&THREAD->udebug.lock); 303 305 mutex_unlock(&TASK->udebug.lock); 304 306 return; 305 307 } 306 308 307 309 LOG("Trigger event"); 308 call = THREAD->udebug.go_call; 310 311 call_t *call = THREAD->udebug.go_call; 312 309 313 THREAD->udebug.go_call = NULL; 310 314 IPC_SET_RETVAL(call->data, 0); 311 315 IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_B); 312 IPC_SET_ARG2(call->data, (unative_t) t);313 316 IPC_SET_ARG2(call->data, (unative_t) thread); 317 314 318 /* 315 319 * Make sure udebug.go is false when going to sleep 316 320 * in case we get woken up by DEBUG_END. (At which 317 321 * point it must be back to the initial true value). 322 * 318 323 */ 319 324 THREAD->udebug.go = false; 320 325 THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B; 321 326 322 327 ipc_answer(&TASK->answerbox, call); 323 328 324 329 mutex_unlock(&THREAD->udebug.lock); 325 330 mutex_unlock(&TASK->udebug.lock); 326 331 327 332 LOG("Wait for Go"); 328 333 udebug_wait_for_go(&THREAD->udebug.go_wq); … … 333 338 * Must be called when the current thread is terminating. 334 339 * Generates a THREAD_E event. 340 * 335 341 */ 336 342 void udebug_thread_e_event(void) 337 343 { 338 call_t *call;339 340 344 mutex_lock(&TASK->udebug.lock); 341 345 mutex_lock(&THREAD->udebug.lock); 342 346 343 347 LOG("Check state"); 344 348 345 349 /* Must only generate events when in debugging session. 
*/ 346 350 if (THREAD->udebug.active != true) { 347 351 LOG("udebug.active: %s, udebug.go: %s", 348 THREAD->udebug.active ? "Yes" : "No", 349 THREAD->udebug.go ? "Yes" : "No"); 352 THREAD->udebug.active ? "Yes" : "No", 353 THREAD->udebug.go ? "Yes" : "No"); 354 350 355 mutex_unlock(&THREAD->udebug.lock); 351 356 mutex_unlock(&TASK->udebug.lock); 352 357 return; 353 358 } 354 359 355 360 LOG("Trigger event"); 356 call = THREAD->udebug.go_call; 361 362 call_t *call = THREAD->udebug.go_call; 363 357 364 THREAD->udebug.go_call = NULL; 358 365 IPC_SET_RETVAL(call->data, 0); 359 366 IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E); 360 367 361 368 /* Prevent any further debug activity in thread. */ 362 369 THREAD->udebug.active = false; 363 THREAD->udebug.cur_event = 0; /* none */364 THREAD->udebug.go = false; /* set to initial value */365 370 THREAD->udebug.cur_event = 0; /* None */ 371 THREAD->udebug.go = false; /* Set to initial value */ 372 366 373 ipc_answer(&TASK->answerbox, call); 367 374 368 375 mutex_unlock(&THREAD->udebug.lock); 369 376 mutex_unlock(&TASK->udebug.lock); 370 371 /* 377 378 /* 372 379 * This event does not sleep - debugging has finished 373 380 * in this thread. 381 * 374 382 */ 375 383 } 376 384 377 /** 378 * Terminate task debugging session. 379 * 380 * Gracefully terminates the debugging session for a task. If the debugger 385 /** Terminate task debugging session. 386 * 387 * Gracefully terminate the debugging session for a task. If the debugger 381 388 * is still waiting for events on some threads, it will receive a 382 389 * FINISHED event for each of them. 383 390 * 384 * @param ta Task structure. ta->udebug.lock must be already locked. 385 * @return Zero on success or negative error code. 386 */ 387 int udebug_task_cleanup(struct task *ta) 388 { 389 thread_t *t; 391 * @param task Task structure. ta->udebug.lock must be already locked. 392 * 393 * @return Zero on success or negative error code. 394 * 395 */ 396 int udebug_task_cleanup(struct task *task) 397 { 398 if ((task->udebug.dt_state != UDEBUG_TS_BEGINNING) && 399 (task->udebug.dt_state != UDEBUG_TS_ACTIVE)) { 400 return EINVAL; 401 } 402 403 LOG("Task %" PRIu64, task->taskid); 404 405 /* Finish debugging of all userspace threads */ 390 406 link_t *cur; 391 int flags; 392 ipl_t ipl; 393 394 if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING && 395 ta->udebug.dt_state != UDEBUG_TS_ACTIVE) { 396 return EINVAL; 397 } 398 399 LOG("Task %" PRIu64, ta->taskid); 400 401 /* Finish debugging of all userspace threads */ 402 for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { 403 t = list_get_instance(cur, thread_t, th_link); 404 405 mutex_lock(&t->udebug.lock); 406 407 ipl = interrupts_disable(); 408 spinlock_lock(&t->lock); 409 410 flags = t->flags; 411 412 spinlock_unlock(&t->lock); 413 interrupts_restore(ipl); 414 407 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 408 thread_t *thread = list_get_instance(cur, thread_t, th_link); 409 410 mutex_lock(&thread->udebug.lock); 411 unsigned int flags = thread->flags; 412 415 413 /* Only process userspace threads. */ 416 414 if ((flags & THREAD_FLAG_USPACE) != 0) { 417 415 /* Prevent any further debug activity in thread. */ 418 t ->udebug.active = false;419 t ->udebug.cur_event = 0; /* none */420 416 thread->udebug.active = false; 417 thread->udebug.cur_event = 0; /* None */ 418 421 419 /* Is the thread still go? */ 422 if (t ->udebug.go == true) {420 if (thread->udebug.go == true) { 423 421 /* 424 * Yes, so clear go. 
As active == false,422 * Yes, so clear go. As active == false, 425 423 * this doesn't affect anything. 424 ( 426 425 */ 427 t ->udebug.go = false;428 426 thread->udebug.go = false; 427 429 428 /* Answer GO call */ 430 429 LOG("Answer GO call with EVENT_FINISHED."); 431 IPC_SET_RETVAL(t->udebug.go_call->data, 0); 432 IPC_SET_ARG1(t->udebug.go_call->data, 430 431 IPC_SET_RETVAL(thread->udebug.go_call->data, 0); 432 IPC_SET_ARG1(thread->udebug.go_call->data, 433 433 UDEBUG_EVENT_FINISHED); 434 435 ipc_answer(&ta ->answerbox, t->udebug.go_call);436 t ->udebug.go_call = NULL;434 435 ipc_answer(&task->answerbox, thread->udebug.go_call); 436 thread->udebug.go_call = NULL; 437 437 } else { 438 438 /* 439 439 * Debug_stop is already at initial value. 440 440 * Yet this means the thread needs waking up. 441 * 441 442 */ 442 443 443 444 /* 444 * t 's lock must not be held when calling445 * thread's lock must not be held when calling 445 446 * waitq_wakeup. 447 * 446 448 */ 447 waitq_wakeup(&t ->udebug.go_wq, WAKEUP_FIRST);449 waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST); 448 450 } 449 mutex_unlock(&t->udebug.lock);450 condvar_broadcast(&t->udebug.active_cv);451 } else {452 mutex_unlock(&t->udebug.lock);453 }454 } 455 456 ta ->udebug.dt_state = UDEBUG_TS_INACTIVE;457 ta ->udebug.debugger = NULL;458 451 452 mutex_unlock(&thread->udebug.lock); 453 condvar_broadcast(&thread->udebug.active_cv); 454 } else 455 mutex_unlock(&thread->udebug.lock); 456 } 457 458 task->udebug.dt_state = UDEBUG_TS_INACTIVE; 459 task->udebug.debugger = NULL; 460 459 461 return 0; 460 462 } … … 466 468 * a chance to examine the faulting thead/task. When the debugging session 467 469 * is over, this function returns (so that thread/task cleanup can continue). 470 * 468 471 */ 469 472 void udebug_thread_fault(void) 470 473 { 471 474 udebug_stoppable_begin(); 472 475 473 476 /* Wait until a debugger attends to us. */ 474 477 mutex_lock(&THREAD->udebug.lock); … … 476 479 condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock); 477 480 mutex_unlock(&THREAD->udebug.lock); 478 481 479 482 /* Make sure the debugging session is over before proceeding. */ 480 483 mutex_lock(&THREAD->udebug.lock); … … 482 485 condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock); 483 486 mutex_unlock(&THREAD->udebug.lock); 484 487 485 488 udebug_stoppable_end(); 486 489 } -
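The event hooks in udebug.c above (udebug_syscall_event(), udebug_thread_b_event_attach() and, without the final sleep, udebug_thread_e_event()) all follow the same skeleton: take TASK->udebug.lock before THREAD->udebug.lock (the lock order noted in the diff), answer the debugger's stored GO call with the event type, clear udebug.go before going to sleep so a DEBUG_END wake-up finds the initial state, and then block in udebug_wait_for_go(). A condensed sketch of that skeleton, written as if it sat in udebug.c next to the functions it summarizes; the helper name is hypothetical and the udebug.active/udebug.go and event-mask checks are omitted for brevity:

static void report_event_and_wait(udebug_event_t etype)
{
	/* Documented lock order: task-level udebug lock first, then the
	   thread-level one. */
	mutex_lock(&TASK->udebug.lock);
	mutex_lock(&THREAD->udebug.lock);

	/* Hand the stored GO call back to the debugger, tagged with the
	   event type. */
	call_t *call = THREAD->udebug.go_call;
	THREAD->udebug.go_call = NULL;
	IPC_SET_RETVAL(call->data, 0);
	IPC_SET_ARG1(call->data, etype);

	/* Lose GO before sleeping so a concurrent DEBUG_END finds the
	   thread back in its initial state. */
	THREAD->udebug.go = false;
	THREAD->udebug.cur_event = etype;
	ipc_answer(&TASK->answerbox, call);

	mutex_unlock(&THREAD->udebug.lock);
	mutex_unlock(&TASK->udebug.lock);

	/* Block until the debugger issues the next GO. */
	udebug_wait_for_go(&THREAD->udebug.go_wq);
}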
kernel/generic/src/udebug/udebug_ops.c
r666f492 rda1bafb 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Udebug operations. 36 36 * 37 37 * Udebug operations on tasks and threads are implemented here. The … … 39 39 * when servicing udebug IPC messages. 40 40 */ 41 41 42 42 #include <debug.h> 43 43 #include <proc/task.h> … … 53 53 #include <memstr.h> 54 54 55 /** 56 * Prepare a thread for a debugging operation. 55 /** Prepare a thread for a debugging operation. 57 56 * 58 57 * Simply put, return thread t with t->udebug.lock held, … … 73 72 * the t->lock spinlock to the t->udebug.lock mutex. 74 73 * 75 * @param t 76 * @param being_go 74 * @param thread Pointer, need not at all be valid. 75 * @param being_go Required thread state. 77 76 * 78 77 * Returns EOK if all went well, or an error code otherwise. 79 */ 80 static int _thread_op_begin(thread_t *t, bool being_go) 81 { 82 ipl_t ipl; 83 84 mutex_lock(&TASK->udebug.lock); 85 78 * 79 */ 80 static int _thread_op_begin(thread_t *thread, bool being_go) 81 { 82 mutex_lock(&TASK->udebug.lock); 83 86 84 /* thread_exists() must be called with threads_lock held */ 87 ipl = interrupts_disable(); 88 spinlock_lock(&threads_lock); 89 90 if (!thread_exists(t)) { 91 spinlock_unlock(&threads_lock); 92 interrupts_restore(ipl); 85 irq_spinlock_lock(&threads_lock, true); 86 87 if (!thread_exists(thread)) { 88 irq_spinlock_unlock(&threads_lock, true); 93 89 mutex_unlock(&TASK->udebug.lock); 94 90 return ENOENT; 95 91 } 96 97 /* t->lock is enough to ensure the thread's existence */ 98 spinlock_lock(&t->lock); 99 spinlock_unlock(&threads_lock); 100 101 /* Verify that 't' is a userspace thread. */ 102 if ((t->flags & THREAD_FLAG_USPACE) == 0) { 92 93 /* thread->lock is enough to ensure the thread's existence */ 94 irq_spinlock_exchange(&threads_lock, &thread->lock); 95 96 /* Verify that 'thread' is a userspace thread. */ 97 if ((thread->flags & THREAD_FLAG_USPACE) == 0) { 103 98 /* It's not, deny its existence */ 104 spinlock_unlock(&t->lock); 105 interrupts_restore(ipl); 99 irq_spinlock_unlock(&thread->lock, true); 106 100 mutex_unlock(&TASK->udebug.lock); 107 101 return ENOENT; 108 102 } 109 103 110 104 /* Verify debugging state. */ 111 if (t ->udebug.active != true) {105 if (thread->udebug.active != true) { 112 106 /* Not in debugging session or undesired GO state */ 113 spinlock_unlock(&t->lock); 114 interrupts_restore(ipl); 107 irq_spinlock_unlock(&thread->lock, true); 115 108 mutex_unlock(&TASK->udebug.lock); 116 109 return ENOENT; 117 110 } 118 111 119 112 /* 120 113 * Since the thread has active == true, TASK->udebug.lock 121 114 * is enough to ensure its existence and that active remains 122 115 * true. 116 * 123 117 */ 124 spinlock_unlock(&t->lock); 125 interrupts_restore(ipl); 126 118 irq_spinlock_unlock(&thread->lock, true); 119 127 120 /* Only mutex TASK->udebug.lock left. */ 128 121 129 122 /* Now verify that the thread belongs to the current task. */ 130 if (t ->task != TASK) {123 if (thread->task != TASK) { 131 124 /* No such thread belonging this task*/ 132 125 mutex_unlock(&TASK->udebug.lock); 133 126 return ENOENT; 134 127 } 135 128 136 129 /* 137 130 * Now we need to grab the thread's debug lock for synchronization 138 131 * of the threads stoppability/stop state. 132 * 139 133 */ 140 mutex_lock(&t ->udebug.lock);141 134 mutex_lock(&thread->udebug.lock); 135 142 136 /* The big task mutex is no longer needed. 
*/ 143 137 mutex_unlock(&TASK->udebug.lock); 144 145 if (t ->udebug.go != being_go) {138 139 if (thread->udebug.go != being_go) { 146 140 /* Not in debugging session or undesired GO state. */ 147 mutex_unlock(&t ->udebug.lock);141 mutex_unlock(&thread->udebug.lock); 148 142 return EINVAL; 149 143 } 150 151 /* Only t ->udebug.lock left. */152 153 return EOK; 144 145 /* Only thread->udebug.lock left. */ 146 147 return EOK; /* All went well. */ 154 148 } 155 149 156 150 /** End debugging operation on a thread. */ 157 static void _thread_op_end(thread_t *t )158 { 159 mutex_unlock(&t ->udebug.lock);151 static void _thread_op_end(thread_t *thread) 152 { 153 mutex_unlock(&thread->udebug.lock); 160 154 } 161 155 … … 171 165 * all the threads become stoppable (i.e. they can be considered stopped). 172 166 * 173 * @param call The BEGIN call we are servicing. 174 * @return 0 (OK, but not done yet), 1 (done) or negative error code. 167 * @param call The BEGIN call we are servicing. 168 * 169 * @return 0 (OK, but not done yet), 1 (done) or negative error code. 170 * 175 171 */ 176 172 int udebug_begin(call_t *call) 177 173 { 178 int reply; 179 180 thread_t *t; 181 link_t *cur; 182 183 LOG("Debugging task %llu", TASK->taskid); 184 mutex_lock(&TASK->udebug.lock); 185 174 LOG("Debugging task %" PRIu64, TASK->taskid); 175 176 mutex_lock(&TASK->udebug.lock); 177 186 178 if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) { 187 179 mutex_unlock(&TASK->udebug.lock); 188 180 return EBUSY; 189 181 } 190 182 191 183 TASK->udebug.dt_state = UDEBUG_TS_BEGINNING; 192 184 TASK->udebug.begin_call = call; 193 185 TASK->udebug.debugger = call->sender; 194 186 187 int reply; 188 195 189 if (TASK->udebug.not_stoppable_count == 0) { 196 190 TASK->udebug.dt_state = UDEBUG_TS_ACTIVE; 197 191 TASK->udebug.begin_call = NULL; 198 reply = 1; /* immediate reply */ 199 } else { 200 reply = 0; /* no reply */ 201 } 192 reply = 1; /* immediate reply */ 193 } else 194 reply = 0; /* no reply */ 202 195 203 196 /* Set udebug.active on all of the task's userspace threads. */ 204 197 198 link_t *cur; 205 199 for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { 206 t = list_get_instance(cur, thread_t, th_link); 207 208 mutex_lock(&t->udebug.lock); 209 if ((t->flags & THREAD_FLAG_USPACE) != 0) { 210 t->udebug.active = true; 211 mutex_unlock(&t->udebug.lock); 212 condvar_broadcast(&t->udebug.active_cv); 213 } else { 214 mutex_unlock(&t->udebug.lock); 215 } 216 } 217 200 thread_t *thread = list_get_instance(cur, thread_t, th_link); 201 202 mutex_lock(&thread->udebug.lock); 203 if ((thread->flags & THREAD_FLAG_USPACE) != 0) { 204 thread->udebug.active = true; 205 mutex_unlock(&thread->udebug.lock); 206 condvar_broadcast(&thread->udebug.active_cv); 207 } else 208 mutex_unlock(&thread->udebug.lock); 209 } 210 218 211 mutex_unlock(&TASK->udebug.lock); 219 212 return reply; … … 223 216 * 224 217 * Closes the debugging session for the current task. 218 * 225 219 * @return Zero on success or negative error code. 220 * 226 221 */ 227 222 int udebug_end(void) 228 223 { 229 int rc;230 231 224 LOG("Task %" PRIu64, TASK->taskid); 232 233 mutex_lock(&TASK->udebug.lock); 234 rc = udebug_task_cleanup(TASK);235 mutex_unlock(&TASK->udebug.lock); 236 225 226 mutex_lock(&TASK->udebug.lock); 227 int rc = udebug_task_cleanup(TASK); 228 mutex_unlock(&TASK->udebug.lock); 229 237 230 return rc; 238 231 } … … 242 235 * Sets the event mask that determines which events are enabled. 
243 236 * 244 * @param mask Or combination of events that should be enabled. 245 * @return Zero on success or negative error code. 237 * @param mask Or combination of events that should be enabled. 238 * 239 * @return Zero on success or negative error code. 240 * 246 241 */ 247 242 int udebug_set_evmask(udebug_evmask_t mask) 248 243 { 249 244 LOG("mask = 0x%x", mask); 250 251 mutex_lock(&TASK->udebug.lock); 252 245 246 mutex_lock(&TASK->udebug.lock); 247 253 248 if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { 254 249 mutex_unlock(&TASK->udebug.lock); 255 250 return EINVAL; 256 251 } 257 252 258 253 TASK->udebug.evmask = mask; 259 254 mutex_unlock(&TASK->udebug.lock); 260 255 261 256 return 0; 262 257 } … … 268 263 * a debugging event or STOP occurs, at which point the thread loses GO. 269 264 * 270 * @param t The thread to operate on (unlocked and need not be valid). 271 * @param call The GO call that we are servicing. 272 */ 273 int udebug_go(thread_t *t, call_t *call) 274 { 275 int rc; 276 277 /* On success, this will lock t->udebug.lock. */ 278 rc = _thread_op_begin(t, false); 279 if (rc != EOK) { 265 * @param thread The thread to operate on (unlocked and need not be valid). 266 * @param call The GO call that we are servicing. 267 * 268 */ 269 int udebug_go(thread_t *thread, call_t *call) 270 { 271 /* On success, this will lock thread->udebug.lock. */ 272 int rc = _thread_op_begin(thread, false); 273 if (rc != EOK) 280 274 return rc; 281 } 282 283 t->udebug.go_call = call; 284 t->udebug.go = true; 285 t->udebug.cur_event = 0; /* none */ 286 275 276 thread->udebug.go_call = call; 277 thread->udebug.go = true; 278 thread->udebug.cur_event = 0; /* none */ 279 287 280 /* 288 * Neither t's lock nor threads_lock may be held during wakeup. 281 * Neither thread's lock nor threads_lock may be held during wakeup. 282 * 289 283 */ 290 waitq_wakeup(&t ->udebug.go_wq, WAKEUP_FIRST);291 292 _thread_op_end(t );293 284 waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST); 285 286 _thread_op_end(thread); 287 294 288 return 0; 295 289 } … … 300 294 * can be considered stopped). 301 295 * 302 * @param t The thread to operate on (unlocked and need not be valid). 303 * @param call The GO call that we are servicing. 304 */ 305 int udebug_stop(thread_t *t, call_t *call) 306 { 307 int rc; 308 296 * @param thread The thread to operate on (unlocked and need not be valid). 297 * @param call The GO call that we are servicing. 298 * 299 */ 300 int udebug_stop(thread_t *thread, call_t *call) 301 { 309 302 LOG("udebug_stop()"); 310 303 311 304 /* 312 * On success, this will lock t->udebug.lock. Note that this makes sure 313 * the thread is not stopped. 305 * On success, this will lock thread->udebug.lock. Note that this 306 * makes sure the thread is not stopped. 307 * 314 308 */ 315 rc = _thread_op_begin(t, true);316 if (rc != EOK) {309 int rc = _thread_op_begin(thread, true); 310 if (rc != EOK) 317 311 return rc; 318 } 319 312 320 313 /* Take GO away from the thread. */ 321 t ->udebug.go = false;322 323 if (t ->udebug.stoppable != true) {314 thread->udebug.go = false; 315 316 if (thread->udebug.stoppable != true) { 324 317 /* Answer will be sent when the thread becomes stoppable. */ 325 _thread_op_end(t );318 _thread_op_end(thread); 326 319 return 0; 327 320 } 328 321 329 322 /* 330 323 * Answer GO call. 324 * 331 325 */ 332 326 333 327 /* Make sure nobody takes this call away from us. 
*/ 334 call = t ->udebug.go_call;335 t ->udebug.go_call = NULL;336 328 call = thread->udebug.go_call; 329 thread->udebug.go_call = NULL; 330 337 331 IPC_SET_RETVAL(call->data, 0); 338 332 IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP); 339 333 340 334 THREAD->udebug.cur_event = UDEBUG_EVENT_STOP; 341 342 _thread_op_end(t );343 335 336 _thread_op_end(thread); 337 344 338 mutex_lock(&TASK->udebug.lock); 345 339 ipc_answer(&TASK->answerbox, call); 346 340 mutex_unlock(&TASK->udebug.lock); 347 341 348 342 return 0; 349 343 } … … 365 359 * a maximum size for the userspace buffer. 366 360 * 367 * @param buffer The buffer for storing thread hashes. 368 * @param buf_size Buffer size in bytes. 369 * @param stored The actual number of bytes copied will be stored here. 370 * @param needed Total number of hashes that could have been saved. 361 * @param buffer The buffer for storing thread hashes. 362 * @param buf_size Buffer size in bytes. 363 * @param stored The actual number of bytes copied will be stored here. 364 * @param needed Total number of hashes that could have been saved. 365 * 371 366 */ 372 367 int udebug_thread_read(void **buffer, size_t buf_size, size_t *stored, 373 368 size_t *needed) 374 369 { 375 thread_t *t;376 link_t *cur;377 unative_t tid;378 size_t copied_ids;379 size_t extra_ids;380 ipl_t ipl;381 unative_t *id_buffer;382 int flags;383 size_t max_ids;384 385 370 LOG("udebug_thread_read()"); 386 371 387 372 /* Allocate a buffer to hold thread IDs */ 388 id_buffer = malloc(buf_size + 1, 0);389 390 mutex_lock(&TASK->udebug.lock); 391 373 unative_t *id_buffer = malloc(buf_size + 1, 0); 374 375 mutex_lock(&TASK->udebug.lock); 376 392 377 /* Verify task state */ 393 378 if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { … … 395 380 return EINVAL; 396 381 } 397 398 i pl = interrupts_disable();399 spinlock_lock(&TASK->lock);382 383 irq_spinlock_lock(&TASK->lock, true); 384 400 385 /* Copy down the thread IDs */ 401 402 max_ids = buf_size / sizeof(unative_t);403 copied_ids = 0;404 extra_ids = 0;405 386 387 size_t max_ids = buf_size / sizeof(unative_t); 388 size_t copied_ids = 0; 389 size_t extra_ids = 0; 390 406 391 /* FIXME: make sure the thread isn't past debug shutdown... */ 392 link_t *cur; 407 393 for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { 408 t = list_get_instance(cur, thread_t, th_link);409 410 spinlock_lock(&t->lock);411 flags = t->flags;412 spinlock_unlock(&t->lock);413 394 thread_t *thread = list_get_instance(cur, thread_t, th_link); 395 396 irq_spinlock_lock(&thread->lock, false); 397 int flags = thread->flags; 398 irq_spinlock_unlock(&thread->lock, false); 399 414 400 /* Not interested in kernel threads. */ 415 401 if ((flags & THREAD_FLAG_USPACE) == 0) 416 402 continue; 417 403 418 404 if (copied_ids < max_ids) { 419 405 /* Using thread struct pointer as identification hash */ 420 tid = (unative_t) t; 421 id_buffer[copied_ids++] = tid; 422 } else { 406 id_buffer[copied_ids++] = (unative_t) thread; 407 } else 423 408 extra_ids++; 424 } 425 } 426 427 spinlock_unlock(&TASK->lock); 428 interrupts_restore(ipl); 429 430 mutex_unlock(&TASK->udebug.lock); 431 409 } 410 411 irq_spinlock_unlock(&TASK->lock, true); 412 413 mutex_unlock(&TASK->udebug.lock); 414 432 415 *buffer = id_buffer; 433 416 *stored = copied_ids * sizeof(unative_t); 434 417 *needed = (copied_ids + extra_ids) * sizeof(unative_t); 435 418 436 419 return 0; 437 420 } … … 442 425 * Also returns the size of the data. 443 426 * 444 * @param data Place to store pointer to newly allocated block. 
445 * @param data_size Place to store size of the data. 446 * 447 * @returns EOK. 427 * @param data Place to store pointer to newly allocated block. 428 * @param data_size Place to store size of the data. 429 * 430 * @returns EOK. 431 * 448 432 */ 449 433 int udebug_name_read(char **data, size_t *data_size) 450 434 { 451 size_t name_size; 452 453 name_size = str_size(TASK->name) + 1; 435 size_t name_size = str_size(TASK->name) + 1; 436 454 437 *data = malloc(name_size, 0); 455 438 *data_size = name_size; 456 439 457 440 memcpy(*data, TASK->name, name_size); 458 441 459 442 return 0; 460 443 } … … 470 453 * this function will fail with an EINVAL error code. 471 454 * 472 * @param t Thread where call arguments are to be read. 473 * @param buffer Place to store pointer to new buffer. 474 * @return EOK on success, ENOENT if @a t is invalid, EINVAL 475 * if thread state is not valid for this operation. 476 */ 477 int udebug_args_read(thread_t *t, void **buffer) 478 { 479 int rc; 480 unative_t *arg_buffer; 481 455 * @param thread Thread where call arguments are to be read. 456 * @param buffer Place to store pointer to new buffer. 457 * 458 * @return EOK on success, ENOENT if @a t is invalid, EINVAL 459 * if thread state is not valid for this operation. 460 * 461 */ 462 int udebug_args_read(thread_t *thread, void **buffer) 463 { 482 464 /* Prepare a buffer to hold the arguments. */ 483 arg_buffer = malloc(6 * sizeof(unative_t), 0);484 465 unative_t *arg_buffer = malloc(6 * sizeof(unative_t), 0); 466 485 467 /* On success, this will lock t->udebug.lock. */ 486 rc = _thread_op_begin(t, false);487 if (rc != EOK) {468 int rc = _thread_op_begin(thread, false); 469 if (rc != EOK) 488 470 return rc; 489 } 490 471 491 472 /* Additionally we need to verify that we are inside a syscall. */ 492 if ( t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B&&493 t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {494 _thread_op_end(t );473 if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) && 474 (thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E)) { 475 _thread_op_end(thread); 495 476 return EINVAL; 496 477 } 497 478 498 479 /* Copy to a local buffer before releasing the lock. */ 499 memcpy(arg_buffer, t ->udebug.syscall_args, 6 * sizeof(unative_t));500 501 _thread_op_end(t );502 480 memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(unative_t)); 481 482 _thread_op_end(thread); 483 503 484 *buffer = arg_buffer; 504 485 return 0; … … 514 495 * call (as opposed to an exception). This is an implementation limit. 515 496 * 516 * @param t Thread whose state is to be read. 517 * @param buffer Place to store pointer to new buffer. 518 * @return EOK on success, ENOENT if @a t is invalid, EINVAL 519 * if thread is not in valid state, EBUSY if istate 520 * is not available. 521 */ 522 int udebug_regs_read(thread_t *t, void **buffer) 523 { 524 istate_t *state, *state_buf; 525 int rc; 526 497 * @param thread Thread whose state is to be read. 498 * @param buffer Place to store pointer to new buffer. 499 * 500 * @return EOK on success, ENOENT if @a t is invalid, EINVAL 501 * if thread is not in valid state, EBUSY if istate 502 * is not available. 503 * 504 */ 505 int udebug_regs_read(thread_t *thread, void **buffer) 506 { 527 507 /* Prepare a buffer to hold the data. 
*/ 528 state_buf = malloc(sizeof(istate_t), 0);529 508 istate_t *state_buf = malloc(sizeof(istate_t), 0); 509 530 510 /* On success, this will lock t->udebug.lock */ 531 rc = _thread_op_begin(t, false);532 if (rc != EOK) {511 int rc = _thread_op_begin(thread, false); 512 if (rc != EOK) 533 513 return rc; 534 } 535 536 state = t->udebug.uspace_state; 514 515 istate_t *state = thread->udebug.uspace_state; 537 516 if (state == NULL) { 538 _thread_op_end(t );517 _thread_op_end(thread); 539 518 return EBUSY; 540 519 } 541 520 542 521 /* Copy to the allocated buffer */ 543 522 memcpy(state_buf, state, sizeof(istate_t)); 544 545 _thread_op_end(t );546 523 524 _thread_op_end(thread); 525 547 526 *buffer = (void *) state_buf; 548 527 return 0; … … 555 534 * and a pointer to it is written into @a buffer. 556 535 * 557 * @param uspace_addr Address from where to start reading. 558 * @param n Number of bytes to read. 559 * @param buffer For storing a pointer to the allocated buffer. 536 * @param uspace_addr Address from where to start reading. 537 * @param n Number of bytes to read. 538 * @param buffer For storing a pointer to the allocated buffer. 539 * 560 540 */ 561 541 int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer) 562 542 { 563 void *data_buffer;564 int rc;565 566 543 /* Verify task state */ 567 544 mutex_lock(&TASK->udebug.lock); 568 545 569 546 if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) { 570 547 mutex_unlock(&TASK->udebug.lock); 571 548 return EBUSY; 572 549 } 573 574 data_buffer = malloc(n, 0); 575 576 /* NOTE: this is not strictly from a syscall... but that shouldn't 577 * be a problem */ 578 rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n); 579 mutex_unlock(&TASK->udebug.lock); 580 581 if (rc != 0) return rc; 582 550 551 void *data_buffer = malloc(n, 0); 552 553 /* 554 * NOTE: this is not strictly from a syscall... but that shouldn't 555 * be a problem 556 * 557 */ 558 int rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n); 559 mutex_unlock(&TASK->udebug.lock); 560 561 if (rc != 0) 562 return rc; 563 583 564 *buffer = data_buffer; 584 565 return 0;
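The key change in udebug_ops.c above is in _thread_op_begin(): the old sequence of interrupts_disable(), spinlock_lock(&threads_lock), spinlock_lock(&t->lock), spinlock_unlock(&threads_lock) becomes a validation under threads_lock followed by irq_spinlock_exchange(&threads_lock, &thread->lock), which hands the global list lock over to the thread's own lock so the thread cannot disappear in between. A minimal sketch of that hand-off, reusing only names that appear in this changeset; the wrapper function is illustrative and the later udebug checks are elided:

static int pin_userspace_thread(thread_t *thread)
{
	/* thread_exists() must be called with threads_lock held. */
	irq_spinlock_lock(&threads_lock, true);

	if (!thread_exists(thread)) {
		irq_spinlock_unlock(&threads_lock, true);
		return ENOENT;
	}

	/* Hand threads_lock over to thread->lock, replacing the old
	   hand-over-hand lock/unlock pair shown in the removed lines. */
	irq_spinlock_exchange(&threads_lock, &thread->lock);

	/* ... inspect thread->flags, thread->udebug.active, ... */

	/* thread->lock is now the outermost IRQ spinlock held here, so it
	   is released with true, restoring the interrupt state. */
	irq_spinlock_unlock(&thread->lock, true);

	return EOK;
}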