Changeset 8013637 in mainline for kernel/generic/src
- Timestamp:
- 2012-07-20T13:51:28Z (13 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8fccd42
- Parents:
- c5bff3c (diff), 7030bc9 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- kernel/generic/src
- Files:
-
- 1 added
- 25 edited
-
adt/btree.c (modified) (1 diff)
-
console/cmd.c (modified) (1 diff)
-
console/kconsole.c (modified) (8 diffs)
-
console/prompt.c (added)
-
debug/symtab.c (modified) (5 diffs)
-
ipc/ipc.c (modified) (1 diff)
-
ipc/kbox.c (modified) (1 diff)
-
ipc/sysipc.c (modified) (3 diffs)
-
lib/ra.c (modified) (7 diffs)
-
lib/rd.c (modified) (2 diffs)
-
main/kinit.c (modified) (6 diffs)
-
main/main.c (modified) (1 diff)
-
main/uinit.c (modified) (2 diffs)
-
mm/as.c (modified) (4 diffs)
-
mm/frame.c (modified) (4 diffs)
-
mm/slab.c (modified) (20 diffs)
-
mm/tlb.c (modified) (2 diffs)
-
proc/program.c (modified) (7 diffs)
-
proc/scheduler.c (modified) (6 diffs)
-
proc/task.c (modified) (1 diff)
-
proc/thread.c (modified) (15 diffs)
-
synch/mutex.c (modified) (3 diffs)
-
synch/spinlock.c (modified) (3 diffs)
-
sysinfo/sysinfo.c (modified) (1 diff)
-
udebug/udebug.c (modified) (1 diff)
-
udebug/udebug_ops.c (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/adt/btree.c
rc5bff3c r8013637 71 71 void btree_init(void) 72 72 { 73 btree_node_slab = slab_cache_create("btree_node_ slab",73 btree_node_slab = slab_cache_create("btree_node_t", 74 74 sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 75 75 } -
kernel/generic/src/console/cmd.c
rc5bff3c r8013637 724 724 thread_t *thread; 725 725 if ((thread = thread_create((void (*)(void *)) cmd_call0, 726 (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { 727 irq_spinlock_lock(&thread->lock, true); 728 thread->cpu = &cpus[i]; 729 irq_spinlock_unlock(&thread->lock, true); 730 726 (void *) argv, TASK, THREAD_FLAG_NONE, "call0"))) { 731 727 printf("cpu%u: ", i); 732 728 thread_wire(thread, &cpus[i]); 733 729 thread_ready(thread); 734 730 thread_join(thread); -
kernel/generic/src/console/kconsole.c
rc5bff3c r8013637 43 43 #include <console/chardev.h> 44 44 #include <console/cmd.h> 45 #include <console/prompt.h> 45 46 #include <print.h> 46 47 #include <panic.h> … … 201 202 * 202 203 */ 203 NO_TRACE static int cmdtab_compl(char *input, size_t size )204 NO_TRACE static int cmdtab_compl(char *input, size_t size, indev_t *indev) 204 205 { 205 206 const char *name = input; 206 207 207 208 size_t found = 0; 209 210 /* 211 * Maximum Match Length: Length of longest matching common 212 * substring in case more than one match is found. 213 */ 214 size_t max_match_len = size; 215 size_t max_match_len_tmp = size; 216 size_t input_len = str_length(input); 208 217 link_t *pos = NULL; 209 218 const char *hint; 210 219 char *output = malloc(MAX_CMDLINE, 0); 220 size_t hints_to_show = MAX_TAB_HINTS - 1; 221 size_t total_hints_shown = 0; 222 bool continue_showing_hints = true; 211 223 212 224 output[0] = 0; … … 218 230 pos = pos->next; 219 231 found++; 232 } 233 234 /* 235 * If the number of possible completions is more than MAX_TAB_HINTS, 236 * ask the user whether to display them or not. 
237 */ 238 if (found > MAX_TAB_HINTS) { 239 printf("\n"); 240 continue_showing_hints = 241 console_prompt_display_all_hints(indev, found); 220 242 } 221 243 … … 225 247 while (cmdtab_search_one(name, &pos)) { 226 248 cmd_info_t *hlp = list_get_instance(pos, cmd_info_t, link); 227 printf("%s (%s)\n", hlp->name, hlp->description); 249 250 if (continue_showing_hints) { 251 printf("%s (%s)\n", hlp->name, hlp->description); 252 --hints_to_show; 253 ++total_hints_shown; 254 255 if ((hints_to_show == 0) && (total_hints_shown != found)) { 256 /* Ask user to continue */ 257 continue_showing_hints = 258 console_prompt_more_hints(indev, &hints_to_show); 259 } 260 } 261 228 262 pos = pos->next; 229 } 263 264 for (max_match_len_tmp = 0; 265 (output[max_match_len_tmp] == 266 hlp->name[input_len + max_match_len_tmp]) && 267 (max_match_len_tmp < max_match_len); ++max_match_len_tmp); 268 269 max_match_len = max_match_len_tmp; 270 } 271 272 /* Keep only the characters common in all completions */ 273 output[max_match_len] = 0; 230 274 } 231 275 … … 280 324 continue; 281 325 282 /* Find the beginning of the word 283 and copy it to tmp */ 326 /* 327 * Find the beginning of the word 328 * and copy it to tmp 329 */ 284 330 size_t beg; 285 331 for (beg = position - 1; (beg > 0) && (!isspace(current[beg])); … … 294 340 if (beg == 0) { 295 341 /* Command completion */ 296 found = cmdtab_compl(tmp, STR_BOUNDS(MAX_CMDLINE) );342 found = cmdtab_compl(tmp, STR_BOUNDS(MAX_CMDLINE), indev); 297 343 } else { 298 344 /* Symbol completion */ 299 found = symtab_compl(tmp, STR_BOUNDS(MAX_CMDLINE) );345 found = symtab_compl(tmp, STR_BOUNDS(MAX_CMDLINE), indev); 300 346 } 301 347 302 348 if (found == 0) 303 349 continue; 304 305 if (found > 1) { 306 /* No unique hint, list was printed */ 307 printf("%s> ", prompt); 308 printf("%ls", current); 309 print_cc('\b', wstr_length(current) - position); 310 continue; 311 } 312 313 /* We have a hint */ 314 350 351 /* 352 * We have hints, possibly many. 
In case of more than one hint, 353 * tmp will contain the common prefix. 354 */ 315 355 size_t off = 0; 316 356 size_t i = 0; … … 318 358 if (!wstr_linsert(current, ch, position + i, MAX_CMDLINE)) 319 359 break; 360 320 361 i++; 321 362 } 363 364 if (found > 1) { 365 /* No unique hint, list was printed */ 366 printf("%s> ", prompt); 367 printf("%ls", current); 368 position += str_length(tmp); 369 print_cc('\b', wstr_length(current) - position); 370 continue; 371 } 372 373 /* We have a hint */ 322 374 323 375 printf("%ls", current + position); … … 540 592 /** Parse command line. 541 593 * 542 * @param cmdline Command line as read from input device. 594 * @param cmdline Command line as read from input device. 543 595 * @param size Size (in bytes) of the string. 544 596 * -
kernel/generic/src/debug/symtab.c
rc5bff3c r8013637 43 43 #include <typedefs.h> 44 44 #include <errno.h> 45 #include <console/prompt.h> 45 46 46 47 /** Get name of a symbol that seems most likely to correspond to address. … … 209 210 * 210 211 */ 211 int symtab_compl(char *input, size_t size )212 int symtab_compl(char *input, size_t size, indev_t *indev) 212 213 { 213 214 #ifdef CONFIG_SYMTAB … … 227 228 char output[MAX_SYMBOL_NAME]; 228 229 230 /* 231 * Maximum Match Length: Length of longest matching common substring in 232 * case more than one match is found. 233 */ 234 size_t max_match_len = size; 235 size_t max_match_len_tmp = size; 236 size_t input_len = str_length(input); 237 char *sym_name; 238 size_t hints_to_show = MAX_TAB_HINTS - 1; 239 size_t total_hints_shown = 0; 240 bool continue_showing_hints = true; 241 229 242 output[0] = 0; 243 244 while ((hint = symtab_search_one(name, &pos))) 245 pos++; 246 247 pos = 0; 230 248 231 249 while ((hint = symtab_search_one(name, &pos))) { … … 235 253 pos++; 236 254 found++; 255 } 256 257 /* 258 * If the number of possible completions is more than MAX_TAB_HINTS, 259 * ask the user whether to display them or not. 
260 */ 261 if (found > MAX_TAB_HINTS) { 262 printf("\n"); 263 continue_showing_hints = 264 console_prompt_display_all_hints(indev, found); 237 265 } 238 266 … … 241 269 pos = 0; 242 270 while (symtab_search_one(name, &pos)) { 243 printf("%s\n", symbol_table[pos].symbol_name);271 sym_name = symbol_table[pos].symbol_name; 244 272 pos++; 273 274 if (continue_showing_hints) { 275 /* We are still showing hints */ 276 printf("%s\n", sym_name); 277 --hints_to_show; 278 ++total_hints_shown; 279 280 if ((hints_to_show == 0) && (total_hints_shown != found)) { 281 /* Ask the user to continue */ 282 continue_showing_hints = 283 console_prompt_more_hints(indev, &hints_to_show); 284 } 285 } 286 287 for (max_match_len_tmp = 0; 288 (output[max_match_len_tmp] == 289 sym_name[input_len + max_match_len_tmp]) && 290 (max_match_len_tmp < max_match_len); ++max_match_len_tmp); 291 292 max_match_len = max_match_len_tmp; 245 293 } 294 295 /* Keep only the characters common in all completions */ 296 output[max_match_len] = 0; 246 297 } 247 298 -
kernel/generic/src/ipc/ipc.c
rc5bff3c r8013637 670 670 void ipc_init(void) 671 671 { 672 ipc_call_slab = slab_cache_create(" ipc_call", sizeof(call_t), 0, NULL,672 ipc_call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL, 673 673 NULL, 0); 674 ipc_answerbox_slab = slab_cache_create(" ipc_answerbox",674 ipc_answerbox_slab = slab_cache_create("answerbox_t", 675 675 sizeof(answerbox_t), 0, NULL, NULL, 0); 676 676 } -
kernel/generic/src/ipc/kbox.c
rc5bff3c r8013637 244 244 245 245 /* Create a kbox thread */ 246 thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 0,247 "kbox", false);246 thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 247 THREAD_FLAG_NONE, "kbox"); 248 248 if (!kb_thread) { 249 249 mutex_unlock(&task->kb.cleanup_lock); -
kernel/generic/src/ipc/sysipc.c
rc5bff3c r8013637 597 597 if (IPC_GET_IMETHOD(call->data) == IPC_M_CONNECT_TO_ME) { 598 598 int phoneid = phone_alloc(TASK); 599 if (phoneid < 0) { /* Failed to allocate phone */599 if (phoneid < 0) { /* Failed to allocate phone */ 600 600 IPC_SET_RETVAL(call->data, ELIMIT); 601 601 ipc_answer(box, call); … … 883 883 884 884 /* 885 * User space is not allowed to change interface and method of system885 * User space is not allowed to change interface and method of system 886 886 * methods on forward, allow changing ARG1, ARG2, ARG3 and ARG4 by 887 * means of method, arg1, arg2 and arg3.887 * means of imethod, arg1, arg2 and arg3. 888 888 * If the interface and method is immutable, don't change anything. 889 889 */ … … 897 897 IPC_SET_ARG3(call->data, arg2); 898 898 899 if (slow) {899 if (slow) 900 900 IPC_SET_ARG4(call->data, arg3); 901 /*902 * For system methods we deliberately don't903 * overwrite ARG5.904 */905 }901 902 /* 903 * For system methods we deliberately don't 904 * overwrite ARG5. 905 */ 906 906 } else { 907 907 IPC_SET_IMETHOD(call->data, imethod); -
kernel/generic/src/lib/ra.c
rc5bff3c r8013637 185 185 return NULL; 186 186 187 spinlock_initialize(&arena->lock, "arena_lock");187 irq_spinlock_initialize(&arena->lock, "arena_lock"); 188 188 list_initialize(&arena->spans); 189 189 … … 209 209 210 210 /* TODO: check for overlaps */ 211 spinlock_lock(&arena->lock);211 irq_spinlock_lock(&arena->lock, true); 212 212 list_append(&span->span_link, &arena->spans); 213 spinlock_unlock(&arena->lock);213 irq_spinlock_unlock(&arena->lock, true); 214 214 return true; 215 215 } … … 390 390 ASSERT(ispwr2(alignment)); 391 391 392 spinlock_lock(&arena->lock);392 irq_spinlock_lock(&arena->lock, true); 393 393 list_foreach(arena->spans, cur) { 394 394 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link); … … 398 398 break; 399 399 } 400 spinlock_unlock(&arena->lock);400 irq_spinlock_unlock(&arena->lock, true); 401 401 402 402 return base; … … 406 406 void ra_free(ra_arena_t *arena, uintptr_t base, size_t size) 407 407 { 408 spinlock_lock(&arena->lock);408 irq_spinlock_lock(&arena->lock, true); 409 409 list_foreach(arena->spans, cur) { 410 410 ra_span_t *span = list_get_instance(cur, ra_span_t, span_link); … … 412 412 if (iswithin(span->base, span->size, base, size)) { 413 413 ra_span_free(span, base, size); 414 spinlock_unlock(&arena->lock);414 irq_spinlock_unlock(&arena->lock, true); 415 415 return; 416 416 } 417 417 } 418 spinlock_unlock(&arena->lock);418 irq_spinlock_unlock(&arena->lock, true); 419 419 420 420 panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").", … … 424 424 void ra_init(void) 425 425 { 426 ra_segment_cache = slab_cache_create(" segment_cache",426 ra_segment_cache = slab_cache_create("ra_segment_t", 427 427 sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 428 428 } -
kernel/generic/src/lib/rd.c
rc5bff3c r8013637 38 38 */ 39 39 40 #include <print.h> 40 41 #include <lib/rd.h> 41 42 #include <mm/frame.h> … … 66 67 sysinfo_set_item_val("rd.size", NULL, size); 67 68 sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base); 69 70 printf("RAM disk at %p (size %zu bytes)\n", (void *) base, size); 68 71 } 69 72 -
kernel/generic/src/main/kinit.c
rc5bff3c r8013637 116 116 * Just a beautification. 117 117 */ 118 thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true); 118 thread = thread_create(kmp, NULL, TASK, 119 THREAD_FLAG_UNCOUNTED, "kmp"); 119 120 if (thread != NULL) { 120 irq_spinlock_lock(&thread->lock, false); 121 thread->cpu = &cpus[0]; 122 irq_spinlock_unlock(&thread->lock, false); 121 thread_wire(thread, &cpus[0]); 123 122 thread_ready(thread); 124 123 } else … … 134 133 135 134 for (i = 0; i < config.cpu_count; i++) { 136 thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true); 135 thread = thread_create(kcpulb, NULL, TASK, 136 THREAD_FLAG_UNCOUNTED, "kcpulb"); 137 137 if (thread != NULL) { 138 irq_spinlock_lock(&thread->lock, false); 139 thread->cpu = &cpus[i]; 140 irq_spinlock_unlock(&thread->lock, false); 138 thread_wire(thread, &cpus[i]); 141 139 thread_ready(thread); 142 140 } else … … 152 150 153 151 /* Start thread computing system load */ 154 thread = thread_create(kload, NULL, TASK, 0, "kload", false); 152 thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE, 153 "kload"); 155 154 if (thread != NULL) 156 155 thread_ready(thread); … … 163 162 * Create kernel console. 164 163 */ 165 thread = thread_create(kconsole_thread, NULL, TASK, 0, "kconsole", false); 164 thread = thread_create(kconsole_thread, NULL, TASK, 165 THREAD_FLAG_NONE, "kconsole"); 166 166 if (thread != NULL) 167 167 thread_ready(thread); … … 201 201 str_cpy(namebuf + INIT_PREFIX_LEN, 202 202 TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name); 203 203 204 204 /* 205 205 * Create virtual memory mappings for init task images. … … 236 236 init_rd((void *) init.tasks[i].paddr, init.tasks[i].size); 237 237 } else 238 printf("init[%zu]: Init binary load failed (error %d)\n", i, rc); 238 printf("init[%zu]: Init binary load failed " 239 "(error %d, loader status %u)\n", i, rc, 240 programs[i].loader_status); 239 241 } 240 242 -
kernel/generic/src/main/main.c
rc5bff3c r8013637 276 276 * Create the first thread. 277 277 */ 278 thread_t *kinit_thread = 279 thread_create(kinit, NULL, kernel, 0, "kinit", true);278 thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 279 THREAD_FLAG_UNCOUNTED, "kinit"); 280 280 if (!kinit_thread) 281 281 panic("Cannot create kinit thread."); -
kernel/generic/src/main/uinit.c
rc5bff3c r8013637 56 56 void uinit(void *arg) 57 57 { 58 uspace_arg_t uarg;59 60 58 /* 61 59 * So far, we don't have a use for joining userspace threads so we … … 72 70 #endif 73 71 74 uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry; 75 uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack; 76 uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg; 77 uarg.uspace_thread_function = NULL; 78 uarg.uspace_thread_arg = NULL; 72 uspace_arg_t *uarg = (uspace_arg_t *) arg; 73 uspace_arg_t local_uarg; 79 74 80 free((uspace_arg_t *) arg); 75 local_uarg.uspace_entry = uarg->uspace_entry; 76 local_uarg.uspace_stack = uarg->uspace_stack; 77 local_uarg.uspace_stack_size = uarg->uspace_stack_size; 78 local_uarg.uspace_uarg = uarg->uspace_uarg; 79 local_uarg.uspace_thread_function = NULL; 80 local_uarg.uspace_thread_arg = NULL; 81 81 82 userspace(&uarg); 82 free(uarg); 83 84 userspace(&local_uarg); 83 85 } 84 86 -
kernel/generic/src/mm/as.c
rc5bff3c r8013637 130 130 as_arch_init(); 131 131 132 as_slab = slab_cache_create("as_ slab", sizeof(as_t), 0,132 as_slab = slab_cache_create("as_t", sizeof(as_t), 0, 133 133 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); 134 134 … … 665 665 666 666 page_table_lock(as, false); 667 668 /*669 * Start TLB shootdown sequence.670 */671 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,672 area->base + P2SZ(pages), area->pages - pages);673 667 674 668 /* … … 726 720 } 727 721 722 /* 723 * Start TLB shootdown sequence. 724 * 725 * The sequence is rather short and can be 726 * repeated multiple times. The reason is that 727 * we don't want to have used_space_remove() 728 * inside the sequence as it may use a blocking 729 * memory allocation for its B+tree. Blocking 730 * while holding the tlblock spinlock is 731 * forbidden and would hit a kernel assertion. 732 */ 733 734 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, 735 as->asid, area->base + P2SZ(pages), 736 area->pages - pages); 737 728 738 for (; i < size; i++) { 729 739 pte_t *pte = page_mapping_find(as, … … 743 753 page_mapping_remove(as, ptr + P2SZ(i)); 744 754 } 755 756 /* 757 * Finish TLB shootdown sequence. 758 */ 759 760 tlb_invalidate_pages(as->asid, 761 area->base + P2SZ(pages), 762 area->pages - pages); 763 764 /* 765 * Invalidate software translation caches 766 * (e.g. TSB on sparc64, PHT on ppc32). 767 */ 768 as_invalidate_translation_cache(as, 769 area->base + P2SZ(pages), 770 area->pages - pages); 771 tlb_shootdown_finalize(ipl); 745 772 } 746 773 } 747 748 /*749 * Finish TLB shootdown sequence.750 */751 752 tlb_invalidate_pages(as->asid, area->base + P2SZ(pages),753 area->pages - pages);754 755 /*756 * Invalidate software translation caches757 * (e.g. TSB on sparc64, PHT on ppc32).758 */759 as_invalidate_translation_cache(as, area->base + P2SZ(pages),760 area->pages - pages);761 tlb_shootdown_finalize(ipl);762 763 774 page_table_unlock(as, false); 764 775 } else { -
kernel/generic/src/mm/frame.c
rc5bff3c r8013637 1086 1086 #endif 1087 1087 1088 /* 1089 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts 1090 * to prevent deadlock with TLB shootdown. 1091 */ 1092 ipl_t ipl = interrupts_disable(); 1088 1093 mutex_lock(&mem_avail_mtx); 1089 1094 … … 1098 1103 1099 1104 mutex_unlock(&mem_avail_mtx); 1105 interrupts_restore(ipl); 1100 1106 1101 1107 #ifdef CONFIG_DEBUG … … 1161 1167 * Signal that some memory has been freed. 1162 1168 */ 1169 1170 1171 /* 1172 * Since the mem_avail_mtx is an active mutex, we need to disable interrupts 1173 * to prevent deadlock with TLB shootdown. 1174 */ 1175 ipl_t ipl = interrupts_disable(); 1163 1176 mutex_lock(&mem_avail_mtx); 1164 1177 if (mem_avail_req > 0) … … 1170 1183 } 1171 1184 mutex_unlock(&mem_avail_mtx); 1185 interrupts_restore(ipl); 1172 1186 1173 1187 if (!(flags & FRAME_NO_RESERVE)) -
kernel/generic/src/mm/slab.c
rc5bff3c r8013637 264 264 freed = cache->destructor(obj); 265 265 266 spinlock_lock(&cache->slablock);266 irq_spinlock_lock(&cache->slablock, true); 267 267 ASSERT(slab->available < cache->objects); 268 268 … … 275 275 /* Free associated memory */ 276 276 list_remove(&slab->link); 277 spinlock_unlock(&cache->slablock);277 irq_spinlock_unlock(&cache->slablock, true); 278 278 279 279 return freed + slab_space_free(cache, slab); … … 284 284 } 285 285 286 spinlock_unlock(&cache->slablock);286 irq_spinlock_unlock(&cache->slablock, true); 287 287 return freed; 288 288 } … … 295 295 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags) 296 296 { 297 spinlock_lock(&cache->slablock);297 irq_spinlock_lock(&cache->slablock, true); 298 298 299 299 slab_t *slab; … … 308 308 * 309 309 */ 310 spinlock_unlock(&cache->slablock);310 irq_spinlock_unlock(&cache->slablock, true); 311 311 slab = slab_space_alloc(cache, flags); 312 312 if (!slab) 313 313 return NULL; 314 314 315 spinlock_lock(&cache->slablock);315 irq_spinlock_lock(&cache->slablock, true); 316 316 } else { 317 317 slab = list_get_instance(list_first(&cache->partial_slabs), … … 329 329 list_prepend(&slab->link, &cache->partial_slabs); 330 330 331 spinlock_unlock(&cache->slablock);331 irq_spinlock_unlock(&cache->slablock, true); 332 332 333 333 if ((cache->constructor) && (cache->constructor(obj, flags))) { … … 355 355 link_t *cur; 356 356 357 spinlock_lock(&cache->maglock);357 irq_spinlock_lock(&cache->maglock, true); 358 358 if (!list_empty(&cache->magazines)) { 359 359 if (first) … … 366 366 atomic_dec(&cache->magazine_counter); 367 367 } 368 369 spinlock_unlock(&cache->maglock); 368 irq_spinlock_unlock(&cache->maglock, true); 369 370 370 return mag; 371 371 } … … 377 377 slab_magazine_t *mag) 378 378 { 379 spinlock_lock(&cache->maglock);379 irq_spinlock_lock(&cache->maglock, true); 380 380 381 381 list_prepend(&mag->link, &cache->magazines); 382 382 atomic_inc(&cache->magazine_counter); 383 383 
384 spinlock_unlock(&cache->maglock);384 irq_spinlock_unlock(&cache->maglock, true); 385 385 } 386 386 … … 414 414 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 415 415 416 ASSERT( spinlock_locked(&cache->mag_cache[CPU->id].lock));416 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 417 417 418 418 if (cmag) { /* First try local CPU magazines */ … … 451 451 return NULL; 452 452 453 spinlock_lock(&cache->mag_cache[CPU->id].lock);453 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true); 454 454 455 455 slab_magazine_t *mag = get_full_current_mag(cache); 456 456 if (!mag) { 457 spinlock_unlock(&cache->mag_cache[CPU->id].lock);457 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 458 458 return NULL; 459 459 } 460 460 461 461 void *obj = mag->objs[--mag->busy]; 462 spinlock_unlock(&cache->mag_cache[CPU->id].lock);462 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 463 463 464 464 atomic_dec(&cache->cached_objs); … … 481 481 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 482 482 483 ASSERT( spinlock_locked(&cache->mag_cache[CPU->id].lock));483 ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock)); 484 484 485 485 if (cmag) { … … 531 531 return -1; 532 532 533 spinlock_lock(&cache->mag_cache[CPU->id].lock);533 irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true); 534 534 535 535 slab_magazine_t *mag = make_empty_current_mag(cache); 536 536 if (!mag) { 537 spinlock_unlock(&cache->mag_cache[CPU->id].lock);537 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 538 538 return -1; 539 539 } … … 541 541 mag->objs[mag->busy++] = obj; 542 542 543 spinlock_unlock(&cache->mag_cache[CPU->id].lock);543 irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true); 544 544 545 545 atomic_inc(&cache->cached_objs); … … 593 593 for (i = 0; i < config.cpu_count; i++) { 594 594 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); 595 spinlock_initialize(&cache->mag_cache[i].lock,595 
irq_spinlock_initialize(&cache->mag_cache[i].lock, 596 596 "slab.cache.mag_cache[].lock"); 597 597 } … … 624 624 list_initialize(&cache->magazines); 625 625 626 spinlock_initialize(&cache->slablock, "slab.cache.slablock");627 spinlock_initialize(&cache->maglock, "slab.cache.maglock");626 irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock"); 627 irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock"); 628 628 629 629 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) … … 704 704 size_t i; 705 705 for (i = 0; i < config.cpu_count; i++) { 706 spinlock_lock(&cache->mag_cache[i].lock);706 irq_spinlock_lock(&cache->mag_cache[i].lock, true); 707 707 708 708 mag = cache->mag_cache[i].current; … … 716 716 cache->mag_cache[i].last = NULL; 717 717 718 spinlock_unlock(&cache->mag_cache[i].lock);718 irq_spinlock_unlock(&cache->mag_cache[i].lock, true); 719 719 } 720 720 } … … 891 891 { 892 892 /* Initialize magazine cache */ 893 _slab_cache_create(&mag_cache, "slab_magazine ",893 _slab_cache_create(&mag_cache, "slab_magazine_t", 894 894 sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*), 895 895 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | … … 897 897 898 898 /* Initialize slab_cache cache */ 899 _slab_cache_create(&slab_cache_cache, "slab_cache ",899 _slab_cache_create(&slab_cache_cache, "slab_cache_cache", 900 900 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 901 901 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 902 902 903 903 /* Initialize external slab cache */ 904 slab_extern_cache = slab_cache_create("slab_ extern", sizeof(slab_t), 0,904 slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0, 905 905 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 906 906 -
kernel/generic/src/mm/tlb.c
rc5bff3c r8013637 162 162 163 163 size_t i; 164 for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) {164 for (i = 0; i < CPU->tlb_messages_count; i++) { 165 165 tlb_invalidate_type_t type = CPU->tlb_messages[i].type; 166 166 asid_t asid = CPU->tlb_messages[i].asid; … … 188 188 } 189 189 190 CPU->tlb_messages_count = 0; 190 191 irq_spinlock_unlock(&CPU->lock, false); 191 192 CPU->tlb_active = true; -
kernel/generic/src/proc/program.c
rc5bff3c r8013637 71 71 int program_create(as_t *as, uintptr_t entry_addr, char *name, program_t *prg) 72 72 { 73 uspace_arg_t *kernel_uarg; 74 75 kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); 76 kernel_uarg->uspace_entry = (void *) entry_addr; 77 kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; 78 kernel_uarg->uspace_thread_function = NULL; 79 kernel_uarg->uspace_thread_arg = NULL; 80 kernel_uarg->uspace_uarg = NULL; 81 73 prg->loader_status = EE_OK; 82 74 prg->task = task_create(as, name); 83 75 if (!prg->task) … … 91 83 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 92 84 STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0); 93 if (!area) 85 if (!area) { 86 task_destroy(prg->task); 94 87 return ENOMEM; 88 } 89 90 uspace_arg_t *kernel_uarg = (uspace_arg_t *) 91 malloc(sizeof(uspace_arg_t), 0); 92 93 kernel_uarg->uspace_entry = (void *) entry_addr; 94 kernel_uarg->uspace_stack = (void *) virt; 95 kernel_uarg->uspace_stack_size = STACK_SIZE; 96 kernel_uarg->uspace_thread_function = NULL; 97 kernel_uarg->uspace_thread_arg = NULL; 98 kernel_uarg->uspace_uarg = NULL; 95 99 96 100 /* … … 98 102 */ 99 103 prg->main_thread = thread_create(uinit, kernel_uarg, prg->task, 100 THREAD_FLAG_USPACE, "uinit", false); 101 if (!prg->main_thread) 104 THREAD_FLAG_USPACE, "uinit"); 105 if (!prg->main_thread) { 106 free(kernel_uarg); 107 as_area_destroy(as, virt); 108 task_destroy(prg->task); 102 109 return ELIMIT; 110 } 103 111 104 112 return EOK; … … 111 119 * executable image. The task is returned in *task. 112 120 * 113 * @param image_addr Address of an executable program image. 114 * @param name Name to set for the program's task. 115 * @param prg Buffer for storing program info. If image_addr 116 * points to a loader image, p->task will be set to 117 * NULL and EOK will be returned. 121 * @param[in] image_addr Address of an executable program image. 122 * @param[in] name Name to set for the program's task. 
123 * @param[out] prg Buffer for storing program info. 124 * If image_addr points to a loader image, 125 * prg->task will be set to NULL and EOK 126 * will be returned. 118 127 * 119 128 * @return EOK on success or negative error code. … … 126 135 return ENOMEM; 127 136 128 unsigned int rc= elf_load((elf_header_t *) image_addr, as, 0);129 if ( rc!= EE_OK) {137 prg->loader_status = elf_load((elf_header_t *) image_addr, as, 0); 138 if (prg->loader_status != EE_OK) { 130 139 as_destroy(as); 131 140 prg->task = NULL; 132 141 prg->main_thread = NULL; 133 142 134 if ( rc!= EE_LOADER)143 if (prg->loader_status != EE_LOADER) 135 144 return ENOTSUP; 136 145 … … 140 149 141 150 program_loader = image_addr; 142 LOG("Registered program loader at %p", 143 (void *) image_addr); 151 printf("Program loader at %p\n", (void *) image_addr); 144 152 145 153 return EOK; … … 171 179 } 172 180 173 unsigned int rc= elf_load((elf_header_t *) program_loader, as,181 prg->loader_status = elf_load((elf_header_t *) program_loader, as, 174 182 ELD_F_LOADER); 175 if ( rc!= EE_OK) {183 if (prg->loader_status != EE_OK) { 176 184 as_destroy(as); 177 printf("Cannot spawn loader (%s)\n", elf_error(rc)); 185 printf("Cannot spawn loader (%s)\n", 186 elf_error(prg->loader_status)); 178 187 return ENOENT; 179 188 } -
kernel/generic/src/proc/scheduler.c
rc5bff3c r8013637 98 98 else { 99 99 fpu_init(); 100 THREAD->fpu_context_exists = 1;100 THREAD->fpu_context_exists = true; 101 101 } 102 102 #endif … … 142 142 143 143 /* Don't prevent migration */ 144 CPU->fpu_owner->fpu_context_engaged = 0;144 CPU->fpu_owner->fpu_context_engaged = false; 145 145 irq_spinlock_unlock(&CPU->fpu_owner->lock, false); 146 146 CPU->fpu_owner = NULL; … … 163 163 } 164 164 fpu_init(); 165 THREAD->fpu_context_exists = 1;165 THREAD->fpu_context_exists = true; 166 166 } 167 167 168 168 CPU->fpu_owner = THREAD; 169 THREAD->fpu_context_engaged = 1;169 THREAD->fpu_context_engaged = true; 170 170 irq_spinlock_unlock(&THREAD->lock, false); 171 171 … … 248 248 249 249 /* 250 * Clear the THREAD_FLAG_STOLEN flag so thatt can be migrated250 * Clear the stolen flag so that it can be migrated 251 251 * when load balancing needs emerge. 252 252 */ 253 thread-> flags &= ~THREAD_FLAG_STOLEN;253 thread->stolen = false; 254 254 irq_spinlock_unlock(&thread->lock, false); 255 255 … … 630 630 irq_spinlock_lock(&thread->lock, false); 631 631 632 if (!(thread->flags & THREAD_FLAG_WIRED) && 633 !(thread->flags & THREAD_FLAG_STOLEN) && 634 !thread->nomigrate && 635 !thread->fpu_context_engaged) { 632 if ((!thread->wired) && (!thread->stolen) && 633 (!thread->nomigrate) && 634 (!thread->fpu_context_engaged)) { 636 635 /* 637 636 * Remove thread from ready queue. … … 670 669 #endif 671 670 672 thread-> flags |= THREAD_FLAG_STOLEN;671 thread->stolen = true; 673 672 thread->state = Entering; 674 673 -
kernel/generic/src/proc/task.c
rc5bff3c r8013637 90 90 TASK = NULL; 91 91 avltree_create(&tasks_tree); 92 task_slab = slab_cache_create("task_ slab", sizeof(task_t), 0,92 task_slab = slab_cache_create("task_t", sizeof(task_t), 0, 93 93 tsk_constructor, NULL, 0); 94 94 } -
kernel/generic/src/proc/thread.c
rc5bff3c r8013637 191 191 kmflags |= FRAME_LOWMEM; 192 192 kmflags &= ~FRAME_HIGHMEM; 193 193 194 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 195 if (!thread->kstack) { … … 236 236 237 237 atomic_set(&nrdy, 0); 238 thread_slab = slab_cache_create("thread_ slab", sizeof(thread_t), 0,238 thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0, 239 239 thr_constructor, thr_destructor, 0); 240 240 241 241 #ifdef CONFIG_FPU 242 fpu_context_slab = slab_cache_create("fpu_ slab", sizeof(fpu_context_t),243 FPU_CONTEXT_ALIGN, NULL, NULL, 0);242 fpu_context_slab = slab_cache_create("fpu_context_t", 243 sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0); 244 244 #endif 245 245 … … 247 247 } 248 248 249 /** Wire thread to the given CPU 250 * 251 * @param cpu CPU to wire the thread to. 252 * 253 */ 254 void thread_wire(thread_t *thread, cpu_t *cpu) 255 { 256 irq_spinlock_lock(&thread->lock, true); 257 thread->cpu = cpu; 258 thread->wired = true; 259 irq_spinlock_unlock(&thread->lock, true); 260 } 261 249 262 /** Make thread ready 250 263 * … … 260 273 ASSERT(thread->state != Ready); 261 274 262 int i = (thread->priority < RQ_COUNT - 1) 263 ?++thread->priority : thread->priority;264 265 cpu_t *cpu = CPU;266 if (thread-> flags & THREAD_FLAG_WIRED) {275 int i = (thread->priority < RQ_COUNT - 1) ? 276 ++thread->priority : thread->priority; 277 278 cpu_t *cpu; 279 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) { 267 280 ASSERT(thread->cpu != NULL); 268 281 cpu = thread->cpu; 269 } 282 } else 283 cpu = CPU; 284 270 285 thread->state = Ready; 271 286 … … 298 313 * @param flags Thread flags. 299 314 * @param name Symbolic name (a copy is made). 300 * @param uncounted Thread's accounting doesn't affect accumulated task301 * accounting.302 315 * 303 316 * @return New thread's structure on success, NULL on failure. 
… … 305 318 */ 306 319 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, 307 unsigned int flags, const char *name, bool uncounted)320 thread_flags_t flags, const char *name) 308 321 { 309 322 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0); … … 335 348 thread->ucycles = 0; 336 349 thread->kcycles = 0; 337 thread->uncounted = uncounted; 350 thread->uncounted = 351 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED); 338 352 thread->priority = -1; /* Start in rq[0] */ 339 353 thread->cpu = NULL; 340 thread->flags = flags; 354 thread->wired = false; 355 thread->stolen = false; 356 thread->uspace = 357 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE); 358 341 359 thread->nomigrate = 0; 342 360 thread->state = Entering; … … 356 374 thread->task = task; 357 375 358 thread->fpu_context_exists = 0;359 thread->fpu_context_engaged = 0;376 thread->fpu_context_exists = false; 377 thread->fpu_context_engaged = false; 360 378 361 379 avltree_node_initialize(&thread->threads_tree_node); … … 371 389 thread_create_arch(thread); 372 390 373 if ( !(flags & THREAD_FLAG_NOATTACH))391 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) 374 392 thread_attach(thread, task); 375 393 … … 437 455 438 456 /* Must not count kbox thread into lifecount */ 439 if (thread-> flags & THREAD_FLAG_USPACE)457 if (thread->uspace) 440 458 atomic_inc(&task->lifecount); 441 459 … … 459 477 void thread_exit(void) 460 478 { 461 if (THREAD-> flags & THREAD_FLAG_USPACE) {479 if (THREAD->uspace) { 462 480 #ifdef CONFIG_UDEBUG 463 481 /* Generate udebug THREAD_E event */ 464 482 udebug_thread_e_event(); 465 483 466 484 /* 467 485 * This thread will not execute any code or system calls from … … 506 524 { 507 525 ASSERT(THREAD); 508 526 509 527 THREAD->nomigrate++; 510 528 } … … 515 533 ASSERT(THREAD); 516 534 ASSERT(THREAD->nomigrate > 0); 517 518 THREAD->nomigrate--; 535 536 if (THREAD->nomigrate > 0) 537 THREAD->nomigrate--; 519 538 } 520 539 … … 854 873 * In 
case of failure, kernel_uarg will be deallocated in this function. 855 874 * In case of success, kernel_uarg will be freed in uinit(). 856 *857 875 */ 858 876 uspace_arg_t *kernel_uarg = … … 866 884 867 885 thread_t *thread = thread_create(uinit, kernel_uarg, TASK, 868 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf , false);886 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf); 869 887 if (thread) { 870 888 if (uspace_thread_id != NULL) { -
kernel/generic/src/synch/mutex.c
rc5bff3c r8013637 40 40 #include <debug.h> 41 41 #include <arch.h> 42 #include <stacktrace.h> 42 43 43 44 /** Initialize mutex. … … 61 62 return semaphore_count_get(&mtx->sem) <= 0; 62 63 } 64 65 #define MUTEX_DEADLOCK_THRESHOLD 100000000 63 66 64 67 /** Acquire mutex. … … 87 90 ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE)); 88 91 92 unsigned int cnt = 0; 93 bool deadlock_reported = false; 89 94 do { 95 if (cnt++ > MUTEX_DEADLOCK_THRESHOLD) { 96 printf("cpu%u: looping on active mutex %p\n", 97 CPU->id, mtx); 98 stack_trace(); 99 cnt = 0; 100 deadlock_reported = true; 101 } 90 102 rc = semaphore_trydown(&mtx->sem); 91 103 } while (SYNCH_FAILED(rc) && 92 104 !(flags & SYNCH_FLAGS_NON_BLOCKING)); 105 if (deadlock_reported) 106 printf("cpu%u: not deadlocked\n", CPU->id); 93 107 } 94 108 -
kernel/generic/src/synch/spinlock.c
rc5bff3c r8013637 44 44 #include <debug.h> 45 45 #include <symtab.h> 46 #include <stacktrace.h> 46 47 47 48 #ifdef CONFIG_SMP … … 104 105 "caller=%p (%s)\n", CPU->id, lock, lock->name, 105 106 (void *) CALLER, symtab_fmt_name_lookup(CALLER)); 107 stack_trace(); 106 108 107 109 i = 0; … … 260 262 int rc = spinlock_trylock(&(lock->lock)); 261 263 262 ASSERT_IRQ_SPINLOCK( !lock->guard, lock);264 ASSERT_IRQ_SPINLOCK((!rc) || (!lock->guard), lock); 263 265 return rc; 264 266 } -
kernel/generic/src/sysinfo/sysinfo.c
rc5bff3c r8013637 97 97 void sysinfo_init(void) 98 98 { 99 sysinfo_item_slab = slab_cache_create("sysinfo_item_ slab",99 sysinfo_item_slab = slab_cache_create("sysinfo_item_t", 100 100 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor, 101 101 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED); -
kernel/generic/src/udebug/udebug.c
rc5bff3c r8013637 410 410 411 411 mutex_lock(&thread->udebug.lock); 412 unsigned int flags = thread->flags;413 412 414 413 /* Only process userspace threads. */ 415 if ( (flags & THREAD_FLAG_USPACE) != 0) {414 if (thread->uspace) { 416 415 /* Prevent any further debug activity in thread. */ 417 416 thread->udebug.active = false; -
kernel/generic/src/udebug/udebug_ops.c
rc5bff3c r8013637 95 95 96 96 /* Verify that 'thread' is a userspace thread. */ 97 if ( (thread->flags & THREAD_FLAG_USPACE) == 0) {97 if (!thread->uspace) { 98 98 /* It's not, deny its existence */ 99 99 irq_spinlock_unlock(&thread->lock, true); … … 200 200 201 201 mutex_lock(&thread->udebug.lock); 202 if ( (thread->flags & THREAD_FLAG_USPACE) != 0) {202 if (thread->uspace) { 203 203 thread->udebug.active = true; 204 204 mutex_unlock(&thread->udebug.lock); … … 393 393 394 394 irq_spinlock_lock(&thread->lock, false); 395 int flags = thread->flags;395 bool uspace = thread->uspace; 396 396 irq_spinlock_unlock(&thread->lock, false); 397 397 398 398 /* Not interested in kernel threads. */ 399 if ( (flags & THREAD_FLAG_USPACE) == 0)399 if (!uspace) 400 400 continue; 401 401
Note:
See TracChangeset
for help on using the changeset viewer.
