Changeset e6a78b9 in mainline for kernel/generic/src
- Timestamp:
- 2012-06-29T15:31:44Z (13 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 9432f08
- Parents:
- 34ab31c0 (diff), 0bbd13e (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel/generic/src
- Files:
-
- 21 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/adt/btree.c
r34ab31c0 re6a78b9 71 71 void btree_init(void) 72 72 { 73 btree_node_slab = slab_cache_create("btree_node_ slab",73 btree_node_slab = slab_cache_create("btree_node_t", 74 74 sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 75 75 } -
kernel/generic/src/console/cmd.c
r34ab31c0 re6a78b9 724 724 thread_t *thread; 725 725 if ((thread = thread_create((void (*)(void *)) cmd_call0, 726 (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) { 727 irq_spinlock_lock(&thread->lock, true); 728 thread->cpu = &cpus[i]; 729 irq_spinlock_unlock(&thread->lock, true); 730 726 (void *) argv, TASK, THREAD_FLAG_NONE, "call0"))) { 731 727 printf("cpu%u: ", i); 732 728 thread_wire(thread, &cpus[i]); 733 729 thread_ready(thread); 734 730 thread_join(thread); -
kernel/generic/src/ipc/ipc.c
r34ab31c0 re6a78b9 670 670 void ipc_init(void) 671 671 { 672 ipc_call_slab = slab_cache_create(" ipc_call", sizeof(call_t), 0, NULL,672 ipc_call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL, 673 673 NULL, 0); 674 ipc_answerbox_slab = slab_cache_create(" ipc_answerbox",674 ipc_answerbox_slab = slab_cache_create("answerbox_t", 675 675 sizeof(answerbox_t), 0, NULL, NULL, 0); 676 676 } -
kernel/generic/src/ipc/kbox.c
r34ab31c0 re6a78b9 244 244 245 245 /* Create a kbox thread */ 246 thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 0,247 "kbox", false);246 thread_t *kb_thread = thread_create(kbox_thread_proc, NULL, task, 247 THREAD_FLAG_NONE, "kbox"); 248 248 if (!kb_thread) { 249 249 mutex_unlock(&task->kb.cleanup_lock); -
kernel/generic/src/ipc/sysipc.c
r34ab31c0 re6a78b9 597 597 if (IPC_GET_IMETHOD(call->data) == IPC_M_CONNECT_TO_ME) { 598 598 int phoneid = phone_alloc(TASK); 599 if (phoneid < 0) { /* Failed to allocate phone */599 if (phoneid < 0) { /* Failed to allocate phone */ 600 600 IPC_SET_RETVAL(call->data, ELIMIT); 601 601 ipc_answer(box, call); … … 883 883 884 884 /* 885 * User space is not allowed to change interface and method of system885 * User space is not allowed to change interface and method of system 886 886 * methods on forward, allow changing ARG1, ARG2, ARG3 and ARG4 by 887 * means of method, arg1, arg2 and arg3.887 * means of imethod, arg1, arg2 and arg3. 888 888 * If the interface and method is immutable, don't change anything. 889 889 */ … … 897 897 IPC_SET_ARG3(call->data, arg2); 898 898 899 if (slow) {899 if (slow) 900 900 IPC_SET_ARG4(call->data, arg3); 901 /*902 * For system methods we deliberately don't903 * overwrite ARG5.904 */905 }901 902 /* 903 * For system methods we deliberately don't 904 * overwrite ARG5. 905 */ 906 906 } else { 907 907 IPC_SET_IMETHOD(call->data, imethod); -
kernel/generic/src/lib/ra.c
r34ab31c0 re6a78b9 424 424 void ra_init(void) 425 425 { 426 ra_segment_cache = slab_cache_create(" segment_cache",426 ra_segment_cache = slab_cache_create("ra_segment_t", 427 427 sizeof(ra_segment_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 428 428 } -
kernel/generic/src/lib/rd.c
r34ab31c0 re6a78b9 38 38 */ 39 39 40 #include <print.h> 40 41 #include <lib/rd.h> 41 42 #include <mm/frame.h> … … 66 67 sysinfo_set_item_val("rd.size", NULL, size); 67 68 sysinfo_set_item_val("rd.address.physical", NULL, (sysarg_t) base); 69 70 printf("RAM disk at %p (size %zu bytes)\n", (void *) base, size); 68 71 } 69 72 -
kernel/generic/src/main/kinit.c
r34ab31c0 re6a78b9 116 116 * Just a beautification. 117 117 */ 118 thread = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true); 118 thread = thread_create(kmp, NULL, TASK, 119 THREAD_FLAG_UNCOUNTED, "kmp"); 119 120 if (thread != NULL) { 120 irq_spinlock_lock(&thread->lock, false); 121 thread->cpu = &cpus[0]; 122 irq_spinlock_unlock(&thread->lock, false); 121 thread_wire(thread, &cpus[0]); 123 122 thread_ready(thread); 124 123 } else … … 134 133 135 134 for (i = 0; i < config.cpu_count; i++) { 136 thread = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true); 135 thread = thread_create(kcpulb, NULL, TASK, 136 THREAD_FLAG_UNCOUNTED, "kcpulb"); 137 137 if (thread != NULL) { 138 irq_spinlock_lock(&thread->lock, false); 139 thread->cpu = &cpus[i]; 140 irq_spinlock_unlock(&thread->lock, false); 138 thread_wire(thread, &cpus[i]); 141 139 thread_ready(thread); 142 140 } else … … 152 150 153 151 /* Start thread computing system load */ 154 thread = thread_create(kload, NULL, TASK, 0, "kload", false); 152 thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE, 153 "kload"); 155 154 if (thread != NULL) 156 155 thread_ready(thread); … … 163 162 * Create kernel console. 164 163 */ 165 thread = thread_create(kconsole_thread, NULL, TASK, 0, "kconsole", false); 164 thread = thread_create(kconsole_thread, NULL, TASK, 165 THREAD_FLAG_NONE, "kconsole"); 166 166 if (thread != NULL) 167 167 thread_ready(thread); … … 201 201 str_cpy(namebuf + INIT_PREFIX_LEN, 202 202 TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name); 203 203 204 204 /* 205 205 * Create virtual memory mappings for init task images. … … 236 236 init_rd((void *) init.tasks[i].paddr, init.tasks[i].size); 237 237 } else 238 printf("init[%zu]: Init binary load failed (error %d)\n", i, rc); 238 printf("init[%zu]: Init binary load failed " 239 "(error %d, loader status %u)\n", i, rc, 240 programs[i].loader_status); 239 241 } 240 242 -
kernel/generic/src/main/main.c
r34ab31c0 re6a78b9 276 276 * Create the first thread. 277 277 */ 278 thread_t *kinit_thread = 279 thread_create(kinit, NULL, kernel, 0, "kinit", true);278 thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 279 THREAD_FLAG_UNCOUNTED, "kinit"); 280 280 if (!kinit_thread) 281 281 panic("Cannot create kinit thread."); -
kernel/generic/src/main/uinit.c
r34ab31c0 re6a78b9 56 56 void uinit(void *arg) 57 57 { 58 uspace_arg_t uarg;59 60 58 /* 61 59 * So far, we don't have a use for joining userspace threads so we … … 72 70 #endif 73 71 74 uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry; 75 uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack; 76 uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg; 77 uarg.uspace_thread_function = NULL; 78 uarg.uspace_thread_arg = NULL; 72 uspace_arg_t *uarg = (uspace_arg_t *) arg; 73 uspace_arg_t local_uarg; 79 74 80 free((uspace_arg_t *) arg); 75 local_uarg.uspace_entry = uarg->uspace_entry; 76 local_uarg.uspace_stack = uarg->uspace_stack; 77 local_uarg.uspace_stack_size = uarg->uspace_stack_size; 78 local_uarg.uspace_uarg = uarg->uspace_uarg; 79 local_uarg.uspace_thread_function = NULL; 80 local_uarg.uspace_thread_arg = NULL; 81 81 82 userspace(&uarg); 82 free(uarg); 83 84 userspace(&local_uarg); 83 85 } 84 86 -
kernel/generic/src/mm/as.c
r34ab31c0 re6a78b9 130 130 as_arch_init(); 131 131 132 as_slab = slab_cache_create("as_ slab", sizeof(as_t), 0,132 as_slab = slab_cache_create("as_t", sizeof(as_t), 0, 133 133 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); 134 134 -
kernel/generic/src/mm/slab.c
r34ab31c0 re6a78b9 891 891 { 892 892 /* Initialize magazine cache */ 893 _slab_cache_create(&mag_cache, "slab_magazine ",893 _slab_cache_create(&mag_cache, "slab_magazine_t", 894 894 sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*), 895 895 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | … … 897 897 898 898 /* Initialize slab_cache cache */ 899 _slab_cache_create(&slab_cache_cache, "slab_cache ",899 _slab_cache_create(&slab_cache_cache, "slab_cache_cache", 900 900 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 901 901 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 902 902 903 903 /* Initialize external slab cache */ 904 slab_extern_cache = slab_cache_create("slab_ extern", sizeof(slab_t), 0,904 slab_extern_cache = slab_cache_create("slab_t", sizeof(slab_t), 0, 905 905 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 906 906 -
kernel/generic/src/proc/program.c
r34ab31c0 re6a78b9 71 71 int program_create(as_t *as, uintptr_t entry_addr, char *name, program_t *prg) 72 72 { 73 uspace_arg_t *kernel_uarg; 74 75 kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); 76 kernel_uarg->uspace_entry = (void *) entry_addr; 77 kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; 78 kernel_uarg->uspace_thread_function = NULL; 79 kernel_uarg->uspace_thread_arg = NULL; 80 kernel_uarg->uspace_uarg = NULL; 81 73 prg->loader_status = EE_OK; 82 74 prg->task = task_create(as, name); 83 75 if (!prg->task) … … 91 83 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, 92 84 STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0); 93 if (!area) 85 if (!area) { 86 task_destroy(prg->task); 94 87 return ENOMEM; 88 } 89 90 uspace_arg_t *kernel_uarg = (uspace_arg_t *) 91 malloc(sizeof(uspace_arg_t), 0); 92 93 kernel_uarg->uspace_entry = (void *) entry_addr; 94 kernel_uarg->uspace_stack = (void *) virt; 95 kernel_uarg->uspace_stack_size = STACK_SIZE; 96 kernel_uarg->uspace_thread_function = NULL; 97 kernel_uarg->uspace_thread_arg = NULL; 98 kernel_uarg->uspace_uarg = NULL; 95 99 96 100 /* … … 98 102 */ 99 103 prg->main_thread = thread_create(uinit, kernel_uarg, prg->task, 100 THREAD_FLAG_USPACE, "uinit", false); 101 if (!prg->main_thread) 104 THREAD_FLAG_USPACE, "uinit"); 105 if (!prg->main_thread) { 106 free(kernel_uarg); 107 as_area_destroy(as, virt); 108 task_destroy(prg->task); 102 109 return ELIMIT; 110 } 103 111 104 112 return EOK; … … 111 119 * executable image. The task is returned in *task. 112 120 * 113 * @param image_addr Address of an executable program image. 114 * @param name Name to set for the program's task. 115 * @param prg Buffer for storing program info. If image_addr 116 * points to a loader image, p->task will be set to 117 * NULL and EOK will be returned. 121 * @param[in] image_addr Address of an executable program image. 122 * @param[in] name Name to set for the program's task. 
123 * @param[out] prg Buffer for storing program info. 124 * If image_addr points to a loader image, 125 * prg->task will be set to NULL and EOK 126 * will be returned. 118 127 * 119 128 * @return EOK on success or negative error code. … … 126 135 return ENOMEM; 127 136 128 unsigned int rc= elf_load((elf_header_t *) image_addr, as, 0);129 if ( rc!= EE_OK) {137 prg->loader_status = elf_load((elf_header_t *) image_addr, as, 0); 138 if (prg->loader_status != EE_OK) { 130 139 as_destroy(as); 131 140 prg->task = NULL; 132 141 prg->main_thread = NULL; 133 142 134 if ( rc!= EE_LOADER)143 if (prg->loader_status != EE_LOADER) 135 144 return ENOTSUP; 136 145 … … 140 149 141 150 program_loader = image_addr; 142 LOG("Registered program loader at %p", 143 (void *) image_addr); 151 printf("Program loader at %p\n", (void *) image_addr); 144 152 145 153 return EOK; … … 171 179 } 172 180 173 unsigned int rc= elf_load((elf_header_t *) program_loader, as,181 prg->loader_status = elf_load((elf_header_t *) program_loader, as, 174 182 ELD_F_LOADER); 175 if ( rc!= EE_OK) {183 if (prg->loader_status != EE_OK) { 176 184 as_destroy(as); 177 printf("Cannot spawn loader (%s)\n", elf_error(rc)); 185 printf("Cannot spawn loader (%s)\n", 186 elf_error(prg->loader_status)); 178 187 return ENOENT; 179 188 } -
kernel/generic/src/proc/scheduler.c
r34ab31c0 re6a78b9 98 98 else { 99 99 fpu_init(); 100 THREAD->fpu_context_exists = 1;100 THREAD->fpu_context_exists = true; 101 101 } 102 102 #endif … … 142 142 143 143 /* Don't prevent migration */ 144 CPU->fpu_owner->fpu_context_engaged = 0;144 CPU->fpu_owner->fpu_context_engaged = false; 145 145 irq_spinlock_unlock(&CPU->fpu_owner->lock, false); 146 146 CPU->fpu_owner = NULL; … … 163 163 } 164 164 fpu_init(); 165 THREAD->fpu_context_exists = 1;165 THREAD->fpu_context_exists = true; 166 166 } 167 167 168 168 CPU->fpu_owner = THREAD; 169 THREAD->fpu_context_engaged = 1;169 THREAD->fpu_context_engaged = true; 170 170 irq_spinlock_unlock(&THREAD->lock, false); 171 171 … … 248 248 249 249 /* 250 * Clear the THREAD_FLAG_STOLEN flag so thatt can be migrated250 * Clear the stolen flag so that it can be migrated 251 251 * when load balancing needs emerge. 252 252 */ 253 thread-> flags &= ~THREAD_FLAG_STOLEN;253 thread->stolen = false; 254 254 irq_spinlock_unlock(&thread->lock, false); 255 255 … … 630 630 irq_spinlock_lock(&thread->lock, false); 631 631 632 if (!(thread->flags & THREAD_FLAG_WIRED) && 633 !(thread->flags & THREAD_FLAG_STOLEN) && 634 !thread->nomigrate && 635 !thread->fpu_context_engaged) { 632 if ((!thread->wired) && (!thread->stolen) && 633 (!thread->nomigrate) && 634 (!thread->fpu_context_engaged)) { 636 635 /* 637 636 * Remove thread from ready queue. … … 670 669 #endif 671 670 672 thread-> flags |= THREAD_FLAG_STOLEN;671 thread->stolen = true; 673 672 thread->state = Entering; 674 673 -
kernel/generic/src/proc/task.c
r34ab31c0 re6a78b9 90 90 TASK = NULL; 91 91 avltree_create(&tasks_tree); 92 task_slab = slab_cache_create("task_ slab", sizeof(task_t), 0,92 task_slab = slab_cache_create("task_t", sizeof(task_t), 0, 93 93 tsk_constructor, NULL, 0); 94 94 } -
kernel/generic/src/proc/thread.c
r34ab31c0 re6a78b9 191 191 kmflags |= FRAME_LOWMEM; 192 192 kmflags &= ~FRAME_HIGHMEM; 193 193 194 194 thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags); 195 195 if (!thread->kstack) { … … 236 236 237 237 atomic_set(&nrdy, 0); 238 thread_slab = slab_cache_create("thread_ slab", sizeof(thread_t), 0,238 thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0, 239 239 thr_constructor, thr_destructor, 0); 240 240 241 241 #ifdef CONFIG_FPU 242 fpu_context_slab = slab_cache_create("fpu_ slab", sizeof(fpu_context_t),243 FPU_CONTEXT_ALIGN, NULL, NULL, 0);242 fpu_context_slab = slab_cache_create("fpu_context_t", 243 sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0); 244 244 #endif 245 245 … … 247 247 } 248 248 249 /** Wire thread to the given CPU 250 * 251 * @param cpu CPU to wire the thread to. 252 * 253 */ 254 void thread_wire(thread_t *thread, cpu_t *cpu) 255 { 256 irq_spinlock_lock(&thread->lock, true); 257 thread->cpu = cpu; 258 thread->wired = true; 259 irq_spinlock_unlock(&thread->lock, true); 260 } 261 249 262 /** Make thread ready 250 263 * … … 260 273 ASSERT(thread->state != Ready); 261 274 262 int i = (thread->priority < RQ_COUNT - 1) 263 ?++thread->priority : thread->priority;264 265 cpu_t *cpu = CPU;266 if (thread-> flags & THREAD_FLAG_WIRED) {275 int i = (thread->priority < RQ_COUNT - 1) ? 276 ++thread->priority : thread->priority; 277 278 cpu_t *cpu; 279 if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) { 267 280 ASSERT(thread->cpu != NULL); 268 281 cpu = thread->cpu; 269 } 282 } else 283 cpu = CPU; 284 270 285 thread->state = Ready; 271 286 … … 298 313 * @param flags Thread flags. 299 314 * @param name Symbolic name (a copy is made). 300 * @param uncounted Thread's accounting doesn't affect accumulated task301 * accounting.302 315 * 303 316 * @return New thread's structure on success, NULL on failure. 
… … 305 318 */ 306 319 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, 307 unsigned int flags, const char *name, bool uncounted)320 thread_flags_t flags, const char *name) 308 321 { 309 322 thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0); … … 335 348 thread->ucycles = 0; 336 349 thread->kcycles = 0; 337 thread->uncounted = uncounted; 350 thread->uncounted = 351 ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED); 338 352 thread->priority = -1; /* Start in rq[0] */ 339 353 thread->cpu = NULL; 340 thread->flags = flags; 354 thread->wired = false; 355 thread->stolen = false; 356 thread->uspace = 357 ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE); 358 341 359 thread->nomigrate = 0; 342 360 thread->state = Entering; … … 356 374 thread->task = task; 357 375 358 thread->fpu_context_exists = 0;359 thread->fpu_context_engaged = 0;376 thread->fpu_context_exists = false; 377 thread->fpu_context_engaged = false; 360 378 361 379 avltree_node_initialize(&thread->threads_tree_node); … … 371 389 thread_create_arch(thread); 372 390 373 if ( !(flags & THREAD_FLAG_NOATTACH))391 if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH) 374 392 thread_attach(thread, task); 375 393 … … 437 455 438 456 /* Must not count kbox thread into lifecount */ 439 if (thread-> flags & THREAD_FLAG_USPACE)457 if (thread->uspace) 440 458 atomic_inc(&task->lifecount); 441 459 … … 459 477 void thread_exit(void) 460 478 { 461 if (THREAD-> flags & THREAD_FLAG_USPACE) {479 if (THREAD->uspace) { 462 480 #ifdef CONFIG_UDEBUG 463 481 /* Generate udebug THREAD_E event */ 464 482 udebug_thread_e_event(); 465 483 466 484 /* 467 485 * This thread will not execute any code or system calls from … … 506 524 { 507 525 ASSERT(THREAD); 508 526 509 527 THREAD->nomigrate++; 510 528 } … … 515 533 ASSERT(THREAD); 516 534 ASSERT(THREAD->nomigrate > 0); 517 518 THREAD->nomigrate--; 535 536 if (THREAD->nomigrate > 0) 537 THREAD->nomigrate--; 519 538 } 520 539 … … 854 873 * In 
case of failure, kernel_uarg will be deallocated in this function. 855 874 * In case of success, kernel_uarg will be freed in uinit(). 856 *857 875 */ 858 876 uspace_arg_t *kernel_uarg = … … 866 884 867 885 thread_t *thread = thread_create(uinit, kernel_uarg, TASK, 868 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf , false);886 THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf); 869 887 if (thread) { 870 888 if (uspace_thread_id != NULL) { -
kernel/generic/src/synch/mutex.c
r34ab31c0 re6a78b9 63 63 } 64 64 65 #define MUTEX_DEADLOCK_THRESHOLD 100000000 66 65 67 /** Acquire mutex. 66 68 * … … 91 93 bool deadlock_reported = false; 92 94 do { 93 if (cnt++ > DEADLOCK_THRESHOLD) {95 if (cnt++ > MUTEX_DEADLOCK_THRESHOLD) { 94 96 printf("cpu%u: looping on active mutex %p\n", 95 97 CPU->id, mtx); -
kernel/generic/src/synch/spinlock.c
r34ab31c0 re6a78b9 262 262 int rc = spinlock_trylock(&(lock->lock)); 263 263 264 ASSERT_IRQ_SPINLOCK( !rc || !lock->guard, lock);264 ASSERT_IRQ_SPINLOCK((!rc) || (!lock->guard), lock); 265 265 return rc; 266 266 } -
kernel/generic/src/sysinfo/sysinfo.c
r34ab31c0 re6a78b9 97 97 void sysinfo_init(void) 98 98 { 99 sysinfo_item_slab = slab_cache_create("sysinfo_item_ slab",99 sysinfo_item_slab = slab_cache_create("sysinfo_item_t", 100 100 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor, 101 101 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED); -
kernel/generic/src/udebug/udebug.c
r34ab31c0 re6a78b9 410 410 411 411 mutex_lock(&thread->udebug.lock); 412 unsigned int flags = thread->flags;413 412 414 413 /* Only process userspace threads. */ 415 if ( (flags & THREAD_FLAG_USPACE) != 0) {414 if (thread->uspace) { 416 415 /* Prevent any further debug activity in thread. */ 417 416 thread->udebug.active = false; -
kernel/generic/src/udebug/udebug_ops.c
r34ab31c0 re6a78b9 95 95 96 96 /* Verify that 'thread' is a userspace thread. */ 97 if ( (thread->flags & THREAD_FLAG_USPACE) == 0) {97 if (!thread->uspace) { 98 98 /* It's not, deny its existence */ 99 99 irq_spinlock_unlock(&thread->lock, true); … … 200 200 201 201 mutex_lock(&thread->udebug.lock); 202 if ( (thread->flags & THREAD_FLAG_USPACE) != 0) {202 if (thread->uspace) { 203 203 thread->udebug.active = true; 204 204 mutex_unlock(&thread->udebug.lock); … … 393 393 394 394 irq_spinlock_lock(&thread->lock, false); 395 int flags = thread->flags;395 bool uspace = thread->uspace; 396 396 irq_spinlock_unlock(&thread->lock, false); 397 397 398 398 /* Not interested in kernel threads. */ 399 if ( (flags & THREAD_FLAG_USPACE) == 0)399 if (!uspace) 400 400 continue; 401 401
Note:
See TracChangeset
for help on using the changeset viewer.