Changes in kernel/generic/src/proc/thread.c [ee42e43:22e6802] in mainline
1 file edited.

Legend:
  -  line removed (present only in ee42e43)
  +  line added (present only in 22e6802)
  lines with neither marker are unmodified
kernel/generic/src/proc/thread.c
Diff of ee42e43 (removed lines, "-") against 22e6802 (added lines, "+").
Throughout the file, the local variable named thread on the ee42e43 side
corresponds to t on the 22e6802 side, and ee42e43's combined
irq_spinlock_lock(&lock, true) / irq_spinlock_unlock(&lock, true) calls
correspond to explicit interrupts_disable(); spinlock_lock(&lock); ...
spinlock_unlock(&lock); interrupts_restore(ipl); sequences on the 22e6802
side. Several documentation blocks also lose a trailing blank comment line on
the 22e6802 side. Those mechanical differences are not repeated below; only
the structural changes are shown.

Copyright and file comment:
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2004 Jakub Jermar
- * @brief
+ * @brief Thread management functions.

Includes:
+#include <synch/rwlock.h>
 #include <cpu.h>
-#include <str.h>
+#include <func.h>

Thread states and file-scope data:
-const char *thread_states[] = { "Invalid", ..., "Lingering" };
+char *thread_states[] = { "Invalid", ..., "Lingering" };
-
-typedef struct {
-    thread_id_t thread_id;
-    thread_t *thread;
-} thread_iterator_t;
-IRQ_SPINLOCK_INITIALIZE(threads_lock);
+SPINLOCK_INITIALIZE(threads_lock);
 avltree_t threads_tree;
-IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
-static thread_id_t last_tid = 0;
+SPINLOCK_INITIALIZE(tidlock);
+thread_id_t last_tid = 0;
 static slab_cache_t *thread_slab;

cushion(), where a new thread starts running and where its accounting is
flushed to the owning task:
     /* This is where each thread wakes up after its creation */
-    irq_spinlock_unlock(&THREAD->lock, false);
+    spinlock_unlock(&THREAD->lock);
     interrupts_enable();

     f(arg);

     /* Accumulate accounting to the task */
-    irq_spinlock_lock(&THREAD->lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
     if (!THREAD->uncounted) {
-        thread_update_accounting(true);
-        uint64_t ucycles = THREAD->ucycles;
-        THREAD->ucycles = 0;
-        uint64_t kcycles = THREAD->kcycles;
-        THREAD->kcycles = 0;
-
-        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
-        TASK->ucycles += ucycles;
-        TASK->kcycles += kcycles;
-        irq_spinlock_unlock(&TASK->lock, true);
+        thread_update_accounting();
+        uint64_t cycles = THREAD->cycles;
+        THREAD->cycles = 0;
+        spinlock_unlock(&THREAD->lock);
+
+        spinlock_lock(&TASK->lock);
+        TASK->cycles += cycles;
+        spinlock_unlock(&TASK->lock);
     } else
-        irq_spinlock_unlock(&THREAD->lock, true);
+        spinlock_unlock(&THREAD->lock);
+
+    interrupts_restore(ipl);

     thread_exit();
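The accounting change above replaces a single per-thread cycles counter with
separate user-mode and kernel-mode counters that are flushed into the owning
task when the thread finishes. A minimal user-space sketch of that split,
assuming invented names (fake_task, fake_thread, update_accounting,
flush_to_task) and a fake cycle counter standing in for the kernel's
get_cycle():

/*
 * Model of the split accounting on the ee42e43 side: the delta since
 * last_cycle is charged to either ucycles or kcycles, and the per-thread
 * totals are folded into the task at the end.  Everything here is invented
 * for illustration; it is not HelenOS code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t ucycles;   /* cycles spent in user mode */
    uint64_t kcycles;   /* cycles spent in kernel mode */
} fake_task;

typedef struct {
    fake_task *task;
    uint64_t ucycles;
    uint64_t kcycles;
    uint64_t last_cycle;
} fake_thread;

static uint64_t cycle_counter;                 /* stand-in for get_cycle() */
static uint64_t get_cycle(void) { return cycle_counter; }

/* Mirrors thread_update_accounting(bool user): charge the delta since last_cycle. */
static void update_accounting(fake_thread *t, bool user)
{
    uint64_t now = get_cycle();
    if (user)
        t->ucycles += now - t->last_cycle;
    else
        t->kcycles += now - t->last_cycle;
    t->last_cycle = now;
}

/* Mirrors the flush in cushion(): move the per-thread totals into the task. */
static void flush_to_task(fake_thread *t)
{
    t->task->ucycles += t->ucycles;
    t->task->kcycles += t->kcycles;
    t->ucycles = t->kcycles = 0;
}

int main(void)
{
    fake_task task = { 0, 0 };
    fake_thread thr = { &task, 0, 0, 0 };

    cycle_counter = 100; update_accounting(&thr, true);   /* 100 user cycles */
    cycle_counter = 160; update_accounting(&thr, false);  /* 60 kernel cycles */
    flush_to_task(&thr);

    printf("task: ucycles=%llu kcycles=%llu\n",
        (unsigned long long) task.ucycles, (unsigned long long) task.kcycles);
    return 0;
}

Running it prints task: ucycles=100 kcycles=60; every delta since last_cycle
is charged to exactly one of the two buckets, which is what lets the task
totals distinguish user time from kernel time.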
thr_constructor() / thr_destructor(), the slab-cache hooks that set up and
tear down a thread_t together with its kernel stack and FPU context:
-static int thr_constructor(void *obj, unsigned int kmflags)
+static int thr_constructor(void *obj, int kmflags)
-    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+    spinlock_initialize(&t->lock, "thread_t_lock");
-#else /* CONFIG_FPU_LAZY */
+#else
-#endif /* CONFIG_FPU_LAZY */
-#endif /* CONFIG_FPU */
+#endif
+#endif
-static size_t thr_destructor(void *obj)
+static int thr_destructor(void *obj)

thread_init() differs only in blank lines.

thread_ready(), which places a thread into a per-CPU run queue. Besides the
locking pattern, ee42e43 hands the thread lock straight over to the run-queue
lock and comments out the unused load average, while 22e6802 unlocks and
relocks explicitly and still computes the (unused) average:
-    thread->state = Ready;
-
-    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
-    cpu->rq[i].n++;
-    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-    atomic_inc(&nrdy);
-    // FIXME: Why is the avg value not used
-    // avg = atomic_get(&nrdy) / config.cpu_active;
-    atomic_inc(&cpu->nrdy);
+    t->state = Ready;
+    spinlock_unlock(&t->lock);
+
+    r = &cpu->rq[i];
+    spinlock_lock(&r->lock);
+    list_append(&t->rq_link, &r->rq_head);
+    r->n++;
+    spinlock_unlock(&r->lock);
+
+    atomic_inc(&nrdy);
+    avg = atomic_get(&nrdy) / config.cpu_active;
+    atomic_inc(&cpu->nrdy);
+
+    interrupts_restore(ipl);
The wired-CPU check and the run-queue index are the same on both sides:
     if (t->flags & THREAD_FLAG_WIRED) {
         ASSERT(t->cpu != NULL);
         cpu = t->cpu;
     }
     i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
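thread_ready() picks the run queue from the thread's priority, demoting the
thread by one level each time it becomes ready until it reaches the last
queue. A toy user-space model of just that index computation, with invented
names (toy_thread, toy_rq, make_ready) and no locking:

#include <stdio.h>

#define RQ_COUNT 16

typedef struct {
    int priority;      /* starts at -1, like a freshly created thread */
} toy_thread;

typedef struct {
    int n;             /* number of threads queued at this priority */
} toy_rq;

static toy_rq rq[RQ_COUNT];

static int make_ready(toy_thread *t)
{
    /* Same expression as in thread_ready(): demote by one, never past the last queue. */
    int i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
    rq[i].n++;
    return i;
}

int main(void)
{
    toy_thread t = { .priority = -1 };
    for (int k = 0; k < 20; k++)
        printf("readied -> queue %d\n", make_ready(&t));
    return 0;
}

After RQ_COUNT wakeups the thread simply stays in queue RQ_COUNT - 1,
mirroring the ++t->priority expression above.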
thread_create() documentation and signature; the parameter block is re-wrapped
between the two revisions, and on the 22e6802 side it reads:
 * @param func       Thread's implementing function.
 * @param arg        Thread's implementing function argument.
 * @param task       Task to which the thread belongs. The caller must
 *                   guarantee that the task won't cease to exist during the
 *                   call. The task's lock may not be held.
 * @param flags      Thread flags.
 * @param name       Symbolic name (a copy is made).
 * @param uncounted  Thread's accounting doesn't affect accumulated task
 *                   accounting.
 * @return New thread's structure on success, NULL on failure.
The signature differs in the flags and name types:
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    unsigned int flags, const char *name, bool uncounted)
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    int flags, char *name, bool uncounted)

thread_create() body, first half (allocation, thread ID, saved context and
field initialization); 22e6802 declares t and ipl up front:
     /* Not needed, but good for debugging */
     memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

-    irq_spinlock_lock(&tidlock, true);
-    thread->tid = ++last_tid;
-    irq_spinlock_unlock(&tidlock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&tidlock);
+    t->tid = ++last_tid;
+    spinlock_unlock(&tidlock);
+    interrupts_restore(ipl);

     context_save(&thread->saved_context);
     context_set(&thread->saved_context, FADDR(cushion),
         (uintptr_t) thread->kstack, THREAD_STACK_SIZE);

     the_initialize((the_t *) thread->kstack);

     ipl = interrupts_disable();
     thread->saved_context.ipl = interrupts_read();
     interrupts_restore(ipl);

-    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+    memcpy(t->name, name, THREAD_NAME_BUFLEN);
+    t->name[THREAD_NAME_BUFLEN - 1] = 0;

     thread->thread_code = func;
     thread->thread_arg = arg;
     thread->ticks = -1;
-    thread->ucycles = 0;
-    thread->kcycles = 0;
+    t->cycles = 0;
     thread->uncounted = uncounted;
     thread->priority = -1;    /* start in rq[0] */
     thread->cpu = NULL;
     thread->flags = flags;
     thread->state = Entering;
+    t->call_me = NULL;
+    t->call_me_with = NULL;

     timeout_initialize(&thread->sleep_timeout);
     thread->sleep_interruptible = false;
     thread->sleep_queue = NULL;
-    thread->timeout_pending = false;
+    t->timeout_pending = 0;

     thread->in_copy_from_uspace = false;
     thread->in_copy_to_uspace = false;
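The two sides copy the thread name differently: str_cpy() bounds the copy by
the destination size and always NUL-terminates, while memcpy() copies the
whole fixed-size buffer and then forces the last byte to NUL. A user-space
sketch of the two strategies; THREAD_NAME_BUFLEN here is a local constant and
copy_name_bounded/copy_name_memcpy are names invented for this sketch, not
kernel API:

#include <stdio.h>
#include <string.h>

#define THREAD_NAME_BUFLEN 20

/* Bounded copy: stops at the source NUL and always terminates the destination. */
static void copy_name_bounded(char *dst, size_t size, const char *src)
{
    size_t i;
    for (i = 0; i + 1 < size && src[i] != '\0'; i++)
        dst[i] = src[i];
    dst[i] = '\0';
}

/* Fixed-size copy: copies the whole buffer and forces the last byte to NUL.
 * This reads THREAD_NAME_BUFLEN bytes from src_buf, so the caller must
 * guarantee the source buffer is at least that large. */
static void copy_name_memcpy(char *dst, const char *src_buf)
{
    memcpy(dst, src_buf, THREAD_NAME_BUFLEN);
    dst[THREAD_NAME_BUFLEN - 1] = '\0';
}

int main(void)
{
    char src[THREAD_NAME_BUFLEN] = "kinit";
    char a[THREAD_NAME_BUFLEN], b[THREAD_NAME_BUFLEN];

    copy_name_bounded(a, sizeof(a), src);
    copy_name_memcpy(b, src);
    printf("bounded: '%s', memcpy: '%s'\n", a, b);
    return 0;
}

The memcpy variant relies on the source being at least THREAD_NAME_BUFLEN
bytes long, which matches sys_thread_create() further down, where the name is
first copied into a kernel namebuf of exactly that size.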
thread_create() body, second half; 22e6802 additionally initializes the rwlock
holder type (matching the extra <synch/rwlock.h> include above):
     thread->interrupted = false;
     thread->detached = false;
     waitq_initialize(&thread->join_wq);

+    t->rwlock_holder_type = RWLOCK_NONE;
+
     thread->task = task;

     thread->fpu_context_exists = 0;
     thread->fpu_context_engaged = 0;

     avltree_node_initialize(&thread->threads_tree_node);
     thread->threads_tree_node.key = (uintptr_t) thread;

 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
     udebug_thread_initialize(&thread->udebug);
 #endif

     /* Might depend on previous initialization */
     thread_create_arch(thread);

     if (!(flags & THREAD_FLAG_NOATTACH))
         thread_attach(thread, task);

     return thread;
 }
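thread_create() only publishes the new thread when THREAD_FLAG_NOATTACH is not
set; otherwise the caller attaches it later. A toy illustration of why that
split is useful: an object that is fully initialized but not yet in any global
registry can still be thrown away on a later failure without anyone having
observed it. registry[], obj_t, obj_create and obj_attach are invented for
this sketch:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int id;
    int attached;
} obj_t;

#define MAX_OBJS 8
static obj_t *registry[MAX_OBJS];

static obj_t *obj_create(int id)
{
    obj_t *o = calloc(1, sizeof(*o));
    if (o)
        o->id = id;        /* initialized, but not globally visible yet */
    return o;
}

static int obj_attach(obj_t *o)
{
    for (int i = 0; i < MAX_OBJS; i++) {
        if (!registry[i]) {
            registry[i] = o;
            o->attached = 1;
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    obj_t *o = obj_create(42);
    if (!o)
        return 1;

    /* Something else fails before we publish the object: safe to just free it. */
    int later_step_failed = 1;
    if (later_step_failed) {
        free(o);           /* nobody could have seen it */
        printf("creation undone cleanly\n");
        return 0;
    }

    obj_attach(o);
    return 0;
}

sys_thread_create() below relies on exactly this property when copying the new
thread ID back to user space fails.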
thread_destroy(); ee42e43 documents an irq_res parameter and asserts that the
caller holds thread->lock, hands the lock over lock-to-lock with
irq_spinlock_pass(), and drops the task reference through task_release(),
while 22e6802 only notes "Assume thread->lock is held!!", unlocks and relocks
explicitly, and open-codes the reference drop:
- * @param thread  Thread to be destroyed.
- * @param irq_res Indicate whether it should unlock thread->lock
- *                in interrupts-restore mode.
+ * Assume thread->lock is held!!
-void thread_destroy(thread_t *thread, bool irq_res)
+void thread_destroy(thread_t *t)
 {
-    ASSERT(irq_spinlock_locked(&thread->lock));
     ASSERT(thread->state == Exiting || thread->state == Lingering);
     ASSERT(thread->task);
     ASSERT(thread->cpu);

     (reset cpu->fpu_owner under the CPU lock if it points to this thread)

-    irq_spinlock_pass(&thread->lock, &threads_lock);
-    avltree_delete(&threads_tree, &thread->threads_tree_node);
-    irq_spinlock_pass(&threads_lock, &thread->task->lock);
+    spinlock_unlock(&t->lock);
+
+    spinlock_lock(&threads_lock);
+    avltree_delete(&threads_tree, &t->threads_tree_node);
+    spinlock_unlock(&threads_lock);

     /*
      * Detach from the containing task.
      */
+    spinlock_lock(&t->task->lock);
     list_remove(&thread->th_link);
-    irq_spinlock_unlock(&thread->task->lock, irq_res);
+    spinlock_unlock(&t->task->lock);

-    /*
-     * Drop the reference to the containing task.
-     */
-    task_release(thread->task);
-    slab_free(thread_slab, thread);
+    /*
+     * t is guaranteed to be the very last thread of its task.
+     * It is safe to destroy the task.
+     */
+    if (atomic_predec(&t->task->refcount) == 0)
+        task_destroy(t->task);
+
+    slab_free(thread_slab, t);
 }

thread_attach(); the reference acquisition mirrors the release above:
-    irq_spinlock_lock(&task->lock, true);
-
-    /* Hold a reference to the task. */
-    task_hold(task);
+    ipl = interrupts_disable();
+    spinlock_lock(&task->lock);
+
+    atomic_inc(&task->refcount);

     /* Must not count kbox thread into lifecount */
     if (thread->flags & THREAD_FLAG_USPACE)
         atomic_inc(&task->lifecount);

     list_append(&thread->th_link, &task->th_head);
-
-    irq_spinlock_pass(&task->lock, &threads_lock);
+    spinlock_unlock(&task->lock);

     /*
      * Register this thread in the system-wide list.
      */
+    spinlock_lock(&threads_lock);
     avltree_insert(&threads_tree, &thread->threads_tree_node);
-    irq_spinlock_unlock(&threads_lock, true);
+    spinlock_unlock(&threads_lock);
+
+    interrupts_restore(ipl);
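The reference-count handling is the main functional difference here: ee42e43
wraps it in task_hold()/task_release(), while 22e6802 open-codes
atomic_inc()/atomic_predec() and calls task_destroy() when the count drops to
zero. A minimal user-space model of that pattern using C11 atomics; my_task_t
and the helper names are invented for this sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_int refcount;
    const char *name;
} my_task_t;

static void my_task_destroy(my_task_t *task)
{
    printf("destroying task %s\n", task->name);
    free(task);
}

static void my_task_hold(my_task_t *task)
{
    atomic_fetch_add(&task->refcount, 1);
}

static void my_task_release(my_task_t *task)
{
    /* fetch_sub returns the previous value; previous == 1 means we were last. */
    if (atomic_fetch_sub(&task->refcount, 1) == 1)
        my_task_destroy(task);
}

int main(void)
{
    my_task_t *task = malloc(sizeof(*task));
    atomic_init(&task->refcount, 1);   /* reference held by the creator */
    task->name = "demo";

    my_task_hold(task);     /* e.g. a thread attaches */
    my_task_release(task);  /* the thread goes away */
    my_task_release(task);  /* creator drops its reference -> destroy */
    return 0;
}

Note that the model's fetch_sub returns the previous value, whereas
atomic_predec() in the kernel code is compared against 0, i.e. the new value;
either way exactly one releaser observes the count hitting zero and destroys
the task.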
thread_exit(); besides the locking pattern, ee42e43 additionally calls
udebug_stoppable_begin() after raising the THREAD_E event:
     if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
         /* Generate udebug THREAD_E event */
         udebug_thread_e_event();
-
-        /*
-         * This thread will not execute any code or system calls from
-         * now on.
-         */
-        udebug_stoppable_begin();
 #endif
         if (atomic_predec(&TASK->lifecount) == 0) {
             ...
             ipc_cleanup();
             ...
         }
     }

 restart:
     (take THREAD->lock; if THREAD->timeout_pending, busy-wait by unlocking
     and jumping back to restart)

     THREAD->state = Exiting;
     (unlock and call scheduler())

     /* Not reached */
-    while (true);
+    while (1)
+        ;

thread_sleep() is unchanged apart from blank lines.

thread_join_timeout():
-int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
Both sides check t != THREAD, assert under the thread's lock that it is not
detached, and then sleep on the join wait queue via
waitq_sleep_timeout(&t->join_wq, usec, flags); 22e6802 stores the result in a
local rc before returning it.

thread_detach():
-    if (thread->state == Lingering) {
-        /*
-         * Unlock &thread->lock and restore
-         * interrupts in thread_destroy().
-         */
-        thread_destroy(thread, true);
-        return;
-    } else {
-        thread->detached = true;
-    }
+    if (t->state == Lingering) {
+        thread_destroy(t);    /* unlocks &t->lock */
+        interrupts_restore(ipl);
+        return;
+    } else {
+        t->detached = true;
+    }
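The join/detach contract above mirrors the familiar POSIX one: every thread is
either joined or detached exactly once, never both, and joining reaps the
thread's resources. Plain pthread code, used here only as an analogy to
thread_join_timeout()/thread_detach(); it is not HelenOS API:

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    (void) arg;
    return (void *) 42;
}

int main(void)
{
    pthread_t joined, detached;
    void *retval;

    pthread_create(&joined, NULL, worker, NULL);
    pthread_create(&detached, NULL, worker, NULL);

    /* Exactly one of join or detach per thread. */
    pthread_join(joined, &retval);     /* reaps the first worker */
    pthread_detach(detached);          /* second worker cleans up on its own */

    printf("joined worker returned %ld\n", (long) retval);
    return 0;
}

(Build with -pthread.) The kernel variant adds a timeout, which is why it is
layered on waitq_sleep_timeout() rather than on a plain blocking join.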
thread_register_call_me() exists only on the 22e6802 side; it registers a
function to be run out of context on the next switch to the current thread:
+/** Register thread out-of-context invocation
+ *
+ * Register a function and its argument to be executed
+ * on next context switch to the current thread.
+ *
+ * @param call_me      Out-of-context function.
+ * @param call_me_with Out-of-context function argument.
+ */
+void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
+{
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
+    THREAD->call_me = call_me;
+    THREAD->call_me_with = call_me_with;
+    spinlock_unlock(&THREAD->lock);
+    interrupts_restore(ipl);
+}

thread_walker(), the callback used when listing threads; ee42e43 prints either
a basic or an "additional" line depending on the flag passed through arg and
reports ucycles/kcycles, while 22e6802 always prints one wide line with a
single cycles value:
-    bool *additional = (bool *) arg;
-    uint64_t ucycles, kcycles;
-    char usuffix, ksuffix;
-    order_suffix(thread->ucycles, &ucycles, &usuffix);
-    order_suffix(thread->kcycles, &kcycles, &ksuffix);
+    uint64_t cycles;
+    char suffix;
+    order(t->cycles, &cycles, &suffix);
32-bit variant of the printed line:
-    if (*additional)
-        printf("%-8" PRIu64" %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
-            thread->tid, thread->kstack, ucycles, usuffix,
-            kcycles, ksuffix);
-    else
-        printf("%-8" PRIu64" %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",
-            thread->tid, thread->name, thread, thread_states[thread->state],
-            thread->task, thread->task->context, thread->thread_code);
+    printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
+        PRIu64 "%c ", t->tid, t->name, t, thread_states[t->state], t->task,
+        t->task->context, t->thread_code, t->kstack, cycles, suffix);
The 64-bit variants differ mainly in pointer field widths. On ee42e43 the CPU
id and the sleep queue are printed only when the additional flag is set; on
22e6802 they are always printed.
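thread_walker() shortens the cycle counts with order()/order_suffix() so they
fit a fixed-width column. A sketch of such a helper; the decimal thresholds
and the suffix set below are an assumption about the intent, not a copy of the
kernel function:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void order_suffix(uint64_t val, uint64_t *out, char *suffix)
{
    static const char suffixes[] = { ' ', 'k', 'M', 'G', 'T', 'P', 'E' };
    unsigned int i = 0;

    /* Keep dividing by 1000 until the value fits a narrow column. */
    while (val >= 1000000 && i < sizeof(suffixes) - 1) {
        val /= 1000;
        i++;
    }
    *out = val;
    *suffix = suffixes[i];
}

int main(void)
{
    uint64_t scaled;
    char suffix;

    order_suffix(UINT64_C(123456789012), &scaled, &suffix);
    printf("%" PRIu64 "%c\n", scaled, suffix);
    return 0;
}

For the example value it prints 123456M.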
thread_print_list(); ee42e43 takes an additional flag, prints bracketed column
headers and passes the flag down to thread_walker(), while 22e6802 takes no
argument and prints fixed headers with a dashed underline:
-void thread_print_list(bool additional)
+void thread_print_list(void)
     /* Messing with thread structures, avoid deadlock */
     (lock threads_lock, print the header, walk the tree, unlock)
-    if (additional)
-        printf("[id    ] [stack   ] [ucycles ] [kcycles ] [cpu]"
-            " [waitqueue]\n");
-    else
-        printf("[id    ] [name        ] [address ] [state ] [task    ]"
-            " [ctx] [code    ]\n");
+    printf("tid    name       address    state    task       "
+        "ctx code       stack      cycles     cpu  "
+        "waitqueue\n");
+    printf("------ ---------- ---------- -------- ---------- "
+        "--- ---------- ---------- ---------- ---- "
+        "----------\n");
     (64-bit headers analogous, with wider pointer columns)
-    avltree_walk(&threads_tree, thread_walker, &additional);
+    avltree_walk(&threads_tree, thread_walker, NULL);

thread_exists(); ee42e43 additionally asserts the calling conditions:
-bool thread_exists(thread_t *thread)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&threads_lock));
-
-    avltree_node_t *node =
-        avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
+bool thread_exists(thread_t *t)
+{
+    avltree_node_t *node;
+
+    node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));

     return node != NULL;
 }

thread_update_accounting(); the user/kernel split again, plus the asserts:
-void thread_update_accounting(bool user)
-{
-    uint64_t time = get_cycle();
-
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&THREAD->lock));
-
-    if (user)
-        THREAD->ucycles += time - THREAD->last_cycle;
-    else
-        THREAD->kcycles += time - THREAD->last_cycle;
-
+void thread_update_accounting(void)
+{
+    uint64_t time = get_cycle();
+    THREAD->cycles += time - THREAD->last_cycle;
     THREAD->last_cycle = time;
 }

thread_search_walker() and thread_find_by_id() exist only on the ee42e43 side;
they look a thread up by ID by walking the threads_tree with an iterator:
-static bool thread_search_walker(avltree_node_t *node, void *arg)
-{
-    thread_t *thread =
-        (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
-    thread_iterator_t *iterator = (thread_iterator_t *) arg;
-
-    if (thread->tid == iterator->thread_id) {
-        iterator->thread = thread;
-        return false;
-    }
-
-    return true;
-}
-
-/** Find thread structure corresponding to thread ID.
- *
- * The threads_lock must be already held by the caller of this function and
- * interrupts must be disabled.
- *
- * @param id Thread ID.
- *
- * @return Thread structure address or NULL if there is no such thread ID.
- */
-thread_t *thread_find_by_id(thread_id_t thread_id)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&threads_lock));
-
-    thread_iterator_t iterator;
-
-    iterator.thread_id = thread_id;
-    iterator.thread = NULL;
-
-    avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
-
-    return iterator.thread;
-}
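thread_find_by_id() shows the usual walk-with-iterator idiom: the caller packs
the search key and a result slot into a small struct, passes it through the
opaque void * argument, and the callback stops the walk by returning false
once it finds a match. A user-space model with an invented singly linked list
standing in for the AVL tree:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct node {
    uint64_t tid;
    struct node *next;
} node_t;

typedef struct {
    uint64_t wanted_tid;
    node_t *found;
} iterator_t;

/* Generic walk: invokes cb on every node until cb returns false. */
static void walk(node_t *head, bool (*cb)(node_t *, void *), void *arg)
{
    for (node_t *n = head; n != NULL; n = n->next)
        if (!cb(n, arg))
            break;
}

static bool search_walker(node_t *n, void *arg)
{
    iterator_t *it = (iterator_t *) arg;
    if (n->tid == it->wanted_tid) {
        it->found = n;
        return false;   /* stop walking early */
    }
    return true;
}

int main(void)
{
    node_t c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    iterator_t it = { .wanted_tid = 2, .found = NULL };

    walk(&a, search_walker, &it);
    printf("found tid %llu: %s\n", 2ULL, it.found ? "yes" : "no");
    return 0;
}

Returning false from the callback is what keeps the lookup from visiting the
rest of the tree once the thread has been found.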
sys_thread_create(); the logic is the same on both sides (copy the name and
the uspace_arg_t in, create the thread with THREAD_FLAG_USPACE |
THREAD_FLAG_NOATTACH, undo the creation if the thread ID cannot be copied back
to user space, then attach and make it ready). 22e6802 declares t, namebuf,
kernel_uarg and rc at the top of the function and re-declares a local rc
inside the copy-back branch:
-    char namebuf[THREAD_NAME_BUFLEN];
-    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
+    rc = copy_from_uspace(namebuf, uspace_name, name_len);
     ...
-    uspace_arg_t *kernel_uarg =
-        (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
     ...
-    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
+    t = thread_create(uinit, kernel_uarg, TASK,
         THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
     if (thread) {
         if (uspace_thread_id != NULL) {
+            int rc;
+
             rc = copy_to_uspace(uspace_thread_id, &thread->tid,
                 sizeof(thread->tid));
             if (rc != 0) {
                 /*
                  * The new thread structure is initialized, but
                  * is still not visible to the system.
                  * We can safely deallocate it.
                  */
                 slab_free(thread_slab, thread);
                 free(kernel_uarg);
                 return (unative_t) rc;
             }
         }
 #ifdef CONFIG_UDEBUG
         udebug_thread_b_event_attach(thread, TASK);
 #else
         thread_attach(thread, TASK);
 #endif
         thread_ready(thread);
         return 0;
     } else
         free(kernel_uarg);

     return (unative_t) ENOMEM;

sys_thread_exit() and sys_thread_get_id() differ only in blank lines and
trailing blank comment lines; the captured diff listing ends inside
sys_thread_get_id().
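The top of sys_thread_create() shows the usual pattern for accepting a string
from user space: clamp the caller-supplied length to the kernel buffer, copy,
and only terminate after the copy has succeeded. A user-space sketch of that
pattern; copy_from_user_sim() is an invented stand-in for copy_from_uspace():

#include <stdio.h>
#include <string.h>

#define THREAD_NAME_BUFLEN 20

/* Pretend "user" memory copy; a real kernel would validate the source range. */
static int copy_from_user_sim(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;          /* 0 = success, otherwise an errno-style code */
}

static int fetch_thread_name(char *namebuf, const char *uspace_name,
    size_t name_len)
{
    if (name_len > THREAD_NAME_BUFLEN - 1)
        name_len = THREAD_NAME_BUFLEN - 1;      /* silently truncate */

    int rc = copy_from_user_sim(namebuf, uspace_name, name_len);
    if (rc != 0)
        return rc;

    namebuf[name_len] = 0;      /* terminate only after a successful copy */
    return 0;
}

int main(void)
{
    char namebuf[THREAD_NAME_BUFLEN];
    const char *user_name = "a-very-long-user-supplied-thread-name";

    if (fetch_thread_name(namebuf, user_name, strlen(user_name)) == 0)
        printf("stored name: '%s'\n", namebuf);
    return 0;
}

A real kernel helper would of course also validate that the source range lies
in user space before touching it.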