Changes in kernel/generic/src/proc/thread.c [b60c582:ae0300b5] in mainline
Legend: unmodified lines are shown without a prefix; lines added in ae0300b5 are prefixed with +; lines removed from b60c582 are prefixed with -.
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c  (r b60c582)
+++ kernel/generic/src/proc/thread.c  (r ae0300b5)
 /*
- * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Thread management functions.
  */
 
…
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
-#include <synch/rwlock.h>
 #include <cpu.h>
-#include <func.h>
+#include <str.h>
 #include <context.h>
 #include <adt/avl.h>
…
 
 /** Thread states */
-char *thread_states[] = {
+const char *thread_states[] = {
     "Invalid",
     "Running",
…
     "Exiting",
     "Lingering"
 };
+
+typedef struct {
+    thread_id_t thread_id;
+    thread_t *thread;
+} thread_iterator_t;
 
 /** Lock protecting the threads_tree AVL tree.
  *
  * For locking rules, see declaration thereof.
- */
-SPINLOCK_INITIALIZE(threads_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
…
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- */
-avltree_t threads_tree;
-
-SPINLOCK_INITIALIZE(tidlock);
-thread_id_t last_tid = 0;
+ *
+ */
+avltree_t threads_tree;
+
+IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
+static thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
+
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
…
     void *arg = THREAD->thread_arg;
     THREAD->last_cycle = get_cycle();
     
     /* This is where each thread wakes up after its creation */
-    spinlock_unlock(&THREAD->lock);
+    irq_spinlock_unlock(&THREAD->lock, false);
     interrupts_enable();
     
     f(arg);
     
     /* Accumulate accounting to the task */
-    ipl_t ipl = interrupts_disable();
-    
-    spinlock_lock(&THREAD->lock);
+    irq_spinlock_lock(&THREAD->lock, true);
     if (!THREAD->uncounted) {
-        thread_update_accounting();
-        uint64_t cycles = THREAD->cycles;
-        THREAD->cycles = 0;
-        spinlock_unlock(&THREAD->lock);
+        thread_update_accounting(true);
+        uint64_t ucycles = THREAD->ucycles;
+        THREAD->ucycles = 0;
+        uint64_t kcycles = THREAD->kcycles;
+        THREAD->kcycles = 0;
         
-        spinlock_lock(&TASK->lock);
-        TASK->cycles += cycles;
-        spinlock_unlock(&TASK->lock);
+        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
+        TASK->ucycles += ucycles;
+        TASK->kcycles += kcycles;
+        irq_spinlock_unlock(&TASK->lock, true);
     } else
-        spinlock_unlock(&THREAD->lock);
-    
-    interrupts_restore(ipl);
+        irq_spinlock_unlock(&THREAD->lock, true);
     
     thread_exit();
-    /* not reached */
-}
-
-/** Initialization and allocation for thread_t structure */
-static int thr_constructor(void *obj, int kmflags)
-{
-    thread_t *t = (thread_t *) obj;
-    
-    spinlock_initialize(&t->lock, "thread_t_lock");
-    link_initialize(&t->rq_link);
-    link_initialize(&t->wq_link);
-    link_initialize(&t->th_link);
+    
+    /* Not reached */
+}
+
+/** Initialization and allocation for thread_t structure
+ *
+ */
+static int thr_constructor(void *obj, unsigned int kmflags)
+{
+    thread_t *thread = (thread_t *) obj;
+    
+    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+    link_initialize(&thread->rq_link);
+    link_initialize(&thread->wq_link);
+    link_initialize(&thread->th_link);
     
     /* call the architecture-specific part of the constructor */
-    thr_constructor_arch(t);
+    thr_constructor_arch(thread);
     
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-    t->saved_fpu_context = NULL;
-#else
-    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-    if (!t->saved_fpu_context)
+    thread->saved_fpu_context = NULL;
+#else /* CONFIG_FPU_LAZY */
+    thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+    if (!thread->saved_fpu_context)
         return -1;
-#endif
-#endif
-    
-    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-    if (!t->kstack) {
+#endif /* CONFIG_FPU_LAZY */
+#endif /* CONFIG_FPU */
+    
+    thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+    if (!thread->kstack) {
 #ifdef CONFIG_FPU
-        if (t->saved_fpu_context)
-            slab_free(fpu_context_slab, t->saved_fpu_context);
+        if (thread->saved_fpu_context)
+            slab_free(fpu_context_slab, thread->saved_fpu_context);
 #endif
         return -1;
     }
     
 #ifdef CONFIG_UDEBUG
-    mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
+    mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
 #endif
     
     return 0;
 }
 
 /** Destruction of thread_t object */
-static int thr_destructor(void *obj)
-{
-    thread_t *t = (thread_t *) obj;
+static size_t thr_destructor(void *obj)
+{
+    thread_t *thread = (thread_t *) obj;
     
     /* call the architecture-specific part of the destructor */
-    thr_destructor_arch(t);
-    
-    frame_free(KA2PA(t->kstack));
+    thr_destructor_arch(thread);
+    
+    frame_free(KA2PA(thread->kstack));
+    
 #ifdef CONFIG_FPU
-    if (t->saved_fpu_context)
-        slab_free(fpu_context_slab, t->saved_fpu_context);
-#endif
+    if (thread->saved_fpu_context)
+        slab_free(fpu_context_slab, thread->saved_fpu_context);
+#endif
+    
     return 1; /* One page freed */
 }
 
…
 {
     THREAD = NULL;
+    
     atomic_set(&nrdy, 0);
     thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
     
 #ifdef CONFIG_FPU
     fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
     
     avltree_create(&threads_tree);
 }
…
 /** Make thread ready
  *
- * Switch thread t to the ready state.
- *
- * @param t Thread to make ready.
- *
- */
-void thread_ready(thread_t *t)
-{
-    cpu_t *cpu;
-    runq_t *r;
-    ipl_t ipl;
-    int i, avg;
-    
-    ipl = interrupts_disable();
-    
-    spinlock_lock(&t->lock);
-    
-    ASSERT(!(t->state == Ready));
-    
-    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
-    
-    cpu = CPU;
-    if (t->flags & THREAD_FLAG_WIRED) {
-        ASSERT(t->cpu != NULL);
-        cpu = t->cpu;
+ * Switch thread to the ready state.
+ *
+ * @param thread Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *thread)
+{
+    irq_spinlock_lock(&thread->lock, true);
+    
+    ASSERT(thread->state != Ready);
+    
+    int i = (thread->priority < RQ_COUNT - 1)
+        ? ++thread->priority : thread->priority;
+    
+    cpu_t *cpu = CPU;
+    if (thread->flags & THREAD_FLAG_WIRED) {
+        ASSERT(thread->cpu != NULL);
+        cpu = thread->cpu;
     }
-    t->state = Ready;
-    spinlock_unlock(&t->lock);
+    thread->state = Ready;
+    
+    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     
     /*
-     * Append t to respective ready queue on respective processor.
+     * Append thread to respective ready queue
+     * on respective processor.
      */
-    r = &cpu->rq[i];
-    spinlock_lock(&r->lock);
-    list_append(&t->rq_link, &r->rq_head);
-    r->n++;
-    spinlock_unlock(&r->lock);
+    
+    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+    cpu->rq[i].n++;
+    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
     
     atomic_inc(&nrdy);
-    avg = atomic_get(&nrdy) / config.cpu_active;
+    // FIXME: Why is the avg value not used
+    // avg = atomic_get(&nrdy) / config.cpu_active;
     atomic_inc(&cpu->nrdy);
 }
+
+/** Create new thread
+ *
+ * Create a new thread.
+ *
+ * @param func      Thread's implementing function.
+ * @param arg       Thread's implementing function argument.
+ * @param task      Task to which the thread belongs. The caller must
+ *                  guarantee that the task won't cease to exist during the
+ *                  call. The task's lock may not be held.
+ * @param flags     Thread flags.
+ * @param name      Symbolic name (a copy is made).
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *                  accounting.
+ *
+ * @return New thread's structure on success, NULL on failure.
+ *
+ */
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    unsigned int flags, const char *name, bool uncounted)
+{
+    thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+    if (!thread)
+        return NULL;
+    
+    /* Not needed, but good for debugging */
+    memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+    
+    irq_spinlock_lock(&tidlock, true);
+    thread->tid = ++last_tid;
+    irq_spinlock_unlock(&tidlock, true);
+    
+    context_save(&thread->saved_context);
+    context_set(&thread->saved_context, FADDR(cushion),
+        (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
+    
+    the_initialize((the_t *) thread->kstack);
+    
+    ipl_t ipl = interrupts_disable();
+    thread->saved_context.ipl = interrupts_read();
     interrupts_restore(ipl);
-}
-
-/** Create new thread
- *
- * Create a new thread.
- *
- * @param func      Thread's implementing function.
- * @param arg       Thread's implementing function argument.
- * @param task      Task to which the thread belongs. The caller must
- *                  guarantee that the task won't cease to exist during the
- *                  call. The task's lock may not be held.
- * @param flags     Thread flags.
- * @param name      Symbolic name (a copy is made).
- * @param uncounted Thread's accounting doesn't affect accumulated task
- *                  accounting.
- *
- * @return New thread's structure on success, NULL on failure.
- */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    int flags, char *name, bool uncounted)
-{
-    thread_t *t;
-    ipl_t ipl;
-    
-    t = (thread_t *) slab_alloc(thread_slab, 0);
-    if (!t)
-        return NULL;
-    
-    /* Not needed, but good for debugging */
-    memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-    
-    ipl = interrupts_disable();
-    spinlock_lock(&tidlock);
-    t->tid = ++last_tid;
-    spinlock_unlock(&tidlock);
-    interrupts_restore(ipl);
-    
-    context_save(&t->saved_context);
-    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
-        THREAD_STACK_SIZE);
-    
-    the_initialize((the_t *) t->kstack);
-    
-    ipl = interrupts_disable();
-    t->saved_context.ipl = interrupts_read();
-    interrupts_restore(ipl);
-    
-    memcpy(t->name, name, THREAD_NAME_BUFLEN);
-    t->name[THREAD_NAME_BUFLEN - 1] = 0;
-    
-    t->thread_code = func;
-    t->thread_arg = arg;
-    t->ticks = -1;
-    t->cycles = 0;
-    t->uncounted = uncounted;
-    t->priority = -1;          /* start in rq[0] */
-    t->cpu = NULL;
-    t->flags = flags;
-    t->state = Entering;
-    t->call_me = NULL;
-    t->call_me_with = NULL;
-    
-    timeout_initialize(&t->sleep_timeout);
-    t->sleep_interruptible = false;
-    t->sleep_queue = NULL;
-    t->timeout_pending = 0;
-    
-    t->in_copy_from_uspace = false;
-    t->in_copy_to_uspace = false;
-    
-    t->interrupted = false;
-    t->detached = false;
-    waitq_initialize(&t->join_wq);
-    
-    t->rwlock_holder_type = RWLOCK_NONE;
-    
-    t->task = task;
-    
-    t->fpu_context_exists = 0;
-    t->fpu_context_engaged = 0;
-    
-    avltree_node_initialize(&t->threads_tree_node);
-    t->threads_tree_node.key = (uintptr_t) t;
-    
+    
+    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+    
+    thread->thread_code = func;
+    thread->thread_arg = arg;
+    thread->ticks = -1;
+    thread->ucycles = 0;
+    thread->kcycles = 0;
+    thread->uncounted = uncounted;
+    thread->priority = -1;          /* Start in rq[0] */
+    thread->cpu = NULL;
+    thread->flags = flags;
+    thread->state = Entering;
+    
+    timeout_initialize(&thread->sleep_timeout);
+    thread->sleep_interruptible = false;
+    thread->sleep_queue = NULL;
+    thread->timeout_pending = false;
+    
+    thread->in_copy_from_uspace = false;
+    thread->in_copy_to_uspace = false;
+    
+    thread->interrupted = false;
+    thread->detached = false;
+    waitq_initialize(&thread->join_wq);
+    
+    thread->task = task;
+    
+    thread->fpu_context_exists = 0;
+    thread->fpu_context_engaged = 0;
+    
+    avltree_node_initialize(&thread->threads_tree_node);
+    thread->threads_tree_node.key = (uintptr_t) thread;
+    
 #ifdef CONFIG_UDEBUG
-    /* Init debugging stuff */
-    udebug_thread_initialize(&t->udebug);
-#endif
-    
-    /* might depend on previous initialization */
-    thread_create_arch(t);
+    /* Initialize debugging stuff */
+    thread->btrace = false;
+    udebug_thread_initialize(&thread->udebug);
+#endif
+    
+    /* Might depend on previous initialization */
+    thread_create_arch(thread);
     
     if (!(flags & THREAD_FLAG_NOATTACH))
-        thread_attach(t, task);
-    
-    return t;
+        thread_attach(thread, task);
+    
+    return thread;
 }
 
…
  * Detach thread from all queues, cpus etc. and destroy it.
  *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-    ASSERT(t->state == Exiting || t->state == Lingering);
-    ASSERT(t->task);
-    ASSERT(t->cpu);
-    
-    spinlock_lock(&t->cpu->lock);
-    if (t->cpu->fpu_owner == t)
-        t->cpu->fpu_owner = NULL;
-    spinlock_unlock(&t->cpu->lock);
-    
-    spinlock_unlock(&t->lock);
-    
-    spinlock_lock(&threads_lock);
-    avltree_delete(&threads_tree, &t->threads_tree_node);
-    spinlock_unlock(&threads_lock);
-    
+ * @param thread  Thread to be destroyed.
+ * @param irq_res Indicate whether it should unlock thread->lock
+ *                in interrupts-restore mode.
+ *
+ */
+void thread_destroy(thread_t *thread, bool irq_res)
+{
+    ASSERT(irq_spinlock_locked(&thread->lock));
+    ASSERT((thread->state == Exiting) || (thread->state == Lingering));
+    ASSERT(thread->task);
+    ASSERT(thread->cpu);
+    
+    irq_spinlock_lock(&thread->cpu->lock, false);
+    if (thread->cpu->fpu_owner == thread)
+        thread->cpu->fpu_owner = NULL;
+    irq_spinlock_unlock(&thread->cpu->lock, false);
+    
+    irq_spinlock_pass(&thread->lock, &threads_lock);
+    
+    avltree_delete(&threads_tree, &thread->threads_tree_node);
+    
+    irq_spinlock_pass(&threads_lock, &thread->task->lock);
+    
     /*
      * Detach from the containing task.
      */
-    spinlock_lock(&t->task->lock);
-    list_remove(&t->th_link);
-    spinlock_unlock(&t->task->lock);
+    list_remove(&thread->th_link);
+    irq_spinlock_unlock(&thread->task->lock, irq_res);
     
     /*
-     * t is guaranteed to be the very last thread of its task.
-     * It is safe to destroy the task.
+     * Drop the reference to the containing task.
      */
-    if (atomic_predec(&t->task->refcount) == 0)
-        task_destroy(t->task);
-    
-    slab_free(thread_slab, t);
+    task_release(thread->task);
+    slab_free(thread_slab, thread);
 }
 
…
  * threads_tree.
  *
- * @param t    Thread to be attached to the task.
- * @param task Task to which the thread is to be attached.
- */
-void thread_attach(thread_t *t, task_t *task)
-{
-    ipl_t ipl;
-    
+ * @param t    Thread to be attached to the task.
+ * @param task Task to which the thread is to be attached.
+ *
+ */
+void thread_attach(thread_t *thread, task_t *task)
+{
     /*
      * Attach to the specified task.
      */
-    ipl = interrupts_disable();
-    spinlock_lock(&task->lock);
-    
-    atomic_inc(&task->refcount);
-    
+    irq_spinlock_lock(&task->lock, true);
+    
+    /* Hold a reference to the task. */
+    task_hold(task);
+    
     /* Must not count kbox thread into lifecount */
-    if (t->flags & THREAD_FLAG_USPACE)
+    if (thread->flags & THREAD_FLAG_USPACE)
         atomic_inc(&task->lifecount);
     
-    list_append(&t->th_link, &task->th_head);
-    spinlock_unlock(&task->lock);
-    
+    list_append(&thread->th_link, &task->th_head);
+    
+    irq_spinlock_pass(&task->lock, &threads_lock);
+    
     /*
      * Register this thread in the system-wide list.
      */
-    spinlock_lock(&threads_lock);
-    avltree_insert(&threads_tree, &t->threads_tree_node);
-    spinlock_unlock(&threads_lock);
-    
-    interrupts_restore(ipl);
+    avltree_insert(&threads_tree, &thread->threads_tree_node);
+    irq_spinlock_unlock(&threads_lock, true);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state. All pending
- * timeouts are executed.
+ * End current thread execution and switch it to the exiting state.
+ * All pending timeouts are executed.
+ *
  */
 void thread_exit(void)
 {
-    ipl_t ipl;
-    
     if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
         /* Generate udebug THREAD_E event */
         udebug_thread_e_event();
+        
+        /*
+         * This thread will not execute any code or system calls from
+         * now on.
+         */
+        udebug_stoppable_begin();
 #endif
         if (atomic_predec(&TASK->lifecount) == 0) {
…
              * can only be created by threads of the same task.
              * We are safe to perform cleanup.
+             *
              */
             ipc_cleanup();
…
         }
     }
     
 restart:
-    ipl = interrupts_disable();
-    spinlock_lock(&THREAD->lock);
-    if (THREAD->timeout_pending) {
-        /* busy waiting for timeouts in progress */
-        spinlock_unlock(&THREAD->lock);
-        interrupts_restore(ipl);
+    irq_spinlock_lock(&THREAD->lock, true);
+    if (THREAD->timeout_pending) {
+        /* Busy waiting for timeouts in progress */
+        irq_spinlock_unlock(&THREAD->lock, true);
         goto restart;
     }
     
     THREAD->state = Exiting;
-    spinlock_unlock(&THREAD->lock);
+    irq_spinlock_unlock(&THREAD->lock, true);
+    
     scheduler();
     
     /* Not reached */
-    while (1)
-        ;
-}
+    while (true);
+}
 
 /** Thread sleep
…
 void thread_sleep(uint32_t sec)
 {
-    thread_usleep(sec * 1000000);
+    /* Sleep in 1000 second steps to support
+       full argument range */
+    while (sec > 0) {
+        uint32_t period = (sec > 1000) ? 1000 : sec;
+        
+        thread_usleep(period * 1000000);
+        sec -= period;
+    }
 }
 
 /** Wait for another thread to exit.
  *
- * @param t     Thread to join on exit.
- * @param usec  Timeout in microseconds.
- * @param flags Mode of operation.
+ * @param thread Thread to join on exit.
+ * @param usec   Timeout in microseconds.
+ * @param flags  Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- */
-int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
-{
-    ipl_t ipl;
-    int rc;
-    
-    if (t == THREAD)
+ *
+ */
+int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+{
+    if (thread == THREAD)
         return EINVAL;
     
     /*
      * Since thread join can only be called once on an undetached thread,
…
      */
     
-    ipl = interrupts_disable();
-    spinlock_lock(&t->lock);
-    ASSERT(!t->detached);
-    spinlock_unlock(&t->lock);
-    interrupts_restore(ipl);
-    
-    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
-    
-    return rc;
+    irq_spinlock_lock(&thread->lock, true);
+    ASSERT(!thread->detached);
+    irq_spinlock_unlock(&thread->lock, true);
+    
+    return waitq_sleep_timeout(&thread->join_wq, usec, flags);
 }
 
 /** Detach thread.
  *
- * Mark the thread as detached, if the thread is already in the Lingering
- * state, deallocate its resources.
- *
- * @param t Thread to be detached.
- */
-void thread_detach(thread_t *t)
-{
-    ipl_t ipl;
-    
+ * Mark the thread as detached. If the thread is already
+ * in the Lingering state, deallocate its resources.
+ *
+ * @param thread Thread to be detached.
+ *
+ */
+void thread_detach(thread_t *thread)
+{
     /*
      * Since the thread is expected not to be already detached,
      * pointer to it must be still valid.
      */
-    ipl = interrupts_disable();
-    spinlock_lock(&t->lock);
-    ASSERT(!t->detached);
-    if (t->state == Lingering) {
-        thread_destroy(t);    /* unlocks &t->lock */
-        interrupts_restore(ipl);
+    irq_spinlock_lock(&thread->lock, true);
+    ASSERT(!thread->detached);
+    
+    if (thread->state == Lingering) {
+        /*
+         * Unlock &thread->lock and restore
+         * interrupts in thread_destroy().
+         */
+        thread_destroy(thread, true);
         return;
     } else {
-        t->detached = true;
+        thread->detached = true;
     }
-    spinlock_unlock(&t->lock);
-    interrupts_restore(ipl);
+    
+    irq_spinlock_unlock(&thread->lock, true);
 }
 
…
 {
     waitq_t wq;
     
     waitq_initialize(&wq);
     
     (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
 }
 
-/** Register thread out-of-context invocation
- *
- * Register a function and its argument to be executed
- * on next context switch to the current thread.
- *
- * @param call_me      Out-of-context function.
- * @param call_me_with Out-of-context function argument.
- *
- */
-void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
-{
-    ipl_t ipl;
-    
-    ipl = interrupts_disable();
-    spinlock_lock(&THREAD->lock);
-    THREAD->call_me = call_me;
-    THREAD->call_me_with = call_me_with;
-    spinlock_unlock(&THREAD->lock);
-    interrupts_restore(ipl);
-}
-
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-    thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
-    
-    uint64_t cycles;
-    char suffix;
-    order(t->cycles, &cycles, &suffix);
-    
-#ifdef __32_BITS__
-    printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ",
-        t->tid, t->name, t, thread_states[t->state], t->task,
-        t->task->context, t->thread_code, t->kstack, cycles, suffix);
-#endif
-    
-#ifdef __64_BITS__
-    printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ",
-        t->tid, t->name, t, thread_states[t->state], t->task,
-        t->task->context, t->thread_code, t->kstack, cycles, suffix);
-#endif
-    
-    if (t->cpu)
-        printf("%-4u", t->cpu->id);
-    else
-        printf("none");
-    
-    if (t->state == Sleeping) {
-#ifdef __32_BITS__
-        printf(" %10p", t->sleep_queue);
-#endif
-        
-#ifdef __64_BITS__
-        printf(" %18p", t->sleep_queue);
-#endif
-    }
-    
-    printf("\n");
-    
-    return true;
-}
-
-/** Print list of threads debug info */
-void thread_print_list(void)
-{
-    ipl_t ipl;
-    
-    /* Messing with thread structures, avoid deadlock */
-    ipl = interrupts_disable();
-    spinlock_lock(&threads_lock);
-    
-#ifdef __32_BITS__
-    printf("tid name address state task "
-        "ctx code stack cycles cpu "
-        "waitqueue\n");
-    printf("------ ---------- ---------- -------- ---------- "
-        "--- ---------- ---------- ---------- ---- "
-        "----------\n");
-#endif
-    
-#ifdef __64_BITS__
-    printf("tid name address state task "
-        "ctx code stack cycles cpu "
-        "waitqueue\n");
-    printf("------ ---------- ------------------ -------- ------------------ "
-        "--- ------------------ ------------------ ---------- ---- "
-        "------------------\n");
-#endif
-    
-    avltree_walk(&threads_tree, thread_walker, NULL);
-    
-    spinlock_unlock(&threads_lock);
-    interrupts_restore(ipl);
+    bool *additional = (bool *) arg;
+    thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
+    
+    uint64_t ucycles, kcycles;
+    char usuffix, ksuffix;
+    order_suffix(thread->ucycles, &ucycles, &usuffix);
+    order_suffix(thread->kcycles, &kcycles, &ksuffix);
+    
+    char *name;
+    if (str_cmp(thread->name, "uinit") == 0)
+        name = thread->task->name;
+    else
+        name = thread->name;
+    
+#ifdef __32_BITS__
+    if (*additional)
+        printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
+            thread->tid, thread->thread_code, thread->kstack,
+            ucycles, usuffix, kcycles, ksuffix);
+    else
+        printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
+            thread->tid, name, thread, thread_states[thread->state],
+            thread->task, thread->task->context);
+#endif
+    
+#ifdef __64_BITS__
+    if (*additional)
+        printf("%-8" PRIu64 " %18p %18p\n"
+            " %9" PRIu64 "%c %9" PRIu64 "%c ",
+            thread->tid, thread->thread_code, thread->kstack,
+            ucycles, usuffix, kcycles, ksuffix);
+    else
+        printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
+            thread->tid, name, thread, thread_states[thread->state],
+            thread->task, thread->task->context);
+#endif
+    
+    if (*additional) {
+        if (thread->cpu)
+            printf("%-5u", thread->cpu->id);
+        else
+            printf("none ");
+        
+        if (thread->state == Sleeping) {
+#ifdef __32_BITS__
+            printf(" %10p", thread->sleep_queue);
+#endif
+            
+#ifdef __64_BITS__
+            printf(" %18p", thread->sleep_queue);
+#endif
+        }
+        
+        printf("\n");
+    }
+    
+    return true;
+}
+
+/** Print list of threads debug info
+ *
+ * @param additional Print additional information.
+ *
+ */
+void thread_print_list(bool additional)
+{
+    /* Messing with thread structures, avoid deadlock */
+    irq_spinlock_lock(&threads_lock, true);
+    
+#ifdef __32_BITS__
+    if (additional)
+        printf("[id ] [code ] [stack ] [ucycles ] [kcycles ]"
+            " [cpu] [waitqueue]\n");
+    else
+        printf("[id ] [name ] [address ] [state ] [task ]"
+            " [ctx]\n");
+#endif
+    
+#ifdef __64_BITS__
+    if (additional) {
+        printf("[id ] [code ] [stack ]\n"
+            " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
+    } else
+        printf("[id ] [name ] [address ] [state ]"
+            " [task ] [ctx]\n");
+#endif
+    
+    avltree_walk(&threads_tree, thread_walker, &additional);
+    
+    irq_spinlock_unlock(&threads_lock, true);
 }
 
…
  * interrupts must be already disabled.
  *
- * @param t Pointer to thread.
+ * @param thread Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- */
-bool thread_exists(thread_t *t)
-{
-    avltree_node_t *node;
-    
-    node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
+ *
+ */
+bool thread_exists(thread_t *thread)
+{
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&threads_lock));
+    
+    avltree_node_t *node =
+        avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
     
     return node != NULL;
…
  * interrupts must be already disabled.
  *
- */
-void thread_update_accounting(void)
+ * @param user True to update user accounting, false for kernel.
+ *
+ */
+void thread_update_accounting(bool user)
 {
     uint64_t time = get_cycle();
-    THREAD->cycles += time - THREAD->last_cycle;
+    
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&THREAD->lock));
+    
+    if (user)
+        THREAD->ucycles += time - THREAD->last_cycle;
+    else
+        THREAD->kcycles += time - THREAD->last_cycle;
+    
     THREAD->last_cycle = time;
 }
 
+static bool thread_search_walker(avltree_node_t *node, void *arg)
+{
+    thread_t *thread =
+        (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
+    thread_iterator_t *iterator = (thread_iterator_t *) arg;
+    
+    if (thread->tid == iterator->thread_id) {
+        iterator->thread = thread;
+        return false;
+    }
+    
+    return true;
+}
+
+/** Find thread structure corresponding to thread ID.
+ *
+ * The threads_lock must be already held by the caller of this function and
+ * interrupts must be disabled.
+ *
+ * @param id Thread ID.
+ *
+ * @return Thread structure address or NULL if there is no such thread ID.
+ *
+ */
+thread_t *thread_find_by_id(thread_id_t thread_id)
+{
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&threads_lock));
+    
+    thread_iterator_t iterator;
+    
+    iterator.thread_id = thread_id;
+    iterator.thread = NULL;
+    
+    avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
+    
+    return iterator.thread;
+}
+
+#ifdef CONFIG_UDEBUG
+
+void thread_stack_trace(thread_id_t thread_id)
+{
+    irq_spinlock_lock(&threads_lock, true);
+    
+    thread_t *thread = thread_find_by_id(thread_id);
+    if (thread == NULL) {
+        printf("No such thread.\n");
+        irq_spinlock_unlock(&threads_lock, true);
+        return;
+    }
+    
+    irq_spinlock_lock(&thread->lock, false);
+    
+    /*
+     * Schedule a stack trace to be printed
+     * just before the thread is scheduled next.
+     *
+     * If the thread is sleeping then try to interrupt
+     * the sleep. Any request for printing an uspace stack
+     * trace from within the kernel should be always
+     * considered a last resort debugging means, therefore
+     * forcing the thread's sleep to be interrupted
+     * is probably justifiable.
+     */
+    
+    bool sleeping = false;
+    istate_t *istate = thread->udebug.uspace_state;
+    if (istate != NULL) {
+        printf("Scheduling thread stack trace.\n");
+        thread->btrace = true;
+        if (thread->state == Sleeping)
+            sleeping = true;
+    } else
+        printf("Thread interrupt state not available.\n");
+    
+    irq_spinlock_unlock(&thread->lock, false);
+    
+    if (sleeping)
+        waitq_interrupt_sleep(thread);
+    
+    irq_spinlock_unlock(&threads_lock, true);
+}
+
+#endif /* CONFIG_UDEBUG */
+
 /** Process syscall to create new thread.
  *
  */
-unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
+sysarg_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
     size_t name_len, thread_id_t *uspace_thread_id)
 {
-    thread_t *t;
-    char namebuf[THREAD_NAME_BUFLEN];
-    uspace_arg_t *kernel_uarg;
-    int rc;
-    
     if (name_len > THREAD_NAME_BUFLEN - 1)
         name_len = THREAD_NAME_BUFLEN - 1;
     
-    rc = copy_from_uspace(namebuf, uspace_name, name_len);
+    char namebuf[THREAD_NAME_BUFLEN];
+    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
-        return (unative_t) rc;
+        return (sysarg_t) rc;
     
     namebuf[name_len] = 0;
     
     /*
      * In case of failure, kernel_uarg will be deallocated in this function.
      * In case of success, kernel_uarg will be freed in uinit().
+     *
      */
-    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+    uspace_arg_t *kernel_uarg =
+        (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
     
     rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
     if (rc != 0) {
         free(kernel_uarg);
-        return (unative_t) rc;
+        return (sysarg_t) rc;
     }
     
-    t = thread_create(uinit, kernel_uarg, TASK,
+    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
         THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-    if (t) {
+    if (thread) {
         if (uspace_thread_id != NULL) {
-            int rc;
-            
-            rc = copy_to_uspace(uspace_thread_id, &t->tid,
-                sizeof(t->tid));
+            rc = copy_to_uspace(uspace_thread_id, &thread->tid,
+                sizeof(thread->tid));
             if (rc != 0) {
                 /*
…
                  * creation now.
                  */
                 
                 /*
                  * The new thread structure is initialized, but
…
                  * We can safely deallocate it.
                  */
-                slab_free(thread_slab, t);
-                
-                
-                return (unative_t) rc;
+                slab_free(thread_slab, thread);
+                free(kernel_uarg);
+                
+                return (sysarg_t) rc;
             }
         }
+        
 #ifdef CONFIG_UDEBUG
         /*
…
          * and could be detected with THREAD_READ before.
          */
-        udebug_thread_b_event_attach(t, TASK);
+        udebug_thread_b_event_attach(thread, TASK);
 #else
-        thread_attach(t, TASK);
-#endif
-        thread_ready(t);
+        thread_attach(thread, TASK);
+#endif
+        thread_ready(thread);
         
         return 0;
     } else
         free(kernel_uarg);
     
-    return (unative_t) ENOMEM;
+    return (sysarg_t) ENOMEM;
 }
 
…
  *
  */
-unative_t sys_thread_exit(int uspace_status)
+sysarg_t sys_thread_exit(int uspace_status)
 {
     thread_exit();
+    
     /* Unreachable */
     return 0;
…
  *
  * @return 0 on success or an error code from @ref errno.h.
- */
-unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
+ *
+ */
+sysarg_t sys_thread_get_id(thread_id_t *uspace_thread_id)
 {
     /*
      * No need to acquire lock on THREAD because tid
      * remains constant for the lifespan of the thread.
+     *
      */
-    return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
+    return (sysarg_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
         sizeof(THREAD->tid));
 }
 
+/** Syscall wrapper for sleeping. */
+sysarg_t sys_thread_usleep(uint32_t usec)
+{
+    thread_usleep(usec);
+    return 0;
+}
+
 /** @}
  */
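The pattern that recurs throughout this changeset is the IRQ-aware spinlock API: a lock taken with irq_spinlock_lock(&lock, true) also disables interrupts and remembers the previous state, irq_spinlock_pass() hands that state over to another lock, and the matching irq_spinlock_unlock(..., true) restores it. The sketch below is a hypothetical caller, not part of the changeset, showing how the new threads_lock, thread_find_by_id() and the split ucycles/kcycles counters fit together; it mirrors what thread_stack_trace() above does, and the include paths are assumptions based on the usual HelenOS kernel layout.

/* Hypothetical debugging helper, not part of this changeset. */
#include <proc/thread.h>     /* thread_t, threads_lock, thread_find_by_id() */
#include <synch/spinlock.h>  /* irq_spinlock_lock() / irq_spinlock_unlock() */
#include <print.h>           /* kernel printf(), assumed header name */

static void print_thread_cycles(thread_id_t id)
{
    /* Take the tree lock with interrupts disabled (second argument true). */
    irq_spinlock_lock(&threads_lock, true);

    /* The returned thread is only guaranteed valid while threads_lock is held. */
    thread_t *thread = thread_find_by_id(id);
    if (thread == NULL) {
        irq_spinlock_unlock(&threads_lock, true);
        return;
    }

    /* Nest the per-thread lock; interrupts are already off, so pass false. */
    irq_spinlock_lock(&thread->lock, false);

    printf("thread %" PRIu64 ": ucycles=%" PRIu64 " kcycles=%" PRIu64 "\n",
        thread->tid, thread->ucycles, thread->kcycles);

    irq_spinlock_unlock(&thread->lock, false);
    irq_spinlock_unlock(&threads_lock, true);
}

Note that the outer unlock with true is what re-enables interrupts, exactly as in thread_stack_trace() and thread_print_list() in the diff above.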