Changeset da1bafb in mainline for kernel/generic/src/udebug
- Timestamp: 2010-05-24T18:57:31Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- Location: kernel/generic/src/udebug
- Files: 2 edited
  - udebug.c (modified) (20 diffs)
  - udebug_ops.c (modified) (15 diffs)
kernel/generic/src/udebug/udebug.c
This diff compares r666f492 with rda1bafb. The changes to udebug.c are stylistic cleanups plus one small locking simplification (noted further below); no other logic changes:

- Doxygen blocks are realigned and gain a blank comment line before the closing */ (the file @brief, udebug_task_init(), udebug_thread_initialize(), udebug_wait_for_go(), udebug_stoppable_begin(), udebug_stoppable_end(), udebug_before_thread_runs(), udebug_syscall_event()).
- Local variables are declared at their point of first use instead of at the top of the function, as in the udebug_wait_for_go() reconstruction below; udebug_stoppable_begin() folds nsc, db_call and go_call into their initializers the same way.
- Compound conditions gain explicit parentheses, e.g. (TASK->udebug.dt_state == UDEBUG_TS_BEGINNING) && (nsc == 0), the /* none */ comment in udebug_thread_initialize() becomes /* None */, and trailing whitespace is stripped throughout.
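To make the declaration-at-point-of-use pattern concrete, here is the udebug_wait_for_go() hunk as reconstructed from the diff; indentation and line breaks are approximate, and the fragment naturally only builds inside the HelenOS kernel tree:

    /* r666f492 */
    static void udebug_wait_for_go(waitq_t *wq)
    {
        int rc;
        ipl_t ipl;

        ipl = waitq_sleep_prepare(wq);

        wq->missed_wakeups = 0;  /* Enforce blocking. */
        rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

        waitq_sleep_finish(wq, rc, ipl);
    }

    /* rda1bafb */
    static void udebug_wait_for_go(waitq_t *wq)
    {
        ipl_t ipl = waitq_sleep_prepare(wq);

        wq->missed_wakeups = 0;  /* Enforce blocking. */
        int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

        waitq_sleep_finish(wq, rc, ipl);
    }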
Further down the file:

- udebug_syscall_event() keeps its behaviour (fill in the GO response, record the syscall arguments, clear udebug.go, answer the call, sleep in udebug_wait_for_go()); only the declarations of etype and call move to their first use and the comments are reflowed.
- udebug_thread_b_event_attach(struct thread *t, struct task *ta) becomes udebug_thread_b_event_attach(struct thread *thread, struct task *task), with all uses of t and ta renamed accordingly and LOG() continuation lines reindented.
- udebug_thread_e_event() loses its up-front call_t *call declaration and normalizes comment capitalization (/* none */ to /* None */, /* set to initial value */ to /* Set to initial value */).
- udebug_task_cleanup(struct task *ta) becomes udebug_task_cleanup(struct task *task); its one-line brief is folded onto the /** line, the EINVAL early return and the LOG() call move above the (now point-of-use) declarations, and the read of each thread's flags is simplified: the interrupts_disable()/spinlock_lock(&t->lock) pair around flags = t->flags is dropped, as shown in the reconstruction below.
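This flags read is the one change in the file that touches locking rather than style. Reconstructed from the hunk, with the surrounding loop omitted:

    /* r666f492 */
    mutex_lock(&t->udebug.lock);

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);
    flags = t->flags;
    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    /* rda1bafb */
    mutex_lock(&thread->udebug.lock);
    unsigned int flags = thread->flags;

The new code reads thread->flags while holding only the thread's udebug mutex; the interrupt/spinlock dance around a single word-sized read is gone.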
As active == false,422 * Yes, so clear go. As active == false, 425 423 * this doesn't affect anything. 424 ( 426 425 */ 427 t ->udebug.go = false;428 426 thread->udebug.go = false; 427 429 428 /* Answer GO call */ 430 429 LOG("Answer GO call with EVENT_FINISHED."); 431 IPC_SET_RETVAL(t->udebug.go_call->data, 0); 432 IPC_SET_ARG1(t->udebug.go_call->data, 430 431 IPC_SET_RETVAL(thread->udebug.go_call->data, 0); 432 IPC_SET_ARG1(thread->udebug.go_call->data, 433 433 UDEBUG_EVENT_FINISHED); 434 435 ipc_answer(&ta ->answerbox, t->udebug.go_call);436 t ->udebug.go_call = NULL;434 435 ipc_answer(&task->answerbox, thread->udebug.go_call); 436 thread->udebug.go_call = NULL; 437 437 } else { 438 438 /* 439 439 * Debug_stop is already at initial value. 440 440 * Yet this means the thread needs waking up. 441 * 441 442 */ 442 443 443 444 /* 444 * t 's lock must not be held when calling445 * thread's lock must not be held when calling 445 446 * waitq_wakeup. 447 * 446 448 */ 447 waitq_wakeup(&t ->udebug.go_wq, WAKEUP_FIRST);449 waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST); 448 450 } 449 mutex_unlock(&t->udebug.lock);450 condvar_broadcast(&t->udebug.active_cv);451 } else {452 mutex_unlock(&t->udebug.lock);453 }454 } 455 456 ta ->udebug.dt_state = UDEBUG_TS_INACTIVE;457 ta ->udebug.debugger = NULL;458 451 452 mutex_unlock(&thread->udebug.lock); 453 condvar_broadcast(&thread->udebug.active_cv); 454 } else 455 mutex_unlock(&thread->udebug.lock); 456 } 457 458 task->udebug.dt_state = UDEBUG_TS_INACTIVE; 459 task->udebug.debugger = NULL; 460 459 461 return 0; 460 462 } … … 466 468 * a chance to examine the faulting thead/task. When the debugging session 467 469 * is over, this function returns (so that thread/task cleanup can continue). 470 * 468 471 */ 469 472 void udebug_thread_fault(void) 470 473 { 471 474 udebug_stoppable_begin(); 472 475 473 476 /* Wait until a debugger attends to us. */ 474 477 mutex_lock(&THREAD->udebug.lock); … … 476 479 condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock); 477 480 mutex_unlock(&THREAD->udebug.lock); 478 481 479 482 /* Make sure the debugging session is over before proceeding. */ 480 483 mutex_lock(&THREAD->udebug.lock); … … 482 485 condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock); 483 486 mutex_unlock(&THREAD->udebug.lock); 484 487 485 488 udebug_stoppable_end(); 486 489 } -
kernel/generic/src/udebug/udebug_ops.c
This diff compares r666f492 with rda1bafb. udebug_ops.c gets the same stylistic treatment as udebug.c plus a real locking change: the explicit interrupt-disable/spinlock sequences are replaced by the irq_spinlock API.

- Doxygen blocks are realigned, one-line briefs are folded onto the /** line, and the thread parameter of _thread_op_begin(), _thread_op_end(), udebug_go(), udebug_stop(), udebug_args_read() and udebug_regs_read() is renamed from t to thread.
- _thread_op_begin() now takes threads_lock with irq_spinlock_lock(&threads_lock, true), releases it on the error paths with irq_spinlock_unlock(..., true), and hands over to the thread's own lock with irq_spinlock_exchange(&threads_lock, &thread->lock); the separate ipl_t bookkeeping disappears (see the reconstruction below). The rest of the function only moves declarations and reflows comments.
- udebug_begin() logs the task id with PRIu64 instead of %llu, declares reply, cur and thread at their first use, and drops the braces around single-statement else branches; udebug_end() folds rc into its initializer.
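To show the irq_spinlock conversion concretely, here is the opening of _thread_op_begin() reconstructed from the hunk; the later THREAD_FLAG_USPACE and udebug.active checks are omitted and formatting is approximate:

    /* r666f492 */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    if (!thread_exists(t)) {
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* t->lock is enough to ensure the thread's existence */
    spinlock_lock(&t->lock);
    spinlock_unlock(&threads_lock);

    /* rda1bafb */
    irq_spinlock_lock(&threads_lock, true);

    if (!thread_exists(thread)) {
        irq_spinlock_unlock(&threads_lock, true);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /* thread->lock is enough to ensure the thread's existence */
    irq_spinlock_exchange(&threads_lock, &thread->lock);

irq_spinlock_lock(..., true) bundles interrupts_disable() with taking the lock, and irq_spinlock_exchange() expresses the hand-over from threads_lock to the thread's own lock that the old code spelled out as an explicit lock/unlock pair.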
Continuing through the file:

- udebug_set_evmask() changes only in comment layout and whitespace; udebug_go() and udebug_stop() fold their rc into int rc = _thread_op_begin(...), rename t to thread, drop braces around single-statement returns, and otherwise keep the GO/STOP answer paths and the waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST) call intact.
- udebug_thread_read() is converted to the irq_spinlock API as well: TASK->lock is taken with irq_spinlock_lock(&TASK->lock, true) and each thread's lock with irq_spinlock_lock(&thread->lock, false), replacing the explicit interrupts_disable()/spinlock_lock() pairs; the long list of up-front locals (t, cur, tid, copied_ids, extra_ids, ipl, id_buffer, flags, max_ids) is declared where first used, and the tid temporary disappears in favour of casting the thread pointer directly (see the sketch below).
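A reconstruction of the udebug_thread_read() locking change, with the loop body reduced to the flags read (the userspace filter and hash copying are elided as comments):

    /* r666f492 */
    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        flags = t->flags;
        spinlock_unlock(&t->lock);

        /* ... skip kernel threads, copy the thread hash ... */
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    /* rda1bafb */
    irq_spinlock_lock(&TASK->lock, true);

    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        thread_t *thread = list_get_instance(cur, thread_t, th_link);

        irq_spinlock_lock(&thread->lock, false);
        int flags = thread->flags;
        irq_spinlock_unlock(&thread->lock, false);

        /* ... skip kernel threads, copy the thread hash ... */
    }

    irq_spinlock_unlock(&TASK->lock, true);

The outer task lock passes true so interrupts are disabled for the whole walk; the nested per-thread locks pass false because interrupt state has already been handled by the outer lock.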
The remaining readers, udebug_name_read(), udebug_args_read(), udebug_regs_read() and udebug_mem_read(), keep their logic: their locals (name_size, arg_buffer, rc, state_buf, state, data_buffer) move to their point of first use, t becomes thread in the argument and register readers, multi-line comments are reformatted, and single-statement if bodies lose their braces, as in the udebug_mem_read() reconstruction below.
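For completeness, udebug_mem_read() in its rda1bafb form, reconstructed from the hunk (formatting approximate):

    int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
    {
        /* Verify task state */
        mutex_lock(&TASK->udebug.lock);

        if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
            mutex_unlock(&TASK->udebug.lock);
            return EBUSY;
        }

        void *data_buffer = malloc(n, 0);

        /*
         * NOTE: this is not strictly from a syscall... but that shouldn't
         * be a problem
         */
        int rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
        mutex_unlock(&TASK->udebug.lock);

        if (rc != 0)
            return rc;

        *buffer = data_buffer;
        return 0;
    }

Only the declarations move and the NOTE comment is reflowed; the logic matches r666f492.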
