source: mainline/kernel/generic/src/udebug/udebug_ops.c@ c680333

Last change on this file since c680333 was 111b9b9, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Reimplement waitq using thread_wait/wakeup

This adds a few functions to the thread API which can be
summarized as "stop running until woken up by others".
The ordering and context-switching concerns are thus yeeted
to this abstraction and waitq only deals with maintaining
the queues. Overall, this makes the control flow in waitq
much easier to navigate.

/*
 * Copyright (c) 2008 Jiri Svoboda
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_generic
 * @{
 */

/**
 * @file
 * @brief Udebug operations.
 *
 * Udebug operations on tasks and threads are implemented here. The
 * functions defined here are called from the udebug_ipc module
 * when servicing udebug IPC messages.
 */

#include <debug.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <errno.h>
#include <stdbool.h>
#include <str.h>
#include <syscall/copy.h>
#include <ipc/ipc.h>
#include <udebug/udebug.h>
#include <udebug/udebug_ops.h>
#include <mem.h>
#include <stdlib.h>

/** Prepare a thread for a debugging operation.
 *
 * Simply put, return thread t with t->udebug.lock held,
 * but only if all conditions are verified.
 *
 * Specifically, verifies that thread t exists, is a userspace thread,
 * and belongs to the current task (TASK). Also verifies that the thread
 * is (or is not) GO according to @a being_go (typically false).
 * It also locks t->udebug.lock, making sure that t->udebug.active
 * is true - that the thread is in a valid debugging session.
 *
 * With this verified and the t->udebug.lock mutex held, it is ensured
 * that the thread cannot leave the debugging session, let alone cease
 * to exist.
 *
 * In this function, holding the TASK->udebug.lock mutex prevents the
 * thread from leaving the debugging session while we relax from
 * the t->lock spinlock to the t->udebug.lock mutex.
 *
 * @param thread   Pointer to the thread; need not be valid at all.
 * @param being_go Required GO state of the thread.
 *
 * @return EOK if all went well, or an error code otherwise.
 */
static errno_t _thread_op_begin(thread_t *thread, bool being_go)
{
        mutex_lock(&TASK->udebug.lock);

        thread = thread_try_get(thread);

        if (!thread) {
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

        irq_spinlock_lock(&thread->lock, true);

        /* Verify that 'thread' is a userspace thread. */
        if (!thread->uspace) {
                /* It's not; deny its existence. */
                irq_spinlock_unlock(&thread->lock, true);
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

        /* Verify debugging state. */
        if (thread->udebug.active != true) {
                /* Not in a debugging session. */
                irq_spinlock_unlock(&thread->lock, true);
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

        /* Now verify that the thread belongs to the current task. */
        if (thread->task != TASK) {
                /* No such thread belongs to this task. */
                irq_spinlock_unlock(&thread->lock, true);
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

        irq_spinlock_unlock(&thread->lock, true);

        /* Only the TASK->udebug.lock mutex is left. */

        /*
         * Now we need to grab the thread's debug lock for synchronization
         * of the thread's stoppability/stop state.
         */
        mutex_lock(&thread->udebug.lock);

        /* The big task mutex is no longer needed. */
        mutex_unlock(&TASK->udebug.lock);

        if (thread->udebug.go != being_go) {
                /* Undesired GO state. */
                mutex_unlock(&thread->udebug.lock);
                return EINVAL;
        }

        /* Only thread->udebug.lock left. */

        return EOK; /* All went well. */
}

/** End debugging operation on a thread. */
static void _thread_op_end(thread_t *thread)
{
        mutex_unlock(&thread->udebug.lock);

        /* Drop the reference taken in _thread_op_begin(). */
        thread_put(thread);
}

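/*
 * Illustrative sketch (not part of the original file): the canonical way a
 * udebug operation brackets its work between _thread_op_begin() and
 * _thread_op_end(). The function name example_thread_op is hypothetical;
 * the real callers are the udebug_* operations below.
 *
 *      static errno_t example_thread_op(thread_t *thread)
 *      {
 *              errno_t rc = _thread_op_begin(thread, false);
 *              if (rc != EOK)
 *                      return rc;
 *
 *              // thread->udebug is now stable and may be inspected
 *              bool has_go = thread->udebug.go;
 *              (void) has_go;
 *
 *              _thread_op_end(thread);
 *              return EOK;
 *      }
 */
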
/** Begin debugging the current task.
 *
 * Initiates a debugging session for the current task (and its threads).
 * When the debugging session has started, a reply should be sent to the
 * UDEBUG_BEGIN call. This may happen immediately in this function if
 * all the threads in this task are stoppable at the moment; in that
 * case the function sets @a *active to @c true.
 *
 * Otherwise the function sets @a *active to @c false and the response should
 * be sent as soon as all the threads become stoppable (i.e. they can be
 * considered stopped).
 *
 * @param call   The BEGIN call we are servicing.
 * @param active Place to store @c true iff we went directly to the active
 *               state, @c false if we only went to the beginning state.
 *
 * @return EOK on success, EBUSY if the task already has an active
 *         debugging session.
 */
errno_t udebug_begin(call_t *call, bool *active)
{
        LOG("Debugging task %" PRIu64, TASK->taskid);

        mutex_lock(&TASK->udebug.lock);

        if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
                mutex_unlock(&TASK->udebug.lock);
                return EBUSY;
        }

        TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
        TASK->udebug.begin_call = call;
        TASK->udebug.debugger = call->sender;

        if (TASK->udebug.not_stoppable_count == 0) {
                TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
                TASK->udebug.begin_call = NULL;
                *active = true;  /* Directly to the active state. */
        } else
                *active = false;  /* Only in the beginning state. */

        /* Set udebug.active on all of the task's userspace threads. */

        list_foreach(TASK->threads, th_link, thread_t, thread) {
                mutex_lock(&thread->udebug.lock);
                if (thread->uspace) {
                        thread->udebug.active = true;
                        mutex_unlock(&thread->udebug.lock);
                        condvar_broadcast(&thread->udebug.active_cv);
                } else
                        mutex_unlock(&thread->udebug.lock);
        }

        mutex_unlock(&TASK->udebug.lock);
        return EOK;
}

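/*
 * Illustrative sketch (an assumption, not code taken from the udebug_ipc
 * module): how a caller servicing UDEBUG_BEGIN might consume the active
 * flag. When active is true the BEGIN call can be answered right away;
 * otherwise the answer is deferred until all threads become stoppable.
 *
 *      bool active;
 *      errno_t rc = udebug_begin(call, &active);
 *      if (rc != EOK) {
 *              // task already has a debugging session; fail the request
 *      } else if (active) {
 *              // all threads stoppable: answer the BEGIN call immediately
 *      } else {
 *              // answer is sent once every thread becomes stoppable
 *      }
 */
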
/** Finish debugging the current task.
 *
 * Closes the debugging session for the current task.
 *
 * @return Zero on success or an error code.
 */
errno_t udebug_end(void)
{
        LOG("Task %" PRIu64, TASK->taskid);

        mutex_lock(&TASK->udebug.lock);
        errno_t rc = udebug_task_cleanup(TASK);
        mutex_unlock(&TASK->udebug.lock);

        return rc;
}

/** Set the event mask.
 *
 * Sets the event mask that determines which events are enabled.
 *
 * @param mask Bitwise OR combination of events that should be enabled.
 *
 * @return Zero on success or an error code.
 */
errno_t udebug_set_evmask(udebug_evmask_t mask)
{
        LOG("mask = 0x%x", mask);

        mutex_lock(&TASK->udebug.lock);

        if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
                mutex_unlock(&TASK->udebug.lock);
                return EINVAL;
        }

        TASK->udebug.evmask = mask;
        mutex_unlock(&TASK->udebug.lock);

        return EOK;
}

/** Give a thread GO.
 *
 * Upon receiving a GO message, the thread is given GO. Being GO
 * means the thread is allowed to execute userspace code (until
 * a debugging event or STOP occurs, at which point the thread loses GO).
 *
 * @param thread The thread to operate on (unlocked and need not be valid).
 * @param call   The GO call that we are servicing.
 *
 * @return EOK on success or an error code otherwise.
 */
errno_t udebug_go(thread_t *thread, call_t *call)
{
        /* On success, this will lock thread->udebug.lock. */
        errno_t rc = _thread_op_begin(thread, false);
        if (rc != EOK)
                return rc;

        thread->udebug.go_call = call;
        thread->udebug.go = true;
        thread->udebug.cur_event = 0;  /* None. */

        /*
         * Neither the thread's lock nor threads_lock may be held
         * during the wakeup.
         */
        waitq_wake_all(&thread->udebug.go_wq);

        _thread_op_end(thread);

        return EOK;
}

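/*
 * Illustrative sketch (hypothetical helper; the real dispatch lives in the
 * udebug_ipc module): servicing a GO request for a thread hash received
 * from userspace. The hash is simply the thread_t pointer handed out by
 * udebug_thread_read(); _thread_op_begin() validates it.
 *
 *      static errno_t handle_go_request(call_t *call, sysarg_t thread_hash)
 *      {
 *              thread_t *thread = (thread_t *) thread_hash;
 *              return udebug_go(thread, call);
 *      }
 */
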
/** Stop a thread (i.e. take its GO away).
 *
 * Generates a STOP event as soon as the thread becomes stoppable (i.e.
 * can be considered stopped).
 *
 * @param thread The thread to operate on (unlocked and need not be valid).
 * @param call   The GO call that we are servicing.
 *
 * @return EOK on success or an error code otherwise.
 */
errno_t udebug_stop(thread_t *thread, call_t *call)
{
        LOG("udebug_stop()");

        /*
         * On success, this will lock thread->udebug.lock. Note that this
         * makes sure the thread is not stopped.
         */
        errno_t rc = _thread_op_begin(thread, true);
        if (rc != EOK)
                return rc;

        /* Take GO away from the thread. */
        thread->udebug.go = false;

        if (thread->udebug.stoppable != true) {
                /* Answer will be sent when the thread becomes stoppable. */
                _thread_op_end(thread);
                return EOK;
        }

        /*
         * Answer the GO call.
         */

        /* Make sure nobody takes this call away from us. */
        call = thread->udebug.go_call;
        thread->udebug.go_call = NULL;

        ipc_set_retval(&call->data, 0);
        ipc_set_arg1(&call->data, UDEBUG_EVENT_STOP);

        THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;

        _thread_op_end(thread);

        mutex_lock(&TASK->udebug.lock);
        ipc_answer(&TASK->answerbox, call);
        mutex_unlock(&TASK->udebug.lock);

        return EOK;
}

/** Read the list of userspace threads in the current task.
 *
 * The list takes the form of a sequence of thread hashes (i.e. the pointers
 * to thread structures). A buffer of size @a buf_size is allocated and
 * a pointer to it written to @a buffer. The sequence of hashes is written
 * into this buffer.
 *
 * If the sequence is longer than @a buf_size bytes, only as many hashes
 * as can fit are copied. The number of bytes copied is stored in @a stored.
 * The total number of bytes that would have been needed to store all the
 * hashes is stored in @a needed.
 *
 * The rationale for having @a buf_size is that this function is only
 * used for servicing the THREAD_READ message, which always specifies
 * a maximum size for the userspace buffer.
 *
 * @param buffer   The buffer for storing thread hashes.
 * @param buf_size Buffer size in bytes.
 * @param stored   The actual number of bytes copied will be stored here.
 * @param needed   Total number of bytes needed to store all the hashes.
 *
 * @return EOK on success, ENOMEM if out of memory, EINVAL if the task
 *         has no active debugging session.
 */
errno_t udebug_thread_read(void **buffer, size_t buf_size, size_t *stored,
    size_t *needed)
{
        LOG("udebug_thread_read()");

        /* Allocate a buffer to hold thread IDs. */
        sysarg_t *id_buffer = malloc(buf_size + 1);
        if (!id_buffer)
                return ENOMEM;

        mutex_lock(&TASK->udebug.lock);

        /* Verify task state. */
        if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
                mutex_unlock(&TASK->udebug.lock);
                free(id_buffer);
                return EINVAL;
        }

        irq_spinlock_lock(&TASK->lock, true);

        /* Copy down the thread IDs. */

        size_t max_ids = buf_size / sizeof(sysarg_t);
        size_t copied_ids = 0;
        size_t extra_ids = 0;

        /* FIXME: make sure the thread isn't past debug shutdown... */
        list_foreach(TASK->threads, th_link, thread_t, thread) {
                irq_spinlock_lock(&thread->lock, false);
                bool uspace = thread->uspace;
                irq_spinlock_unlock(&thread->lock, false);

                /* Not interested in kernel threads. */
                if (!uspace)
                        continue;

                if (copied_ids < max_ids) {
                        /* Using the thread struct pointer as an identification hash. */
                        id_buffer[copied_ids++] = (sysarg_t) thread;
                } else
                        extra_ids++;
        }

        irq_spinlock_unlock(&TASK->lock, true);

        mutex_unlock(&TASK->udebug.lock);

        *buffer = id_buffer;
        *stored = copied_ids * sizeof(sysarg_t);
        *needed = (copied_ids + extra_ids) * sizeof(sysarg_t);

        return EOK;
}

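/*
 * Illustrative sketch (an assumption, not part of this file): interpreting
 * the buffer returned by udebug_thread_read(). Each entry is a sysarg_t
 * thread hash; needed greater than stored means the supplied buf_size was
 * too small and the request can be retried with a larger buffer.
 *
 *      void *buf;
 *      size_t stored, needed;
 *      errno_t rc = udebug_thread_read(&buf, buf_size, &stored, &needed);
 *      if (rc == EOK) {
 *              sysarg_t *hashes = (sysarg_t *) buf;
 *              size_t count = stored / sizeof(sysarg_t);
 *              for (size_t i = 0; i < count; i++) {
 *                      // hashes[i] identifies one userspace thread
 *              }
 *              free(buf);
 *      }
 */
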
/** Read the task name.
 *
 * Returns the task name as a null-terminated string in a newly allocated
 * buffer. Also returns the size of the data.
 *
 * @param data      Place to store pointer to the newly allocated block.
 * @param data_size Place to store the size of the data.
 *
 * @return EOK on success, ENOMEM if memory allocation failed.
 */
errno_t udebug_name_read(char **data, size_t *data_size)
{
        size_t name_size = str_size(TASK->name) + 1;

        *data = malloc(name_size);
        if (!*data)
                return ENOMEM;
        *data_size = name_size;

        memcpy(*data, TASK->name, name_size);

        return EOK;
}

/** Read the arguments of a system call.
 *
 * The arguments of the system call being executed are copied
 * to an allocated buffer and a pointer to it is written to @a buffer.
 * The size of the buffer is exactly such that it can hold the maximum number
 * of system-call arguments.
 *
 * Unless the thread is currently blocked in a SYSCALL_B or SYSCALL_E event,
 * this function will fail with an EINVAL error code.
 *
 * @param thread Thread whose call arguments are to be read.
 * @param buffer Place to store pointer to the new buffer.
 *
 * @return EOK on success, ENOENT if @a thread is invalid, EINVAL
 *         if the thread state is not valid for this operation.
 */
errno_t udebug_args_read(thread_t *thread, void **buffer)
{
        /* On success, this will lock thread->udebug.lock. */
        errno_t rc = _thread_op_begin(thread, false);
        if (rc != EOK)
                return rc;

        /* Additionally we need to verify that we are inside a syscall. */
        if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
            (thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E)) {
                _thread_op_end(thread);
                return EINVAL;
        }

        /* Prepare a buffer to hold the arguments. */
        sysarg_t *arg_buffer = malloc(6 * sizeof(sysarg_t));
        if (!arg_buffer) {
                _thread_op_end(thread);
                return ENOMEM;
        }

        /* Copy to a local buffer before releasing the lock. */
        memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(sysarg_t));

        _thread_op_end(thread);

        *buffer = arg_buffer;
        return EOK;
}

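/*
 * Illustrative sketch (an assumption): consuming the buffer produced by
 * udebug_args_read(). It always holds exactly six sysarg_t values, the
 * arguments of the syscall the thread is currently reporting.
 *
 *      void *buf;
 *      errno_t rc = udebug_args_read(thread, &buf);
 *      if (rc == EOK) {
 *              sysarg_t *args = (sysarg_t *) buf;
 *              // args[0] through args[5] are the syscall arguments
 *              free(buf);
 *      }
 */
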
/** Read the register state of the thread.
 *
 * The contents of the thread's istate structure are copied to a newly
 * allocated buffer and a pointer to it is written to @a buffer. The size of
 * the buffer will be sizeof(istate_t).
 *
 * Currently the register state cannot be read if the thread is inside a
 * system call (as opposed to an exception). This is an implementation limit.
 *
 * @param thread Thread whose state is to be read.
 * @param buffer Place to store pointer to the new buffer.
 *
 * @return EOK on success, ENOENT if @a thread is invalid, EINVAL
 *         if the thread is not in a valid state, EBUSY if the istate
 *         is not available.
 */
errno_t udebug_regs_read(thread_t *thread, void **buffer)
{
        /* On success, this will lock thread->udebug.lock. */
        errno_t rc = _thread_op_begin(thread, false);
        if (rc != EOK)
                return rc;

        istate_t *state = thread->udebug.uspace_state;
        if (state == NULL) {
                _thread_op_end(thread);
                return EBUSY;
        }

        /* Prepare a buffer to hold the data. */
        istate_t *state_buf = malloc(sizeof(istate_t));
        if (!state_buf) {
                _thread_op_end(thread);
                return ENOMEM;
        }

        /* Copy to the allocated buffer. */
        memcpy(state_buf, state, sizeof(istate_t));

        _thread_op_end(thread);

        *buffer = (void *) state_buf;
        return EOK;
}

/** Read the memory of the debugged task.
 *
 * Reads @a n bytes from the address space of the debugged task, starting
 * from @a uspace_addr. The bytes are copied into an allocated buffer
 * and a pointer to it is written into @a buffer.
 *
 * @param uspace_addr Address from where to start reading.
 * @param n           Number of bytes to read.
 * @param buffer      For storing a pointer to the allocated buffer.
 *
 * @return EOK on success, EBUSY if the task has no active debugging
 *         session, ENOMEM if out of memory, or an error code from
 *         copy_from_uspace().
 */
errno_t udebug_mem_read(uspace_addr_t uspace_addr, size_t n, void **buffer)
{
        /* Verify task state. */
        mutex_lock(&TASK->udebug.lock);

        if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
                mutex_unlock(&TASK->udebug.lock);
                return EBUSY;
        }

        void *data_buffer = malloc(n);
        if (!data_buffer) {
                mutex_unlock(&TASK->udebug.lock);
                return ENOMEM;
        }

        /*
         * NOTE: This is not strictly from a syscall, but that shouldn't
         * be a problem.
         */
        errno_t rc = copy_from_uspace(data_buffer, uspace_addr, n);
        mutex_unlock(&TASK->udebug.lock);

        if (rc != EOK)
                return rc;

        *buffer = data_buffer;
        return EOK;
}

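/*
 * Illustrative sketch (an assumption; addr and size stand for values taken
 * from a MEM_READ request): reading a block of the debugged task's memory
 * and releasing it after it has been passed back to the debugger.
 *
 *      void *block;
 *      errno_t rc = udebug_mem_read(addr, size, &block);
 *      if (rc == EOK) {
 *              // hand the size bytes at block back to the debugger
 *              free(block);
 *      }
 */
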
/** @}
 */