Changeset 3b3e776 in mainline for kernel/generic/src
- Timestamp: 2010-02-05T10:57:50Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0358da0
- Parents: 3f085132, b4cbef1
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full history of either parent.
- Location: kernel/generic/src
- Files: 1 added, 18 edited
- console/kconsole.c (modified) (3 diffs)
- ddi/irq.c (modified) (2 diffs)
- debug/stacktrace.c (added)
- debug/symtab.c (modified) (4 diffs)
- interrupt/interrupt.c (modified) (2 diffs)
- ipc/ipc.c (modified) (16 diffs)
- ipc/irq.c (modified) (1 diff)
- ipc/sysipc.c (modified) (11 diffs)
- lib/elf.c (modified) (2 diffs)
- lib/string.c (modified) (3 diffs)
- mm/as.c (modified) (1 diff)
- mm/backend_phys.c (modified) (1 diff)
- proc/task.c (modified) (7 diffs)
- proc/thread.c (modified) (3 diffs)
- synch/futex.c (modified) (8 diffs)
- syscall/syscall.c (modified) (4 diffs)
- udebug/udebug.c (modified) (3 diffs)
- udebug/udebug_ipc.c (modified) (6 diffs)
- udebug/udebug_ops.c (modified) (10 diffs)
Legend:
- Lines prefixed with "-" were removed, lines prefixed with "+" were added; unprefixed lines are unchanged context.
kernel/generic/src/console/kconsole.c
r3f085132 → r3b3e776

@@ -289 +289 @@
 
 	char tmp[STR_BOUNDS(MAX_CMDLINE)];
-	wstr_nstr(tmp, current + beg, position - beg + 1);
+	wstr_to_str(tmp, position - beg + 1, current + beg);
 
 	int found;
@@ -543 +543 @@
 		if (str_lcmp(hlp->name, cmdline + start,
 		    max(str_length(hlp->name),
-		    str_nlength(cmdline + start, (size_t) (end - start) - 1))) == 0) {
+		    str_nlength(cmdline + start, (size_t) (end - start)))) == 0) {
 			cmd = hlp;
 			break;
@@ -665 +665 @@
 
 	char cmdline[STR_BOUNDS(MAX_CMDLINE)];
-	wstr_nstr(cmdline, tmp, STR_BOUNDS(MAX_CMDLINE));
+	wstr_to_str(cmdline, STR_BOUNDS(MAX_CMDLINE), tmp);
 
 	if ((!kcon) && (len == 4) && (str_lcmp(cmdline, "exit", 4) == 0))
kernel/generic/src/ddi/irq.c
r3f085132 → r3b3e776

@@ -74 +74 @@
 #include <synch/spinlock.h>
 #include <console/console.h>
+#include <interrupt.h>
 #include <memstr.h>
 #include <arch.h>
@@ -169 +170 @@
 	irq->inr = -1;
 	irq->devno = -1;
+
+	irq_initialize_arch(irq);
 }
kernel/generic/src/debug/symtab.c
r3f085132 → r3b3e776

@@ -46 +46 @@
 /** Get name of a symbol that seems most likely to correspond to address.
  *
- * @param addr Address.
- * @param name Place to store pointer to the symbol name.
+ * @param addr   Address.
+ * @param name   Place to store pointer to the symbol name.
+ * @param offset Place to store offset from the symbol address.
  *
  * @return Zero on success or negative error code, ENOENT if not found,
@@ -53 +54 @@
  *
  */
-int symtab_name_lookup(unative_t addr, char **name)
+int symtab_name_lookup(uintptr_t addr, char **name, uintptr_t *offset)
 {
 #ifdef CONFIG_SYMTAB
@@ -65 +66 @@
 		if (addr >= uint64_t_le2host(symbol_table[i - 1].address_le)) {
 			*name = symbol_table[i - 1].symbol_name;
+			if (offset)
+				*offset = addr -
+				    uint64_t_le2host(symbol_table[i - 1].address_le);
 			return EOK;
 		}
@@ -88 +92 @@
  *
  */
-char *symtab_fmt_name_lookup(unative_t addr)
+char *symtab_fmt_name_lookup(uintptr_t addr)
 {
 	char *name;
-	int rc = symtab_name_lookup(addr, &name);
+	int rc = symtab_name_lookup(addr, &name, NULL);
 
 	switch (rc) {
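For reference, the extended lookup contract can be exercised as in the following sketch. The helper name and output format are hypothetical; passing NULL for the new offset argument preserves the old behavior, as symtab_fmt_name_lookup() above demonstrates.

```c
/* Hypothetical caller: resolve a program counter to "symbol+offset". */
static void print_symbol(uintptr_t pc)
{
	char *name;
	uintptr_t offset;

	if (symtab_name_lookup(pc, &name, &offset) == EOK)
		printf("%p: %s+%p\n", (void *) pc, name, (void *) offset);
	else
		printf("%p: <unknown symbol>\n", (void *) pc);
}
```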
kernel/generic/src/interrupt/interrupt.c
r3f085132 → r3b3e776

@@ -44 +44 @@
 #include <console/console.h>
 #include <console/cmd.h>
+#include <ipc/event.h>
+#include <synch/mutex.h>
+#include <time/delay.h>
+#include <macros.h>
 #include <panic.h>
 #include <print.h>
@@ -107 +111 @@
 	fault_if_from_uspace(istate, "Unhandled exception %d.", n);
 	panic("Unhandled exception %d.", n);
+}
+
+/** Terminate thread and task if exception came from userspace. */
+void fault_if_from_uspace(istate_t *istate, char *fmt, ...)
+{
+	task_t *task = TASK;
+	va_list args;
+
+	if (!istate_from_uspace(istate))
+		return;
+
+	printf("Task %s (%" PRIu64 ") killed due to an exception at "
+	    "program counter %p.\n", task->name, task->taskid,
+	    istate_get_pc(istate));
+
+	stack_trace_istate(istate);
+
+	printf("Kill message: ");
+	va_start(args, fmt);
+	vprintf(fmt, args);
+	va_end(args);
+	printf("\n");
+
+	/*
+	 * Userspace can subscribe for FAULT events to take action
+	 * whenever a thread faults. (E.g. take a dump, run a debugger).
+	 * The notification is always available, but unless Udebug is enabled,
+	 * that's all you get.
+	 */
+	if (event_is_subscribed(EVENT_FAULT)) {
+		/* Notify the subscriber that a fault occurred. */
+		event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
+		    UPPER32(TASK->taskid), (unative_t) THREAD);
+
+#ifdef CONFIG_UDEBUG
+		/* Wait for a debugging session. */
+		udebug_thread_fault();
+#endif
+	}
+
+	task_kill(task->taskid);
+	thread_exit();
 }
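Note that the EVENT_FAULT notification splits the 64-bit task ID across two 32-bit IPC arguments via the LOWER32()/UPPER32() macros. A sketch of how a receiver would reassemble it, assuming the usual low/high-word semantics of those macros (argument names are placeholders, not part of any kernel API):

```c
#include <stdint.h>

/* taskid was sent as event_notify_3(EVENT_FAULT, LOWER32(id), UPPER32(id), ...) */
static inline uint64_t fault_taskid(uint32_t arg1_lower, uint32_t arg2_upper)
{
	return ((uint64_t) arg2_upper << 32) | (uint64_t) arg1_lower;
}
```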
kernel/generic/src/ipc/ipc.c
r3f085132 → r3b3e776

@@ -62 +62 @@
 
 static slab_cache_t *ipc_call_slab;
+static slab_cache_t *ipc_answerbox_slab;
 
 /** Initialize a call structure.
@@ -96 +97 @@
 }
 
-/** Initialize a statically allocated call structure.
- *
- * @param call Statically allocated kernel call structure to be
- *             initialized.
- */
-void ipc_call_static_init(call_t *call)
-{
-	_ipc_call_init(call);
-	call->flags |= IPC_CALL_STATIC_ALLOC;
-}
-
 /** Deallocate a call structure.
@@ -113 +103 @@
 void ipc_call_free(call_t *call)
 {
-	ASSERT(!(call->flags & IPC_CALL_STATIC_ALLOC));
 	/* Check to see if we have data in the IPC_M_DATA_SEND buffer. */
 	if (call->buffer)
@@ -130 +119 @@
 	spinlock_initialize(&box->irq_lock, "ipc_box_irqlock");
 	waitq_initialize(&box->wq);
+	link_initialize(&box->sync_box_link);
 	list_initialize(&box->connected_phones);
 	list_initialize(&box->calls);
@@ -179 +169 @@
 int ipc_call_sync(phone_t *phone, call_t *request)
 {
-	answerbox_t sync_box;
-
-	ipc_answerbox_init(&sync_box, TASK);
+	answerbox_t *sync_box;
+	ipl_t ipl;
+
+	sync_box = slab_alloc(ipc_answerbox_slab, 0);
+	ipc_answerbox_init(sync_box, TASK);
+
+	/*
+	 * Put the answerbox on the TASK's list of synchronous answerboxes so
+	 * that it can be cleaned up if the call is interrupted.
+	 */
+	ipl = interrupts_disable();
+	spinlock_lock(&TASK->lock);
+	list_append(&sync_box->sync_box_link, &TASK->sync_box_head);
+	spinlock_unlock(&TASK->lock);
+	interrupts_restore(ipl);
 
 	/* We will receive data in a special box. */
-	request->callerbox = &sync_box;
+	request->callerbox = sync_box;
 
 	ipc_call(phone, request);
-	if (!ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT,
-	    SYNCH_FLAGS_INTERRUPTIBLE))
+	if (!ipc_wait_for_call(sync_box, SYNCH_NO_TIMEOUT,
+	    SYNCH_FLAGS_INTERRUPTIBLE)) {
+		/* The answerbox and the call will be freed by ipc_cleanup(). */
 		return EINTR;
+	}
+
+	/*
+	 * The answer arrived without interruption so we can remove the
+	 * answerbox from the TASK's list of synchronous answerboxes.
+	 */
+	(void) interrupts_disable();
+	spinlock_lock(&TASK->lock);
+	list_remove(&sync_box->sync_box_link);
+	spinlock_unlock(&TASK->lock);
+	interrupts_restore(ipl);
+
+	slab_free(ipc_answerbox_slab, sync_box);
 	return EOK;
 }
@@ -196 +212 @@
  *
  * @param call Call structure to be answered.
- */
-static void _ipc_answer_free_call(call_t *call)
+ * @param selflocked If true, then TASK->answerbox is locked.
+ */
+static void _ipc_answer_free_call(call_t *call, bool selflocked)
 {
 	answerbox_t *callerbox = call->callerbox;
+	bool do_lock = ((!selflocked) || callerbox != (&TASK->answerbox));
 
 	call->flags |= IPC_CALL_ANSWERED;
@@ -210 +228 @@
 	}
 
-	spinlock_lock(&callerbox->lock);
+	if (do_lock)
+		spinlock_lock(&callerbox->lock);
 	list_append(&call->link, &callerbox->answers);
-	spinlock_unlock(&callerbox->lock);
+	if (do_lock)
+		spinlock_unlock(&callerbox->lock);
 	waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
@@ -228 +248 @@
 	spinlock_unlock(&box->lock);
 	/* Send back answer */
-	_ipc_answer_free_call(call);
+	_ipc_answer_free_call(call, false);
@@ -245 +265 @@
 	atomic_inc(&phone->active_calls);
 	IPC_SET_RETVAL(call->data, err);
-	_ipc_answer_free_call(call);
+	_ipc_answer_free_call(call, false);
@@ -284 +304 @@
 	if (call->flags & IPC_CALL_FORWARDED) {
 		IPC_SET_RETVAL(call->data, EFORWARD);
-		_ipc_answer_free_call(call);
+		_ipc_answer_free_call(call, false);
 	} else {
 		if (phone->state == IPC_PHONE_HUNGUP)
@@ -439 +459 @@
 
 			IPC_SET_RETVAL(call->data, EHANGUP);
-			_ipc_answer_free_call(call);
+			_ipc_answer_free_call(call, true);
 		}
 	}
@@ -520 +540 @@
 	int i;
 	call_t *call;
+	ipl_t ipl;
 
 	/* Disconnect all our phones ('ipc_phone_hangup') */
@@ -545 +566 @@
 	spinlock_unlock(&TASK->answerbox.lock);
 
-	/* Wait for all async answers to arrive */
+	/* Wait for all answers to interrupted synchronous calls to arrive */
+	ipl = interrupts_disable();
+	while (!list_empty(&TASK->sync_box_head)) {
+		answerbox_t *box = list_get_instance(TASK->sync_box_head.next,
+		    answerbox_t, sync_box_link);
+
+		list_remove(&box->sync_box_link);
+		call = ipc_wait_for_call(box, SYNCH_NO_TIMEOUT,
+		    SYNCH_FLAGS_NONE);
+		ipc_call_free(call);
+		slab_free(ipc_answerbox_slab, box);
+	}
+	interrupts_restore(ipl);
+
+	/* Wait for all answers to asynchronous calls to arrive */
 	while (1) {
 		/* Go through all phones, until all are FREE... */
@@ -552 +587 @@
 		for (i = 0; i < IPC_MAX_PHONES; i++) {
 			if (TASK->phones[i].state == IPC_PHONE_HUNGUP &&
-			    atomic_get(&TASK->phones[i].active_calls) == 0)
+			    atomic_get(&TASK->phones[i].active_calls) == 0) {
 				TASK->phones[i].state = IPC_PHONE_FREE;
+				TASK->phones[i].callee = NULL;
+			}
 
 			/* Just for sure, we might have had some
@@ -574 +611 @@
 		ASSERT((call->flags & IPC_CALL_ANSWERED) ||
 		    (call->flags & IPC_CALL_NOTIF));
-		ASSERT(!(call->flags & IPC_CALL_STATIC_ALLOC));
 
 		/*
@@ -593 +629 @@
 	ipc_call_slab = slab_cache_create("ipc_call", sizeof(call_t), 0, NULL,
 	    NULL, 0);
+	ipc_answerbox_slab = slab_cache_create("ipc_answerbox",
+	    sizeof(answerbox_t), 0, NULL, NULL, 0);
 }
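The reason the synchronous answerbox moves from the caller's stack to a slab allocation parked on TASK->sync_box_head is lifetime: when ipc_wait_for_call() is interrupted, ipc_call_sync() returns and its stack frame dies, yet the server may still deliver an answer into the box later. The following sketch condenses the removed pre-change code to show the hazard; it is illustrative, not in-tree code.

```c
/* Pre-change shape (hazard sketch, condensed from the removed lines): */
int broken_call_sync(phone_t *phone, call_t *request)
{
	answerbox_t sync_box;		/* lived in this stack frame */

	ipc_answerbox_init(&sync_box, TASK);
	request->callerbox = &sync_box;
	ipc_call(phone, request);
	if (!ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT,
	    SYNCH_FLAGS_INTERRUPTIBLE))
		return EINTR;	/* frame dies here, yet the server can still
				 * answer into sync_box: use-after-return */
	return EOK;
}
```

With the slab-allocated box, an interrupted call simply leaves the box linked on the task; ipc_cleanup() later drains the pending answer and returns the box to ipc_answerbox_slab.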
kernel/generic/src/ipc/irq.c
r3f085132 → r3b3e776

@@ -423 +423 @@
 	case CMD_ACCEPT:
 		return IRQ_ACCEPT;
-		break;
 	case CMD_DECLINE:
 	default:
kernel/generic/src/ipc/sysipc.c
r3f085132 → r3b3e776

@@ -61 +61 @@
 { \
 	if (phoneid > IPC_MAX_PHONES) { \
-		err; \
+		err \
 	} \
 	phone = &TASK->phones[phoneid]; \
@@ -122 +122 @@
 	case IPC_M_DATA_READ:
 		return 1;
-		break;
 	default:
 		return 0;
@@ -376 +375 @@
 		phone_t *cloned_phone;
 		GET_CHECK_PHONE(cloned_phone, IPC_GET_ARG1(call->data),
-		    return ENOENT);
+		    return ENOENT;);
 		phones_lock(cloned_phone, phone);
 		if ((cloned_phone->state != IPC_PHONE_CONNECTED) ||
@@ -531 +530 @@
     unative_t arg1, unative_t arg2, unative_t arg3, ipc_data_t *data)
 {
-	call_t call;
+	call_t *call;
 	phone_t *phone;
 	int res;
 	int rc;
 
-	GET_CHECK_PHONE(phone, phoneid, return ENOENT);
-
-	ipc_call_static_init(&call);
-	IPC_SET_METHOD(call.data, method);
-	IPC_SET_ARG1(call.data, arg1);
-	IPC_SET_ARG2(call.data, arg2);
-	IPC_SET_ARG3(call.data, arg3);
+	GET_CHECK_PHONE(phone, phoneid, return ENOENT;);
+
+	call = ipc_call_alloc(0);
+	IPC_SET_METHOD(call->data, method);
+	IPC_SET_ARG1(call->data, arg1);
+	IPC_SET_ARG2(call->data, arg2);
+	IPC_SET_ARG3(call->data, arg3);
 	/*
 	 * To achieve deterministic behavior, zero out arguments that are beyond
 	 * the limits of the fast version.
 	 */
-	IPC_SET_ARG4(call.data, 0);
-	IPC_SET_ARG5(call.data, 0);
-
-	if (!(res = request_preprocess(&call, phone))) {
+	IPC_SET_ARG4(call->data, 0);
+	IPC_SET_ARG5(call->data, 0);
+
+	if (!(res = request_preprocess(call, phone))) {
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_begin();
 #endif
-		rc = ipc_call_sync(phone, &call);
+		rc = ipc_call_sync(phone, call);
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_end();
 #endif
-		if (rc != EOK)
+		if (rc != EOK) {
+			/* The call will be freed by ipc_cleanup(). */
 			return rc;
-		process_answer(&call);
+		}
+		process_answer(call);
 
 	} else {
-		IPC_SET_RETVAL(call.data, res);
-	}
-	rc = STRUCT_TO_USPACE(&data->args, &call.data.args);
+		IPC_SET_RETVAL(call->data, res);
+	}
+	rc = STRUCT_TO_USPACE(&data->args, &call->data.args);
+	ipc_call_free(call);
 	if (rc != 0)
 		return rc;
@@ -584 +586 @@
     ipc_data_t *reply)
 {
-	call_t call;
+	call_t *call;
 	phone_t *phone;
 	int res;
 	int rc;
 
-	ipc_call_static_init(&call);
-	rc = copy_from_uspace(&call.data.args, &question->args,
-	    sizeof(call.data.args));
-	if (rc != 0)
+	GET_CHECK_PHONE(phone, phoneid, return ENOENT;);
+
+	call = ipc_call_alloc(0);
+	rc = copy_from_uspace(&call->data.args, &question->args,
+	    sizeof(call->data.args));
+	if (rc != 0) {
+		ipc_call_free(call);
 		return (unative_t) rc;
-
-	GET_CHECK_PHONE(phone, phoneid, return ENOENT);
-
-	if (!(res = request_preprocess(&call, phone))) {
+	}
+
+	if (!(res = request_preprocess(call, phone))) {
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_begin();
 #endif
-		rc = ipc_call_sync(phone, &call);
+		rc = ipc_call_sync(phone, call);
 #ifdef CONFIG_UDEBUG
 		udebug_stoppable_end();
 #endif
-		if (rc != EOK)
+		if (rc != EOK) {
+			/* The call will be freed by ipc_cleanup(). */
 			return rc;
-		process_answer(&call);
+		}
+		process_answer(call);
 	} else
-		IPC_SET_RETVAL(call.data, res);
-
-	rc = STRUCT_TO_USPACE(&reply->args, &call.data.args);
+		IPC_SET_RETVAL(call->data, res);
+
+	rc = STRUCT_TO_USPACE(&reply->args, &call->data.args);
+	ipc_call_free(call);
 	if (rc != 0)
 		return rc;
@@ -658 +666 @@
 		return IPC_CALLRET_TEMPORARY;
 
-	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);
+	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL;);
 
 	call = ipc_call_alloc(0);
@@ -697 +705 @@
 		return IPC_CALLRET_TEMPORARY;
 
-	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);
+	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL;);
 
 	call = ipc_call_alloc(0);
@@ -747 +755 @@ (whitespace-only change)
 	call->flags |= IPC_CALL_FORWARDED;
 
-	GET_CHECK_PHONE(phone, phoneid, { 
+	GET_CHECK_PHONE(phone, phoneid, {
 		IPC_SET_RETVAL(call->data, EFORWARD);
 		ipc_answer(&TASK->answerbox, call);
@@ -952 +960 @@
 	phone_t *phone;
 
-	GET_CHECK_PHONE(phone, phoneid, return ENOENT);
+	GET_CHECK_PHONE(phone, phoneid, return ENOENT;);
 
 	if (ipc_phone_hangup(phone))
@@ -991 +999 @@
 
 	if (call->flags & IPC_CALL_NOTIF) {
-		ASSERT(!(call->flags & IPC_CALL_STATIC_ALLOC));
-
 		/* Set in_phone_hash to the interrupt counter */
 		call->data.phone = (void *) call->priv;
@@ -1005 +1011 @@
 	if (call->flags & IPC_CALL_ANSWERED) {
 		process_answer(call);
-
-		ASSERT(!(call->flags & IPC_CALL_STATIC_ALLOC));
 
 		if (call->flags & IPC_CALL_DISCARD_ANSWER) {
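The GET_CHECK_PHONE change moves the statement terminator from the macro body to the call site, so the error argument can be any statement or block rather than a bare expression. A rough sketch of the resulting expansion, based on the macro shape shown in the first hunk above:

```c
/* Call site under the new convention: */
GET_CHECK_PHONE(phone, phoneid, return ENOENT;);

/* ...expands (roughly) to: */
{
	if (phoneid > IPC_MAX_PHONES) {
		return ENOENT;
	}
	phone = &TASK->phones[phoneid];
}
```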
kernel/generic/src/lib/elf.c
r3f085132 → r3b3e776

@@ -163 +163 @@
 	case PT_LOAD:
 		return load_segment(entry, elf, as);
-		break;
 	case PT_DYNAMIC:
 	case PT_INTERP:
@@ -182 +181 @@
 	default:
 		return EE_UNSUPPORTED;
-		break;
 	}
 	return EE_OK;
kernel/generic/src/lib/string.c
r3f085132 → r3b3e776

@@ -537 +537 @@
  * null-terminated and containing only complete characters.
  *
- * @param dst   Destination buffer.
+ * @param dest  Destination buffer.
  * @param count Size of the destination buffer (must be > 0).
  * @param src   Source string.
@@ -571 +571 @@
  * have to be null-terminated.
  *
- * @param dst   Destination buffer.
+ * @param dest  Destination buffer.
  * @param count Size of the destination buffer (must be > 0).
  * @param src   Source string.
@@ -596 +596 @@
 }
 
-/** Copy NULL-terminated wide string to string
- *
- * Copy source wide string @a src to destination buffer @a dst.
- * No more than @a size bytes are written. NULL-terminator is always
- * written after the last successfully copied character (i.e. if the
- * destination buffer has at least 1 byte, it will be always
- * NULL-terminated).
- *
- * @param src   Source wide string.
- * @param dst   Destination buffer.
- * @param count Size of the destination buffer.
- *
- */
-void wstr_nstr(char *dst, const wchar_t *src, size_t size)
-{
-	/* No space for the NULL-terminator in the buffer */
-	if (size == 0)
-		return;
-
+/** Convert wide string to string.
+ *
+ * Convert wide string @a src to string. The output is written to the buffer
+ * specified by @a dest and @a size. @a size must be non-zero and the string
+ * written will always be well-formed.
+ *
+ * @param dest Destination buffer.
+ * @param size Size of the destination buffer.
+ * @param src  Source wide string.
+ */
+void wstr_to_str(char *dest, size_t size, const wchar_t *src)
+{
 	wchar_t ch;
-	size_t src_idx = 0;
-	size_t dst_off = 0;
+	size_t src_idx;
+	size_t dest_off;
+
+	/* There must be space for a null terminator in the buffer. */
+	ASSERT(size > 0);
+
+	src_idx = 0;
+	dest_off = 0;
 
 	while ((ch = src[src_idx++]) != 0) {
-		if (chr_encode(ch, dst, &dst_off, size) != EOK)
+		if (chr_encode(ch, dest, &dest_off, size - 1) != EOK)
 			break;
 	}
-
-	if (dst_off >= size)
-		dst[size - 1] = 0;
-	else
-		dst[dst_off] = 0;
+
+	dest[dest_off] = '\0';
 }
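For callers, the replacement follows the (dest, size, src) argument convention of the other str_* copy routines, which is what the kconsole.c hunks above adapt to. A minimal usage sketch (the wrapper function is hypothetical; buffer sizing borrowed from kconsole.c):

```c
/* Convert a wide command line into a byte string. */
static void example(const wchar_t *wide)
{
	char buf[STR_BOUNDS(MAX_CMDLINE)];

	/* New argument order: destination, destination size, source. */
	wstr_to_str(buf, sizeof(buf), wide);
	printf("%s\n", buf);
}
```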
kernel/generic/src/mm/as.c
r3f085132 → r3b3e776

@@ -1920 +1920 @@
 }
 
+/** Get list of address space areas.
+ *
+ * @param as    Address space.
+ * @param obuf  Place to save pointer to returned buffer.
+ * @param osize Place to save size of returned buffer.
+ */
+void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
+{
+	ipl_t ipl;
+	size_t area_cnt, area_idx, i;
+	link_t *cur;
+
+	as_area_info_t *info;
+	size_t isize;
+
+	ipl = interrupts_disable();
+	mutex_lock(&as->lock);
+
+	/* First pass, count number of areas. */
+
+	area_cnt = 0;
+
+	for (cur = as->as_area_btree.leaf_head.next;
+	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+		area_cnt += node->keys;
+	}
+
+	isize = area_cnt * sizeof(as_area_info_t);
+	info = malloc(isize, 0);
+
+	/* Second pass, record data. */
+
+	area_idx = 0;
+
+	for (cur = as->as_area_btree.leaf_head.next;
+	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+		btree_node_t *node;
+
+		node = list_get_instance(cur, btree_node_t, leaf_link);
+
+		for (i = 0; i < node->keys; i++) {
+			as_area_t *area = node->value[i];
+
+			ASSERT(area_idx < area_cnt);
+			mutex_lock(&area->lock);
+
+			info[area_idx].start_addr = area->base;
+			info[area_idx].size = FRAMES2SIZE(area->pages);
+			info[area_idx].flags = area->flags;
+			++area_idx;
+
+			mutex_unlock(&area->lock);
+		}
+	}
+
+	mutex_unlock(&as->lock);
+	interrupts_restore(ipl);
+
+	*obuf = info;
+	*osize = isize;
+}
+
+
 /** Print out information about address space.
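A hypothetical consumer of the new function (the in-tree consumer is udebug_receive_areas_read() in udebug_ipc.c below). The helper name and printf formats are illustrative; the free() matches the malloc() inside as_get_area_info():

```c
/* Enumerate the areas of an address space and release the buffer. */
static void print_area_info(as_t *as)
{
	as_area_info_t *info;
	size_t isize, i;

	as_get_area_info(as, &info, &isize);

	for (i = 0; i < isize / sizeof(as_area_info_t); i++)
		printf("area at %p, %zu bytes, flags %#x\n",
		    (void *) info[i].start_addr, info[i].size, info[i].flags);

	free(info);
}
```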
kernel/generic/src/mm/backend_phys.c
r3f085132 → r3b3e776

@@ -40 +40 @@
 #include <arch/types.h>
 #include <mm/as.h>
+#include <mm/page.h>
 #include <mm/frame.h>
 #include <mm/slab.h>
kernel/generic/src/proc/task.c
r3f085132 → r3b3e776

@@ -54 +54 @@
 #include <func.h>
 #include <string.h>
+#include <memstr.h>
 #include <syscall/copy.h>
 #include <macros.h>
@@ -75 +76 @@
 static task_id_t task_counter = 0;
 
+static slab_cache_t *task_slab;
+
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
+static int tsk_constructor(void *, int);
 
 /** Initialize kernel tasks support. */
@@ -83 +87 @@
 	TASK = NULL;
 	avltree_create(&tasks_tree);
+	task_slab = slab_cache_create("task_slab", sizeof(task_t), 0,
+	    tsk_constructor, NULL, 0);
 }
@@ -128 +134 @@
 }
 
+int tsk_constructor(void *obj, int kmflags)
+{
+	task_t *ta = obj;
+	int i;
+
+	atomic_set(&ta->refcount, 0);
+	atomic_set(&ta->lifecount, 0);
+	atomic_set(&ta->active_calls, 0);
+
+	spinlock_initialize(&ta->lock, "task_ta_lock");
+	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
+
+	list_initialize(&ta->th_head);
+	list_initialize(&ta->sync_box_head);
+
+	ipc_answerbox_init(&ta->answerbox, ta);
+	for (i = 0; i < IPC_MAX_PHONES; i++)
+		ipc_phone_init(&ta->phones[i]);
+
+#ifdef CONFIG_UDEBUG
+	/* Init kbox stuff */
+	ta->kb.thread = NULL;
+	ipc_answerbox_init(&ta->kb.box, ta);
+	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
+#endif
+
+	return 0;
+}
+
 /** Create new task with no threads.
@@ -140 +175 @@
 	ipl_t ipl;
 	task_t *ta;
-	int i;
-
-	ta = (task_t *) malloc(sizeof(task_t), 0);
 
+	ta = (task_t *) slab_alloc(task_slab, 0);
 	task_create_arch(ta);
-
-	spinlock_initialize(&ta->lock, "task_ta_lock");
-	list_initialize(&ta->th_head);
 	ta->as = as;
-
 	memcpy(ta->name, name, TASK_NAME_BUFLEN);
 	ta->name[TASK_NAME_BUFLEN - 1] = 0;
 
-	atomic_set(&ta->refcount, 0);
-	atomic_set(&ta->lifecount, 0);
 	ta->context = CONTEXT;
-
 	ta->capabilities = 0;
 	ta->cycles = 0;
@@ -165 +191 @@
 
 	/* Init kbox stuff */
-	ipc_answerbox_init(&ta->kb.box, ta);
-	ta->kb.thread = NULL;
-	mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
 	ta->kb.finished = false;
 #endif
 
-	ipc_answerbox_init(&ta->answerbox, ta);
-	for (i = 0; i < IPC_MAX_PHONES; i++)
-		ipc_phone_init(&ta->phones[i]);
-	if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
-	    ta->context)))
+	if ((ipc_phone_0) &&
+	    (context_check(ipc_phone_0->task->context, ta->context)))
 		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
-	atomic_set(&ta->active_calls, 0);
-
-	mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
+
 	btree_create(&ta->futexes);
 
 	ipl = interrupts_disable();
-
-	/*
-	 * Increment address space reference count.
-	 */
 	atomic_inc(&as->refcount);
-
 	spinlock_lock(&tasks_lock);
 	ta->taskid = ++task_counter;
@@ -229 +242 @@
 	as_destroy(t->as);
 
-	free(t);
+	slab_free(task_slab, t);
 	TASK = NULL;
 }
kernel/generic/src/proc/thread.c
r3f085132 → r3b3e776

@@ -501 +501 @@
 void thread_sleep(uint32_t sec)
 {
-	thread_usleep(sec * 1000000);
+	/* Sleep in 1000 second steps to support
+	   full argument range */
+	while (sec > 0) {
+		uint32_t period = (sec > 1000) ? 1000 : sec;
+
+		thread_usleep(period * 1000000);
+		sec -= period;
+	}
 }
@@ -575 +582 @@ (whitespace-only changes)
 {
 	waitq_t wq;
 
 	waitq_initialize(&wq);
 
 	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
 }
@@ -812 +819 @@
 }
 
+/** Syscall wrapper for sleeping. */
+unative_t sys_thread_usleep(uint32_t usec)
+{
+	thread_usleep(usec);
+	return 0;
+}
+
 /** @}
  */
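The chunking in thread_sleep() exists because sec * 1000000 is computed in 32 bits: UINT32_MAX / 1000000 is 4294, so any argument above that used to wrap silently, while a 1000-second chunk keeps the product at 10^9, safely inside uint32_t. An illustrative host-side check of the arithmetic (not kernel code; _Static_assert assumed available):

```c
#include <stdint.h>

/* The per-iteration argument never overflows uint32_t microseconds. */
_Static_assert(1000u * 1000000u <= UINT32_MAX,
    "a 1000 s chunk fits in uint32_t microseconds");

/* Old behavior for sec = 5000: (uint32_t)(5000u * 1000000u) wraps to
 * 705032704, i.e. a ~12-minute sleep instead of the requested ~83 minutes. */
```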
kernel/generic/src/synch/futex.c
r3f085132 → r3b3e776

@@ -90 +90 @@ (indentation-only change)
 /** Initialize kernel futex structure.
  *
- * @param futex Kernel futex structure.
+ * @param futex Kernel futex structure.
  */
 void futex_initialize(futex_t *futex)
@@ -102 +102 @@
 /** Sleep in futex wait queue.
  *
- * @param uaddr Userspace address of the futex counter.
- * @param usec If non-zero, number of microseconds this thread is willing to
- *     sleep.
- * @param flags Select mode of operation.
- *
- * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See
- *     synch.h. If there is no physical mapping for uaddr ENOENT is returned.
- */
-unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags)
+ * @param uaddr Userspace address of the futex counter.
+ *
+ * @return If there is no physical mapping for uaddr ENOENT is
+ *         returned. Otherwise returns a wait result as defined in
+ *         synch.h.
+ */
+unative_t sys_futex_sleep(uintptr_t uaddr)
 {
 	futex_t *futex;
@@ -140 +138 @@
 	udebug_stoppable_begin();
 #endif
-	rc = waitq_sleep_timeout(&futex->wq, usec, flags |
-	    SYNCH_FLAGS_INTERRUPTIBLE);
-
+	rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
 #ifdef CONFIG_UDEBUG
 	udebug_stoppable_end();
@@ -151 +147 @@ (indentation-only change)
 /** Wakeup one thread waiting in futex wait queue.
  *
- * @param uaddr Userspace address of the futex counter.
- *
- * @return ENOENT if there is no physical mapping for uaddr.
+ * @param uaddr Userspace address of the futex counter.
+ *
+ * @return ENOENT if there is no physical mapping for uaddr.
  */
 unative_t sys_futex_wakeup(uintptr_t uaddr)
@@ -190 +186 @@ (indentation-only change)
 * If the structure does not exist already, a new one is created.
  *
- * @param paddr Physical address of the userspace futex counter.
- *
- * @return Address of the kernel futex structure.
+ * @param paddr Physical address of the userspace futex counter.
+ *
+ * @return Address of the kernel futex structure.
  */
 futex_t *futex_find(uintptr_t paddr)
@@ -284 +280 @@
 /** Compute hash index into futex hash table.
  *
- * @param key Address where the key (i.e. physical address of futex counter) is
- *     stored.
- *
- * @return Index into futex hash table.
+ * @param key Address where the key (i.e. physical address of futex
+ *            counter) is stored.
+ *
+ * @return Index into futex hash table.
  */
 size_t futex_ht_hash(unative_t *key)
@@ -296 +292 @@
 /** Compare futex hash table item with a key.
  *
- * @param key Address where the key (i.e. physical address of futex counter) is
- *     stored.
- *
- * @return True if the item matches the key. False otherwise.
+ * @param key Address where the key (i.e. physical address of futex
+ *            counter) is stored.
+ *
+ * @return True if the item matches the key. False otherwise.
  */
 bool futex_ht_compare(unative_t *key, size_t keys, link_t *item)
@@ -313 +309 @@ (indentation-only change)
 /** Callback for removal items from futex hash table.
  *
- * @param item Item removed from the hash table.
+ * @param item Item removed from the hash table.
  */
 void futex_ht_remove_callback(link_t *item)
kernel/generic/src/syscall/syscall.c
r3f085132 → r3b3e776

@@ -62 +62 @@
 
 #ifdef CONFIG_UDEBUG
-	bool debug;
-
 	/*
 	 * Early check for undebugged tasks. We do not lock anything as this
-	 * test need not be precise in either way.
+	 * test need not be precise in either direction.
 	 */
-	debug = THREAD->udebug.active;
-
-	if (debug) {
+	if (THREAD->udebug.active) {
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
 	}
@@ -87 +83 @@
 
 #ifdef CONFIG_UDEBUG
-	if (debug) {
+	if (THREAD->udebug.active) {
 		udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
 
@@ -111 +107 @@
 	(syshandler_t) sys_thread_exit,
 	(syshandler_t) sys_thread_get_id,
+	(syshandler_t) sys_thread_usleep,
 
 	(syshandler_t) sys_task_get_id,
@@ -117 +114 @@
 
 	/* Synchronization related syscalls. */
-	(syshandler_t) sys_futex_sleep_timeout,
+	(syshandler_t) sys_futex_sleep,
 	(syshandler_t) sys_futex_wakeup,
 	(syshandler_t) sys_smc_coherence,
kernel/generic/src/udebug/udebug.c
r3f085132 → r3b3e776

@@ -69 +69 @@
 	mutex_initialize(&ut->lock, MUTEX_PASSIVE);
 	waitq_initialize(&ut->go_wq);
+	condvar_initialize(&ut->active_cv);
 
 	ut->go_call = NULL;
@@ -446 +447 @@
 			waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
 		}
+		mutex_unlock(&t->udebug.lock);
+		condvar_broadcast(&t->udebug.active_cv);
+	} else {
+		mutex_unlock(&t->udebug.lock);
 	}
-	mutex_unlock(&t->udebug.lock);
 }
@@ -456 +460 @@
 }
 
+/** Wait for debugger to handle a fault in this thread.
+ *
+ * When a thread faults and someone is subscribed to the FAULT kernel event,
+ * this function is called to wait for a debugging session to give userspace
+ * a chance to examine the faulting thread/task. When the debugging session
+ * is over, this function returns (so that thread/task cleanup can continue).
+ */
+void udebug_thread_fault(void)
+{
+	udebug_stoppable_begin();
+
+	/* Wait until a debugger attends to us. */
+	mutex_lock(&THREAD->udebug.lock);
+	while (!THREAD->udebug.active)
+		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
+	mutex_unlock(&THREAD->udebug.lock);
+
+	/* Make sure the debugging session is over before proceeding. */
+	mutex_lock(&THREAD->udebug.lock);
+	while (THREAD->udebug.active)
+		condvar_wait(&THREAD->udebug.active_cv, &THREAD->udebug.lock);
+	mutex_unlock(&THREAD->udebug.lock);
+
+	udebug_stoppable_end();
+}
 
 /** @}
kernel/generic/src/udebug/udebug_ipc.c
r3f085132 → r3b3e776

@@ -41 +41 @@
 #include <proc/task.h>
 #include <proc/thread.h>
+#include <mm/as.h>
 #include <arch.h>
 #include <errno.h>
@@ -165 +166 @@
 static void udebug_receive_thread_read(call_t *call)
 {
+	uintptr_t uspace_addr;
+	size_t buf_size;
+	void *buffer;
+	size_t copied, needed;
+	int rc;
+
+	uspace_addr = IPC_GET_ARG2(call->data);	/* Destination address */
+	buf_size = IPC_GET_ARG3(call->data);	/* Dest. buffer size */
+
+	/*
+	 * Read thread list. Variable n will be filled with actual number
+	 * of threads times thread-id size.
+	 */
+	rc = udebug_thread_read(&buffer, buf_size, &copied, &needed);
+	if (rc < 0) {
+		IPC_SET_RETVAL(call->data, rc);
+		ipc_answer(&TASK->kb.box, call);
+		return;
+	}
+
+	/*
+	 * Make use of call->buffer to transfer data to caller's userspace
+	 */
+
+	IPC_SET_RETVAL(call->data, 0);
+	/* ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that
+	   same code in process_answer() can be used
+	   (no way to distinguish method in answer) */
+	IPC_SET_ARG1(call->data, uspace_addr);
+	IPC_SET_ARG2(call->data, copied);
+	IPC_SET_ARG3(call->data, needed);
+	call->buffer = buffer;
+
+	ipc_answer(&TASK->kb.box, call);
+}
+
+/** Process a NAME_READ call.
+ *
+ * Returns a string containing the name of the task.
+ *
+ * @param call The call structure.
+ */
+static void udebug_receive_name_read(call_t *call)
+{
 	unative_t uspace_addr;
 	unative_t to_copy;
-	unsigned total_bytes;
-	unsigned buf_size;
-	void *buffer;
-	size_t n;
-	int rc;
+	size_t data_size;
+	size_t buf_size;
+	void *data;
 
 	uspace_addr = IPC_GET_ARG2(call->data);	/* Destination address */
@@ -177 +220 @@
 
 	/*
-	 * Read thread list. Variable n will be filled with actual number
-	 * of threads times thread-id size.
-	 */
-	rc = udebug_thread_read(&buffer, buf_size, &n);
-	if (rc < 0) {
-		IPC_SET_RETVAL(call->data, rc);
-		ipc_answer(&TASK->kb.box, call);
-		return;
-	}
-
-	total_bytes = n;
-
-	/* Copy MAX(buf_size, total_bytes) bytes */
-
-	if (buf_size > total_bytes)
-		to_copy = total_bytes;
+	 * Read task name.
+	 */
+	udebug_name_read((char **) &data, &data_size);
+
+	/* Copy MAX(buf_size, data_size) bytes */
+
+	if (buf_size > data_size)
+		to_copy = data_size;
 	else
 		to_copy = buf_size;
@@ -207 +242 @@
 	IPC_SET_ARG2(call->data, to_copy);
 
-	IPC_SET_ARG3(call->data, total_bytes);
-	call->buffer = buffer;
-
-	ipc_answer(&TASK->kb.box, call);
-}
+	IPC_SET_ARG3(call->data, data_size);
+	call->buffer = data;
+
+	ipc_answer(&TASK->kb.box, call);
+}
+
+/** Process an AREAS_READ call.
+ *
+ * Returns a list of address space areas in the current task, as an array
+ * of as_area_info_t structures.
+ *
+ * @param call The call structure.
+ */
+static void udebug_receive_areas_read(call_t *call)
+{
+	unative_t uspace_addr;
+	unative_t to_copy;
+	size_t data_size;
+	size_t buf_size;
+	void *data;
+
+	uspace_addr = IPC_GET_ARG2(call->data);	/* Destination address */
+	buf_size = IPC_GET_ARG3(call->data);	/* Dest. buffer size */
+
+	/*
+	 * Read area list.
+	 */
+	as_get_area_info(AS, (as_area_info_t **) &data, &data_size);
+
+	/* Copy MAX(buf_size, data_size) bytes */
+
+	if (buf_size > data_size)
+		to_copy = data_size;
+	else
+		to_copy = buf_size;
+
+	/*
+	 * Make use of call->buffer to transfer data to caller's userspace
+	 */
+
+	IPC_SET_RETVAL(call->data, 0);
+	/* ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that
+	   same code in process_answer() can be used
+	   (no way to distinguish method in answer) */
+	IPC_SET_ARG1(call->data, uspace_addr);
+	IPC_SET_ARG2(call->data, to_copy);
+
+	IPC_SET_ARG3(call->data, data_size);
+	call->buffer = data;
+
+	ipc_answer(&TASK->kb.box, call);
+}
 
 /** Process an ARGS_READ call.
@@ -250 +333 @@
 	ipc_answer(&TASK->kb.box, call);
 }
+
+/** Receive a REGS_READ call.
+ *
+ * Reads the thread's register state (istate structure).
+ */
+static void udebug_receive_regs_read(call_t *call)
+{
+	thread_t *t;
+	unative_t uspace_addr;
+	unative_t to_copy;
+	void *buffer;
+	int rc;
+
+	t = (thread_t *) IPC_GET_ARG2(call->data);
+
+	rc = udebug_regs_read(t, &buffer);
+	if (rc < 0) {
+		IPC_SET_RETVAL(call->data, rc);
+		ipc_answer(&TASK->kb.box, call);
+		return;
+	}
+
+	/*
+	 * Make use of call->buffer to transfer data to caller's userspace
+	 */
+
+	uspace_addr = IPC_GET_ARG3(call->data);
+	to_copy = sizeof(istate_t);
+
+	IPC_SET_RETVAL(call->data, 0);
+	/* ARG1=dest, ARG2=size as in IPC_M_DATA_READ so that
+	   same code in process_answer() can be used
+	   (no way to distinguish method in answer) */
+	IPC_SET_ARG1(call->data, uspace_addr);
+	IPC_SET_ARG2(call->data, to_copy);
+
+	call->buffer = buffer;
+
+	ipc_answer(&TASK->kb.box, call);
+}
 
 /** Process an MEM_READ call.
@@ -331 +455 @@
 		udebug_receive_thread_read(call);
 		break;
+	case UDEBUG_M_NAME_READ:
+		udebug_receive_name_read(call);
+		break;
+	case UDEBUG_M_AREAS_READ:
+		udebug_receive_areas_read(call);
+		break;
 	case UDEBUG_M_ARGS_READ:
 		udebug_receive_args_read(call);
 		break;
+	case UDEBUG_M_REGS_READ:
+		udebug_receive_regs_read(call);
+		break;
 	case UDEBUG_M_MEM_READ:
 		udebug_receive_mem_read(call);
kernel/generic/src/udebug/udebug_ops.c
r3f085132 → r3b3e776

@@ -46 +46 @@
 #include <errno.h>
 #include <print.h>
+#include <string.h>
 #include <syscall/copy.h>
 #include <ipc/ipc.h>
 #include <udebug/udebug.h>
 #include <udebug/udebug_ops.h>
+#include <memstr.h>
 
@@ -208 +210 @@
 
 	mutex_lock(&t->udebug.lock);
-	if ((t->flags & THREAD_FLAG_USPACE) != 0)
+	if ((t->flags & THREAD_FLAG_USPACE) != 0) {
 		t->udebug.active = true;
-	mutex_unlock(&t->udebug.lock);
+		mutex_unlock(&t->udebug.lock);
+		condvar_broadcast(&t->udebug.active_cv);
+	} else {
+		mutex_unlock(&t->udebug.lock);
+	}
 }
@@ -354 +360 @@
  *
  * If the sequence is longer than @a buf_size bytes, only as much hashes
- * as can fit are copied. The number of thread hashes copied is stored
- * in @a n.
+ * as can fit are copied. The number of bytes copied is stored in @a stored.
+ * The total number of thread bytes that could have been saved had there been
+ * enough space is stored in @a needed.
  *
  * The rationale for having @a buf_size is that this function is only
@@ -363 +370 @@
  * @param buffer The buffer for storing thread hashes.
  * @param buf_size Buffer size in bytes.
- * @param n The actual number of hashes copied will be stored here.
- */
-int udebug_thread_read(void **buffer, size_t buf_size, size_t *n)
+ * @param stored The actual number of bytes copied will be stored here.
+ * @param needed Total number of hashes that could have been saved.
+ */
+int udebug_thread_read(void **buffer, size_t buf_size, size_t *stored,
+    size_t *needed)
 {
 	thread_t *t;
 	link_t *cur;
 	unative_t tid;
-	unsigned copied_ids;
+	size_t copied_ids;
+	size_t extra_ids;
 	ipl_t ipl;
 	unative_t *id_buffer;
@@ -379 +389 @@
 
 	/* Allocate a buffer to hold thread IDs */
-	id_buffer = malloc(buf_size, 0);
+	id_buffer = malloc(buf_size + 1, 0);
 
 	mutex_lock(&TASK->udebug.lock);
@@ -395 +405 @@
 	max_ids = buf_size / sizeof(unative_t);
 	copied_ids = 0;
+	extra_ids = 0;
 
 	/* FIXME: make sure the thread isn't past debug shutdown... */
 	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-		/* Do not write past end of buffer */
-		if (copied_ids >= max_ids) break;
-
 		t = list_get_instance(cur, thread_t, th_link);
 
@@ -408 +416 @@
 
 		/* Not interested in kernel threads. */
-		if ((flags & THREAD_FLAG_USPACE) != 0) {
+		if ((flags & THREAD_FLAG_USPACE) == 0)
+			continue;
+
+		if (copied_ids < max_ids) {
 			/* Using thread struct pointer as identification hash */
 			tid = (unative_t) t;
 			id_buffer[copied_ids++] = tid;
+		} else {
+			extra_ids++;
 		}
 	}
@@ -421 +434 @@
 
 	*buffer = id_buffer;
-	*n = copied_ids * sizeof(unative_t);
+	*stored = copied_ids * sizeof(unative_t);
+	*needed = (copied_ids + extra_ids) * sizeof(unative_t);
+
+	return 0;
+}
+
+/** Read task name.
+ *
+ * Returns task name as non-terminated string in a newly allocated buffer.
+ * Also returns the size of the data.
+ *
+ * @param data Place to store pointer to newly allocated block.
+ * @param data_size Place to store size of the data.
+ *
+ * @returns EOK.
+ */
+int udebug_name_read(char **data, size_t *data_size)
+{
+	size_t name_size;
+
+	name_size = str_size(TASK->name) + 1;
+	*data = malloc(name_size, 0);
+	*data_size = name_size;
+
+	memcpy(*data, TASK->name, name_size);
 
 	return 0;
@@ -436 +473 @@
 * this function will fail with an EINVAL error code.
  *
- * @param buffer The buffer for storing thread hashes.
+ * @param t Thread where call arguments are to be read.
+ * @param buffer Place to store pointer to new buffer.
+ * @return EOK on success, ENOENT if @a t is invalid, EINVAL
+ *         if thread state is not valid for this operation.
  */
 int udebug_args_read(thread_t *t, void **buffer)
@@ -468 +508 @@
 }
 
+/** Read the register state of the thread.
+ *
+ * The contents of the thread's istate structure are copied to a newly
+ * allocated buffer and a pointer to it is written to @a buffer. The size of
+ * the buffer will be sizeof(istate_t).
+ *
+ * Currently register state cannot be read if the thread is inside a system
+ * call (as opposed to an exception). This is an implementation limit.
+ *
+ * @param t Thread whose state is to be read.
+ * @param buffer Place to store pointer to new buffer.
+ * @return EOK on success, ENOENT if @a t is invalid, EINVAL
+ *         if thread is not in valid state, EBUSY if istate
+ *         is not available.
+ */
+int udebug_regs_read(thread_t *t, void **buffer)
+{
+	istate_t *state, *state_buf;
+	int rc;
+
+	/* Prepare a buffer to hold the data. */
+	state_buf = malloc(sizeof(istate_t), 0);
+
+	/* On success, this will lock t->udebug.lock */
+	rc = _thread_op_begin(t, false);
+	if (rc != EOK) {
+		return rc;
+	}
+
+	state = t->udebug.uspace_state;
+	if (state == NULL) {
+		_thread_op_end(t);
+		return EBUSY;
+	}
+
+	/* Copy to the allocated buffer */
+	memcpy(state_buf, state, sizeof(istate_t));
+
+	_thread_op_end(t);
+
+	*buffer = (void *) state_buf;
+	return 0;
+}
+
 /** Read the memory of the debugged task.
 *
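The stored/needed pair returned by udebug_thread_read() enables the standard probe-grow-retry pattern. A hypothetical consumer sketch (the in-tree consumer, udebug_receive_thread_read() in udebug_ipc.c above, simply forwards both values to the debugger instead of retrying):

```c
/* Hypothetical retry loop using the stored/needed contract. */
static void *read_all_thread_ids(size_t *size)
{
	size_t buf_size = 64;
	size_t stored, needed;
	void *buf;

	while (1) {
		if (udebug_thread_read(&buf, buf_size, &stored, &needed) < 0)
			return NULL;
		if (needed <= buf_size) {
			*size = stored;
			return buf;	/* caller releases with free() */
		}
		free(buf);		/* too small: grow and retry */
		buf_size = needed;
	}
}
```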