Changeset 6a44ee4 in mainline for kernel/generic/src
- Timestamp: 2011-07-20T15:26:21Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: efcebe1
- Parents: 25bef0ff (diff), a701812 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel/generic/src
- Files: 36 edited
- adt/btree.c (modified) (8 diffs)
- adt/hash_table.c (modified) (6 diffs)
- adt/list.c (modified) (2 diffs)
- console/cmd.c (modified) (3 diffs)
- console/console.c (modified) (10 diffs)
- console/kconsole.c (modified) (6 diffs)
- cpu/cpu.c (modified) (1 diff)
- ddi/ddi.c (modified) (4 diffs)
- ddi/irq.c (modified) (1 diff)
- debug/panic.c (modified) (2 diffs)
- interrupt/interrupt.c (modified) (1 diff)
- ipc/ipc.c (modified) (12 diffs)
- ipc/ipcrsc.c (modified) (1 diff)
- ipc/irq.c (modified) (6 diffs)
- ipc/kbox.c (modified) (2 diffs)
- ipc/sysipc.c (modified) (4 diffs)
- lib/rd.c (modified) (1 diff)
- main/version.c (modified) (1 diff)
- mm/as.c (modified) (16 diffs)
- mm/backend_anon.c (modified) (2 diffs)
- mm/backend_elf.c (modified) (2 diffs)
- mm/buddy.c (modified) (2 diffs)
- mm/page.c (modified) (3 diffs)
- mm/slab.c (modified) (5 diffs)
- proc/scheduler.c (modified) (8 diffs)
- proc/task.c (modified) (3 diffs)
- proc/thread.c (modified) (6 diffs)
- synch/futex.c (modified) (1 diff)
- synch/waitq.c (modified) (5 diffs)
- syscall/syscall.c (modified) (6 diffs)
- sysinfo/stats.c (modified) (2 diffs)
- time/clock.c (modified) (2 diffs)
- time/delay.c (modified) (3 diffs)
- time/timeout.c (modified) (5 diffs)
- udebug/udebug.c (modified) (1 diff)
- udebug/udebug_ops.c (modified) (2 diffs)
Legend: context lines are unprefixed; removed lines are prefixed with '-', added lines with '+'.
kernel/generic/src/adt/btree.c
--- r25bef0ff
+++ r6a44ee4
@@ -108 +108 @@
 void btree_create(btree_t *t)
 {
-    list_initialize(&t->leaf_head);
+    list_initialize(&t->leaf_list);
     t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
     node_initialize(t->root);
-    list_append(&t->root->leaf_link, &t->leaf_head);
+    list_append(&t->root->leaf_link, &t->leaf_list);
 }
@@ -588 +588 @@
     if (LEAF_NODE(node)) {
-        list_prepend(&rnode->leaf_link, &node->leaf_link);
+        list_insert_after(&rnode->leaf_link, &node->leaf_link);
     }
@@ -953 +953 @@
     ASSERT(LEAF_NODE(node));

-    if (node->leaf_link.prev != &t->leaf_head)
+    if (node->leaf_link.prev != &t->leaf_list.head)
         return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link);
     else
@@ -972 +972 @@
     ASSERT(LEAF_NODE(node));

-    if (node->leaf_link.next != &t->leaf_head)
+    if (node->leaf_link.next != &t->leaf_list.head)
         return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link);
     else
@@ -987 +987 @@
     size_t i;
     int depth = t->root->depth;
-    link_t head, *cur;
+    list_t list;

     printf("Printing B-tree:\n");
-    list_initialize(&head);
-    list_append(&t->root->bfs_link, &head);
+    list_initialize(&list);
+    list_append(&t->root->bfs_link, &list);
@@ -997 +997 @@
      * Levels are distinguished from one another by node->depth.
      */
-    while (!list_empty(&head)) {
+    while (!list_empty(&list)) {
         link_t *hlp;
         btree_node_t *node;

-        hlp = head.next;
-        ASSERT(hlp != &head);
+        hlp = list_first(&list);
+        ASSERT(hlp != NULL);
         node = list_get_instance(hlp, btree_node_t, bfs_link);
         list_remove(hlp);
@@ -1018 +1018 @@
             printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
             if (node->depth && node->subtree[i]) {
-                list_append(&node->subtree[i]->bfs_link, &head);
+                list_append(&node->subtree[i]->bfs_link, &list);
             }
         }

         if (node->depth && node->subtree[i])
-            list_append(&node->subtree[i]->bfs_link, &head);
+            list_append(&node->subtree[i]->bfs_link, &list);

         printf(")");
@@ -1031 +1031 @@

     printf("Printing list of leaves:\n");
-    for (cur = t->leaf_head.next; cur != &t->leaf_head; cur = cur->next) {
+    list_foreach(t->leaf_list, cur) {
         btree_node_t *node;
kernel/generic/src/adt/hash_table.c
--- r25bef0ff
+++ r6a44ee4
@@ -62 +62 @@
     ASSERT(max_keys > 0);

-    h->entry = (link_t *) malloc(m * sizeof(link_t), 0);
+    h->entry = (list_t *) malloc(m * sizeof(list_t), 0);
     if (!h->entry)
         panic("Cannot allocate memory for hash table.");

-    memsetb(h->entry, m * sizeof(link_t), 0);
+    memsetb(h->entry, m * sizeof(list_t), 0);

     for (i = 0; i < m; i++)
@@ -107 +107 @@
 link_t *hash_table_find(hash_table_t *h, sysarg_t key[])
 {
-    link_t *cur;
     size_t chain;

@@ -118 +117 @@
     ASSERT(chain < h->entries);

-    for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
+    list_foreach(h->entry[chain], cur) {
         if (h->op->compare(key, h->max_keys, cur)) {
             /*
@@ -141 +140 @@
 {
     size_t chain;
-    link_t *cur;

     ASSERT(h);
@@ -149 +147 @@
     ASSERT(keys <= h->max_keys);

+
     if (keys == h->max_keys) {
+        link_t *cur;
+
         /*
          * All keys are known, hash_table_find() can be used to find the entry.
@@ -169 +169 @@
      */
     for (chain = 0; chain < h->entries; chain++) {
-        for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
+        link_t *cur;
+        for (cur = h->entry[chain].head.next; cur != &h->entry[chain].head;
+            cur = cur->next) {
             if (h->op->compare(key, keys, cur)) {
                 link_t *hlp;
kernel/generic/src/adt/list.c
--- r25bef0ff
+++ r6a44ee4
@@ -43 +43 @@
 /** Check for membership
  *
- * Check whether link is contained in the list head.
- * The membership is defined as pointer equivalence.
+ * Check whether link is contained in a list.
+ * Membership is defined as pointer equivalence.
  *
- * @param link Item to look for.
- * @param head List to look in.
+ * @param link Item to look for.
+ * @param list List to look in.
  *
  * @return true if link is contained in head, false otherwise.
  *
  */
-int list_member(const link_t *link, const link_t *head)
+int list_member(const link_t *link, const list_t *list)
 {
     bool found = false;
-    link_t *hlp = head->next;
+    link_t *hlp = list->head.next;

-    while (hlp != head) {
+    while (hlp != &list->head) {
         if (hlp == link) {
             found = true;
@@ -68 +68 @@
 }

-
 /** Concatenate two lists
  *
- * Concatenate lists head1 and head2, producing a single
- * list head1 containing items from both (in head1, head2
- * order) and empty list head2.
+ * Concatenate lists @a list1 and @a list2, producing a single
+ * list @a list1 containing items from both (in @a list1, @a list2
+ * order) and empty list @a list2.
  *
- * @param head1 First list and concatenated output
- * @param head2 Second list and empty output.
+ * @param list1 First list and concatenated output
+ * @param list2 Second list and empty output.
  *
  */
-void list_concat(link_t *head1, link_t *head2)
+void list_concat(list_t *list1, list_t *list2)
 {
-    if (list_empty(head2))
+    if (list_empty(list2))
         return;

-    head2->next->prev = head1->prev;
-    head2->prev->next = head1;
-    head1->prev->next = head2->next;
-    head1->prev = head2->prev;
-    list_initialize(head2);
+    list2->head.next->prev = list1->head.prev;
+    list2->head.prev->next = &list1->head;
+    list1->head.prev->next = list2->head.next;
+    list1->head.prev = list2->head.prev;
+    list_initialize(list2);
+}
+
+/** Count list items
+ *
+ * Return the number of items in the list.
+ *
+ * @param list List to count.
+ * @return Number of items in the list.
+ */
+unsigned int list_count(const list_t *list)
+{
+    unsigned int count = 0;
+
+    list_foreach(*list, link) {
+        count++;
+    }
+
+    return count;
 }
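Editor's note on the API above: the list rework separates the list head proper (list_t, which embeds a head link) from the member links (link_t) that live inside items. A minimal usage sketch of the reworked primitives, assuming the kernel's <adt/list.h>; the item type and the demo function are hypothetical:

    #include <adt/list.h>

    /* Hypothetical payload type; only the embedded link_t matters. */
    typedef struct {
        link_t link;   /* membership link for one list_t */
        int value;
    } item_t;

    static list_t items;   /* the list head, now a distinct type */

    static void list_demo(item_t *a, item_t *b)
    {
        list_initialize(&items);     /* sets up items.head */
        link_initialize(&a->link);
        link_initialize(&b->link);

        list_append(&a->link, &items);
        list_append(&b->link, &items);

        /* Iteration no longer reaches into head.next directly. */
        list_foreach(items, cur) {
            item_t *it = list_get_instance(cur, item_t, link);
            (void) it->value;
        }

        ASSERT(list_count(&items) == 2);
        ASSERT(list_first(&items) == &a->link);
    }

The payoff visible throughout the rest of this changeset is type safety: open-coded loops over head->next can no longer confuse a list head with a member link, and passing a list_t where a link_t is expected now fails to compile.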
kernel/generic/src/console/cmd.c
--- r25bef0ff
+++ r6a44ee4
@@ -573 +573 @@
     spinlock_lock(&cmd_lock);

-    link_t *cur;
     size_t len = 0;
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;
         hlp = list_get_instance(cur, cmd_info_t, link);
@@ -591 +590 @@
     }

-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;
         hlp = list_get_instance(cur, cmd_info_t, link);
@@ -646 +645 @@
 int cmd_desc(cmd_arg_t *argv)
 {
-    link_t *cur;
-
     spinlock_lock(&cmd_lock);

-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;

kernel/generic/src/console/console.c
--- r25bef0ff
+++ r6a44ee4
@@ -87 +87 @@
 };

-static void stdout_write(outdev_t *, wchar_t, bool);
+static void stdout_write(outdev_t *, wchar_t);
 static void stdout_redraw(outdev_t *);

@@ -95 +95 @@
 };

-/** Silence output */
-bool silent = false;
+/** Override kernel console lockout */
+bool console_override = false;

 /** Standard input and output character devices */
@@ -122 +122 @@
 }

-static void stdout_write(outdev_t *dev, wchar_t ch, bool silent)
-{
-    link_t *cur;
-
-    for (cur = dev->list.next; cur != &dev->list; cur = cur->next) {
+static void stdout_write(outdev_t *dev, wchar_t ch)
+{
+    list_foreach(dev->list, cur) {
         outdev_t *sink = list_get_instance(cur, outdev_t, link);
         if ((sink) && (sink->op->write))
-            sink->op->write(sink, ch, silent);
+            sink->op->write(sink, ch);
     }
@@ -135 +133 @@
 static void stdout_redraw(outdev_t *dev)
 {
-    link_t *cur;
-
-    for (cur = dev->list.next; cur != &dev->list; cur = cur->next) {
+    list_foreach(dev->list, cur) {
         outdev_t *sink = list_get_instance(cur, outdev_t, link);
         if ((sink) && (sink->op->redraw))
@@ -160 +156 @@
     klog_parea.frames = SIZE2FRAMES(sizeof(klog));
     klog_parea.unpriv = false;
+    klog_parea.mapped = false;
     ddi_parea_register(&klog_parea);
@@ -171 +168 @@
 void grab_console(void)
 {
-    bool prev = silent;
-
-    silent = false;
+    bool prev = console_override;
+
+    console_override = true;
     if ((stdout) && (stdout->op->redraw))
         stdout->op->redraw(stdout);

-    if ((stdin) && (prev)) {
+    if ((stdin) && (!prev)) {
         /*
          * Force the console to print the prompt.
@@ -187 +184 @@
 void release_console(void)
 {
-    // FIXME arch_release_console
-    silent = true;
-}
-
-/** Tell kernel to get keyboard/console access again */
-sysarg_t sys_debug_enable_console(void)
+    console_override = false;
+}
+
+/** Activate kernel console override */
+sysarg_t sys_debug_activate_console(void)
 {
 #ifdef CONFIG_KCONSOLE
@@ -200 +196 @@
     return false;
 #endif
-}
-
-/** Tell kernel to relinquish keyboard/console access */
-sysarg_t sys_debug_disable_console(void)
-{
-    release_console();
-    return true;
 }
@@ -293 +282 @@
          */
         spinlock_unlock(&klog_lock);
-        stdout->op->write(stdout, tmp, silent);
+        stdout->op->write(stdout, tmp);
         spinlock_lock(&klog_lock);
     }
@@ -321 +310 @@
          * it should be no longer buffered.
          */
-        stdout->op->write(stdout, ch, silent);
+        stdout->op->write(stdout, ch);
     } else {
         /*
kernel/generic/src/console/kconsole.c
--- r25bef0ff
+++ r6a44ee4
@@ -84 +84 @@

 SPINLOCK_INITIALIZE(cmd_lock);  /**< Lock protecting command list. */
-LIST_INITIALIZE(cmd_head);      /**< Command list. */
+LIST_INITIALIZE(cmd_list);      /**< Command list. */

 static wchar_t history[KCONSOLE_HISTORY][MAX_CMDLINE] = {};
@@ -113 +113 @@
 bool cmd_register(cmd_info_t *cmd)
 {
-    link_t *cur;
-
     spinlock_lock(&cmd_lock);

@@ -120 +118 @@
      * Make sure the command is not already listed.
      */
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);

@@ -153 +151 @@
      * Now the command can be added.
      */
-    list_append(&cmd->link, &cmd_head);
+    list_append(&cmd->link, &cmd_list);

     spinlock_unlock(&cmd_lock);
@@ -176 +174 @@

     if (*startpos == NULL)
-        *startpos = cmd_head.next;
-
-    for (; *startpos != &cmd_head; *startpos = (*startpos)->next) {
+        *startpos = cmd_list.head.next;
+
+    for (; *startpos != &cmd_list.head; *startpos = (*startpos)->next) {
         cmd_info_t *hlp = list_get_instance(*startpos, cmd_info_t, link);

@@ -559 +557 @@

     cmd_info_t *cmd = NULL;
-    link_t *cur;
-
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
         spinlock_lock(&hlp->lock);
kernel/generic/src/cpu/cpu.c
--- r25bef0ff
+++ r6a44ee4
@@ -82 +82 @@
         for (j = 0; j < RQ_COUNT; j++) {
             irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
-            list_initialize(&cpus[i].rq[j].rq_head);
+            list_initialize(&cpus[i].rq[j].rq);
         }
     }
kernel/generic/src/ddi/ddi.c
--- r25bef0ff
+++ r6a44ee4
@@ -122 +122 @@
     backend_data.frames = pages;

-    /* Find the zone of the physical memory */
+    /*
+     * Check if the memory region is explicitly enabled
+     * for mapping by any parea structure.
+     */
+
+    mutex_lock(&parea_lock);
+    btree_node_t *nodep;
+    parea_t *parea = (parea_t *) btree_search(&parea_btree,
+        (btree_key_t) pf, &nodep);
+
+    if ((parea != NULL) && (parea->frames >= pages)) {
+        if ((!priv) && (!parea->unpriv)) {
+            mutex_unlock(&parea_lock);
+            return EPERM;
+        }
+
+        goto map;
+    }
+
+    parea = NULL;
+    mutex_unlock(&parea_lock);
+
+    /*
+     * Check if the memory region is part of physical
+     * memory generally enabled for mapping.
+     */
+
     irq_spinlock_lock(&zones.lock, true);
     size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
@@ -153 +179 @@
     }

-    if (zone_flags_available(zones.info[znum].flags)) {
-        /*
-         * Frames are part of physical memory, check
-         * if the memory region is enabled for mapping.
-         */
-        irq_spinlock_unlock(&zones.lock, true);
-
-        mutex_lock(&parea_lock);
-        btree_node_t *nodep;
-        parea_t *parea = (parea_t *) btree_search(&parea_btree,
-            (btree_key_t) pf, &nodep);
-
-        if ((!parea) || (parea->frames < pages)) {
-            mutex_unlock(&parea_lock);
-            return ENOENT;
-        }
-
-        if (!priv) {
-            if (!parea->unpriv) {
-                mutex_unlock(&parea_lock);
-                return EPERM;
-            }
-        }
-
-        mutex_unlock(&parea_lock);
-        goto map;
-    }
-
     irq_spinlock_unlock(&zones.lock, true);
     return ENOENT;
@@ -188 +186 @@
         AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
         /*
-         * The address space area could not have been created.
+         * The address space area was not created.
          * We report it using ENOMEM.
         */
+
+        if (parea != NULL)
+            mutex_unlock(&parea_lock);
+
         return ENOMEM;
     }
@@ -197 +199 @@
      * Mapping is created on-demand during page fault.
      */
-    return 0;
+
+    if (parea != NULL) {
+        parea->mapped = true;
+        mutex_unlock(&parea_lock);
+    }
+
+    return EOK;
 }
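The mapping path above now consults pareas before general-purpose memory and records a successful mapping in parea->mapped. The producer side of the same protocol appears in console.c above and in lib/rd.c and time/clock.c below: a kernel component fills in a parea_t and registers it. A condensed sketch of that producer side, assuming the parea_t layout those files use; the device region and the pbase field assignment are illustrative, not part of this diff:

    /* Expose a hypothetical device region so userspace can map it. */
    static parea_t dev_parea;

    static void dev_expose(uintptr_t phys_base, size_t size)
    {
        dev_parea.pbase = phys_base;           /* assumed field, keys parea_btree */
        dev_parea.frames = SIZE2FRAMES(size);
        dev_parea.unpriv = false;   /* only privileged tasks may map it */
        dev_parea.mapped = false;   /* the mapping path above flips this to true */
        ddi_parea_register(&dev_parea);
    }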
kernel/generic/src/ddi/irq.c
--- r25bef0ff
+++ r6a44ee4
@@ -275 +275 @@
 {
     /*
-     * If the kernel console is silenced,
-     * then try first the uspace handlers,
-     * eventually fall back to kernel handlers.
+     * If the kernel console override is on,
+     * then try first the kernel handlers
+     * and eventually fall back to uspace
+     * handlers.
      *
-     * If the kernel console is active,
-     * then do it the other way around.
+     * In the usual case the uspace handlers
+     * have precedence.
      */
-    if (silent) {
-        irq_t *irq = irq_dispatch_and_lock_uspace(inr);
+
+    if (console_override) {
+        irq_t *irq = irq_dispatch_and_lock_kernel(inr);
         if (irq)
             return irq;

-        return irq_dispatch_and_lock_kernel(inr);
+        return irq_dispatch_and_lock_uspace(inr);
     }

-    irq_t *irq = irq_dispatch_and_lock_kernel(inr);
+    irq_t *irq = irq_dispatch_and_lock_uspace(inr);
     if (irq)
         return irq;

-    return irq_dispatch_and_lock_uspace(inr);
+    return irq_dispatch_and_lock_kernel(inr);
 }
kernel/generic/src/debug/panic.c
--- r25bef0ff
+++ r6a44ee4
@@ -48 +48 @@
     uintptr_t address, const char *fmt, ...)
 {
-    va_list args;
-
-    silent = false;
+    console_override = true;

     printf("\n%s Kernel panic ", BANNER_LEFT);
@@ -57 +55 @@
     printf("due to ");

+    va_list args;
     va_start(args, fmt);
     if (cat == PANIC_ASSERT) {
kernel/generic/src/interrupt/interrupt.c
--- r25bef0ff
+++ r6a44ee4
@@ -177 +177 @@
     (void *) istate_get_pc(istate));

+    istate_decode(istate);
     stack_trace_istate(istate);

kernel/generic/src/ipc/ipc.c
--- r25bef0ff
+++ r6a44ee4
@@ -44 +44 @@
 #include <synch/synch.h>
 #include <ipc/ipc.h>
+#include <ipc/ipc_methods.h>
 #include <ipc/kbox.h>
 #include <ipc/event.h>
@@ -127 +128 @@
     list_initialize(&box->answers);
     list_initialize(&box->irq_notifs);
-    list_initialize(&box->irq_head);
+    list_initialize(&box->irq_list);
     box->task = task;
 }
@@ -182 +183 @@
     */
     irq_spinlock_lock(&TASK->lock, true);
-    list_append(&sync_box->sync_box_link, &TASK->sync_box_head);
+    list_append(&sync_box->sync_box_link, &TASK->sync_boxes);
     irq_spinlock_unlock(&TASK->lock, true);

@@ -449 +450 @@
         irq_spinlock_lock(&box->irq_lock, false);

-        request = list_get_instance(box->irq_notifs.next, call_t, link);
+        request = list_get_instance(list_first(&box->irq_notifs),
+            call_t, link);
         list_remove(&request->link);

@@ -458 +460 @@

         /* Handle asynchronous answers */
-        request = list_get_instance(box->answers.next, call_t, link);
+        request = list_get_instance(list_first(&box->answers),
+            call_t, link);
         list_remove(&request->link);
         atomic_dec(&request->data.phone->active_calls);
@@ -466 +469 @@

         /* Handle requests */
-        request = list_get_instance(box->calls.next, call_t, link);
+        request = list_get_instance(list_first(&box->calls),
+            call_t, link);
         list_remove(&request->link);

@@ -493 +497 @@
  *
  */
-void ipc_cleanup_call_list(link_t *lst)
+void ipc_cleanup_call_list(list_t *lst)
 {
     while (!list_empty(lst)) {
-        call_t *call = list_get_instance(lst->next, call_t, link);
+        call_t *call = list_get_instance(list_first(lst), call_t, link);
         if (call->buffer)
             free(call->buffer);
@@ -525 +529 @@
     irq_spinlock_lock(&box->lock, true);
     while (!list_empty(&box->connected_phones)) {
-        phone = list_get_instance(box->connected_phones.next,
+        phone = list_get_instance(list_first(&box->connected_phones),
             phone_t, link);
         if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
@@ -605 +609 @@
     /* Wait for all answers to interrupted synchronous calls to arrive */
     ipl_t ipl = interrupts_disable();
-    while (!list_empty(&TASK->sync_box_head)) {
-        answerbox_t *box = list_get_instance(TASK->sync_box_head.next,
-            answerbox_t, sync_box_link);
+    while (!list_empty(&TASK->sync_boxes)) {
+        answerbox_t *box = list_get_instance(
+            list_first(&TASK->sync_boxes), answerbox_t, sync_box_link);

         list_remove(&box->sync_box_link);
@@ -742 +746 @@
 #endif

-    link_t *cur;
-
     printf(" --- incomming calls ---\n");
-    for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
@@ -766 +767 @@

     printf(" --- dispatched calls ---\n");
-    for (cur = task->answerbox.dispatched_calls.next;
-        cur != &task->answerbox.dispatched_calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.dispatched_calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
@@ -788 +787 @@

     printf(" --- incoming answers ---\n");
-    for (cur = task->answerbox.answers.next;
-        cur != &task->answerbox.answers;
-        cur = cur->next) {
+    list_foreach(task->answerbox.answers, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
kernel/generic/src/ipc/ipcrsc.c
--- r25bef0ff
+++ r6a44ee4
@@ -146 +146 @@
 call_t *get_call(sysarg_t callid)
 {
-    link_t *lst;
     call_t *result = NULL;

     irq_spinlock_lock(&TASK->answerbox.lock, true);
-    for (lst = TASK->answerbox.dispatched_calls.next;
-        lst != &TASK->answerbox.dispatched_calls; lst = lst->next) {
+
+    list_foreach(TASK->answerbox.dispatched_calls, lst) {
         call_t *call = list_get_instance(lst, call_t, link);
         if ((sysarg_t) call == callid) {
kernel/generic/src/ipc/irq.c
--- r25bef0ff
+++ r6a44ee4
@@ -174 +174 @@
     irq->notif_cfg.code = code;
     irq->notif_cfg.counter = 0;
+    irq->driver_as = AS;

     /*
@@ -199 +200 @@

     hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
-    list_append(&irq->notif_cfg.link, &box->irq_head);
+    list_append(&irq->notif_cfg.link, &box->irq_list);

     irq_spinlock_unlock(&box->irq_lock, false);
@@ -281 +282 @@
     irq_spinlock_lock(&box->irq_lock, false);

-    while (box->irq_head.next != &box->irq_head) {
+    while (!list_empty(&box->irq_list)) {
         DEADLOCK_PROBE_INIT(p_irqlock);

-        irq_t *irq = list_get_instance(box->irq_head.next, irq_t,
+        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
             notif_cfg.link);

@@ -364 +365 @@
         return IRQ_DECLINE;

+#define CMD_MEM_READ(target) \
+do { \
+    void *va = code->cmds[i].addr; \
+    if (AS != irq->driver_as) \
+        as_switch(AS, irq->driver_as); \
+    memcpy_from_uspace(&target, va, (sizeof(target))); \
+    if (dstarg) \
+        scratch[dstarg] = target; \
+} while(0)
+
+#define CMD_MEM_WRITE(val) \
+do { \
+    void *va = code->cmds[i].addr; \
+    if (AS != irq->driver_as) \
+        as_switch(AS, irq->driver_as); \
+    memcpy_to_uspace(va, &val, sizeof(val)); \
+} while (0)
+
+    as_t *current_as = AS;
     size_t i;
     for (i = 0; i < code->cmdcount; i++) {
@@ -422 +442 @@
             }
             break;
+        case CMD_MEM_READ_8: {
+            uint8_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_READ_16: {
+            uint16_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_READ_32: {
+            uint32_t val;
+            CMD_MEM_READ(val);
+            break;
+        }
+        case CMD_MEM_WRITE_8: {
+            uint8_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_16: {
+            uint16_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_32: {
+            uint32_t val = code->cmds[i].value;
+            CMD_MEM_WRITE(val);
+            break;
+        }
+        case CMD_MEM_WRITE_A_8:
+            if (srcarg) {
+                uint8_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
+        case CMD_MEM_WRITE_A_16:
+            if (srcarg) {
+                uint16_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
+        case CMD_MEM_WRITE_A_32:
+            if (srcarg) {
+                uint32_t val = scratch[srcarg];
+                CMD_MEM_WRITE(val);
+            }
+            break;
         case CMD_BTEST:
             if ((srcarg) && (dstarg)) {
@@ -435 +503 @@
             break;
         case CMD_ACCEPT:
+            if (AS != current_as)
+                as_switch(AS, current_as);
             return IRQ_ACCEPT;
         case CMD_DECLINE:
         default:
+            if (AS != current_as)
+                as_switch(AS, current_as);
             return IRQ_DECLINE;
         }
     }
+    if (AS != current_as)
+        as_switch(AS, current_as);

     return IRQ_DECLINE;
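With the CMD_MEM_READ_*/CMD_MEM_WRITE_* commands and the irq->driver_as recorded at registration, a userspace driver's top-half pseudocode can now touch memory-mapped registers, not just PIO ports; the executor temporarily switches into the driver's address space for those commands and restores the original one before leaving. A sketch of the kind of program a driver might register, following the usual irq_cmd_t/irq_code_t convention; the addresses, bit mask, and the program itself are illustrative only, not taken from this changeset:

    static irq_cmd_t demo_cmds[] = {
        {
            /* Read a hypothetical 32-bit MMIO status register
             * (mapped in the driver's address space) into scratch[1]. */
            .cmd = CMD_MEM_READ_32,
            .addr = (void *) 0x81000000,
            .dstarg = 1
        },
        {
            /* Test the interrupt-pending bit: scratch[2] = scratch[1] & 0x1 */
            .cmd = CMD_BTEST,
            .srcarg = 1,
            .dstarg = 2,
            .value = 0x1
        },
        {
            /* Not our interrupt: skip the remaining commands and decline. */
            .cmd = CMD_PREDICATE,
            .srcarg = 2,
            .value = 2
        },
        {
            /* Acknowledge by writing the saved status back. */
            .cmd = CMD_MEM_WRITE_A_32,
            .addr = (void *) 0x81000000,
            .srcarg = 1
        },
        {
            .cmd = CMD_ACCEPT
        }
    };

    static irq_code_t demo_code = {
        sizeof(demo_cmds) / sizeof(irq_cmd_t),
        demo_cmds
    };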
kernel/generic/src/ipc/kbox.c
--- r25bef0ff
+++ r6a44ee4
@@ -37 +37 @@
 #include <synch/mutex.h>
 #include <ipc/ipc.h>
+#include <ipc/ipc_methods.h>
 #include <ipc/ipcrsc.h>
 #include <arch.h>
@@ -169 +170 @@
     switch (IPC_GET_IMETHOD(call->data)) {

-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         /* Handle debug call. */
         udebug_call_receive(call);
kernel/generic/src/ipc/sysipc.c
--- r25bef0ff
+++ r6a44ee4
@@ -40 +40 @@
 #include <debug.h>
 #include <ipc/ipc.h>
+#include <ipc/ipc_methods.h>
 #include <ipc/sysipc.h>
 #include <ipc/irq.h>
@@ -460 +461 @@
     }
 #ifdef CONFIG_UDEBUG
-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         return udebug_request_preprocess(call, phone);
 #endif
@@ -495 +496 @@
         /*
          * This must be an affirmative answer to IPC_M_DATA_READ
-         * or IPC_M_DEBUG_ALL/UDEBUG_M_MEM_READ...
+         * or IPC_M_DEBUG/UDEBUG_M_MEM_READ...
          *
          */
@@ -531 +532 @@

     switch (IPC_GET_IMETHOD(call->data)) {
-    case IPC_M_DEBUG_ALL:
+    case IPC_M_DEBUG:
         return -1;
     default:
kernel/generic/src/lib/rd.c
--- r25bef0ff
+++ r6a44ee4
@@ -91 +91 @@
     rd_parea.frames = SIZE2FRAMES(dsize);
     rd_parea.unpriv = false;
+    rd_parea.mapped = false;
     ddi_parea_register(&rd_parea);

kernel/generic/src/main/version.c
--- r25bef0ff
+++ r6a44ee4
@@ -38 +38 @@

 static const char *project = "SPARTAN kernel";
-static const char *copyright = "Copyright (c) 2001-2010 HelenOS project";
+static const char *copyright = "Copyright (c) 2001-2011 HelenOS project";
 static const char *release = STRING(RELEASE);
 static const char *name = STRING(NAME);
kernel/generic/src/mm/as.c
--- r25bef0ff
+++ r6a44ee4
@@ -94 +94 @@
  *
  * This lock protects:
- * - inactive_as_with_asid_head list
+ * - inactive_as_with_asid_list
  * - as->asid for each as of the as_t type
  * - asids_allocated counter
@@ -105 +105 @@
  * that have valid ASID.
  */
-LIST_INITIALIZE(inactive_as_with_asid_head);
+LIST_INITIALIZE(inactive_as_with_asid_list);

 /** Kernel address space. */
@@ -235 +235 @@
     bool cond = true;
     while (cond) {
-        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
+        ASSERT(!list_empty(&as->as_area_btree.leaf_list));

         btree_node_t *node =
-            list_get_instance(as->as_area_btree.leaf_head.next,
+            list_get_instance(list_first(&as->as_area_btree.leaf_list),
             btree_node_t, leaf_link);

@@ -602 +602 @@
     bool cond = true;
     while (cond) {
-        ASSERT(!list_empty(&area->used_space.leaf_head));
+        ASSERT(!list_empty(&area->used_space.leaf_list));

         btree_node_t *node =
-            list_get_instance(area->used_space.leaf_head.prev,
+            list_get_instance(list_last(&area->used_space.leaf_list),
             btree_node_t, leaf_link);

@@ -727 +727 @@
     if (--sh_info->refcount == 0) {
         dealloc = true;
-        link_t *cur;

         /*
          * Now walk carefully the pagemap B+tree and free/remove
          * reference from all frames found there.
          */
-        for (cur = sh_info->pagemap.leaf_head.next;
-            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
+        list_foreach(sh_info->pagemap.leaf_list, cur) {
             btree_node_t *node
                 = list_get_instance(cur, btree_node_t, leaf_link);
@@ -786 +784 @@
      * Visit only the pages mapped by used_space B+tree.
      */
-    link_t *cur;
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node;
         btree_key_t i;
@@ -1065 +1061 @@
      */
     size_t used_pages = 0;
-    link_t *cur;
-
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
@@ -1094 +1088 @@
     size_t frame_idx = 0;

-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node = list_get_instance(cur, btree_node_t,
             leaf_link);
@@ -1147 +1140 @@
     frame_idx = 0;

-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
@@ -1292 +1284 @@
  * thing which is forbidden in this context is locking the address space.
  *
- * When this function is enetered, no spinlocks may be held.
+ * When this function is entered, no spinlocks may be held.
  *
  * @param old Old address space or NULL.
@@ -1334 +1326 @@

         list_append(&old_as->inactive_as_with_asid_link,
-            &inactive_as_with_asid_head);
+            &inactive_as_with_asid_list);
     }
@@ -2027 +2019 @@

     /* Eventually check the addresses behind each area */
-    link_t *cur;
-    for (cur = AS->as_area_btree.leaf_head.next;
-        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
-        cur = cur->next) {
+    list_foreach(AS->as_area_btree.leaf_list, cur) {
+        if (ret != 0)
+            break;
+
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2072 +2064 @@

     size_t area_cnt = 0;
-    link_t *cur;
-
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2088 +2078 @@
     size_t area_idx = 0;

-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2125 +2114 @@

     /* Print out info about address space areas */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
kernel/generic/src/mm/backend_anon.c
--- r25bef0ff
+++ r6a44ee4
@@ -97 +97 @@
 void anon_share(as_area_t *area)
 {
-    link_t *cur;
-
     ASSERT(mutex_locked(&area->as->lock));
     ASSERT(mutex_locked(&area->lock));
@@ -106 +104 @@
      */
     mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node;
         unsigned int i;
kernel/generic/src/mm/backend_elf.c
--- r25bef0ff
+++ r6a44ee4
@@ -139 +139 @@
      */
     if (area->flags & AS_AREA_WRITE) {
-        node = list_get_instance(area->used_space.leaf_head.next,
+        node = list_get_instance(list_first(&area->used_space.leaf_list),
             btree_node_t, leaf_link);
     } else {
@@ -153 +153 @@
      */
     mutex_lock(&area->sh_info->lock);
-    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
+    for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head;
         cur = cur->next) {
         unsigned int i;
kernel/generic/src/mm/buddy.c
--- r25bef0ff
+++ r6a44ee4
@@ -82 +82 @@
      * Use memory after our own structure.
      */
-    b->order = (link_t *) (&b[1]);
+    b->order = (list_t *) (&b[1]);

     for (i = 0; i <= max_order; i++)
@@ -176 +176 @@
      * the request can be immediatelly satisfied.
      */
-    if (!list_empty(&b->order[i])) {
-        res = b->order[i].next;
+    res = list_first(&b->order[i]);
+    if (res != NULL) {
         list_remove(res);
         b->op->mark_busy(b, res);
kernel/generic/src/mm/page.c
--- r25bef0ff
+++ r6a44ee4
@@ -60 +60 @@

 #include <mm/page.h>
+#include <genarch/mm/page_ht.h>
+#include <genarch/mm/page_pt.h>
 #include <arch/mm/page.h>
 #include <arch/mm/asid.h>
@@ -70 +72 @@
 #include <debug.h>
 #include <arch.h>
+#include <syscall/copy.h>
+#include <errno.h>

 /** Virtual operations for page subsystem. */
@@ -172 +176 @@
 }

+/** Syscall wrapper for getting mapping of a virtual page.
+ *
+ * @retval EOK Everything went find, @p uspace_frame and @p uspace_node
+ *             contains correct values.
+ * @retval ENOENT Virtual address has no mapping.
+ */
+sysarg_t sys_page_find_mapping(uintptr_t virt_address,
+    uintptr_t *uspace_frame)
+{
+    mutex_lock(&AS->lock);
+
+    pte_t *pte = page_mapping_find(AS, virt_address, false);
+    if (!PTE_VALID(pte) || !PTE_PRESENT(pte)) {
+        mutex_unlock(&AS->lock);
+
+        return (sysarg_t) ENOENT;
+    }
+
+    uintptr_t phys_address = PTE_GET_FRAME(pte);
+
+    mutex_unlock(&AS->lock);
+
+    int rc = copy_to_uspace(uspace_frame,
+        &phys_address, sizeof(phys_address));
+    if (rc != EOK) {
+        return (sysarg_t) rc;
+    }
+
+    return EOK;
+}
+
 /** @}
  */
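Nothing in this changeset adds the matching userspace stub for sys_page_find_mapping, so as a rough illustration only: a libc-side wrapper would presumably funnel through the generic syscall macros, along these lines (the SYS_PAGE_FIND_MAPPING constant and the wrapper name are assumptions, not part of this diff):

    #include <libc.h>

    /* Hypothetical wrapper: translate a virtual address of the calling
     * task into the physical frame address backing it. */
    int page_find_mapping(const void *virt, uintptr_t *phys)
    {
        return (int) __SYSCALL2(SYS_PAGE_FIND_MAPPING,
            (sysarg_t) virt, (sysarg_t) phys);
    }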
kernel/generic/src/mm/slab.c
--- r25bef0ff
+++ r6a44ee4
@@ -317 +317 @@
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t,
-            link);
+        slab = list_get_instance(list_first(&cache->partial_slabs),
+            slab_t, link);
         list_remove(&slab->link);
     }
@@ -360 +360 @@
     if (!list_empty(&cache->magazines)) {
         if (first)
-            cur = cache->magazines.next;
+            cur = list_first(&cache->magazines);
         else
-            cur = cache->magazines.prev;
+            cur = list_last(&cache->magazines);

         mag = list_get_instance(cur, slab_magazine_t, link);
@@ -812 +812 @@

     size_t frames = 0;
-    link_t *cur;
-    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
+    list_foreach(slab_cache_list, cur) {
         slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
@@ -861 +859 @@
         link_t *cur;
         size_t i;
-        for (i = 0, cur = slab_cache_list.next;
-            (i < skip) && (cur != &slab_cache_list);
+        for (i = 0, cur = slab_cache_list.head.next;
+            (i < skip) && (cur != &slab_cache_list.head);
             i++, cur = cur->next);

-        if (cur == &slab_cache_list) {
+        if (cur == &slab_cache_list.head) {
             irq_spinlock_unlock(&slab_cache_lock, true);
             break;
@@ -940 +938 @@
     irq_spinlock_lock(&slab_cache_lock, false);

-    link_t *cur;
-    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
+    list_foreach(slab_cache_list, cur) {
         slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
         if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
kernel/generic/src/proc/scheduler.c
--- r25bef0ff
+++ r6a44ee4
@@ -237 +237 @@
      * Take the first thread from the queue.
      */
-    thread_t *thread =
-        list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+    thread_t *thread = list_get_instance(
+        list_first(&CPU->rq[i].rq), thread_t, rq_link);
     list_remove(&thread->rq_link);
@@ -273 +273 @@
 static void relink_rq(int start)
 {
-    link_t head;
-
-    list_initialize(&head);
+    list_t list;
+
+    list_initialize(&list);
     irq_spinlock_lock(&CPU->lock, false);
@@ -284 +284 @@

         irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-        list_concat(&head, &CPU->rq[i + 1].rq_head);
+        list_concat(&list, &CPU->rq[i + 1].rq);
         size_t n = CPU->rq[i + 1].n;
         CPU->rq[i + 1].n = 0;
@@ -292 +292 @@

         irq_spinlock_lock(&CPU->rq[i].lock, false);
-        list_concat(&CPU->rq[i].rq_head, &head);
+        list_concat(&CPU->rq[i].rq, &list);
         CPU->rq[i].n += n;
         irq_spinlock_unlock(&CPU->rq[i].lock, false);
@@ -586 +586 @@
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     *
      */
     size_t acpu;
@@ -617 +616 @@

         /* Search rq from the back */
-        link_t *link = cpu->rq[rq].rq_head.prev;
-
-        while (link != &(cpu->rq[rq].rq_head)) {
-            thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+        link_t *link = cpu->rq[rq].rq.head.prev;
+
+        while (link != &(cpu->rq[rq].rq.head)) {
+            thread = (thread_t *) list_get_instance(link,
+                thread_t, rq_link);

             /*
-             * We don't want to steal CPU-wired threads
-             * neither threads already stolen. The latter
-             * prevents threads from migrating between CPU's
-             * without ever being run. We don't want to
-             * steal threads whose FPU context is still in
-             * CPU.
-             *
+             * Do not steal CPU-wired threads, threads
+             * already stolen, threads for which migration
+             * was temporarily disabled or threads whose
+             * FPU context is still in the CPU.
             */
            irq_spinlock_lock(&thread->lock, false);

-            if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-                && (!(thread->fpu_context_engaged))) {
+            if (!(thread->flags & THREAD_FLAG_WIRED) &&
+                !(thread->flags & THREAD_FLAG_STOLEN) &&
+                !thread->nomigrate &&
+                !thread->fpu_context_engaged) {
                 /*
                  * Remove thread from ready queue.
                  */
-                irq_spinlock_unlock(&thread->lock, false);
+                irq_spinlock_unlock(&thread->lock,
+                    false);

                 atomic_dec(&cpu->nrdy);
@@ -660 +660 @@
                 */

-                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+                irq_spinlock_pass(&(cpu->rq[rq].lock),
+                    &thread->lock);

 #ifdef KCPULB_VERBOSE
@@ -739 +740 @@

             printf("\trq[%u]: ", i);
-            link_t *cur;
-            for (cur = cpus[cpu].rq[i].rq_head.next;
-                cur != &(cpus[cpu].rq[i].rq_head);
-                cur = cur->next) {
-                thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+            list_foreach(cpus[cpu].rq[i].rq, cur) {
+                thread_t *thread = list_get_instance(cur,
+                    thread_t, rq_link);
                 printf("%" PRIu64 "(%s) ", thread->tid,
                     thread_states[thread->state]);
kernel/generic/src/proc/task.c
--- r25bef0ff
+++ r6a44ee4
@@ -155 +155 @@
     mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);

-    list_initialize(&task->th_head);
-    list_initialize(&task->sync_box_head);
+    list_initialize(&task->threads);
+    list_initialize(&task->sync_boxes);

     ipc_answerbox_init(&task->answerbox, task);
@@ -435 +435 @@

     /* Current values of threads */
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);

@@ -468 +467 @@
     */

-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
         bool sleeping = false;
kernel/generic/src/proc/thread.c
--- r25bef0ff
+++ r6a44ee4
@@ -55 +55 @@
 #include <time/clock.h>
 #include <time/timeout.h>
+#include <time/delay.h>
 #include <config.h>
 #include <arch/interrupt.h>
@@ -259 +260 @@
     */

-    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+    list_append(&thread->rq_link, &cpu->rq[i].rq);
     cpu->rq[i].n++;
     irq_spinlock_unlock(&(cpu->rq[i].lock), true);
@@ -321 +322 @@
     thread->cpu = NULL;
     thread->flags = flags;
+    thread->nomigrate = 0;
     thread->state = Entering;

@@ -421 +423 @@
     atomic_inc(&task->lifecount);

-    list_append(&thread->th_link, &task->th_head);
+    list_append(&thread->th_link, &task->threads);

     irq_spinlock_pass(&task->lock, &threads_lock);
@@ -481 +483 @@
     /* Not reached */
     while (true);
+}
+
+/** Prevent the current thread from being migrated to another processor. */
+void thread_migration_disable(void)
+{
+    ASSERT(THREAD);
+
+    THREAD->nomigrate++;
+}
+
+/** Allow the current thread to be migrated to another processor. */
+void thread_migration_enable(void)
+{
+    ASSERT(THREAD);
+    ASSERT(THREAD->nomigrate > 0);
+
+    THREAD->nomigrate--;
 }
@@ -912 +931 @@
 }

+sysarg_t sys_thread_udelay(uint32_t usec)
+{
+    delay(usec);
+    return 0;
+}
+
 /** @}
  */
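The new nomigrate field is a counter, so migration disabling nests: kcpulb (see proc/scheduler.c above) refuses to steal a thread while the counter is nonzero, and the reworked delay() in time/delay.c below is the first caller. A small sketch of the discipline, not code from this changeset:

    static void inner(void)
    {
        thread_migration_disable();      /* nomigrate: 1 -> 2 */
        /* ... access CPU-local state; still preemptible ... */
        thread_migration_enable();       /* nomigrate: 2 -> 1 */
    }

    static void outer(void)
    {
        thread_migration_disable();      /* nomigrate: 0 -> 1 */
        inner();
        ASSERT(THREAD->nomigrate == 1);  /* still pinned to this CPU */
        thread_migration_enable();       /* nomigrate: 1 -> 0 */
    }

Unlike disabling interrupts or preemption, this keeps the thread schedulable on its current CPU, which is exactly what the reworked delay() needs.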
kernel/generic/src/synch/futex.c
--- r25bef0ff
+++ r6a44ee4
@@ -272 +272 @@
 void futex_cleanup(void)
 {
-    link_t *cur;
-
     mutex_lock(&futex_ht_lock);
     mutex_lock(&TASK->futexes_lock);

-    for (cur = TASK->futexes.leaf_head.next;
-        cur != &TASK->futexes.leaf_head; cur = cur->next) {
+    list_foreach(TASK->futexes.leaf_list, cur) {
         btree_node_t *node;
         unsigned int i;
kernel/generic/src/synch/waitq.c
--- r25bef0ff
+++ r6a44ee4
@@ -69 +69 @@
 {
     irq_spinlock_initialize(&wq->lock, "wq.lock");
-    list_initialize(&wq->head);
+    list_initialize(&wq->sleepers);
     wq->missed_wakeups = 0;
 }
@@ -196 +196 @@
     irq_spinlock_lock(&wq->lock, true);

-    if (!list_empty(&wq->head)) {
-        thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+    if (!list_empty(&wq->sleepers)) {
+        thread_t *thread = list_get_instance(list_first(&wq->sleepers),
+            thread_t, wq_link);

         irq_spinlock_lock(&thread->lock, false);
@@ -407 +408 @@
     }

-    list_append(&THREAD->wq_link, &wq->head);
+    list_append(&THREAD->wq_link, &wq->sleepers);

     /*
@@ -464 +465 @@

 loop:
-    if (list_empty(&wq->head)) {
+    if (list_empty(&wq->sleepers)) {
         wq->missed_wakeups++;
         if ((count) && (mode == WAKEUP_ALL))
@@ -473 +474 @@

     count++;
-    thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+    thread_t *thread = list_get_instance(list_first(&wq->sleepers),
+        thread_t, wq_link);

     /*
kernel/generic/src/syscall/syscall.c
--- r25bef0ff
+++ r6a44ee4
@@ -41 +41 @@
 #include <proc/program.h>
 #include <mm/as.h>
+#include <mm/page.h>
 #include <print.h>
 #include <arch.h>
@@ -118 +119 @@

 syshandler_t syscall_table[SYSCALL_END] = {
+    /* System management syscalls. */
     (syshandler_t) sys_klog,
     (syshandler_t) sys_tls_set,

@@ -126 +128 @@
     (syshandler_t) sys_thread_get_id,
     (syshandler_t) sys_thread_usleep,
+    (syshandler_t) sys_thread_udelay,

     (syshandler_t) sys_task_get_id,
@@ -144 +147 @@
     (syshandler_t) sys_as_area_destroy,
     (syshandler_t) sys_as_get_unmapped_area,
+
+    /* Page mapping related syscalls. */
+    (syshandler_t) sys_page_find_mapping,

     /* IPC related syscalls. */
@@ -174 +180 @@
     (syshandler_t) sys_unregister_irq,

-    /* Sysinfo syscalls */
+    /* Sysinfo syscalls. */
     (syshandler_t) sys_sysinfo_get_tag,
     (syshandler_t) sys_sysinfo_get_value,
@@ -180 +186 @@
     (syshandler_t) sys_sysinfo_get_data,

-    /* Debug calls */
-    (syshandler_t) sys_debug_enable_console,
-    (syshandler_t) sys_debug_disable_console
+    /* Kernel console syscalls. */
+    (syshandler_t) sys_debug_activate_console
 };

kernel/generic/src/sysinfo/stats.c
--- r25bef0ff
+++ r6a44ee4
@@ -173 +173 @@

     /* Walk the B+ tree and count pages */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -218 +216 @@

     /* Walk the B+ tree and count pages */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
kernel/generic/src/time/clock.c
--- r25bef0ff
+++ r6a44ee4
@@ -94 +94 @@
     clock_parea.frames = 1;
     clock_parea.unpriv = true;
+    clock_parea.mapped = false;
     ddi_parea_register(&clock_parea);

@@ -163 +164 @@

     link_t *cur;
-    while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-        timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+    while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
+        timeout_t *timeout = list_get_instance(cur, timeout_t,
+            link);

         irq_spinlock_lock(&timeout->lock, false);
kernel/generic/src/time/delay.c
--- r25bef0ff
+++ r6a44ee4
@@ -37 +37 @@

 #include <time/delay.h>
+#include <proc/thread.h>
 #include <typedefs.h>
 #include <cpu.h>
@@ -42 +43 @@
 #include <arch.h>

-/** Active delay
+/** Delay the execution for the given number of microseconds (or slightly more).
  *
- * Delay the execution for the given number
- * of microseconds (or slightly more). The delay
- * is implemented as CPU calibrated active loop.
+ * The delay is implemented as active delay loop.
  *
  * @param usec Number of microseconds to sleep.
@@ -52 +51 @@
 void delay(uint32_t usec)
 {
-    ipl_t ipl;
-
     /*
-     * The delay loop is calibrated for each and every
-     * CPU in the system. Therefore it is necessary to
-     * call interrupts_disable() before calling the
-     * asm_delay_loop().
+     * The delay loop is calibrated for each and every CPU in the system.
+     * If running in a thread context, it is therefore necessary to disable
+     * thread migration. We want to do this in a lightweight manner.
      */
-    ipl = interrupts_disable();
+    if (THREAD)
+        thread_migration_disable();
     asm_delay_loop(usec * CPU->delay_loop_const);
-    interrupts_restore(ipl);
+    if (THREAD)
+        thread_migration_enable();
 }
kernel/generic/src/time/timeout.c
--- r25bef0ff
+++ r6a44ee4
@@ -54 +54 @@
 {
     irq_spinlock_initialize(&CPU->timeoutlock, "cpu.timeoutlock");
-    list_initialize(&CPU->timeout_active_head);
+    list_initialize(&CPU->timeout_active_list);
 }
@@ -119 +119 @@
     timeout_t *target = NULL;
     link_t *cur;
-    for (cur = CPU->timeout_active_head.next;
-        cur != &CPU->timeout_active_head; cur = cur->next) {
+    for (cur = CPU->timeout_active_list.head.next;
+        cur != &CPU->timeout_active_list.head; cur = cur->next) {
         target = list_get_instance(cur, timeout_t, link);
         irq_spinlock_lock(&target->lock, false);
@@ -135 +135 @@
     /* Avoid using cur->prev directly */
     link_t *prev = cur->prev;
-    list_prepend(&timeout->link, prev);
+    list_insert_after(&timeout->link, prev);

     /*
@@ -146 +146 @@
      * Decrease ticks of timeout's immediate succesor by timeout->ticks.
      */
-    if (cur != &CPU->timeout_active_head) {
+    if (cur != &CPU->timeout_active_list.head) {
         irq_spinlock_lock(&target->lock, false);
         target->ticks -= timeout->ticks;
@@ -184 +184 @@
     /*
      * Now we know for sure that timeout hasn't been activated yet
-     * and is lurking in timeout->cpu->timeout_active_head queue.
+     * and is lurking in timeout->cpu->timeout_active_list.
      */

     link_t *cur = timeout->link.next;
-    if (cur != &timeout->cpu->timeout_active_head) {
+    if (cur != &timeout->cpu->timeout_active_list.head) {
         timeout_t *tmp = list_get_instance(cur, timeout_t, link);
         irq_spinlock_lock(&tmp->lock, false);
kernel/generic/src/udebug/udebug.c
--- r25bef0ff
+++ r6a44ee4
@@ -406 +406 @@

     /* Finish debugging of all userspace threads */
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);

kernel/generic/src/udebug/udebug_ops.c
--- r25bef0ff
+++ r6a44ee4
@@ -196 +196 @@
     /* Set udebug.active on all of the task's userspace threads. */

-    link_t *cur;
-    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
+    list_foreach(TASK->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);

@@ -390 +389 @@

     /* FIXME: make sure the thread isn't past debug shutdown... */
-    link_t *cur;
-    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
+    list_foreach(TASK->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
