Changeset 55b77d9 in mainline for kernel/generic/src
- Timestamp:
- 2011-06-17T20:39:16Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8f164724
- Parents:
- 98caf49
- Location:
- kernel/generic/src
- Files:
- 25 edited
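This changeset converts the kernel's intrusive doubly-linked lists from a bare link_t used as a list head to a dedicated list_t type that embeds the sentinel link, renames head fields accordingly (leaf_head to leaf_list, th_head to threads, and so on), and rewrites open-coded traversals into the new list_first(), list_last() and list_foreach() helpers. The sketch below reconstructs the new API purely from the usage visible in the diffs that follow; the authoritative definitions live in kernel/generic/include/adt/list.h, which is not part of this section, so every detail here is an assumption, not a copy of the kernel headers.

    /* Reconstruction of the list API as implied by this changeset
     * (assumed shapes; the real header may differ). */
    typedef struct link {
        struct link *prev;
        struct link *next;
    } link_t;

    typedef struct list {
        link_t head;    /* embedded sentinel; never a user item */
    } list_t;

    static inline void list_initialize(list_t *list)
    {
        list->head.prev = &list->head;
        list->head.next = &list->head;
    }

    static inline int list_empty(const list_t *list)
    {
        return list->head.next == &list->head;
    }

    /* First/last item, or NULL when the list is empty. */
    static inline link_t *list_first(const list_t *list)
    {
        return (list->head.next == &list->head) ? NULL : list->head.next;
    }

    static inline link_t *list_last(const list_t *list)
    {
        return (list->head.prev == &list->head) ? NULL : list->head.prev;
    }

    /* Iterator shape implied by the call sites: the macro declares the
     * cursor itself, which is why the diffs below drop the
     * "link_t *cur;" locals. */
    #define list_foreach(list, iterator) \
        for (link_t *iterator = (list).head.next; \
            iterator != &(list).head; iterator = iterator->next)

Splitting the types this way means passing a whole list where a single item link is expected (or vice versa) now fails to compile, which appears to be the motivation for the pervasive renames below.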
kernel/generic/src/adt/btree.c
(r98caf49 → r55b77d9)
@@ -108,7 +108,7 @@
 void btree_create(btree_t *t)
 {
-    list_initialize(&t->leaf_head);
+    list_initialize(&t->leaf_list);
     t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
     node_initialize(t->root);
-    list_append(&t->root->leaf_link, &t->leaf_head);
+    list_append(&t->root->leaf_link, &t->leaf_list);
 }
@@ -588,5 +588,5 @@
 
     if (LEAF_NODE(node)) {
-        list_prepend(&rnode->leaf_link, &node->leaf_link);
+        list_insert_after(&rnode->leaf_link, &node->leaf_link);
     }
 
@@ -953,5 +953,5 @@
     ASSERT(LEAF_NODE(node));
 
-    if (node->leaf_link.prev != &t->leaf_head)
+    if (node->leaf_link.prev != &t->leaf_list.head)
         return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link);
     else
@@ -972,5 +972,5 @@
     ASSERT(LEAF_NODE(node));
 
-    if (node->leaf_link.next != &t->leaf_head)
+    if (node->leaf_link.next != &t->leaf_list.head)
         return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link);
     else
@@ -987,8 +987,8 @@
     size_t i;
     int depth = t->root->depth;
-    link_t head, *cur;
+    list_t list;
 
     printf("Printing B-tree:\n");
-    list_initialize(&head);
-    list_append(&t->root->bfs_link, &head);
+    list_initialize(&list);
+    list_append(&t->root->bfs_link, &list);
 
@@ -997,10 +997,10 @@
     * Levels are distinguished from one another by node->depth.
     */
-    while (!list_empty(&head)) {
+    while (!list_empty(&list)) {
        link_t *hlp;
        btree_node_t *node;
 
-       hlp = head.next;
-       ASSERT(hlp != &head);
+       hlp = list_first(&list);
+       ASSERT(hlp != NULL);
        node = list_get_instance(hlp, btree_node_t, bfs_link);
        list_remove(hlp);
@@ -1018,10 +1018,10 @@
            printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
            if (node->depth && node->subtree[i]) {
-               list_append(&node->subtree[i]->bfs_link, &head);
+               list_append(&node->subtree[i]->bfs_link, &list);
            }
        }
 
        if (node->depth && node->subtree[i])
-           list_append(&node->subtree[i]->bfs_link, &head);
+           list_append(&node->subtree[i]->bfs_link, &list);
 
        printf(")");
@@ -1031,5 +1031,5 @@
 
     printf("Printing list of leaves:\n");
-    for (cur = t->leaf_head.next; cur != &t->leaf_head; cur = cur->next) {
+    list_foreach(t->leaf_list, cur) {
        btree_node_t *node;
 
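In the split path above, the old code called list_prepend(&rnode->leaf_link, &node->leaf_link), abusing the "head" parameter to mean "insert after this item". The new list_insert_after() names that operation directly. A minimal sketch of what it presumably does (the kernel's actual implementation may differ):

    /* Insert lnew immediately after lold in the same list (sketch). */
    static inline void list_insert_after(link_t *lnew, link_t *lold)
    {
        lnew->prev = lold;
        lnew->next = lold->next;
        lold->next->prev = lnew;
        lold->next = lnew;
    }

For the B+ tree this keeps the leaf chain in key order: the right half produced by a leaf split is linked directly behind the node it was split from.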
kernel/generic/src/adt/hash_table.c
(r98caf49 → r55b77d9)
@@ -62,9 +62,9 @@
     ASSERT(max_keys > 0);
 
-    h->entry = (link_t *) malloc(m * sizeof(link_t), 0);
+    h->entry = (list_t *) malloc(m * sizeof(list_t), 0);
     if (!h->entry)
         panic("Cannot allocate memory for hash table.");
 
-    memsetb(h->entry, m * sizeof(link_t), 0);
+    memsetb(h->entry, m * sizeof(list_t), 0);
 
     for (i = 0; i < m; i++)
@@ -107,5 +107,4 @@
 link_t *hash_table_find(hash_table_t *h, sysarg_t key[])
 {
-    link_t *cur;
     size_t chain;
 
@@ -118,5 +117,5 @@
     ASSERT(chain < h->entries);
 
-    for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
+    list_foreach(h->entry[chain], cur) {
         if (h->op->compare(key, h->max_keys, cur)) {
             /*
@@ -141,5 +140,4 @@
 {
     size_t chain;
-    link_t *cur;
 
     ASSERT(h);
@@ -150,4 +148,5 @@
 
     if (keys == h->max_keys) {
+        link_t *cur;
 
         /*
@@ -169,5 +168,5 @@
     */
     for (chain = 0; chain < h->entries; chain++) {
-        for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
+        list_foreach(h->entry[chain], cur) {
             if (h->op->compare(key, keys, cur)) {
                 link_t *hlp;
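Both lookup loops above rely on list_get_instance() to climb from the embedded link_t back to the enclosing structure. That macro is not defined anywhere in this changeset; a standard-C sketch of the idea (the kernel's own spelling may differ):

    #include <stddef.h>    /* offsetof */

    /* Recover the structure that embeds 'link' as its 'member' field;
     * the same idea as the common container_of() macro. */
    #define list_get_instance(link, type, member) \
        ((type *) (((char *) (link)) - offsetof(type, member)))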
kernel/generic/src/adt/list.c
(r98caf49 → r55b77d9)
@@ -43,19 +43,19 @@
 /** Check for membership
  *
- * Check whether link is contained in the list head.
- * The membership is defined as pointer equivalence.
+ * Check whether link is contained in a list.
+ * Membership is defined as pointer equivalence.
  *
- * @param link
- * @param head    List to look in.
+ * @param link    Item to look for.
+ * @param list    List to look in.
  *
  * @return true if link is contained in head, false otherwise.
  *
  */
-int list_member(const link_t *link, const link_t *head)
+int list_member(const link_t *link, const list_t *list)
 {
     bool found = false;
-    link_t *hlp = head->next;
+    link_t *hlp = list->head.next;
 
-    while (hlp != head) {
+    while (hlp != &list->head) {
         if (hlp == link) {
             found = true;
@@ -68,24 +68,23 @@
 }
 
-
 /** Concatenate two lists
  *
- * Concatenate lists head1 and head2, producing a single
- * list head1 containing items from both (in head1, head2
- * order) and empty list head2.
+ * Concatenate lists @a list1 and @a list2, producing a single
+ * list @a list1 containing items from both (in @a list1, @a list2
+ * order) and empty list @a list2.
  *
- * @param head1    First list and concatenated output
- * @param head2    Second list and empty output.
+ * @param list1    First list and concatenated output
+ * @param list2    Second list and empty output.
  *
 */
-void list_concat(link_t *head1, link_t *head2)
+void list_concat(list_t *list1, list_t *list2)
 {
-    if (list_empty(head2))
+    if (list_empty(list2))
         return;
 
-    head2->next->prev = head1->prev;
-    head2->prev->next = head1;
-    head1->prev->next = head2->next;
-    head1->prev = head2->prev;
-    list_initialize(head2);
+    list2->head.next->prev = list1->head.prev;
+    list2->head.prev->next = &list1->head;
+    list1->head.prev->next = list2->head.next;
+    list1->head.prev = list2->head.prev;
+    list_initialize(list2);
 }
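list_concat() keeps its O(1) splice, but one line changes shape: with the sentinel now embedded, the last item of list2 must point at the address of list1's sentinel (&list1->head) rather than at list1 itself. A hypothetical caller, to show the contract (function and variable names invented for illustration):

    /* Hypothetical caller illustrating the list_concat() contract. */
    static void drain_pending(list_t *done, list_t *pending)
    {
        /* Splice every item of *pending onto the tail of *done in
         * O(1); *pending is re-initialized to empty by list_concat(). */
        list_concat(done, pending);
        ASSERT(list_empty(pending));
    }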
kernel/generic/src/console/cmd.c
(r98caf49 → r55b77d9)
@@ -573,7 +573,6 @@
     spinlock_lock(&cmd_lock);
 
-    link_t *cur;
     size_t len = 0;
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;
         hlp = list_get_instance(cur, cmd_info_t, link);
@@ -591,5 +590,5 @@
     }
 
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;
         hlp = list_get_instance(cur, cmd_info_t, link);
@@ -646,9 +645,7 @@
 int cmd_desc(cmd_arg_t *argv)
 {
-    link_t *cur;
-
     spinlock_lock(&cmd_lock);
 
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp;
 
kernel/generic/src/console/console.c
(r98caf49 → r55b77d9)
@@ -124,7 +124,5 @@
 static void stdout_write(outdev_t *dev, wchar_t ch, bool silent)
 {
-    link_t *cur;
-
-    for (cur = dev->list.next; cur != &dev->list; cur = cur->next) {
+    list_foreach(dev->list, cur) {
         outdev_t *sink = list_get_instance(cur, outdev_t, link);
         if ((sink) && (sink->op->write))
@@ -135,7 +133,5 @@
 static void stdout_redraw(outdev_t *dev)
 {
-    link_t *cur;
-
-    for (cur = dev->list.next; cur != &dev->list; cur = cur->next) {
+    list_foreach(dev->list, cur) {
         outdev_t *sink = list_get_instance(cur, outdev_t, link);
         if ((sink) && (sink->op->redraw))
kernel/generic/src/console/kconsole.c
(r98caf49 → r55b77d9)
@@ -84,5 +84,5 @@
 
 SPINLOCK_INITIALIZE(cmd_lock);    /**< Lock protecting command list. */
-LIST_INITIALIZE(cmd_head);    /**< Command list. */
+LIST_INITIALIZE(cmd_list);    /**< Command list. */
 
 static wchar_t history[KCONSOLE_HISTORY][MAX_CMDLINE] = {};
@@ -113,6 +113,4 @@
 bool cmd_register(cmd_info_t *cmd)
 {
-    link_t *cur;
-
     spinlock_lock(&cmd_lock);
 
@@ -120,5 +118,5 @@
     * Make sure the command is not already listed.
     */
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
 
@@ -153,5 +151,5 @@
     * Now the command can be added.
     */
-    list_append(&cmd->link, &cmd_head);
+    list_append(&cmd->link, &cmd_list);
 
     spinlock_unlock(&cmd_lock);
@@ -176,7 +174,7 @@
 
     if (*startpos == NULL)
-        *startpos = cmd_head.next;
+        *startpos = cmd_list.head.next;
 
-    for (; *startpos != &cmd_head; *startpos = (*startpos)->next) {
+    for (; *startpos != &cmd_list.head; *startpos = (*startpos)->next) {
         cmd_info_t *hlp = list_get_instance(*startpos, cmd_info_t, link);
 
@@ -559,7 +557,6 @@
 
     cmd_info_t *cmd = NULL;
-    link_t *cur;
-
-    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
+
+    list_foreach(cmd_list, cur) {
         cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link);
         spinlock_lock(&hlp->lock);
kernel/generic/src/cpu/cpu.c
(r98caf49 → r55b77d9)
@@ -82,5 +82,5 @@
         for (j = 0; j < RQ_COUNT; j++) {
             irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
-            list_initialize(&cpus[i].rq[j].rq_head);
+            list_initialize(&cpus[i].rq[j].rq);
         }
     }
kernel/generic/src/ipc/ipc.c
(r98caf49 → r55b77d9)
@@ -128,5 +128,5 @@
     list_initialize(&box->answers);
     list_initialize(&box->irq_notifs);
-    list_initialize(&box->irq_head);
+    list_initialize(&box->irq_list);
     box->task = task;
 }
@@ -183,5 +183,5 @@
     */
     irq_spinlock_lock(&TASK->lock, true);
-    list_append(&sync_box->sync_box_link, &TASK->sync_box_head);
+    list_append(&sync_box->sync_box_link, &TASK->sync_boxes);
     irq_spinlock_unlock(&TASK->lock, true);
 
@@ -450,5 +450,6 @@
         irq_spinlock_lock(&box->irq_lock, false);
 
-        request = list_get_instance(box->irq_notifs.next, call_t, link);
+        request = list_get_instance(list_first(&box->irq_notifs),
+            call_t, link);
         list_remove(&request->link);
 
@@ -459,5 +460,6 @@
 
         /* Handle asynchronous answers */
-        request = list_get_instance(box->answers.next, call_t, link);
+        request = list_get_instance(list_first(&box->answers),
+            call_t, link);
         list_remove(&request->link);
         atomic_dec(&request->data.phone->active_calls);
@@ -467,5 +469,6 @@
 
         /* Handle requests */
-        request = list_get_instance(box->calls.next, call_t, link);
+        request = list_get_instance(list_first(&box->calls),
+            call_t, link);
         list_remove(&request->link);
 
@@ -494,8 +497,8 @@
 *
 */
-void ipc_cleanup_call_list(link_t *lst)
+void ipc_cleanup_call_list(list_t *lst)
 {
     while (!list_empty(lst)) {
-        call_t *call = list_get_instance(lst->next, call_t, link);
+        call_t *call = list_get_instance(list_first(lst), call_t, link);
         if (call->buffer)
             free(call->buffer);
@@ -526,5 +529,5 @@
     irq_spinlock_lock(&box->lock, true);
     while (!list_empty(&box->connected_phones)) {
-        phone = list_get_instance(box->connected_phones.next,
+        phone = list_get_instance(list_first(&box->connected_phones),
             phone_t, link);
         if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
@@ -606,7 +609,7 @@
     /* Wait for all answers to interrupted synchronous calls to arrive */
     ipl_t ipl = interrupts_disable();
-    while (!list_empty(&TASK->sync_box_head)) {
-        answerbox_t *box = list_get_instance(TASK->sync_box_head.next,
-            answerbox_t, sync_box_link);
+    while (!list_empty(&TASK->sync_boxes)) {
+        answerbox_t *box = list_get_instance(
+            list_first(&TASK->sync_boxes), answerbox_t, sync_box_link);
 
         list_remove(&box->sync_box_link);
@@ -743,8 +746,5 @@
 #endif
 
-    link_t *cur;
-
     printf(" --- incomming calls ---\n");
-    for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
@@ -767,6 +767,4 @@
 
     printf(" --- dispatched calls ---\n");
-    for (cur = task->answerbox.dispatched_calls.next;
-        cur != &task->answerbox.dispatched_calls;
-        cur = cur->next) {
+    list_foreach(task->answerbox.dispatched_calls, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
@@ -789,7 +787,5 @@
 
     printf(" --- incoming answers ---\n");
-    for (cur = task->answerbox.answers.next;
-        cur != &task->answerbox.answers;
-        cur = cur->next) {
+    list_foreach(task->answerbox.answers, cur) {
         call_t *call = list_get_instance(cur, call_t, link);
 
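A recurring pattern in this file: list_first() evidently returns NULL for an empty list, so every call above sits behind a !list_empty() guard (or the while condition itself) before the result is fed to list_get_instance(). The drain loop in ipc_cleanup_call_list() is the canonical shape; a condensed sketch of the idiom:

    /* Sketch of the drain idiom used throughout ipc.c: the emptiness
     * check guarantees list_first() is non-NULL. */
    while (!list_empty(&box->calls)) {
        call_t *call = list_get_instance(list_first(&box->calls),
            call_t, link);
        list_remove(&call->link);
        /* ... answer or free the call ... */
    }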
kernel/generic/src/ipc/ipcrsc.c
(r98caf49 → r55b77d9)
@@ -146,10 +146,9 @@
 call_t *get_call(sysarg_t callid)
 {
-    link_t *lst;
     call_t *result = NULL;
 
     irq_spinlock_lock(&TASK->answerbox.lock, true);
-    for (lst = TASK->answerbox.dispatched_calls.next;
-        lst != &TASK->answerbox.dispatched_calls; lst = lst->next) {
+
+    list_foreach(TASK->answerbox.dispatched_calls, lst) {
         call_t *call = list_get_instance(lst, call_t, link);
         if ((sysarg_t) call == callid) {
kernel/generic/src/ipc/irq.c
(r98caf49 → r55b77d9)
@@ -200,5 +200,5 @@
 
     hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
-    list_append(&irq->notif_cfg.link, &box->irq_head);
+    list_append(&irq->notif_cfg.link, &box->irq_list);
 
     irq_spinlock_unlock(&box->irq_lock, false);
@@ -282,8 +282,8 @@
     irq_spinlock_lock(&box->irq_lock, false);
 
-    while (box->irq_head.next != &box->irq_head) {
+    while (!list_empty(&box->irq_list)) {
         DEADLOCK_PROBE_INIT(p_irqlock);
 
-        irq_t *irq = list_get_instance(box->irq_head.next, irq_t,
+        irq_t *irq = list_get_instance(list_first(&box->irq_list), irq_t,
             notif_cfg.link);
 
kernel/generic/src/mm/as.c
(r98caf49 → r55b77d9)
@@ -94,5 +94,5 @@
 *
 * This lock protects:
- * - inactive_as_with_asid_head list
+ * - inactive_as_with_asid_list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
@@ -105,5 +105,5 @@
 * that have valid ASID.
 */
-LIST_INITIALIZE(inactive_as_with_asid_head);
+LIST_INITIALIZE(inactive_as_with_asid_list);
 
 /** Kernel address space. */
@@ -235,8 +235,8 @@
     bool cond = true;
     while (cond) {
-        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
+        ASSERT(!list_empty(&as->as_area_btree.leaf_list));
 
         btree_node_t *node =
-            list_get_instance(as->as_area_btree.leaf_head.next,
+            list_get_instance(list_first(&as->as_area_btree.leaf_list),
             btree_node_t, leaf_link);
 
@@ -602,8 +602,8 @@
     bool cond = true;
     while (cond) {
-        ASSERT(!list_empty(&area->used_space.leaf_head));
+        ASSERT(!list_empty(&area->used_space.leaf_list));
 
         btree_node_t *node =
-            list_get_instance(area->used_space.leaf_head.prev,
+            list_get_instance(list_last(&area->used_space.leaf_list),
             btree_node_t, leaf_link);
 
@@ -727,5 +727,4 @@
     if (--sh_info->refcount == 0) {
         dealloc = true;
-        link_t *cur;
 
         /*
@@ -733,6 +732,5 @@
         * reference from all frames found there.
         */
-        for (cur = sh_info->pagemap.leaf_head.next;
-            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
+        list_foreach(sh_info->pagemap.leaf_list, cur) {
             btree_node_t *node
                 = list_get_instance(cur, btree_node_t, leaf_link);
@@ -786,7 +784,5 @@
     * Visit only the pages mapped by used_space B+tree.
     */
-    link_t *cur;
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node;
         btree_key_t i;
@@ -1065,8 +1061,6 @@
     */
     size_t used_pages = 0;
-    link_t *cur;
 
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
@@ -1094,6 +1088,5 @@
     size_t frame_idx = 0;
 
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node = list_get_instance(cur, btree_node_t,
             leaf_link);
@@ -1147,6 +1140,5 @@
     frame_idx = 0;
 
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
@@ -1334,5 +1326,5 @@
 
         list_append(&old_as->inactive_as_with_asid_link,
-            &inactive_as_with_asid_head);
+            &inactive_as_with_asid_list);
 
 
@@ -2027,8 +2019,8 @@
 
     /* Eventually check the addresses behind each area */
-    link_t *cur;
-    for (cur = AS->as_area_btree.leaf_head.next;
-        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
-        cur = cur->next) {
+    list_foreach(AS->as_area_btree.leaf_list, cur) {
+        if (ret != 0)
+            break;
+
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2072,8 +2064,6 @@
 
     size_t area_cnt = 0;
-    link_t *cur;
-
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2088,6 +2078,5 @@
     size_t area_idx = 0;
 
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -2125,7 +2114,5 @@
 
     /* Print out info about address space areas */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node
             = list_get_instance(cur, btree_node_t, leaf_link);
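One subtlety in as.c: list_foreach carries no extra termination clause, so the old loop guard (ret == 0) && (cur != &...leaf_head) cannot be expressed in the macro and becomes an explicit break at the top of the body. Condensed from the hunk above:

    /* Early exit inside list_foreach (shape taken from the diff). */
    list_foreach(AS->as_area_btree.leaf_list, cur) {
        if (ret != 0)
            break;
        /* ... check the addresses behind this area ... */
    }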
kernel/generic/src/mm/backend_anon.c
(r98caf49 → r55b77d9)
@@ -97,6 +97,4 @@
 void anon_share(as_area_t *area)
 {
-    link_t *cur;
-
     ASSERT(mutex_locked(&area->as->lock));
     ASSERT(mutex_locked(&area->lock));
@@ -106,6 +104,5 @@
     */
     mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
+    list_foreach(area->used_space.leaf_list, cur) {
         btree_node_t *node;
         unsigned int i;
kernel/generic/src/mm/backend_elf.c
(r98caf49 → r55b77d9)
@@ -139,5 +139,5 @@
     */
     if (area->flags & AS_AREA_WRITE) {
-        node = list_get_instance(area->used_space.leaf_head.next,
+        node = list_get_instance(list_first(&area->used_space.leaf_list),
             btree_node_t, leaf_link);
     } else {
@@ -153,5 +153,5 @@
     */
     mutex_lock(&area->sh_info->lock);
-    for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
+    for (cur = &node->leaf_link; cur != &area->used_space.leaf_list.head;
         cur = cur->next) {
         unsigned int i;
kernel/generic/src/mm/buddy.c
(r98caf49 → r55b77d9)
@@ -82,5 +82,5 @@
     * Use memory after our own structure.
     */
-    b->order = (link_t *) (&b[1]);
+    b->order = (list_t *) (&b[1]);
 
     for (i = 0; i <= max_order; i++)
@@ -176,6 +176,6 @@
     * the request can be immediatelly satisfied.
     */
-    if (!list_empty(&b->order[i])) {
-        res = b->order[i].next;
+    res = list_first(&b->order[i]);
+    if (res != NULL) {
         list_remove(res);
         b->op->mark_busy(b, res);
kernel/generic/src/mm/slab.c
(r98caf49 → r55b77d9)
@@ -317,6 +317,6 @@
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t,
-            link);
+        slab = list_get_instance(list_first(&cache->partial_slabs),
+            slab_t, link);
         list_remove(&slab->link);
@@ -360,7 +360,7 @@
     if (!list_empty(&cache->magazines)) {
         if (first)
-            cur = cache->magazines.next;
+            cur = list_first(&cache->magazines);
         else
-            cur = cache->magazines.prev;
+            cur = list_last(&cache->magazines);
 
         mag = list_get_instance(cur, slab_magazine_t, link);
@@ -812,7 +812,5 @@
 
     size_t frames = 0;
-    link_t *cur;
-    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
+    list_foreach(slab_cache_list, cur) {
         slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
@@ -861,9 +859,9 @@
         link_t *cur;
         size_t i;
-        for (i = 0, cur = slab_cache_list.next;
-            (i < skip) && (cur != &slab_cache_list);
+        for (i = 0, cur = slab_cache_list.head.next;
+            (i < skip) && (cur != &slab_cache_list.head);
             i++, cur = cur->next);
 
-        if (cur == &slab_cache_list) {
+        if (cur == &slab_cache_list.head) {
             irq_spinlock_unlock(&slab_cache_lock, true);
             break;
@@ -940,7 +938,5 @@
     irq_spinlock_lock(&slab_cache_lock, false);
 
-    link_t *cur;
-    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
+    list_foreach(slab_cache_list, cur) {
         slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
         if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
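The magazine code shows the two-ended accessors side by side, and also why raw sentinel access survives the conversion: the skip-loop in the reclaim path resumes iteration from a numeric index, which list_foreach (with its self-declared cursor) cannot express, so it keeps slab_cache_list.head.next. A condensed sketch of the two-ended take (variable names as in the surrounding function, context assumed):

    /* Take a magazine from either end of the list (sketch). */
    link_t *cur = first ? list_first(&cache->magazines)
        : list_last(&cache->magazines);
    if (cur != NULL) {
        slab_magazine_t *mag =
            list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        /* ... hand mag back to the caller ... */
    }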
kernel/generic/src/proc/scheduler.c
(r98caf49 → r55b77d9)
@@ -237,6 +237,6 @@
         * Take the first thread from the queue.
         */
-        thread_t *thread =
-            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+        thread_t *thread = list_get_instance(
+            list_first(&CPU->rq[i].rq), thread_t, rq_link);
         list_remove(&thread->rq_link);
 
@@ -273,6 +273,6 @@
 static void relink_rq(int start)
 {
-    link_t head;
+    list_t list;
 
-    list_initialize(&head);
+    list_initialize(&list);
     irq_spinlock_lock(&CPU->lock, false);
@@ -284,5 +284,5 @@
 
         irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-        list_concat(&head, &CPU->rq[i + 1].rq_head);
+        list_concat(&list, &CPU->rq[i + 1].rq);
         size_t n = CPU->rq[i + 1].n;
         CPU->rq[i + 1].n = 0;
@@ -292,5 +292,5 @@
 
         irq_spinlock_lock(&CPU->rq[i].lock, false);
-        list_concat(&CPU->rq[i].rq_head, &head);
+        list_concat(&CPU->rq[i].rq, &list);
         CPU->rq[i].n += n;
         irq_spinlock_unlock(&CPU->rq[i].lock, false);
@@ -616,7 +616,7 @@
 
     /* Search rq from the back */
-    link_t *link = cpu->rq[rq].rq_head.prev;
+    link_t *link = cpu->rq[rq].rq.head.prev;
 
-    while (link != &(cpu->rq[rq].rq_head)) {
+    while (link != &(cpu->rq[rq].rq.head)) {
         thread = (thread_t *) list_get_instance(link,
             thread_t, rq_link);
@@ -740,9 +740,7 @@
 
         printf("\trq[%u]: ", i);
-        link_t *cur;
-        for (cur = cpus[cpu].rq[i].rq_head.next;
-            cur != &(cpus[cpu].rq[i].rq_head);
-            cur = cur->next) {
-            thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+        list_foreach(cpus[cpu].rq[i].rq, cur) {
+            thread_t *thread = list_get_instance(cur,
+                thread_t, rq_link);
             printf("%" PRIu64 "(%s) ", thread->tid,
                 thread_states[thread->state]);
kernel/generic/src/proc/task.c
(r98caf49 → r55b77d9)
@@ -155,6 +155,6 @@
     mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
 
-    list_initialize(&task->th_head);
-    list_initialize(&task->sync_box_head);
+    list_initialize(&task->threads);
+    list_initialize(&task->sync_boxes);
 
     ipc_answerbox_init(&task->answerbox, task);
@@ -435,6 +435,5 @@
 
     /* Current values of threads */
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
 
@@ -468,6 +467,5 @@
     */
 
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
         bool sleeping = false;
kernel/generic/src/proc/thread.c
(r98caf49 → r55b77d9)
@@ -260,5 +260,5 @@
     */
 
-    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+    list_append(&thread->rq_link, &cpu->rq[i].rq);
     cpu->rq[i].n++;
     irq_spinlock_unlock(&(cpu->rq[i].lock), true);
@@ -423,5 +423,5 @@
     atomic_inc(&task->lifecount);
 
-    list_append(&thread->th_link, &task->th_head);
+    list_append(&thread->th_link, &task->threads);
 
     irq_spinlock_pass(&task->lock, &threads_lock);
kernel/generic/src/synch/futex.c
(r98caf49 → r55b77d9)
@@ -272,11 +272,8 @@
 void futex_cleanup(void)
 {
-    link_t *cur;
-
     mutex_lock(&futex_ht_lock);
     mutex_lock(&TASK->futexes_lock);
 
-    for (cur = TASK->futexes.leaf_head.next;
-        cur != &TASK->futexes.leaf_head; cur = cur->next) {
+    list_foreach(TASK->futexes.leaf_list, cur) {
         btree_node_t *node;
         unsigned int i;
kernel/generic/src/synch/waitq.c
(r98caf49 → r55b77d9)
@@ -69,5 +69,5 @@
 {
     irq_spinlock_initialize(&wq->lock, "wq.lock");
-    list_initialize(&wq->head);
+    list_initialize(&wq->sleepers);
     wq->missed_wakeups = 0;
 }
@@ -196,6 +196,7 @@
     irq_spinlock_lock(&wq->lock, true);
 
-    if (!list_empty(&wq->head)) {
-        thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+    if (!list_empty(&wq->sleepers)) {
+        thread_t *thread = list_get_instance(list_first(&wq->sleepers),
+            thread_t, wq_link);
 
         irq_spinlock_lock(&thread->lock, false);
@@ -407,5 +408,5 @@
     }
 
-    list_append(&THREAD->wq_link, &wq->head);
+    list_append(&THREAD->wq_link, &wq->sleepers);
 
     /*
@@ -464,5 +465,5 @@
 
 loop:
-    if (list_empty(&wq->head)) {
+    if (list_empty(&wq->sleepers)) {
         wq->missed_wakeups++;
         if ((count) && (mode == WAKEUP_ALL))
@@ -473,5 +474,6 @@
 
     count++;
-    thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link);
+    thread_t *thread = list_get_instance(list_first(&wq->sleepers),
+        thread_t, wq_link);
 
     /*
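Beyond the mechanical API swap, waitq renames wq->head to wq->sleepers, which reads as documentation: the queue holds the threads blocked on the wait queue, in arrival order (list_append() enqueues at the tail, list_first() dequeues from the front, so wake-ups are FIFO). Condensed from the hunks above:

    /* Wake the longest-sleeping thread, if any (condensed sketch). */
    if (!list_empty(&wq->sleepers)) {
        thread_t *thread = list_get_instance(list_first(&wq->sleepers),
            thread_t, wq_link);
        /* ... remove it from the queue and make it ready ... */
    }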
kernel/generic/src/sysinfo/stats.c
(r98caf49 → r55b77d9)
@@ -173,7 +173,5 @@
 
     /* Walk the B+ tree and count pages */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
@@ -218,7 +216,5 @@
 
     /* Walk the B+ tree and count pages */
-    link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next;
-        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
         btree_node_t *node =
             list_get_instance(cur, btree_node_t, leaf_link);
kernel/generic/src/time/clock.c
(r98caf49 → r55b77d9)
@@ -163,6 +163,7 @@
 
     link_t *cur;
-    while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-        timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+    while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
+        timeout_t *timeout = list_get_instance(cur, timeout_t,
+            link);
 
         irq_spinlock_lock(&timeout->lock, false);
kernel/generic/src/time/timeout.c
(r98caf49 → r55b77d9)
@@ -54,5 +54,5 @@
 {
     irq_spinlock_initialize(&CPU->timeoutlock, "cpu.timeoutlock");
-    list_initialize(&CPU->timeout_active_head);
+    list_initialize(&CPU->timeout_active_list);
 }
 
@@ -119,7 +119,7 @@
     timeout_t *target = NULL;
     link_t *cur;
-    for (cur = CPU->timeout_active_head.next;
-        cur != &CPU->timeout_active_head; cur = cur->next) {
+    for (cur = CPU->timeout_active_list.head.next;
+        cur != &CPU->timeout_active_list.head; cur = cur->next) {
         target = list_get_instance(cur, timeout_t, link);
         irq_spinlock_lock(&target->lock, false);
@@ -135,5 +135,5 @@
     /* Avoid using cur->prev directly */
     link_t *prev = cur->prev;
-    list_prepend(&timeout->link, prev);
+    list_insert_after(&timeout->link, prev);
 
     /*
@@ -146,5 +146,5 @@
     * Decrease ticks of timeout's immediate succesor by timeout->ticks.
     */
-    if (cur != &CPU->timeout_active_head) {
+    if (cur != &CPU->timeout_active_list.head) {
         irq_spinlock_lock(&target->lock, false);
         target->ticks -= timeout->ticks;
@@ -184,9 +184,9 @@
     /*
     * Now we know for sure that timeout hasn't been activated yet
-    * and is lurking in timeout->cpu->timeout_active_head queue.
+    * and is lurking in timeout->cpu->timeout_active_list.
     */
 
     link_t *cur = timeout->link.next;
-    if (cur != &timeout->cpu->timeout_active_head) {
+    if (cur != &timeout->cpu->timeout_active_list.head) {
         timeout_t *tmp = list_get_instance(cur, timeout_t, link);
         irq_spinlock_lock(&tmp->lock, false);
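The timeout queue is a delta list: each entry stores ticks relative to its predecessor, and timeout_register() scans for the insertion point. Here the list_prepend to list_insert_after change is not just a rename. When the scan stops at the very front, prev is the sentinel &CPU->timeout_active_list.head, and inserting after the sentinel is exactly an insert at the head of the list, so the old trick of treating an arbitrary link as a "head" disappears. Condensed shape of the insertion (locking omitted in this sketch):

    /* Insert into the delta list; prev may be the sentinel, in which
     * case this degenerates to an insert at the front. */
    link_t *prev = cur->prev;
    list_insert_after(&timeout->link, prev);

    /* The successor's relative ticks must then shrink by ours. */
    if (cur != &CPU->timeout_active_list.head)
        target->ticks -= timeout->ticks;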
kernel/generic/src/udebug/udebug.c
(r98caf49 → r55b77d9)
@@ -406,6 +406,5 @@
 
     /* Finish debugging of all userspace threads */
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+    list_foreach(task->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
 
kernel/generic/src/udebug/udebug_ops.c
(r98caf49 → r55b77d9)
@@ -196,6 +196,5 @@
     /* Set udebug.active on all of the task's userspace threads. */
 
-    link_t *cur;
-    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
+    list_foreach(TASK->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
 
@@ -390,6 +389,5 @@
 
     /* FIXME: make sure the thread isn't past debug shutdown... */
-    link_t *cur;
-    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
+    list_foreach(TASK->threads, cur) {
         thread_t *thread = list_get_instance(cur, thread_t, th_link);
 