Changes in / [bd08239:313775b] in mainline
- Files:
-
- 41 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/src/drivers/tick.c
rbd08239 r313775b 44 44 #include <debug.h> 45 45 46 #define TICK_RESTART_TIME 50 /* Worst case estimate. */ 47 46 48 /** Initialize tick and stick interrupt. */ 47 49 void tick_init(void) … … 49 51 /* initialize TICK interrupt */ 50 52 tick_compare_reg_t compare; 51 softint_reg_t clear;52 53 53 54 interrupt_register(14, "tick_int", tick_interrupt); … … 58 59 tick_compare_write(compare.value); 59 60 60 clear.value = 0;61 clear.tick_int = 1;62 clear_softint_write(clear.value);63 64 61 #if defined (US3) || defined (SUN4V) 65 62 /* disable STICK interrupts and clear any pending ones */ 66 63 tick_compare_reg_t stick_compare; 64 softint_reg_t clear; 67 65 68 66 stick_compare.value = stick_compare_read(); -
kernel/genarch/src/mm/asid.c
rbd08239 r313775b 97 97 * inactive address space. 98 98 */ 99 tmp = list_first(&inactive_as_with_asid_list);100 ASSERT(tmp != NULL);99 ASSERT(!list_empty(&inactive_as_with_asid_head)); 100 tmp = inactive_as_with_asid_head.next; 101 101 list_remove(tmp); 102 102 -
kernel/generic/include/adt/btree.h
rbd08239 r313775b 89 89 typedef struct { 90 90 btree_node_t *root; /**< B-tree root node pointer. */ 91 li st_t leaf_list; /**< List of leaves. */91 link_t leaf_head; /**< Leaf-level list head. */ 92 92 } btree_t; 93 93 -
kernel/generic/include/adt/hash_table.h
rbd08239 r313775b 68 68 /** Hash table structure. */ 69 69 typedef struct { 70 li st_t *entry;70 link_t *entry; 71 71 size_t entries; 72 72 size_t max_keys; -
kernel/generic/include/adt/list.h
rbd08239 r313775b 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2011 Jiri Svoboda4 3 * All rights reserved. 5 4 * … … 40 39 #include <trace.h> 41 40 42 /** Doubly linked list link. */41 /** Doubly linked list head and link type. */ 43 42 typedef struct link { 44 43 struct link *prev; /**< Pointer to the previous item in the list. */ … … 46 45 } link_t; 47 46 48 /** Doubly linked list. */49 typedef struct list {50 link_t head; /**< List head. Does not have any data. */51 } list_t;52 53 47 /** Declare and initialize statically allocated list. 54 48 * … … 57 51 */ 58 52 #define LIST_INITIALIZE(name) \ 59 list_t name = { \ 60 .head = { \ 61 .prev = &(name).head, \ 62 .next = &(name).head \ 63 } \ 53 link_t name = { \ 54 .prev = &name, \ 55 .next = &name \ 64 56 } 65 57 … … 68 60 69 61 #define list_foreach(list, iterator) \ 70 for (link_t *iterator = (list). head.next; \71 iterator != &(list) .head; iterator = iterator->next)62 for (link_t *iterator = (list).next; \ 63 iterator != &(list); iterator = iterator->next) 72 64 73 65 /** Initialize doubly-linked circular list link … … 88 80 * Initialize doubly-linked circular list. 89 81 * 90 * @param list Pointer to list_t structure. 91 * 92 */ 93 NO_TRACE static inline void list_initialize(list_t *list) 94 { 95 list->head.prev = &list->head; 96 list->head.next = &list->head; 82 * @param list Pointer to link_t structure representing the list. 83 * 84 */ 85 NO_TRACE static inline void list_initialize(link_t *list) 86 { 87 list->prev = list; 88 list->next = list; 89 } 90 91 /** Add item to the beginning of doubly-linked circular list 92 * 93 * Add item to the beginning of doubly-linked circular list. 94 * 95 * @param link Pointer to link_t structure to be added. 96 * @param list Pointer to link_t structure representing the list. 
97 * 98 */ 99 NO_TRACE static inline void list_prepend(link_t *link, link_t *list) 100 { 101 link->next = list->next; 102 link->prev = list; 103 list->next->prev = link; 104 list->next = link; 105 } 106 107 /** Add item to the end of doubly-linked circular list 108 * 109 * Add item to the end of doubly-linked circular list. 110 * 111 * @param link Pointer to link_t structure to be added. 112 * @param list Pointer to link_t structure representing the list. 113 * 114 */ 115 NO_TRACE static inline void list_append(link_t *link, link_t *list) 116 { 117 link->prev = list->prev; 118 link->next = list; 119 list->prev->next = link; 120 list->prev = link; 97 121 } 98 122 … … 100 124 * 101 125 */ 102 static inline void list_insert_before(link_t *lnew, link_t *lold) 103 { 104 lnew->next = lold; 105 lnew->prev = lold->prev; 106 lold->prev->next = lnew; 107 lold->prev = lnew; 126 static inline void list_insert_before(link_t *link, link_t *list) 127 { 128 list_append(link, list); 108 129 } 109 130 … … 111 132 * 112 133 */ 113 static inline void list_insert_after(link_t *lnew, link_t *lold) 114 { 115 lnew->prev = lold; 116 lnew->next = lold->next; 117 lold->next->prev = lnew; 118 lold->next = lnew; 119 } 120 121 /** Add item to the beginning of doubly-linked circular list 122 * 123 * Add item to the beginning of doubly-linked circular list. 124 * 125 * @param link Pointer to link_t structure to be added. 126 * @param list Pointer to list_t structure. 127 * 128 */ 129 NO_TRACE static inline void list_prepend(link_t *link, list_t *list) 130 { 131 list_insert_after(link, &list->head); 132 } 133 134 /** Add item to the end of doubly-linked circular list 135 * 136 * Add item to the end of doubly-linked circular list. 137 * 138 * @param link Pointer to link_t structure to be added. 139 * @param list Pointer to list_t structure. 
140 * 141 */ 142 NO_TRACE static inline void list_append(link_t *link, list_t *list) 143 { 144 list_insert_before(link, &list->head); 134 static inline void list_insert_after(link_t *link, link_t *list) 135 { 136 list_prepend(list, link); 145 137 } 146 138 … … 164 156 * Query emptiness of doubly-linked circular list. 165 157 * 166 * @param list Pointer to lin s_t structure.167 * 168 */ 169 NO_TRACE static inline int list_empty(li st_t *list)170 { 171 return (list-> head.next == &list->head);172 } 173 174 /** Get first item inlist.175 * 176 * @param list Pointer to li st_t structure.158 * @param list Pointer to link_t structure representing the list. 159 * 160 */ 161 NO_TRACE static inline int list_empty(link_t *list) 162 { 163 return (list->next == list); 164 } 165 166 /** Get head item of a list. 167 * 168 * @param list Pointer to link_t structure representing the list. 177 169 * 178 170 * @return Head item of the list. … … 180 172 * 181 173 */ 182 static inline link_t *list_first(list_t *list) 183 { 184 return ((list->head.next == &list->head) ? NULL : list->head.next); 185 } 186 187 /** Get last item in list. 188 * 189 * @param list Pointer to list_t structure. 190 * 191 * @return Head item of the list. 192 * @return NULL if the list is empty. 193 * 194 */ 195 static inline link_t *list_last(list_t *list) 196 { 197 return ((list->head.prev == &list->head) ? NULL : list->head.prev); 174 static inline link_t *list_head(link_t *list) 175 { 176 return ((list->next == list) ? NULL : list->next); 198 177 } 199 178 … … 252 231 } 253 232 254 /** Get n-th item ina list.233 /** Get n-th item of a list. 255 234 * 256 235 * @param list Pointer to link_t structure representing the list. 
… … 261 240 * 262 241 */ 263 static inline link_t *list_nth(li st_t *list, unsigned int n)242 static inline link_t *list_nth(link_t *list, unsigned int n) 264 243 { 265 244 unsigned int cnt = 0; … … 275 254 } 276 255 277 extern int list_member(const link_t *, const li st_t *);278 extern void list_concat(li st_t *, list_t *);279 extern unsigned int list_count(const li st_t *);256 extern int list_member(const link_t *, const link_t *); 257 extern void list_concat(link_t *, link_t *); 258 extern unsigned int list_count(const link_t *); 280 259 281 260 #endif -
kernel/generic/include/console/chardev.h
rbd08239 r313775b 88 88 /** Fields suitable for multiplexing. */ 89 89 link_t link; 90 li st_t list;90 link_t list; 91 91 92 92 /** Implementation of outdev operations. */ -
kernel/generic/include/console/kconsole.h
rbd08239 r313775b 91 91 92 92 SPINLOCK_EXTERN(cmd_lock); 93 extern li st_t cmd_list;93 extern link_t cmd_head; 94 94 95 95 extern void kconsole_init(void); -
kernel/generic/include/cpu.h
rbd08239 r313775b 59 59 60 60 IRQ_SPINLOCK_DECLARE(timeoutlock); 61 li st_t timeout_active_list;61 link_t timeout_active_head; 62 62 63 63 /** -
kernel/generic/include/ipc/ipc.h
rbd08239 r313775b 166 166 167 167 /** Phones connected to this answerbox. */ 168 li st_t connected_phones;168 link_t connected_phones; 169 169 /** Received calls. */ 170 li st_t calls;171 li st_t dispatched_calls; /* Should be hash table in the future */170 link_t calls; 171 link_t dispatched_calls; /* Should be hash table in the future */ 172 172 173 173 /** Answered calls. */ 174 li st_t answers;174 link_t answers; 175 175 176 176 IRQ_SPINLOCK_DECLARE(irq_lock); 177 177 178 178 /** Notifications from IRQ handlers. */ 179 li st_t irq_notifs;179 link_t irq_notifs; 180 180 /** IRQs with notifications to this answerbox. */ 181 li st_t irq_list;181 link_t irq_head; 182 182 } answerbox_t; 183 183 … … 243 243 extern void ipc_backsend_err(phone_t *, call_t *, sysarg_t); 244 244 extern void ipc_answerbox_slam_phones(answerbox_t *, bool); 245 extern void ipc_cleanup_call_list(li st_t *);245 extern void ipc_cleanup_call_list(link_t *); 246 246 247 247 extern void ipc_print_task(task_id_t); -
kernel/generic/include/mm/as.h
rbd08239 r313775b 254 254 255 255 extern as_operations_t *as_operations; 256 extern li st_t inactive_as_with_asid_list;256 extern link_t inactive_as_with_asid_head; 257 257 258 258 extern void as_init(void); -
kernel/generic/include/mm/buddy.h
rbd08239 r313775b 72 72 /** Maximal order of block which can be stored by buddy system. */ 73 73 uint8_t max_order; 74 li st_t *order;74 link_t *order; 75 75 buddy_system_operations_t *op; 76 76 /** Pointer to be used by the implementation. */ -
kernel/generic/include/mm/slab.h
rbd08239 r313775b 111 111 112 112 /* Slabs */ 113 li st_t full_slabs; /**< List of full slabs */114 li st_t partial_slabs; /**< List of partial slabs */113 link_t full_slabs; /**< List of full slabs */ 114 link_t partial_slabs; /**< List of partial slabs */ 115 115 SPINLOCK_DECLARE(slablock); 116 116 /* Magazines */ 117 li st_t magazines; /**< List o full magazines */117 link_t magazines; /**< List o full magazines */ 118 118 SPINLOCK_DECLARE(maglock); 119 119 -
kernel/generic/include/proc/scheduler.h
rbd08239 r313775b 48 48 typedef struct { 49 49 IRQ_SPINLOCK_DECLARE(lock); 50 li st_t rq;/**< List of ready threads. */51 size_t n; 50 link_t rq_head; /**< List of ready threads. */ 51 size_t n; /**< Number of threads in rq_ready. */ 52 52 } runq_t; 53 53 -
kernel/generic/include/proc/task.h
rbd08239 r313775b 73 73 char name[TASK_NAME_BUFLEN]; 74 74 /** List of threads contained in this task. */ 75 li st_t threads;75 link_t th_head; 76 76 /** Address space. */ 77 77 as_t *as; … … 94 94 stats_ipc_t ipc_info; /**< IPC statistics */ 95 95 /** List of synchronous answerboxes. */ 96 li st_t sync_boxes;96 link_t sync_box_head; 97 97 98 98 #ifdef CONFIG_UDEBUG -
kernel/generic/include/synch/waitq.h
rbd08239 r313775b 63 63 64 64 /** List of sleeping threads for which there was no missed_wakeup. */ 65 li st_t sleepers;65 link_t head; 66 66 } waitq_t; 67 67 -
kernel/generic/src/adt/btree.c
rbd08239 r313775b 108 108 void btree_create(btree_t *t) 109 109 { 110 list_initialize(&t->leaf_ list);110 list_initialize(&t->leaf_head); 111 111 t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0); 112 112 node_initialize(t->root); 113 list_append(&t->root->leaf_link, &t->leaf_ list);113 list_append(&t->root->leaf_link, &t->leaf_head); 114 114 } 115 115 … … 588 588 589 589 if (LEAF_NODE(node)) { 590 list_ insert_after(&rnode->leaf_link, &node->leaf_link);590 list_prepend(&rnode->leaf_link, &node->leaf_link); 591 591 } 592 592 … … 953 953 ASSERT(LEAF_NODE(node)); 954 954 955 if (node->leaf_link.prev != &t->leaf_ list.head)955 if (node->leaf_link.prev != &t->leaf_head) 956 956 return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link); 957 957 else … … 972 972 ASSERT(LEAF_NODE(node)); 973 973 974 if (node->leaf_link.next != &t->leaf_ list.head)974 if (node->leaf_link.next != &t->leaf_head) 975 975 return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link); 976 976 else … … 987 987 size_t i; 988 988 int depth = t->root->depth; 989 li st_t list;989 link_t head, *cur; 990 990 991 991 printf("Printing B-tree:\n"); 992 list_initialize(& list);993 list_append(&t->root->bfs_link, & list);992 list_initialize(&head); 993 list_append(&t->root->bfs_link, &head); 994 994 995 995 /* … … 997 997 * Levels are distinguished from one another by node->depth. 998 998 */ 999 while (!list_empty(& list)) {999 while (!list_empty(&head)) { 1000 1000 link_t *hlp; 1001 1001 btree_node_t *node; 1002 1002 1003 hlp = list_first(&list);1004 ASSERT(hlp != NULL);1003 hlp = head.next; 1004 ASSERT(hlp != &head); 1005 1005 node = list_get_instance(hlp, btree_node_t, bfs_link); 1006 1006 list_remove(hlp); … … 1018 1018 printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? 
"," : ""); 1019 1019 if (node->depth && node->subtree[i]) { 1020 list_append(&node->subtree[i]->bfs_link, & list);1020 list_append(&node->subtree[i]->bfs_link, &head); 1021 1021 } 1022 1022 } 1023 1023 1024 1024 if (node->depth && node->subtree[i]) 1025 list_append(&node->subtree[i]->bfs_link, & list);1025 list_append(&node->subtree[i]->bfs_link, &head); 1026 1026 1027 1027 printf(")"); … … 1031 1031 1032 1032 printf("Printing list of leaves:\n"); 1033 list_foreach(t->leaf_list, cur) {1033 for (cur = t->leaf_head.next; cur != &t->leaf_head; cur = cur->next) { 1034 1034 btree_node_t *node; 1035 1035 -
kernel/generic/src/adt/hash_table.c
rbd08239 r313775b 62 62 ASSERT(max_keys > 0); 63 63 64 h->entry = (li st_t *) malloc(m * sizeof(list_t), 0);64 h->entry = (link_t *) malloc(m * sizeof(link_t), 0); 65 65 if (!h->entry) 66 66 panic("Cannot allocate memory for hash table."); 67 67 68 memsetb(h->entry, m * sizeof(li st_t), 0);68 memsetb(h->entry, m * sizeof(link_t), 0); 69 69 70 70 for (i = 0; i < m; i++) … … 107 107 link_t *hash_table_find(hash_table_t *h, sysarg_t key[]) 108 108 { 109 link_t *cur; 109 110 size_t chain; 110 111 … … 117 118 ASSERT(chain < h->entries); 118 119 119 list_foreach(h->entry[chain], cur) {120 for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) { 120 121 if (h->op->compare(key, h->max_keys, cur)) { 121 122 /* … … 140 141 { 141 142 size_t chain; 143 link_t *cur; 142 144 143 145 ASSERT(h); … … 148 150 149 151 if (keys == h->max_keys) { 150 link_t *cur;151 152 152 153 /* … … 168 169 */ 169 170 for (chain = 0; chain < h->entries; chain++) { 170 list_foreach(h->entry[chain], cur) {171 for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) { 171 172 if (h->op->compare(key, keys, cur)) { 172 173 link_t *hlp; -
kernel/generic/src/adt/list.c
rbd08239 r313775b 43 43 /** Check for membership 44 44 * 45 * Check whether link is contained in a list.46 * Membership is defined as pointer equivalence.45 * Check whether link is contained in the list head. 46 * The membership is defined as pointer equivalence. 47 47 * 48 * @param link 49 * @param listList to look in.48 * @param link Item to look for. 49 * @param head List to look in. 50 50 * 51 51 * @return true if link is contained in head, false otherwise. 52 52 * 53 53 */ 54 int list_member(const link_t *link, const li st_t *list)54 int list_member(const link_t *link, const link_t *head) 55 55 { 56 56 bool found = false; 57 link_t *hlp = list->head.next;57 link_t *hlp = head->next; 58 58 59 while (hlp != &list->head) {59 while (hlp != head) { 60 60 if (hlp == link) { 61 61 found = true; … … 68 68 } 69 69 70 70 71 /** Concatenate two lists 71 72 * 72 * Concatenate lists @a list1 and @a list2, producing a single73 * list @a list1 containing items from both (in @a list1, @a list274 * order) and empty list @a list2.73 * Concatenate lists head1 and head2, producing a single 74 * list head1 containing items from both (in head1, head2 75 * order) and empty list head2. 75 76 * 76 * @param list1First list and concatenated output77 * @param list2Second list and empty output.77 * @param head1 First list and concatenated output 78 * @param head2 Second list and empty output. 78 79 * 79 80 */ 80 void list_concat(li st_t *list1, list_t *list2)81 void list_concat(link_t *head1, link_t *head2) 81 82 { 82 if (list_empty( list2))83 if (list_empty(head2)) 83 84 return; 84 85 85 list2->head.next->prev = list1->head.prev;86 list2->head.prev->next = &list1->head;87 list1->head.prev->next = list2->head.next;88 list1->head.prev = list2->head.prev;89 list_initialize( list2);86 head2->next->prev = head1->prev; 87 head2->prev->next = head1; 88 head1->prev->next = head2->next; 89 head1->prev = head2->prev; 90 list_initialize(head2); 90 91 } 91 92 -
kernel/generic/src/console/cmd.c
rbd08239 r313775b 573 573 spinlock_lock(&cmd_lock); 574 574 575 link_t *cur; 575 576 size_t len = 0; 576 list_foreach(cmd_list, cur) {577 for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { 577 578 cmd_info_t *hlp; 578 579 hlp = list_get_instance(cur, cmd_info_t, link); … … 590 591 } 591 592 592 list_foreach(cmd_list, cur) {593 for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { 593 594 cmd_info_t *hlp; 594 595 hlp = list_get_instance(cur, cmd_info_t, link); … … 645 646 int cmd_desc(cmd_arg_t *argv) 646 647 { 648 link_t *cur; 649 647 650 spinlock_lock(&cmd_lock); 648 651 649 list_foreach(cmd_list, cur) {652 for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { 650 653 cmd_info_t *hlp; 651 654 -
kernel/generic/src/console/console.c
rbd08239 r313775b 124 124 static void stdout_write(outdev_t *dev, wchar_t ch, bool silent) 125 125 { 126 list_foreach(dev->list, cur) { 126 link_t *cur; 127 128 for (cur = dev->list.next; cur != &dev->list; cur = cur->next) { 127 129 outdev_t *sink = list_get_instance(cur, outdev_t, link); 128 130 if ((sink) && (sink->op->write)) … … 133 135 static void stdout_redraw(outdev_t *dev) 134 136 { 135 list_foreach(dev->list, cur) { 137 link_t *cur; 138 139 for (cur = dev->list.next; cur != &dev->list; cur = cur->next) { 136 140 outdev_t *sink = list_get_instance(cur, outdev_t, link); 137 141 if ((sink) && (sink->op->redraw)) -
kernel/generic/src/console/kconsole.c
rbd08239 r313775b 84 84 85 85 SPINLOCK_INITIALIZE(cmd_lock); /**< Lock protecting command list. */ 86 LIST_INITIALIZE(cmd_ list); /**< Command list. */86 LIST_INITIALIZE(cmd_head); /**< Command list. */ 87 87 88 88 static wchar_t history[KCONSOLE_HISTORY][MAX_CMDLINE] = {}; … … 113 113 bool cmd_register(cmd_info_t *cmd) 114 114 { 115 link_t *cur; 116 115 117 spinlock_lock(&cmd_lock); 116 118 … … 118 120 * Make sure the command is not already listed. 119 121 */ 120 list_foreach(cmd_list, cur) {122 for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { 121 123 cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link); 122 124 … … 151 153 * Now the command can be added. 152 154 */ 153 list_append(&cmd->link, &cmd_ list);155 list_append(&cmd->link, &cmd_head); 154 156 155 157 spinlock_unlock(&cmd_lock); … … 174 176 175 177 if (*startpos == NULL) 176 *startpos = cmd_ list.head.next;177 178 for (; *startpos != &cmd_ list.head; *startpos = (*startpos)->next) {178 *startpos = cmd_head.next; 179 180 for (; *startpos != &cmd_head; *startpos = (*startpos)->next) { 179 181 cmd_info_t *hlp = list_get_instance(*startpos, cmd_info_t, link); 180 182 … … 557 559 558 560 cmd_info_t *cmd = NULL; 559 560 list_foreach(cmd_list, cur) { 561 link_t *cur; 562 563 for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { 561 564 cmd_info_t *hlp = list_get_instance(cur, cmd_info_t, link); 562 565 spinlock_lock(&hlp->lock); -
kernel/generic/src/cpu/cpu.c
rbd08239 r313775b 82 82 for (j = 0; j < RQ_COUNT; j++) { 83 83 irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock"); 84 list_initialize(&cpus[i].rq[j].rq );84 list_initialize(&cpus[i].rq[j].rq_head); 85 85 } 86 86 } -
kernel/generic/src/ipc/ipc.c
rbd08239 r313775b 128 128 list_initialize(&box->answers); 129 129 list_initialize(&box->irq_notifs); 130 list_initialize(&box->irq_ list);130 list_initialize(&box->irq_head); 131 131 box->task = task; 132 132 } … … 183 183 */ 184 184 irq_spinlock_lock(&TASK->lock, true); 185 list_append(&sync_box->sync_box_link, &TASK->sync_box es);185 list_append(&sync_box->sync_box_link, &TASK->sync_box_head); 186 186 irq_spinlock_unlock(&TASK->lock, true); 187 187 … … 450 450 irq_spinlock_lock(&box->irq_lock, false); 451 451 452 request = list_get_instance(list_first(&box->irq_notifs), 453 call_t, link); 452 request = list_get_instance(box->irq_notifs.next, call_t, link); 454 453 list_remove(&request->link); 455 454 … … 460 459 461 460 /* Handle asynchronous answers */ 462 request = list_get_instance(list_first(&box->answers), 463 call_t, link); 461 request = list_get_instance(box->answers.next, call_t, link); 464 462 list_remove(&request->link); 465 463 atomic_dec(&request->data.phone->active_calls); … … 469 467 470 468 /* Handle requests */ 471 request = list_get_instance(list_first(&box->calls), 472 call_t, link); 469 request = list_get_instance(box->calls.next, call_t, link); 473 470 list_remove(&request->link); 474 471 … … 497 494 * 498 495 */ 499 void ipc_cleanup_call_list(li st_t *lst)496 void ipc_cleanup_call_list(link_t *lst) 500 497 { 501 498 while (!list_empty(lst)) { 502 call_t *call = list_get_instance(l ist_first(lst), call_t, link);499 call_t *call = list_get_instance(lst->next, call_t, link); 503 500 if (call->buffer) 504 501 free(call->buffer); … … 529 526 irq_spinlock_lock(&box->lock, true); 530 527 while (!list_empty(&box->connected_phones)) { 531 phone = list_get_instance( list_first(&box->connected_phones),528 phone = list_get_instance(box->connected_phones.next, 532 529 phone_t, link); 533 530 if (SYNCH_FAILED(mutex_trylock(&phone->lock))) { … … 609 606 /* Wait for all answers to interrupted synchronous calls to arrive */ 610 607 ipl_t ipl = 
interrupts_disable(); 611 while (!list_empty(&TASK->sync_box es)) {612 answerbox_t *box = list_get_instance( 613 list_first(&TASK->sync_boxes),answerbox_t, sync_box_link);608 while (!list_empty(&TASK->sync_box_head)) { 609 answerbox_t *box = list_get_instance(TASK->sync_box_head.next, 610 answerbox_t, sync_box_link); 614 611 615 612 list_remove(&box->sync_box_link); … … 746 743 #endif 747 744 745 link_t *cur; 746 748 747 printf(" --- incomming calls ---\n"); 749 list_foreach(task->answerbox.calls, cur) { 748 for (cur = task->answerbox.calls.next; cur != &task->answerbox.calls; 749 cur = cur->next) { 750 750 call_t *call = list_get_instance(cur, call_t, link); 751 751 … … 767 767 768 768 printf(" --- dispatched calls ---\n"); 769 list_foreach(task->answerbox.dispatched_calls, cur) { 769 for (cur = task->answerbox.dispatched_calls.next; 770 cur != &task->answerbox.dispatched_calls; 771 cur = cur->next) { 770 772 call_t *call = list_get_instance(cur, call_t, link); 771 773 … … 787 789 788 790 printf(" --- incoming answers ---\n"); 789 list_foreach(task->answerbox.answers, cur) { 791 for (cur = task->answerbox.answers.next; 792 cur != &task->answerbox.answers; 793 cur = cur->next) { 790 794 call_t *call = list_get_instance(cur, call_t, link); 791 795 -
kernel/generic/src/ipc/ipcrsc.c
rbd08239 r313775b 146 146 call_t *get_call(sysarg_t callid) 147 147 { 148 link_t *lst; 148 149 call_t *result = NULL; 149 150 150 151 irq_spinlock_lock(&TASK->answerbox.lock, true); 151 152 list_foreach(TASK->answerbox.dispatched_calls, lst) {152 for (lst = TASK->answerbox.dispatched_calls.next; 153 lst != &TASK->answerbox.dispatched_calls; lst = lst->next) { 153 154 call_t *call = list_get_instance(lst, call_t, link); 154 155 if ((sysarg_t) call == callid) { -
kernel/generic/src/ipc/irq.c
rbd08239 r313775b 200 200 201 201 hash_table_insert(&irq_uspace_hash_table, key, &irq->link); 202 list_append(&irq->notif_cfg.link, &box->irq_ list);202 list_append(&irq->notif_cfg.link, &box->irq_head); 203 203 204 204 irq_spinlock_unlock(&box->irq_lock, false); … … 282 282 irq_spinlock_lock(&box->irq_lock, false); 283 283 284 while ( !list_empty(&box->irq_list)) {284 while (box->irq_head.next != &box->irq_head) { 285 285 DEADLOCK_PROBE_INIT(p_irqlock); 286 286 287 irq_t *irq = list_get_instance( list_first(&box->irq_list), irq_t,287 irq_t *irq = list_get_instance(box->irq_head.next, irq_t, 288 288 notif_cfg.link); 289 289 -
kernel/generic/src/mm/as.c
rbd08239 r313775b 94 94 * 95 95 * This lock protects: 96 * - inactive_as_with_asid_ list96 * - inactive_as_with_asid_head list 97 97 * - as->asid for each as of the as_t type 98 98 * - asids_allocated counter … … 105 105 * that have valid ASID. 106 106 */ 107 LIST_INITIALIZE(inactive_as_with_asid_ list);107 LIST_INITIALIZE(inactive_as_with_asid_head); 108 108 109 109 /** Kernel address space. */ … … 235 235 bool cond = true; 236 236 while (cond) { 237 ASSERT(!list_empty(&as->as_area_btree.leaf_ list));237 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 238 238 239 239 btree_node_t *node = 240 list_get_instance( list_first(&as->as_area_btree.leaf_list),240 list_get_instance(as->as_area_btree.leaf_head.next, 241 241 btree_node_t, leaf_link); 242 242 … … 602 602 bool cond = true; 603 603 while (cond) { 604 ASSERT(!list_empty(&area->used_space.leaf_ list));604 ASSERT(!list_empty(&area->used_space.leaf_head)); 605 605 606 606 btree_node_t *node = 607 list_get_instance( list_last(&area->used_space.leaf_list),607 list_get_instance(area->used_space.leaf_head.prev, 608 608 btree_node_t, leaf_link); 609 609 … … 727 727 if (--sh_info->refcount == 0) { 728 728 dealloc = true; 729 link_t *cur; 729 730 730 731 /* … … 732 733 * reference from all frames found there. 733 734 */ 734 list_foreach(sh_info->pagemap.leaf_list, cur) { 735 for (cur = sh_info->pagemap.leaf_head.next; 736 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 735 737 btree_node_t *node 736 738 = list_get_instance(cur, btree_node_t, leaf_link); … … 784 786 * Visit only the pages mapped by used_space B+tree. 
785 787 */ 786 list_foreach(area->used_space.leaf_list, cur) { 788 link_t *cur; 789 for (cur = area->used_space.leaf_head.next; 790 cur != &area->used_space.leaf_head; cur = cur->next) { 787 791 btree_node_t *node; 788 792 btree_key_t i; … … 1061 1065 */ 1062 1066 size_t used_pages = 0; 1063 1064 list_foreach(area->used_space.leaf_list, cur) { 1067 link_t *cur; 1068 1069 for (cur = area->used_space.leaf_head.next; 1070 cur != &area->used_space.leaf_head; cur = cur->next) { 1065 1071 btree_node_t *node 1066 1072 = list_get_instance(cur, btree_node_t, leaf_link); … … 1088 1094 size_t frame_idx = 0; 1089 1095 1090 list_foreach(area->used_space.leaf_list, cur) { 1096 for (cur = area->used_space.leaf_head.next; 1097 cur != &area->used_space.leaf_head; cur = cur->next) { 1091 1098 btree_node_t *node = list_get_instance(cur, btree_node_t, 1092 1099 leaf_link); … … 1140 1147 frame_idx = 0; 1141 1148 1142 list_foreach(area->used_space.leaf_list, cur) { 1149 for (cur = area->used_space.leaf_head.next; 1150 cur != &area->used_space.leaf_head; cur = cur->next) { 1143 1151 btree_node_t *node 1144 1152 = list_get_instance(cur, btree_node_t, leaf_link); … … 1326 1334 1327 1335 list_append(&old_as->inactive_as_with_asid_link, 1328 &inactive_as_with_asid_ list);1336 &inactive_as_with_asid_head); 1329 1337 } 1330 1338 … … 2019 2027 2020 2028 /* Eventually check the addresses behind each area */ 2021 li st_foreach(AS->as_area_btree.leaf_list, cur) {2022 if (ret != 0)2023 break;2024 2029 link_t *cur; 2030 for (cur = AS->as_area_btree.leaf_head.next; 2031 (ret == 0) && (cur != &AS->as_area_btree.leaf_head); 2032 cur = cur->next) { 2025 2033 btree_node_t *node = 2026 2034 list_get_instance(cur, btree_node_t, leaf_link); … … 2064 2072 2065 2073 size_t area_cnt = 0; 2066 2067 list_foreach(as->as_area_btree.leaf_list, cur) { 2074 link_t *cur; 2075 2076 for (cur = as->as_area_btree.leaf_head.next; 2077 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2068 2078 btree_node_t *node = 
2069 2079 list_get_instance(cur, btree_node_t, leaf_link); … … 2078 2088 size_t area_idx = 0; 2079 2089 2080 list_foreach(as->as_area_btree.leaf_list, cur) { 2090 for (cur = as->as_area_btree.leaf_head.next; 2091 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2081 2092 btree_node_t *node = 2082 2093 list_get_instance(cur, btree_node_t, leaf_link); … … 2114 2125 2115 2126 /* Print out info about address space areas */ 2116 list_foreach(as->as_area_btree.leaf_list, cur) { 2127 link_t *cur; 2128 for (cur = as->as_area_btree.leaf_head.next; 2129 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2117 2130 btree_node_t *node 2118 2131 = list_get_instance(cur, btree_node_t, leaf_link); -
kernel/generic/src/mm/backend_anon.c
rbd08239 r313775b 97 97 void anon_share(as_area_t *area) 98 98 { 99 link_t *cur; 100 99 101 ASSERT(mutex_locked(&area->as->lock)); 100 102 ASSERT(mutex_locked(&area->lock)); … … 104 106 */ 105 107 mutex_lock(&area->sh_info->lock); 106 list_foreach(area->used_space.leaf_list, cur) { 108 for (cur = area->used_space.leaf_head.next; 109 cur != &area->used_space.leaf_head; cur = cur->next) { 107 110 btree_node_t *node; 108 111 unsigned int i; -
kernel/generic/src/mm/backend_elf.c
rbd08239 r313775b 139 139 */ 140 140 if (area->flags & AS_AREA_WRITE) { 141 node = list_get_instance( list_first(&area->used_space.leaf_list),141 node = list_get_instance(area->used_space.leaf_head.next, 142 142 btree_node_t, leaf_link); 143 143 } else { … … 153 153 */ 154 154 mutex_lock(&area->sh_info->lock); 155 for (cur = &node->leaf_link; cur != &area->used_space.leaf_ list.head;155 for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; 156 156 cur = cur->next) { 157 157 unsigned int i; -
kernel/generic/src/mm/buddy.c
rbd08239 r313775b 82 82 * Use memory after our own structure. 83 83 */ 84 b->order = (li st_t *) (&b[1]);84 b->order = (link_t *) (&b[1]); 85 85 86 86 for (i = 0; i <= max_order; i++) … … 176 176 * the request can be immediatelly satisfied. 177 177 */ 178 res = list_first(&b->order[i]);179 if (res != NULL) {178 if (!list_empty(&b->order[i])) { 179 res = b->order[i].next; 180 180 list_remove(res); 181 181 b->op->mark_busy(b, res); -
kernel/generic/src/mm/slab.c
rbd08239 r313775b 317 317 spinlock_lock(&cache->slablock); 318 318 } else { 319 slab = list_get_instance( list_first(&cache->partial_slabs),320 slab_t,link);319 slab = list_get_instance(cache->partial_slabs.next, slab_t, 320 link); 321 321 list_remove(&slab->link); 322 322 } … … 360 360 if (!list_empty(&cache->magazines)) { 361 361 if (first) 362 cur = list_first(&cache->magazines);362 cur = cache->magazines.next; 363 363 else 364 cur = list_last(&cache->magazines);364 cur = cache->magazines.prev; 365 365 366 366 mag = list_get_instance(cur, slab_magazine_t, link); … … 812 812 813 813 size_t frames = 0; 814 list_foreach(slab_cache_list, cur) { 814 link_t *cur; 815 for (cur = slab_cache_list.next; cur != &slab_cache_list; 816 cur = cur->next) { 815 817 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 816 818 frames += _slab_reclaim(cache, flags); … … 859 861 link_t *cur; 860 862 size_t i; 861 for (i = 0, cur = slab_cache_list. head.next;862 (i < skip) && (cur != &slab_cache_list .head);863 for (i = 0, cur = slab_cache_list.next; 864 (i < skip) && (cur != &slab_cache_list); 863 865 i++, cur = cur->next); 864 866 865 if (cur == &slab_cache_list .head) {867 if (cur == &slab_cache_list) { 866 868 irq_spinlock_unlock(&slab_cache_lock, true); 867 869 break; … … 938 940 irq_spinlock_lock(&slab_cache_lock, false); 939 941 940 list_foreach(slab_cache_list, cur) { 942 link_t *cur; 943 for (cur = slab_cache_list.next; cur != &slab_cache_list; 944 cur = cur->next) { 941 945 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 942 946 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != -
kernel/generic/src/proc/scheduler.c
rbd08239 r313775b 237 237 * Take the first thread from the queue. 238 238 */ 239 thread_t *thread = list_get_instance(240 list_ first(&CPU->rq[i].rq), thread_t, rq_link);239 thread_t *thread = 240 list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link); 241 241 list_remove(&thread->rq_link); 242 242 … … 273 273 static void relink_rq(int start) 274 274 { 275 li st_t list;276 277 list_initialize(& list);275 link_t head; 276 277 list_initialize(&head); 278 278 irq_spinlock_lock(&CPU->lock, false); 279 279 … … 284 284 285 285 irq_spinlock_lock(&CPU->rq[i + 1].lock, false); 286 list_concat(& list, &CPU->rq[i + 1].rq);286 list_concat(&head, &CPU->rq[i + 1].rq_head); 287 287 size_t n = CPU->rq[i + 1].n; 288 288 CPU->rq[i + 1].n = 0; … … 292 292 293 293 irq_spinlock_lock(&CPU->rq[i].lock, false); 294 list_concat(&CPU->rq[i].rq , &list);294 list_concat(&CPU->rq[i].rq_head, &head); 295 295 CPU->rq[i].n += n; 296 296 irq_spinlock_unlock(&CPU->rq[i].lock, false); … … 616 616 617 617 /* Search rq from the back */ 618 link_t *link = cpu->rq[rq].rq .head.prev;619 620 while (link != &(cpu->rq[rq].rq .head)) {618 link_t *link = cpu->rq[rq].rq_head.prev; 619 620 while (link != &(cpu->rq[rq].rq_head)) { 621 621 thread = (thread_t *) list_get_instance(link, 622 622 thread_t, rq_link); … … 740 740 741 741 printf("\trq[%u]: ", i); 742 list_foreach(cpus[cpu].rq[i].rq, cur) { 743 thread_t *thread = list_get_instance(cur, 744 thread_t, rq_link); 742 link_t *cur; 743 for (cur = cpus[cpu].rq[i].rq_head.next; 744 cur != &(cpus[cpu].rq[i].rq_head); 745 cur = cur->next) { 746 thread_t *thread = list_get_instance(cur, thread_t, rq_link); 745 747 printf("%" PRIu64 "(%s) ", thread->tid, 746 748 thread_states[thread->state]); -
kernel/generic/src/proc/task.c
rbd08239 r313775b 155 155 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE); 156 156 157 list_initialize(&task->th reads);158 list_initialize(&task->sync_box es);157 list_initialize(&task->th_head); 158 list_initialize(&task->sync_box_head); 159 159 160 160 ipc_answerbox_init(&task->answerbox, task); … … 435 435 436 436 /* Current values of threads */ 437 list_foreach(task->threads, cur) { 437 link_t *cur; 438 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 438 439 thread_t *thread = list_get_instance(cur, thread_t, th_link); 439 440 … … 467 468 */ 468 469 469 list_foreach(task->threads, cur) { 470 link_t *cur; 471 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 470 472 thread_t *thread = list_get_instance(cur, thread_t, th_link); 471 473 bool sleeping = false; -
kernel/generic/src/proc/thread.c
rbd08239 r313775b 260 260 */ 261 261 262 list_append(&thread->rq_link, &cpu->rq[i].rq );262 list_append(&thread->rq_link, &cpu->rq[i].rq_head); 263 263 cpu->rq[i].n++; 264 264 irq_spinlock_unlock(&(cpu->rq[i].lock), true); … … 423 423 atomic_inc(&task->lifecount); 424 424 425 list_append(&thread->th_link, &task->th reads);425 list_append(&thread->th_link, &task->th_head); 426 426 427 427 irq_spinlock_pass(&task->lock, &threads_lock); -
kernel/generic/src/synch/futex.c
rbd08239 r313775b 272 272 void futex_cleanup(void) 273 273 { 274 link_t *cur; 275 274 276 mutex_lock(&futex_ht_lock); 275 277 mutex_lock(&TASK->futexes_lock); 276 278 277 list_foreach(TASK->futexes.leaf_list, cur) { 279 for (cur = TASK->futexes.leaf_head.next; 280 cur != &TASK->futexes.leaf_head; cur = cur->next) { 278 281 btree_node_t *node; 279 282 unsigned int i; -
kernel/generic/src/synch/waitq.c
rbd08239 r313775b 69 69 { 70 70 irq_spinlock_initialize(&wq->lock, "wq.lock"); 71 list_initialize(&wq-> sleepers);71 list_initialize(&wq->head); 72 72 wq->missed_wakeups = 0; 73 73 } … … 196 196 irq_spinlock_lock(&wq->lock, true); 197 197 198 if (!list_empty(&wq->sleepers)) { 199 thread_t *thread = list_get_instance(list_first(&wq->sleepers), 200 thread_t, wq_link); 198 if (!list_empty(&wq->head)) { 199 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 201 200 202 201 irq_spinlock_lock(&thread->lock, false); … … 408 407 } 409 408 410 list_append(&THREAD->wq_link, &wq-> sleepers);409 list_append(&THREAD->wq_link, &wq->head); 411 410 412 411 /* … … 465 464 466 465 loop: 467 if (list_empty(&wq-> sleepers)) {466 if (list_empty(&wq->head)) { 468 467 wq->missed_wakeups++; 469 468 if ((count) && (mode == WAKEUP_ALL)) … … 474 473 475 474 count++; 476 thread_t *thread = list_get_instance(list_first(&wq->sleepers), 477 thread_t, wq_link); 475 thread_t *thread = list_get_instance(wq->head.next, thread_t, wq_link); 478 476 479 477 /* -
kernel/generic/src/sysinfo/stats.c
rbd08239 r313775b 173 173 174 174 /* Walk the B+ tree and count pages */ 175 list_foreach(as->as_area_btree.leaf_list, cur) { 175 link_t *cur; 176 for (cur = as->as_area_btree.leaf_head.next; 177 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 176 178 btree_node_t *node = 177 179 list_get_instance(cur, btree_node_t, leaf_link); … … 216 218 217 219 /* Walk the B+ tree and count pages */ 218 list_foreach(as->as_area_btree.leaf_list, cur) { 220 link_t *cur; 221 for (cur = as->as_area_btree.leaf_head.next; 222 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 219 223 btree_node_t *node = 220 224 list_get_instance(cur, btree_node_t, leaf_link); -
kernel/generic/src/time/clock.c
rbd08239 r313775b 163 163 164 164 link_t *cur; 165 while ((cur = list_first(&CPU->timeout_active_list)) != NULL) { 166 timeout_t *timeout = list_get_instance(cur, timeout_t, 167 link); 165 while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) { 166 timeout_t *timeout = list_get_instance(cur, timeout_t, link); 168 167 169 168 irq_spinlock_lock(&timeout->lock, false); -
kernel/generic/src/time/timeout.c
rbd08239 r313775b 54 54 { 55 55 irq_spinlock_initialize(&CPU->timeoutlock, "cpu.timeoutlock"); 56 list_initialize(&CPU->timeout_active_ list);56 list_initialize(&CPU->timeout_active_head); 57 57 } 58 58 … … 119 119 timeout_t *target = NULL; 120 120 link_t *cur; 121 for (cur = CPU->timeout_active_ list.head.next;122 cur != &CPU->timeout_active_ list.head; cur = cur->next) {121 for (cur = CPU->timeout_active_head.next; 122 cur != &CPU->timeout_active_head; cur = cur->next) { 123 123 target = list_get_instance(cur, timeout_t, link); 124 124 irq_spinlock_lock(&target->lock, false); … … 135 135 /* Avoid using cur->prev directly */ 136 136 link_t *prev = cur->prev; 137 list_ insert_after(&timeout->link, prev);137 list_prepend(&timeout->link, prev); 138 138 139 139 /* … … 146 146 * Decrease ticks of timeout's immediate succesor by timeout->ticks. 147 147 */ 148 if (cur != &CPU->timeout_active_ list.head) {148 if (cur != &CPU->timeout_active_head) { 149 149 irq_spinlock_lock(&target->lock, false); 150 150 target->ticks -= timeout->ticks; … … 184 184 /* 185 185 * Now we know for sure that timeout hasn't been activated yet 186 * and is lurking in timeout->cpu->timeout_active_ list.186 * and is lurking in timeout->cpu->timeout_active_head queue. 187 187 */ 188 188 189 189 link_t *cur = timeout->link.next; 190 if (cur != &timeout->cpu->timeout_active_ list.head) {190 if (cur != &timeout->cpu->timeout_active_head) { 191 191 timeout_t *tmp = list_get_instance(cur, timeout_t, link); 192 192 irq_spinlock_lock(&tmp->lock, false); -
kernel/generic/src/udebug/udebug.c
rbd08239 r313775b 406 406 407 407 /* Finish debugging of all userspace threads */ 408 list_foreach(task->threads, cur) { 408 link_t *cur; 409 for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) { 409 410 thread_t *thread = list_get_instance(cur, thread_t, th_link); 410 411 -
kernel/generic/src/udebug/udebug_ops.c
rbd08239 r313775b 196 196 /* Set udebug.active on all of the task's userspace threads. */ 197 197 198 list_foreach(TASK->threads, cur) { 198 link_t *cur; 199 for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { 199 200 thread_t *thread = list_get_instance(cur, thread_t, th_link); 200 201 … … 389 390 390 391 /* FIXME: make sure the thread isn't past debug shutdown... */ 391 list_foreach(TASK->threads, cur) { 392 link_t *cur; 393 for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { 392 394 thread_t *thread = list_get_instance(cur, thread_t, th_link); 393 395 -
uspace/lib/usbhid/src/hiddescriptor.c
rbd08239 r313775b 91 91 usb_hid_report_t *report, usb_hid_report_path_t *cmp_path) { 92 92 93 link_t *path_it = report->collection_paths. next;93 link_t *path_it = report->collection_paths.prev->next; 94 94 usb_hid_report_path_t *path = NULL; 95 95 … … 513 513 514 514 usb_hid_report_path_free(report_item->usage_path); 515 list_initialize(&report_item->usage_path->link); 515 516 list_remove (stack.next); 516 517
Note: See TracChangeset for help on using the changeset viewer.