Changes in kernel/generic/src/time/clock.c [d0c82c5:98000fb] in mainline
File: kernel/generic/src/time/clock.c (1 edited)
Legend:
  (no prefix)  unmodified
  -            removed (present only in d0c82c5)
  +            added (present only in 98000fb)

Lines that appear both removed and re-added with identical text differ only in whitespace.
kernel/generic/src/time/clock.c
  /**
   * @file
-  * @brief
+  * @brief High-level clock interrupt handler.
   *
   * This file contains the clock() function which is the source
   * of preemption. It is also responsible for executing expired
   * timeouts.
-  *
-  */
-
+  */
+
  #include <time/clock.h>
  #include <time/timeout.h>
…
  #include <mm/frame.h>
  #include <ddi/ddi.h>
- #include <arch/cycle.h>

  /* Pointer to variable with uptime */
…
  static parea_t clock_parea;

- /** Fragment of second
-  *
-  * For updating seconds correctly.
-  *
+ /* Variable holding fragment of second, so that we would update
+  * seconds correctly
   */
  static unative_t secfrag = 0;
…
   * information about realtime data. We allocate 1 page with these
   * data and update it periodically.
-  *
   */
  void clock_counter_init(void)
  {
-     void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+     void *faddr;
+
+     faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
      if (!faddr)
          panic("Cannot allocate page for clock.");
…
      uptime->seconds1 = 0;
      uptime->seconds2 = 0;
-     uptime->useconds = 0;
-
+     uptime->useconds = 0;
+
      clock_parea.pbase = (uintptr_t) faddr;
      clock_parea.frames = 1;
      ddi_parea_register(&clock_parea);

      /*
       * Prepare information for the userspace so that it can successfully
       * physmem_map() the clock_parea.
-      *
       */
      sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
…
  }

+
  /** Update public counters
   *
   * Update it only on first processor
-  * TODO: Do we really need so many write barriers?
-  *
+  * TODO: Do we really need so many write barriers?
   */
  static void clock_update_counters(void)
…
  }

- static void cpu_update_accounting(void)
- {
-     irq_spinlock_lock(&CPU->lock, false);
-     uint64_t now = get_cycle();
-     CPU->busy_cycles += now - CPU->last_cycle;
-     CPU->last_cycle = now;
-     irq_spinlock_unlock(&CPU->lock, false);
- }
-
  /** Clock routine
   *
…
  void clock(void)
  {
+     link_t *l;
+     timeout_t *h;
+     timeout_handler_t f;
+     void *arg;
      size_t missed_clock_ticks = CPU->missed_clock_ticks;
-
-     /* Account CPU usage */
-     cpu_update_accounting();
-
+     unsigned int i;
+
      /*
       * To avoid lock ordering problems,
       * run all expired timeouts as you visit them.
-      *
       */
-     size_t i;
      for (i = 0; i <= missed_clock_ticks; i++) {
-         /* Update counters and accounting */
          clock_update_counters();
-         cpu_update_accounting();
-
-         irq_spinlock_lock(&CPU->timeoutlock, false);
-
-         link_t *cur;
-         while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-             timeout_t *timeout = list_get_instance(cur, timeout_t, link);
-
-             irq_spinlock_lock(&timeout->lock, false);
-             if (timeout->ticks-- != 0) {
-                 irq_spinlock_unlock(&timeout->lock, false);
+         spinlock_lock(&CPU->timeoutlock);
+         while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+             h = list_get_instance(l, timeout_t, link);
+             spinlock_lock(&h->lock);
+             if (h->ticks-- != 0) {
+                 spinlock_unlock(&h->lock);
                  break;
              }
-
-             list_remove(cur);
-             timeout_handler_t handler = timeout->handler;
-             void *arg = timeout->arg;
-             timeout_reinitialize(timeout);
-
-             irq_spinlock_unlock(&timeout->lock, false);
-             irq_spinlock_unlock(&CPU->timeoutlock, false);
-
-             handler(arg);
-
-             irq_spinlock_lock(&CPU->timeoutlock, false);
+             list_remove(l);
+             f = h->handler;
+             arg = h->arg;
+             timeout_reinitialize(h);
+             spinlock_unlock(&h->lock);
+             spinlock_unlock(&CPU->timeoutlock);
+
+             f(arg);
+
+             spinlock_lock(&CPU->timeoutlock);
          }
-
-         irq_spinlock_unlock(&CPU->timeoutlock, false);
+         spinlock_unlock(&CPU->timeoutlock);
      }
      CPU->missed_clock_ticks = 0;

      /*
       * Do CPU usage accounting and find out whether to preempt THREAD.
-      *
       */

      if (THREAD) {
          uint64_t ticks;

-         irq_spinlock_lock(&CPU->lock, false);
+         spinlock_lock(&CPU->lock);
          CPU->needs_relink += 1 + missed_clock_ticks;
-         irq_spinlock_unlock(&CPU->lock, false);
-
-         irq_spinlock_lock(&THREAD->lock, false);
+         spinlock_unlock(&CPU->lock);
+
+         spinlock_lock(&THREAD->lock);
          if ((ticks = THREAD->ticks)) {
              if (ticks >= 1 + missed_clock_ticks)
…
                  THREAD->ticks = 0;
          }
-         irq_spinlock_unlock(&THREAD->lock, false);
+         spinlock_unlock(&THREAD->lock);

-         if ((!ticks) && (!PREEMPTION_DISABLED)) {
+         if (!ticks && !PREEMPTION_DISABLED) {
+ #ifdef CONFIG_UDEBUG
+             istate_t *istate;
+ #endif
              scheduler();
  #ifdef CONFIG_UDEBUG
…
               * before it begins executing userspace code.
               */
-             istate_t *istate = THREAD->udebug.uspace_state;
-             if ((istate) && (istate_from_uspace(istate)))
+             istate = THREAD->udebug.uspace_state;
+             if (istate && istate_from_uspace(istate))
                  udebug_before_thread_runs();
  #endif
          }
      }
+
  }
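A note on the timeout loop in clock(): on both sides of the change, the expired timeout's handler is deliberately invoked with both the timeout's own lock and the CPU's timeoutlock released, and the list head is re-read after the lock is re-acquired, because a handler may itself register or cancel timeouts and would otherwise deadlock on timeoutlock. Below is a minimal, self-contained userspace sketch of that discipline, with POSIX mutexes standing in for the kernel spinlocks; the tick_timer type and all other names are illustrative, not HelenOS API.

#include <pthread.h>
#include <stddef.h>

typedef void (*tick_handler_t)(void *);

/* Illustrative timer record; ticks counts clock ticks to expiry. */
struct tick_timer {
    struct tick_timer *next;
    unsigned long ticks;
    tick_handler_t handler;
    void *arg;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tick_timer *queue_head;  /* ordered by expiry */

/* Called once per clock tick. */
void run_expired_timers(void)
{
    pthread_mutex_lock(&queue_lock);

    struct tick_timer *t;
    while ((t = queue_head) != NULL) {
        if (t->ticks-- != 0)
            break;  /* head has not expired yet */

        /* Unlink and copy the callback while still holding the lock. */
        queue_head = t->next;
        tick_handler_t handler = t->handler;
        void *arg = t->arg;

        /*
         * Call out with no locks held: the handler may re-register
         * a timer, which needs queue_lock. Re-read queue_head after
         * re-locking, since the list may have changed under us.
         */
        pthread_mutex_unlock(&queue_lock);
        handler(arg);
        pthread_mutex_lock(&queue_lock);
    }

    pthread_mutex_unlock(&queue_lock);
}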
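The TODO about write barriers in clock_update_counters() concerns the uptime page shared read-only with userspace: the seconds1/seconds2 pair lets a reader detect a concurrent update without taking any lock. The sketch below shows one way such a split-counter protocol can be written with C11 atomics; it illustrates the general technique under the assumption that readers cannot block the writer. The field names mirror the uptime structure, but the functions and everything else are hypothetical, not the kernel's actual code.

#include <stdatomic.h>
#include <stdint.h>

struct uptime_page {
    atomic_uint_fast32_t seconds1;
    atomic_uint_fast32_t useconds;
    atomic_uint_fast32_t seconds2;
};

/* Writer (the clock tick on one CPU): seconds1 first, seconds2 last. */
void uptime_publish(struct uptime_page *up, uint32_t sec, uint32_t usec)
{
    atomic_store_explicit(&up->seconds1, sec, memory_order_release);
    atomic_store_explicit(&up->useconds, usec, memory_order_release);
    atomic_store_explicit(&up->seconds2, sec, memory_order_release);
}

/*
 * Reader: load in the *reverse* of the writer's order and retry on
 * mismatch. If seconds2 already shows the new value, the earlier
 * stores are visible as well; if a freshly written useconds is seen
 * with a stale seconds2, the subsequent load of seconds1 is
 * guaranteed to differ, forcing a retry.
 */
void uptime_read(struct uptime_page *up, uint32_t *sec, uint32_t *usec)
{
    uint32_t s1, s2;
    do {
        s2 = atomic_load_explicit(&up->seconds2, memory_order_acquire);
        *usec = atomic_load_explicit(&up->useconds, memory_order_acquire);
        s1 = atomic_load_explicit(&up->seconds1, memory_order_acquire);
    } while (s1 != s2);
    *sec = s1;
}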
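Finally, the cpu_update_accounting() routine visible on the d0c82c5 side accounts busy time by sampling the cycle counter on each tick and accumulating the delta. Here is a self-contained sketch of the same delta-accounting idea, with the POSIX monotonic clock standing in for the kernel's get_cycle() and the per-CPU lock omitted (single-threaded illustration); the struct and function names are illustrative only.

#include <stdint.h>
#include <time.h>

struct cpu_acct {
    uint64_t busy_cycles;  /* accumulated busy "cycles" */
    uint64_t last_cycle;   /* counter value at the previous sample */
};

/* Stand-in for the kernel's get_cycle(): a monotonic nanosecond count. */
static uint64_t get_cycle(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t) ts.tv_sec * 1000000000ull + (uint64_t) ts.tv_nsec;
}

/*
 * Charge the time since the last sample to the busy counter. In a
 * real system this would also be sampled at context-switch and idle
 * boundaries so that idle time is not misattributed as busy.
 */
void cpu_update_accounting(struct cpu_acct *acct)
{
    uint64_t now = get_cycle();
    acct->busy_cycles += now - acct->last_cycle;
    acct->last_cycle = now;
}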