source: mainline/kernel/generic/src/time/clock.c@0b5203b

/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function, which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 *
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <halt.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>
#include <arch/cycle.h>

/* Pointer to the variable with uptime */
uptime_t *uptime;

/** Physical memory area of the real-time clock */
static parea_t clock_parea;

/** Fraction of a second
 *
 * Used for updating the seconds counter correctly.
 *
 */
static sysarg_t secfrag = 0;

/** Initialize the realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime data. We allocate one page for this data and update it
 * periodically.
 *
 */
void clock_counter_init(void)
{
	uintptr_t faddr = frame_alloc(1, FRAME_LOWMEM | FRAME_ATOMIC, 0);
	if (faddr == 0)
		panic("Cannot allocate page for clock.");

	uptime = (uptime_t *) PA2KA(faddr);

	uptime->seconds1 = 0;
	uptime->seconds2 = 0;
	uptime->useconds = 0;

	ddi_parea_init(&clock_parea);
	clock_parea.pbase = faddr;
	clock_parea.frames = 1;
	clock_parea.unpriv = true;
	clock_parea.mapped = false;
	ddi_parea_register(&clock_parea);

	/*
	 * Prepare information for the userspace so that it can successfully
	 * physmem_map() the clock_parea.
	 *
	 */
	sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
}
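
/*
 * Illustrative only (not part of this file): a minimal sketch of how a
 * userspace client might consume "clock.faddr". It assumes the libc
 * helpers sysinfo_get_value() and physmem_map() with the signatures used
 * below; consult the userspace library for the actual interface.
 *
 *	sysarg_t faddr;
 *	if (sysinfo_get_value("clock.faddr", &faddr) != EOK)
 *		return;
 *
 *	uptime_t *uptime;
 *	if (physmem_map((uintptr_t) faddr, 1,
 *	    AS_AREA_READ | AS_AREA_CACHEABLE, (void **) &uptime) != EOK)
 *		return;
 *
 * Afterwards uptime->seconds1, uptime->useconds and uptime->seconds2 can
 * be read directly; see clock_update_counters() for how they are kept
 * consistent.
 */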

/** Update public counters
 *
 * The counters are updated only on the first processor.
 * TODO: Do we really need so many write barriers?
 *
 */
static void clock_update_counters(void)
{
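	/*
	 * The seconds1/seconds2 pair is meant to let lockless readers detect
	 * a torn update: the writer increments seconds1, stores useconds and
	 * then copies seconds1 into seconds2, with write barriers in between.
	 * A reader that observes seconds1 == seconds2 has seen a consistent
	 * snapshot; otherwise it should retry.
	 */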
	if (CPU->id == 0) {
		secfrag += 1000000 / HZ;
		if (secfrag >= 1000000) {
			secfrag -= 1000000;
			uptime->seconds1++;
			write_barrier();
			uptime->useconds = secfrag;
			write_barrier();
			uptime->seconds2 = uptime->seconds1;
		} else
			uptime->useconds += 1000000 / HZ;
	}
}
129
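/** Update per-CPU busy-cycle accounting
 *
 * Charge the cycles elapsed since the last update to CPU->busy_cycles
 * and remember the current cycle count for the next update.
 *
 */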
static void cpu_update_accounting(void)
{
	irq_spinlock_lock(&CPU->lock, false);
	uint64_t now = get_cycle();
	CPU->busy_cycles += now - CPU->last_cycle;
	CPU->last_cycle = now;
	irq_spinlock_unlock(&CPU->lock, false);
}

/** Clock routine
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
	size_t missed_clock_ticks = CPU->missed_clock_ticks;

	/* Account CPU usage */
	cpu_update_accounting();

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 *
	 */
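	/*
	 * The loop below runs once for the current tick and once for every
	 * tick that was missed (i <= missed_clock_ticks), so the counters
	 * and timeouts catch up with real time.
	 */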
	size_t i;
	for (i = 0; i <= missed_clock_ticks; i++) {
		/* Update counters and accounting */
		clock_update_counters();
		cpu_update_accounting();

		irq_spinlock_lock(&CPU->timeoutlock, false);

		link_t *cur;
		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
			timeout_t *timeout = list_get_instance(cur, timeout_t,
			    link);

			irq_spinlock_lock(&timeout->lock, false);
			if (timeout->ticks-- != 0) {
				irq_spinlock_unlock(&timeout->lock, false);
				break;
			}

			list_remove(cur);
			timeout_handler_t handler = timeout->handler;
			void *arg = timeout->arg;
			timeout_reinitialize(timeout);

			irq_spinlock_unlock(&timeout->lock, false);
			irq_spinlock_unlock(&CPU->timeoutlock, false);

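			/*
			 * Both locks are dropped here so the handler can
			 * take other locks or re-register the timeout
			 * without risking a deadlock.
			 */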
			handler(arg);

			irq_spinlock_lock(&CPU->timeoutlock, false);
		}

		irq_spinlock_unlock(&CPU->timeoutlock, false);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Update the scheduling bookkeeping and find out whether to
	 * preempt THREAD.
	 *
	 */

	if (THREAD) {
		uint64_t ticks;

		irq_spinlock_lock(&CPU->lock, false);
		CPU->needs_relink += 1 + missed_clock_ticks;
		irq_spinlock_unlock(&CPU->lock, false);

		irq_spinlock_lock(&THREAD->lock, false);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		irq_spinlock_unlock(&THREAD->lock, false);

		if (ticks == 0 && PREEMPTION_ENABLED) {
			scheduler();
#ifdef CONFIG_UDEBUG
			/*
			 * Give udebug a chance to stop the thread
			 * before it begins executing userspace code.
			 */
			istate_t *istate = THREAD->udebug.uspace_state;
			if ((istate) && (istate_from_uspace(istate)))
				udebug_before_thread_runs();
#endif
		}
	}
}

/** @}
 */