source: mainline/kernel/generic/src/time/clock.c@ aca4a04

Last change on this file since aca4a04 was b366a6f4, checked in by Martin Decky <martin@…>, 14 years ago

automatic kernel console lockout

  • the kernel automatically relinquishes access to the kernel console when userspace maps the respective physical memory area
  • kernel output before userspace initialization is currently broken on Ski (no physical memory area), but this is pending further unification
  • kernel console devices are now independent (there is no longer a system-wide "silent" variable), thus with multiple devices the kernel console and userspace output might be usable at the same time
Property mode set to 100644
File size: 5.8 KB
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 *
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>
#include <arch/cycle.h>

/* Pointer to variable with uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/** Fraction of a second
 *
 * Used to update the seconds counter correctly.
 *
 */
static sysarg_t secfrag = 0;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel itself) need access to
 * accurate realtime data. We allocate one page for this data and
 * update it periodically.
 *
 */
void clock_counter_init(void)
{
	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
	if (!faddr)
		panic("Cannot allocate page for clock.");

	uptime = (uptime_t *) PA2KA(faddr);

	uptime->seconds1 = 0;
	uptime->seconds2 = 0;
	uptime->useconds = 0;

	clock_parea.pbase = (uintptr_t) faddr;
	clock_parea.frames = 1;
	clock_parea.unpriv = true;
	clock_parea.mapped = false;
	ddi_parea_register(&clock_parea);

	/*
	 * Prepare information for the userspace so that it can successfully
	 * physmem_map() the clock_parea.
	 *
	 */
	sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
}
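
/*
 * Illustrative note (the userspace side lives outside this file): a
 * userspace consumer is expected to look up the "clock.faddr" sysinfo
 * item and physmem_map() that frame to gain access to the uptime
 * structure. See clock_update_counters() for how the counters are
 * published so that readers can obtain a consistent snapshot.
 */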

/** Update public counters
 *
 * Updated only on the first processor.
 * TODO: Do we really need so many write barriers?
 *
 */
static void clock_update_counters(void)
{
	if (CPU->id == 0) {
		secfrag += 1000000 / HZ;
		if (secfrag >= 1000000) {
			secfrag -= 1000000;
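			/*
			 * Publish the new second via the seconds1/seconds2
			 * pair: comparing the two values lets a reader detect
			 * an update in progress and retry, which is why the
			 * write barriers below are needed.
			 */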
			uptime->seconds1++;
			write_barrier();
			uptime->useconds = secfrag;
			write_barrier();
			uptime->seconds2 = uptime->seconds1;
		} else
			uptime->useconds += 1000000 / HZ;
	}
}

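/** Update per-CPU accounting
 *
 * Add the cycles elapsed since the last update to the
 * busy cycle counter of the current CPU.
 *
 */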
static void cpu_update_accounting(void)
{
	irq_spinlock_lock(&CPU->lock, false);
	uint64_t now = get_cycle();
	CPU->busy_cycles += now - CPU->last_cycle;
	CPU->last_cycle = now;
	irq_spinlock_unlock(&CPU->lock, false);
}

/** Clock routine
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts are disabled). Runs expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
	size_t missed_clock_ticks = CPU->missed_clock_ticks;

	/* Account CPU usage */
	cpu_update_accounting();

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 *
	 */
	size_t i;
	for (i = 0; i <= missed_clock_ticks; i++) {
		/* Update counters and accounting */
		clock_update_counters();
		cpu_update_accounting();

		irq_spinlock_lock(&CPU->timeoutlock, false);

		link_t *cur;
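		/*
		 * The active timeout list is delta-encoded (each entry's
		 * ticks are relative to its predecessor), so only the head
		 * needs to be examined and decremented on each tick.
		 */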
		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
			timeout_t *timeout = list_get_instance(cur, timeout_t,
			    link);

			irq_spinlock_lock(&timeout->lock, false);
			if (timeout->ticks-- != 0) {
				irq_spinlock_unlock(&timeout->lock, false);
				break;
			}

			list_remove(cur);
			timeout_handler_t handler = timeout->handler;
			void *arg = timeout->arg;
			timeout_reinitialize(timeout);

			irq_spinlock_unlock(&timeout->lock, false);
			irq_spinlock_unlock(&CPU->timeoutlock, false);

			handler(arg);

			irq_spinlock_lock(&CPU->timeoutlock, false);
		}

		irq_spinlock_unlock(&CPU->timeoutlock, false);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 *
	 */

	if (THREAD) {
		uint64_t ticks;

		irq_spinlock_lock(&CPU->lock, false);
		CPU->needs_relink += 1 + missed_clock_ticks;
		irq_spinlock_unlock(&CPU->lock, false);

		irq_spinlock_lock(&THREAD->lock, false);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		irq_spinlock_unlock(&THREAD->lock, false);

		if ((!ticks) && (!PREEMPTION_DISABLED)) {
			scheduler();
#ifdef CONFIG_UDEBUG
			/*
			 * Give udebug a chance to stop the thread
			 * before it begins executing userspace code.
			 */
			istate_t *istate = THREAD->udebug.uspace_state;
			if ((istate) && (istate_from_uspace(istate)))
				udebug_before_thread_runs();
#endif
		}
	}
}

/** @}
 */