source: mainline/kernel/generic/src/time/clock.c@ 742f95ec

ticket/834-toolchain-update topic/msim-upgrade topic/simplify-dev-export
Last change on this file since 742f95ec was 742f95ec, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 3 years ago

Replace timeout→ticks with timeout→deadline

Instead of counting down in the structure, use a fixed deadline number.
This simplifies the code significantly.

  • Property mode set to 100644
File size: 5.8 KB
RevLine 
[f761f1eb]1/*
[df4ed85]2 * Copyright (c) 2001-2004 Jakub Jermar
[f761f1eb]3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
[e88eb48]29/** @addtogroup kernel_time
[b45c443]30 * @{
31 */
32
[cf26ba9]33/**
[b45c443]34 * @file
[da1bafb]35 * @brief High-level clock interrupt handler.
[cf26ba9]36 *
37 * This file contains the clock() function which is the source
38 * of preemption. It is also responsible for executing expired
39 * timeouts.
[da1bafb]40 *
[cf26ba9]41 */
[da1bafb]42
[f761f1eb]43#include <time/clock.h>
44#include <time/timeout.h>
45#include <config.h>
46#include <synch/spinlock.h>
47#include <synch/waitq.h>
[b2e121a]48#include <halt.h>
[f761f1eb]49#include <proc/scheduler.h>
50#include <cpu.h>
51#include <arch.h>
[5c9a08b]52#include <adt/list.h>
[23684b7]53#include <atomic.h>
[1084a784]54#include <proc/thread.h>
[d6e5cbc]55#include <sysinfo/sysinfo.h>
[05882233]56#include <barrier.h>
[f8ddd17]57#include <mm/frame.h>
58#include <ddi/ddi.h>
[d0c82c5]59#include <arch/cycle.h>
[f8ddd17]60
/** Kernel-side pointer to the uptime page shared (read-only) with userspace. */
uptime_t *uptime;

/** Physical memory area backing the real time clock page; registered so
 * userspace can physmem_map() it (see clock_counter_init()). */
static parea_t clock_parea;
[d6e5cbc]66
67/** Initialize realtime clock counter
68 *
69 * The applications (and sometimes kernel) need to access accurate
[1b20da0]70 * information about realtime data. We allocate 1 page with these
[d6e5cbc]71 * data and update it periodically.
[da1bafb]72 *
[d6e5cbc]73 */
74void clock_counter_init(void)
75{
[482f968]76 uintptr_t faddr = frame_alloc(1, FRAME_LOWMEM | FRAME_ATOMIC, 0);
[8cbf1c3]77 if (faddr == 0)
[f651e80]78 panic("Cannot allocate page for clock.");
[a35b458]79
[4b662f8c]80 uptime = (uptime_t *) PA2KA(faddr);
[a35b458]81
[4b662f8c]82 uptime->seconds1 = 0;
83 uptime->seconds2 = 0;
[9dae191e]84 uptime->useconds = 0;
[a35b458]85
[6f7071b]86 ddi_parea_init(&clock_parea);
[8cbf1c3]87 clock_parea.pbase = faddr;
[f8ddd17]88 clock_parea.frames = 1;
[d7533c7]89 clock_parea.unpriv = true;
[b366a6f4]90 clock_parea.mapped = false;
[f8ddd17]91 ddi_parea_register(&clock_parea);
[a35b458]92
[f8ddd17]93 /*
94 * Prepare information for the userspace so that it can successfully
95 * physmem_map() the clock_parea.
[da1bafb]96 *
[f8ddd17]97 */
[96b02eb9]98 sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
[d6e5cbc]99}
100
/** Update public counters
 *
 * Update it only on first processor
 *
 * The seconds1/useconds/seconds2 triple is a writer-side sequence
 * protocol: readers retry until seconds1 == seconds2, so the order of
 * the three stores (enforced by the write barriers) must not change.
 */
static void clock_update_counters(uint64_t current_tick)
{
	if (CPU->id == 0) {
		/* Total microseconds of uptime represented by current_tick. */
		uint64_t usec = (1000000 / HZ) * current_tick;

		sysarg_t secs = usec / 1000000;
		sysarg_t usecs = usec % 1000000;

		uptime->seconds1 = secs;
		write_barrier();
		uptime->useconds = usecs;
		write_barrier();
		uptime->seconds2 = secs;
	}
}
[f761f1eb]120
[d0c82c5]121static void cpu_update_accounting(void)
122{
123 irq_spinlock_lock(&CPU->lock, false);
124 uint64_t now = get_cycle();
125 CPU->busy_cycles += now - CPU->last_cycle;
126 CPU->last_cycle = now;
127 irq_spinlock_unlock(&CPU->lock, false);
128}
129
/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	/* Catch up for ticks that were missed while interrupts were off. */
	size_t missed_clock_ticks = CPU->missed_clock_ticks;
	CPU->current_clock_tick += missed_clock_ticks + 1;
	uint64_t current_clock_tick = CPU->current_clock_tick;
	clock_update_counters(current_clock_tick);

	/* Account CPU usage */
	cpu_update_accounting();

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 *
	 */
	size_t i;
	for (i = 0; i <= missed_clock_ticks; i++) {
		/* Update counters and accounting */

		cpu_update_accounting();

		irq_spinlock_lock(&CPU->timeoutlock, false);

		/*
		 * The active list is kept sorted by deadline, so we can stop
		 * at the first timeout that has not expired yet.
		 */
		link_t *cur;
		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
			timeout_t *timeout = list_get_instance(cur, timeout_t,
			    link);

			irq_spinlock_lock(&timeout->lock, false);
			if (current_clock_tick <= timeout->deadline) {
				/* Not yet expired; nothing further can be. */
				irq_spinlock_unlock(&timeout->lock, false);
				break;
			}

			/* Detach and capture the callback before unlocking. */
			list_remove(cur);
			timeout_handler_t handler = timeout->handler;
			void *arg = timeout->arg;
			timeout_reinitialize(timeout);

			/*
			 * Drop both locks before invoking the handler so it
			 * may itself register or clear timeouts without
			 * deadlocking.
			 */
			irq_spinlock_unlock(&timeout->lock, false);
			irq_spinlock_unlock(&CPU->timeoutlock, false);

			handler(arg);

			irq_spinlock_lock(&CPU->timeoutlock, false);
		}

		irq_spinlock_unlock(&CPU->timeoutlock, false);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 *
	 */

	if (THREAD) {
		uint64_t ticks;

		irq_spinlock_lock(&CPU->lock, false);
		CPU->needs_relink += 1 + missed_clock_ticks;
		irq_spinlock_unlock(&CPU->lock, false);

		/* Burn the elapsed ticks off THREAD's quantum, clamping at 0. */
		irq_spinlock_lock(&THREAD->lock, false);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		irq_spinlock_unlock(&THREAD->lock, false);

		/*
		 * NOTE(review): ticks here is the quantum value read before
		 * the decrement above, so preemption happens one tick after
		 * the quantum reaches zero.
		 */
		if (ticks == 0 && PREEMPTION_ENABLED) {
			scheduler();
#ifdef CONFIG_UDEBUG
			/*
			 * Give udebug chance to stop the thread
			 * before it begins executing userspace code.
			 */
			istate_t *istate = THREAD->udebug.uspace_state;
			if ((istate) && (istate_from_uspace(istate)))
				udebug_before_thread_runs();
#endif
		}
	}
}
[b45c443]223
[1bb2e7a]224/** @}
[b45c443]225 */
Note: See TracBrowser for help on using the repository browser.