/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function, which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 *
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <func.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <atomic.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>
#include <arch/cycle.h>

/* Pointer to variable with uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/** Fraction of a second
 *
 * Accumulated so that the seconds counter can be updated correctly.
 *
 */
static unative_t secfrag = 0;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * realtime data. We allocate one page for this data and update it
 * periodically.
 *
 */
void clock_counter_init(void)
{
	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
	if (!faddr)
		panic("Cannot allocate page for clock.");
	
	uptime = (uptime_t *) PA2KA(faddr);
	
	uptime->seconds1 = 0;
	uptime->seconds2 = 0;
	uptime->useconds = 0;
	
	clock_parea.pbase = (uintptr_t) faddr;
	clock_parea.frames = 1;
	ddi_parea_register(&clock_parea);
	
	/*
	 * Prepare information for the userspace so that it can successfully
	 * physmem_map() the clock_parea.
	 *
	 */
	sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
	sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}

/** Update public counters
 *
 * Update them only on the first processor.
 * TODO: Do we really need so many write barriers?
 *
 */
static void clock_update_counters(void)
{
	if (CPU->id == 0) {
		secfrag += 1000000 / HZ;
		if (secfrag >= 1000000) {
			secfrag -= 1000000;
			uptime->seconds1++;
			write_barrier();
			uptime->useconds = secfrag;
			write_barrier();
			uptime->seconds2 = uptime->seconds1;
		} else
			uptime->useconds += 1000000 / HZ;
	}
}
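
/*
 * Illustrative reader-side sketch (not used by the kernel itself): the
 * seconds1/seconds2 pair and the write barriers above form a tiny
 * seqlock-like protocol. A consumer of the exported uptime page could
 * obtain a consistent (seconds, useconds) snapshot roughly as follows,
 * assuming a read_barrier() that pairs with write_barrier():
 *
 *	unative_t secs;
 *	unative_t usecs;
 *	do {
 *		secs = uptime->seconds2;
 *		read_barrier();
 *		usecs = uptime->useconds;
 *		read_barrier();
 *	} while (secs != uptime->seconds1);
 *
 * If seconds1 and seconds2 differ, a second rollover was in progress and
 * the reader simply retries.
 */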
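/** Update per-CPU cycle accounting
 *
 * Charge the cycles that elapsed since the last sample to the
 * current CPU as busy cycles.
 *
 */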
static void cpu_update_accounting(void)
{
	irq_spinlock_lock(&CPU->lock, false);
	uint64_t now = get_cycle();
	CPU->busy_cycles += now - CPU->last_cycle;
	CPU->last_cycle = now;
	irq_spinlock_unlock(&CPU->lock, false);
}

/** Clock routine
 *
 * Clock routine executed from the clock interrupt handler
 * (assuming interrupts_disable()'d). Executes expired timeouts
 * and performs preemptive scheduling.
 *
 */
void clock(void)
{
	size_t missed_clock_ticks = CPU->missed_clock_ticks;
	
	/* Account CPU usage */
	cpu_update_accounting();
	
	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 *
	 */
	size_t i;
	for (i = 0; i <= missed_clock_ticks; i++) {
		/* Update counters and accounting */
		clock_update_counters();
		cpu_update_accounting();
		
		irq_spinlock_lock(&CPU->timeoutlock, false);
		
		link_t *cur;
		while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
			timeout_t *timeout = list_get_instance(cur, timeout_t, link);
			
			irq_spinlock_lock(&timeout->lock, false);
			if (timeout->ticks-- != 0) {
				irq_spinlock_unlock(&timeout->lock, false);
				break;
			}
			
			list_remove(cur);
			timeout_handler_t handler = timeout->handler;
			void *arg = timeout->arg;
			timeout_reinitialize(timeout);
			
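			/*
			 * Both locks are dropped before the handler runs:
			 * the handler may take other locks of its own, so
			 * keeping timeoutlock held across the call could
			 * reintroduce the lock ordering problems mentioned
			 * above.
			 */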
			irq_spinlock_unlock(&timeout->lock, false);
			irq_spinlock_unlock(&CPU->timeoutlock, false);
			
			handler(arg);
			
			irq_spinlock_lock(&CPU->timeoutlock, false);
		}
		
		irq_spinlock_unlock(&CPU->timeoutlock, false);
	}
	CPU->missed_clock_ticks = 0;
	
	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 *
	 */
	
	if (THREAD) {
		uint64_t ticks;
		
		irq_spinlock_lock(&CPU->lock, false);
		CPU->needs_relink += 1 + missed_clock_ticks;
		irq_spinlock_unlock(&CPU->lock, false);
		
		irq_spinlock_lock(&THREAD->lock, false);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		irq_spinlock_unlock(&THREAD->lock, false);
		
		if ((!ticks) && (!PREEMPTION_DISABLED)) {
			scheduler();
#ifdef CONFIG_UDEBUG
			/*
			 * Give udebug a chance to stop the thread
			 * before it begins executing userspace code.
			 */
			istate_t *istate = THREAD->udebug.uspace_state;
			if ((istate) && (istate_from_uspace(istate)))
				udebug_before_thread_runs();
#endif
		}
	}
}
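
/*
 * For context (illustrative only): the timeouts processed above are queued
 * by other kernel code through the interface declared in <time/timeout.h>.
 * Assuming the usual timeout_initialize()/timeout_register() calls, with
 * the delay given in microseconds and my_handler being some hypothetical
 * timeout_handler_t, a caller would do roughly:
 *
 *	timeout_t to;
 *
 *	timeout_initialize(&to);
 *	timeout_register(&to, 1000000, my_handler, NULL);
 *
 * timeout_register() converts the delay into the per-CPU tick count that
 * clock() decrements, and clock() eventually invokes my_handler(NULL).
 */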

/** @}
 */