/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup kernel_time
 * @{
 */

/**
 * @file
 * @brief High-level clock interrupt handler.
 *
 * This file contains the clock() function which is the source
 * of preemption. It is also responsible for executing expired
 * timeouts.
 */

#include <time/clock.h>
#include <time/timeout.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <proc/scheduler.h>
#include <cpu.h>
#include <arch.h>
#include <adt/list.h>
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>
#include <arch/cycle.h>

/* Pointer to variable with uptime */
uptime_t *uptime;

/** Physical memory area of the real time clock */
static parea_t clock_parea;

/** Initialize realtime clock counter
 *
 * Applications (and sometimes the kernel) need access to accurate
 * real-time information. We allocate one page for this data and
 * update it periodically.
 */
void clock_counter_init(void)
{
	uintptr_t faddr = frame_alloc(1, FRAME_LOWMEM | FRAME_ATOMIC, 0);
	if (faddr == 0)
		panic("Cannot allocate page for clock.");

	uptime = (uptime_t *) PA2KA(faddr);

	/* Start from a defined state; the frame is not guaranteed zeroed. */
	uptime->seconds1 = 0;
	uptime->seconds2 = 0;
	uptime->useconds = 0;

	ddi_parea_init(&clock_parea);
	clock_parea.pbase = faddr;
	clock_parea.frames = 1;
	clock_parea.unpriv = true;
	clock_parea.mapped = false;
	ddi_parea_register(&clock_parea);

	/*
	 * Prepare information for userspace so that it can successfully
	 * physmem_map() the clock_parea.
	 */
	sysinfo_set_item_val("clock.faddr", NULL, (sysarg_t) faddr);
}
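
/*
 * For illustration only (not compiled here): a rough sketch of the
 * userspace side, assuming the libc sysinfo_get_value() and physmem_map()
 * interfaces (exact signatures and flags may differ between revisions):
 *
 *	sysarg_t faddr;
 *	if (sysinfo_get_value("clock.faddr", &faddr) != EOK)
 *		return;
 *
 *	uptime_t *uptime;
 *	if (physmem_map(faddr, 1, AS_AREA_READ | AS_AREA_CACHEABLE,
 *	    (void **) &uptime) != EOK)
 *		return;
 */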

/** Update public counters
 *
 * Update them only on the first processor.
 */
static void clock_update_counters(uint64_t current_tick)
{
	if (CPU->id == 0) {
		uint64_t usec = (1000000 / HZ) * current_tick;

		sysarg_t secs = usec / 1000000;
		sysarg_t usecs = usec % 1000000;

		/*
		 * Write seconds1, then useconds, then seconds2, with write
		 * barriers in between. A reader that sees equal seconds1 and
		 * seconds2 around its read of useconds has observed a
		 * consistent snapshot.
		 */
		uptime->seconds1 = secs;
		write_barrier();
		uptime->useconds = usecs;
		write_barrier();
		uptime->seconds2 = secs;
	}
}
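
/*
 * For illustration only (not compiled here): the reader side of the
 * snapshot protocol above. With hypothetical names and assuming a
 * read_barrier() counterpart, a consumer of the shared uptime page could
 * obtain a consistent (seconds, microseconds) pair without locking:
 *
 *	sysarg_t secs, usecs;
 *	do {
 *		secs = uptime->seconds2;
 *		read_barrier();
 *		usecs = uptime->useconds;
 *		read_barrier();
 *	} while (secs != uptime->seconds1);
 */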

/** Update per-CPU accounting of busy cycles */
static void cpu_update_accounting(void)
{
	irq_spinlock_lock(&CPU->lock, false);
	uint64_t now = get_cycle();
	CPU->busy_cycles += now - CPU->last_cycle;
	CPU->last_cycle = now;
	irq_spinlock_unlock(&CPU->lock, false);
}
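
/*
 * For illustration only (not compiled here): the timeout_active_list that
 * clock() drains below is fed by the timeout API from <time/timeout.h>.
 * A sketch of arming a one-shot timeout, assuming the usual
 * timeout_initialize()/timeout_register() interface (time in microseconds;
 * exact signatures may differ between revisions):
 *
 *	static void my_handler(void *arg)
 *	{
 *		// Runs from clock() with no spinlocks held.
 *	}
 *
 *	timeout_t t;
 *	timeout_initialize(&t);
 *	timeout_register(&t, 1000000, my_handler, NULL);
 */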

/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and performs preemptive scheduling.
 */
void clock(void)
{
	size_t missed_clock_ticks = CPU->missed_clock_ticks;
	CPU->current_clock_tick += missed_clock_ticks + 1;
	uint64_t current_clock_tick = CPU->current_clock_tick;
	clock_update_counters(current_clock_tick);

	/* Account CPU usage */
	cpu_update_accounting();

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	for (size_t i = 0; i <= missed_clock_ticks; i++) {
		/* Update counters and accounting */
		clock_update_counters(current_clock_tick);
		cpu_update_accounting();

		irq_spinlock_lock(&CPU->timeoutlock, false);

		link_t *cur;
		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
			timeout_t *timeout = list_get_instance(cur, timeout_t,
			    link);

			irq_spinlock_lock(&timeout->lock, false);
			if (current_clock_tick <= timeout->deadline) {
				irq_spinlock_unlock(&timeout->lock, false);
				break;
			}

			list_remove(cur);
			timeout_handler_t handler = timeout->handler;
			void *arg = timeout->arg;
			timeout_reinitialize(timeout);

			irq_spinlock_unlock(&timeout->lock, false);
			irq_spinlock_unlock(&CPU->timeoutlock, false);

			/* Run the handler with both locks dropped. */
			handler(arg);

			irq_spinlock_lock(&CPU->timeoutlock, false);
		}

		irq_spinlock_unlock(&CPU->timeoutlock, false);
	}
	CPU->missed_clock_ticks = 0;

	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		uint64_t ticks;

		irq_spinlock_lock(&CPU->lock, false);
		CPU->needs_relink += 1 + missed_clock_ticks;
		irq_spinlock_unlock(&CPU->lock, false);

		irq_spinlock_lock(&THREAD->lock, false);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		irq_spinlock_unlock(&THREAD->lock, false);

		if (ticks == 0 && PREEMPTION_ENABLED) {
			scheduler();

#ifdef CONFIG_UDEBUG
			/*
			 * Give udebug a chance to stop the thread
			 * before it begins executing userspace code.
			 */
			istate_t *istate = THREAD->udebug.uspace_state;
			if (istate && istate_from_uspace(istate))
				udebug_before_thread_runs();
#endif /* CONFIG_UDEBUG */
		}
	}
}

/** @}
 */