/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/tick.h>

#include "tick-internal.h"
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
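/*
 * Illustrative sketch, not part of the original file: a typical driver
 * uses clockevent_delta2ns() at init time to derive the nanosecond
 * bounds for its programmable counter.  The device and tick limits
 * below (my_evt, a 32 bit counter, a minimum of 0xf ticks) are
 * assumptions for the example only; the snippet is compiled out.
 */
#if 0
static void my_timer_init_bounds(struct clock_event_device *my_evt)
{
	/* largest programmable delta: full 32 bit counter range */
	my_evt->max_delta_ns = clockevent_delta2ns(0xffffffff, my_evt);
	/* smallest delta the assumed hardware can reliably program */
	my_evt->min_delta_ns = clockevent_delta2ns(0xf, my_evt);
}
#endif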
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}
/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}
/**
 * clockevents_program_event - Reprogram the clock event device.
 * @expires:	absolute expiry time (monotonic clock)
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      ktime_t now)
{
	unsigned long long clc;
	int64_t delta;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	delta = ktime_to_ns(ktime_sub(expires, now));

	if (delta <= 0)
		return -ETIME;

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	if (delta > dev->max_delta_ns)
		delta = dev->max_delta_ns;
	if (delta < dev->min_delta_ns)
		delta = dev->min_delta_ns;

	clc = delta * dev->mult;
	clc >>= dev->shift;

	return dev->set_next_event((unsigned long) clc, dev);
}
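/*
 * Illustrative sketch, not part of the original file: the last two
 * statements above are the inverse of clockevent_delta2ns(), i.e. the
 * fixed point conversion cycles = ns * mult >> shift.  The helper below
 * only restates that arithmetic for clarity; my_ns_to_cycles() is a
 * hypothetical name, not a kernel symbol, and the block is compiled out.
 */
#if 0
static unsigned long my_ns_to_cycles(struct clock_event_device *dev,
				     int64_t delta_ns)
{
	unsigned long long clc = (unsigned long long) delta_ns * dev->mult;

	return (unsigned long) (clc >> dev->shift);
}
#endif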
/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);

	return ret;
}
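/*
 * Illustrative sketch, not part of the original file: a listener hooks
 * into the chain with an ordinary struct notifier_block.  The callback
 * receives the CLOCK_EVT_NOTIFY_* reason and the opaque argument passed
 * with the notification.  All my_* names are placeholders and the block
 * is compiled out.
 */
#if 0
static int my_clockevents_listener(struct notifier_block *nb,
				   unsigned long reason, void *dev)
{
	if (reason == CLOCK_EVT_NOTIFY_ADD)
		pr_debug("clockevents: new device added\n");
	return NOTIFY_OK;
}

static struct notifier_block my_clockevents_nb = {
	.notifier_call = my_clockevents_listener,
};

/* ... clockevents_register_notifier(&my_clockevents_nb); ... */
#endif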
/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	}
}
/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	BUG_ON(!dev->cpumask);

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);
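/*
 * Illustrative sketch, not part of the original file: the usual
 * registration pattern in a hypothetical timer driver.  The my_* names,
 * the 32 bit counter width, the rating and the frequency handling are
 * assumptions for the example; div_sc() and clockevent_delta2ns() are
 * the real helpers from <linux/clockchips.h> and this file.  The block
 * is compiled out.
 */
#if 0
static struct clock_event_device my_clockevent = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.shift		= 32,
	.set_mode	= my_timer_set_mode,	/* hypothetical callbacks */
	.set_next_event	= my_timer_set_next_event,
};

static void __init my_timer_clockevent_init(unsigned long timer_hz)
{
	my_clockevent.mult = div_sc(timer_hz, NSEC_PER_SEC, my_clockevent.shift);
	my_clockevent.max_delta_ns = clockevent_delta2ns(0xffffffff, &my_clockevent);
	my_clockevent.min_delta_ns = clockevent_delta2ns(0xf, &my_clockevent);
	my_clockevent.cpumask = cpumask_of(smp_processor_id());

	clockevents_register_device(&my_clockevent);
}
#endif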
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}
/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
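/*
 * Illustrative sketch, not part of the original file: architecture CPU
 * hotplug code typically reports a dying CPU by passing its number as
 * the notification argument, which the CLOCK_EVT_NOTIFY_CPU_DEAD case
 * above uses to drop that CPU's unused per cpu devices.  The function
 * name is a placeholder and the block is compiled out.
 */
#if 0
static void my_arch_cpu_die_cleanup(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
}
#endif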