/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA  02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

void timecounter_init(struct timecounter *tc,
		      const struct cyclecounter *cc,
		      u64 start_tstamp)
{
	tc->cc = cc;
	tc->cycle_last = cc->read(cc);
	tc->nsec = start_tstamp;
}
EXPORT_SYMBOL_GPL(timecounter_init);

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:		Pointer to time counter
 *
 * When the underlying cycle counter runs over, this will be handled
 * correctly as long as it does not run over more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
	cycle_t cycle_now, cycle_delta;
	u64 ns_offset;

	/* read cycle counter: */
	cycle_now = tc->cc->read(tc->cc);

	/* calculate the delta since the last timecounter_read_delta(): */
	cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

	/* convert to nanoseconds: */
	ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

	/* update time stamp of timecounter_read_delta() call: */
	tc->cycle_last = cycle_now;

	return ns_offset;
}

u64 timecounter_read(struct timecounter *tc)
{
	u64 nsec;

	/* increment time by nanoseconds since last call */
	nsec = timecounter_read_delta(tc);
	nsec += tc->nsec;
	tc->nsec = nsec;

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_read);

u64 timecounter_cyc2time(struct timecounter *tc,
			 cycle_t cycle_tstamp)
{
	u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
	u64 nsec;

	/*
	 * Instead of always treating cycle_tstamp as more recent
	 * than tc->cycle_last, detect when it is too far in the
	 * future and treat it as old time stamp instead.
	 */
	if (cycle_delta > tc->cc->mask / 2) {
		cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
		nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
	} else {
		nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
	}

	return nsec;
}
EXPORT_SYMBOL_GPL(timecounter_cyc2time);

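/*
 * Example (illustrative sketch, not part of the original file): a driver
 * with a free-running 32-bit hardware counter can layer a timecounter on
 * top of a cyclecounter.  my_counter_read(), my_counter_base, initial_ns
 * and hw_tstamp are hypothetical; the mult/shift pair is the 1 MHz
 * example computed by clocks_calc_mult_shift() below (ns = cycles * 1000).
 *
 *	static cycle_t my_counter_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t) readl(my_counter_base);
 *	}
 *
 *	static struct cyclecounter my_cc = {
 *		.read	= my_counter_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.mult	= 4194304000U,
 *		.shift	= 22,
 *	};
 *	static struct timecounter my_tc;
 *
 *	timecounter_init(&my_tc, &my_cc, initial_ns);
 *	ns = timecounter_read(&my_tc);
 *	ns = timecounter_cyc2time(&my_tc, hw_tstamp);
 *
 * timecounter_read() returns nanoseconds elapsed since initial_ns and
 * stays correct as long as it runs at least once per counter wrap;
 * timecounter_cyc2time() converts a raw hardware timestamp (e.g. from a
 * packet timestamping unit) into the same time base.
 */
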
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult:	pointer to mult variable
 * @shift:	pointer to shift variable
 * @from:	frequency to convert from
 * @to:		frequency to convert to
 * @minsec:	guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @minsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the @minsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

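/*
 * Worked example (illustrative): for a 1 MHz counter converted to
 * nanoseconds with a guaranteed conversion range of 600 seconds,
 *
 *	clocks_calc_mult_shift(&mult, &shift, 1000000, NSEC_PER_SEC, 600);
 *
 * the loop settles on shift = 22 and mult = 1000 << 22 = 4194304000,
 * so cyc2ns(c) = (c * mult) >> shift = c * 1000 exactly, and c * mult
 * stays below 2^63 for well beyond the requested 600 seconds.
 */
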
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * If kthread_run fails the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;
	if (finished_booting)
		schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
	printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
	       cs->name, delta);
	__clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:		clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		local_irq_disable();
		csnow = cs->read(cs);
		wdnow = watchdog->read(watchdog);
		local_irq_enable();

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
					     watchdog->mult, watchdog->shift);

		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
					     cs->mask, cs->mult, cs->shift);
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		/* Check the deviation from the watchdog clocksource. */
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	/*
	 * We use trylock here to avoid a potential dead lock when
	 * kgdb calls this code after the kernel has been stopped with
	 * watchdog_lock held. When watchdog_lock is held we just
	 * return and accept that the watchdog might trigger and mark
	 * the monitored clock source (usually TSC) unstable.
	 *
	 * This does not affect the other caller clocksource_resume()
	 * because at this point the kernel is UP, interrupts are
	 * disabled and nothing can hold watchdog_lock.
	 */
	if (!spin_trylock_irqsave(&watchdog_lock, flags))
		return;
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating) {
			watchdog = cs;
			/* Reset watchdog cycles */
			clocksource_reset_watchdog();
		}
	}
	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	LIST_HEAD(unstable);

	mutex_lock(&clocksource_mutex);
	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			list_add(&cs->wd_list, &unstable);
		}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	/* Needs to be done outside of watchdog lock */
	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
		list_del_init(&cs->wd_list);
		__clocksource_change_rating(cs, 0);
	}
	mutex_unlock(&clocksource_mutex);
	return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_deferment - Returns max time the clocksource can be deferred
 * @cs:		Pointer to clocksource
 */
static u64 clocksource_max_deferment(struct clocksource *cs)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns function without overflowing a 64-bit signed result. The
	 * maximum number of cycles is equal to LLONG_MAX/cs->mult which
	 * is equivalent to the below.
	 * max_cycles < (2^63)/cs->mult
	 * max_cycles < 2^(log2((2^63)/cs->mult))
	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
	 * max_cycles < 2^(63 - log2(cs->mult))
	 * max_cycles < 1 << (63 - log2(cs->mult))
	 * Please note that we add 1 to the result of the log2 to account for
	 * any rounding errors, ensure the above inequality is satisfied and
	 * no overflow will occur.
	 */
	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and cs->mask.
	 */
	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);

	/*
	 * To ensure that the clocksource does not wrap whilst we are idle,
	 * limit the time the clocksource can be deferred by a safety margin.
	 * A power-of-two fraction (here 1/32, about 3%) is used because it
	 * can be computed with a shift, versus say 10% which would require
	 * a division.
	 */
	return max_nsecs - (max_nsecs >> 5);
}

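/*
 * Worked example (illustrative): for the hypothetical 1 MHz clocksource
 * from the clocks_calc_mult_shift() example (mult = 4194304000 ~= 2^31.97,
 * shift = 22, 32-bit mask), ilog2(mult) + 1 = 32, so max_cycles = 2^31,
 * which is smaller than the mask.  clocksource_cyc2ns(2^31, mult, shift)
 * = 2^31 * 1000 ns ~= 2147 seconds, and after subtracting the 1/32 margin
 * max_idle_ns ~= 2080 seconds, roughly 35 minutes of permitted idle time.
 */
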
#ifdef CONFIG_GENERIC_TIME

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	struct clocksource *best, *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return;
	/* First clocksource on the list has the best rating. */
	best = list_first_entry(&clocksource_list, struct clocksource, list);
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    tick_oneshot_mode_active()) {
			/* Override clocksource cannot be used. */
			printk(KERN_WARNING "Override clocksource %s is not "
			       "HRT compatible. Cannot switch while in "
			       "HRT/NOHZ mode\n", cs->name);
			override_name[0] = 0;
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}
	if (curr_clocksource != best) {
		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
		curr_clocksource = best;
		timekeeping_notify(curr_clocksource);
	}
}

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif /* CONFIG_GENERIC_TIME */

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	mutex_unlock(&clocksource_mutex);

	finished_booting = 1;

	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	clocksource_watchdog_kthread(NULL);

	mutex_lock(&clocksource_mutex);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list)
		/* Keep track of the place where to insert */
		if (tmp->rating >= cs->rating)
			entry = &tmp->list;
	list_add(&cs->list, entry);
}

/**
 * clocksource_register - Used to install new clocksources
 * @cs:		clocksource to be registered
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 */
int clocksource_register(struct clocksource *cs)
{
	/* calculate max idle time permitted for this clocksource */
	cs->max_idle_ns = clocksource_max_deferment(cs);

	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);

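/*
 * Example (illustrative sketch, not part of the original file): a driver
 * fills in the read op, mask, rating and flags, computes mult/shift for
 * its counter frequency and registers.  my_cs_read(), my_counter_base,
 * "rate" and the rating value are hypothetical.
 *
 *	static cycle_t my_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t) readl(my_counter_base);
 *	}
 *
 *	static struct clocksource my_cs = {
 *		.name	= "my_timer",
 *		.rating	= 200,
 *		.read	= my_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocks_calc_mult_shift(&my_cs.mult, &my_cs.shift,
 *			       rate, NSEC_PER_SEC, 600);
 *	clocksource_register(&my_cs);
 *
 * Registration sorts the clocksource into clocksource_list by rating,
 * hooks it up to the watchdog and may immediately select it.
 */
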
static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	mutex_lock(&clocksource_mutex);
	__clocksource_change_rating(cs, rating);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 */
void clocksource_unregister(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_dequeue_watchdog(cs);
	list_del(&cs->list);
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:	unused
 * @buf:	name of override clocksource
 * @count:	length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  const char *buf, size_t count)
{
	size_t ret = count;

	/* strings from sysfs write are not 0 terminated! */
	if (count >= sizeof(override_name))
		return -EINVAL;

	/* strip off the trailing newline: */
	if (buf[count-1] == '\n')
		count--;

	mutex_lock(&clocksource_mutex);

	if (count > 0)
		memcpy(override_name, buf, count);
	override_name[count] = 0;
	clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}

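/*
 * Example: the override can be driven from user space, e.g.
 *
 *	# echo acpi_pm > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *
 * which stores "acpi_pm" in override_name and re-runs clocksource_select().
 */
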
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:	unused
 * @buf:	char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}

static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
	int error = sysdev_class_register(&clocksource_sysclass);

	if (!error)
		error = sysdev_register(&device_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_current_clocksource);
	if (!error)
		error = sysdev_create_file(
				&device_clocksource,
				&attr_available_clocksource);
	return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:	override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);

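/*
 * Example: booting with "clocksource=acpi_pm" on the kernel command line
 * stores "acpi_pm" as the override name, and clocksource_select() switches
 * to the ACPI PM timer once that clocksource is registered.
 */
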
/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:	override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		printk("Warning: clock=pmtmr is deprecated. "
			"Use clocksource=acpi_pm.\n");
		return boot_override_clocksource("acpi_pm");
	}
	printk("Warning! clock= boot option is deprecated. "
		"Use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);