/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
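/*
 * Illustrative sketch only (not part of the original file): a divider
 * clock driver might fill a cpufreq table as below. The divisor list,
 * the table and every "example_*" identifier are assumptions for
 * illustration; the parent clock must already have a valid rate.
 */
static unsigned int example_divisors[] = { 2, 4, 8, 16 };

static struct clk_div_mult_table example_div_mult_table = {
	.divisors	= example_divisors,
	.nr_divisors	= ARRAY_SIZE(example_divisors),
};

/* One slot per candidate frequency, plus the termination entry */
static struct cpufreq_frequency_table example_freq_table[ARRAY_SIZE(example_divisors) + 1];

static void __maybe_unused example_build_table(struct clk *clk)
{
	clk_rate_table_build(clk, example_freq_table,
			     ARRAY_SIZE(example_divisors),
			     &example_div_mult_table, NULL);
}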
struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

/* Iterate pos over [min, max], skipping invalid (zero) frequencies */
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	/* Clamp to the range the iterator actually covered */
	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}
static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}
static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}
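/*
 * Sketch (assumption): a clock divided down from its parent by an
 * integer divisor in [1, 16] could implement its ->round_rate() op with
 * the div range helper above. The bounds and function name are
 * illustrative, not taken from this file.
 */
static long __maybe_unused example_div_round_rate(struct clk *clk,
						  unsigned long rate)
{
	/* Best reachable rate of parent_rate / div, div in [1, 16] */
	return clk_rate_div_range_round(clk, 1, 16, rate);
}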
int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}
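/*
 * Sketch (assumption): a clock that merely follows its parent (e.g. a
 * plain gate) would typically wire ->recalc up to followparent_recalc.
 * This ops instance is illustrative only and assumes the SH clk_ops
 * layout of this era.
 */
static struct clk_ops example_follow_ops __maybe_unused = {
	.recalc	= followparent_recalc,
};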
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}
static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	/* Roll back the usecount bump taken above */
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
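/*
 * Usage sketch: clk_enable()/clk_disable() are refcounted and propagate
 * to parent clocks, so a consumer brackets hardware access with a
 * balanced pair. "my_clk" stands in for a clock obtained via clk_get();
 * the function itself is illustrative.
 */
static int __maybe_unused example_use_clock(struct clk *my_clk)
{
	int ret;

	ret = clk_enable(my_clk);	/* enables parents first, then my_clk */
	if (ret)
		return ret;

	/* ... access hardware clocked by my_clk ... */

	clk_disable(my_clk);		/* balanced: may gate my_clk and parents */

	return 0;
}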
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}
static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	/* Walk up the parent chain to the topmost clock */
	while (clk->parent)
		clk = clk->parent;

	return clk;
}
static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}
static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do for the dummy root mapping */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}
int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
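/*
 * Sketch (assumption): minimal registration of a root clock with a
 * fixed input rate. Real SH clocks are declared in the CPU-specific
 * clock tables and registered in bulk; this instance and its rate are
 * illustrative only.
 */
static struct clk example_root_clk = {
	.rate	= 33333333,	/* hypothetical fixed input, in Hz */
};

static int __maybe_unused example_register(void)
{
	/* No parent set, so it lands on root_clks with the dummy mapping */
	return clk_register(&example_root_clk);
}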
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
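/*
 * Usage sketch: the common round-then-set pattern. clk_round_rate()
 * reports what the hardware can actually do for a target rate, which is
 * then committed with clk_set_rate(). Illustrative only; "my_clk" is a
 * placeholder.
 */
static int __maybe_unused example_set_nearest(struct clk *my_clk,
					      unsigned long target)
{
	long rounded = clk_round_rate(my_clk, target);

	if (rounded < 0)
		return rounded;

	return clk_set_rate(my_clk, rounded);
}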
long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);
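/*
 * Usage sketch for clk_round_parent(): search the parent's frequency
 * table for the entry that best reaches "target" through an integer
 * divisor in [1, 32]. All names and bounds here are illustrative.
 */
static long __maybe_unused example_round_via_parent(struct clk *my_clk,
						    unsigned long target)
{
	unsigned long best_freq, parent_freq;
	long error;

	error = clk_round_parent(my_clk, target, &best_freq, &parent_freq,
				 1, 32);

	/* error is the deviation from target; best_freq the reachable rate */
	pr_debug("best %lu (parent %lu, off by %ld)\n",
		 best_freq, parent_freq, error);

	return best_freq;
}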
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp, rate);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	/* Register parents first so the directory hierarchy exists */
	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);