/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>
/* All registered clocks; additions/removals guarded by clock_list_sem. */
static LIST_HEAD(clock_list);
/* clock_lock serializes enable/disable and the rate-changing ops. */
static DEFINE_SPINLOCK(clock_lock);
/* clock_list_sem protects walks/mutations of clock_list in sleepable paths. */
static DEFINE_MUTEX(clock_list_sem);
/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least.  These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
43 static struct clk master_clk
= {
45 .flags
= CLK_ALWAYS_ENABLED
| CLK_RATE_PROPAGATES
,
46 .rate
= CONFIG_SH_PCLK_FREQ
,
49 static struct clk module_clk
= {
51 .parent
= &master_clk
,
52 .flags
= CLK_ALWAYS_ENABLED
| CLK_RATE_PROPAGATES
,
55 static struct clk bus_clk
= {
57 .parent
= &master_clk
,
58 .flags
= CLK_ALWAYS_ENABLED
| CLK_RATE_PROPAGATES
,
61 static struct clk cpu_clk
= {
63 .parent
= &master_clk
,
64 .flags
= CLK_ALWAYS_ENABLED
,
68 * The ordering of these clocks matters, do not change it.
70 static struct clk
*onchip_clocks
[] = {
77 static void propagate_rate(struct clk
*clk
)
81 list_for_each_entry(clkp
, &clock_list
, node
) {
82 if (likely(clkp
->parent
!= clk
))
84 if (likely(clkp
->ops
&& clkp
->ops
->recalc
))
85 clkp
->ops
->recalc(clkp
);
86 if (unlikely(clkp
->flags
& CLK_RATE_PROPAGATES
))
91 static int __clk_enable(struct clk
*clk
)
94 * See if this is the first time we're enabling the clock, some
95 * clocks that are always enabled still require "special"
96 * initialization. This is especially true if the clock mode
97 * changes and the clock needs to hunt for the proper set of
98 * divisors to use before it can effectively recalc.
100 if (unlikely(atomic_read(&clk
->kref
.refcount
) == 1))
101 if (clk
->ops
&& clk
->ops
->init
)
104 kref_get(&clk
->kref
);
106 if (clk
->flags
& CLK_ALWAYS_ENABLED
)
109 if (likely(clk
->ops
&& clk
->ops
->enable
))
110 clk
->ops
->enable(clk
);
115 int clk_enable(struct clk
*clk
)
123 clk_enable(clk
->parent
);
125 spin_lock_irqsave(&clock_lock
, flags
);
126 ret
= __clk_enable(clk
);
127 spin_unlock_irqrestore(&clock_lock
, flags
);
131 EXPORT_SYMBOL_GPL(clk_enable
);
/*
 * kref release callback: the clocks are statically allocated, so there
 * is nothing to free — we only use kref_put()'s return value in
 * __clk_disable() to detect the count reaching zero.
 */
static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}
138 static void __clk_disable(struct clk
*clk
)
140 int count
= kref_put(&clk
->kref
, clk_kref_release
);
142 if (clk
->flags
& CLK_ALWAYS_ENABLED
)
145 if (!count
) { /* count reaches zero, disable the clock */
146 if (likely(clk
->ops
&& clk
->ops
->disable
))
147 clk
->ops
->disable(clk
);
151 void clk_disable(struct clk
*clk
)
158 spin_lock_irqsave(&clock_lock
, flags
);
160 spin_unlock_irqrestore(&clock_lock
, flags
);
162 clk_disable(clk
->parent
);
164 EXPORT_SYMBOL_GPL(clk_disable
);
166 int clk_register(struct clk
*clk
)
168 mutex_lock(&clock_list_sem
);
170 list_add(&clk
->node
, &clock_list
);
171 kref_init(&clk
->kref
);
173 mutex_unlock(&clock_list_sem
);
175 if (clk
->flags
& CLK_ALWAYS_ENABLED
) {
176 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk
->name
);
177 if (clk
->ops
&& clk
->ops
->init
)
179 if (clk
->ops
&& clk
->ops
->enable
)
180 clk
->ops
->enable(clk
);
181 pr_debug( "Enabled.");
186 EXPORT_SYMBOL_GPL(clk_register
);
188 void clk_unregister(struct clk
*clk
)
190 mutex_lock(&clock_list_sem
);
191 list_del(&clk
->node
);
192 mutex_unlock(&clock_list_sem
);
194 EXPORT_SYMBOL_GPL(clk_unregister
);
196 unsigned long clk_get_rate(struct clk
*clk
)
200 EXPORT_SYMBOL_GPL(clk_get_rate
);
/* clk_set_rate - set @clk's rate using the default (0) algorithm id. */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
208 int clk_set_rate_ex(struct clk
*clk
, unsigned long rate
, int algo_id
)
210 int ret
= -EOPNOTSUPP
;
212 if (likely(clk
->ops
&& clk
->ops
->set_rate
)) {
215 spin_lock_irqsave(&clock_lock
, flags
);
216 ret
= clk
->ops
->set_rate(clk
, rate
, algo_id
);
217 spin_unlock_irqrestore(&clock_lock
, flags
);
220 if (unlikely(clk
->flags
& CLK_RATE_PROPAGATES
))
225 EXPORT_SYMBOL_GPL(clk_set_rate_ex
);
227 void clk_recalc_rate(struct clk
*clk
)
229 if (likely(clk
->ops
&& clk
->ops
->recalc
)) {
232 spin_lock_irqsave(&clock_lock
, flags
);
233 clk
->ops
->recalc(clk
);
234 spin_unlock_irqrestore(&clock_lock
, flags
);
237 if (unlikely(clk
->flags
& CLK_RATE_PROPAGATES
))
240 EXPORT_SYMBOL_GPL(clk_recalc_rate
);
242 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
244 if (likely(clk
->ops
&& clk
->ops
->round_rate
)) {
245 unsigned long flags
, rounded
;
247 spin_lock_irqsave(&clock_lock
, flags
);
248 rounded
= clk
->ops
->round_rate(clk
, rate
);
249 spin_unlock_irqrestore(&clock_lock
, flags
);
254 return clk_get_rate(clk
);
256 EXPORT_SYMBOL_GPL(clk_round_rate
);
259 * Returns a clock. Note that we first try to use device id on the bus
260 * and clock name. If this fails, we try to use clock name only.
262 struct clk
*clk_get(struct device
*dev
, const char *id
)
264 struct clk
*p
, *clk
= ERR_PTR(-ENOENT
);
267 if (dev
== NULL
|| dev
->bus
!= &platform_bus_type
)
270 idno
= to_platform_device(dev
)->id
;
272 mutex_lock(&clock_list_sem
);
273 list_for_each_entry(p
, &clock_list
, node
) {
275 strcmp(id
, p
->name
) == 0 && try_module_get(p
->owner
)) {
281 list_for_each_entry(p
, &clock_list
, node
) {
282 if (strcmp(id
, p
->name
) == 0 && try_module_get(p
->owner
)) {
289 mutex_unlock(&clock_list_sem
);
293 EXPORT_SYMBOL_GPL(clk_get
);
295 void clk_put(struct clk
*clk
)
297 if (clk
&& !IS_ERR(clk
))
298 module_put(clk
->owner
);
300 EXPORT_SYMBOL_GPL(clk_put
);
302 void __init
__attribute__ ((weak
))
303 arch_init_clk_ops(struct clk_ops
**ops
, int type
)
307 int __init
__attribute__ ((weak
))
313 static int show_clocks(char *buf
, char **start
, off_t off
,
314 int len
, int *eof
, void *data
)
319 list_for_each_entry_reverse(clk
, &clock_list
, node
) {
320 unsigned long rate
= clk_get_rate(clk
);
322 p
+= sprintf(p
, "%-12s\t: %ld.%02ldMHz\t%s\n", clk
->name
,
323 rate
/ 1000000, (rate
% 1000000) / 10000,
324 ((clk
->flags
& CLK_ALWAYS_ENABLED
) ||
325 (atomic_read(&clk
->kref
.refcount
) != 1)) ?
326 "enabled" : "disabled");
332 int __init
clk_init(void)
336 BUG_ON(!master_clk
.rate
);
338 for (i
= 0; i
< ARRAY_SIZE(onchip_clocks
); i
++) {
339 struct clk
*clk
= onchip_clocks
[i
];
341 arch_init_clk_ops(&clk
->ops
, i
);
342 ret
|= clk_register(clk
);
345 ret
|= arch_clk_init();
347 /* Kick the child clocks.. */
348 propagate_rate(&master_clk
);
349 propagate_rate(&bus_clk
);
354 static int __init
clk_proc_init(void)
356 struct proc_dir_entry
*p
;
357 p
= create_proc_read_entry("clocks", S_IRUSR
, NULL
,
364 subsys_initcall(clk_proc_init
);