/*
 * linux/arch/arm/plat-omap/clock.c
 *
 * Copyright (C) 2004 - 2008 Nokia corporation
 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/list.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/string.h>
20 #include <linux/clk.h>
21 #include <linux/mutex.h>
22 #include <linux/platform_device.h>
23 #include <linux/cpufreq.h>
24 #include <linux/debugfs.h>
27 #include <mach/clock.h>
29 static LIST_HEAD(clocks
);
30 static DEFINE_MUTEX(clocks_mutex
);
31 static DEFINE_SPINLOCK(clockfw_lock
);
33 static struct clk_functions
*arch_clock
;
/*-------------------------------------------------------------------------
 * Standard clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/
40 * Returns a clock. Note that we first try to use device id on the bus
41 * and clock name. If this fails, we try to use clock name only.
43 struct clk
* clk_get(struct device
*dev
, const char *id
)
45 struct clk
*p
, *clk
= ERR_PTR(-ENOENT
);
48 if (dev
== NULL
|| dev
->bus
!= &platform_bus_type
)
51 idno
= to_platform_device(dev
)->id
;
53 mutex_lock(&clocks_mutex
);
55 list_for_each_entry(p
, &clocks
, node
) {
57 strcmp(id
, p
->name
) == 0 && try_module_get(p
->owner
)) {
63 list_for_each_entry(p
, &clocks
, node
) {
64 if (strcmp(id
, p
->name
) == 0 && try_module_get(p
->owner
)) {
71 mutex_unlock(&clocks_mutex
);
75 EXPORT_SYMBOL(clk_get
);
77 int clk_enable(struct clk
*clk
)
82 if (clk
== NULL
|| IS_ERR(clk
))
85 spin_lock_irqsave(&clockfw_lock
, flags
);
86 if (arch_clock
->clk_enable
)
87 ret
= arch_clock
->clk_enable(clk
);
88 spin_unlock_irqrestore(&clockfw_lock
, flags
);
92 EXPORT_SYMBOL(clk_enable
);
94 void clk_disable(struct clk
*clk
)
98 if (clk
== NULL
|| IS_ERR(clk
))
101 spin_lock_irqsave(&clockfw_lock
, flags
);
102 if (clk
->usecount
== 0) {
103 printk(KERN_ERR
"Trying disable clock %s with 0 usecount\n",
109 if (arch_clock
->clk_disable
)
110 arch_clock
->clk_disable(clk
);
113 spin_unlock_irqrestore(&clockfw_lock
, flags
);
115 EXPORT_SYMBOL(clk_disable
);
117 int clk_get_usecount(struct clk
*clk
)
122 if (clk
== NULL
|| IS_ERR(clk
))
125 spin_lock_irqsave(&clockfw_lock
, flags
);
127 spin_unlock_irqrestore(&clockfw_lock
, flags
);
131 EXPORT_SYMBOL(clk_get_usecount
);
133 unsigned long clk_get_rate(struct clk
*clk
)
136 unsigned long ret
= 0;
138 if (clk
== NULL
|| IS_ERR(clk
))
141 spin_lock_irqsave(&clockfw_lock
, flags
);
143 spin_unlock_irqrestore(&clockfw_lock
, flags
);
147 EXPORT_SYMBOL(clk_get_rate
);
149 void clk_put(struct clk
*clk
)
151 if (clk
&& !IS_ERR(clk
))
152 module_put(clk
->owner
);
154 EXPORT_SYMBOL(clk_put
);
/*-------------------------------------------------------------------------
 * Optional clock functions defined in include/linux/clk.h
 *-------------------------------------------------------------------------*/
160 long clk_round_rate(struct clk
*clk
, unsigned long rate
)
165 if (clk
== NULL
|| IS_ERR(clk
))
168 spin_lock_irqsave(&clockfw_lock
, flags
);
169 if (arch_clock
->clk_round_rate
)
170 ret
= arch_clock
->clk_round_rate(clk
, rate
);
171 spin_unlock_irqrestore(&clockfw_lock
, flags
);
175 EXPORT_SYMBOL(clk_round_rate
);
177 int clk_set_rate(struct clk
*clk
, unsigned long rate
)
182 if (clk
== NULL
|| IS_ERR(clk
))
185 spin_lock_irqsave(&clockfw_lock
, flags
);
186 if (arch_clock
->clk_set_rate
)
187 ret
= arch_clock
->clk_set_rate(clk
, rate
);
188 spin_unlock_irqrestore(&clockfw_lock
, flags
);
192 EXPORT_SYMBOL(clk_set_rate
);
194 int clk_set_parent(struct clk
*clk
, struct clk
*parent
)
199 if (clk
== NULL
|| IS_ERR(clk
) || parent
== NULL
|| IS_ERR(parent
))
202 spin_lock_irqsave(&clockfw_lock
, flags
);
203 if (arch_clock
->clk_set_parent
)
204 ret
= arch_clock
->clk_set_parent(clk
, parent
);
205 spin_unlock_irqrestore(&clockfw_lock
, flags
);
209 EXPORT_SYMBOL(clk_set_parent
);
211 struct clk
*clk_get_parent(struct clk
*clk
)
214 struct clk
* ret
= NULL
;
216 if (clk
== NULL
|| IS_ERR(clk
))
219 spin_lock_irqsave(&clockfw_lock
, flags
);
220 if (arch_clock
->clk_get_parent
)
221 ret
= arch_clock
->clk_get_parent(clk
);
222 spin_unlock_irqrestore(&clockfw_lock
, flags
);
226 EXPORT_SYMBOL(clk_get_parent
);
/*-------------------------------------------------------------------------
 * OMAP specific clock functions shared between omap1 and omap2
 *-------------------------------------------------------------------------*/
232 unsigned int __initdata mpurate
;
235 * By default we use the rate set by the bootloader.
236 * You can override this with mpurate= cmdline option.
238 static int __init
omap_clk_setup(char *str
)
240 get_option(&str
, &mpurate
);
250 __setup("mpurate=", omap_clk_setup
);
252 /* Used for clocks that always have same value as the parent clock */
253 void followparent_recalc(struct clk
*clk
)
255 if (clk
== NULL
|| IS_ERR(clk
))
258 clk
->rate
= clk
->parent
->rate
;
259 if (unlikely(clk
->flags
& RATE_PROPAGATES
))
263 /* Propagate rate to children */
264 void propagate_rate(struct clk
* tclk
)
268 if (tclk
== NULL
|| IS_ERR(tclk
))
271 list_for_each_entry(clkp
, &clocks
, node
) {
272 if (likely(clkp
->parent
!= tclk
))
274 if (likely((u32
)clkp
->recalc
))
280 * recalculate_root_clocks - recalculate and propagate all root clocks
282 * Recalculates all root clocks (clocks with no parent), which if the
283 * clock's .recalc is set correctly, should also propagate their rates.
286 void recalculate_root_clocks(void)
290 list_for_each_entry(clkp
, &clocks
, node
) {
291 if (unlikely(!clkp
->parent
) && likely((u32
)clkp
->recalc
))
296 int clk_register(struct clk
*clk
)
298 if (clk
== NULL
|| IS_ERR(clk
))
301 mutex_lock(&clocks_mutex
);
302 list_add(&clk
->node
, &clocks
);
305 mutex_unlock(&clocks_mutex
);
309 EXPORT_SYMBOL(clk_register
);
311 void clk_unregister(struct clk
*clk
)
313 if (clk
== NULL
|| IS_ERR(clk
))
316 mutex_lock(&clocks_mutex
);
317 list_del(&clk
->node
);
318 mutex_unlock(&clocks_mutex
);
320 EXPORT_SYMBOL(clk_unregister
);
322 void clk_deny_idle(struct clk
*clk
)
326 if (clk
== NULL
|| IS_ERR(clk
))
329 spin_lock_irqsave(&clockfw_lock
, flags
);
330 if (arch_clock
->clk_deny_idle
)
331 arch_clock
->clk_deny_idle(clk
);
332 spin_unlock_irqrestore(&clockfw_lock
, flags
);
334 EXPORT_SYMBOL(clk_deny_idle
);
336 void clk_allow_idle(struct clk
*clk
)
340 if (clk
== NULL
|| IS_ERR(clk
))
343 spin_lock_irqsave(&clockfw_lock
, flags
);
344 if (arch_clock
->clk_allow_idle
)
345 arch_clock
->clk_allow_idle(clk
);
346 spin_unlock_irqrestore(&clockfw_lock
, flags
);
348 EXPORT_SYMBOL(clk_allow_idle
);
350 void clk_enable_init_clocks(void)
354 list_for_each_entry(clkp
, &clocks
, node
) {
355 if (clkp
->flags
& ENABLE_ON_INIT
)
359 EXPORT_SYMBOL(clk_enable_init_clocks
);
#ifdef CONFIG_CPU_FREQ
/*
 * Fill in the cpufreq frequency table via the SoC-specific hook.
 */
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (arch_clock->clk_init_cpufreq_table)
		arch_clock->clk_init_cpufreq_table(table);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_init_cpufreq_table);
#endif
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	list_for_each_entry(ck, &clocks, node) {
		/* Skip clocks that are in use, always-on, or not gateable. */
		if (ck->usecount > 0 || (ck->flags & ALWAYS_ENABLED) ||
			ck->enable_reg == 0)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		if (arch_clock->clk_disable_unused)
			arch_clock->clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);
#endif
401 int __init
clk_init(struct clk_functions
* custom_clocks
)
403 if (!custom_clocks
) {
404 printk(KERN_ERR
"No custom clock functions registered\n");
408 arch_clock
= custom_clocks
;
413 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
415 * debugfs support to trace clock tree hierarchy and attributes
417 static struct dentry
*clk_debugfs_root
;
419 static int clk_debugfs_register_one(struct clk
*c
)
422 struct dentry
*d
, *child
;
423 struct clk
*pa
= c
->parent
;
427 p
+= sprintf(p
, "%s", c
->name
);
429 sprintf(p
, ":%d", c
->id
);
430 d
= debugfs_create_dir(s
, pa
? pa
->dent
: clk_debugfs_root
);
435 d
= debugfs_create_u8("usecount", S_IRUGO
, c
->dent
, (u8
*)&c
->usecount
);
440 d
= debugfs_create_u32("rate", S_IRUGO
, c
->dent
, (u32
*)&c
->rate
);
445 d
= debugfs_create_x32("flags", S_IRUGO
, c
->dent
, (u32
*)&c
->flags
);
454 list_for_each_entry(child
, &d
->d_subdirs
, d_u
.d_child
)
455 debugfs_remove(child
);
456 debugfs_remove(c
->dent
);
460 static int clk_debugfs_register(struct clk
*c
)
463 struct clk
*pa
= c
->parent
;
465 if (pa
&& !pa
->dent
) {
466 err
= clk_debugfs_register(pa
);
472 err
= clk_debugfs_register_one(c
);
479 static int __init
clk_debugfs_init(void)
485 d
= debugfs_create_dir("clock", NULL
);
488 clk_debugfs_root
= d
;
490 list_for_each_entry(c
, &clocks
, node
) {
491 err
= clk_debugfs_register(c
);
497 debugfs_remove(clk_debugfs_root
); /* REVISIT: Cleanup correctly */
500 late_initcall(clk_debugfs_init
);
502 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */