/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */
#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
/***  debugfs support  ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_get_rate(c),
		   clk_get_accuracy(c));
	seq_printf(s, "\n");
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate        accuracy\n");
	seq_printf(s, "---------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}
static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;

	seq_printf(s, "{");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node) {
		if (!first_node)
			seq_printf(s, ",");
		first_node = false;
		clk_dump_subtree(s, c, 0);
	}

	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, c, 0);
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}
static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
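/*
 * Example (editor's note, not part of the original file): with
 * CONFIG_DEBUG_FS enabled, the files registered above can be inspected
 * from userspace, assuming debugfs is mounted at the usual location:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	# cat /sys/kernel/debug/clk/clk_dump
 */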
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
			(u32 *)&clk->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	if (clk->ops->debug_init)
		if (clk->ops->debug_init(clk->hw, clk->dentry))
			goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}
/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);
	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs.
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}
/**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
 * @clk: the clk being removed from the debugfs clk tree
 *
 * Dynamically removes a clk and all of its child clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_unregister(struct clk *clk)
{
	debugfs_remove_recursive(clk->dentry);
}
/**
 * clk_debug_reparent - reparent clk node in the debugfs clk tree
 * @clk: the clk being reparented
 * @new_parent: the new clk parent, may be NULL
 *
 * Rename clk entry in the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
	struct dentry *d;
	struct dentry *new_parent_d;

	if (!inited)
		return;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
}
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);
	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);
	if (!orphandir)
		return -ENOMEM;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
static inline void clk_debug_unregister(struct clk *clk)
{
}
#endif
/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
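/*
 * Example (editor's note, not part of the original file): the boot
 * parameter registered above is passed on the kernel command line, e.g.
 * when gating unused clocks would break a not-yet-converted driver:
 *
 *	console=ttyS0,115200 clk_ignore_unused
 */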
static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);
/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}
unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(__clk_get_rate);
unsigned long __clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk->accuracy;
}
unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}
bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}
struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}
/*
 * Helper for finding the best parent to provide a given frequency.  This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
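/*
 * Example (editor's sketch under assumptions, not part of the original
 * file): a mux-type hardware clock can plug the helper above straight into
 * its ops; the other callback names here are hypothetical:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */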
/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
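/*
 * Example (editor's sketch, not part of the original file): a consumer
 * pairing the sleepable and atomic halves of the API.  "clk" would have
 * been obtained via clk_get():
 *
 *	ret = clk_prepare(clk);		(non-atomic context, may sleep)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		(safe from atomic context)
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 */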
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, or a negative
 * error code otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
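/*
 * Example (editor's sketch, not part of the original file): a consumer can
 * query the achievable rate before committing to it:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */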
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}
/**
 * __clk_recalc_accuracies
 * @clk: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_accuracies(struct clk *clk)
{
	unsigned long parent_accuracy = 0;
	struct clk *child;

	if (clk->parent)
		parent_accuracy = clk->parent->accuracy;

	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
							  parent_accuracy);
	else
		clk->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_accuracies(child);
}
/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will
 * be issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(clk);

	accuracy = __clk_get_accuracy(clk);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}
static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}
static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
		struct clk *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
}
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}
/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, clk->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw,
					new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}
/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk,
					     unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
					best_parent_rate,
					clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk_get_rate(clk))
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
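/*
 * Example (editor's note, not part of the original file): whether a call
 * reaches up the tree depends on the flags of each clock involved.  With a
 * divider whose parent is a PLL, setting CLK_SET_RATE_PARENT on the divider
 * lets clk_set_rate() retune the PLL when the divider alone cannot hit the
 * requested rate:
 *
 *	clk_set_rate(div_clk, 100000000);	(may also change the PLL rate)
 */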
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */
	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);

	ret = clk_get_parent_by_index(clk, index);

out:
	return ret;
}
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	clk_debug_reparent(clk, new_parent);
	__clk_recalc_accuracies(clk);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.  If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, or the reparenting is glitchy in hardware), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops)
		return -EINVAL;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
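/*
 * Example (editor's sketch, not part of the original file): switching a
 * mux between two inputs obtained via clk_get(); both struct clk pointers
 * are hypothetical consumer handles:
 *
 *	ret = clk_set_parent(mux_clk, xtal_clk);
 *	if (ret)
 *		dev_err(dev, "failed to reparent: %d\n", ret);
 */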
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}
	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_rate_and_parent &&
			!(clk->ops->set_parent && clk->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);
	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy.  For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
					__clk_get_accuracy(clk->parent));
	else if (clk->parent)
		clk->accuracy = clk->parent->accuracy;
	else
		clk->accuracy = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	clk_debug_register(clk);

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	kref_init(&clk->ref);
out:
	clk_prepare_unlock();

	return ret;
}
/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to
 * a preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns 0 on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	else
		clk->owner = NULL;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);
static int _clk_register(struct device *dev, struct clk_hw *hw,
			 struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}
/**
 * clk_register - allocate a new clock, register it and return an opaque
 * cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error code; drivers must test for an error code after
 * calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
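/*
 * Example (editor's sketch under assumptions, not part of the original
 * file): a provider registering a clock from its own clk_hw.  The ops and
 * names are hypothetical; real drivers usually go through the basic clock
 * types (clk-gate.c, clk-divider.c, ...):
 *
 *	static const char *parents[] = { "osc24m" };
 *	struct clk_init_data init = {
 *		.name		= "my_gate",
 *		.ops		= &my_gate_ops,
 *		.parent_names	= parents,
 *		.num_parents	= ARRAY_SIZE(parents),
 *	};
 *	my_hw.init = &init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */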
/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk *clk = container_of(ref, struct clk, ref);
	int i = clk->num_parents;

	kfree(clk->parents);
	while (--i >= 0)
		kfree(clk->parent_names[i]);

	kfree(clk->parent_names);
	kfree(clk->name);
	kfree(clk);
}
/*
 * Empty clk_ops for unregistered clocks.  These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	if (clk->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
		goto out;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->children)) {
		struct clk *child;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry(child, &clk->children, child_node)
			clk_set_parent(child, NULL);
	}

	clk_debug_unregister(clk);

	hlist_del_init(&clk->child_node);

	if (clk->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->name);

	kref_put(&clk->ref, __clk_release);
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}
/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	int ret;

	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	ret = _clk_register(dev, hw, clk);
	if (!ret) {
		devres_add(dev, clk);
	} else {
		devres_free(clk);
		clk = ERR_PTR(ret);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
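/*
 * Example (editor's note, not part of the original file): in a driver's
 * probe path the managed variant removes the error-path unregister:
 *
 *	clk = devm_clk_register(&pdev->dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	(no clk_unregister() needed on remove or probe failure)
 */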
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;
	if (WARN_ON(!c))
		return 0;
	return c == data;
}
/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
int __clk_get(struct clk *clk)
{
	if (clk) {
		if (!try_module_get(clk->owner))
			return 0;

		kref_get(&clk->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();
	kref_put(&clk->ref, __clk_release);
	clk_prepare_unlock();

	module_put(clk->owner);
}
/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
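/*
 * Example (editor's sketch, not part of the original file): a minimal
 * notifier callback; struct clk_notifier_data carries the clk and the
 * old/new rates:
 *
 *	static int my_rate_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *			return NOTIFY_BAD;	(veto the change)
 *		return NOTIFY_OK;
 *	}
 */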
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table;

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
/* of_clk_provider list locking helpers */
void of_clk_lock(void)
{
	mutex_lock(&of_clk_mutex);
}

void of_clk_unlock(void)
{
	mutex_unlock(&of_clk_mutex);
}
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
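/*
 * Example (editor's sketch, not part of the original file): a provider
 * exposing several clocks through one DT node with the helper above; the
 * array contents and NR_CLKS are hypothetical:
 *
 *	static struct clk *my_clks[NR_CLKS];
 *	static struct clk_onecell_data my_clk_data = {
 *		.clks		= my_clks,
 *		.clk_num	= NR_CLKS,
 *	};
 *
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &my_clk_data);
 */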
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}

	return clk;
}

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk *clk;

	mutex_lock(&of_clk_mutex);
	clk = __of_clk_get_from_provider(clkspec);
	mutex_unlock(&of_clk_mutex);

	return clk;
}
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index, &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);
/*
 * This function looks for a parent clock.  If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready; we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we make the assumption that the device tree is
		 * written correctly.  So an error means that there is
		 * no more parent.  As we didn't exit yet, the
		 * previous parents are ready.  If there is no clock
		 * parent, no need to wait for them, so we can
		 * consider their absence as being ready.
		 */
		return 1;
	}
}
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to follow the
 * dependencies between them.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					&clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {
				clk_provider->clk_init_cb(clk_provider->np);
				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif /* CONFIG_OF */
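/*
 * Example (editor's sketch, not part of the original file): providers
 * enter the table scanned by of_clk_init() via CLK_OF_DECLARE from
 * <linux/clk-provider.h>; the compatible string and setup function here
 * are hypothetical:
 *
 *	static void __init my_osc_setup(struct device_node *np)
 *	{
 *		(parse "np" and register clocks)
 *	}
 *	CLK_OF_DECLARE(my_osc, "vendor,my-osc", my_osc_setup);
 */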