clk: Don't set clk->new_rate twice
drivers/clk/clk.c

/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
        struct dentry *d;
        int ret = -ENOMEM;

        if (!clk || !pdentry) {
                ret = -EINVAL;
                goto out;
        }

        d = debugfs_create_dir(clk->name, pdentry);
        if (!d)
                goto out;

        clk->dentry = d;

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);
        if (!d)
                goto err_out;

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);
        if (!d)
                goto err_out;

        ret = 0;
        goto out;

err_out:
        debugfs_remove(clk->dentry);
out:
        return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
        struct clk *child;
        struct hlist_node *tmp;
        int ret = -EINVAL;

        if (!clk || !pdentry)
                goto out;

        ret = clk_debug_create_one(clk, pdentry);

        if (ret)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);

        ret = 0;
out:
        return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
        struct clk *parent;
        struct dentry *pdentry;
        int ret = 0;

        if (!inited)
                goto out;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs
         */
        if (!parent)
                if (clk->flags & CLK_IS_ROOT)
                        pdentry = rootdir;
                else
                        pdentry = orphandir;
        else
                if (parent->dentry)
                        pdentry = parent->dentry;
                else
                        goto out;

        ret = clk_debug_create_subtree(clk, pdentry);

out:
        return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is set up.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        rootdir = debugfs_create_dir("clk", NULL);

        if (!rootdir)
                return -ENOMEM;

        orphandir = debugfs_create_dir("orphans", rootdir);

        if (!orphandir)
                return -ENOMEM;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        inited = 1;

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
        struct clk *child;
        struct hlist_node *tmp;
        unsigned long flags;

        if (!clk)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)
                goto unlock_out;

        if (clk->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        if (__clk_is_enabled(clk) && clk->ops->disable)
                clk->ops->disable(clk->hw);

unlock_out:
        spin_unlock_irqrestore(&enable_lock, flags);

out:
        return;
}

static int clk_disable_unused(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_disable_unused);
#endif
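
/*
 * Example (illustrative, not part of this file): a clock that must keep
 * running even though no driver has claimed it (say, one feeding a debug
 * UART) can opt out of the late-boot gating above by being registered
 * with the CLK_IGNORE_UNUSED flag.  All foo_* names below are
 * hypothetical.
 */
#if 0
        clk = clk_register(NULL, "dbg_uart", &foo_gate_ops, &foo_hw,
                        foo_parent_names, 1, CLK_IGNORE_UNUSED);
#endif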

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
        return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
        return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
        return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
        unsigned long ret;

        if (!clk) {
                ret = 0;
                goto out;
        }

        ret = clk->rate;

        if (clk->flags & CLK_IS_ROOT)
                goto out;

        if (!clk->parent)
                ret = 0;

out:
        return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
        int ret;

        if (!clk)
                return -EINVAL;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if it is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;
                goto out;
        }

        ret = clk->ops->is_enabled(clk->hw);
out:
        return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
        struct clk *child;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))
                return clk;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

struct clk *__clk_lookup(const char *name)
{
        struct clk *root_clk;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->prepare_count == 0))
                return;

        if (--clk->prepare_count > 0)
                return;

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
                                __clk_unprepare(clk->parent);
                                return ret;
                        }
                }
        }

        clk->prepare_count++;

        return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
        int ret;

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
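
/*
 * Example (illustrative, not part of this file): the prepare/unprepare
 * pairing described above, from a consumer driver's point of view.  The
 * clock name "slowclk" and the device pointer are hypothetical, and error
 * handling is trimmed for brevity.
 */
#if 0
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "slowclk");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare(clk);         /* may sleep: e.g. an I2C transfer */
        if (ret)
                return ret;

        /* ... clk_enable/clk_disable may now be called, even atomically ... */

        clk_unprepare(clk);             /* may sleep: undoes clk_prepare */
        clk_put(clk);
#endif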

static void __clk_disable(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->enable_count == 0))
                return;

        if (--clk->enable_count > 0)
                return;

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (WARN_ON(clk->prepare_count == 0))
                return -ESHUTDOWN;

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);

                if (ret)
                        return ret;

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
                                __clk_disable(clk->parent);
                                return ret;
                        }
                }
        }

        clk->enable_count++;
        return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, or a negative
 * error code otherwise.
 */
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
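
/*
 * Example (illustrative): because clk_enable/clk_disable only take the
 * enable_lock spinlock, they may be called from atomic context, provided
 * the clock was prepared beforehand.  The interrupt handler and the
 * foo_priv structure below are hypothetical.
 */
#if 0
static irqreturn_t foo_isr(int irq, void *dev_id)
{
        struct foo_priv *priv = dev_id;

        clk_enable(priv->clk);          /* fast, non-sleeping ungate */
        /* ... access the hardware ... */
        clk_disable(priv->clk);         /* fast, non-sleeping gate */

        return IRQ_HANDLED;
}
#endif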

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        mutex_lock(&prepare_lock);
        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long parent_rate = 0;

        if (!clk)
                return -EINVAL;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);
                else
                        return clk->rate;
        }

        if (clk->parent)
                parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long ret;

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
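
/*
 * Example (illustrative): query the rate the hardware would actually run
 * at before committing to it with clk_set_rate.  The 48 MHz target is
 * hypothetical.
 */
#if 0
        long rounded;

        rounded = clk_round_rate(clk, 48000000);
        if (rounded > 0 && rounded != 48000000)
                pr_debug("closest achievable rate is %ld Hz\n", rounded);
#endif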

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        break;
                }
        }

        return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;
        struct clk *child;

        old_rate = clk->rate;

        if (clk->parent)
                parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
        struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort the rate change if a driver returns NOTIFY_BAD */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret == NOTIFY_BAD)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
                        break;
        }

out:
        return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
        struct clk *child;
        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
        }
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;
                        return NULL;
                }
        }

        /* need clk->parent from here on out */
        if (!clk->parent) {
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

                goto out;
        }

        best_parent_rate = clk->parent->rate;
        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

                goto out;
        }

out:
        clk_calc_subtree(clk, new_rate);

        return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)
                return NULL;

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)
                        fail_clk = clk;
        }

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
        }

        return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
        struct clk *child;
        unsigned long old_rate;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                clk->parent->rate);
        else
                clk->rate = clk->parent->rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, or a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk *top, *fail_clk;
        int ret = 0;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)
                goto out;

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
                ret = -EINVAL;
                goto out;
        }

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
                ret = -EBUSY;
                goto out;
        }

        /* change the rates */
        clk_change_rate(top);

        mutex_unlock(&prepare_lock);

        return 0;
out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
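
/*
 * Example (illustrative): a typical consumer sequence.  Since the achieved
 * rate may differ from the request (see clk_round_rate above), read the
 * rate back after setting it.  The 74.25 MHz target is hypothetical.
 */
#if 0
        int ret;
        unsigned long actual;

        ret = clk_set_rate(clk, 74250000);
        if (ret)
                return ret;

        actual = clk_get_rate(clk);     /* cached rate after the change */
#endif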

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
        struct clk *parent;

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

        return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
        struct clk *ret = NULL;
        u8 index;

        /* handle the trivial cases */

        if (!clk->num_parents)
                goto out;

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        ret = clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }

        if (!clk->ops->get_parent) {
                WARN(!clk->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        }

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function
         */

        index = clk->ops->get_parent(clk->hw);

        if (!clk->parents)
                clk->parents =
                        kmalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);

        if (!clk->parents)
                ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);
        else
                ret = clk->parents[index];

out:
        return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *d;
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)
                return;

        hlist_del(&clk->child_node);

        if (new_parent)
                hlist_add_head(&clk->child_node, &new_parent->children);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        if (!inited)
                goto out;

        if (new_parent)
                new_parent_d = new_parent->dentry;
        else
                new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);
        if (d)
                clk->dentry = d;
        else
                pr_debug("%s: failed to rename debugfs entry for %s\n",
                                __func__, clk->name);
out:
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk *old_parent;
        unsigned long flags;
        int ret = -EINVAL;
        u8 i;

        old_parent = clk->parent;

        /* find index of new parent clock using cached parent ptrs */
        for (i = 0; i < clk->num_parents; i++)
                if (clk->parents[i] == parent)
                        break;

        /*
         * find index of new parent clock using string name comparison
         * also try to cache the parent to avoid future calls to __clk_lookup
         */
        if (i == clk->num_parents)
                for (i = 0; i < clk->num_parents; i++)
                        if (!strcmp(clk->parent_names[i], parent->name)) {
                                clk->parents[i] = __clk_lookup(parent->name);
                                break;
                        }

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);
                goto out;
        }

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);

out:
        return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, or a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = 0;

        if (!clk || !clk->ops)
                return -EINVAL;

        if (!clk->ops->set_parent)
                return -ENOSYS;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)
                goto out;

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects */
        if (ret == NOTIFY_STOP)
                goto out;

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
                ret = -EBUSY;
        else
                ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
        if (ret) {
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
                goto out;
        }

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
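
/*
 * Example (illustrative): switching a hypothetical mux clock to a new
 * parent.  If the mux was registered with CLK_SET_PARENT_GATE it must be
 * unprepared (and hence disabled) first, or clk_set_parent returns -EBUSY.
 * The clock names are hypothetical and error handling is trimmed.
 */
#if 0
        struct clk *mux = clk_get(dev, "mux");
        struct clk *pll = clk_get(dev, "pll_b");
        int ret;

        clk_disable(mux);
        clk_unprepare(mux);     /* drops prepare_count for the gate check */
        ret = clk_set_parent(mux, pll);
        clk_prepare(mux);
        clk_enable(mux);
#endif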

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 *
 * Any struct clk passed into __clk_init must have the following members
 * populated:
 * 	.name
 * 	.ops
 * 	.hw
 * 	.parent_names
 * 	.num_parents
 * 	.flags
 *
 * Essentially, everything that would normally be passed into clk_register is
 * assumed to be initialized already in __clk_init.  The other members may be
 * populated, but are optional.
 *
 * __clk_init is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
        int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;

        if (!clk)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
                goto out;
        }

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents && !clk->parents) {
                clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
                                        __clk_lookup(clk->parent_names[i]);
        }

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;
        else
                clk->rate = 0;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);
                                break;
                        }

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);

        clk_debug_register(clk);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @name: clock name
 * @ops: operations this clock supports
 * @hw: link to hardware-specific clock data
 * @parent_names: array of string names for all possible parents
 * @num_parents: number of possible parents
 * @flags: framework-level hints and quirks
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API.  In the event of an error clk_register will
 * return an error code wrapped in ERR_PTR(); drivers must test for errors
 * with IS_ERR() after calling clk_register.
 */
struct clk *clk_register(struct device *dev, const char *name,
                const struct clk_ops *ops, struct clk_hw *hw,
                const char **parent_names, u8 num_parents, unsigned long flags)
{
        int i, ret;
        struct clk *clk;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
                pr_err("%s: could not allocate clk\n", __func__);
                ret = -ENOMEM;
                goto fail_out;
        }

        clk->name = name;
        clk->ops = ops;
        clk->hw = hw;
        clk->flags = flags;
        clk->num_parents = num_parents;
        hw->clk = clk;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char*) * num_parents),
                        GFP_KERNEL);

        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                ret = -ENOMEM;
                goto fail_parent_names;
        }

        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < num_parents; i++) {
                clk->parent_names[i] = kstrdup(parent_names[i], GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }

        ret = __clk_init(dev, clk);
        if (!ret)
                return clk;

fail_parent_names_copy:
        while (--i >= 0)
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);
fail_parent_names:
        kfree(clk);
fail_out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
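
/*
 * Example (illustrative): registering a clock against this version of the
 * API.  The ops table, hw pointer and names are hypothetical; most drivers
 * should prefer the basic clock types (gate, divider, mux) over calling
 * clk_register directly.
 */
#if 0
        static const char *foo_parent_names[] = { "osc24m" };
        struct clk *clk;

        clk = clk_register(dev, "foo", &foo_clk_ops, &foo_hw,
                        foo_parent_names, ARRAY_SIZE(foo_parent_names), 0);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
#endif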

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would take the prepare_lock mutex recursively and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn;
        int ret = -ENOMEM;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
                if (!cn)
                        goto out;

                cn->clk = clk;
                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);
        }

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
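
/*
 * Example (illustrative): a rate-change notifier skeleton.  The callback
 * runs with prepare_lock held, so it must not call back into the clk API.
 * All foo_* names are hypothetical.
 */
#if 0
static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
                void *data)
{
        struct clk_notifier_data *cnd = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                /* prepare for cnd->new_rate, or veto with NOTIFY_BAD */
                return NOTIFY_OK;
        case POST_RATE_CHANGE:
                /* cnd->new_rate is now the current rate */
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block foo_clk_nb = {
        .notifier_call = foo_clk_notify,
};

        /* in probe: */
        clk_notifier_register(clk, &foo_clk_nb);
        /* in remove: */
        clk_notifier_unregister(clk, &foo_clk_nb);
#endif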

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Stops further notification for changes to 'clk' and frees the memory
 * allocated by clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn = NULL;
        int ret = -EINVAL;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        /* unlink before freeing so the list stays valid */
                        list_del(&cn->node);
                        srcu_cleanup_notifier_head(&cn->notifier_head);
                        kfree(cn);
                }

        } else {
                ret = -ENOENT;
        }

        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);