drivers/media/v4l2-core/v4l2-clk.c

/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

/* Must be called with clk_lock held */
static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list) {
		if (strcmp(dev_id, clk->dev_id))
			continue;

		if (!id || !clk->id || !strcmp(clk->id, id))
			return clk;
	}

	return ERR_PTR(-ENODEV);
}

struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev), id);

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	mutex_lock(&clk_lock);

	/* Drop the reference only if the clock is still registered */
	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			/* Pin the provider module while the clock is in use */
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so, don't do that
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
		 clk->dev_id, clk->id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);
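
/*
 * A minimal consumer-side sketch of the rule above, assuming a sensor
 * subdev driver that obtained "priv->clk" with v4l2_clk_get() at probe
 * time: every successful v4l2_clk_enable() is paired with exactly one
 * v4l2_clk_disable().  "example_sensor" and to_example_sensor() are
 * hypothetical names used only to illustrate the call pattern.
 *
 *	static int example_sensor_s_power(struct v4l2_subdev *sd, int on)
 *	{
 *		struct example_sensor *priv = to_example_sensor(sd);
 *
 *		if (on)
 *			return v4l2_clk_enable(priv->clk);
 *
 *		v4l2_clk_disable(priv->clk);
 *		return 0;
 *	}
 */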

unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   const char *id, void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->id = kstrdup(id, GFP_KERNEL);
	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if ((id && !clk->id) || !clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
		 __func__, clk->dev_id, clk->id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);
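
For reference, below is a provider-side sketch of how a bridge/host driver might publish a clock through this service, assuming a fixed 24 MHz sensor master clock. The "example_" names, the "mclk" clock id and the "1-003c" I2C device name are hypothetical and only illustrate the v4l2_clk_register()/v4l2_clk_unregister() call pattern used by this file; the ops callbacks mirror the hooks invoked above (enable, disable, get_rate).

	#include <linux/module.h>
	#include <media/v4l2-clk.h>

	static int example_clk_enable(struct v4l2_clk *clk)
	{
		/* ungate the sensor master clock in the bridge hardware */
		return 0;
	}

	static void example_clk_disable(struct v4l2_clk *clk)
	{
		/* gate the sensor master clock again */
	}

	static unsigned long example_clk_get_rate(struct v4l2_clk *clk)
	{
		return 24000000;	/* assumed fixed 24 MHz clock */
	}

	static const struct v4l2_clk_ops example_clk_ops = {
		.owner		= THIS_MODULE,
		.enable		= example_clk_enable,
		.disable	= example_clk_disable,
		.get_rate	= example_clk_get_rate,
	};

	/*
	 * Register the clock under the consumer's device name (here the
	 * hypothetical I2C sensor "1-003c"), so that the sensor driver's
	 * v4l2_clk_get(&client->dev, "mclk") finds it.
	 */
	static struct v4l2_clk *example_host_register_clk(void)
	{
		return v4l2_clk_register(&example_clk_ops, "1-003c", "mclk", NULL);
	}

	/* Unregister only after the consumer has dropped its reference
	 * with v4l2_clk_put(), otherwise the WARN() above triggers. */
	static void example_host_unregister_clk(struct v4l2_clk *clk)
	{
		v4l2_clk_unregister(clk);
	}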