drivers/iio/inkern.c

/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
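
/*
 * Illustrative sketch only (not part of the original file): board setup
 * code would typically describe consumer-to-channel wiring in a
 * NULL-terminated iio_map array and register it against the ADC's
 * iio_dev. The device and label names below are hypothetical.
 *
 *	static struct iio_map board_adc_maps[] = {
 *		{
 *			.consumer_dev_name = "some-battery",
 *			.consumer_channel = "voltage",
 *			.adc_channel_label = "VBAT_SENSE",
 *		},
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(adc_indio_dev, board_adc_maps);
 *
 * The empty entry terminates the array, matching the
 * consumer_dev_name != NULL loop condition above.
 */
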
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi;
	struct list_head *pos, *tmp;

	mutex_lock(&iio_map_list_lock);
	list_for_each_safe(pos, tmp, &iio_map_list) {
		mapi = list_entry(pos, struct iio_map_internal, l);
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
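
/*
 * Sketch of typical consumer usage (assumed, not from this file):
 * "voltage" refers to the hypothetical consumer_channel registered in
 * the example map above.
 *
 *	struct iio_channel *chan;
 *
 *	chan = iio_channel_get(&pdev->dev, "voltage");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	iio_channel_release(chan);
 *
 * iio_channel_get() takes a reference on the underlying iio_dev, so a
 * matching iio_channel_release() is required.
 */
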
void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);
	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop references on devices we actually got above */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
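
/*
 * Sketch (assumed usage): the returned array is NULL-terminated via
 * indio_dev, so a consumer can walk it without a separate count:
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(&pdev->dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		... use chan ...
 *	iio_channel_release_all(chans);
 */
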
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;

	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
					       val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	/* add the channel's offset, if the driver provides one */
	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret == 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
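
/*
 * Worked example of the conversion above (illustrative numbers): a
 * 12-bit ADC with a 1.8 V reference might report IIO_VAL_FRACTIONAL
 * with scale_val = 1800 and scale_val2 = 4096, i.e. 1800/4096 mV per
 * LSB. For raw = 2048, no offset and scale = 1:
 *
 *	processed = 2048 * 1800 * 1 / 4096 = 900
 *
 * i.e. 900 mV; passing scale = 1000 would return the same reading
 * scaled to microvolts.
 */
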
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
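
/*
 * Sketch (assumed usage): consumers generally prefer this helper over
 * iio_read_channel_raw(), since it returns a usable value whether the
 * driver reports IIO_CHAN_INFO_PROCESSED directly or only raw + scale:
 *
 *	int val, ret;
 *
 *	ret = iio_read_channel_processed(chan, &val);
 *	if (ret < 0)
 *		return ret;
 *
 * For a voltage channel, val is then in millivolts per the IIO ABI.
 */
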
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);