/*
 * The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
20 struct iio_map_internal
{
21 struct iio_dev
*indio_dev
;
26 static LIST_HEAD(iio_map_list
);
27 static DEFINE_MUTEX(iio_map_list_lock
);
29 int iio_map_array_register(struct iio_dev
*indio_dev
, struct iio_map
*maps
)
32 struct iio_map_internal
*mapi
;
37 mutex_lock(&iio_map_list_lock
);
38 while (maps
[i
].consumer_dev_name
!= NULL
) {
39 mapi
= kzalloc(sizeof(*mapi
), GFP_KERNEL
);
45 mapi
->indio_dev
= indio_dev
;
46 list_add(&mapi
->l
, &iio_map_list
);
50 mutex_unlock(&iio_map_list_lock
);
54 EXPORT_SYMBOL_GPL(iio_map_array_register
);
58 * Remove all map entries associated with the given iio device
60 int iio_map_array_unregister(struct iio_dev
*indio_dev
)
63 struct iio_map_internal
*mapi
;
64 struct list_head
*pos
, *tmp
;
66 mutex_lock(&iio_map_list_lock
);
67 list_for_each_safe(pos
, tmp
, &iio_map_list
) {
68 mapi
= list_entry(pos
, struct iio_map_internal
, l
);
69 if (indio_dev
== mapi
->indio_dev
) {
75 mutex_unlock(&iio_map_list_lock
);
78 EXPORT_SYMBOL_GPL(iio_map_array_unregister
);
80 static const struct iio_chan_spec
81 *iio_chan_spec_from_name(const struct iio_dev
*indio_dev
, const char *name
)
84 const struct iio_chan_spec
*chan
= NULL
;
86 for (i
= 0; i
< indio_dev
->num_channels
; i
++)
87 if (indio_dev
->channels
[i
].datasheet_name
&&
88 strcmp(name
, indio_dev
->channels
[i
].datasheet_name
) == 0) {
89 chan
= &indio_dev
->channels
[i
];
96 static struct iio_channel
*iio_channel_get_sys(const char *name
,
97 const char *channel_name
)
99 struct iio_map_internal
*c_i
= NULL
, *c
= NULL
;
100 struct iio_channel
*channel
;
103 if (name
== NULL
&& channel_name
== NULL
)
104 return ERR_PTR(-ENODEV
);
106 /* first find matching entry the channel map */
107 mutex_lock(&iio_map_list_lock
);
108 list_for_each_entry(c_i
, &iio_map_list
, l
) {
109 if ((name
&& strcmp(name
, c_i
->map
->consumer_dev_name
) != 0) ||
111 strcmp(channel_name
, c_i
->map
->consumer_channel
) != 0))
114 iio_device_get(c
->indio_dev
);
117 mutex_unlock(&iio_map_list_lock
);
119 return ERR_PTR(-ENODEV
);
121 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
122 if (channel
== NULL
) {
127 channel
->indio_dev
= c
->indio_dev
;
129 if (c
->map
->adc_channel_label
) {
131 iio_chan_spec_from_name(channel
->indio_dev
,
132 c
->map
->adc_channel_label
);
134 if (channel
->channel
== NULL
) {
145 iio_device_put(c
->indio_dev
);
149 struct iio_channel
*iio_channel_get(struct device
*dev
,
150 const char *channel_name
)
152 const char *name
= dev
? dev_name(dev
) : NULL
;
154 return iio_channel_get_sys(name
, channel_name
);
156 EXPORT_SYMBOL_GPL(iio_channel_get
);
158 void iio_channel_release(struct iio_channel
*channel
)
160 iio_device_put(channel
->indio_dev
);
163 EXPORT_SYMBOL_GPL(iio_channel_release
);
165 struct iio_channel
*iio_channel_get_all(struct device
*dev
)
168 struct iio_channel
*chans
;
169 struct iio_map_internal
*c
= NULL
;
175 return ERR_PTR(-EINVAL
);
176 name
= dev_name(dev
);
178 mutex_lock(&iio_map_list_lock
);
179 /* first count the matching maps */
180 list_for_each_entry(c
, &iio_map_list
, l
)
181 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
191 /* NULL terminated array to save passing size */
192 chans
= kzalloc(sizeof(*chans
)*(nummaps
+ 1), GFP_KERNEL
);
198 /* for each map fill in the chans element */
199 list_for_each_entry(c
, &iio_map_list
, l
) {
200 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
202 chans
[mapind
].indio_dev
= c
->indio_dev
;
203 chans
[mapind
].data
= c
->map
->consumer_data
;
204 chans
[mapind
].channel
=
205 iio_chan_spec_from_name(chans
[mapind
].indio_dev
,
206 c
->map
->adc_channel_label
);
207 if (chans
[mapind
].channel
== NULL
) {
209 goto error_free_chans
;
211 iio_device_get(chans
[mapind
].indio_dev
);
216 goto error_free_chans
;
218 mutex_unlock(&iio_map_list_lock
);
223 for (i
= 0; i
< nummaps
; i
++)
224 iio_device_put(chans
[i
].indio_dev
);
227 mutex_unlock(&iio_map_list_lock
);
231 EXPORT_SYMBOL_GPL(iio_channel_get_all
);
233 void iio_channel_release_all(struct iio_channel
*channels
)
235 struct iio_channel
*chan
= &channels
[0];
237 while (chan
->indio_dev
) {
238 iio_device_put(chan
->indio_dev
);
243 EXPORT_SYMBOL_GPL(iio_channel_release_all
);
245 static int iio_channel_read(struct iio_channel
*chan
, int *val
, int *val2
,
246 enum iio_chan_info_enum info
)
253 return chan
->indio_dev
->info
->read_raw(chan
->indio_dev
, chan
->channel
,
257 int iio_read_channel_raw(struct iio_channel
*chan
, int *val
)
261 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
262 if (chan
->indio_dev
->info
== NULL
) {
267 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
269 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
273 EXPORT_SYMBOL_GPL(iio_read_channel_raw
);
275 static int iio_convert_raw_to_processed_unlocked(struct iio_channel
*chan
,
276 int raw
, int *processed
, unsigned int scale
)
278 int scale_type
, scale_val
, scale_val2
, offset
;
282 ret
= iio_channel_read(chan
, &offset
, NULL
, IIO_CHAN_INFO_SCALE
);
286 scale_type
= iio_channel_read(chan
, &scale_val
, &scale_val2
,
287 IIO_CHAN_INFO_SCALE
);
291 switch (scale_type
) {
293 *processed
= raw64
* scale_val
;
295 case IIO_VAL_INT_PLUS_MICRO
:
297 *processed
= -raw64
* scale_val
;
299 *processed
= raw64
* scale_val
;
300 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
303 case IIO_VAL_INT_PLUS_NANO
:
305 *processed
= -raw64
* scale_val
;
307 *processed
= raw64
* scale_val
;
308 *processed
+= div_s64(raw64
* (s64
)scale_val2
* scale
,
311 case IIO_VAL_FRACTIONAL
:
312 *processed
= div_s64(raw64
* (s64
)scale_val
* scale
,
315 case IIO_VAL_FRACTIONAL_LOG2
:
316 *processed
= (raw64
* (s64
)scale_val
* scale
) >> scale_val2
;
325 int iio_convert_raw_to_processed(struct iio_channel
*chan
, int raw
,
326 int *processed
, unsigned int scale
)
330 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
331 if (chan
->indio_dev
->info
== NULL
) {
336 ret
= iio_convert_raw_to_processed_unlocked(chan
, raw
, processed
,
339 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
343 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed
);
345 int iio_read_channel_processed(struct iio_channel
*chan
, int *val
)
349 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
350 if (chan
->indio_dev
->info
== NULL
) {
355 if (iio_channel_has_info(chan
->channel
, IIO_CHAN_INFO_PROCESSED
)) {
356 ret
= iio_channel_read(chan
, val
, NULL
,
357 IIO_CHAN_INFO_PROCESSED
);
359 ret
= iio_channel_read(chan
, val
, NULL
, IIO_CHAN_INFO_RAW
);
362 ret
= iio_convert_raw_to_processed_unlocked(chan
, *val
, val
, 1);
366 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
370 EXPORT_SYMBOL_GPL(iio_read_channel_processed
);
372 int iio_read_channel_scale(struct iio_channel
*chan
, int *val
, int *val2
)
376 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
377 if (chan
->indio_dev
->info
== NULL
) {
382 ret
= iio_channel_read(chan
, val
, val2
, IIO_CHAN_INFO_SCALE
);
384 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
388 EXPORT_SYMBOL_GPL(iio_read_channel_scale
);
390 int iio_get_channel_type(struct iio_channel
*chan
, enum iio_chan_type
*type
)
393 /* Need to verify underlying driver has not gone away */
395 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
396 if (chan
->indio_dev
->info
== NULL
) {
401 *type
= chan
->channel
->type
;
403 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
407 EXPORT_SYMBOL_GPL(iio_get_channel_type
);