/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
20 struct iio_map_internal
{
21 struct iio_dev
*indio_dev
;
/* Global list of all registered consumer mappings (iio_map_internal). */
static LIST_HEAD(iio_map_list);
/* Protects iio_map_list against concurrent register/unregister/lookup. */
static DEFINE_MUTEX(iio_map_list_lock);
29 int iio_map_array_register(struct iio_dev
*indio_dev
, struct iio_map
*maps
)
32 struct iio_map_internal
*mapi
;
37 mutex_lock(&iio_map_list_lock
);
38 while (maps
[i
].consumer_dev_name
!= NULL
) {
39 mapi
= kzalloc(sizeof(*mapi
), GFP_KERNEL
);
45 mapi
->indio_dev
= indio_dev
;
46 list_add(&mapi
->l
, &iio_map_list
);
50 mutex_unlock(&iio_map_list_lock
);
54 EXPORT_SYMBOL_GPL(iio_map_array_register
);
57 /* Assumes the exact same array (e.g. memory locations)
58 * used at unregistration as used at registration rather than
59 * more complex checking of contents.
61 int iio_map_array_unregister(struct iio_dev
*indio_dev
,
66 struct iio_map_internal
*mapi
;
71 mutex_lock(&iio_map_list_lock
);
72 while (maps
[i
].consumer_dev_name
!= NULL
) {
74 list_for_each_entry(mapi
, &iio_map_list
, l
)
75 if (&maps
[i
] == mapi
->map
) {
81 if (found_it
== false) {
88 mutex_unlock(&iio_map_list_lock
);
92 EXPORT_SYMBOL_GPL(iio_map_array_unregister
);
94 static const struct iio_chan_spec
95 *iio_chan_spec_from_name(const struct iio_dev
*indio_dev
,
99 const struct iio_chan_spec
*chan
= NULL
;
101 for (i
= 0; i
< indio_dev
->num_channels
; i
++)
102 if (indio_dev
->channels
[i
].datasheet_name
&&
103 strcmp(name
, indio_dev
->channels
[i
].datasheet_name
) == 0) {
104 chan
= &indio_dev
->channels
[i
];
111 struct iio_channel
*iio_st_channel_get(const char *name
,
112 const char *channel_name
)
114 struct iio_map_internal
*c_i
= NULL
, *c
= NULL
;
115 struct iio_channel
*channel
;
117 if (name
== NULL
&& channel_name
== NULL
)
118 return ERR_PTR(-ENODEV
);
120 /* first find matching entry the channel map */
121 mutex_lock(&iio_map_list_lock
);
122 list_for_each_entry(c_i
, &iio_map_list
, l
) {
123 if ((name
&& strcmp(name
, c_i
->map
->consumer_dev_name
) != 0) ||
125 strcmp(channel_name
, c_i
->map
->consumer_channel
) != 0))
128 get_device(&c
->indio_dev
->dev
);
131 mutex_unlock(&iio_map_list_lock
);
133 return ERR_PTR(-ENODEV
);
135 channel
= kmalloc(sizeof(*channel
), GFP_KERNEL
);
137 return ERR_PTR(-ENOMEM
);
139 channel
->indio_dev
= c
->indio_dev
;
141 if (c
->map
->adc_channel_label
)
143 iio_chan_spec_from_name(channel
->indio_dev
,
144 c
->map
->adc_channel_label
);
148 EXPORT_SYMBOL_GPL(iio_st_channel_get
);
150 void iio_st_channel_release(struct iio_channel
*channel
)
152 put_device(&channel
->indio_dev
->dev
);
155 EXPORT_SYMBOL_GPL(iio_st_channel_release
);
157 struct iio_channel
*iio_st_channel_get_all(const char *name
)
159 struct iio_channel
*chans
;
160 struct iio_map_internal
*c
= NULL
;
166 return ERR_PTR(-EINVAL
);
168 mutex_lock(&iio_map_list_lock
);
169 /* first count the matching maps */
170 list_for_each_entry(c
, &iio_map_list
, l
)
171 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
181 /* NULL terminated array to save passing size */
182 chans
= kzalloc(sizeof(*chans
)*(nummaps
+ 1), GFP_KERNEL
);
188 /* for each map fill in the chans element */
189 list_for_each_entry(c
, &iio_map_list
, l
) {
190 if (name
&& strcmp(name
, c
->map
->consumer_dev_name
) != 0)
192 chans
[mapind
].indio_dev
= c
->indio_dev
;
193 chans
[mapind
].channel
=
194 iio_chan_spec_from_name(chans
[mapind
].indio_dev
,
195 c
->map
->adc_channel_label
);
196 if (chans
[mapind
].channel
== NULL
) {
198 put_device(&chans
[mapind
].indio_dev
->dev
);
199 goto error_free_chans
;
201 get_device(&chans
[mapind
].indio_dev
->dev
);
204 mutex_unlock(&iio_map_list_lock
);
207 goto error_free_chans
;
212 for (i
= 0; i
< nummaps
; i
++)
213 if (chans
[i
].indio_dev
)
214 put_device(&chans
[i
].indio_dev
->dev
);
217 mutex_unlock(&iio_map_list_lock
);
221 EXPORT_SYMBOL_GPL(iio_st_channel_get_all
);
223 void iio_st_channel_release_all(struct iio_channel
*channels
)
225 struct iio_channel
*chan
= &channels
[0];
227 while (chan
->indio_dev
) {
228 put_device(&chan
->indio_dev
->dev
);
233 EXPORT_SYMBOL_GPL(iio_st_channel_release_all
);
235 int iio_st_read_channel_raw(struct iio_channel
*chan
, int *val
)
239 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
240 if (chan
->indio_dev
->info
== NULL
) {
245 ret
= chan
->indio_dev
->info
->read_raw(chan
->indio_dev
, chan
->channel
,
248 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
252 EXPORT_SYMBOL_GPL(iio_st_read_channel_raw
);
254 int iio_st_read_channel_scale(struct iio_channel
*chan
, int *val
, int *val2
)
258 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
259 if (chan
->indio_dev
->info
== NULL
) {
264 ret
= chan
->indio_dev
->info
->read_raw(chan
->indio_dev
,
267 IIO_CHAN_INFO_SCALE
);
269 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
273 EXPORT_SYMBOL_GPL(iio_st_read_channel_scale
);
275 int iio_st_get_channel_type(struct iio_channel
*chan
,
276 enum iio_chan_type
*type
)
279 /* Need to verify underlying driver has not gone away */
281 mutex_lock(&chan
->indio_dev
->info_exist_lock
);
282 if (chan
->indio_dev
->info
== NULL
) {
287 *type
= chan
->channel
->type
;
289 mutex_unlock(&chan
->indio_dev
->info_exist_lock
);
293 EXPORT_SYMBOL_GPL(iio_st_get_channel_type
);