/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
        wait_queue_head_t wait;
        DECLARE_KFIFO(det_events, struct iio_event_data, 16);

        struct list_head dev_attr_list;
        unsigned long flags;
        struct attribute_group group;
};

int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
        struct iio_event_interface *ev_int = indio_dev->event_interface;
        struct iio_event_data ev;
        unsigned long flags;
        int copied;

        /* Does anyone care? */
        spin_lock_irqsave(&ev_int->wait.lock, flags);
        if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {

                ev.id = ev_code;
                ev.timestamp = timestamp;

                copied = kfifo_put(&ev_int->det_events, ev);
                if (copied != 0)
                        wake_up_locked_poll(&ev_int->wait, POLLIN);
        }
        spin_unlock_irqrestore(&ev_int->wait.lock, flags);

        return 0;
}
EXPORT_SYMBOL(iio_push_event);

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 */
static unsigned int iio_event_poll(struct file *filep,
                                   struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filep->private_data;
        struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int events = 0;

        if (!indio_dev->info)
                return -ENODEV;

        poll_wait(filep, &ev_int->wait, wait);

        spin_lock_irq(&ev_int->wait.lock);
        if (!kfifo_is_empty(&ev_int->det_events))
                events = POLLIN | POLLRDNORM;
        spin_unlock_irq(&ev_int->wait.lock);

        return events;
}

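/*
 * Copy as many queued events as fit into the user buffer. Unless O_NONBLOCK
 * is set, a read on an empty queue blocks until an event is pushed or the
 * device is unregistered.
 */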
static ssize_t iio_event_chrdev_read(struct file *filep,
                                     char __user *buf,
                                     size_t count,
                                     loff_t *f_ps)
{
        struct iio_dev *indio_dev = filep->private_data;
        struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int copied;
        int ret;

        if (!indio_dev->info)
                return -ENODEV;

        if (count < sizeof(struct iio_event_data))
                return -EINVAL;

        spin_lock_irq(&ev_int->wait.lock);
        if (kfifo_is_empty(&ev_int->det_events)) {
                if (filep->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto error_unlock;
                }
                /* Blocking on device; waiting for something to be there */
                ret = wait_event_interruptible_locked_irq(ev_int->wait,
                                !kfifo_is_empty(&ev_int->det_events) ||
                                indio_dev->info == NULL);
                if (ret)
                        goto error_unlock;
                if (indio_dev->info == NULL) {
                        ret = -ENODEV;
                        goto error_unlock;
                }
        }

        /* Single access device so no one else can get the data */
        ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
        spin_unlock_irq(&ev_int->wait.lock);

        return ret ? ret : copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
        struct iio_dev *indio_dev = filep->private_data;
        struct iio_event_interface *ev_int = indio_dev->event_interface;

        spin_lock_irq(&ev_int->wait.lock);
        __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
        /*
         * In order to maintain a clean state for reopening,
         * clear out any awaiting events. The mask will prevent
         * any new __iio_push_event calls running.
         */
        kfifo_reset_out(&ev_int->det_events);
        spin_unlock_irq(&ev_int->wait.lock);

        iio_device_put(indio_dev);

        return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
        .read = iio_event_chrdev_read,
        .poll = iio_event_poll,
        .release = iio_event_chrdev_release,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

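/*
 * Hand out an anonymous file descriptor for the event queue. Only one reader
 * may hold it at a time; IIO_BUSY_BIT_POS marks the queue busy and is cleared
 * again when the fd is released or if creating it fails.
 */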
int iio_event_getfd(struct iio_dev *indio_dev)
{
        struct iio_event_interface *ev_int = indio_dev->event_interface;
        int fd;

        if (ev_int == NULL)
                return -ENODEV;

        spin_lock_irq(&ev_int->wait.lock);
        if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
                spin_unlock_irq(&ev_int->wait.lock);
                return -EBUSY;
        }
        spin_unlock_irq(&ev_int->wait.lock);
        iio_device_get(indio_dev);

        fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
                              indio_dev, O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                spin_lock_irq(&ev_int->wait.lock);
                __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
                spin_unlock_irq(&ev_int->wait.lock);
                iio_device_put(indio_dev);
        }

        return fd;
}

static const char * const iio_ev_type_text[] = {
        [IIO_EV_TYPE_THRESH] = "thresh",
        [IIO_EV_TYPE_MAG] = "mag",
        [IIO_EV_TYPE_ROC] = "roc",
        [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
        [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
        [IIO_EV_DIR_EITHER] = "either",
        [IIO_EV_DIR_RISING] = "rising",
        [IIO_EV_DIR_FALLING] = "falling"
};

static const char * const iio_ev_info_text[] = {
        [IIO_EV_INFO_ENABLE] = "en",
        [IIO_EV_INFO_VALUE] = "value",
        [IIO_EV_INFO_HYSTERESIS] = "hysteresis",
};

static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
        return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
        return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
        return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        int ret;
        bool val;

        ret = strtobool(buf, &val);
        if (ret < 0)
                return ret;

        if (indio_dev->info->write_event_config)
                ret = indio_dev->info->write_event_config(indio_dev,
                        this_attr->address, val);
        else
                ret = indio_dev->info->write_event_config_new(indio_dev,
                        this_attr->c, iio_ev_attr_type(this_attr),
                        iio_ev_attr_dir(this_attr), val);

        return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        int val;

        if (indio_dev->info->read_event_config)
                val = indio_dev->info->read_event_config(indio_dev,
                        this_attr->address);
        else
                val = indio_dev->info->read_event_config_new(indio_dev,
                        this_attr->c, iio_ev_attr_type(this_attr),
                        iio_ev_attr_dir(this_attr));

        if (val < 0)
                return val;

        return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        int val, val2;
        int ret;

        if (indio_dev->info->read_event_value) {
                ret = indio_dev->info->read_event_value(indio_dev,
                        this_attr->address, &val);
                if (ret < 0)
                        return ret;
                return sprintf(buf, "%d\n", val);
        }

        ret = indio_dev->info->read_event_value_new(indio_dev,
                this_attr->c, iio_ev_attr_type(this_attr),
                iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
                &val, &val2);
        if (ret < 0)
                return ret;

        return iio_format_value(buf, ret, val, val2);
}

static ssize_t iio_ev_value_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        int val, val2;
        int ret;

        if (!indio_dev->info->write_event_value &&
            !indio_dev->info->write_event_value_new)
                return -EINVAL;

        if (indio_dev->info->write_event_value) {
                ret = kstrtoint(buf, 10, &val);
                if (ret)
                        return ret;
                ret = indio_dev->info->write_event_value(indio_dev,
                        this_attr->address, val);
        } else {
                ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
                if (ret)
                        return ret;
                ret = indio_dev->info->write_event_value_new(indio_dev,
                        this_attr->c, iio_ev_attr_type(this_attr),
                        iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
                        val, val2);
        }
        if (ret < 0)
                return ret;

        return len;
}

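/*
 * Create one sysfs attribute per info bit set in @mask for a single event
 * spec, e.g. "thresh_rising_en" or "thresh_rising_value". Returns the number
 * of attributes created or a negative error code.
 */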
static int iio_device_add_event(struct iio_dev *indio_dev,
        const struct iio_chan_spec *chan, unsigned int spec_index,
        enum iio_event_type type, enum iio_event_direction dir,
        enum iio_shared_by shared_by, const unsigned long *mask)
{
        ssize_t (*show)(struct device *, struct device_attribute *, char *);
        ssize_t (*store)(struct device *, struct device_attribute *,
                const char *, size_t);
        unsigned int attrcount = 0;
        unsigned int i;
        char *postfix;
        int ret;

        for_each_set_bit(i, mask, sizeof(*mask) * 8) {
                postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
                                iio_ev_type_text[type], iio_ev_dir_text[dir],
                                iio_ev_info_text[i]);
                if (postfix == NULL)
                        return -ENOMEM;

                if (i == IIO_EV_INFO_ENABLE) {
                        show = iio_ev_state_show;
                        store = iio_ev_state_store;
                } else {
                        show = iio_ev_value_show;
                        store = iio_ev_value_store;
                }

                ret = __iio_add_chan_devattr(postfix, chan, show, store,
                        (i << 16) | spec_index, shared_by, &indio_dev->dev,
                        &indio_dev->event_interface->dev_attr_list);
                kfree(postfix);

                if (ret)
                        return ret;

                attrcount++;
        }

        return attrcount;
}

static int iio_device_add_event_sysfs_new(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan)
{
        int ret = 0, i, attrcount = 0;
        enum iio_event_direction dir;
        enum iio_event_type type;

        for (i = 0; i < chan->num_event_specs; i++) {
                type = chan->event_spec[i].type;
                dir = chan->event_spec[i].dir;

                ret = iio_device_add_event(indio_dev, chan, i, type, dir,
                        IIO_SEPARATE, &chan->event_spec[i].mask_separate);
                if (ret < 0)
                        goto error_ret;
                attrcount += ret;

                ret = iio_device_add_event(indio_dev, chan, i, type, dir,
                        IIO_SHARED_BY_TYPE,
                        &chan->event_spec[i].mask_shared_by_type);
                if (ret < 0)
                        goto error_ret;
                attrcount += ret;

                ret = iio_device_add_event(indio_dev, chan, i, type, dir,
                        IIO_SHARED_BY_DIR,
                        &chan->event_spec[i].mask_shared_by_dir);
                if (ret < 0)
                        goto error_ret;
                attrcount += ret;

                ret = iio_device_add_event(indio_dev, chan, i, type, dir,
                        IIO_SHARED_BY_ALL,
                        &chan->event_spec[i].mask_shared_by_all);
                if (ret < 0)
                        goto error_ret;
                attrcount += ret;
        }
        ret = attrcount;

error_ret:
        return ret;
}

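/* Legacy path for drivers that still describe events via chan->event_mask. */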
static int iio_device_add_event_sysfs_old(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan)
{
        int ret = 0, i, attrcount = 0;
        u64 mask = 0;
        char *postfix;

        if (!chan->event_mask)
                return 0;

        for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
                postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
                                    iio_ev_type_text[i/IIO_EV_DIR_MAX],
                                    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
                if (postfix == NULL) {
                        ret = -ENOMEM;
                        goto error_ret;
                }
                if (chan->modified)
                        mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
                                                  i/IIO_EV_DIR_MAX,
                                                  i%IIO_EV_DIR_MAX);
                else if (chan->differential)
                        mask = IIO_EVENT_CODE(chan->type,
                                              0, 0,
                                              i%IIO_EV_DIR_MAX,
                                              i/IIO_EV_DIR_MAX,
                                              0,
                                              chan->channel,
                                              chan->channel2);
                else
                        mask = IIO_UNMOD_EVENT_CODE(chan->type,
                                                    chan->channel,
                                                    i/IIO_EV_DIR_MAX,
                                                    i%IIO_EV_DIR_MAX);

                ret = __iio_add_chan_devattr(postfix,
                                             chan,
                                             iio_ev_state_show,
                                             iio_ev_state_store,
                                             mask,
                                             0,
                                             &indio_dev->dev,
                                             &indio_dev->event_interface->
                                             dev_attr_list);
                kfree(postfix);
                if (ret)
                        goto error_ret;
                attrcount++;

                postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
                                    iio_ev_type_text[i/IIO_EV_DIR_MAX],
                                    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
                if (postfix == NULL) {
                        ret = -ENOMEM;
                        goto error_ret;
                }
                ret = __iio_add_chan_devattr(postfix, chan,
                                             iio_ev_value_show,
                                             iio_ev_value_store,
                                             mask,
                                             0,
                                             &indio_dev->dev,
                                             &indio_dev->event_interface->
                                             dev_attr_list);
                kfree(postfix);
                if (ret)
                        goto error_ret;
                attrcount++;
        }
        ret = attrcount;

error_ret:
        return ret;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan)
{
        if (chan->event_mask)
                return iio_device_add_event_sysfs_old(indio_dev, chan);
        else
                return iio_device_add_event_sysfs_new(indio_dev, chan);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
        int j, ret, attrcount = 0;

        /* Dynamically created from the channels array */
        for (j = 0; j < indio_dev->num_channels; j++) {
                ret = iio_device_add_event_sysfs(indio_dev,
                                                 &indio_dev->channels[j]);
                if (ret < 0)
                        return ret;
                attrcount += ret;
        }

        return attrcount;
}

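/* True if any channel declares events through either the old or new interface. */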
static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
        int j;

        for (j = 0; j < indio_dev->num_channels; j++) {
                if (indio_dev->channels[j].event_mask != 0)
                        return true;
                if (indio_dev->channels[j].num_event_specs != 0)
                        return true;
        }
        return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
        INIT_KFIFO(ev_int->det_events);
        init_waitqueue_head(&ev_int->wait);
}

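/*
 * Build the "events" attribute group from the driver's static event_attrs
 * plus the per-channel attributes generated above, then hook the group into
 * the device's sysfs groups.
 */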
static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p;
        int ret = 0, attrcount_orig = 0, attrcount, attrn;
        struct attribute **attr;

        if (!(indio_dev->info->event_attrs ||
              iio_check_for_dynamic_events(indio_dev)))
                return 0;

        indio_dev->event_interface =
                kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
        if (indio_dev->event_interface == NULL) {
                ret = -ENOMEM;
                goto error_ret;
        }

        INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

        iio_setup_ev_int(indio_dev->event_interface);
        if (indio_dev->info->event_attrs != NULL) {
                attr = indio_dev->info->event_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        if (indio_dev->channels) {
                ret = __iio_add_event_config_attrs(indio_dev);
                if (ret < 0)
                        goto error_free_setup_event_lines;
                attrcount += ret;
        }

        indio_dev->event_interface->group.name = iio_event_group_name;
        indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
                sizeof(indio_dev->event_interface->group.attrs[0]),
                GFP_KERNEL);
        if (indio_dev->event_interface->group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_setup_event_lines;
        }
        if (indio_dev->info->event_attrs)
                memcpy(indio_dev->event_interface->group.attrs,
                       indio_dev->info->event_attrs->attrs,
                       sizeof(indio_dev->event_interface->group.attrs[0])
                       * attrcount_orig);
        attrn = attrcount_orig;
        /* Add all elements from the list. */
        list_for_each_entry(p,
                            &indio_dev->event_interface->dev_attr_list,
                            l)
                indio_dev->event_interface->group.attrs[attrn++] =
                        &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] =
                &indio_dev->event_interface->group;

        return 0;

error_free_setup_event_lines:
        iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
        kfree(indio_dev->event_interface);
error_ret:
        return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
        if (indio_dev->event_interface == NULL)
                return;
        wake_up(&indio_dev->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
        if (indio_dev->event_interface == NULL)
                return;
        iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
        kfree(indio_dev->event_interface->group.attrs);
        kfree(indio_dev->event_interface);
}