/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>

#include "iio.h"
#include "trigger_consumer.h"

#define IIO_ID_PREFIX "device"
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"

/* IDR to assign each registered device a unique id */
static DEFINE_IDR(iio_idr);

/* IDR for general event identifiers */
static DEFINE_IDR(iio_event_idr);
/* IDR to allocate character device minor numbers */
static DEFINE_IDR(iio_chrdev_idr);
/* Lock used to protect the IDRs above */
static DEFINE_SPINLOCK(iio_idr_lock);

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
static char *iio_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "iio/%s", dev_name(dev));
}

struct class iio_class = {
	.name = "iio",
	.devnode = iio_devnode,
};
EXPORT_SYMBOL(iio_class);

void __iio_change_event(struct iio_detected_event_list *ev,
			int ev_code,
			s64 timestamp)
{
	ev->ev.id = ev_code;
	ev->ev.timestamp = timestamp;
}
EXPORT_SYMBOL(__iio_change_event);

/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
		     int ev_code,
		     s64 timestamp,
		     struct iio_shared_ev_pointer *shared_pointer_p)
{
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;
		ev->shared_pointer = shared_pointer_p;
		if (ev->shared_pointer)
			shared_pointer_p->ev_p = ev;

		list_add_tail(&ev->list, &ev_int->det_events.list);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else {
		mutex_unlock(&ev_int->event_list_lock);
	}

error_ret:
	return ret;
}
EXPORT_SYMBOL(__iio_push_event);

int iio_push_event(struct iio_dev *dev_info,
		   int ev_line,
		   int ev_code,
		   s64 timestamp)
{
	return __iio_push_event(&dev_info->event_interfaces[ev_line],
				ev_code, timestamp, NULL);
}
EXPORT_SYMBOL(iio_push_event);
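
/*
 * Example (illustrative only, not part of the original file): a driver's
 * interrupt handler might report an event on line 0 roughly like this,
 * assuming a hypothetical, driver-defined event code MY_DRV_EVENT_THRESH:
 *
 *	iio_push_event(indio_dev, 0, MY_DRV_EVENT_THRESH, iio_get_time_ns());
 *
 * As noted above, nothing is queued unless a client already holds the
 * event chrdev open.
 */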

/* Generic interrupt line interrupt handler */
irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
	struct iio_interrupt *int_info = _int_info;
	struct iio_dev *dev_info = int_info->dev_info;
	struct iio_event_handler_list *p;
	s64 time_ns;
	unsigned long flags;

	spin_lock_irqsave(&int_info->ev_list_lock, flags);
	if (list_empty(&int_info->ev_list)) {
		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
		return IRQ_NONE;
	}

	time_ns = iio_get_time_ns();
	/* detect single element list */
	if (list_is_singular(&int_info->ev_list)) {
		disable_irq_nosync(irq);
		p = list_first_entry(&int_info->ev_list,
				     struct iio_event_handler_list,
				     list);
		/* single event handler - maybe shared */
		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
	} else {
		list_for_each_entry(p, &int_info->ev_list, list) {
			disable_irq_nosync(irq);
			p->handler(dev_info, 1, time_ns, 0);
		}
	}
	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);

	return IRQ_HANDLED;
}

static struct iio_interrupt *iio_allocate_interrupt(void)
{
	struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);

	if (i) {
		spin_lock_init(&i->ev_list_lock);
		INIT_LIST_HEAD(&i->ev_list);
	}
	return i;
}

/* Confirming the validity of the supplied irq is left to drivers. */
int iio_register_interrupt_line(unsigned int irq,
				struct iio_dev *dev_info,
				int line_number,
				unsigned long type,
				const char *name)
{
	int ret;

	dev_info->interrupts[line_number] = iio_allocate_interrupt();
	if (dev_info->interrupts[line_number] == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	dev_info->interrupts[line_number]->line_number = line_number;
	dev_info->interrupts[line_number]->irq = irq;
	dev_info->interrupts[line_number]->dev_info = dev_info;

	/* Possibly only request on demand?
	 * Can see this may complicate the handling of interrupts.
	 * However, with this approach we might end up handling lots of
	 * events no one cares about. */
	ret = request_irq(irq,
			  &iio_interrupt_handler,
			  type,
			  name,
			  dev_info->interrupts[line_number]);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_register_interrupt_line);

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
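
/*
 * Example (illustrative only): drivers normally reach iio_read_const_attr()
 * through a constant-attribute helper macro from the IIO sysfs headers,
 * along the lines of:
 *
 *	IIO_CONST_ATTR(name, "my_adc");
 *
 * which exposes a read-only sysfs file whose show() callback is this
 * function. The exact macro name is per the headers shipped alongside this
 * file, not something defined here.
 */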

/* Before this runs the interrupt generator must have been disabled */
void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
{
	/* make sure the interrupt handlers are all done */
	flush_scheduled_work();
	free_irq(dev_info->interrupts[line_number]->irq,
		 dev_info->interrupts[line_number]);
	kfree(dev_info->interrupts[line_number]);
}
EXPORT_SYMBOL(iio_unregister_interrupt_line);

/* Reference counted add and remove */
void iio_add_event_to_list(struct iio_event_handler_list *el,
			   struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	/* take mutex to protect this element */
	mutex_lock(&el->exist_lock);
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_add(&el->list, head);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	el->refcount++;
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_add_event_to_list);

void iio_remove_event_from_list(struct iio_event_handler_list *el,
				struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	mutex_lock(&el->exist_lock);
	el->refcount--;
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_del_init(&el->list);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_remove_event_from_list);
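
/*
 * Example (illustrative only): a driver's event-enable store callback might
 * pair the two calls above. A minimal sketch, assuming the driver owns a
 * statically initialised struct iio_event_handler_list named
 * my_event_handler and hangs it off interrupt line 0:
 *
 *	if (enable)
 *		iio_add_event_to_list(&my_event_handler,
 *				      &indio_dev->interrupts[0]->ev_list);
 *	else
 *		iio_remove_event_from_list(&my_event_handler,
 *					   &indio_dev->interrupts[0]->ev_list);
 *
 * The reference counting lets several logical events share one handler on
 * the same line.
 */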

ssize_t iio_event_chrdev_read(struct file *filep,
			      char __user *buf,
			      size_t count,
			      loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	size_t len;
	int ret;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events.list)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int
							   ->det_events.list));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events.list,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	/*
	 * Possible concurrency issue if an update of this event is on its
	 * way through. May lead to the new event being removed whilst the
	 * reported event was the unescalated one. In the typical use case
	 * this is not a problem, as userspace will, say, read half the
	 * buffer due to a 50% full event, which would make the correct
	 * 100% full event incorrect anyway.
	 */
	if (el->shared_pointer) {
		spin_lock(&el->shared_pointer->lock);
		el->shared_pointer->ev_p = NULL;
		spin_unlock(&el->shared_pointer->lock);
	}

	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:
	return ret;
}
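
/*
 * Example (illustrative only): a userspace client blocks on the event
 * chrdev and gets one fixed-size event record per read(). A minimal sketch,
 * assuming the node created for event line 0 and the event structure
 * exported by this subsystem's userspace header:
 *
 *	int fd = open("/dev/iio/event_line0", O_RDONLY);
 *	struct iio_event_data ev;
 *	read(fd, &ev, sizeof(ev));	// blocks until an event is queued
 *
 * The device node path and structure name are assumptions; they depend on
 * the udev configuration and the header actually installed.
 */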

int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
		list_del(&el->list);
		kfree(el);
	}
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
		fops_put(filep->f_op);
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	filep->private_data = hand->private;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.open = iio_event_chrdev_open,
	.owner = THIS_MODULE,
};

static void iio_event_dev_release(struct device *dev)
{
	struct iio_event_interface *ev_int
		= container_of(dev, struct iio_event_interface, dev);
	cdev_del(&ev_int->handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_event_type = {
	.release = iio_event_dev_release,
};

int iio_device_get_chrdev_minor(void)
{
	int ret, val;

idr_again:
	if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_idr_lock);
	ret = idr_get_new(&iio_chrdev_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;
	if (val > IIO_DEV_MAX)
		return -ENOMEM;
	return val;
}

void iio_device_free_chrdev_minor(int val)
{
	spin_lock(&iio_idr_lock);
	idr_remove(&iio_chrdev_idr, val);
	spin_unlock(&iio_idr_lock);
}

int iio_setup_ev_int(struct iio_event_interface *ev_int,
		     const char *name,
		     struct module *owner,
		     struct device *dev)
{
	int ret, minor;

	ev_int->dev.class = &iio_class;
	ev_int->dev.parent = dev;
	ev_int->dev.type = &iio_event_type;
	device_initialize(&ev_int->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
	dev_set_name(&ev_int->dev, "%s", name);

	ret = device_add(&ev_int->dev);
	if (ret)
		goto error_free_minor;

	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
	ev_int->handler.chrdev.owner = owner;

	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events.list);
	init_waitqueue_head(&ev_int->wait);
	ev_int->handler.private = ev_int;
	ev_int->handler.flags = 0;

	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
	if (ret)
		goto error_unreg_device;

	return 0;

error_unreg_device:
	device_unregister(&ev_int->dev);
error_free_minor:
	iio_device_free_chrdev_minor(minor);
error_device_put:
	put_device(&ev_int->dev);

	return ret;
}

void iio_free_ev_int(struct iio_event_interface *ev_int)
{
	device_unregister(&ev_int->dev);
	put_device(&ev_int->dev);
}

static int __init iio_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);

	return err;
}

static void __exit iio_dev_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
}

static int __init iio_init(void)
{
	int ret;

	/* Create sysfs class */
	ret = class_register(&iio_class);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not create sysfs class\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = iio_dev_init();
	if (ret < 0)
		goto error_unregister_class;

	return 0;

error_unregister_class:
	class_unregister(&iio_class);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	iio_dev_exit();
	class_unregister(&iio_class);
}

static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int ret;

	ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs hooks\n");
		goto error_ret;
	}

	if (dev_info->scan_el_attrs) {
		ret = sysfs_create_group(&dev_info->dev.kobj,
					 dev_info->scan_el_attrs);
		if (ret)
			dev_err(&dev_info->dev,
				"Failed to add sysfs scan elements\n");
	}

error_ret:
	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	if (dev_info->scan_el_attrs)
		sysfs_remove_group(&dev_info->dev.kobj,
				   dev_info->scan_el_attrs);

	sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
}

int iio_get_new_idr_val(struct idr *this_idr)
{
	int ret;
	int val;

idr_again:
	if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0))
		return -ENOMEM;

	spin_lock(&iio_idr_lock);
	ret = idr_get_new(this_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;

	return val;
}
EXPORT_SYMBOL(iio_get_new_idr_val);

void iio_free_idr_val(struct idr *this_idr, int id)
{
	spin_lock(&iio_idr_lock);
	idr_remove(this_idr, id);
	spin_unlock(&iio_idr_lock);
}
EXPORT_SYMBOL(iio_free_idr_val);

static int iio_device_register_id(struct iio_dev *dev_info,
				  struct idr *this_idr)
{
	dev_info->id = iio_get_new_idr_val(&iio_idr);
	if (dev_info->id < 0)
		return dev_info->id;
	return 0;
}

static void iio_device_unregister_id(struct iio_dev *dev_info)
{
	iio_free_idr_val(&iio_idr, dev_info->id);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
	int ret;
	/* p for adding, q for removing */
	struct attribute **attrp, **attrq;

	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
		attrp = dev_info->event_conf_attrs[i].attrs;
		while (*attrp) {
			ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
						      *attrp,
						      dev_info
						      ->event_attrs[i].name);
			if (ret)
				goto error_ret;
			attrp++;
		}
	}

	return 0;

error_ret:
	attrq = dev_info->event_conf_attrs[i].attrs;
	while (attrq != attrp) {
		sysfs_remove_file_from_group(&dev_info->dev.kobj,
					     *attrq,
					     dev_info->event_attrs[i].name);
		attrq++;
	}

	return ret;
}

static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
						  int i)
{
	struct attribute **attrq;

	if (dev_info->event_conf_attrs
	    && dev_info->event_conf_attrs[i].attrs) {
		attrq = dev_info->event_conf_attrs[i].attrs;
		while (*attrq) {
			sysfs_remove_file_from_group(&dev_info->dev.kobj,
						     *attrq,
						     dev_info
						     ->event_attrs[i].name);
			attrq++;
		}
	}

	return 0;
}

static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	int ret = 0, i, j;

	if (dev_info->num_interrupt_lines == 0)
		return 0;

	dev_info->event_interfaces =
		kzalloc(sizeof(struct iio_event_interface)
			*dev_info->num_interrupt_lines,
			GFP_KERNEL);
	if (dev_info->event_interfaces == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
				       *dev_info->num_interrupt_lines,
				       GFP_KERNEL);
	if (dev_info->interrupts == NULL) {
		ret = -ENOMEM;
		goto error_free_event_interfaces;
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		dev_info->event_interfaces[i].owner = dev_info->driver_module;
		ret = iio_get_new_idr_val(&iio_event_idr);
		if (ret < 0)
			goto error_free_setup_ev_ints;
		else
			dev_info->event_interfaces[i].id = ret;

		snprintf(dev_info->event_interfaces[i]._name, 20,
			 "event_line%d",
			 dev_info->event_interfaces[i].id);

		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
				       (const char *)(dev_info
						      ->event_interfaces[i]
						      ._name),
				       dev_info->driver_module,
				       &dev_info->dev);
		if (ret) {
			dev_err(&dev_info->dev,
				"Could not get chrdev interface\n");
			iio_free_idr_val(&iio_event_idr,
					 dev_info->event_interfaces[i].id);
			goto error_free_setup_ev_ints;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		snprintf(dev_info->event_interfaces[i]._attrname, 20,
			 "event_line%d_sources", i);
		dev_info->event_attrs[i].name
			= (const char *)
			(dev_info->event_interfaces[i]._attrname);
		ret = sysfs_create_group(&dev_info->dev.kobj,
					 &dev_info->event_attrs[i]);
		if (ret) {
			dev_err(&dev_info->dev,
				"Failed to register sysfs for event attrs\n");
			goto error_remove_sysfs_interfaces;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		ret = __iio_add_event_config_attrs(dev_info, i);
		if (ret)
			goto error_unregister_config_attrs;
	}

	return 0;

error_unregister_config_attrs:
	for (j = 0; j < i; j++)
		__iio_remove_event_config_attrs(dev_info, j);
	i = dev_info->num_interrupt_lines - 1;
error_remove_sysfs_interfaces:
	for (j = 0; j < i; j++)
		sysfs_remove_group(&dev_info->dev.kobj,
				   &dev_info->event_attrs[j]);
	i = dev_info->num_interrupt_lines - 1;
error_free_setup_ev_ints:
	for (j = 0; j < i; j++) {
		iio_free_idr_val(&iio_event_idr,
				 dev_info->event_interfaces[j].id);
		iio_free_ev_int(&dev_info->event_interfaces[j]);
	}
	kfree(dev_info->interrupts);
error_free_event_interfaces:
	kfree(dev_info->event_interfaces);
error_ret:
	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	int i;

	if (dev_info->num_interrupt_lines == 0)
		return;
	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		sysfs_remove_group(&dev_info->dev.kobj,
				   &dev_info->event_attrs[i]);

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		iio_free_idr_val(&iio_event_idr,
				 dev_info->event_interfaces[i].id);
		iio_free_ev_int(&dev_info->event_interfaces[i]);
	}
	kfree(dev_info->interrupts);
	kfree(dev_info->event_interfaces);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);

	iio_put();
	kfree(dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

struct iio_dev *iio_allocate_device(void)
{
	struct iio_dev *dev = kzalloc(sizeof *dev, GFP_KERNEL);

	if (dev) {
		dev->dev.type = &iio_dev_type;
		dev->dev.class = &iio_class;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		iio_get();
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_free_device);

int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_idr);
	if (ret) {
		dev_err(&dev_info->dev, "Failed to get id\n");
		goto error_ret;
	}
	dev_set_name(&dev_info->dev, "device%d", dev_info->id);

	ret = device_add(&dev_info->dev);
	if (ret)
		goto error_free_id;

	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_del_device;
	}

	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}

	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	return 0;

error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_del_device:
	device_del(&dev_info->dev);
error_free_id:
	iio_device_unregister_id(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);
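
/*
 * Example (illustrative only): a typical driver probe path built on the
 * calls exported above. A minimal sketch, assuming the driver supplies its
 * own my_attr_group attribute group and parent device:
 *
 *	struct iio_dev *indio_dev = iio_allocate_device();
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->dev.parent = parent;
 *	indio_dev->attrs = &my_attr_group;
 *	indio_dev->driver_module = THIS_MODULE;
 *	indio_dev->num_interrupt_lines = 0;
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		iio_free_device(indio_dev);
 *
 * The parent pointer, attribute group, and INDIO_DIRECT_MODE value are
 * assumptions for the sketch; the field names match those used in this file.
 */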

void iio_device_unregister(struct iio_dev *dev_info)
{
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	iio_device_unregister_id(dev_info);
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);

void iio_put(void)
{
	module_put(THIS_MODULE);
}

void iio_get(void)
{
	__module_get(THIS_MODULE);
}

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");