/* Linux 2.6.33.13 - drivers/staging/iio/industrialio-core.c */

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include "iio.h"
#include "trigger_consumer.h"

#define IIO_ID_PREFIX "device"
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"

/* IDR to assign each registered device a unique id */
static DEFINE_IDR(iio_idr);

/* IDR for general event identifiers */
static DEFINE_IDR(iio_event_idr);
/* IDR to allocate character device minor numbers */
static DEFINE_IDR(iio_chrdev_idr);
/* Lock used to protect all three of the above */
static DEFINE_SPINLOCK(iio_idr_lock);
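
/*
 * All three IDRs above are modified under iio_idr_lock; the
 * idr_pre_get() memory reservation is done before the lock is taken
 * (see the helpers below), so only the tree insertion itself runs
 * under the spinlock.
 */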

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
static char *iio_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "iio/%s", dev_name(dev));
}

struct class iio_class = {
	.name = "iio",
	.devnode = iio_devnode,
};
EXPORT_SYMBOL(iio_class);

void __iio_change_event(struct iio_detected_event_list *ev,
			int ev_code,
			s64 timestamp)
{
	ev->ev.id = ev_code;
	ev->ev.timestamp = timestamp;
}
EXPORT_SYMBOL(__iio_change_event);

/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
		     int ev_code,
		     s64 timestamp,
		     struct iio_shared_ev_pointer *shared_pointer_p)
{
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
		if (ev_int->current_events == ev_int->max_events)
			/* Queue full: silently drop the event */
			goto error_unlock;
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			goto error_unlock;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;
		ev->shared_pointer = shared_pointer_p;
		if (ev->shared_pointer)
			shared_pointer_p->ev_p = ev;

		list_add_tail(&ev->list, &ev_int->det_events.list);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
		return 0;
	}

error_unlock:
	/* Never leave with the mutex held on the early exits above */
	mutex_unlock(&ev_int->event_list_lock);
	return ret;
}
EXPORT_SYMBOL(__iio_push_event);

int iio_push_event(struct iio_dev *dev_info,
		   int ev_line,
		   int ev_code,
		   s64 timestamp)
{
	return __iio_push_event(&dev_info->event_interfaces[ev_line],
				ev_code, timestamp, NULL);
}
EXPORT_SYMBOL(iio_push_event);
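
/*
 * Typical use from a driver (illustrative sketch only; "st" and the
 * chosen event code are hypothetical, though the IIO_EVENT_CODE_*
 * macros come from iio.h):
 *
 *	iio_push_event(st->indio_dev, 0,
 *		       IIO_EVENT_CODE_ACCEL_X_HIGH,
 *		       iio_get_time_ns());
 *
 * As noted above, a client must already hold the event chrdev open or
 * the event is dropped.
 */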

/* Generic interrupt line interrupt handler */
irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
	struct iio_interrupt *int_info = _int_info;
	struct iio_dev *dev_info = int_info->dev_info;
	struct iio_event_handler_list *p;
	s64 time_ns;
	unsigned long flags;

	spin_lock_irqsave(&int_info->ev_list_lock, flags);
	if (list_empty(&int_info->ev_list)) {
		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
		return IRQ_NONE;
	}

	time_ns = iio_get_time_ns();
	/* detect single element list */
	if (list_is_singular(&int_info->ev_list)) {
		disable_irq_nosync(irq);
		p = list_first_entry(&int_info->ev_list,
				     struct iio_event_handler_list,
				     list);
		/* single event handler - maybe shared */
		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
	} else {
		list_for_each_entry(p, &int_info->ev_list, list) {
			disable_irq_nosync(irq);
			p->handler(dev_info, 1, time_ns, 0);
		}
	}
	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);

	return IRQ_HANDLED;
}
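
/*
 * Note the implied contract above: the line is disabled with
 * disable_irq_nosync() before a handler is invoked, and re-enabling it
 * once the device has been serviced is left to the handler (typically
 * from the work it schedules).
 */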

static struct iio_interrupt *iio_allocate_interrupt(void)
{
	struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);

	if (i) {
		spin_lock_init(&i->ev_list_lock);
		INIT_LIST_HEAD(&i->ev_list);
	}
	return i;
}

/* Confirming the validity of supplied irq is left to drivers. */
int iio_register_interrupt_line(unsigned int irq,
				struct iio_dev *dev_info,
				int line_number,
				unsigned long type,
				const char *name)
{
	int ret;

	dev_info->interrupts[line_number] = iio_allocate_interrupt();
	if (dev_info->interrupts[line_number] == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	dev_info->interrupts[line_number]->line_number = line_number;
	dev_info->interrupts[line_number]->irq = irq;
	dev_info->interrupts[line_number]->dev_info = dev_info;

	/* Possibly only request on demand?
	 * Can see this may complicate the handling of interrupts.
	 * However, with this approach we might end up handling lots of
	 * events no-one cares about. */
	ret = request_irq(irq,
			  &iio_interrupt_handler,
			  type,
			  name,
			  dev_info->interrupts[line_number]);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_register_interrupt_line);
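
/*
 * Illustrative registration from a hypothetical driver probe() ("spi",
 * "st" and the name are placeholders):
 *
 *	ret = iio_register_interrupt_line(spi->irq, st->indio_dev, 0,
 *					  IRQF_TRIGGER_RISING, "foo_irq");
 *
 * The matching iio_unregister_interrupt_line(st->indio_dev, 0) belongs
 * in remove(), once the device can no longer raise interrupts.
 */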

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/* Before this runs the interrupt generator must have been disabled */
void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
{
	/* make sure the interrupt handlers are all done */
	flush_scheduled_work();
	free_irq(dev_info->interrupts[line_number]->irq,
		 dev_info->interrupts[line_number]);
	kfree(dev_info->interrupts[line_number]);
}
EXPORT_SYMBOL(iio_unregister_interrupt_line);

/* Reference counted add and remove */
void iio_add_event_to_list(struct iio_event_handler_list *el,
			   struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	/* take mutex to protect this element */
	mutex_lock(&el->exist_lock);
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_add(&el->list, head);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	el->refcount++;
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_add_event_to_list);

void iio_remove_event_from_list(struct iio_event_handler_list *el,
				struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	mutex_lock(&el->exist_lock);
	el->refcount--;
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_del_init(&el->list);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_remove_event_from_list);
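
/*
 * The exist_lock/refcount pairing above means a handler list element
 * is only spliced into (or out of) an interrupt's ev_list on the
 * 0 <-> 1 refcount transitions; all other add/remove calls just adjust
 * the count.
 */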

ssize_t iio_event_chrdev_read(struct file *filep,
			      char __user *buf,
			      size_t count,
			      loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events.list)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
				!list_empty(&ev_int->det_events.list));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events.list,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (count < len) {
		/* Reject buffers too small for one event record */
		ret = -EINVAL;
		goto error_mutex_unlock;
	}
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);

	/*
	 * Possible concurrency issue if an update of this event is on its way
	 * through. May lead to a new event being removed whilst the reported
	 * event was the unescalated one. In the typical use case this is not
	 * a problem, as userspace will, say, read half the buffer due to a
	 * 50% full event, which would make the correct 100% full event
	 * incorrect anyway.
	 */
	if (el->shared_pointer) {
		spin_lock(&el->shared_pointer->lock);
		el->shared_pointer->ev_p = NULL;
		spin_unlock(&el->shared_pointer->lock);
	}

	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:
	return ret;
}
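
/*
 * Note on the read() above: each call returns exactly one fixed-size
 * event record (event code plus timestamp). The count argument is only
 * checked against that record size, so userspace should read records
 * one at a time.
 */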

int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
		list_del(&el->list);
		kfree(el);
	}
	/* Keep the counter consistent with the now-empty list */
	ev_int->current_events = 0;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
		fops_put(filep->f_op);
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	filep->private_data = hand->private;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.open = iio_event_chrdev_open,
	.owner = THIS_MODULE,
};
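
/*
 * Note there is no .poll handler here: blocking read() (or O_NONBLOCK
 * reads returning -EAGAIN) is the only way for userspace to wait on
 * events from this interface.
 */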

static void iio_event_dev_release(struct device *dev)
{
	struct iio_event_interface *ev_int
		= container_of(dev, struct iio_event_interface, dev);
	cdev_del(&ev_int->handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_event_type = {
	.release = iio_event_dev_release,
};

int iio_device_get_chrdev_minor(void)
{
	int ret, val;

idr_again:
	if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_idr_lock);
	ret = idr_get_new(&iio_chrdev_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;
	if (val > IIO_DEV_MAX) {
		/* Out of minors: release the id we just allocated */
		iio_device_free_chrdev_minor(val);
		return -ENOMEM;
	}
	return val;
}
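
/*
 * idr_pre_get()/idr_get_new() is the pre-allocation idiom of this
 * kernel's IDR API, used here and in iio_get_new_idr_val() below:
 * memory is reserved outside the spinlock, and -EAGAIN from
 * idr_get_new() means another caller consumed the reserved node, so
 * the whole sequence is simply retried.
 */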

void iio_device_free_chrdev_minor(int val)
{
	spin_lock(&iio_idr_lock);
	idr_remove(&iio_chrdev_idr, val);
	spin_unlock(&iio_idr_lock);
}

int iio_setup_ev_int(struct iio_event_interface *ev_int,
		     const char *name,
		     struct module *owner,
		     struct device *dev)
{
	int ret, minor;

	ev_int->dev.class = &iio_class;
	ev_int->dev.parent = dev;
	ev_int->dev.type = &iio_event_type;
	device_initialize(&ev_int->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
	dev_set_name(&ev_int->dev, "%s", name);

	ret = device_add(&ev_int->dev);
	if (ret)
		goto error_free_minor;

	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
	ev_int->handler.chrdev.owner = owner;

	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events.list);
	init_waitqueue_head(&ev_int->wait);
	ev_int->handler.private = ev_int;
	ev_int->handler.flags = 0;

	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
	if (ret)
		goto error_unreg_device;

	return 0;

error_unreg_device:
	device_unregister(&ev_int->dev);
error_free_minor:
	iio_device_free_chrdev_minor(minor);
error_device_put:
	put_device(&ev_int->dev);

	return ret;
}

void iio_free_ev_int(struct iio_event_interface *ev_int)
{
	device_unregister(&ev_int->dev);
	put_device(&ev_int->dev);
}

static int __init iio_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);

	return err;
}

static void __exit iio_dev_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
}

static int __init iio_init(void)
{
	int ret;

	/* Create sysfs class */
	ret = class_register(&iio_class);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not create sysfs class\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = iio_dev_init();
	if (ret < 0)
		goto error_unregister_class;

	return 0;

error_unregister_class:
	class_unregister(&iio_class);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	iio_dev_exit();
	class_unregister(&iio_class);
}

static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int ret = 0;

	ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs hooks\n");
		goto error_ret;
	}

	if (dev_info->scan_el_attrs) {
		ret = sysfs_create_group(&dev_info->dev.kobj,
					 dev_info->scan_el_attrs);
		if (ret)
			dev_err(&dev_info->dev,
				"Failed to add sysfs scan els\n");
	}

error_ret:
	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	if (dev_info->scan_el_attrs)
		sysfs_remove_group(&dev_info->dev.kobj,
				   dev_info->scan_el_attrs);

	sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
}

int iio_get_new_idr_val(struct idr *this_idr)
{
	int ret;
	int val;

idr_again:
	if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0))
		return -ENOMEM;

	spin_lock(&iio_idr_lock);
	ret = idr_get_new(this_idr, NULL, &val);
	spin_unlock(&iio_idr_lock);
	if (unlikely(ret == -EAGAIN))
		goto idr_again;
	else if (unlikely(ret))
		return ret;

	return val;
}
EXPORT_SYMBOL(iio_get_new_idr_val);

void iio_free_idr_val(struct idr *this_idr, int id)
{
	spin_lock(&iio_idr_lock);
	idr_remove(this_idr, id);
	spin_unlock(&iio_idr_lock);
}
EXPORT_SYMBOL(iio_free_idr_val);

static int iio_device_register_id(struct iio_dev *dev_info,
				  struct idr *this_idr)
{
	/* use the idr that was passed in rather than assuming iio_idr */
	dev_info->id = iio_get_new_idr_val(this_idr);
	if (dev_info->id < 0)
		return dev_info->id;
	return 0;
}

static void iio_device_unregister_id(struct iio_dev *dev_info)
{
	iio_free_idr_val(&iio_idr, dev_info->id);
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
	int ret;
	/* p for adding, q for removing */
	struct attribute **attrp, **attrq;

	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
		attrp = dev_info->event_conf_attrs[i].attrs;
		while (*attrp) {
			ret = sysfs_add_file_to_group(&dev_info->dev.kobj,
						      *attrp,
						      dev_info
						      ->event_attrs[i].name);
			if (ret)
				goto error_ret;
			attrp++;
		}
	}
	return 0;

error_ret:
	attrq = dev_info->event_conf_attrs[i].attrs;
	while (attrq != attrp) {
		sysfs_remove_file_from_group(&dev_info->dev.kobj,
					     *attrq,
					     dev_info->event_attrs[i].name);
		attrq++;
	}

	return ret;
}
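
/*
 * The unwind above relies on attrp having stopped at the first
 * attribute that failed to add: attrq then walks from the start of the
 * array and removes exactly the files that made it into the group.
 */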

static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
						  int i)
{
	struct attribute **attrq;

	if (dev_info->event_conf_attrs
	    && dev_info->event_conf_attrs[i].attrs) {
		attrq = dev_info->event_conf_attrs[i].attrs;
		while (*attrq) {
			sysfs_remove_file_from_group(&dev_info->dev.kobj,
						     *attrq,
						     dev_info
						     ->event_attrs[i].name);
			attrq++;
		}
	}

	return 0;
}

static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	int ret = 0, i, j;

	if (dev_info->num_interrupt_lines == 0)
		return 0;

	dev_info->event_interfaces =
		kzalloc(sizeof(struct iio_event_interface)
			* dev_info->num_interrupt_lines,
			GFP_KERNEL);
	if (dev_info->event_interfaces == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
				       * dev_info->num_interrupt_lines,
				       GFP_KERNEL);
	if (dev_info->interrupts == NULL) {
		ret = -ENOMEM;
		goto error_free_event_interfaces;
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		dev_info->event_interfaces[i].owner = dev_info->driver_module;
		/* ids are non-negative, so only negative values are errors */
		ret = iio_get_new_idr_val(&iio_event_idr);
		if (ret < 0)
			goto error_free_setup_ev_ints;
		else
			dev_info->event_interfaces[i].id = ret;

		snprintf(dev_info->event_interfaces[i]._name, 20,
			 "event_line%d",
			 dev_info->event_interfaces[i].id);

		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
				       (const char *)(dev_info
						      ->event_interfaces[i]
						      ._name),
				       dev_info->driver_module,
				       &dev_info->dev);
		if (ret) {
			dev_err(&dev_info->dev,
				"Could not get chrdev interface\n");
			iio_free_idr_val(&iio_event_idr,
					 dev_info->event_interfaces[i].id);
			goto error_free_setup_ev_ints;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		snprintf(dev_info->event_interfaces[i]._attrname, 20,
			 "event_line%d_sources", i);
		dev_info->event_attrs[i].name
			= (const char *)
			(dev_info->event_interfaces[i]._attrname);
		ret = sysfs_create_group(&dev_info->dev.kobj,
					 &dev_info->event_attrs[i]);
		if (ret) {
			dev_err(&dev_info->dev,
				"Failed to register sysfs for event attrs\n");
			goto error_remove_sysfs_interfaces;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		ret = __iio_add_event_config_attrs(dev_info, i);
		if (ret)
			goto error_unregister_config_attrs;
	}

	return 0;

error_unregister_config_attrs:
	for (j = 0; j < i; j++)
		__iio_remove_event_config_attrs(dev_info, j);
	/* every line completed the earlier stages, so unwind them all */
	i = dev_info->num_interrupt_lines;
error_remove_sysfs_interfaces:
	for (j = 0; j < i; j++)
		sysfs_remove_group(&dev_info->dev.kobj,
				   &dev_info->event_attrs[j]);
	i = dev_info->num_interrupt_lines;
error_free_setup_ev_ints:
	for (j = 0; j < i; j++) {
		iio_free_idr_val(&iio_event_idr,
				 dev_info->event_interfaces[j].id);
		iio_free_ev_int(&dev_info->event_interfaces[j]);
	}
	kfree(dev_info->interrupts);
error_free_event_interfaces:
	kfree(dev_info->event_interfaces);
error_ret:
	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	int i;

	if (dev_info->num_interrupt_lines == 0)
		return;
	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		sysfs_remove_group(&dev_info->dev.kobj,
				   &dev_info->event_attrs[i]);

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		iio_free_idr_val(&iio_event_idr,
				 dev_info->event_interfaces[i].id);
		iio_free_ev_int(&dev_info->event_interfaces[i]);
	}
	kfree(dev_info->interrupts);
	kfree(dev_info->event_interfaces);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);

	iio_put();
	kfree(dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

struct iio_dev *iio_allocate_device(void)
{
	struct iio_dev *dev = kzalloc(sizeof *dev, GFP_KERNEL);

	if (dev) {
		dev->dev.type = &iio_dev_type;
		dev->dev.class = &iio_class;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		iio_get();
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		iio_put_device(dev);
}
EXPORT_SYMBOL(iio_free_device);
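
/*
 * Typical driver bring-up sequence (illustrative only; "spi" and the
 * "foo" names are placeholders):
 *
 *	indio_dev = iio_allocate_device();
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->dev.parent = &spi->dev;
 *	indio_dev->attrs = &foo_attribute_group;
 *	indio_dev->driver_module = THIS_MODULE;
 *	ret = iio_device_register(indio_dev);
 *
 * iio_device_unregister() then reverses these steps in the opposite
 * order at remove() time.
 */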

int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_idr);
	if (ret) {
		dev_err(&dev_info->dev, "Failed to get id\n");
		goto error_ret;
	}
	dev_set_name(&dev_info->dev, "device%d", dev_info->id);

	ret = device_add(&dev_info->dev);
	if (ret)
		goto error_free_idr;
	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_del_device;
	}
	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	return 0;

error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_del_device:
	device_del(&dev_info->dev);
error_free_idr:
	iio_device_unregister_id(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

void iio_device_unregister(struct iio_dev *dev_info)
{
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	iio_device_unregister_id(dev_info);
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);

void iio_put(void)
{
	module_put(THIS_MODULE);
}

void iio_get(void)
{
	__module_get(THIS_MODULE);
}
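
/*
 * iio_get()/iio_put() pin this core module for the lifetime of every
 * allocated iio_dev: the reference is taken in iio_allocate_device()
 * and dropped from iio_dev_release(), so the core cannot be unloaded
 * while any device is still live.
 */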

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");