staging:iio: Push interrupt setup down into the drivers for event lines.
drivers/staging/iio/industrialio-core.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include "iio.h"
#include "trigger_consumer.h"
#define IIO_ID_PREFIX "device"
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);
/* IDA to allocate character device minor numbers */
static DEFINE_IDA(iio_chrdev_ida);
/* Lock used to protect both of the above */
static DEFINE_SPINLOCK(iio_ida_lock);

dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);
static const char * const iio_chan_type_name_spec_shared[] = {
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_ACCEL] = "accel",
	[IIO_IN] = "in",
	[IIO_IN_DIFF] = "in-in",
	[IIO_GYRO] = "gyro",
	[IIO_TEMP] = "temp",
	[IIO_MAGN] = "magn",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_INTENSITY] = "intensity",
	[IIO_LIGHT] = "illuminance",
	[IIO_ANGL] = "angl",
};

static const char * const iio_chan_type_name_spec_complex[] = {
	[IIO_IN_DIFF] = "in%d-in%d",
};

static const char * const iio_modifier_names_light[] = {
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

static const char * const iio_modifier_names_axial[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
	[IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
};
/* Used both in the interrupt line put events and the ring buffer ones */

/* Note that in its current form someone has to be listening before events
 * are queued. Hence a client MUST open the chrdev before the ring buffer is
 * switched on.
 */
int __iio_push_event(struct iio_event_interface *ev_int,
		     int ev_code,
		     s64 timestamp)
{
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;

		list_add_tail(&ev->list, &ev_int->det_events.list);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(__iio_push_event);

int iio_push_event(struct iio_dev *dev_info,
		   int ev_line,
		   int ev_code,
		   s64 timestamp)
{
	return __iio_push_event(&dev_info->event_interfaces[ev_line],
				ev_code, timestamp);
}
EXPORT_SYMBOL(iio_push_event);
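
/*
 * Example (editor's sketch, not part of the original file): a driver's event
 * handler, registered on an interrupt line via iio_add_event_to_list(), would
 * typically hand the event to userspace through iio_push_event().  The handler
 * signature is inferred from the call in iio_interrupt_handler() below; the
 * event line, channel and direction chosen here are purely illustrative.
 *
 *	static int example_thresh_handler(struct iio_dev *dev_info, int index,
 *					  s64 timestamp, int no_test)
 *	{
 *		return iio_push_event(dev_info, 0,
 *				      IIO_UNMOD_EVENT_CODE(IIO_IN, 0,
 *							   IIO_EV_TYPE_THRESH,
 *							   IIO_EV_DIR_RISING),
 *				      timestamp);
 *	}
 */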
/* Generic interrupt line interrupt handler */
irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
	struct iio_interrupt *int_info = _int_info;
	struct iio_dev *dev_info = int_info->dev_info;
	struct iio_event_handler_list *p;
	s64 time_ns;
	unsigned long flags;

	spin_lock_irqsave(&int_info->ev_list_lock, flags);
	if (list_empty(&int_info->ev_list)) {
		spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
		return IRQ_NONE;
	}

	time_ns = iio_get_time_ns();
	list_for_each_entry(p, &int_info->ev_list, list) {
		disable_irq_nosync(irq);
		p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
	}
	spin_unlock_irqrestore(&int_info->ev_list_lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_interrupt_handler);

static struct iio_interrupt *iio_allocate_interrupt(void)
{
	struct iio_interrupt *i = kmalloc(sizeof *i, GFP_KERNEL);
	if (i) {
		spin_lock_init(&i->ev_list_lock);
		INIT_LIST_HEAD(&i->ev_list);
	}
	return i;
}

/* Confirming the validity of the supplied irq is left to the drivers. */
int iio_register_interrupt_line(unsigned int irq,
				struct iio_dev *dev_info,
				int line_number,
				unsigned long type,
				const char *name)
{
	int ret = 0;

	dev_info->interrupts[line_number] = iio_allocate_interrupt();
	if (dev_info->interrupts[line_number] == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	dev_info->interrupts[line_number]->line_number = line_number;
	dev_info->interrupts[line_number]->irq = irq;
	dev_info->interrupts[line_number]->dev_info = dev_info;

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_register_interrupt_line);
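
/*
 * Example (editor's sketch, not part of the original file): with interrupt
 * setup pushed down into the drivers, a driver is expected to register the
 * line with the core and then request the irq itself, typically pointing it
 * at iio_interrupt_handler() with the allocated struct iio_interrupt as the
 * dev_id.  Names, flags and the error label below are illustrative only.
 *
 *	ret = iio_register_interrupt_line(irq, indio_dev, 0,
 *					  IRQF_TRIGGER_RISING, "example_dev");
 *	if (ret)
 *		goto error_out;
 *	ret = request_irq(irq, &iio_interrupt_handler, IRQF_TRIGGER_RISING,
 *			  "example_dev", indio_dev->interrupts[0]);
 *	if (ret)
 *		iio_unregister_interrupt_line(indio_dev, 0);
 */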

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);

/* Before this runs the interrupt generator must have been disabled */
void iio_unregister_interrupt_line(struct iio_dev *dev_info, int line_number)
{
	/* make sure the interrupt handlers are all done */
	flush_scheduled_work();
	kfree(dev_info->interrupts[line_number]);
}
EXPORT_SYMBOL(iio_unregister_interrupt_line);

/* Reference counted add and remove */
void iio_add_event_to_list(struct iio_event_handler_list *el,
			   struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	/* take mutex to protect this element */
	mutex_lock(&el->exist_lock);
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_add(&el->list, head);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	el->refcount++;
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_add_event_to_list);

void iio_remove_event_from_list(struct iio_event_handler_list *el,
				struct list_head *head)
{
	unsigned long flags;
	struct iio_interrupt *inter = to_iio_interrupt(head);

	mutex_lock(&el->exist_lock);
	el->refcount--;
	if (el->refcount == 0) {
		/* Take the event list spin lock */
		spin_lock_irqsave(&inter->ev_list_lock, flags);
		list_del_init(&el->list);
		spin_unlock_irqrestore(&inter->ev_list_lock, flags);
	}
	mutex_unlock(&el->exist_lock);
}
EXPORT_SYMBOL(iio_remove_event_from_list);

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events.list)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int
							   ->det_events.list));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events.list,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:

	return ret;
}
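
/*
 * Example (editor's sketch, not part of the original file): userspace drains
 * the event chrdev (named "%s:event%d" after the parent device, e.g.
 * /dev/device0:event0 under a typical udev setup) with blocking read()s.  Each
 * read returns one record whose fields mirror what __iio_push_event() stores:
 * an integer event code and an s64 timestamp.  The struct below is a userspace
 * approximation; the authoritative layout lives in the iio headers.
 *
 *	struct { int id; int64_t timestamp; } ev;
 *	int fd = open("/dev/device0:event0", O_RDONLY);
 *	while (read(fd, &ev, sizeof(ev)) > 0)
 *		printf("event %d at %lld\n", ev.id, (long long)ev.timestamp);
 */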

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->handler.flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events.list, list) {
		list_del(&el->list);
		kfree(el);
	}
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static int iio_event_chrdev_open(struct inode *inode, struct file *filep)
{
	struct iio_handler *hand = iio_cdev_to_handler(inode->i_cdev);
	struct iio_event_interface *ev_int = hand->private;

	mutex_lock(&ev_int->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &hand->flags)) {
		fops_put(filep->f_op);
		mutex_unlock(&ev_int->event_list_lock);
		return -EBUSY;
	}
	filep->private_data = hand->private;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.open = iio_event_chrdev_open,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static void iio_event_dev_release(struct device *dev)
{
	struct iio_event_interface *ev_int
		= container_of(dev, struct iio_event_interface, dev);
	cdev_del(&ev_int->handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}

static struct device_type iio_event_type = {
	.release = iio_event_dev_release,
};

int iio_device_get_chrdev_minor(void)
{
	int ret, val;

ida_again:
	if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
		return -ENOMEM;
	spin_lock(&iio_ida_lock);
	ret = ida_get_new(&iio_chrdev_ida, &val);
	spin_unlock(&iio_ida_lock);
	if (unlikely(ret == -EAGAIN))
		goto ida_again;
	else if (unlikely(ret))
		return ret;
	if (val > IIO_DEV_MAX)
		return -ENOMEM;
	return val;
}

void iio_device_free_chrdev_minor(int val)
{
	spin_lock(&iio_ida_lock);
	ida_remove(&iio_chrdev_ida, val);
	spin_unlock(&iio_ida_lock);
}

static int iio_setup_ev_int(struct iio_event_interface *ev_int,
			    const char *name,
			    struct module *owner,
			    struct device *dev)
{
	int ret, minor;

	ev_int->dev.bus = &iio_bus_type;
	ev_int->dev.parent = dev;
	ev_int->dev.type = &iio_event_type;
	device_initialize(&ev_int->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	ev_int->dev.devt = MKDEV(MAJOR(iio_devt), minor);
	dev_set_name(&ev_int->dev, "%s", name);

	ret = device_add(&ev_int->dev);
	if (ret)
		goto error_free_minor;

	cdev_init(&ev_int->handler.chrdev, &iio_event_chrdev_fileops);
	ev_int->handler.chrdev.owner = owner;

	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events.list);
	init_waitqueue_head(&ev_int->wait);
	ev_int->handler.private = ev_int;
	ev_int->handler.flags = 0;

	ret = cdev_add(&ev_int->handler.chrdev, ev_int->dev.devt, 1);
	if (ret)
		goto error_unreg_device;

	return 0;

error_unreg_device:
	device_unregister(&ev_int->dev);
error_free_minor:
	iio_device_free_chrdev_minor(minor);
error_device_put:
	put_device(&ev_int->dev);

	return ret;
}

static void iio_free_ev_int(struct iio_event_interface *ev_int)
{
	device_unregister(&ev_int->dev);
	put_device(&ev_int->dev);
}

static int __init iio_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);

	return err;
}

static void __exit iio_dev_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = iio_dev_init();
	if (ret < 0)
		goto error_unregister_bus_type;

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	iio_dev_exit();
	bus_unregister(&iio_bus_type);
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret = indio_dev->read_raw(indio_dev, this_attr->c,
				      &val, &val2, this_attr->address);

	if (ret < 0)
		return ret;

	if (ret == IIO_VAL_INT)
		return sprintf(buf, "%d\n", val);
	else if (ret == IIO_VAL_INT_PLUS_MICRO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%06u\n", val, -val2);
		else
			return sprintf(buf, "%d.%06u\n", val, val2);
	} else
		return 0;
}

static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, integer = 0, micro = 0, micro_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->write_raw)
		return -EINVAL;
	if (buf[0] == '-') {
		negative = true;
		buf++;
	}
	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				micro += micro_mult*(*buf - '0');
				if (micro_mult == 1)
					break;
				micro_mult /= 10;
			}
		} else if (*buf == '\n') {
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	if (negative) {
		if (integer)
			integer = -integer;
		else
			micro = -micro;
	}

	ret = indio_dev->write_raw(indio_dev, this_attr->c,
				   integer, micro, this_attr->address);
	if (ret)
		return ret;

	return len;
}
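
/*
 * Worked example of the fixed point parsing above (editor's note): writing
 * "1.5\n" gives integer = 1, micro = 500000; "-0.25\n" gives integer = 0,
 * micro = -250000 (the sign is applied to micro when the integer part is
 * zero); "10\n" gives integer = 10, micro = 0.  Anything beyond digits, one
 * '.', an optional leading '-' and a trailing newline is rejected with
 * -EINVAL.
 */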

static int __iio_build_postfix(struct iio_chan_spec const *chan,
			       bool generic,
			       const char *postfix,
			       char **result)
{
	char *all_post;
	/* 3 options - generic, extend_name, modified - if generic, extend_name
	 * and modified cannot apply.
	 */

	if (generic || (!chan->modified && !chan->extend_name)) {
		all_post = kasprintf(GFP_KERNEL, "%s", postfix);
	} else if (chan->modified) {
		const char *intermediate;
		switch (chan->type) {
		case IIO_INTENSITY:
			intermediate
				= iio_modifier_names_light[chan->channel2];
			break;
		case IIO_ACCEL:
		case IIO_GYRO:
		case IIO_MAGN:
		case IIO_INCLI:
		case IIO_ROT:
		case IIO_ANGL:
			intermediate
				= iio_modifier_names_axial[chan->channel2];
			break;
		default:
			return -EINVAL;
		}
		if (chan->extend_name)
			all_post = kasprintf(GFP_KERNEL, "%s_%s_%s",
					     intermediate,
					     chan->extend_name,
					     postfix);
		else
			all_post = kasprintf(GFP_KERNEL, "%s_%s",
					     intermediate,
					     postfix);
	} else
		all_post = kasprintf(GFP_KERNEL, "%s_%s", chan->extend_name,
				     postfix);
	if (all_post == NULL)
		return -ENOMEM;
	*result = all_post;
	return 0;
}

int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   bool generic)
{
	int ret;
	char *name_format, *full_postfix;
	sysfs_attr_init(&dev_attr->attr);
	ret = __iio_build_postfix(chan, generic, postfix, &full_postfix);
	if (ret)
		goto error_ret;

	/* Special case for types that use both channel numbers in naming */
	if (chan->type == IIO_IN_DIFF && !generic)
		name_format
			= kasprintf(GFP_KERNEL, "%s_%s",
				    iio_chan_type_name_spec_complex[chan->type],
				    full_postfix);
	else if (generic || !chan->indexed)
		name_format
			= kasprintf(GFP_KERNEL, "%s_%s",
				    iio_chan_type_name_spec_shared[chan->type],
				    full_postfix);
	else
		name_format
			= kasprintf(GFP_KERNEL, "%s%d_%s",
				    iio_chan_type_name_spec_shared[chan->type],
				    chan->channel,
				    full_postfix);

	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}
	kfree(name_format);
	kfree(full_postfix);

	return 0;

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);
error_ret:
	return ret;
}
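
/*
 * Naming examples for the rules above (editor's note): an indexed IIO_IN
 * channel 0 with postfix "raw" becomes "in0_raw"; the generic (shared) form
 * of its scale attribute becomes "in_scale"; a non-indexed, modified
 * IIO_ACCEL channel with channel2 = IIO_MOD_X becomes "accel_x_raw"; an
 * IIO_IN_DIFF channel pair (1, 0) becomes "in1-in0_raw".
 */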

void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   const char *group,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   int mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}

	ret = sysfs_add_file_to_group(&dev->kobj,
				      &iio_attr->dev_attr.attr, group);
	if (ret < 0)
		goto error_device_attr_deinit;

	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}

static int iio_device_add_channel_sysfs(struct iio_dev *dev_info,
					struct iio_chan_spec const *chan)
{
	int ret, i;

	if (chan->channel < 0)
		return 0;
	if (chan->processed_val)
		ret = __iio_add_chan_devattr("input", NULL, chan,
					     &iio_read_channel_info,
					     NULL,
					     0,
					     0,
					     &dev_info->dev,
					     &dev_info->channel_attr_list);
	else
		ret = __iio_add_chan_devattr("raw", NULL, chan,
					     &iio_read_channel_info,
					     NULL,
					     0,
					     0,
					     &dev_info->dev,
					     &dev_info->channel_attr_list);
	if (ret)
		goto error_ret;

	for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
					     NULL, chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     (1 << i),
					     !(i%2),
					     &dev_info->dev,
					     &dev_info->channel_attr_list);
		if (ret == -EBUSY && (i%2 == 0)) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto error_ret;
	}
error_ret:
	return ret;
}
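
/*
 * Editor's note on the info_mask loop above: the bits come in shared/separate
 * pairs (see iio_chan_info_postfix), so bit i maps to postfix i/2 and an even
 * (shared) bit is built as the generic name.  For an indexed IIO_IN channel 0
 * the shared scale bit therefore yields "in_scale" while the separate bit
 * would yield "in0_scale"; -EBUSY on a shared attribute simply means another
 * channel already created it, so it is silently skipped.
 */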

static void iio_device_remove_and_free_read_attr(struct iio_dev *dev_info,
						 struct iio_dev_attr *p)
{
	sysfs_remove_file_from_group(&dev_info->dev.kobj,
				     &p->dev_attr.attr, NULL);
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int i, ret = 0;
	struct iio_dev_attr *p, *n;

	if (dev_info->attrs) {
		ret = sysfs_create_group(&dev_info->dev.kobj, dev_info->attrs);
		if (ret) {
			dev_err(dev_info->dev.parent,
				"Failed to register sysfs hooks\n");
			goto error_ret;
		}
	}

	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	INIT_LIST_HEAD(&dev_info->channel_attr_list);
	if (dev_info->channels)
		for (i = 0; i < dev_info->num_channels; i++) {
			ret = iio_device_add_channel_sysfs(dev_info,
							   &dev_info
							   ->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
		}

	return 0;
error_clear_attrs:
	list_for_each_entry_safe(p, n,
				 &dev_info->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(dev_info, p);
	}
	if (dev_info->attrs)
		sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
error_ret:
	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &dev_info->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(dev_info, p);
	}
	if (dev_info->attrs)
		sysfs_remove_group(&dev_info->dev.kobj, dev_info->attrs);
}

/* Return a negative errno on failure */
int iio_get_new_ida_val(struct ida *this_ida)
{
	int ret;
	int val;

ida_again:
	if (unlikely(ida_pre_get(this_ida, GFP_KERNEL) == 0))
		return -ENOMEM;

	spin_lock(&iio_ida_lock);
	ret = ida_get_new(this_ida, &val);
	spin_unlock(&iio_ida_lock);
	if (unlikely(ret == -EAGAIN))
		goto ida_again;
	else if (unlikely(ret))
		return ret;

	return val;
}
EXPORT_SYMBOL(iio_get_new_ida_val);

void iio_free_ida_val(struct ida *this_ida, int id)
{
	spin_lock(&iio_ida_lock);
	ida_remove(this_ida, id);
	spin_unlock(&iio_ida_lock);
}
EXPORT_SYMBOL(iio_free_ida_val);

static int iio_device_register_id(struct iio_dev *dev_info,
				  struct ida *this_ida)
{
	dev_info->id = iio_get_new_ida_val(&iio_ida);
	if (dev_info->id < 0)
		return dev_info->id;
	return 0;
}

static void iio_device_unregister_id(struct iio_dev *dev_info)
{
	iio_free_ida_val(&iio_ida, dev_info->id);
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc"
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
	int ret;
	unsigned long val;
	ret = strict_strtoul(buf, 10, &val);
	if (ret || val < 0 || val > 1)
		return -EINVAL;

	ret = indio_dev->write_event_config(indio_dev, this_attr->mask,
					    this_attr->listel,
					    val);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_event_attr *this_attr = to_iio_event_attr(attr);
	int val = indio_dev->read_event_config(indio_dev, this_attr->mask);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->read_event_value(indio_dev,
					  this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->write_event_value(indio_dev, this_attr->address,
					   val);
	if (ret < 0)
		return ret;

	return len;
}

static int __iio_add_chan_event_attr(const char *postfix,
				     const char *group,
				     struct iio_chan_spec const *chan,
				     unsigned int mask,
				     struct device *dev,
				     struct list_head *attr_list)
{
	char *name_format, *full_postfix;
	int ret;
	struct iio_event_attr *iio_ev_attr;

	iio_ev_attr = kzalloc(sizeof *iio_ev_attr, GFP_KERNEL);
	if (iio_ev_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	sysfs_attr_init(&iio_ev_attr->dev_attr.attr);
	ret = __iio_build_postfix(chan, 0, postfix, &full_postfix);
	if (ret)
		goto error_ret;
	/* Special case for types that use both channel numbers in naming */
	if (chan->type == IIO_IN_DIFF)
		name_format
			= kasprintf(GFP_KERNEL, "%s_%s",
				    iio_chan_type_name_spec_complex[chan->type],
				    full_postfix);
	else if (!chan->indexed)
		name_format
			= kasprintf(GFP_KERNEL, "%s_%s",
				    iio_chan_type_name_spec_shared[chan->type],
				    full_postfix);
	else
		name_format
			= kasprintf(GFP_KERNEL, "%s%d_%s",
				    iio_chan_type_name_spec_shared[chan->type],
				    chan->channel,
				    full_postfix);
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_attr;
	}

	iio_ev_attr->dev_attr.attr.name = kasprintf(GFP_KERNEL,
						    name_format,
						    chan->channel,
						    chan->channel2);
	if (iio_ev_attr->dev_attr.attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	iio_ev_attr->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
	iio_ev_attr->dev_attr.show = &iio_ev_state_show;
	iio_ev_attr->dev_attr.store = &iio_ev_state_store;
	iio_ev_attr->mask = mask;
	iio_ev_attr->listel = chan->shared_handler;
	ret = sysfs_add_file_to_group(&dev->kobj,
				      &iio_ev_attr->dev_attr.attr,
				      group);
	if (ret < 0)
		goto error_free_name;
	list_add(&iio_ev_attr->l, attr_list);
	kfree(name_format);
	return 0;

error_free_name:
	kfree(iio_ev_attr->dev_attr.attr.name);
error_free_name_format:
	kfree(name_format);
error_free_attr:
	kfree(iio_ev_attr);
error_ret:
	return ret;
}

static int iio_device_add_event_sysfs(struct iio_dev *dev_info,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, mask;
	char *postfix;

	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_TYPE_MAX],
				    iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		switch (chan->type) {
			/* Switch this to a table at some point */
		case IIO_IN:
			mask = IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
						    i/IIO_EV_TYPE_MAX,
						    i%IIO_EV_TYPE_MAX);
			break;
		case IIO_ACCEL:
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_TYPE_MAX,
						  i%IIO_EV_TYPE_MAX);
			break;
		case IIO_IN_DIFF:
			mask = IIO_MOD_EVENT_CODE(chan->type, chan->channel,
						  chan->channel2,
						  i/IIO_EV_TYPE_MAX,
						  i%IIO_EV_TYPE_MAX);
			break;
		default:
			printk(KERN_INFO "currently unhandled type of event\n");
		}
		ret = __iio_add_chan_event_attr(postfix,
						NULL,
						chan,
						mask,
						/* HACK - limits us to one
						   event interface - fix by
						   extending the bitmask - but
						   how far? */
						&dev_info->event_interfaces[0]
						.dev,
						&dev_info->event_interfaces[0].
						event_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;

		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_TYPE_MAX],
				    iio_ev_dir_text[i%IIO_EV_TYPE_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, NULL, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &dev_info->event_interfaces[0]
					     .dev,
					     &dev_info->event_interfaces[0]
					     .dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
	}

error_ret:
	return ret;
}
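
/*
 * Editor's note: for an indexed IIO_IN channel 0 with a rising threshold
 * event set in event_mask, the loop above creates "in0_thresh_rising_en"
 * (backed by iio_ev_state_show/store) and "in0_thresh_rising_value" (backed
 * by iio_ev_value_show/store), both attached to the first event interface's
 * chrdev device - the [0] limitation noted in the HACK comment.
 */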

static inline void __iio_remove_all_event_sysfs(struct iio_dev *dev_info,
						const char *groupname,
						int num)
{
	struct iio_dev_attr *p, *n;
	struct iio_event_attr *q, *m;

	list_for_each_entry_safe(p, n,
				 &dev_info->event_interfaces[num].
				 dev_attr_list, l) {
		sysfs_remove_file_from_group(&dev_info
					     ->event_interfaces[num].dev.kobj,
					     &p->dev_attr.attr,
					     groupname);
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
	list_for_each_entry_safe(q, m,
				 &dev_info->event_interfaces[num].
				 event_attr_list, l) {
		sysfs_remove_file_from_group(&dev_info
					     ->event_interfaces[num].dev.kobj,
					     &q->dev_attr.attr,
					     groupname);
		kfree(q->dev_attr.attr.name);
		kfree(q);
	}
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
	int j;
	int ret;
	/* p for adding, q for removing */
	struct attribute **attrp, **attrq;

	if (dev_info->event_conf_attrs && dev_info->event_conf_attrs[i].attrs) {
		attrp = dev_info->event_conf_attrs[i].attrs;
		while (*attrp) {
			ret = sysfs_add_file_to_group(&dev_info
						      ->event_interfaces[0]
						      .dev.kobj,
						      *attrp,
						      NULL);
			if (ret)
				goto error_ret;
			attrp++;
		}
	}

	INIT_LIST_HEAD(&dev_info->event_interfaces[0].event_attr_list);
	INIT_LIST_HEAD(&dev_info->event_interfaces[0].dev_attr_list);
	/* Dynamically created from the channels array */
	if (dev_info->channels) {
		for (j = 0; j < dev_info->num_channels; j++) {
			ret = iio_device_add_event_sysfs(dev_info,
							 &dev_info
							 ->channels[j]);
			if (ret)
				goto error_clear_attrs;
		}
	}

	return 0;

error_clear_attrs:
	__iio_remove_all_event_sysfs(dev_info,
				     NULL,
				     0);
error_ret:
	attrq = dev_info->event_conf_attrs[i].attrs;
	while (attrq != attrp) {
		sysfs_remove_file_from_group(&dev_info
					     ->event_interfaces[0]
					     .dev.kobj,
					     *attrq,
					     NULL);
		attrq++;
	}

	return ret;
}

static inline int __iio_remove_event_config_attrs(struct iio_dev *dev_info,
						  int i)
{
	struct attribute **attrq;

	__iio_remove_all_event_sysfs(dev_info,
				     NULL,
				     0);
	if (dev_info->event_conf_attrs
	    && dev_info->event_conf_attrs[i].attrs) {
		attrq = dev_info->event_conf_attrs[i].attrs;
		while (*attrq) {
			sysfs_remove_file_from_group(&dev_info
						     ->event_interfaces[0]
						     .dev.kobj,
						     *attrq,
						     NULL);
			attrq++;
		}
	}

	return 0;
}

static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	int ret = 0, i, j;

	if (dev_info->num_interrupt_lines == 0)
		return 0;

	dev_info->event_interfaces =
		kzalloc(sizeof(struct iio_event_interface)
			*dev_info->num_interrupt_lines,
			GFP_KERNEL);
	if (dev_info->event_interfaces == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	dev_info->interrupts = kzalloc(sizeof(struct iio_interrupt *)
				       *dev_info->num_interrupt_lines,
				       GFP_KERNEL);
	if (dev_info->interrupts == NULL) {
		ret = -ENOMEM;
		goto error_free_event_interfaces;
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		dev_info->event_interfaces[i].owner = dev_info->driver_module;

		snprintf(dev_info->event_interfaces[i]._name, 20,
			 "%s:event%d",
			 dev_name(&dev_info->dev),
			 i);

		ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
				       (const char *)(dev_info
						      ->event_interfaces[i]
						      ._name),
				       dev_info->driver_module,
				       &dev_info->dev);
		if (ret) {
			dev_err(&dev_info->dev,
				"Could not get chrdev interface\n");
			goto error_free_setup_ev_ints;
		}

		dev_set_drvdata(&dev_info->event_interfaces[i].dev,
				(void *)dev_info);

		if (dev_info->event_attrs != NULL)
			ret = sysfs_create_group(&dev_info
						 ->event_interfaces[i]
						 .dev.kobj,
						 &dev_info->event_attrs[i]);

		if (ret) {
			dev_err(&dev_info->dev,
				"Failed to register sysfs for event attrs");
			goto error_remove_sysfs_interfaces;
		}
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		ret = __iio_add_event_config_attrs(dev_info, i);
		if (ret)
			goto error_unregister_config_attrs;
	}

	return 0;

error_unregister_config_attrs:
	for (j = 0; j < i; j++)
		__iio_remove_event_config_attrs(dev_info, i);
	i = dev_info->num_interrupt_lines - 1;
error_remove_sysfs_interfaces:
	for (j = 0; j < i; j++)
		if (dev_info->event_attrs != NULL)
			sysfs_remove_group(&dev_info
					   ->event_interfaces[j].dev.kobj,
					   &dev_info->event_attrs[j]);
error_free_setup_ev_ints:
	for (j = 0; j < i; j++)
		iio_free_ev_int(&dev_info->event_interfaces[j]);
	kfree(dev_info->interrupts);
error_free_event_interfaces:
	kfree(dev_info->event_interfaces);
error_ret:

	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	int i;

	if (dev_info->num_interrupt_lines == 0)
		return;
	for (i = 0; i < dev_info->num_interrupt_lines; i++) {
		__iio_remove_event_config_attrs(dev_info, i);
		if (dev_info->event_attrs != NULL)
			sysfs_remove_group(&dev_info
					   ->event_interfaces[i].dev.kobj,
					   &dev_info->event_attrs[i]);
	}

	for (i = 0; i < dev_info->num_interrupt_lines; i++)
		iio_free_ev_int(&dev_info->event_interfaces[i]);
	kfree(dev_info->interrupts);
	kfree(dev_info->event_interfaces);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev = to_iio_dev(device);

	iio_put();
	kfree(dev);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};

struct iio_dev *iio_allocate_device(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);
		iio_get();
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev)
		iio_put_device(dev);
}
EXPORT_SYMBOL(iio_free_device);

int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_ida);
	if (ret) {
		dev_err(&dev_info->dev, "Failed to get id\n");
		goto error_ret;
	}
	dev_set_name(&dev_info->dev, "device%d", dev_info->id);

	ret = device_add(&dev_info->dev);
	if (ret)
		goto error_free_ida;
	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_del_device;
	}
	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	return 0;

error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_del_device:
	device_del(&dev_info->dev);
error_free_ida:
	iio_device_unregister_id(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

void iio_device_unregister(struct iio_dev *dev_info)
{
	if (dev_info->modes & INDIO_RING_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	iio_device_unregister_id(dev_info);
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
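
/*
 * Example (editor's sketch, not part of the original file): the minimal
 * allocate/register sequence a driver built on this core might use.  The
 * channel table, read_raw callback and state struct are hypothetical; only
 * the iio_* calls and the struct iio_dev fields referenced elsewhere in this
 * file are taken from the real API.
 *
 *	indio_dev = iio_allocate_device(sizeof(struct example_state));
 *	if (indio_dev == NULL)
 *		return -ENOMEM;
 *	indio_dev->dev.parent = &client->dev;
 *	indio_dev->driver_module = THIS_MODULE;
 *	indio_dev->channels = example_channels;
 *	indio_dev->num_channels = ARRAY_SIZE(example_channels);
 *	indio_dev->read_raw = &example_read_raw;
 *	ret = iio_device_register(indio_dev);
 *	if (ret)
 *		iio_free_device(indio_dev);
 *
 * and on removal (the release callback frees the device, so no extra
 * iio_free_device() is needed here):
 *
 *	iio_device_unregister(indio_dev);
 */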

void iio_put(void)
{
	module_put(THIS_MODULE);
}

void iio_get(void)
{
	__module_get(THIS_MODULE);
}

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");