drivers/staging/iio/industrialio-core.c

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
#include "chrdev.h"
#include "sysfs.h"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

static dev_t iio_devt;

#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

static const char * const iio_data_type_name[] = {
	[IIO_RAW] = "raw",
	[IIO_PROCESSED] = "input",
};

static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_GYRO] = "gyro",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
};

static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
	[IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
	[IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
	= "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
};

/**
 * struct iio_detected_event_list - list element for events that have occurred
 * @list:		linked list header
 * @ev:			the event itself
 */
struct iio_detected_event_list {
	struct list_head list;
	struct iio_event_data ev;
};

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @event_list_lock:	mutex to protect the list of detected events
 * @det_events:		list of detected events
 * @max_events:		maximum number of events before new ones are dropped
 * @current_events:	number of events in detected list
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	struct mutex		event_list_lock;
	struct list_head	det_events;
	int			max_events;
	int			current_events;
	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
};
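
/*
 * iio_push_event() queues an event for any listener on the device's event
 * chrdev.  If nobody has the interface open, or the list already holds
 * max_events entries, the event is silently dropped.
 */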
int iio_push_event(struct iio_dev *dev_info, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = dev_info->event_interface;
	struct iio_detected_event_list *ev;
	int ret = 0;

	/* Does anyone care? */
	mutex_lock(&ev_int->event_list_lock);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		if (ev_int->current_events == ev_int->max_events) {
			mutex_unlock(&ev_int->event_list_lock);
			return 0;
		}
		ev = kmalloc(sizeof(*ev), GFP_KERNEL);
		if (ev == NULL) {
			ret = -ENOMEM;
			mutex_unlock(&ev_int->event_list_lock);
			goto error_ret;
		}
		ev->ev.id = ev_code;
		ev->ev.timestamp = timestamp;

		list_add_tail(&ev->list, &ev_int->det_events);
		ev_int->current_events++;
		mutex_unlock(&ev_int->event_list_lock);
		wake_up_interruptible(&ev_int->wait);
	} else
		mutex_unlock(&ev_int->event_list_lock);

error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_push_event);

/* This turns up an awful lot */
ssize_t iio_read_const_attr(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", to_iio_const_attr(attr)->string);
}
EXPORT_SYMBOL(iio_read_const_attr);
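
/*
 * Read a single struct iio_event_data from the detected-event list.  With a
 * blocking open this sleeps until an event arrives; with O_NONBLOCK it
 * returns -EAGAIN when the list is empty.
 */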
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el;
	int ret;
	size_t len;

	mutex_lock(&ev_int->event_list_lock);
	if (list_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_mutex_unlock;
		}
		mutex_unlock(&ev_int->event_list_lock);
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible(ev_int->wait,
					       !list_empty(&ev_int->det_events));
		if (ret)
			goto error_ret;
		/* Single access device so no one else can get the data */
		mutex_lock(&ev_int->event_list_lock);
	}

	el = list_first_entry(&ev_int->det_events,
			      struct iio_detected_event_list,
			      list);
	len = sizeof el->ev;
	if (copy_to_user(buf, &(el->ev), len)) {
		ret = -EFAULT;
		goto error_mutex_unlock;
	}
	list_del(&el->list);
	ev_int->current_events--;
	mutex_unlock(&ev_int->event_list_lock);
	kfree(el);

	return len;

error_mutex_unlock:
	mutex_unlock(&ev_int->event_list_lock);
error_ret:
	return ret;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_detected_event_list *el, *t;

	mutex_lock(&ev_int->event_list_lock);
	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new iio_push_event() calls running.
	 */
	list_for_each_entry_safe(el, t, &ev_int->det_events, list) {
		list_del(&el->list);
		kfree(el);
	}
	ev_int->current_events = 0;
	mutex_unlock(&ev_int->event_list_lock);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static int iio_event_getfd(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return -ENODEV;

	mutex_lock(&indio_dev->event_interface->event_list_lock);
	if (test_and_set_bit(IIO_BUSY_BIT_POS,
			     &indio_dev->event_interface->flags)) {
		mutex_unlock(&indio_dev->event_interface->event_list_lock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->event_interface->event_list_lock);
	return anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops,
				indio_dev->event_interface, O_RDONLY);
}

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		printk(KERN_ERR
		       "%s could not register bus type\n",
		       __FILE__);
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
		       __FILE__);
		goto error_unregister_bus_type;
	}

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
}

static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
					    &val, &val2, this_attr->address);

	if (ret < 0)
		return ret;

	if (ret == IIO_VAL_INT)
		return sprintf(buf, "%d\n", val);
	else if (ret == IIO_VAL_INT_PLUS_MICRO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%06u\n", val, -val2);
		else
			return sprintf(buf, "%d.%06u\n", val, val2);
	} else if (ret == IIO_VAL_INT_PLUS_NANO) {
		if (val2 < 0)
			return sprintf(buf, "-%d.%09u\n", val, -val2);
		else
			return sprintf(buf, "%d.%09u\n", val, val2);
	} else
		return 0;
}
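
/*
 * Parse a decimal value such as "1.5" or "-0.000244" into an integer part and
 * a fractional part scaled according to the format the driver reports via
 * write_raw_get_fmt() (micro or nano), then hand both to write_raw().
 */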
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, integer = 0, fract = 0, fract_mult = 100000;
	bool integer_part = true, negative = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		default:
			return -EINVAL;
		}

	if (buf[0] == '-') {
		negative = true;
		buf++;
	}

	while (*buf) {
		if ('0' <= *buf && *buf <= '9') {
			if (integer_part)
				integer = integer*10 + *buf - '0';
			else {
				fract += fract_mult*(*buf - '0');
				if (fract_mult == 1)
					break;
				fract_mult /= 10;
			}
		} else if (*buf == '\n') {
			if (*(buf + 1) == '\0')
				break;
			else
				return -EINVAL;
		} else if (*buf == '.') {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		buf++;
	}
	if (negative) {
		if (integer)
			integer = -integer;
		else
			fract = -fract;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
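
/*
 * Build the sysfs attribute name for a channel from its direction, type,
 * optional index/modifier/extended name and the info postfix, e.g.
 * "in_voltage0_raw" or "in_accel_x_calibbias", and wire up the read/write
 * callbacks.
 */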
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   bool generic)
{
	int ret;
	char *name_format, *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <modifier>_<extend_name>_<postfix> */
	if (chan->modified) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	if (chan->differential) { /* Differential cannot have modifier */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
		else {
			WARN(1, "Differential channels must be indexed\n");
			ret = -EINVAL;
			goto error_free_full_postfix;
		}
	} else { /* Single ended */
		if (generic)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
		else if (chan->indexed)
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    full_postfix);
		else
			name_format
				= kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
	}
	if (name_format == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = kasprintf(GFP_KERNEL,
					name_format,
					chan->channel,
					chan->channel2);
	if (dev_attr->attr.name == NULL) {
		ret = -ENOMEM;
		goto error_free_name_format;
	}

	if (readfunc) {
		dev_attr->attr.mode |= S_IRUGO;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= S_IWUSR;
		dev_attr->store = writefunc;
	}
	kfree(name_format);
	kfree(full_postfix);

	return 0;

error_free_name_format:
	kfree(name_format);
error_free_full_postfix:
	kfree(full_postfix);
error_ret:
	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}

int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   bool generic,
			   struct device *dev,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof *iio_attr, GFP_KERNEL);
	if (iio_attr == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, generic);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (!generic)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
error_ret:
	return ret;
}

static int iio_device_add_channel_sysfs(struct iio_dev *dev_info,
					struct iio_chan_spec const *chan)
{
	int ret, i, attrcount = 0;

	if (chan->channel < 0)
		return 0;

	ret = __iio_add_chan_devattr(iio_data_type_name[chan->processed_val],
				     chan,
				     &iio_read_channel_info,
				     (chan->output ?
				      &iio_write_channel_info : NULL),
				     0,
				     0,
				     &dev_info->dev,
				     &dev_info->channel_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;

	for_each_set_bit(i, &chan->info_mask, sizeof(long)*8) {
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i/2],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     (1 << i),
					     !(i%2),
					     &dev_info->dev,
					     &dev_info->channel_attr_list);
		if (ret == -EBUSY && (i%2 == 0)) {
			ret = 0;
			continue;
		}
		if (ret < 0)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_device_remove_and_free_read_attr(struct iio_dev *dev_info,
						 struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static ssize_t iio_show_dev_name(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", indio_dev->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
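
/*
 * Combine any attributes supplied by the driver with the per-channel
 * attributes generated above into a single chan_attr_group that is added to
 * the device's sysfs groups.
 */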
static int iio_device_register_sysfs(struct iio_dev *dev_info)
{
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p, *n;
	struct attribute **attr;

	/* First count elements in any existing group */
	if (dev_info->info->attrs) {
		attr = dev_info->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	INIT_LIST_HEAD(&dev_info->channel_attr_list);
	if (dev_info->channels)
		for (i = 0; i < dev_info->num_channels; i++) {
			ret = iio_device_add_channel_sysfs(dev_info,
							   &dev_info->channels[i]);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}
	if (dev_info->name)
		attrcount++;

	dev_info->chan_attr_group.attrs
		= kzalloc(sizeof(dev_info->chan_attr_group.attrs[0])*
			  (attrcount + 1),
			  GFP_KERNEL);
	if (dev_info->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes */
	if (dev_info->info->attrs)
		memcpy(dev_info->chan_attr_group.attrs,
		       dev_info->info->attrs->attrs,
		       sizeof(dev_info->chan_attr_group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &dev_info->channel_attr_list, l)
		dev_info->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (dev_info->name)
		dev_info->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;

	dev_info->groups[dev_info->groupcounter++] =
		&dev_info->chan_attr_group;

	return 0;

error_clear_attrs:
	list_for_each_entry_safe(p, n,
				 &dev_info->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(dev_info, p);
	}

	return ret;
}

static void iio_device_unregister_sysfs(struct iio_dev *dev_info)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, &dev_info->channel_attr_list, l) {
		list_del(&p->l);
		iio_device_remove_and_free_read_attr(dev_info, p);
	}
	kfree(dev_info->chan_attr_group.attrs);
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}
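
/*
 * For every bit set in a channel's event_mask, create the pair of sysfs
 * attributes <type>_<direction>_en and <type>_<direction>_value, with the
 * packed event code stored in the attribute's address field.
 */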
static int iio_device_add_event_sysfs(struct iio_dev *dev_info,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;

	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &dev_info->dev,
					     &dev_info->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &dev_info->dev,
					     &dev_info->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}

static inline void __iio_remove_event_config_attrs(struct iio_dev *dev_info)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n,
				 &dev_info->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}

static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info)
{
	int j, ret, attrcount = 0;

	INIT_LIST_HEAD(&dev_info->event_interface->dev_attr_list);
	/* Dynamically created from the channels array */
	for (j = 0; j < dev_info->num_channels; j++) {
		ret = iio_device_add_event_sysfs(dev_info,
						 &dev_info->channels[j]);
		if (ret < 0)
			goto error_clear_attrs;
		attrcount += ret;
	}
	return attrcount;

error_clear_attrs:
	__iio_remove_event_config_attrs(dev_info);

	return ret;
}

static bool iio_check_for_dynamic_events(struct iio_dev *dev_info)
{
	int j;

	for (j = 0; j < dev_info->num_channels; j++)
		if (dev_info->channels[j].event_mask != 0)
			return true;
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	mutex_init(&ev_int->event_list_lock);
	/* discussion point - make this variable? */
	ev_int->max_events = 10;
	ev_int->current_events = 0;
	INIT_LIST_HEAD(&ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";
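
/*
 * Allocate the event interface and expose its attributes in an "events"
 * sysfs group, merging any driver-supplied event attributes with those
 * generated from the channel specs.
 */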
static int iio_device_register_eventset(struct iio_dev *dev_info)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(dev_info->info->event_attrs ||
	      iio_check_for_dynamic_events(dev_info)))
		return 0;

	dev_info->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (dev_info->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_setup_ev_int(dev_info->event_interface);
	if (dev_info->info->event_attrs != NULL) {
		attr = dev_info->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (dev_info->channels) {
		ret = __iio_add_event_config_attrs(dev_info);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	dev_info->event_interface->group.name = iio_event_group_name;
	dev_info->event_interface->group.attrs =
		kzalloc(sizeof(dev_info->event_interface->group.attrs[0])
			*(attrcount + 1),
			GFP_KERNEL);
	if (dev_info->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (dev_info->info->event_attrs)
		memcpy(dev_info->event_interface->group.attrs,
		       dev_info->info->event_attrs->attrs,
		       sizeof(dev_info->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &dev_info->event_interface->dev_attr_list,
			    l)
		dev_info->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	dev_info->groups[dev_info->groupcounter++] =
		&dev_info->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(dev_info);
	kfree(dev_info->event_interface);
error_ret:
	return ret;
}

static void iio_device_unregister_eventset(struct iio_dev *dev_info)
{
	if (dev_info->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(dev_info);
	kfree(dev_info->event_interface->group.attrs);
	kfree(dev_info->event_interface);
}

static void iio_dev_release(struct device *device)
{
	struct iio_dev *dev_info = container_of(device, struct iio_dev, dev);

	cdev_del(&dev_info->chrdev);
	if (dev_info->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_unregister_trigger_consumer(dev_info);
	iio_device_unregister_eventset(dev_info);
	iio_device_unregister_sysfs(dev_info);
	ida_simple_remove(&iio_ida, dev_info->id);
	kfree(dev_info);
}

static struct device_type iio_dev_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
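
/*
 * Allocate an iio_dev plus an optional driver-private area; the private data
 * is placed after the core structure at an IIO_ALIGN boundary.
 */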
struct iio_dev *iio_allocate_device(int sizeof_priv)
{
	struct iio_dev *dev;
	size_t alloc_size;

	alloc_size = sizeof(struct iio_dev);
	if (sizeof_priv) {
		alloc_size = ALIGN(alloc_size, IIO_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct ? */
	alloc_size += IIO_ALIGN - 1;

	dev = kzalloc(alloc_size, GFP_KERNEL);

	if (dev) {
		dev->dev.groups = dev->groups;
		dev->dev.type = &iio_dev_type;
		dev->dev.bus = &iio_bus_type;
		device_initialize(&dev->dev);
		dev_set_drvdata(&dev->dev, (void *)dev);
		mutex_init(&dev->mlock);

		dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
		if (dev->id < 0) {
			/* cannot use a dev_err as the name isn't available */
			printk(KERN_ERR "Failed to get id\n");
			kfree(dev);
			return NULL;
		}
		dev_set_name(&dev->dev, "iio:device%d", dev->id);
	}

	return dev;
}
EXPORT_SYMBOL(iio_allocate_device);

void iio_free_device(struct iio_dev *dev)
{
	if (dev) {
		ida_simple_remove(&iio_ida, dev->id);
		kfree(dev);
	}
}
EXPORT_SYMBOL(iio_free_device);

/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 */
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev *dev_info = container_of(inode->i_cdev,
						struct iio_dev, chrdev);
	filp->private_data = dev_info;

	return iio_chrdev_buffer_open(dev_info);
}

/**
 * iio_chrdev_release() - chrdev file close for buffer access and ioctls
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	iio_chrdev_buffer_release(container_of(inode->i_cdev,
					       struct iio_dev, chrdev));
	return 0;
}

/* Somewhat of a cross file organization violation - ioctls here are actually
 * event related */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev *indio_dev = filp->private_data;
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static const struct file_operations iio_buffer_fileops = {
	.read = iio_buffer_read_first_n_outer_addr,
	.release = iio_chrdev_release,
	.open = iio_chrdev_open,
	.poll = iio_buffer_poll_addr,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = iio_ioctl,
};
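
/*
 * Registration order: sysfs attributes, event set, (optional) trigger
 * consumer, then device_add() followed by the character device so userspace
 * only sees a fully set up device.
 */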
int iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	/* configure elements for the chrdev */
	dev_info->dev.devt = MKDEV(MAJOR(iio_devt), dev_info->id);

	ret = iio_device_register_sysfs(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_ret;
	}
	ret = iio_device_register_eventset(dev_info);
	if (ret) {
		dev_err(dev_info->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (dev_info->modes & INDIO_BUFFER_TRIGGERED)
		iio_device_register_trigger_consumer(dev_info);

	ret = device_add(&dev_info->dev);
	if (ret < 0)
		goto error_unreg_eventset;
	cdev_init(&dev_info->chrdev, &iio_buffer_fileops);
	dev_info->chrdev.owner = dev_info->info->driver_module;
	ret = cdev_add(&dev_info->chrdev, dev_info->dev.devt, 1);
	if (ret < 0)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&dev_info->dev);
error_unreg_eventset:
	iio_device_unregister_eventset(dev_info);
error_free_sysfs:
	iio_device_unregister_sysfs(dev_info);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_device_register);

void iio_device_unregister(struct iio_dev *dev_info)
{
	device_unregister(&dev_info->dev);
}
EXPORT_SYMBOL(iio_device_unregister);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");