/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 **/
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	/* need a way of knowing if there may be enough data... */
	return POLLIN | POLLRDNORM;
}
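/*
 * Illustrative userspace sketch (not part of this file; the device path is
 * an assumption): buffered data is consumed by poll()ing and read()ing the
 * IIO character device, which lands in the two functions above.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char scan[256];
 *
 *	poll(&pfd, 1, -1);			  // iio_buffer_poll()
 *	ssize_t n = read(fd, scan, sizeof(scan)); // iio_buffer_read_first_n_outer()
 */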
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
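/*
 * Example of the format emitted above (illustrative values only): a channel
 * with scan_type { .sign = 's', .realbits = 12, .storagebits = 16,
 * .shift = 4 } stored little-endian reads back from sysfs as "le:s12/16>>4".
 */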
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
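/*
 * Illustrative userspace sketch (the sysfs path and attribute name are
 * assumptions): writing "1" to a scan element's "en" attribute while the
 * buffer is disabled lands in iio_scan_el_store() above; doing so while
 * the buffer is enabled fails with -EBUSY.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/bus/iio/devices/iio:device0/"
 *		      "scan_elements/in_voltage0_en", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 */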
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		for (i = 0; i < num_channels; i++) {
			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask =
				kcalloc(BITS_TO_LONGS(indio_dev->masklength),
					sizeof(*buffer->scan_mask),
					GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(buffer->scan_el_group.attrs[0]),
			GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
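/*
 * Minimal driver-side sketch (a hypothetical "foo" driver, not from this
 * file): a typical caller registers the scan element attributes for its
 * channel array at probe time, after allocating indio_dev->buffer.
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.scan_index = 0,
 *			.scan_type = { .sign = 's', .realbits = 12,
 *				       .storagebits = 16 },
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, foo_channels,
 *				  ARRAY_SIZE(foo_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */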
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	ulong val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
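/*
 * Illustrative sketch (hypothetical "foo" driver): the setup_ops callbacks
 * invoked above are supplied by the driver via indio_dev->setup_ops; a
 * triggered-buffer driver commonly wires them up like this.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 *
 *	indio_dev->setup_ops = &foo_buffer_setup_ops;
 */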
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note: NULL is used as the error indicator, as an empty scan mask doesn't
 * make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
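/*
 * Worked example of the computation above (illustrative): two enabled
 * channels with storagebits = 16 followed by a 64-bit timestamp give
 *	bytes = ALIGN(0, 2) + 2 = 2
 *	bytes = ALIGN(2, 2) + 2 = 4
 *	bytes = ALIGN(4, 8) + 8 = 16
 * i.e. 16 bytes per scan, with 4 bytes of padding before the timestamp.
 */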
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);
	buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

	/* What scan mask do we actually have? */
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;
	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);
/**
 * iio_scan_mask_set() - set a particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
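/*
 * Illustrative call (hypothetical bit number): once the buffer is
 * registered and its scan_mask allocated, a driver can pre-enable a
 * channel by setting the bit matching that channel's scan_index, e.g.
 *
 *	ret = iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
 */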
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);
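/*
 * Illustrative producer sketch (a hypothetical trigger handler; the
 * foo_read_scan() helper is assumed): drivers push one scan's worth of
 * data, laid out per active_scan_mask, into the buffer.
 *
 *	static irqreturn_t foo_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		unsigned char data[16];	// must cover indio_dev->scan_bytes
 *
 *		foo_read_scan(indio_dev, data);
 *		iio_push_to_buffer(indio_dev->buffer, data, pf->timestamp);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */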
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;
	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;
	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Skip bytes captured for channels not in scan_mask */
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
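/*
 * Worked example of the demux built above (illustrative): if userspace
 * enabled only channels 0 and 2 (buffer->scan_mask) but the device must
 * capture channels 0, 1 and 2 (active_scan_mask), all 16-bit, the table
 * becomes
 *	{ .from = 0, .to = 0, .length = 2 }	// channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	// channel 2, skipping channel 1
 * and iio_demux() copies each scan through demux_bounce accordingly.
 */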