/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
/* Local headers providing struct iio_sw_ring_buffer, iio_to_sw_ring()
 * and the trigger helpers used below. */
#include "ring_sw.h"
#include "trigger.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data
			- ring->buf.length*ring->buf.bytes_per_datum/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bytes_per_datum);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value. Before this first runs it is NULL and read
	 * attempts fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bytes_per_datum;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed on smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bytes_per_datum;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bytes_per_datum) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bytes_per_datum;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
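
/* Worked example (illustrative, not from the original source): with
 * buf.length = 4 and buf.bytes_per_datum = 2 the data area spans
 * ring->data .. ring->data + 8.  A write_p of ring->data + 6 advances
 * to ring->data + 8, which equals data + length*bytes_per_datum and so
 * wraps back to ring->data.  half_p advances one datum per store along
 * the same cycle, trailing write_p by half a buffer, so it catches up
 * with read_p (raising the 50% full event) once the ring is half
 * occupied.
 */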

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, char __user *buf, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of "
		       "samples: Request bytes %zd, Current bytes per datum %d\n",
		       count, ring->buf.bytes_per_datum);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
			   count);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bytes_per_datum
			- initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Note: end_read_p must be computed before max_copied
			 * is updated, otherwise it collapses to ring->data. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bytes_per_datum
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* set up the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = max_copied - *dead_offset;

	if (copy_to_user(buf, data + *dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}
error_free_data_cpy:
	kfree(data);
error_ret:

	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
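
/* Usage sketch (hypothetical caller, not part of this file): the chrdev
 * read path is expected to call something like
 *
 *	int dead_offset;
 *	int n = iio_rip_sw_rb(ring, count, user_buf, &dead_offset);
 *
 * where n == 0 means no data was available (or everything read was
 * overwritten mid-copy) and n > 0 is the number of valid bytes already
 * copied to user_buf.  Bytes the writer lapped during the copy are
 * skipped via dead_offset rather than handed back as garbage.
 */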

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Nothing written yet; drop the use count taken above. */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);

	/* Seqlock-style recheck: if the writer moved last_written_p while
	 * we copied, the datum may have been overwritten, so retry. */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
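
/* Note (illustrative): this is the "most recent value" path, e.g. for
 * sysfs-style reads.  -EAGAIN simply means nothing has been stored yet
 * (see the last_written_p comment in iio_store_to_sw_ring()); callers
 * are expected to retry later or report that no data is available.
 */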

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);
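
/* Deferred-resize pattern (summary): length / bytes_per_datum changes
 * only mark update_needed; the buffer is actually freed and reallocated
 * here, and only while use_count is zero.  A busy ring returns -EAGAIN
 * so the caller can retry once readers and writers have released it.
 */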

int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}
EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb);

int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bytes_per_datum != bpd) {
		r->bytes_per_datum = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BYTES_PER_DATUM_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bytes_per_datum.attr,
	&dev_attr_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.bus = &iio_bus_type;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled; if not, fail. */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
	ring->access.set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);
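
/* Worked example (illustrative): with scan_count = 3, bpe = 2 and
 * scan_timestamp set, the sample payload is 3 * 2 = 6 bytes.  Rounding
 * up to the s64 boundary gives (6 + 8 - 1) & ~7 = 8, and adding the 8
 * byte timestamp itself yields bytes_per_datum = 16.
 */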

void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *st
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	struct iio_ring_buffer *ring = st->indio_dev->ring;
	int len = 0;
	size_t datasize = ring->access.get_bytes_per_datum(ring);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(st->indio_dev->dev.parent,
			"memory alloc failed in ring bh\n");
		return;
	}

	if (ring->scan_count)
		len = st->get_ring_element(st, data);

	/* Guaranteed to be aligned with 8 byte boundary */
	if (ring->scan_timestamp)
		*(s64 *)(((phys_addr_t)data + len
			  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= st->last_timestamp;
	ring->access.store_to(ring,
			      (u8 *)data,
			      st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(data);

	return;
}
EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);

void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	h->last_timestamp = time;
	schedule_work(&h->work_trigger_to_ring);
}
EXPORT_SYMBOL(iio_sw_poll_func_th);
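
/* Flow summary (illustrative): a trigger fires in interrupt context;
 * iio_sw_poll_func_th() records the timestamp and schedules
 * work_trigger_to_ring.  iio_sw_trigger_bh_to_ring() then runs in
 * process context, gathers one scan via st->get_ring_element(),
 * appends the s64-aligned timestamp and stores the datum through
 * ring->access.store_to().
 */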

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");