/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data, s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);

	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or second latest value.
	 * Before this runs it is null and read attempts fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;

	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed on smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is full.
		 * Any change to the read pointer will be this or greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* end_read_p must be set before max_copied is
			 * updated, otherwise it would always equal
			 * ring->data. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);

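/*
 * Illustrative sketch only (kept out of the build): one way a read()
 * implementation might consume data via iio_rip_sw_rb().  The wrapper
 * name and the copy_to_user() destination are assumptions for the
 * example, not part of this file.  The points it shows: the rip
 * allocates the buffer returned through 'data', the first 'dead_offset'
 * bytes of that copy may have been overwritten mid-copy and must be
 * skipped, and the caller owns (and must kfree) the buffer after a
 * successful rip.
 */
#if 0
static ssize_t example_rip_to_user(struct iio_ring_buffer *r,
				   char __user *ubuf, size_t count)
{
	u8 *data;
	int dead_offset;
	int valid;

	valid = iio_rip_sw_rb(r, count, &data, &dead_offset);
	if (valid <= 0)
		/* error or nothing read */
		return valid;

	/* usable samples start dead_offset bytes into the copy */
	if (copy_to_user(ubuf, data + dead_offset, valid))
		valid = -EFAULT;
	kfree(data);
	return valid;
}
#endif
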
int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

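/*
 * Illustrative sketch only (kept out of the build): the producer side.
 * A driver would typically call this from the bottom half of its data
 * ready interrupt handling, once per scan, with a buffer of exactly bpd
 * bytes.  The function name and the origin of 'scan' are assumptions;
 * only iio_store_to_sw_rb() comes from this file, and the driver
 * remains responsible for never scheduling two stores concurrently.
 */
#if 0
static void example_push_one_scan(struct iio_ring_buffer *r,
				  u8 *scan, s64 timestamp)
{
	/* A non-zero return means a ring event push failed;
	 * the sample itself has still been stored. */
	if (iio_store_to_sw_rb(r, scan, timestamp))
		printk(KERN_WARNING "iio sw ring: event push failed\n");
}
#endif
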
static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* If a store overtook us, grab the newer value instead. */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

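/*
 * Illustrative sketch only (kept out of the build): the reallocation
 * gate above in one place.  Nothing happens unless the ring has been
 * marked as needing an update (normally via mark_param_change when bpd
 * or length change), and -EAGAIN is returned while any reader or
 * writer still holds the ring in use.  The function name is an
 * assumption for the example.
 */
#if 0
static int example_reallocate_when_idle(struct iio_ring_buffer *r)
{
	/* make sure the update path actually reallocates */
	iio_mark_update_needed_sw_rb(r);
	/* -EAGAIN means somebody still has the ring marked in use */
	return iio_request_update_sw_rb(r);
}
#endif
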
int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.bus = &iio_bus_type;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

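/*
 * Illustrative sketch only (kept out of the build): the allocation
 * lifecycle as seen from a device driver.  Allocate the software ring,
 * size it, trigger the initial storage allocation, and free it again if
 * anything fails.  The function name and the example sizes (6 byte
 * scans, 128 scans deep) are assumptions; everything called is defined
 * or exported above.
 */
#if 0
static struct iio_ring_buffer *example_setup_ring(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *r = iio_sw_rb_allocate(indio_dev);

	if (r == NULL)
		return NULL;

	/* size the ring, then allocate its storage */
	iio_set_bpd_sw_rb(r, 6);
	iio_set_length_sw_rb(r, 128);
	iio_mark_update_needed_sw_rb(r);
	if (iio_request_update_sw_rb(r)) {
		iio_sw_rb_free(r);
		return NULL;
	}
	return r;
}
#endif
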
MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");