/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>

#include "ring_sw.h"
#include "trigger.h"

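/*
 * Locking overview (descriptive of the code below): there is a single
 * writer, iio_store_to_sw_ring(), and any number of readers.  The data
 * path takes no locks; consistency relies on pointer-sized stores being
 * atomic, on barrier(), and on readers re-checking pointers after they
 * have copied data out.  use_lock guards use_count and the reallocation
 * path in iio_request_update_sw_rb(), while shared_ev_pointer.lock
 * serialises event pushes.
 */
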
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
						int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;
	__iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_ATOMIC);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	spin_lock_init(&ring->use_lock);
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

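/*
 * mark/unmark bracket reader access to the ring.  While use_count is
 * non-zero, iio_request_update_sw_rb() refuses to reallocate the buffer,
 * so a reader never has ring->data freed out from under it.
 */
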
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
 * in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
				unsigned char *data,
				s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value.  Before this runs it is null and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full.  Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}

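/*
 * Worked example of the buffer-full case above, with illustrative
 * numbers: for length = 4 and bpd = 2 the data area spans ring->data to
 * ring->data + 8.  If a store leaves write_p equal to read_p at
 * ring->data + 4, read_p is pushed on one datum to ring->data + 6
 * (wrapping to ring->data when it reaches ring->data + 8), silently
 * discarding the oldest sample, and a RING_100_FULL event is pushed or
 * escalated.
 */
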
int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;

	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to cpy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);
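	/* Example with illustrative numbers: for length*bpd = 8, if
	 * initial_read_p was at offset 6 and current_read_p is now at
	 * offset 2, the read pointer wrapped, so dead_offset
	 * = 8 - (6 - 2) = 4: the first 4 bytes of the local copy were
	 * overrun during the copy and must be skipped.
	 */
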
	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);

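/*
 * A minimal sketch of a caller, loosely modelled on a chrdev read path;
 * the names kbuf, ubuf and n are illustrative only.  Note the error paths
 * of iio_rip_sw_rb() free *data themselves; the caller only owns the
 * buffer on a positive return, and must skip the first dead_offset bytes.
 *
 *	u8 *kbuf;
 *	int dead_offset;
 *	int n = iio_rip_sw_rb(r, count, &kbuf, &dead_offset);
 *	if (n > 0) {
 *		if (copy_to_user(ubuf, kbuf + dead_offset, n))
 *			n = -EFAULT;
 *		kfree(kbuf);
 *	}
 *	return n;
 */
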
int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
				      unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

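/*
 * The copy-then-recheck pattern in iio_read_last_from_sw_ring() above is
 * a seqlock-style retry: last_written_p is snapshotted, the datum copied,
 * and the snapshot re-checked; if the writer moved it in between, the
 * possibly torn copy is discarded and the read retried.
 */
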
int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

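/*
 * Resize flow as implemented here: iio_set_bpd_sw_rb() and
 * iio_set_length_sw_rb() below only record the new geometry and call
 * mark_param_change (typically iio_mark_update_needed_sw_rb); the backing
 * store is reallocated later, when iio_request_update_sw_rb() runs with
 * no reader holding use_count.  Data in the old buffer is lost at that
 * point.
 */
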
int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;
	iio_ring_buffer_init(buf, indio_dev);
	__iio_init_sw_ring_buffer(ring);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.bus = &iio_bus_type;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

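/*
 * A minimal sketch of driver-side wiring, under the assumption that the
 * driver points every access callback at the sw ring implementations in
 * this file; the exact set of callbacks a driver fills in may differ.
 *
 *	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->ring)
 *		return -ENOMEM;
 *	indio_dev->ring->access.mark_in_use = &iio_mark_sw_rb_in_use;
 *	indio_dev->ring->access.unmark_in_use = &iio_unmark_sw_rb_in_use;
 *	indio_dev->ring->access.store_to = &iio_store_to_sw_rb;
 *	indio_dev->ring->access.rip_lots = &iio_rip_sw_rb;
 *	indio_dev->ring->access.read_last = &iio_read_last_from_sw_rb;
 *	indio_dev->ring->access.mark_param_change =
 *		&iio_mark_update_needed_sw_rb;
 *	indio_dev->ring->access.request_update = &iio_request_update_sw_rb;
 *	indio_dev->ring->access.get_bpd = &iio_get_bpd_sw_rb;
 *	indio_dev->ring->access.set_bpd = &iio_set_bpd_sw_rb;
 *	indio_dev->ring->access.get_length = &iio_get_length_sw_rb;
 *	indio_dev->ring->access.set_length = &iio_set_length_sw_rb;
 */
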
void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	size_t size;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled, if not fail */
	if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
		return -EINVAL;

	if (indio_dev->scan_timestamp)
		if (indio_dev->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((indio_dev->scan_count * indio_dev->ring->bpe)
				 + sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = indio_dev->scan_count * indio_dev->ring->bpe;

	indio_dev->ring->access.set_bpd(indio_dev->ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);

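/*
 * Worked example of the size computation above, with illustrative
 * numbers: scan_count = 3 channels of bpe = 2 bytes gives 6 bytes of
 * data; rounding up to s64 alignment, (6 + 7) & ~7, gives 8, and adding
 * sizeof(s64) = 8 for the timestamp makes each ring datum 16 bytes.
 */
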
void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
{
	struct iio_sw_ring_helper_state *st
		= container_of(work_s, struct iio_sw_ring_helper_state,
			       work_trigger_to_ring);
	int len = 0;
	size_t datasize = st->indio_dev
		->ring->access.get_bpd(st->indio_dev->ring);
	char *data = kmalloc(datasize, GFP_KERNEL);

	if (data == NULL) {
		dev_err(st->indio_dev->dev.parent,
			"memory alloc failed in ring bh\n");
		return;
	}

	if (st->indio_dev->scan_count)
		len = st->get_ring_element(st, data);

	/* Guaranteed to be aligned with 8 byte boundary */
	if (st->indio_dev->scan_timestamp)
		*(s64 *)(((phys_addr_t)data + len
			  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
			= st->last_timestamp;
	st->indio_dev->ring->access.store_to(st->indio_dev->ring,
					     (u8 *)data,
					     st->last_timestamp);

	iio_trigger_notify_done(st->indio_dev->trig);
	kfree(data);
}
EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);

void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
{
	struct iio_sw_ring_helper_state *h
		= iio_dev_get_devdata(indio_dev);
	h->last_timestamp = time;
	schedule_work(&h->work_trigger_to_ring);
}
EXPORT_SYMBOL(iio_sw_poll_func_th);

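/*
 * Flow of the two helpers above: the trigger's th calls
 * iio_sw_poll_func_th(), which records the timestamp and schedules
 * work_trigger_to_ring; the work item, iio_sw_trigger_bh_to_ring(), then
 * fetches one scan via the driver's get_ring_element(), appends the
 * aligned timestamp and stores the whole datum to the ring before
 * notifying the trigger that it is done.
 */
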
MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");