/*
 * lis3l02dq_ring.c — ring buffer / trigger support for the ST LIS3L02DQ
 * accelerometer (drivers/staging/iio/accel/lis3l02dq_ring.c).
 *
 * From commit: "staging:iio: treewide rename iio_triggered_ring_* to
 * iio_triggered_buffer_*"
 */
1 #include <linux/interrupt.h>
2 #include <linux/gpio.h>
3 #include <linux/mutex.h>
4 #include <linux/kernel.h>
5 #include <linux/spi/spi.h>
6 #include <linux/slab.h>
8 #include "../iio.h"
9 #include "../ring_sw.h"
10 #include "../kfifo_buf.h"
11 #include "../trigger.h"
12 #include "../trigger_consumer.h"
13 #include "lis3l02dq.h"
15 /**
16 * combine_8_to_16() utility function to munge to u8s into u16
17 **/
18 static inline u16 combine_8_to_16(u8 lower, u8 upper)
20 u16 _lower = lower;
21 u16 _upper = upper;
22 return _lower | (_upper << 8);
25 /**
26 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
27 **/
28 irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
30 struct iio_dev *indio_dev = private;
31 struct lis3l02dq_state *st = iio_priv(indio_dev);
33 if (st->trigger_on) {
34 iio_trigger_poll(st->trig, iio_get_time_ns());
35 return IRQ_HANDLED;
36 } else
37 return IRQ_WAKE_THREAD;
40 /**
41 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
42 **/
43 ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
44 int index,
45 int *val)
47 int ret;
48 s16 *data;
50 if (!iio_scan_mask_query(ring, index))
51 return -EINVAL;
53 if (!ring->access->read_last)
54 return -EBUSY;
56 data = kmalloc(ring->access->get_bytes_per_datum(ring),
57 GFP_KERNEL);
58 if (data == NULL)
59 return -ENOMEM;
61 ret = ring->access->read_last(ring, (u8 *)data);
62 if (ret)
63 goto error_free_data;
64 *val = data[bitmap_weight(ring->scan_mask, index)];
65 error_free_data:
67 kfree(data);
69 return ret;
72 static const u8 read_all_tx_array[] = {
73 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
74 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
75 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
76 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
77 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
78 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
81 /**
82 * lis3l02dq_read_all() Reads all channels currently selected
83 * @st: device specific state
84 * @rx_array: (dma capable) receive array, must be at least
85 * 4*number of channels
86 **/
87 static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
89 struct iio_ring_buffer *ring = indio_dev->ring;
90 struct lis3l02dq_state *st = iio_priv(indio_dev);
91 struct spi_transfer *xfers;
92 struct spi_message msg;
93 int ret, i, j = 0;
95 xfers = kzalloc((ring->scan_count) * 2
96 * sizeof(*xfers), GFP_KERNEL);
97 if (!xfers)
98 return -ENOMEM;
100 mutex_lock(&st->buf_lock);
102 for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
103 if (test_bit(i, ring->scan_mask)) {
104 /* lower byte */
105 xfers[j].tx_buf = st->tx + 2*j;
106 st->tx[2*j] = read_all_tx_array[i*4];
107 st->tx[2*j + 1] = 0;
108 if (rx_array)
109 xfers[j].rx_buf = rx_array + j*2;
110 xfers[j].bits_per_word = 8;
111 xfers[j].len = 2;
112 xfers[j].cs_change = 1;
113 j++;
115 /* upper byte */
116 xfers[j].tx_buf = st->tx + 2*j;
117 st->tx[2*j] = read_all_tx_array[i*4 + 2];
118 st->tx[2*j + 1] = 0;
119 if (rx_array)
120 xfers[j].rx_buf = rx_array + j*2;
121 xfers[j].bits_per_word = 8;
122 xfers[j].len = 2;
123 xfers[j].cs_change = 1;
124 j++;
127 /* After these are transmitted, the rx_buff should have
128 * values in alternate bytes
130 spi_message_init(&msg);
131 for (j = 0; j < ring->scan_count * 2; j++)
132 spi_message_add_tail(&xfers[j], &msg);
134 ret = spi_sync(st->us, &msg);
135 mutex_unlock(&st->buf_lock);
136 kfree(xfers);
138 return ret;
141 static int lis3l02dq_get_ring_element(struct iio_dev *indio_dev,
142 u8 *buf)
144 int ret, i;
145 u8 *rx_array ;
146 s16 *data = (s16 *)buf;
148 rx_array = kzalloc(4 * (indio_dev->ring->scan_count), GFP_KERNEL);
149 if (rx_array == NULL)
150 return -ENOMEM;
151 ret = lis3l02dq_read_all(indio_dev, rx_array);
152 if (ret < 0)
153 return ret;
154 for (i = 0; i < indio_dev->ring->scan_count; i++)
155 data[i] = combine_8_to_16(rx_array[i*4+1],
156 rx_array[i*4+3]);
157 kfree(rx_array);
159 return i*sizeof(data[0]);
162 static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
164 struct iio_poll_func *pf = p;
165 struct iio_dev *indio_dev = pf->indio_dev;
166 struct iio_ring_buffer *ring = indio_dev->ring;
167 int len = 0;
168 size_t datasize = ring->access->get_bytes_per_datum(ring);
169 char *data = kmalloc(datasize, GFP_KERNEL);
171 if (data == NULL) {
172 dev_err(indio_dev->dev.parent,
173 "memory alloc failed in ring bh");
174 return -ENOMEM;
177 if (ring->scan_count)
178 len = lis3l02dq_get_ring_element(indio_dev, data);
180 /* Guaranteed to be aligned with 8 byte boundary */
181 if (ring->scan_timestamp)
182 *(s64 *)(((phys_addr_t)data + len
183 + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
184 = pf->timestamp;
185 ring->access->store_to(ring, (u8 *)data, pf->timestamp);
187 iio_trigger_notify_done(indio_dev->trig);
188 kfree(data);
189 return IRQ_HANDLED;
192 /* Caller responsible for locking as necessary. */
193 static int
194 __lis3l02dq_write_data_ready_config(struct device *dev, bool state)
196 int ret;
197 u8 valold;
198 bool currentlyset;
199 struct iio_dev *indio_dev = dev_get_drvdata(dev);
200 struct lis3l02dq_state *st = iio_priv(indio_dev);
202 /* Get the current event mask register */
203 ret = lis3l02dq_spi_read_reg_8(indio_dev,
204 LIS3L02DQ_REG_CTRL_2_ADDR,
205 &valold);
206 if (ret)
207 goto error_ret;
208 /* Find out if data ready is already on */
209 currentlyset
210 = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
212 /* Disable requested */
213 if (!state && currentlyset) {
214 /* disable the data ready signal */
215 valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
217 /* The double write is to overcome a hardware bug?*/
218 ret = lis3l02dq_spi_write_reg_8(indio_dev,
219 LIS3L02DQ_REG_CTRL_2_ADDR,
220 valold);
221 if (ret)
222 goto error_ret;
223 ret = lis3l02dq_spi_write_reg_8(indio_dev,
224 LIS3L02DQ_REG_CTRL_2_ADDR,
225 valold);
226 if (ret)
227 goto error_ret;
228 st->trigger_on = false;
229 /* Enable requested */
230 } else if (state && !currentlyset) {
231 /* if not set, enable requested */
232 /* first disable all events */
233 ret = lis3l02dq_disable_all_events(indio_dev);
234 if (ret < 0)
235 goto error_ret;
237 valold = ret |
238 LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
240 st->trigger_on = true;
241 ret = lis3l02dq_spi_write_reg_8(indio_dev,
242 LIS3L02DQ_REG_CTRL_2_ADDR,
243 valold);
244 if (ret)
245 goto error_ret;
248 return 0;
249 error_ret:
250 return ret;
254 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
256 * If disabling the interrupt also does a final read to ensure it is clear.
257 * This is only important in some cases where the scan enable elements are
258 * switched before the ring is reenabled.
260 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
261 bool state)
263 struct iio_dev *indio_dev = trig->private_data;
264 int ret = 0;
265 u8 t;
267 __lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
268 if (state == false) {
270 * A possible quirk with teh handler is currently worked around
271 * by ensuring outstanding read events are cleared.
273 ret = lis3l02dq_read_all(indio_dev, NULL);
275 lis3l02dq_spi_read_reg_8(indio_dev,
276 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
277 &t);
278 return ret;
282 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
283 * @trig: the datardy trigger
285 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
287 struct iio_dev *indio_dev = trig->private_data;
288 struct lis3l02dq_state *st = iio_priv(indio_dev);
289 int i;
291 /* If gpio still high (or high again) */
292 /* In theory possible we will need to do this several times */
293 for (i = 0; i < 5; i++)
294 if (gpio_get_value(irq_to_gpio(st->us->irq)))
295 lis3l02dq_read_all(indio_dev, NULL);
296 else
297 break;
298 if (i == 5)
299 printk(KERN_INFO
300 "Failed to clear the interrupt for lis3l02dq\n");
302 /* irq reenabled so success! */
303 return 0;
306 static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
307 .owner = THIS_MODULE,
308 .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
309 .try_reenable = &lis3l02dq_trig_try_reen,
312 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
314 int ret;
315 struct lis3l02dq_state *st = iio_priv(indio_dev);
317 st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
318 if (!st->trig) {
319 ret = -ENOMEM;
320 goto error_ret;
323 st->trig->dev.parent = &st->us->dev;
324 st->trig->ops = &lis3l02dq_trigger_ops;
325 st->trig->private_data = indio_dev;
326 ret = iio_trigger_register(st->trig);
327 if (ret)
328 goto error_free_trig;
330 return 0;
332 error_free_trig:
333 iio_free_trigger(st->trig);
334 error_ret:
335 return ret;
338 void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
340 struct lis3l02dq_state *st = iio_priv(indio_dev);
342 iio_trigger_unregister(st->trig);
343 iio_free_trigger(st->trig);
346 void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
348 iio_dealloc_pollfunc(indio_dev->pollfunc);
349 lis3l02dq_free_buf(indio_dev->ring);
352 static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
354 /* Disable unwanted channels otherwise the interrupt will not clear */
355 u8 t;
356 int ret;
357 bool oneenabled = false;
359 ret = lis3l02dq_spi_read_reg_8(indio_dev,
360 LIS3L02DQ_REG_CTRL_1_ADDR,
361 &t);
362 if (ret)
363 goto error_ret;
365 if (iio_scan_mask_query(indio_dev->ring, 0)) {
366 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
367 oneenabled = true;
368 } else
369 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
370 if (iio_scan_mask_query(indio_dev->ring, 1)) {
371 t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
372 oneenabled = true;
373 } else
374 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
375 if (iio_scan_mask_query(indio_dev->ring, 2)) {
376 t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
377 oneenabled = true;
378 } else
379 t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
381 if (!oneenabled) /* what happens in this case is unknown */
382 return -EINVAL;
383 ret = lis3l02dq_spi_write_reg_8(indio_dev,
384 LIS3L02DQ_REG_CTRL_1_ADDR,
386 if (ret)
387 goto error_ret;
389 return iio_triggered_buffer_postenable(indio_dev);
390 error_ret:
391 return ret;
394 /* Turn all channels on again */
395 static int lis3l02dq_ring_predisable(struct iio_dev *indio_dev)
397 u8 t;
398 int ret;
400 ret = iio_triggered_buffer_predisable(indio_dev);
401 if (ret)
402 goto error_ret;
404 ret = lis3l02dq_spi_read_reg_8(indio_dev,
405 LIS3L02DQ_REG_CTRL_1_ADDR,
406 &t);
407 if (ret)
408 goto error_ret;
409 t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
410 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
411 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
413 ret = lis3l02dq_spi_write_reg_8(indio_dev,
414 LIS3L02DQ_REG_CTRL_1_ADDR,
417 error_ret:
418 return ret;
421 static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
422 .preenable = &iio_sw_ring_preenable,
423 .postenable = &lis3l02dq_ring_postenable,
424 .predisable = &lis3l02dq_ring_predisable,
427 int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
429 int ret;
430 struct iio_ring_buffer *ring;
432 ring = lis3l02dq_alloc_buf(indio_dev);
433 if (!ring)
434 return -ENOMEM;
436 indio_dev->ring = ring;
437 /* Effectively select the ring buffer implementation */
438 indio_dev->ring->access = &lis3l02dq_access_funcs;
439 ring->bpe = 2;
441 ring->scan_timestamp = true;
442 ring->setup_ops = &lis3l02dq_ring_setup_ops;
443 ring->owner = THIS_MODULE;
445 /* Functions are NULL as we set handler below */
446 indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
447 &lis3l02dq_trigger_handler,
449 indio_dev,
450 "lis3l02dq_consumer%d",
451 indio_dev->id);
453 if (indio_dev->pollfunc == NULL) {
454 ret = -ENOMEM;
455 goto error_iio_sw_rb_free;
458 indio_dev->modes |= INDIO_RING_TRIGGERED;
459 return 0;
461 error_iio_sw_rb_free:
462 lis3l02dq_free_buf(indio_dev->ring);
463 return ret;