1 #include <linux/interrupt.h>
2 #include <linux/gpio.h>
3 #include <linux/mutex.h>
4 #include <linux/kernel.h>
5 #include <linux/spi/spi.h>
6 #include <linux/slab.h>
9 #include "../ring_sw.h"
10 #include "../kfifo_buf.h"
11 #include "../trigger.h"
12 #include "../trigger_consumer.h"
13 #include "lis3l02dq.h"
16 * combine_8_to_16() utility function to munge to u8s into u16
18 static inline u16
combine_8_to_16(u8 lower
, u8 upper
)
22 return _lower
| (_upper
<< 8);
26 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
28 irqreturn_t
lis3l02dq_data_rdy_trig_poll(int irq
, void *private)
30 struct iio_dev
*indio_dev
= private;
31 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
34 iio_trigger_poll(st
->trig
, iio_get_time_ns());
37 return IRQ_WAKE_THREAD
;
41 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
43 ssize_t
lis3l02dq_read_accel_from_ring(struct iio_ring_buffer
*ring
,
50 if (!iio_scan_mask_query(ring
, index
))
53 if (!ring
->access
->read_last
)
56 data
= kmalloc(ring
->access
->get_bytes_per_datum(ring
),
61 ret
= ring
->access
->read_last(ring
, (u8
*)data
);
64 *val
= data
[bitmap_weight(ring
->scan_mask
, index
)];
/* SPI transmit bytes: a read-register command followed by a dummy byte
 * for each of the six acceleration output registers (X/Y/Z, low byte
 * then high byte). Indexed in lis3l02dq_read_all() in groups of four
 * per axis (low cmd, dummy, high cmd, dummy).
 */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
82 * lis3l02dq_read_all() Reads all channels currently selected
83 * @st: device specific state
84 * @rx_array: (dma capable) receive array, must be at least
85 * 4*number of channels
87 static int lis3l02dq_read_all(struct iio_dev
*indio_dev
, u8
*rx_array
)
89 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
90 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
91 struct spi_transfer
*xfers
;
92 struct spi_message msg
;
95 xfers
= kzalloc((ring
->scan_count
) * 2
96 * sizeof(*xfers
), GFP_KERNEL
);
100 mutex_lock(&st
->buf_lock
);
102 for (i
= 0; i
< ARRAY_SIZE(read_all_tx_array
)/4; i
++)
103 if (test_bit(i
, ring
->scan_mask
)) {
105 xfers
[j
].tx_buf
= st
->tx
+ 2*j
;
106 st
->tx
[2*j
] = read_all_tx_array
[i
*4];
109 xfers
[j
].rx_buf
= rx_array
+ j
*2;
110 xfers
[j
].bits_per_word
= 8;
112 xfers
[j
].cs_change
= 1;
116 xfers
[j
].tx_buf
= st
->tx
+ 2*j
;
117 st
->tx
[2*j
] = read_all_tx_array
[i
*4 + 2];
120 xfers
[j
].rx_buf
= rx_array
+ j
*2;
121 xfers
[j
].bits_per_word
= 8;
123 xfers
[j
].cs_change
= 1;
127 /* After these are transmitted, the rx_buff should have
128 * values in alternate bytes
130 spi_message_init(&msg
);
131 for (j
= 0; j
< ring
->scan_count
* 2; j
++)
132 spi_message_add_tail(&xfers
[j
], &msg
);
134 ret
= spi_sync(st
->us
, &msg
);
135 mutex_unlock(&st
->buf_lock
);
141 static int lis3l02dq_get_ring_element(struct iio_dev
*indio_dev
,
146 s16
*data
= (s16
*)buf
;
148 rx_array
= kzalloc(4 * (indio_dev
->ring
->scan_count
), GFP_KERNEL
);
149 if (rx_array
== NULL
)
151 ret
= lis3l02dq_read_all(indio_dev
, rx_array
);
154 for (i
= 0; i
< indio_dev
->ring
->scan_count
; i
++)
155 data
[i
] = combine_8_to_16(rx_array
[i
*4+1],
159 return i
*sizeof(data
[0]);
162 static irqreturn_t
lis3l02dq_trigger_handler(int irq
, void *p
)
164 struct iio_poll_func
*pf
= p
;
165 struct iio_dev
*indio_dev
= pf
->indio_dev
;
166 struct iio_ring_buffer
*ring
= indio_dev
->ring
;
168 size_t datasize
= ring
->access
->get_bytes_per_datum(ring
);
169 char *data
= kmalloc(datasize
, GFP_KERNEL
);
172 dev_err(indio_dev
->dev
.parent
,
173 "memory alloc failed in ring bh");
177 if (ring
->scan_count
)
178 len
= lis3l02dq_get_ring_element(indio_dev
, data
);
180 /* Guaranteed to be aligned with 8 byte boundary */
181 if (ring
->scan_timestamp
)
182 *(s64
*)(((phys_addr_t
)data
+ len
183 + sizeof(s64
) - 1) & ~(sizeof(s64
) - 1))
185 ring
->access
->store_to(ring
, (u8
*)data
, pf
->timestamp
);
187 iio_trigger_notify_done(indio_dev
->trig
);
192 /* Caller responsible for locking as necessary. */
194 __lis3l02dq_write_data_ready_config(struct device
*dev
, bool state
)
199 struct iio_dev
*indio_dev
= dev_get_drvdata(dev
);
200 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
202 /* Get the current event mask register */
203 ret
= lis3l02dq_spi_read_reg_8(indio_dev
,
204 LIS3L02DQ_REG_CTRL_2_ADDR
,
208 /* Find out if data ready is already on */
210 = valold
& LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
212 /* Disable requested */
213 if (!state
&& currentlyset
) {
214 /* disable the data ready signal */
215 valold
&= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
217 /* The double write is to overcome a hardware bug?*/
218 ret
= lis3l02dq_spi_write_reg_8(indio_dev
,
219 LIS3L02DQ_REG_CTRL_2_ADDR
,
223 ret
= lis3l02dq_spi_write_reg_8(indio_dev
,
224 LIS3L02DQ_REG_CTRL_2_ADDR
,
228 st
->trigger_on
= false;
229 /* Enable requested */
230 } else if (state
&& !currentlyset
) {
231 /* if not set, enable requested */
232 /* first disable all events */
233 ret
= lis3l02dq_disable_all_events(indio_dev
);
238 LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION
;
240 st
->trigger_on
= true;
241 ret
= lis3l02dq_spi_write_reg_8(indio_dev
,
242 LIS3L02DQ_REG_CTRL_2_ADDR
,
254 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
256 * If disabling the interrupt also does a final read to ensure it is clear.
257 * This is only important in some cases where the scan enable elements are
258 * switched before the ring is reenabled.
260 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger
*trig
,
263 struct iio_dev
*indio_dev
= trig
->private_data
;
267 __lis3l02dq_write_data_ready_config(&indio_dev
->dev
, state
);
268 if (state
== false) {
270 * A possible quirk with teh handler is currently worked around
271 * by ensuring outstanding read events are cleared.
273 ret
= lis3l02dq_read_all(indio_dev
, NULL
);
275 lis3l02dq_spi_read_reg_8(indio_dev
,
276 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR
,
282 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
283 * @trig: the datardy trigger
285 static int lis3l02dq_trig_try_reen(struct iio_trigger
*trig
)
287 struct iio_dev
*indio_dev
= trig
->private_data
;
288 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
291 /* If gpio still high (or high again) */
292 /* In theory possible we will need to do this several times */
293 for (i
= 0; i
< 5; i
++)
294 if (gpio_get_value(irq_to_gpio(st
->us
->irq
)))
295 lis3l02dq_read_all(indio_dev
, NULL
);
300 "Failed to clear the interrupt for lis3l02dq\n");
302 /* irq reenabled so success! */
/* Operations for the data ready trigger: state control and irq
 * re-enable after each capture.
 */
static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};
312 int lis3l02dq_probe_trigger(struct iio_dev
*indio_dev
)
315 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
317 st
->trig
= iio_allocate_trigger("lis3l02dq-dev%d", indio_dev
->id
);
323 st
->trig
->dev
.parent
= &st
->us
->dev
;
324 st
->trig
->ops
= &lis3l02dq_trigger_ops
;
325 st
->trig
->private_data
= indio_dev
;
326 ret
= iio_trigger_register(st
->trig
);
328 goto error_free_trig
;
333 iio_free_trigger(st
->trig
);
338 void lis3l02dq_remove_trigger(struct iio_dev
*indio_dev
)
340 struct lis3l02dq_state
*st
= iio_priv(indio_dev
);
342 iio_trigger_unregister(st
->trig
);
343 iio_free_trigger(st
->trig
);
/* Release the poll function and ring buffer allocated in
 * lis3l02dq_configure_ring().
 */
void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	lis3l02dq_free_buf(indio_dev->ring);
}
352 static int lis3l02dq_ring_postenable(struct iio_dev
*indio_dev
)
354 /* Disable unwanted channels otherwise the interrupt will not clear */
357 bool oneenabled
= false;
359 ret
= lis3l02dq_spi_read_reg_8(indio_dev
,
360 LIS3L02DQ_REG_CTRL_1_ADDR
,
365 if (iio_scan_mask_query(indio_dev
->ring
, 0)) {
366 t
|= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE
;
369 t
&= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE
;
370 if (iio_scan_mask_query(indio_dev
->ring
, 1)) {
371 t
|= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE
;
374 t
&= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE
;
375 if (iio_scan_mask_query(indio_dev
->ring
, 2)) {
376 t
|= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE
;
379 t
&= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE
;
381 if (!oneenabled
) /* what happens in this case is unknown */
383 ret
= lis3l02dq_spi_write_reg_8(indio_dev
,
384 LIS3L02DQ_REG_CTRL_1_ADDR
,
389 return iio_triggered_buffer_postenable(indio_dev
);
394 /* Turn all channels on again */
395 static int lis3l02dq_ring_predisable(struct iio_dev
*indio_dev
)
400 ret
= iio_triggered_buffer_predisable(indio_dev
);
404 ret
= lis3l02dq_spi_read_reg_8(indio_dev
,
405 LIS3L02DQ_REG_CTRL_1_ADDR
,
409 t
|= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE
|
410 LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE
|
411 LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE
;
413 ret
= lis3l02dq_spi_write_reg_8(indio_dev
,
414 LIS3L02DQ_REG_CTRL_1_ADDR
,
/* Ring buffer setup callbacks: generic preenable plus the axis
 * enable/disable handling above.
 */
static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
	.preenable = &iio_sw_ring_preenable,
	.postenable = &lis3l02dq_ring_postenable,
	.predisable = &lis3l02dq_ring_predisable,
};
427 int lis3l02dq_configure_ring(struct iio_dev
*indio_dev
)
430 struct iio_ring_buffer
*ring
;
432 ring
= lis3l02dq_alloc_buf(indio_dev
);
436 indio_dev
->ring
= ring
;
437 /* Effectively select the ring buffer implementation */
438 indio_dev
->ring
->access
= &lis3l02dq_access_funcs
;
441 ring
->scan_timestamp
= true;
442 ring
->setup_ops
= &lis3l02dq_ring_setup_ops
;
443 ring
->owner
= THIS_MODULE
;
445 /* Functions are NULL as we set handler below */
446 indio_dev
->pollfunc
= iio_alloc_pollfunc(&iio_pollfunc_store_time
,
447 &lis3l02dq_trigger_handler
,
450 "lis3l02dq_consumer%d",
453 if (indio_dev
->pollfunc
== NULL
) {
455 goto error_iio_sw_rb_free
;
458 indio_dev
->modes
|= INDIO_RING_TRIGGERED
;
461 error_iio_sw_rb_free
:
462 lis3l02dq_free_buf(indio_dev
->ring
);