Linux Kernel  3.7.1
lis3l02dq_ring.c
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <linux/iio/iio.h>
#include "../ring_sw.h"
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include "lis3l02dq.h"

/* Combine two u8s read from the device into a single u16 sample. */
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
	u16 _lower = lower;
	u16 _upper = upper;
	return _lower | (_upper << 8);
}

/* Poll function for the data ready trigger; runs in hard interrupt context. */
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	if (st->trigger_on) {
		iio_trigger_poll(st->trig, iio_get_time_ns());
		return IRQ_HANDLED;
	} else
		return IRQ_WAKE_THREAD;
}

/* Read request bytes for the X, Y and Z output registers, low then high. */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};

/*
 * lis3l02dq_read_all() - read all currently selected channels.
 * @rx_array, if provided, must be DMA capable and hold at least four bytes
 * per enabled channel.
 */
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2,
			sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
		if (test_bit(i, indio_dev->active_scan_mask)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, the rx buffer should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
				      indio_dev->masklength) * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}

static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
					u8 *buf)
{
	int ret, i;
	u8 *rx_array;
	s16 *data = (s16 *)buf;
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);

	rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
	if (rx_array == NULL)
		return -ENOMEM;
	ret = lis3l02dq_read_all(indio_dev, rx_array);
	if (ret < 0) {
		kfree(rx_array);
		return ret;
	}
	for (i = 0; i < scan_count; i++)
		data[i] = combine_8_to_16(rx_array[i*4 + 1],
					  rx_array[i*4 + 3]);
	kfree(rx_array);

	return i * sizeof(data[0]);
}
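
/*
 * Illustration only, not part of the driver: with two channels enabled (say
 * X and Z), lis3l02dq_read_all() above fills rx_array so that only every
 * second byte carries data:
 *
 *   rx_array[0] don't care   rx_array[1] X low byte
 *   rx_array[2] don't care   rx_array[3] X high byte
 *   rx_array[4] don't care   rx_array[5] Z low byte
 *   rx_array[6] don't care   rx_array[7] Z high byte
 *
 * so the loop above produces data[0] = X, data[1] = Z and the function
 * returns 4 (two s16 samples).
 */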

static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	int len = 0;
	char *data;

	data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (data == NULL) {
		dev_err(indio_dev->dev.parent,
			"memory alloc failed in buffer bh\n");
		goto done;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		len = lis3l02dq_get_buffer_element(indio_dev, data);

	/* Guaranteed to be aligned with 8 byte boundary */
	if (indio_dev->scan_timestamp)
		*(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
			= pf->timestamp;
	iio_push_to_buffer(indio_dev->buffer, (u8 *)data);

	kfree(data);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
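
/*
 * Illustration only, not part of the driver: assuming all three axes and the
 * timestamp are enabled, the element pushed above has the layout sketched
 * below, with the timestamp starting at the first 8-byte boundary after the
 * samples (len = 6, ALIGN(6, sizeof(s64)) = 8).
 */
#if 0
struct lis3l02dq_example_scan {
	s16 accel_x;	/* bytes 0-1 */
	s16 accel_y;	/* bytes 2-3 */
	s16 accel_z;	/* bytes 4-5 */
	s16 pad;	/* bytes 6-7, padding up to the timestamp */
	s64 timestamp;	/* bytes 8-15 */
};
#endif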

/* Caller responsible for locking as necessary. */
static int
__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* Disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug? */
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* If not set, enable requested;
		 * first disable all events */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}

/*
 * lis3l02dq_data_rdy_trigger_set_state() - set the data ready interrupt state.
 * When disabling, a final read is done to make sure the interrupt is cleared.
 */
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;
	int ret = 0;
	u8 t;

	__lis3l02dq_write_data_ready_config(indio_dev, state);
	if (state == false) {
		/*
		 * A possible quirk with the handler is currently worked around
		 * by ensuring outstanding read events are cleared.
		 */
		ret = lis3l02dq_read_all(indio_dev, NULL);
	}
	/* Read the wake-up source register to clear any latched interrupt;
	 * the register macro below is assumed from the rest of the driver. */
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	return ret;
}

/* Try to re-enable the IRQ used by the data ready trigger. */
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct iio_dev *indio_dev = trig->private_data;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int i;

	/* If gpio still high (or high again),
	 * in theory it is possible we will need to do this several times */
	for (i = 0; i < 5; i++)
		if (gpio_get_value(irq_to_gpio(st->us->irq)))
			lis3l02dq_read_all(indio_dev, NULL);
		else
			break;
	if (i == 5)
		printk(KERN_INFO
		       "Failed to clear the interrupt for lis3l02dq\n");

	/* irq reenabled so success! */
	return 0;
}

static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
	.try_reenable = &lis3l02dq_trig_try_reen,
};

int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
	if (!st->trig) {
		ret = -ENOMEM;
		goto error_ret;
	}

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &lis3l02dq_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_trig;

	return 0;

error_free_trig:
	iio_trigger_free(st->trig);
error_ret:
	return ret;
}

void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	iio_trigger_free(st->trig);
}

void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	lis3l02dq_free_buf(indio_dev->buffer);
}

static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
{
	/* Disable unwanted channels otherwise the interrupt will not clear */
	u8 t;
	int ret;
	bool oneenabled = false;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;

	if (test_bit(0, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
	if (test_bit(1, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
	if (test_bit(2, indio_dev->active_scan_mask)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	if (!oneenabled) /* what happens in this case is unknown */
		return -EINVAL;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);
	if (ret)
		goto error_ret;

	return iio_triggered_buffer_postenable(indio_dev);
error_ret:
	return ret;
}

/* Turn all channels on again */
static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
{
	u8 t;
	int ret;

	ret = iio_triggered_buffer_predisable(indio_dev);
	if (ret)
		goto error_ret;

	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;
	t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
	t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
	t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);

error_ret:
	return ret;
}

static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &lis3l02dq_buffer_postenable,
	.predisable = &lis3l02dq_buffer_predisable,
};

int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	buffer = lis3l02dq_alloc_buf(indio_dev);
	if (!buffer)
		return -ENOMEM;

	indio_dev->buffer = buffer;

	buffer->scan_timestamp = true;
	indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;

	/* The top half stores the timestamp; our handler does the real work */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &lis3l02dq_trigger_handler,
						 0,
						 indio_dev,
						 "lis3l02dq_consumer%d",
						 indio_dev->id);

	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	lis3l02dq_free_buf(indio_dev->buffer);
	return ret;
}
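
/*
 * Rough sketch, not part of this file: the core driver is expected to wire the
 * helpers above together during probe roughly as follows. The threaded handler
 * name and the IRQ flags below are assumptions for illustration only.
 */
#if 0
static int lis3l02dq_example_probe_wiring(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int ret;

	ret = lis3l02dq_configure_buffer(indio_dev);
	if (ret)
		return ret;

	/* lis3l02dq_data_rdy_trig_poll() runs in hard irq context and either
	 * fires the data ready trigger or wakes the threaded handler. */
	ret = request_threaded_irq(st->us->irq,
				   &lis3l02dq_data_rdy_trig_poll,
				   &lis3l02dq_example_thread_handler, /* assumed */
				   IRQF_TRIGGER_RISING,
				   "lis3l02dq",
				   indio_dev);
	if (ret)
		goto error_unconfigure_buffer;

	ret = lis3l02dq_probe_trigger(indio_dev);
	if (ret)
		goto error_free_irq;

	return 0;

error_free_irq:
	free_irq(st->us->irq, indio_dev);
error_unconfigure_buffer:
	lis3l02dq_unconfigure_buffer(indio_dev);
	return ret;
}
#endif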