Linux Kernel 3.7.1
iio_simple_dummy_buffer.c
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitmap.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/kfifo_buf.h>

#include "iio_simple_dummy.h"

/* Some fake data */

static const s16 fakedata[] = {
	[voltage0] = 7,
	[diffvoltage1m2] = -33,
	[diffvoltage3m4] = -2,
	[accelx] = 344,
};
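
/*
 * Note (not from this file): the designated initializers above rely on a
 * channel index enum that is assumed to be provided by iio_simple_dummy.h,
 * roughly along these lines; the exact definition there may differ:
 *
 *	enum {
 *		voltage0,
 *		diffvoltage1m2,
 *		diffvoltage3m4,
 *		accelx,
 *	};
 */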

/*
 * iio_simple_dummy_trigger_h() - the trigger handler function
 * @irq: the interrupt number
 * @p: private data - always a pointer to the poll func.
 *
 * Bottom half of the buffered capture: on a trigger event it fills one
 * scan with fake channel data, optionally appends a timestamp and pushes
 * the result into the buffer.
 */
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct iio_buffer *buffer = indio_dev->buffer;
	int len = 0;
	u16 *data;

	data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (data == NULL)
		goto done;

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
		/*
		 * Three common options here:
		 * hardware scans: certain combinations of channels make
		 * up a fast read. The capture will consist of all of them.
		 * Hence we just call the grab data function and fill the
		 * buffer without processing.
		 * software scans: can be considered to be random access
		 * so efficient reading is just a case of minimal bus
		 * transactions.
		 * software culled hardware scans:
		 * occasionally a driver may process the nearest hardware
		 * scan to avoid storing elements that are not desired. This
		 * is the fiddliest option by far.
		 * Here let's pretend we have random access, with the values
		 * coming from the constant table fakedata.
		 */
		int i, j;
		for (i = 0, j = 0;
		     i < bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);
		     i++, j++) {
			j = find_next_bit(buffer->scan_mask,
					  indio_dev->masklength, j);
			/* random access read from the 'device' */
			data[i] = fakedata[j];
			len += 2;
		}
	}
	/* Store the timestamp at an 8 byte aligned offset */
	if (indio_dev->scan_timestamp)
		*(s64 *)((u8 *)data + ALIGN(len, sizeof(s64)))
			= iio_get_time_ns();
	iio_push_to_buffer(buffer, (u8 *)data);

	kfree(data);

done:
	/*
	 * Tell the core we are done with this trigger and ready for the
	 * next one.
	 */
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
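
/*
 * Worked example (not from this file): with voltage0 and accelx enabled and
 * scan_timestamp set, the handler above builds one scan laid out as
 *
 *	offset 0:  s16 sample for voltage0
 *	offset 2:  s16 sample for accelx
 *	offset 4:  padding up to the next 8 byte boundary
 *	offset 8:  s64 timestamp
 *
 * i.e. len ends up as 4, ALIGN(4, sizeof(s64)) places the timestamp at
 * offset 8, and scan_bytes works out to 16 here, assuming the usual
 * 16 bit storage per channel that the s16 fakedata suggests.
 */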

static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
	/*
	 * iio_sw_buffer_preenable:
	 * Generic function for equal sized ring elements + 64 bit timestamp
	 * Assumes that any combination of channels can be enabled.
	 * Typically replaced to implement restrictions on what combinations
	 * can be captured (hardware scan modes).
	 */
	.preenable = &iio_sw_buffer_preenable,
	/*
	 * iio_triggered_buffer_postenable:
	 * Generic function that simply attaches the pollfunc to the trigger.
	 * Replace this to mess with hardware state before we attach the
	 * trigger.
	 */
	.postenable = &iio_triggered_buffer_postenable,
	/*
	 * iio_triggered_buffer_predisable:
	 * Generic function that simply detaches the pollfunc from the trigger.
	 * Replace this to put hardware state back again after the trigger is
	 * detached but before userspace knows we have disabled the ring.
	 */
	.predisable = &iio_triggered_buffer_predisable,
};
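
/*
 * Illustrative sketch (hypothetical, not part of this driver): a device with
 * hardware scan modes could replace .preenable with a wrapper that still uses
 * the generic helper but rejects channel combinations the hardware cannot
 * capture in one scan. The restriction below is invented for the example.
 */
#if 0
static int iio_example_restricted_preenable(struct iio_dev *indio_dev)
{
	/* Imaginary hardware: only channels 0 and 3 can be scanned together */
	const unsigned long allowed_mask = BIT(0) | BIT(3);
	int ret;

	/* Let the generic helper size the scan and set the active mask */
	ret = iio_sw_buffer_preenable(indio_dev);
	if (ret)
		return ret;

	/* Refuse any request that is not a subset of the supported scan */
	if (!bitmap_subset(indio_dev->active_scan_mask, &allowed_mask,
			   indio_dev->masklength))
		return -EINVAL;

	return 0;
}
#endif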

/**
 * iio_simple_dummy_configure_buffer() - set up buffer and related elements
 * @indio_dev: device instance state
 * @channels: channel specification array
 * @num_channels: number of channels
 */
int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev,
	const struct iio_chan_spec *channels, unsigned int num_channels)
{
	int ret;
	struct iio_buffer *buffer;

	/* Allocate a buffer to use - here a kfifo */
	buffer = iio_kfifo_allocate(indio_dev);
	if (buffer == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->buffer = buffer;

	/* Enable timestamps by default */
	buffer->scan_timestamp = true;

	/*
	 * Tell the core what device type specific functions should
	 * be run on either side of buffer capture enable / disable.
	 */
	indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;

	/*
	 * Configure a polling function.
	 * When a trigger event with this polling function connected
	 * occurs, this function is run. Typically this grabs data
	 * from the device.
	 *
	 * NULL for the top half. This is normally implemented only if we
	 * either want to ping a capture now pin (no sleeping) or grab
	 * a timestamp as close as possible to a data ready trigger firing.
	 *
	 * IRQF_ONESHOT ensures irqs are masked such that only one instance
	 * of the handler can run at a time.
	 *
	 * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
	 * as seen under /proc/interrupts. Remaining parameters as per printk.
	 */
	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &iio_simple_dummy_trigger_h,
						 IRQF_ONESHOT,
						 indio_dev,
						 "iio_simple_dummy_consumer%d",
						 indio_dev->id);

	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_free_buffer;
	}

	/*
	 * Notify the core that this device is capable of buffered capture
	 * driven by a trigger.
	 */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	ret = iio_buffer_register(indio_dev, channels, num_channels);
	if (ret)
		goto error_dealloc_pollfunc;

	return 0;

error_dealloc_pollfunc:
	iio_dealloc_pollfunc(indio_dev->pollfunc);
error_free_buffer:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

/**
 * iio_simple_dummy_unconfigure_buffer() - undo buffer related setup
 * @indio_dev: device instance state
 */
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_buffer_unregister(indio_dev);
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}
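
/*
 * Usage sketch (not part of this file): the matching calls are expected to
 * live in the main iio_simple_dummy driver's probe and remove paths, roughly
 * as below. The surrounding probe/remove code and the error label are
 * hypothetical.
 */
#if 0
	/* probe path, once channels have been assigned to indio_dev */
	ret = iio_simple_dummy_configure_buffer(indio_dev,
						indio_dev->channels,
						indio_dev->num_channels);
	if (ret < 0)
		goto error_free_device;

	/* remove path, tearing down in the reverse order of probe */
	iio_simple_dummy_unconfigure_buffer(indio_dev);
#endif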