inkern.c (Linux Kernel 3.7.1)
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

/* Pairs an iio_map entry with the IIO device that provides it, for the
 * global list below.
 */
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

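/* Usage sketch (hypothetical driver code, not part of this file): an ADC
 * driver registers a NULL-terminated map array, typically at probe time.
 * The consumer names and channel label below are assumed for illustration;
 * adc_channel_label must match a channel's datasheet_name in the
 * provider's iio_chan_spec array.
 */
static struct iio_map adc_consumer_maps[] = {
	{
		.consumer_dev_name = "battery-monitor",	/* assumed consumer */
		.consumer_channel = "batt_voltage",
		.adc_channel_label = "VIN0",		/* assumed label */
	},
	{ },	/* sentinel: NULL consumer_dev_name terminates the array */
};

static int adc_probe_register_maps(struct iio_dev *indio_dev)
{
	return iio_map_array_register(indio_dev, adc_consumer_maps);
}
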
/* Assumes that the exact same array (i.e. the same memory locations) is
 * passed at unregistration as was passed at registration, rather than
 * doing more complex checking of the contents.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev,
			     struct iio_map *maps)
{
	int i = 0, ret = 0;
	bool found_it;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		found_it = false;
		list_for_each_entry(mapi, &iio_map_list, l)
			if (&maps[i] == mapi->map) {
				list_del(&mapi->l);
				kfree(mapi);
				found_it = true;
				break;
			}
		if (found_it == false) {
			ret = -ENODEV;
			goto error_ret;
		}
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

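/* Usage sketch (hypothetical, not part of this file): because of the
 * same-array assumption above, the provider passes the identical
 * adc_consumer_maps pointer back, typically at remove time.
 */
static int adc_remove_unregister_maps(struct iio_dev *indio_dev)
{
	return iio_map_array_unregister(indio_dev, adc_consumer_maps);
}
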
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

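/* Usage sketch (hypothetical consumer code, not part of this file): a
 * consumer looks its channel up by the names used in the map, and must
 * release it when done. Errors follow the ERR_PTR convention seen above.
 */
static int battery_monitor_attach(void)
{
	struct iio_channel *chan;

	chan = iio_channel_get("battery-monitor", "batt_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* -ENODEV if no map matched */

	/* ... use the channel ... */

	iio_channel_release(chan);
	return 0;
}
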
void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

struct iio_channel *iio_channel_get_all(const char *name)
{
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (name == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

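/* Usage sketch (hypothetical, not part of this file): the returned array
 * is NULL-terminated via the indio_dev field, so a consumer can walk it
 * without knowing its length.
 */
static void battery_monitor_list_channels(void)
{
	struct iio_channel *chans, *chan;

	chans = iio_channel_get_all("battery-monitor");
	if (IS_ERR(chans))
		return;

	for (chan = &chans[0]; chan->indio_dev; chan++) {
		/* ... use chan ... */
	}

	iio_channel_release_all(chans);
}
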
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;

	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
					       val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

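/* Usage sketch (hypothetical consumer code, not part of this file): read
 * an unscaled ADC code; the info_exist_lock check above turns a vanished
 * provider driver into -ENODEV rather than a crash.
 */
static int battery_monitor_sample(struct iio_channel *chan)
{
	int code, ret;

	ret = iio_read_channel_raw(chan, &code);
	if (ret < 0)
		return ret;	/* e.g. -ENODEV if the provider unregistered */

	return code;
}
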
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret == 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

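/* Worked example (all values assumed for illustration): a 12-bit ADC with
 * a 1200 mV reference reports no offset and a fractional scale of
 * scale_val = 1200, scale_val2 = 4096 (i.e. 1200/4096 mV per code).
 * Converting raw = 512 with a caller-supplied scale of 1000 (mV -> uV):
 *
 *	*processed = div_s64(512 * 1200 * 1000, 4096) = 150000
 *
 * i.e. 512 codes * (1200/4096) mV/code = 150 mV = 150000 uV.
 */
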
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

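/* Usage sketch (hypothetical, not part of this file): consumers that just
 * want a value in real-world units (millivolts for voltage channels, per
 * the IIO ABI) can use the processed interface and let the raw + convert
 * fallback above do the work.
 */
static int battery_monitor_read_mv(struct iio_channel *chan, int *mv)
{
	return iio_read_channel_processed(chan, mv);
}
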
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

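/* Usage sketch (hypothetical, not part of this file): a consumer that
 * wants to do its own arithmetic reads the raw code and the scale
 * separately. The meaning of val/val2 depends on the return value
 * (IIO_VAL_INT, IIO_VAL_INT_PLUS_MICRO, ...), as in the switch above.
 */
static int battery_monitor_read_raw_and_scale(struct iio_channel *chan)
{
	int code, scale_int, scale_frac, ret;

	ret = iio_read_channel_raw(chan, &code);
	if (ret < 0)
		return ret;

	ret = iio_read_channel_scale(chan, &scale_int, &scale_frac);
	if (ret < 0)
		return ret;
	/* ret now encodes the scale type, e.g. IIO_VAL_INT_PLUS_MICRO */

	return 0;
}
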
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
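
/* Usage sketch (hypothetical, not part of this file): a consumer checks
 * that the mapped channel really is a voltage channel before using it.
 */
static int battery_monitor_check_type(struct iio_channel *chan)
{
	enum iio_chan_type type;
	int ret;

	ret = iio_get_channel_type(chan, &type);
	if (ret < 0)
		return ret;

	return (type == IIO_VOLTAGE) ? 0 : -EINVAL;
}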