Linux Kernel 3.7.1
regmap.c
/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>

#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	return false;
}

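/*
 * Editor's illustration (not part of the original file): the four
 * helpers above defer to optional callbacks supplied by the driver
 * through struct regmap_config.  A hypothetical driver might describe
 * its chip like this; all register numbers here are invented.
 */
static bool foo_writeable_reg(struct device *dev, unsigned int reg)
{
	return reg != 0x00;		/* 0x00: read-only ID register */
}

static bool foo_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == 0x01;		/* 0x01: live status, never cache */
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x10,
	.writeable_reg	= foo_writeable_reg,
	.volatile_reg	= foo_volatile_reg,
};
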
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;

	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

static unsigned int regmap_parse_8(void *buf)
{
	u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);

	return b[0];
}

static unsigned int regmap_parse_16_native(void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(void *buf)
{
	u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);

	return b[0];
}

static unsigned int regmap_parse_32_native(void *buf)
{
	return *(u32 *)buf;
}

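/*
 * Worked example (editor's addition): with the 7.9 format above, writing
 * value 0x1a3 to register 0x2c packs as (0x2c << 9) | 0x1a3 = 0x59a3,
 * which cpu_to_be16() lays out on the wire as the bytes 0x59 0xa3.  The
 * parse helpers above perform the inverse conversion on read.
 */
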
static void regmap_lock_mutex(struct regmap *map)
{
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(struct regmap *map)
{
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(struct regmap *map)
{
	spin_lock(&map->spinlock);
}

static void regmap_unlock_spinlock(struct regmap *map)
{
	spin_unlock(&map->spinlock);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

struct regmap *regmap_init(struct device *dev,
			   const struct regmap_bus *bus,
			   void *bus_context,
			   const struct regmap_config *config)
{
	struct regmap *map, **m;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!bus || !config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (bus->fast_io) {
		spin_lock_init(&map->spinlock);
		map->lock = regmap_lock_spinlock;
		map->unlock = regmap_unlock_spinlock;
	} else {
		mutex_init(&map->mutex);
		map->lock = regmap_lock_mutex;
		map->unlock = regmap_unlock_mutex;
	}
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	map->use_single_rw = config->use_single_rw;
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else {
		map->read_flag_mask = bus->read_flag_mask;
	}

	reg_endian = config->reg_format_endian;
	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
		reg_endian = bus->reg_format_endian_default;
	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
		reg_endian = REGMAP_ENDIAN_BIG;

	val_endian = config->val_format_endian;
	if (val_endian == REGMAP_ENDIAN_DEFAULT)
		val_endian = bus->val_format_endian_default;
	if (val_endian == REGMAP_ENDIAN_DEFAULT)
		val_endian = REGMAP_ENDIAN_BIG;

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

	default:
		goto err_map;
	}

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_rw = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->n_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min ||
		    range_cfg->range_max > map->max_register ||
		    range_cfg->selector_reg > map->max_register ||
		    range_cfg->window_len == 0)
			goto err_range;

		/* Make sure that this register range has no selector
		   register or data window within its boundary */
		for (j = 0; j < config->n_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (_regmap_range_add(map, new) == false) {
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret < 0)
		goto err_range;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		ret = -ENOMEM;
		goto err_debugfs;
	}
	*m = map;
	devres_add(dev, m);

	return map;

err_debugfs:
	regmap_debugfs_exit(map);
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regmap_init);

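/*
 * Usage sketch (editor's addition): regmap_init() is normally reached
 * through a bus-specific wrapper such as regmap_init_i2c() or
 * regmap_init_spi(), which supply the regmap_bus.  A minimal caller,
 * using the hypothetical foo_regmap_config shown earlier and assuming
 * <linux/i2c.h>, could look like this:
 */
static struct regmap *foo_create_map(struct i2c_client *i2c)
{
	struct regmap *map = regmap_init_i2c(i2c, &foo_regmap_config);

	if (IS_ERR(map))
		dev_err(&i2c->dev, "regmap init failed: %ld\n", PTR_ERR(map));

	return map;
}
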
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *devm_regmap_init(struct device *dev,
				const struct regmap_bus *bus,
				void *bus_context,
				const struct regmap_config *config)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = regmap_init(dev, bus, bus_context, config);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(devm_regmap_init);

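/*
 * Usage sketch (editor's addition): the devm_ variant ties the map to
 * the device's lifetime, so no explicit regmap_exit() is needed in the
 * driver's remove path.  A hypothetical probe(), again assuming
 * <linux/i2c.h>:
 */
static int foo_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* The map is released automatically when the device is unbound */
	return 0;
}
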
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

void regmap_exit(struct regmap *map)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name, match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);

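/*
 * Usage sketch (editor's addition): a child device, for instance an MFD
 * function, can look up its parent's regmap without holding a direct
 * pointer to it.  Passing NULL for the name matches any map; the
 * register address 0x00 is invented.
 */
static int foo_child_read_id(struct device *dev, unsigned int *id)
{
	struct regmap *map = dev_get_regmap(dev->parent, NULL);

	if (!map)
		return -ENODEV;

	return regmap_read(map, 0x00, id);
}
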
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       unsigned int val_num)
{
	struct regmap_range_node *range;
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	range = _regmap_range_lookup(map, *reg);
	if (range) {
		win_offset = (*reg - range->range_min) % range->window_len;
		win_page = (*reg - range->range_min) / range->window_len;

		if (val_num > 1) {
			/* Bulk write shouldn't cross range boundary */
			if (*reg + val_num - 1 > range->range_max)
				return -EINVAL;

			/* ... or single page boundary */
			if (val_num > range->window_len - win_offset)
				return -EINVAL;
		}

		/* It is possible to have the selector register inside the
		   data window.  In that case the selector register is
		   present on every page and needs no page switching when
		   accessed on its own. */
		if (val_num > 1 ||
		    range->window_start + win_offset != range->selector_reg) {
			/* Use a separate work_buf during page switching */
			orig_work_buf = map->work_buf;
			map->work_buf = map->selector_work_buf;

			ret = _regmap_update_bits(map, range->selector_reg,
					range->selector_mask,
					win_page << range->selector_shift,
					&page_chg);

			map->work_buf = orig_work_buf;

			if (ret < 0)
				return ret;
		}

		*reg = range->window_start + win_offset;
	}

	return 0;
}

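/*
 * Configuration sketch (editor's addition): paged access is described
 * to the core with struct regmap_range_cfg entries in the regmap_config.
 * The numbers below are invented: virtual registers 0x100-0x2ff are
 * reached through a 16-register window at 0x20, with the page selected
 * by the low five bits of register 0x1f.
 */
static const struct regmap_range_cfg foo_ranges[] = {
	{
		.range_min	= 0x100,
		.range_max	= 0x2ff,
		.selector_reg	= 0x1f,
		.selector_mask	= 0x1f,
		.selector_shift	= 0,
		.window_start	= 0x20,
		.window_len	= 0x10,
	},
};
/* wired up with config->ranges = foo_ranges, config->n_ranges = 1 */
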
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
			     const void *val, size_t val_len)
{
	u8 *u8 = map->work_buf;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + (i * map->reg_stride)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;

		for (i = 0; i < val_len / val_bytes; i++) {
			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
			ival = map->format.parse_val(map->work_buf);
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %u ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
	if (ret < 0)
		return ret;

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	trace_regmap_hw_write_start(map->dev, reg,
				    val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == (map->work_buf + map->format.pad_bytes +
		    map->format.reg_bytes))
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map->dev, reg,
				   val_len / map->format.val_bytes);

	return ret;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;

	BUG_ON(!map->format.format_write && !map->format.format_val);

	if (!map->cache_bypass && map->format.format_write) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map->dev, reg, val);

	if (map->format.format_write) {
		ret = _regmap_select_page(map, &reg, 1);
		if (ret < 0)
			return ret;

		map->format.format_write(map, reg, val);

		trace_regmap_hw_write_start(map->dev, reg, 1);

		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.buf_size);

		trace_regmap_hw_write_done(map->dev, reg, 1);

		return ret;
	} else {
		map->format.format_val(map->work_buf + map->format.reg_bytes
				       + map->format.pad_bytes, val, 0);
		return _regmap_raw_write(map, reg,
					 map->work_buf +
					 map->format.reg_bytes +
					 map->format.pad_bytes,
					 map->format.val_bytes);
	}
}

int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map);

	ret = _regmap_write(map, reg, val);

	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);

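/*
 * Usage sketch (editor's addition): a simple locked single-register
 * write; register 0x04 and its reset bit are invented.
 */
static int foo_soft_reset(struct regmap *map)
{
	return regmap_write(map, 0x04, 0x0001);	/* set the RESET bit */
}
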
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to write to
 * @val: Block of data to be written, in native register size for the device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device in either a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	void *wval;

	if (!map->format.parse_val)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map);

	/* No formatting is required if val_bytes is 1 */
	if (val_bytes == 1) {
		wval = (void *)val;
	} else {
		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
		if (!wval) {
			ret = -ENOMEM;
			dev_err(map->dev, "Error in memory allocation\n");
			goto out;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_val(wval + i);
	}

	/*
	 * Some devices do not support bulk write; for them we issue a
	 * series of single write operations instead.  Since the map lock
	 * is already held we must use the unlocked helper here, and
	 * errors must leave through the cleanup path below rather than
	 * returning directly.
	 */
	if (map->use_single_rw) {
		for (i = 0; i < val_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * map->reg_stride),
						wval + (i * val_bytes),
						val_bytes);
			if (ret != 0)
				break;
		}
	} else {
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
	}

	if (val_bytes != 1)
		kfree(wval);

out:
	map->unlock(map);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);

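/*
 * Usage sketch (editor's addition): loading four consecutive 16-bit
 * coefficient registers in one bulk transfer.  The base address 0x30
 * and the data are invented; the buffer holds native-endian values of
 * the device's register size, as the comment above describes.
 */
static int foo_load_coefficients(struct regmap *map)
{
	static const u16 coefs[] = { 0x0101, 0x0202, 0x0303, 0x0404 };

	return regmap_bulk_write(map, 0x30, coefs, ARRAY_SIZE(coefs));
}
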
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	u8 *u8 = map->work_buf;
	int ret;

	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
	if (ret < 0)
		return ret;

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	/*
	 * Some buses or devices flag reads by setting the high bits in the
	 * register address; since it's always the high bits for all
	 * current formats we can do this here rather than in
	 * formatting.  This may break if we get interesting formats.
	 */
	u8[0] |= map->read_flag_mask;

	trace_regmap_hw_read_start(map->dev, reg,
				   val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map->dev, reg,
				  val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (!map->format.parse_val)
		return -EINVAL;

	if (map->cache_only)
		return -EBUSY;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0) {
		*val = map->format.parse_val(map->work_buf);

#ifdef LOG_DEVICE
		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map->dev, reg, *val);
	}

	if (ret == 0 && !map->cache_bypass)
		regcache_write(map, reg, *val);

	return ret;
}

int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map);

	ret = _regmap_read(map, reg, val);

	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

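/*
 * Usage sketch (editor's addition): polling a status flag through the
 * locked read API.  Register 0x01 and the READY bit are invented.
 */
static bool foo_device_ready(struct regmap *map)
{
	unsigned int status;

	if (regmap_read(map, 0x01, &status) != 0)
		return false;

	return status & 0x8000;	/* READY flag */
}
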
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret = 0, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + (i * map->reg_stride),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!map->format.parse_val)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	if (vol || map->cache_type == REGCACHE_NONE) {
		/*
		 * Some devices do not support bulk read; for them we issue
		 * a series of single read operations instead.
		 */
		if (map->use_single_rw) {
			for (i = 0; i < val_count; i++) {
				ret = regmap_raw_read(map,
						reg + (i * map->reg_stride),
						val + (i * val_bytes),
						val_bytes);
				if (ret != 0)
					return ret;
			}
		} else {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_val(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;
			memcpy(val + (i * val_bytes), &ival, val_bytes);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change)
{
	int ret;
	unsigned int tmp, orig;

	ret = _regmap_read(map, reg, &orig);
	if (ret != 0)
		return ret;

	tmp = orig & ~mask;
	tmp |= val & mask;

	if (tmp != orig) {
		ret = _regmap_write(map, reg, tmp);
		*change = true;
	} else {
		*change = false;
	}

	return ret;
}

int regmap_update_bits(struct regmap *map, unsigned int reg,
		       unsigned int mask, unsigned int val)
{
	bool change;
	int ret;

	map->lock(map);
	ret = _regmap_update_bits(map, reg, mask, val, &change);
	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);

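/*
 * Usage sketch (editor's addition): updating a two-bit gain field via
 * read-modify-write without touching the rest of the register.  The
 * register address and field position are invented.
 */
static int foo_set_gain(struct regmap *map, unsigned int gain)
{
	return regmap_update_bits(map, 0x08, 0x0030, (gain & 0x3) << 4);
}
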
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val,
			     bool *change)
{
	int ret;

	map->lock(map);
	ret = _regmap_update_bits(map, reg, mask, val, change);
	map->unlock(map);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
			  int num_regs)
{
	int i, ret;
	bool bypass;

	/* If needed the implementation can be extended to support this */
	if (map->patch)
		return -EBUSY;

	map->lock(map);

	bypass = map->cache_bypass;

	map->cache_bypass = true;

	/* Write out first; it's useful to apply even if we fail later. */
	for (i = 0; i < num_regs; i++) {
		ret = _regmap_write(map, regs[i].reg, regs[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				regs[i].reg, regs[i].def, ret);
			goto out;
		}
	}

	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
	if (map->patch != NULL) {
		memcpy(map->patch, regs,
		       num_regs * sizeof(struct reg_default));
		map->patch_regs = num_regs;
	} else {
		ret = -ENOMEM;
	}

out:
	map->cache_bypass = bypass;

	map->unlock(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);

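/*
 * Usage sketch (editor's addition): registering a small errata patch,
 * typically once at probe time.  The register/value pairs are invented;
 * the stored patch is also replayed by the cache code after a
 * regcache_sync().
 */
static const struct reg_default foo_patch[] = {
	{ .reg = 0x21, .def = 0x0001 },
	{ .reg = 0x42, .def = 0x3300 },
};

static int foo_apply_patch(struct regmap *map)
{
	return regmap_register_patch(map, foo_patch, ARRAY_SIZE(foo_patch));
}
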
/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);