Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
devres.c
Go to the documentation of this file.
1 /*
2  * drivers/base/devres.c - device resource management
3  *
4  * Copyright (c) 2006 SUSE Linux Products GmbH
5  * Copyright (c) 2006 Tejun Heo <[email protected]>
6  *
7  * This file is released under the GPLv2.
8  */
9 
10 #include <linux/device.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 
14 #include "base.h"
15 
16 struct devres_node {
17  struct list_head entry;
19 #ifdef CONFIG_DEBUG_DEVRES
20  const char *name;
21  size_t size;
22 #endif
23 };
24 
/* A devres_node followed by the client-visible payload.  devres_alloc()
 * hands out &data[0]; container_of(res, struct devres, data) recovers
 * the node from a client pointer. */
struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	unsigned long long data[]; /* guarantee ull alignment */
};
30 
/* Marker pair bracketing a run of devres entries.  node[0] is the
 * opening marker, node[1] the closing one (unlinked while the group is
 * open).  @id identifies the group; @color is scratch state used by
 * remove_nodes() while deciding which groups to release. */
struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};
37 
#ifdef CONFIG_DEBUG_DEVRES
/* With CONFIG_DEBUG_DEVRES, each node carries a name/size tag and every
 * ADD/REM/REL operation can be logged.  Logging is off by default and
 * toggled at runtime through the "log" module parameter. */
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Record identifying @name and payload @size on @node for debug output. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

/* Log operation @op ("ADD"/"REM"/"REL") on @node when logging is enabled. */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
			   op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
60 
61 /*
62  * Release functions for devres group. These callbacks are used only
63  * for identification.
64  */
/* Identity tag for a group's opening marker; intentionally does nothing. */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}
69 
/* Identity tag for a group's closing marker; intentionally does nothing. */
static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}
74 
75 static struct devres_group * node_to_group(struct devres_node *node)
76 {
77  if (node->release == &group_open_release)
78  return container_of(node, struct devres_group, node[0]);
79  if (node->release == &group_close_release)
80  return container_of(node, struct devres_group, node[1]);
81  return NULL;
82 }
83 
84 static __always_inline struct devres * alloc_dr(dr_release_t release,
85  size_t size, gfp_t gfp)
86 {
87  size_t tot_size = sizeof(struct devres) + size;
88  struct devres *dr;
89 
90  dr = kmalloc_track_caller(tot_size, gfp);
91  if (unlikely(!dr))
92  return NULL;
93 
94  memset(dr, 0, tot_size);
95  INIT_LIST_HEAD(&dr->node.entry);
96  dr->node.release = release;
97  return dr;
98 }
99 
/* Link @node onto @dev's resource list.  Caller must hold
 * dev->devres_lock.  @node must not already be on any list. */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
106 
107 #ifdef CONFIG_DEBUG_DEVRES
/**
 * __devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @name: Name of the resource (debug builds only)
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
		      const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc);
120 #else
121 
/**
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
144 #endif
145 
161 void devres_for_each_res(struct device *dev, dr_release_t release,
162  dr_match_t match, void *match_data,
163  void (*fn)(struct device *, void *, void *),
164  void *data)
165 {
166  struct devres_node *node;
167  struct devres_node *tmp;
168  unsigned long flags;
169 
170  if (!fn)
171  return;
172 
173  spin_lock_irqsave(&dev->devres_lock, flags);
175  &dev->devres_head, entry) {
176  struct devres *dr = container_of(node, struct devres, node);
177 
178  if (node->release != release)
179  continue;
180  if (match && !match(dev, dr->data, match_data))
181  continue;
182  fn(dev, dr->data, data);
183  }
184  spin_unlock_irqrestore(&dev->devres_lock, flags);
185 }
187 
/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free, may be NULL
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		/* must not still be registered on a device */
		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
204 
/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
224 
225 static struct devres *find_dr(struct device *dev, dr_release_t release,
226  dr_match_t match, void *match_data)
227 {
228  struct devres_node *node;
229 
231  struct devres *dr = container_of(node, struct devres, node);
232 
233  if (node->release != release)
234  continue;
235  if (match && !match(dev, dr->data, match_data))
236  continue;
237  return dr;
238  }
239 
240  return NULL;
241 }
242 
257 void * devres_find(struct device *dev, dr_release_t release,
258  dr_match_t match, void *match_data)
259 {
260  struct devres *dr;
261  unsigned long flags;
262 
263  spin_lock_irqsave(&dev->devres_lock, flags);
264  dr = find_dr(dev, release, match, match_data);
265  spin_unlock_irqrestore(&dev->devres_lock, flags);
266 
267  if (dr)
268  return dr->data;
269  return NULL;
270 }
272 
287 void * devres_get(struct device *dev, void *new_res,
288  dr_match_t match, void *match_data)
289 {
290  struct devres *new_dr = container_of(new_res, struct devres, data);
291  struct devres *dr;
292  unsigned long flags;
293 
294  spin_lock_irqsave(&dev->devres_lock, flags);
295  dr = find_dr(dev, new_dr->node.release, match, match_data);
296  if (!dr) {
297  add_dr(dev, &new_dr->node);
298  dr = new_dr;
299  new_dr = NULL;
300  }
301  spin_unlock_irqrestore(&dev->devres_lock, flags);
302  devres_free(new_dr);
303 
304  return dr->data;
305 }
307 
323 void * devres_remove(struct device *dev, dr_release_t release,
324  dr_match_t match, void *match_data)
325 {
326  struct devres *dr;
327  unsigned long flags;
328 
329  spin_lock_irqsave(&dev->devres_lock, flags);
330  dr = find_dr(dev, release, match, match_data);
331  if (dr) {
332  list_del_init(&dr->node.entry);
333  devres_log(dev, &dr->node, "REM");
334  }
335  spin_unlock_irqrestore(&dev->devres_lock, flags);
336 
337  if (dr)
338  return dr->data;
339  return NULL;
340 }
342 
361 int devres_destroy(struct device *dev, dr_release_t release,
362  dr_match_t match, void *match_data)
363 {
364  void *res;
365 
366  res = devres_remove(dev, release, match, match_data);
367  if (unlikely(!res))
368  return -ENOENT;
369 
370  devres_free(res);
371  return 0;
372 }
374 
375 
391 int devres_release(struct device *dev, dr_release_t release,
392  dr_match_t match, void *match_data)
393 {
394  void *res;
395 
396  res = devres_remove(dev, release, match, match_data);
397  if (unlikely(!res))
398  return -ENOENT;
399 
400  (*release)(dev, res);
401  devres_free(res);
402  return 0;
403 }
405 
/* Unlink the nodes in [first, end) onto @todo for later release.
 * Ordinary devres entries are always moved; a group is moved only when
 * it is wholly contained in the range (see second pass).  Caller must
 * hold dev->devres_lock.  Returns the number of non-group entries
 * moved to @todo. */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	/* only group markers remain in the range now */
	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		/* one increment per marker seen; +1 extra if still open */
		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
474 
/* Remove all nodes in [first, end) and invoke their release callbacks.
 * Called with dev->devres_lock held (irq state in @flags); the lock is
 * dropped before the callbacks run since they may sleep or re-enter
 * devres.  Returns the number of released non-group entries. */
static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
498 
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, -ENODEV if @dev looks
 * uninitialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	/* release_nodes() drops devres_lock for us */
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
517 
531 void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
532 {
533  struct devres_group *grp;
534  unsigned long flags;
535 
536  grp = kmalloc(sizeof(*grp), gfp);
537  if (unlikely(!grp))
538  return NULL;
539 
540  grp->node[0].release = &group_open_release;
541  grp->node[1].release = &group_close_release;
542  INIT_LIST_HEAD(&grp->node[0].entry);
543  INIT_LIST_HEAD(&grp->node[1].entry);
544  set_node_dbginfo(&grp->node[0], "grp<", 0);
545  set_node_dbginfo(&grp->node[1], "grp>", 0);
546  grp->id = grp;
547  if (id)
548  grp->id = id;
549 
550  spin_lock_irqsave(&dev->devres_lock, flags);
551  add_dr(dev, &grp->node[0]);
552  spin_unlock_irqrestore(&dev->devres_lock, flags);
553  return grp->id;
554 }
556 
557 /* Find devres group with ID @id. If @id is NULL, look for the latest. */
558 static struct devres_group * find_group(struct device *dev, void *id)
559 {
560  struct devres_node *node;
561 
563  struct devres_group *grp;
564 
565  if (node->release != &group_open_release)
566  continue;
567 
568  grp = container_of(node, struct devres_group, node[0]);
569 
570  if (id) {
571  if (grp->id == id)
572  return grp;
573  } else if (list_empty(&grp->node[1].entry))
574  return grp;
575  }
576 
577  return NULL;
578 }
579 
588 void devres_close_group(struct device *dev, void *id)
589 {
590  struct devres_group *grp;
591  unsigned long flags;
592 
593  spin_lock_irqsave(&dev->devres_lock, flags);
594 
595  grp = find_group(dev, id);
596  if (grp)
597  add_dr(dev, &grp->node[1]);
598  else
599  WARN_ON(1);
600 
601  spin_unlock_irqrestore(&dev->devres_lock, flags);
602 }
604 
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that the group itself is also
 * removed; resources registered inside it stay on the device.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		/* unlink both markers; node[1] may never have been added */
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* kfree(NULL) is a no-op, so this is safe on the WARN path */
	kfree(grp);
}
634 
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* for a closed group, stop just past the closing marker */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* release_nodes() drops devres_lock for us */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
672 
673 /*
674  * Managed kzalloc/kfree
675  */
/* Release callback for devm_kzalloc() allocations; the memory itself is
 * freed by the devres core, so nothing to do here. */
static void devm_kzalloc_release(struct device *dev, void *res)
{
	/* noop */
}
680 
/* Match a devm_kzalloc() resource by the exact pointer handed back to
 * the caller. */
static int devm_kzalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}
685 
/**
 * devm_kzalloc - Resource-managed kzalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kzalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kzalloc_release, size, gfp);
	if (unlikely(!dr))
		return NULL;

	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
713 
721 void devm_kfree(struct device *dev, void *p)
722 {
723  int rc;
724 
725  rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
726  WARN_ON(rc);
727 }