Linux Kernel 3.7.1
attribute_container.c
/*
 * attribute_container.c - implementation of a simple container for classes
 *
 * Copyright (c) 2005 - James Bottomley <[email protected]>
 *
 * This file is licensed under GPLv2
 *
 * The basic idea here is to enable a device to be attached to an
 * arbitrary number of classes without having to allocate storage for them.
 * Instead, the contained classes select the devices they need to attach
 * to via a matching function.
 */
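
/*
 * Illustrative sketch (not part of the original file): a typical user of
 * this API embeds its match logic in a struct attribute_container and
 * registers it once.  my_class, my_match and my_device_is_interesting()
 * below are hypothetical names used only for illustration.
 *
 *	static int my_match(struct attribute_container *cont,
 *			    struct device *dev)
 *	{
 *		return my_device_is_interesting(dev);
 *	}
 *
 *	static struct attribute_container my_cont = {
 *		.class = &my_class,
 *		.match = my_match,
 *	};
 *
 *	attribute_container_register(&my_cont);
 *
 * After registration, attribute_container_add_device(dev, NULL) creates
 * and registers one class device for every container whose match()
 * accepts dev.
 */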

#include <linux/attribute_container.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include "base.h"

/* This is a private structure used to tie the classdev and the
 * container .. it should never be visible outside this file */
struct internal_container {
	struct klist_node node;
	struct attribute_container *cont;
	struct device classdev;
};
32 
33 static void internal_container_klist_get(struct klist_node *n)
34 {
35  struct internal_container *ic =
37  get_device(&ic->classdev);
38 }
39 
40 static void internal_container_klist_put(struct klist_node *n)
41 {
42  struct internal_container *ic =
44  put_device(&ic->classdev);
45 }
46 

/* attribute_container_classdev_to_container - given a classdev created by
 * attribute_container_add_device(), return the container it belongs to. */
struct attribute_container *
attribute_container_classdev_to_container(struct device *classdev)
{
	struct internal_container *ic =
		container_of(classdev, struct internal_container, classdev);
	return ic->cont;
}
EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);

static LIST_HEAD(attribute_container_list);

static DEFINE_MUTEX(attribute_container_mutex);

/* attribute_container_register - register an attribute container.
 * @cont must be allocated (and zeroed) by the caller. */
int
attribute_container_register(struct attribute_container *cont)
{
	INIT_LIST_HEAD(&cont->node);
	klist_init(&cont->containers, internal_container_klist_get,
		   internal_container_klist_put);

	mutex_lock(&attribute_container_mutex);
	list_add_tail(&cont->node, &attribute_container_list);
	mutex_unlock(&attribute_container_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(attribute_container_register);

/* attribute_container_unregister - remove a previously registered container.
 * Fails with -EBUSY if the container still has class devices attached. */
int
attribute_container_unregister(struct attribute_container *cont)
{
	int retval = -EBUSY;
	mutex_lock(&attribute_container_mutex);
	spin_lock(&cont->containers.k_lock);
	if (!list_empty(&cont->containers.k_list))
		goto out;
	retval = 0;
	list_del(&cont->node);
 out:
	spin_unlock(&cont->containers.k_lock);
	mutex_unlock(&attribute_container_mutex);
	return retval;
}
EXPORT_SYMBOL_GPL(attribute_container_unregister);

/* private function used as class release */
static void attribute_container_release(struct device *classdev)
{
	struct internal_container *ic
		= container_of(classdev, struct internal_container, classdev);
	struct device *dev = classdev->parent;

	kfree(ic);
	put_device(dev);
}

/* attribute_container_add_device - see if any container is interested in dev.
 *
 * Allocates an internal class device for every registered container whose
 * match() accepts dev.  If @fn is provided it is called to add the class
 * device (possibly at a later time); otherwise the class device is added
 * here via attribute_container_add_class_device(). */
void
attribute_container_add_device(struct device *dev,
			       int (*fn)(struct attribute_container *,
					 struct device *,
					 struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;

		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
		if (!ic) {
			dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
			continue;
		}

		ic->cont = cont;
		device_initialize(&ic->classdev);
		ic->classdev.parent = get_device(dev);
		ic->classdev.class = cont->class;
		cont->class->dev_release = attribute_container_release;
		dev_set_name(&ic->classdev, dev_name(dev));
		if (fn)
			fn(cont, dev, &ic->classdev);
		else
			attribute_container_add_class_device(&ic->classdev);
		klist_add_tail(&ic->node, &cont->containers);
	}
	mutex_unlock(&attribute_container_mutex);
}

/* FIXME: can't break out of this unless klist_iter_exit is also
 * called before doing the break
 */
#define klist_for_each_entry(pos, head, member, iter) \
	for (klist_iter_init(head, iter); (pos = ({ \
		struct klist_node *n = klist_next(iter); \
		n ? container_of(n, typeof(*pos), member) : \
			({ klist_iter_exit(iter) ; NULL; }); \
	}) ) != NULL; )

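/*
 * Illustrative sketch (not part of the original file): because the macro
 * above only calls klist_iter_exit() when the iteration runs to the end,
 * a caller that breaks out early must exit the iterator itself, as
 * attribute_container_find_class_device() does at the end of this file:
 *
 *	klist_for_each_entry(ic, &cont->containers, node, &iter) {
 *		if (ic->classdev.parent == dev) {
 *			klist_iter_exit(&iter);
 *			break;
 *		}
 *	}
 */
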
/* attribute_container_remove_device - undo attribute_container_add_device.
 *
 * For every matching container, removes dev's class device.  If @fn is
 * provided it is expected to undo whatever the @fn passed to
 * attribute_container_add_device() did; otherwise the class device is
 * unregistered here. */
void
attribute_container_remove_device(struct device *dev,
				  void (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		if (attribute_container_no_classdevs(cont))
			continue;

		if (!cont->match(cont, dev))
			continue;

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev != ic->classdev.parent)
				continue;
			klist_del(&ic->node);
			if (fn)
				fn(cont, dev, &ic->classdev);
			else {
				attribute_container_remove_attrs(&ic->classdev);
				device_unregister(&ic->classdev);
			}
		}
	}
	mutex_unlock(&attribute_container_mutex);
}

/* attribute_container_device_trigger - call @fn for each classdev of each
 * matching container.  Containers registered with no classdevs get a
 * single callback with a NULL classdev. */
void
attribute_container_device_trigger(struct device *dev,
				   int (*fn)(struct attribute_container *,
					     struct device *,
					     struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;
		struct klist_iter iter;

		if (!cont->match(cont, dev))
			continue;

		if (attribute_container_no_classdevs(cont)) {
			fn(cont, dev, NULL);
			continue;
		}

		klist_for_each_entry(ic, &cont->containers, node, &iter) {
			if (dev == ic->classdev.parent)
				fn(cont, dev, &ic->classdev);
		}
	}
	mutex_unlock(&attribute_container_mutex);
}

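/*
 * Illustrative sketch (not part of the original file): a subsystem would
 * typically pass its own callback to attribute_container_device_trigger();
 * my_configure below is a hypothetical name.
 *
 *	static int my_configure(struct attribute_container *cont,
 *				struct device *dev,
 *				struct device *classdev)
 *	{
 *		// classdev is NULL for containers without classdevs
 *		return 0;
 *	}
 *
 *	attribute_container_device_trigger(dev, my_configure);
 */
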
/* attribute_container_trigger - call @fn once for each container that
 * matches dev. */
void
attribute_container_trigger(struct device *dev,
			    int (*fn)(struct attribute_container *,
				      struct device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		if (cont->match(cont, dev))
			fn(cont, dev);
	}
	mutex_unlock(&attribute_container_mutex);
}

/* attribute_container_add_attrs - add the container's attributes (either an
 * attribute group or an individual attribute array, never both) to the
 * classdev. */
int
attribute_container_add_attrs(struct device *classdev)
{
	struct attribute_container *cont =
		attribute_container_classdev_to_container(classdev);
	struct device_attribute **attrs = cont->attrs;
	int i, error;

	BUG_ON(attrs && cont->grp);

	if (!attrs && !cont->grp)
		return 0;

	if (cont->grp)
		return sysfs_create_group(&classdev->kobj, cont->grp);

	for (i = 0; attrs[i]; i++) {
		sysfs_attr_init(&attrs[i]->attr);
		error = device_create_file(classdev, attrs[i]);
		if (error)
			return error;
	}

	return 0;
}

/* attribute_container_add_class_device - same as device_add plus the
 * addition of the container attributes. */
int
attribute_container_add_class_device(struct device *classdev)
{
	int error = device_add(classdev);
	if (error)
		return error;
	return attribute_container_add_attrs(classdev);
}

/* attribute_container_add_class_device_adapter - simple adapter so that
 * attribute_container_add_class_device() can be used as the @fn argument
 * of the trigger functions above. */
int
attribute_container_add_class_device_adapter(struct attribute_container *cont,
					     struct device *dev,
					     struct device *classdev)
{
	return attribute_container_add_class_device(classdev);
}

/* attribute_container_remove_attrs - remove the attributes added by
 * attribute_container_add_attrs(). */
void
attribute_container_remove_attrs(struct device *classdev)
{
	struct attribute_container *cont =
		attribute_container_classdev_to_container(classdev);
	struct device_attribute **attrs = cont->attrs;
	int i;

	if (!attrs && !cont->grp)
		return;

	if (cont->grp) {
		sysfs_remove_group(&classdev->kobj, cont->grp);
		return;
	}

	for (i = 0; attrs[i]; i++)
		device_remove_file(classdev, attrs[i]);
}

/* attribute_container_class_device_del - equivalent of device_del plus
 * removal of the container attributes. */
void
attribute_container_class_device_del(struct device *classdev)
{
	attribute_container_remove_attrs(classdev);
	device_del(classdev);
}

/* attribute_container_find_class_device - find the class device created for
 * dev in the given container, or return NULL if there is none. */
struct device *
attribute_container_find_class_device(struct attribute_container *cont,
				      struct device *dev)
{
	struct device *cdev = NULL;
	struct internal_container *ic;
	struct klist_iter iter;

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic->classdev.parent == dev) {
			cdev = &ic->classdev;
			/* FIXME: must exit iterator then break */
			klist_iter_exit(&iter);
			break;
		}
	}

	return cdev;
}