Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
char_dev.c
Go to the documentation of this file.
1 /*
2  * linux/fs/char_dev.c
3  *
4  * Copyright (C) 1991, 1992 Linus Torvalds
5  */
6 
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/kdev_t.h>
10 #include <linux/slab.h>
11 #include <linux/string.h>
12 
13 #include <linux/major.h>
14 #include <linux/errno.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
23 #include <linux/tty.h>
24 
25 #include "internal.h"
26 
27 /*
28  * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
29  * devices
30  * - permits shared-mmap for read, write and/or exec
31  * - does not permit private mmap in NOMMU mode (can't do COW)
32  * - no readahead or I/O queue unplugging required
33  */
35  .name = "char",
36  .capabilities = (
37 #ifdef CONFIG_MMU
38  /* permit private copies of the data to be taken */
40 #endif
41  /* permit direct mmap, for read, write or exec */
44  /* no writeback happens */
46 };
47 
/* Maps dev_t numbers to the struct cdev registered for them (see cdev_add()). */
static struct kobj_map *cdev_map;

/* Protects the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * One entry per registered major/minor range, chained per hash slot.
 * __register_chrdev_region() keeps each chain sorted by major, then by
 * base minor.
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next entry in this hash chain */
	unsigned int major;			/* major number of the range */
	unsigned int baseminor;			/* first minor in the range */
	int minorct;				/* number of minors in the range */
	char name[64];				/* driver name, shown by chrdev_show() */
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
60 
61 /* index in the above */
62 static inline int major_to_index(unsigned major)
63 {
64  return major % CHRDEV_MAJOR_HASH_SIZE;
65 }
66 
67 #ifdef CONFIG_PROC_FS
68 
69 void chrdev_show(struct seq_file *f, off_t offset)
70 {
71  struct char_device_struct *cd;
72 
73  if (offset < CHRDEV_MAJOR_HASH_SIZE) {
74  mutex_lock(&chrdevs_lock);
75  for (cd = chrdevs[offset]; cd; cd = cd->next)
76  seq_printf(f, "%3d %s\n", cd->major, cd->name);
77  mutex_unlock(&chrdevs_lock);
78  }
79 }
80 
81 #endif /* CONFIG_PROC_FS */
82 
83 /*
84  * Register a single major with a specified minor range.
85  *
86  * If major == 0 this function will dynamically allocate a major and return
87  * its number.
88  *
89  * If major > 0 this function will attempt to reserve the passed range of
90  * minors and will return zero on success.
91  *
92  * Returns a -ve errno on failure.
93  */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, **cp;
	int ret = 0;
	int i;

	/* Allocate before taking the mutex: kzalloc(GFP_KERNEL) may sleep. */
	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	/* temporary */
	if (major == 0) {
		/*
		 * Dynamic major: scan the hash table from the top down and
		 * take the first completely empty slot as the new major.
		 * NOTE(review): this only finds majors whose hash slot is
		 * unused, so it can report -EBUSY while free majors remain.
		 */
		for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
			if (chrdevs[i] == NULL)
				break;
		}

		if (i == 0) {
			ret = -EBUSY;
			goto out;
		}
		major = i;
		ret = major;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	i = major_to_index(major);

	/*
	 * Find the insertion point that keeps the chain sorted by major,
	 * then base minor.  The entry found here is the only candidate
	 * that needs to be checked for a minor-range collision below.
	 */
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major > major ||
		    ((*cp)->major == major &&
		     (((*cp)->baseminor >= baseminor) ||
		      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
			break;

	/* Check for overlapping minor ranges. */
	if (*cp && (*cp)->major == major) {
		int old_min = (*cp)->baseminor;
		int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
		int new_min = baseminor;
		int new_max = baseminor + minorct - 1;

		/* New driver overlaps from the left. */
		if (new_max >= old_min && new_max <= old_max) {
			ret = -EBUSY;
			goto out;
		}

		/* New driver overlaps from the right. */
		if (new_min <= old_max && new_min >= old_min) {
			ret = -EBUSY;
			goto out;
		}
	}

	/* Splice the new entry in at the insertion point found above. */
	cd->next = *cp;
	*cp = cd;
	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
166 
167 static struct char_device_struct *
168 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
169 {
170  struct char_device_struct *cd = NULL, **cp;
171  int i = major_to_index(major);
172 
173  mutex_lock(&chrdevs_lock);
174  for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
175  if ((*cp)->major == major &&
176  (*cp)->baseminor == baseminor &&
177  (*cp)->minorct == minorct)
178  break;
179  if (*cp) {
180  cd = *cp;
181  *cp = cd->next;
182  }
183  mutex_unlock(&chrdevs_lock);
184  return cd;
185 }
186 
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver
 *
 * Returns zero on success, a negative errno on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/*
	 * Bookkeeping in chrdevs[] is per major, so a range spanning
	 * several majors is registered one major-sized chunk at a time.
	 */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* Roll back every chunk registered before the one that failed. */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
220 
232 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
233  const char *name)
234 {
235  struct char_device_struct *cd;
236  cd = __register_chrdev_region(0, baseminor, count, name);
237  if (IS_ERR(cd))
238  return PTR_ERR(cd);
239  *dev = MKDEV(cd->major, cd->baseminor);
240  return 0;
241 }
242 
/**
 * __register_chrdev() - register a region and create a cdev for it in one go
 * @major: major device number, or 0 to allocate one dynamically
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name for this range (also used as the cdev kobject name)
 * @fops: file operations to associate with the devices
 *
 * Returns 0 when @major was supplied by the caller, the allocated major
 * when @major == 0, or a negative errno on failure.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	/* Use cd->major, not major: it may have been allocated above. */
	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	/* Remember the cdev so __unregister_chrdev() can delete it later. */
	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	/* Dropping the last reference frees the cdev_alloc()ed cdev. */
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
297 
308 {
309  dev_t to = from + count;
310  dev_t n, next;
311 
312  for (n = from; n < to; n = next) {
313  next = MKDEV(MAJOR(n)+1, 0);
314  if (next > to)
315  next = to;
316  kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
317  }
318 }
319 
331 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
332  unsigned int count, const char *name)
333 {
334  struct char_device_struct *cd;
335 
336  cd = __unregister_chrdev_region(major, baseminor, count);
337  if (cd && cd->cdev)
338  cdev_del(cd->cdev);
339  kfree(cd);
340 }
341 
342 static DEFINE_SPINLOCK(cdev_lock);
343 
/*
 * Take a reference on a cdev for an open file.
 *
 * The owning module is pinned first so its code (including the kobject
 * release function) cannot be unloaded while the reference is held;
 * returns NULL if the module is already going away.  On kobject_get()
 * failure the module reference is dropped again.
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}
356 
357 void cdev_put(struct cdev *p)
358 {
359  if (p) {
360  struct module *owner = p->owner;
361  kobject_put(&p->kobj);
362  module_put(owner);
363  }
364 }
365 
/*
 * Called every time a character special file is opened.
 *
 * Binds the inode to its struct cdev (looking it up in cdev_map on the
 * first open and caching it in inode->i_cdev) and then hands the open
 * off to the driver's own file_operations.
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/* Drop the spinlock across kobj_lookup(): it can block
		   (base_probe() may load a module). */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			/* We won the race: cache the cdev on the inode and
			   keep the reference kobj_lookup() took for us. */
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	/* If someone else installed a cdev first, drop our lookup ref
	   (cdev_put(NULL) is a no-op). */
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op)
		goto out_cdev_put;

	/* Delegate to the driver's open, if it provides one. */
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}
419 
420 void cd_forget(struct inode *inode)
421 {
422  spin_lock(&cdev_lock);
423  list_del_init(&inode->i_devices);
424  inode->i_cdev = NULL;
425  spin_unlock(&cdev_lock);
426 }
427 
/*
 * Detach every inode that still points at this cdev.  Called from the
 * kobject release paths just before the cdev itself can be freed, so no
 * stale inode->i_cdev pointers survive it.
 */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
439 
440 /*
441  * Dummy default file-operations: the only thing this does
442  * is contain the open that then fills in the correct operations
443  * depending on the special file...
444  */
446  .open = chrdev_open,
447  .llseek = noop_llseek,
448 };
449 
450 static struct kobject *exact_match(dev_t dev, int *part, void *data)
451 {
452  struct cdev *p = data;
453  return &p->kobj;
454 }
455 
456 static int exact_lock(dev_t dev, void *data)
457 {
458  struct cdev *p = data;
459  return cdev_get(p) ? 0 : -1;
460 }
461 
/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number this device is responsible for
 * @count: the number of consecutive minor numbers it covers
 *
 * Makes the device live immediately: opens can reach it as soon as
 * kobj_map() succeeds.  Returns 0 on success or a negative errno.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		return error;

	/* Pin the parent kobject; the release functions drop it again. */
	kobject_get(p->kobj.parent);

	return 0;
}
488 
/* Remove a dev_t range from cdev_map so new opens can no longer find it. */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
493 
/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * Unmaps the device numbers and drops this reference on the kobject;
 * the structure is only released (via its ktype) once the last
 * outstanding reference — e.g. from an open file — is also dropped.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
506 
507 
/*
 * kobject release for cdevs set up with cdev_init(): detach inodes and
 * drop the parent reference taken in cdev_add().  The structure memory
 * itself is owned by the caller and is NOT freed here.
 */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;	/* capture before teardown */

	cdev_purge(p);
	kobject_put(parent);
}
516 
/*
 * kobject release for cdevs from cdev_alloc(): same as the default
 * release, but also kfree()s the structure — so the parent pointer must
 * be captured before the kfree invalidates kobj.
 */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}
526 
/* ktype for cdev_init()-initialised cdevs: caller owns the memory. */
static struct kobj_type ktype_cdev_default = {
	.release = cdev_default_release,
};

/* ktype for cdev_alloc()ed cdevs: release also frees the structure. */
static struct kobj_type ktype_cdev_dynamic = {
	.release = cdev_dynamic_release,
};
534 
540 struct cdev *cdev_alloc(void)
541 {
542  struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
543  if (p) {
544  INIT_LIST_HEAD(&p->list);
545  kobject_init(&p->kobj, &ktype_cdev_dynamic);
546  }
547  return p;
548 }
549 
/**
 * cdev_init() - initialise a caller-provided cdev structure
 * @cdev: the structure to initialise
 * @fops: the file_operations for this device
 *
 * Zeroes and initialises @cdev so it is ready for cdev_add().  The
 * memory stays owned by the caller: ktype_cdev_default's release does
 * not free it.
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
565 
/*
 * Fallback probe for cdev_map when no driver has claimed a dev_t yet:
 * ask userspace to load a module by "char-major-%d-%d" alias.  Always
 * returns NULL — any cdev the loaded module registers is found by a
 * later lookup, not by this call.
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
573 
/*
 * One-time boot initialisation of the char-device layer: set up the
 * dev_t->cdev map (with base_probe as its fallback) and the BDI used by
 * directly mappable character devices.
 */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
	bdi_init(&directly_mappable_cdev_bdi);
}
579 
580 
581 /* Let modules do char dev stuff */
591 EXPORT_SYMBOL(directly_mappable_cdev_bdi);