Linux Kernel 3.7.1
drivers/char/mem.c
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <[email protected]>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <[email protected]>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

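/*
 * Return the number of bytes from 'start' to the end of the page that
 * 'start' falls in, capped at 'size'.  Callers use this to split
 * transfers so they never cross a page boundary.
 */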
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

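/*
 * Default sanity checks, used when the architecture does not provide
 * its own: a physical range is valid if it ends at or below the top of
 * directly usable memory, and any pfn range may be mmapped.
 */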
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

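/*
 * With CONFIG_STRICT_DEVMEM the architecture decides, page by page,
 * which physical ranges userspace may touch through /dev/mem; without
 * it, every range is allowed.
 */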
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

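/*
 * Weak default: architectures that map /dev/mem pages specially (via
 * xlate_dev_mem_ptr) override this to undo the mapping; everyone else
 * gets this no-op.
 */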
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

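/*
 * This function writes to the *physical* memory, mirroring read_mem():
 * the transfer is split on page boundaries and each page is translated
 * with xlate_dev_mem_ptr() before being copied from userspace.
 */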
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

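/*
 * Weak default: unless the architecture overrides it, any caller may
 * establish a mapping with the page protection it asked for.
 */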
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about, or
	 * through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

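/*
 * On no-MMU kernels a /dev/mem mapping cannot be remapped page by
 * page, so the device hands back the physical address itself as the
 * "mapped" address and only shared mappings are accepted.
 */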
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

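/*
 * mmap() on /dev/mem: validate the physical range and the caller's
 * access rights, pick a (possibly uncached) page protection, then
 * remap the whole window in one go with remap_pfn_range().
 */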
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

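/*
 * mmap() on /dev/kmem takes a *kernel-virtual* offset; convert it to a
 * page frame number and reuse the /dev/mem mapping path above.
 */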
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

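/*
 * Write userspace data to low (directly mapped) kernel memory; the
 * vmalloc range is handled separately by write_kmem() below.
 */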
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

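/*
 * /dev/port gives byte-wise access to the x86-style I/O port space:
 * the file offset is the port number and each byte is transferred with
 * a single inb()/outb().
 */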
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

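/*
 * /dev/null: reads see EOF immediately, writes (and spliced pipe data)
 * are accepted in full and discarded.
 */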
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

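/*
 * /dev/zero reads fill the user buffer with zeroes one page at a time,
 * checking for pending signals between chunks so large reads stay
 * interruptible.
 */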
static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= ~0xFFFULL) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

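/*
 * Opening /dev/port (and, via the aliases below, /dev/mem and
 * /dev/kmem) requires raw I/O capability.
 */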
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

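/*
 * One file_operations table per device node; memory_open() below
 * selects the right one from the minor number.
 */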
static const struct file_operations mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.splice_write = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.read = read_zero,
	.write = write_zero,
	.mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name = "char/mem",
	.capabilities = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read = read_full,
	.write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read = read_oldmem,
	.open = open_oldmem,
	.llseek = default_llseek,
};
#endif

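/*
 * The array index is the minor number of the node under major 1
 * (MEM_MAJOR), which is why the slots are sparse.
 */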
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};

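/*
 * Every node shares major MEM_MAJOR; look the minor up in devlist,
 * install the per-device file_operations, then chain to that device's
 * own open() if it has one.
 */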
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

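/*
 * Class devnode callback: report the default mode for each node so the
 * /dev entries are created with the permissions listed in devlist.
 */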
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

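/*
 * Boot-time setup: register major MEM_MAJOR, create the "mem" class
 * and one device node per populated devlist slot, then hand off to
 * tty_init() for the remaining character devices.
 */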
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);