Linux Kernel 3.7.1
ioremap.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

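/*
 * Fill in the PTEs covering one PMD-sized chunk of the mapping.  The page
 * protection combines _PAGE_GLOBAL, _PAGE_PRESENT, read/write permission
 * and the caller-supplied cache flags; e.g. a page-aligned 16 KB request
 * with 4 KB pages sets four consecutive PTEs.
 */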
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

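/*
 * Fill in one PGD-sized chunk of the mapping: allocate a PTE page for each
 * PMD entry touched, then hand PMD-sized pieces down to remap_area_pte().
 */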
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

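/*
 * Walk the kernel page tables for [address, address + size) and map the
 * range to phys_addr: allocate PUD and PMD levels as needed, then let
 * remap_area_pmd()/remap_area_pte() fill in the leaf entries.  The TLB is
 * flushed afterwards so no stale translations survive.
 */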
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

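/*
 * IS_LOW512 is true when no bit above bit 28 is set, i.e. the address is
 * below 0x20000000 and therefore inside the 512 MB window that KSEG1 can
 * reach without page tables.
 */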
#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
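	/*
	 * Example: an uncached request for phys 0x1fc00000 lies in the low
	 * 512 MB, so the CKSEG1ADDR() shortcut below just returns the
	 * uncached KSEG1 alias of that address (0xbfc00000 on a 32-bit
	 * kernel) with no page-table work at all.
	 */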
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
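
/*
 * Example: a minimal, illustrative sketch of how a driver might use
 * __ioremap()/__iounmap() for an uncached register window.  DEMO_PHYS_BASE,
 * DEMO_WIN_SIZE and the demo_* names are hypothetical; most code goes
 * through the ioremap()/iounmap() wrappers rather than calling these
 * functions directly.
 */
#define DEMO_PHYS_BASE	0x1fc00000UL	/* hypothetical device window */
#define DEMO_WIN_SIZE	0x1000UL

static void __iomem *demo_regs;

static int demo_map(void)
{
	/* Request an uncached mapping of the device window. */
	demo_regs = __ioremap(DEMO_PHYS_BASE, DEMO_WIN_SIZE, _CACHE_UNCACHED);
	if (!demo_regs)
		return -ENOMEM;

	/* 32-bit MMIO read of the first register in the window. */
	(void) readl(demo_regs);
	return 0;
}

static void demo_unmap(void)
{
	__iounmap(demo_regs);
	demo_regs = NULL;
}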

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

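/*
 * Tear down a mapping created by __ioremap().  Addresses in KSEG1 were
 * never backed by a vm_struct or page tables, so they are simply ignored;
 * anything else has its vm area unmapped and its vm_struct freed.
 */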
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}