ioremap.c (Linux Kernel 3.7.1)
/*
 * linux/arch/unicore32/mm/ioremap.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because UniCore only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
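
/*
 * Example usage (illustrative sketch, not from this file; the device
 * address and register offset are hypothetical): map a 4K window of bus
 * memory, touch it only through the MMIO accessors, then unmap it.
 *
 *      void __iomem *regs = __uc32_ioremap(0xee000000, SZ_4K);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(readl(regs + 0x04) | 0x1, regs + 0x04);
 *      __uc32_iounmap(regs);
 */
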
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_UNICORE_SECTION_MAPPING      0x80000000

int ioremap_page(unsigned long virt, unsigned long phys,
                 const struct mem_type *mtype)
{
        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
                                  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

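/*
 * Illustrative sketch (both addresses hypothetical): ioremap_page()
 * wires up exactly one page, and the caller supplies the virtual
 * address as well as the physical one, so it only suits fixed,
 * pre-reserved windows. It returns 0 on success.
 *
 *      const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *
 *      if (!mt || ioremap_page(0xf4000000UL, 0xee000000UL, mt))
 *              pr_err("fixed device window mapping failed\n");
 */
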
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
        pgd_t *pgd;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        do {
                pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

                pmd = *pmdp;
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the kvm sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                }

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        flush_tlb_kernel_range(virt, end);
}
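
/*
 * Worked example of the masking above: get_vm_area_caller() pads every
 * area with a 4K guard page, so a one-section mapping is recorded with
 * size == SZ_4M + PAGE_SIZE. The mask trims that back to whole sections:
 *
 *      (SZ_4M + PAGE_SIZE) & ~(SZ_4M - 1)
 *      == 0x00401000 & 0xffc00000 == 0x00400000  (one 4MB section)
 *
 * Without it, end would land one guard page past the section and the
 * PGDIR_SIZE-stepping loop would tear down one extra entry.
 */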

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping, and
         * sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        do {
                pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

                set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
                pfn += SZ_4M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        return 0;
}
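
/*
 * Worked example of the stepping above: each iteration installs one 4MB
 * section, so pfn must advance by a whole section's worth of pages:
 *
 *      SZ_4M >> PAGE_SHIFT  ==  0x400000 >> 12  ==  1024 pages
 *
 * An 8MB request thus takes two iterations, with pfn, addr and pgd
 * moving in lock step, which implies PGDIR_SIZE == SZ_4M here.
 */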

void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be section aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
                return NULL;

        /*
         * Don't allow RAM to be mapped
         */
        if (pfn_valid(pfn)) {
                printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
                        "system memory. This leads to architecturally\n"
                        "unpredictable behaviour, and ioremap() will fail in\n"
                        "the next kernel release. Please fix your driver.\n");
                WARN_ON(1);
        }

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

        if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_UNICORE_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                         __pgprot(type->prot_pte));

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
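
/*
 * Worked example of the section test above (addresses hypothetical): the
 * 4MB-section path is taken only if the physical address, the size and
 * the allocated virtual address are all PMD-aligned.
 *
 *      __pfn_to_phys(pfn) = 0xee000000, size = 0x00400000, addr = 0xf4400000
 *      (0xee000000 | 0x00400000 | 0xf4400000) & ~PMD_MASK == 0
 *              -> remap_area_sections()
 *
 * A 4K request (size = 0x1000) leaves low bits set, so it falls through
 * to the ordinary ioremap_page_range() path instead.
 */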

void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
        unsigned int mtype, void *caller)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}
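
/*
 * Worked example of the wraparound check (address hypothetical): on a
 * 32-bit physical address space, phys_addr + size - 1 must not wrap.
 *
 *      phys_addr = 0xfffff000, size = 0x2000
 *      last_addr = 0xfffff000 + 0x2000 - 1 = 0x00000fff  (wrapped)
 *      last_addr < phys_addr  ->  return NULL
 */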

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
        unsigned int mtype)
{
        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_pfn);

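/*
 * Worked example of the non-page-aligned case described in the NOTE
 * above (address hypothetical): __uc32_ioremap_caller() splits the
 * physical address into a page frame plus an in-page offset, and the
 * offset is added back to the returned cookie.
 *
 *      phys_addr = 0xee001234
 *      pfn    = __phys_to_pfn(0xee001234) = 0xee001
 *      offset = 0xee001234 & ~PAGE_MASK   = 0x234
 *      returned pointer = mapped base + 0x234
 */
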
void __iomem *
__uc32_ioremap(unsigned long phys_addr, size_t size)
{
        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap);

void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(__uc32_ioremap_cached);

void __uc32_iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct vm_struct **p, *tmp;

        /*
         * If this is a section based mapping we need to handle it
         * specially as the VM subsystem does not know how to handle
         * such a beast. We need the lock here b/c we need to clear
         * all the mappings before the area can be reclaimed
         * by someone else.
         */
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
                        if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
                                unmap_area_sections((unsigned long)tmp->addr,
                                                    tmp->size);
                        }
                        break;
                }
        }
        write_unlock(&vmlist_lock);

        vunmap(addr);
}
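
/*
 * Illustrative sketch (address hypothetical): tearing down a mapping
 * that took the section path. The vmlist walk above finds the area,
 * sees VM_UNICORE_SECTION_MAPPING set when the mapping was created, and
 * clears the PMD entries itself before vunmap() releases the virtual
 * range.
 *
 *      void __iomem *win = __uc32_ioremap(0xee000000, SZ_4M);
 *
 *      if (win) {
 *              ... use readl()/writel() on win ...
 *              __uc32_iounmap(win);
 *      }
 */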