Linux Kernel 3.7.1
ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
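
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might use ioremap_change_attr() to return the direct-map alias of a
 * physical range to write-back. Taking the alias via __va() and the
 * function name are assumptions made for the example.
 */
#if 0	/* illustrative only */
static int example_restore_linear_wb(phys_addr_t phys, unsigned long size)
{
	unsigned long vaddr = (unsigned long)__va(phys);	/* direct-map alias */

	return ioremap_change_attr(vaddr, size, _PAGE_CACHE_WB);
}
#endif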

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
123  "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
124  (unsigned long long)phys_addr,
125  (unsigned long long)(phys_addr + size),
126  prot_val, new_prot_val);
127  goto err_free_memtype;
128  }
129  prot_val = new_prot_val;
130  }

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

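/*
 * Worked example, not part of the original file: the alignment math in
 * __ioremap_caller() above, with hypothetical numbers. A request that
 * is not page-aligned is widened to whole pages, and the in-page
 * offset is added back to the returned pointer, so the caller never
 * sees the rounding. (Assumes PAGE_SIZE == 4096.)
 */
#if 0	/* illustrative only */
static void example_alignment_math(void)
{
	resource_size_t phys_addr = 0xfed00234ULL;		/* hypothetical */
	unsigned long size = 0x40;
	resource_size_t last_addr = phys_addr + size - 1;	/* 0xfed00273 */
	unsigned long offset = phys_addr & ~PAGE_MASK;		/* 0x234 */

	phys_addr &= PHYSICAL_PAGE_MASK;			/* 0xfed00000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;		/* 0x1000: one page */
	/* __ioremap_caller() then returns mapped base + 0x234 */
}
#endif
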
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

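/*
 * Illustrative driver-style sketch, not part of the original file: the
 * common ioremap_nocache()/readl()/iounmap() pattern for device
 * registers. EXAMPLE_BAR_PHYS, EXAMPLE_BAR_LEN and example_probe() are
 * hypothetical names.
 */
#if 0	/* illustrative only */
#define EXAMPLE_BAR_PHYS	0xfebf0000UL
#define EXAMPLE_BAR_LEN		0x1000UL

static int example_probe(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap_nocache(EXAMPLE_BAR_PHYS, EXAMPLE_BAR_LEN);
	if (!regs)
		return -ENOMEM;

	id = readl(regs);	/* uncached MMIO read of register 0 */
	pr_info("device id: %#x\n", id);

	iounmap(regs);
	return 0;
}
#endif
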
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

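/*
 * Illustrative sketch, not part of the original file: write-combining
 * suits framebuffer-style memory, where throughput of streamed writes
 * matters more than strict ordering. As the function above shows, it
 * silently degrades to uncached when PAT is disabled. The address and
 * length are hypothetical.
 */
#if 0	/* illustrative only */
static void example_clear_framebuffer(void)
{
	void __iomem *fb = ioremap_wc(0xd0000000UL, 8 * 1024 * 1024);

	if (fb) {
		memset_io(fb, 0, 8 * 1024 * 1024);	/* streamed, combinable writes */
		iounmap(fb);
	}
}
#endif
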
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

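/*
 * Usage note, not part of the original file: iounmap() masks off the
 * in-page offset itself (see the PAGE_MASK step above), so the pointer
 * returned by a non-page-aligned ioremap() can be passed back as-is;
 * an address that was never ioremapped hits the "bad address" path.
 * The values below are hypothetical.
 */
#if 0	/* illustrative only */
static void example_unaligned_unmap(void)
{
	void __iomem *p = ioremap_nocache(0xfed00234UL, 0x40);

	if (p)
		iounmap(p);	/* offset bits are masked off internally */
}
#endif
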
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
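
/*
 * Illustrative sketch, not part of the original file: the read path a
 * /dev/mem style consumer would take through the two helpers above.
 * The function name is hypothetical.
 */
#if 0	/* illustrative only */
static int example_read_phys_byte(unsigned long phys, u8 *val)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*val = *(u8 *)ptr;			/* copy one byte out */
	unxlate_dev_mem_ptr(phys, ptr);		/* tear down any temporary map */
	return 0;
}
#endif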

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
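
/*
 * Usage note, not part of the original file: booting with
 * "early_ioremap_debug" on the kernel command line sets the flag
 * above, making every early_ioremap()/early_iounmap() call below log
 * its arguments and dump a stack trace.
 */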

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
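
/*
 * Illustrative sketch, not part of the original file: boot-time code
 * reading a firmware value before the normal ioremap() machinery is
 * available. The function name and table address are hypothetical;
 * note the size passed to early_iounmap() must match the one used to
 * map, as the consistency check below enforces.
 */
#if 0	/* illustrative only */
static u32 __init example_read_boot_word(phys_addr_t table_phys)
{
	u32 val = 0;
	void __iomem *p = early_ioremap(table_phys, sizeof(u32));

	if (p) {
		val = readl(p);
		early_iounmap(p, sizeof(u32));	/* size checked against prev_size[] */
	}
	return val;
}
#endif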

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}