pgtable_32.c (Linux Kernel 3.7.1)
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/setup.h>

#include "mmu_decl.h"

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */

#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS	1
#endif

#if defined(CONFIG_FSL_BOOKE)
#define HAVE_TLBCAM	1
#endif

extern char etext[], _stext[];

#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
void setbat(int index, unsigned long virt, phys_addr_t phys,
	    unsigned int size, int flags);

#else /* !HAVE_BATS */
#define v_mapped_by_bats(x)	(0UL)
#define p_mapped_by_bats(x)	(0UL)
#endif /* HAVE_BATS */

#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x)	(0UL)
#define p_mapped_by_tlbcam(x)	(0UL)
#endif /* HAVE_TLBCAM */

#define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
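
/*
 * Worked example (editor's note, not in the original source): assuming the
 * classic ppc32 4K-page layout, PGDIR_SHIFT is 22 and pgd_t is 4 bytes
 * (PGD_T_LOG2 == 2), so PGDIR_ORDER = 32 + 2 - 22 = 12 and the pgd is
 * 1 << 12 = 4096 bytes: exactly one 4K page. With 8-byte PGD entries
 * (PGD_T_LOG2 == 3) it grows to two pages, matching the "page or two"
 * comment in pgd_alloc() below.
 */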

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;

	/* The pgdir takes a page or two with 4K pages, and a fraction of a page otherwise */
#ifndef CONFIG_PPC_4K_PAGES
	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
#else
	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
			PGDIR_ORDER - PAGE_SHIFT);
#endif
	return ret;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
	kfree((void *)pgd);
#else
	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
}
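
/*
 * Usage sketch (editor's note, not in the original source): pgd_alloc()
 * and pgd_free() are invoked by the core mm code when an address space
 * is created or torn down, roughly as follows.
 */
static int __maybe_unused example_mm_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* one zeroed first-level table */
	if (!mm->pgd)
		return -ENOMEM;
	pgd_free(mm, mm->pgd);		/* and release it again */
	return 0;
}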

__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;

	ptepage = alloc_pages(flags, 0);
	if (!ptepage)
		return NULL;
	pgtable_page_ctor(ptepage);
	return ptepage;
}
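
/*
 * Editor's note (not in the original source): the two allocators differ
 * because kernel page tables are referenced directly (a bare pte_t page
 * is enough), while user page tables are handed around as struct pages
 * and need pgtable_page_ctor() to set up the split page-table lock used
 * by the core mm. A minimal sketch of the user-side calling pattern,
 * assuming a valid mm and user address:
 *
 *	pgtable_t new = pte_alloc_one(mm, addr); // zeroed page, ctor run
 *	if (!new)
 *		return -ENOMEM;
 *	...					 // install under the page table lock
 *	pte_free(mm, new);			 // undo path
 */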

void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* On BookE using the new PTE format, _PAGE_USER contains
	 * _PAGE_BAP_SR, which means we just cleared supervisor
	 * access... oops ;-) This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

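/*
 * Usage sketch (editor's note, not in the original source): the typical
 * driver-side pattern for these interfaces. The physical base address is
 * a placeholder; real drivers take it from the device tree or bus probe.
 */
static u32 __maybe_unused example_read_mmio_reg(phys_addr_t base)
{
	u32 __iomem *regs = ioremap(base, PAGE_SIZE);	/* uncached + guarded */
	u32 val;

	if (!regs)
		return 0;
	val = in_be32(regs);	/* big-endian MMIO read, from <asm/io.h> */
	iounmap(regs);
	return val;
}
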
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= PAGE_KERNEL;

	/* A non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? Perhaps overlapped by a previous
	 * BAT mapping. If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);

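/*
 * Worked example (editor's note, not in the original source): the
 * page-rounding above maps whole pages and re-applies the sub-page
 * offset on return. For addr = 0xfe000005 and size = 0x10 with 4K
 * pages: p = addr & PAGE_MASK = 0xfe000000, size becomes
 * PAGE_ALIGN(0xfe000015) - p = 0x1000 (one page), and the returned
 * cookie is v + 0x5, pointing at the requested byte of the mapping.
 */
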
void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_mapped_by_bats((unsigned long)addr))
		return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}
	return err;
}

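/*
 * Worked example (editor's note, not in the original source): assuming
 * the two-level 32-bit layout with 4K pages described above, a virtual
 * address splits as | 10-bit pgd index | 10-bit pte index | 12-bit page
 * offset |. For va = 0xc0001234: pgd index = va >> 22 = 0x300,
 * pte index = (va >> 12) & 0x3ff = 0x001, offset = va & 0xfff = 0x234.
 */
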
/*
 * Map in a chunk of physical memory starting at offset 'offset'
 * and ending at 'top'.
 */
void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s, f;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *) v >= _stext && (char *) v < etext);
		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
		map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
		if (ktext)
			hash_preload(&init_mm, v, 0, 0x300);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

void __init mapin_ram(void)
{
	unsigned long s, top;

#ifndef CONFIG_WII
	top = total_lowmem;
	s = mmu_mapin_ram(top);
	__mapin_ram_chunk(s, top);
#else
	if (!wii_hole_size) {
		s = mmu_mapin_ram(total_lowmem);
		__mapin_ram_chunk(s, total_lowmem);
	} else {
		top = wii_hole_start;
		s = mmu_mapin_ram(top);
		__mapin_ram_chunk(s, top);

		top = memblock_end_of_DRAM();
		s = wii_mmu_mapin_mem2(top);
		__mapin_ram_chunk(s, top);
	}
#endif
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}

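/*
 * Usage sketch (editor's note, not in the original source): a caller of
 * get_pteptr() must drop the mapping taken by pte_offset_map(), as the
 * XXX note above warns; __change_page_attr() below does exactly that.
 */
static int __maybe_unused example_is_mapped(unsigned long addr)
{
	pte_t *pte;
	int present;

	if (!get_pteptr(&init_mm, addr, &pte, NULL))
		return 0;
	present = pte_present(*pte);
	pte_unmap(pte);			/* the caller's responsibility */
	return present;
}
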
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	wmb();
	flush_tlb_page(NULL, address);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	local_irq_restore(flags);
	return err;
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

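/*
 * Editor's note (not in the original source): with CONFIG_DEBUG_PAGEALLOC
 * the page allocator uses kernel_map_pages() to unmap pages as they are
 * freed and map them back on allocation, so a stale pointer into a free
 * page faults immediately instead of silently corrupting memory:
 *
 *	kernel_map_pages(page, 1 << order, 0);	// free path: unmap
 *	kernel_map_pages(page, 1 << order, 1);	// alloc path: remap
 */
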
static int fixmaps;

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, pgprot_val(flags));
	fixmaps++;
}

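/*
 * Usage sketch (editor's note, not in the original source): fixmap slots
 * are compile-time-constant virtual addresses defined in <asm/fixmap.h>;
 * a caller pins a physical page into a slot and then addresses it via
 * fix_to_virt(). FIX_KMAP_BEGIN (present with CONFIG_HIGHMEM) is used
 * here purely as an example index:
 *
 *	__set_fixmap(FIX_KMAP_BEGIN, page_to_phys(page), PAGE_KERNEL);
 *	void *va = (void *)fix_to_virt(FIX_KMAP_BEGIN);
 */
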
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}