Linux Kernel 3.7.1
pgtable.c
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif
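/*
 * __userpte_alloc_gfp selects where user page-table pages come from;
 * with CONFIG_HIGHPTE they may be placed in highmem, and the
 * "userpte=nohigh" boot option handled below clears __GFP_HIGHMEM again.
 */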
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}
static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);
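/*
 * The ___*_free_tlb() helpers hand page-table pages to the mmu_gather,
 * so the pages are only freed after the corresponding TLB flush.
 */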
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */
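/*
 * pgd_list links the pgd pages of all address spaces that do not share
 * the kernel pmd, so changes to the kernel mappings can be propagated
 * to every pgd (see the comment below pgd_dtor()).
 */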
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
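/* The mm owning a pgd page is stashed in that page's struct page::index. */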
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
        virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return (struct mm_struct *)page->index;
}
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            PAGETABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}
static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */
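/*
 * Helpers for pgd_alloc(): preallocate one pmd page per pgd slot that
 * needs one (PREALLOCATED_PMDS of them), and free them again on failure.
 */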
static void free_pmds(pmd_t *pmds[])
{
        int i;

        for(i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i])
                        free_page((unsigned long)pmds[i]);
}
static int preallocate_pmds(pmd_t *pmds[])
{
        int i;
        bool failed = false;

        for(i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(pmds);
                return -ENOMEM;
        }

        return 0;
}
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for(i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        unsigned long addr;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);

        for (addr = i = 0; i < PREALLOCATED_PMDS;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}
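/*
 * pgd_alloc(): allocate a zeroed pgd page, preallocate its pmds, then
 * initialize and prepopulate it under pgd_lock so that walkers of
 * pgd_list never see a half-constructed pgd.
 */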
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];

        pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_pmds:
        free_pmds(pmds);
out_free_pgd:
        free_page((unsigned long)pgd);
out:
        return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
}
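/*
 * Used from the fault path when the accessed/dirty bits change: only
 * rewrite the pte and flush the TLB entry if something actually changed.
 */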
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
                flush_tlb_page(vma, address);
        }

        return changed;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                *pmdp = entry;
                pmd_update_defer(vma->vm_mm, address, pmdp);
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }

        return changed;
}
#endif
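/* Atomically clear the accessed bit and report whether it was set. */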
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        if (ret)
                pmd_update(vma->vm_mm, addr, pmdp);

        return ret;
}
#endif
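/* As above, but also flush the TLB entry when the accessed bit was set. */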
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;

        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);

        return young;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}
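/* Mark a huge pmd as being split; see the gup-fast note below. */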
void pmdp_splitting_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        int set;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
                                (unsigned long *)pmdp);
        if (set) {
                pmd_update(vma->vm_mm, address, pmdp);
                /* need tlb flush only to serialize against gup-fast */
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
}
#endif
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}
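/* Fixmap setup: install a pte for a fixed-address slot. */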
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}