Linux Kernel 3.7.1
tlb_hash64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <[email protected]>
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

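/*
 * Per-CPU batch of pending hash-table invalidations. Entries are
 * accumulated by hpte_need_flush() while a lazy-MMU section is active
 * and drained by __flush_tlb_pending().
 */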
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment such
	 * as for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA must not have been reused)
	 * at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
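	/* Fold the VSID and effective address into the virtual page
	 * number the hash code works with. */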
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
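	/* Queue this entry and drain the batch as soon as it is full */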
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
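
/*
 * Illustrative sketch (not part of the original file): hpte_need_flush()
 * is meant to be driven by the PTE update path. The helper below is a
 * hypothetical, simplified stand-in for the real pte_update() in
 * pgtable-ppc64.h, which clears the bits with an atomic ldarx/stdcx.
 * sequence rather than a plain store.
 */
#if 0
static unsigned long example_pte_update(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep,
					unsigned long clr, int huge)
{
	unsigned long old = pte_val(*ptep);

	*ptep = __pte(old & ~clr);
	/* Only PTEs that made it into the hash table need a flush */
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
	return old;
}
#endif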

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
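	/*
	 * If this mm has only ever been used on the current CPU we can
	 * use the cheaper local invalidate (tlbiel) instead of a
	 * broadcast one.
	 */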
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
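
/*
 * For reference (simplified sketch, not part of this file): batch->active
 * is driven by the lazy-MMU hooks, roughly as below. This is what lets
 * hpte_need_flush() accumulate entries instead of flushing each page
 * individually. The example_ names are hypothetical stand-ins for the
 * real arch_enter/leave_lazy_mmu_mode() hooks.
 */
#if 0
static inline void example_enter_lazy_mmu_mode(void)
{
	get_cpu_var(ppc64_tlb_batch).active = 1;
	put_cpu_var(ppc64_tlb_batch);
}

static inline void example_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
	put_cpu_var(ppc64_tlb_batch);
}
#endif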

void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because of a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

#ifdef CONFIG_HOTPLUG

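/**
 * __flush_hash_table_range - flush all HPTEs for a given address range
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This is mostly used by IO hotplug code to remove the hash entries
 * mapping IO space on a removed PCI-PCI bridge without tearing down
 * the full mapping, since 64K pages may overlap with other bridges
 * when using 64K pages with 4K hardware pages on IO space.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */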
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */