Linux Kernel 3.7.1
tlb.c

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations Manfred Spraul <[email protected]>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */
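
/*
 * Argument block passed to the flush IPI handler flush_tlb_func(): the mm
 * being flushed and the virtual range [flush_start, flush_end), where
 * flush_end == TLB_FLUSH_ALL requests a full flush and flush_end == 0
 * means a single page at flush_start.
 */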
struct flush_tlb_info {
        struct mm_struct *flush_mm;
        unsigned long flush_start;
        unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *      Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
 *      if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *      mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

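/*
 * Putting the above together: in this file a remote flush travels from
 * flush_tlb_current_task()/flush_tlb_mm_range()/flush_tlb_page() through
 * flush_tlb_others() to smp_call_function_many(flush_tlb_func), and
 * flush_tlb_func() on each target cpu either flushes its tlb or calls
 * leave_mm() when that cpu is only lazily borrowing the mm.
 */
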
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;

        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
                        local_flush_tlb();
                else if (!f->flush_end)
                        __flush_tlb_single(f->flush_start);
                else {
                        unsigned long addr;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                }
        } else
                leave_mm(smp_processor_id());
}

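/*
 * Ask every other cpu in @cpumask to run flush_tlb_func() for this range.
 * On UV systems uv_flush_tlb_others() may return a reduced mask (or NULL,
 * in which case no IPI needs to be sent at all).
 */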
void native_flush_tlb_others(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        struct flush_tlb_info info;
        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                                        &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

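/*
 * Flush the current task's whole address space: flush this cpu locally,
 * then ask every other cpu that has this mm loaded to do a full flush.
 */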
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        local_flush_tlb();
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/*
 * Look for a huge page in the range: a THP large page or, with THP
 * disabled, a HUGETLB page, so the range flush can fall back to a full
 * flush instead of per-page invlpg.
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = ALIGN(start, HPAGE_SIZE);
        for (; addr < end; addr += HPAGE_SIZE) {
                pgd = pgd_offset(mm, addr);
                if (likely(!pgd_none(*pgd))) {
                        pud = pud_offset(pgd, addr);
                        if (likely(!pud_none(*pud))) {
                                pmd = pmd_offset(pud, addr);
                                if (likely(!pmd_none(*pmd)))
                                        if (pmd_large(*pmd))
                                                return addr;
                        }
                }
        }
        return 0;
}

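/*
 * Flush a range of user addresses for @mm.  Small ranges are flushed page
 * by page with invlpg; once the range exceeds act_entries >>
 * tlb_flushall_shift pages the whole TLB is flushed instead.  Purely as an
 * illustration (the real values are per-cpu tunables): with act_entries =
 * 512 and tlb_flushall_shift = 5, anything larger than 512 >> 5 = 16 pages
 * would take the full-flush path.
 */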
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        unsigned act_entries, tlb_entries = 0;

        preempt_disable();
        if (current->active_mm != mm)
                goto flush_all;

        if (!current->mm) {
                leave_mm(smp_processor_id());
                goto flush_all;
        }

        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
                                        || vmflag & VM_HUGETLB) {
                local_flush_tlb();
                goto flush_all;
        }

        /* In modern CPUs the last level TLB is used for both data and instructions */
        if (vmflag & VM_EXEC)
                tlb_entries = tlb_lli_4k[ENTRIES];
        else
                tlb_entries = tlb_lld_4k[ENTRIES];
        /* Assume all TLB entries are occupied by this task */
        act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

        /* tlb_flushall_shift sets the balance point; details are in the commit log */
        if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
                local_flush_tlb();
        else {
                if (has_large_page(mm, start, end)) {
                        local_flush_tlb();
                        goto flush_all;
                }
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE)
                        __flush_tlb_single(addr);

                if (cpumask_any_but(mm_cpumask(mm),
                                smp_processor_id()) < nr_cpu_ids)
                        flush_tlb_others(mm_cpumask(mm), mm, start, end);
                preempt_enable();
                return;
        }

flush_all:
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

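/*
 * Flush a single page for @vma's mm: invlpg it locally when this cpu is
 * actively using the mm (a kernel thread lazily borrowing it drops it via
 * leave_mm() instead), then send the page flush to the other cpus that
 * share the mm.
 */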
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(start);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

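/*
 * IPI handler for flush_tlb_all(): flush everything on this cpu and, if the
 * cpu was only lazily holding on to an mm, drop it with leave_mm().
 */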
static void do_flush_tlb_all(void *info)
{
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

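/*
 * IPI handler for flush_tlb_kernel_range(): invlpg each page of the
 * requested kernel range on this cpu.
 */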
static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned act_entries;
        struct flush_tlb_info info;

        /* In modern CPUs the last level TLB is used for both data and instructions */
        act_entries = tlb_lld_4k[ENTRIES];

        /* Balance the same way as a user space task's flush, a bit conservatively */
        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
                (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)

                on_each_cpu(do_flush_tlb_all, NULL, 1);
        else {
                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

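/*
 * Debugfs knob for tuning tlb_flushall_shift at run time.  Assuming debugfs
 * is mounted at /sys/kernel/debug, the file created below under
 * arch_debugfs_dir (the "x86" directory) can be used like this:
 *
 *      cat /sys/kernel/debug/x86/tlb_flushall_shift
 *      echo 3 > /sys/kernel/debug/x86/tlb_flushall_shift
 *
 * Writing -1 disables the range heuristic so every flush takes the
 * full-flush path.
 */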
#ifdef CONFIG_DEBUG_TLBFLUSH
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%hd\n", tlb_flushall_shift);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        s8 shift;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtos8(buf, 0, &shift))
                return -EINVAL;

        if (shift < -1 || shift >= BITS_PER_LONG)
                return -EINVAL;

        tlb_flushall_shift = shift;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
        if (cpu_has_invlpg) {
                debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
                        arch_debugfs_dir, NULL, &fops_tlbflush);
        }
        return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif