Linux Kernel 3.7.1
tlbflush_64.c
/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

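/*
 * local_flush_tlb_one - flush a single (ASID, page) translation from
 * this CPU's TLBs.
 *
 * A PTEH match value is built from the ASID and the sign-extended page
 * address; each ITLB and DTLB slot's PTEH field is then read back with
 * getcfg (the SH-5 configuration-register read instruction) and the
 * slot is invalidated on an exact match.
 */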
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = neff_sign_extend(page);
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

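/*
 * local_flush_tlb_page - flush the TLB entry covering a single user
 * page. The address is masked down to its page frame and matched
 * against the current ASID, with interrupts disabled across the walk.
 */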
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}

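/*
 * local_flush_tlb_range - flush all TLB entries covering a user
 * address range. Bails out early if the mm has no context on this
 * CPU; otherwise every ITLB and DTLB slot whose ASID matches and
 * whose effective page number lies within [start, end] is flushed.
 */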
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

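/*
 * local_flush_tlb_mm - flush all TLB entries belonging to an address
 * space. Instead of scanning the TLBs, the mm's context on this CPU
 * is dropped; if the mm is currently active, activate_context()
 * assigns it a fresh ASID, leaving any stale entries unreachable.
 */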
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}

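/*
 * local_flush_tlb_all - invalidate every non-fixed ITLB and DTLB
 * entry on this CPU, with interrupts disabled for the duration.
 */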
void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

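/*
 * local_flush_tlb_kernel_range - flush TLB entries for a kernel
 * address range. Per the FIXME below, this is currently a full
 * flush rather than a targeted walk.
 */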
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}

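/*
 * __flush_tlb_global - flush global (shared) mappings; also
 * implemented as a full TLB flush.
 */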
void __flush_tlb_global(void)
{
	flush_tlb_all();
}