Linux Kernel 3.7.1
tlb-score.c
/*
 * arch/score/mm/tlb-score.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Lennox Wu <[email protected]>
 * Chen Liqin <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#define TLBSIZE 32

unsigned long asid_cache = ASID_FIRST_VERSION;
EXPORT_SYMBOL(asid_cache);

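/*
 * Flush all TLB entries on this CPU, skipping any locked (wired) entries.
 */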
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ASID;
	int entry;

	local_irq_save(flags);
	old_ASID = pevn_get() & ASID_MASK;
	pectx_set(0);			/* invalid */
	entry = tlblock_get();		/* skip locked entries */

	for (; entry < TLBSIZE; entry++) {
		tlbpt_set(entry);
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(old_ASID);
	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	get_new_mmu_context(mm);
	pevn_set(mm->context & ASID_MASK);
	local_irq_restore(flags);
}

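/*
 * Invalidate all of mm's user mappings on this CPU by retiring its
 * current ASID and handing it a fresh one.
 */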
void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != 0)
		drop_mmu_context(mm);
}

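/*
 * Flush the user mappings in [start, end) of vma's address space.
 * Small ranges are probed and invalidated entry by entry; larger
 * ranges are handled by switching the whole mm to a new ASID.
 */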
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long vma_mm_context = mm->context;
	if (mm->context != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= TLBSIZE) {
			int oldpid = pevn_get() & ASID_MASK;
			int newpid = vma_mm_context & ASID_MASK;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				pevn_set(start | newpid);
				start += PAGE_SIZE;
				barrier();
				tlb_probe();
				idx = tlbpt_get();
				pectx_set(0);
				pevn_set(KSEG1);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			pevn_set(oldpid);
		} else {
			/* Bigger than TLBSIZE, get new ASID directly */
			get_new_mmu_context(mm);
			if (mm == current->active_mm)
				pevn_set(vma_mm_context & ASID_MASK);
		}
		local_irq_restore(flags);
	}
}

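/*
 * Flush kernel mappings in [start, end).  Ranges that do not fit in
 * the TLB are handled with a full flush instead.
 */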
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= TLBSIZE) {
		int pid = pevn_get();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			long idx;

			pevn_set(start);
			start += PAGE_SIZE;
			tlb_probe();
			idx = tlbpt_get();
			if (idx < 0)
				continue;
			pectx_set(0);
			pevn_set(KSEG1);
			barrier();
			tlb_write_indexed();
		}
		pevn_set(pid);
	} else {
		local_flush_tlb_all();
	}

	local_irq_restore(flags);
}

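/*
 * Invalidate the mapping of the single user page @page in vma's
 * address space, if it is currently present in the TLB.
 */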
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma && vma->vm_mm->context != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;
		unsigned long vma_ASID = vma->vm_mm->context;

		newpid = vma_ASID & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = pevn_get() & ASID_MASK;
		pevn_set(page | newpid);
		barrier();
		tlb_probe();
		idx = tlbpt_get();
		pectx_set(0);
		pevn_set(KSEG1);
		if (idx < 0)		/* p_bit(31) - 1: miss, 0: hit */
			goto finish;
		barrier();
		tlb_write_indexed();
finish:
		pevn_set(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = pevn_get();
	page &= (PAGE_MASK << 1);
	pevn_set(page);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		pevn_set(KSEG1);
		barrier();
		tlb_write_indexed();
	}
	pevn_set(oldpid);
	local_irq_restore(flags);
}

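/*
 * Install or refresh the hardware TLB entry for @address with the new
 * @pte.  Only the currently active address space is updated.
 */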
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = pevn_get() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	pevn_set(address | pid);
	barrier();
	tlb_probe();
	idx = tlbpt_get();
	pectx_set(pte_val(pte));
	pevn_set(address | pid);
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	pevn_set(pid);
	local_irq_restore(flags);
}

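/*
 * Boot-time TLB setup: unlock and flush all entries, then install the
 * TLB refill handler in the exception vector area.
 */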
void __cpuinit tlb_init(void)
{
	tlblock_set(0);
	local_flush_tlb_all();
	memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100),
			&score7_FTLB_refill_Handler, 0xFC);
	flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100,
			EXCEPTION_VECTOR_BASE_ADDR + 0x1FC);
}