Linux Kernel 3.7.1
cache.c
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

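/*
 * Illustrative sketch, not part of the original file: the pointers above
 * all start out at no-op stubs, and each CPU family's cache setup (for
 * example sh4_cache_init(), which cpu_cache_init() below calls) is what
 * repoints them at real flush routines.  The example_*() names used here
 * are hypothetical placeholders, roughly how such an init might look.
 */
#if 0   /* example only */
extern void example_flush_cache_all(void *args);
extern void example_flush_dcache_page(void *args);
extern void example_flush_region(void *start, int size);

static void __init example_cache_init(void)
{
        local_flush_cache_all     = example_flush_cache_all;
        local_flush_dcache_page   = example_flush_dcache_page;

        __flush_wback_region      = example_flush_region;
        __flush_purge_region      = example_flush_region;
        __flush_invalidate_region = example_flush_region;
}
#endif
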
static inline void noop__flush_region(void *start, int size)
{
}

static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();

        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);

        /* smp_call_function() only targets the other CPUs; run it here too. */
        func(info);

        preempt_enable();
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}

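/*
 * Usage sketch, not part of the original file: copy_to_user_page() and
 * copy_from_user_page() are the arch hooks behind remote-VM access
 * (ptrace and friends).  The generic code does roughly the following
 * after pinning the target page; the function and variable names below
 * are illustrative only.
 */
#if 0   /* example only */
static int example_access_page(struct vm_area_struct *vma, struct page *page,
                               unsigned long addr, void *buf,
                               unsigned long offset, int bytes, int write)
{
        void *maddr = kmap(page);

        if (write) {
                copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
                set_page_dirty_lock(page);
        } else {
                copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
        }
        kunmap(page);
        return bytes;
}
#endif
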
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

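/*
 * Note, not part of the original file: copy_user_highpage() and
 * clear_user_highpage() back the generic copy-on-write and
 * anonymous-page-zeroing paths.  The extra __flush_purge_region() calls
 * above exist because the kernel mapping used for the copy/clear may sit
 * at a different cache colour than the user address in 'vaddr', so the
 * freshly written lines are purged before userspace can observe the page
 * through its own mapping.
 */
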
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

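/*
 * Illustrative sketch, not part of the original file: the ranged flush
 * entry points above pack their arguments into struct flusher_data
 * (vma, addr1, addr2) because cacheop_on_each_cpu() passes only a single
 * void pointer to each CPU.  A per-CPU callback therefore unpacks it as
 * below; example_flush_cache_range() is a hypothetical name.
 */
#if 0   /* example only */
static void example_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma = data->vma;
        unsigned long start = data->addr1;
        unsigned long end = data->addr2;

        /*
         * ... write back and invalidate the cache lines covering
         * [start, end) for this vma on the local CPU ...
         */
}
#endif
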
void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

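/*
 * Usage sketch, not part of the original file: flush_icache_range() is
 * what code-patching paths (module loading, kprobes and similar) call
 * after writing instructions through the D-cache, so the I-cache does
 * not keep serving stale opcodes.  example_patch_text() is hypothetical.
 */
#if 0   /* example only */
static void example_patch_text(void *dst, const void *new_insns, size_t len)
{
        memcpy(dst, new_insns, len);
        flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
#endif
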
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

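/*
 * Worked example, not part of the original file (assumes 4 KiB pages,
 * i.e. PAGE_SHIFT = 12, and a direct-mapped 16 KiB D-cache with 32-byte
 * lines, so sets = 512 and entry_shift = 5):
 *
 *      alias_mask = ((512 - 1) << 5) & ~(4096 - 1)
 *                 = 0x3fe0 & ~0xfff
 *                 = 0x3000
 *      n_aliases  = (0x3000 >> 12) + 1 = 4
 *
 * meaning a physical page can land at four different cache colours,
 * which is exactly the aliasing the flush paths above work around.
 */
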
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef CCR
        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region      = noop__flush_region;
        __flush_purge_region      = noop__flush_region;
        __flush_invalidate_region = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}