Linux Kernel 3.7.1
c-tx39.c
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller ([email protected])
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */

/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
        ".set push\n\t" \
        ".set noreorder\n\t" \
        "b 1f\n\t" \
        "nop\n\t" \
        "1:\n\t" \
        ".set pop" \
        )
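
/*
 * The taken branch and the nop in its delay slot force the pipeline to
 * refetch, so instructions already streaming out of the icache are
 * discarded and the ICE change in the CONF register takes effect before
 * the cache is operated on.
 */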

/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
        unsigned long flags, config;

        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        blast_icache16();
        write_c0_conf(config);
        local_irq_restore(flags);
}
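
/*
 * The TX39/H core has a writethrough dcache, so there is nothing to
 * write back for DMA; after the I/O barrier the range only needs
 * invalidating.
 */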
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        iob();
        blast_inv_dcache_range(addr, addr + size);
}


/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
        if (current_cpu_type() != CPU_TX3912)
                blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
        blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
        blast_dcache16();
}

static inline void tx39_blast_icache_page(unsigned long addr)
{
        unsigned long flags, config;
        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        blast_icache16_page(addr);
        write_c0_conf(config);
        local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
        unsigned long flags, config;
        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        blast_icache16_page_indexed(addr);
        write_c0_conf(config);
        local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
        unsigned long flags, config;
        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        blast_icache16();
        write_c0_conf(config);
        local_irq_restore(flags);
}

static void tx39__flush_cache_vmap(void)
{
        tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
        tx39_blast_dcache();
}

static inline void tx39_flush_cache_all(void)
{
        if (!cpu_has_dc_aliases)
                return;

        tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
        tx39_blast_dcache();
        tx39_blast_icache();
}
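
/*
 * tx39_flush_cache_mm and tx39_flush_cache_range below, like
 * tx39_flush_cache_all above, are no-ops unless the dcache can alias;
 * when aliases are possible the whole dcache is blasted rather than
 * walking the address range.
 */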
static void tx39_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        if (cpu_context(smp_processor_id(), mm) != 0)
                tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        if (!cpu_has_dc_aliases)
                return;
        if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
                return;

        tx39_blast_dcache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (cpu_context(smp_processor_id(), mm) == 0)
                return;

        page &= PAGE_MASK;
        pgdp = pgd_offset(mm, page);
        pudp = pud_offset(pgdp, page);
        pmdp = pmd_offset(pudp, page);
        ptep = pte_offset(pmdp, page);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_val(*ptep) & _PAGE_PRESENT))
                return;

        /*
         * Doing flushes for another ASID than the current one is
         * too difficult since stupid R4k caches do a TLB translation
         * for every cache flush operation.  So we do indexed flushes
         * in that case, which doesn't overly flush the cache too much.
         */
        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
                if (cpu_has_dc_aliases || exec)
                        tx39_blast_dcache_page(page);
                if (exec)
                        tx39_blast_icache_page(page);

                return;
        }

        /*
         * Do indexed flush, too much work to get the (possible) TLB refills
         * to work correctly.
         */
        if (cpu_has_dc_aliases || exec)
                tx39_blast_dcache_page_indexed(page);
        if (exec)
                tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void * addr)
{
        tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
        tx39_blast_dcache_page(addr);
}
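
/*
 * Keep the icache coherent after new instructions have been written: the
 * dcache is written back first so the code reaches memory, then the
 * corresponding icache lines are invalidated.  Once a range is larger
 * than the cache itself, blasting the whole cache is cheaper than
 * per-line operations.
 */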
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
        if (end - start > dcache_size)
                tx39_blast_dcache();
        else
                protected_blast_dcache_range(start, end);

        if (end - start > icache_size)
                tx39_blast_icache();
        else {
                unsigned long flags, config;
                /* disable icache (set ICE#) */
                local_irq_save(flags);
                config = read_c0_conf();
                write_c0_conf(config & ~TX39_CONF_ICE);
                TX39_STOP_STREAMING();
                protected_blast_icache_range(start, end);
                write_c0_conf(config);
                local_irq_restore(flags);
        }
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        BUG();
}
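
/*
 * DMA cache maintenance: page-aligned requests are handled with the
 * per-page blast, requests larger than the dcache with a full flush,
 * and everything else with a ranged operation.  Only the ranged path
 * differs between the two routines: "wback_inv" writes back and
 * invalidates, "inv" just invalidates.
 */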
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        unsigned long end;

        if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
                end = addr + size;
                do {
                        tx39_blast_dcache_page(addr);
                        addr += PAGE_SIZE;
                } while (addr != end);
        } else if (size > dcache_size) {
                tx39_blast_dcache();
        } else {
                blast_dcache_range(addr, addr + size);
        }
}

static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
        unsigned long end;

        if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
                end = addr + size;
                do {
                        tx39_blast_dcache_page(addr);
                        addr += PAGE_SIZE;
                } while (addr != end);
        } else if (size > dcache_size) {
                tx39_blast_dcache();
        } else {
                blast_inv_dcache_range(addr, addr + size);
        }
}
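
/*
 * Make the signal trampoline the kernel has just written visible to the
 * instruction stream: write back the dcache line holding it, then
 * invalidate the matching icache line with the icache disabled.
 */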
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
        unsigned long ic_lsize = current_cpu_data.icache.linesz;
        unsigned long dc_lsize = current_cpu_data.dcache.linesz;
        unsigned long config;
        unsigned long flags;

        protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        protected_flush_icache_line(addr & ~(ic_lsize - 1));
        write_c0_conf(config);
        local_irq_restore(flags);
}
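
/*
 * The ICS and DCS fields of the CP0 CONF register encode the cache sizes
 * as 1 KB << field; line size and associativity are fixed per chip and
 * are set from the CPU type.
 */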
static __init void tx39_probe_cache(void)
{
        unsigned long config;

        config = read_c0_conf();

        icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
                                  TX39_CONF_ICS_SHIFT));
        dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
                                  TX39_CONF_DCS_SHIFT));

        current_cpu_data.icache.linesz = 16;
        switch (current_cpu_type()) {
        case CPU_TX3912:
                current_cpu_data.icache.ways = 1;
                current_cpu_data.dcache.ways = 1;
                current_cpu_data.dcache.linesz = 4;
                break;

        case CPU_TX3927:
                current_cpu_data.icache.ways = 2;
                current_cpu_data.dcache.ways = 2;
                current_cpu_data.dcache.linesz = 16;
                break;

        case CPU_TX3922:
        default:
                current_cpu_data.icache.ways = 1;
                current_cpu_data.dcache.ways = 1;
                current_cpu_data.dcache.linesz = 16;
                break;
        }
}

void __cpuinit tx39_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        unsigned long config;

        config = read_c0_conf();
        config &= ~TX39_CONF_WBON;
        write_c0_conf(config);

        tx39_probe_cache();

        switch (current_cpu_type()) {
        case CPU_TX3912:
                /* TX39/H core (writethru direct-map cache) */
                __flush_cache_vmap = tx39__flush_cache_vmap;
                __flush_cache_vunmap = tx39__flush_cache_vunmap;
                flush_cache_all = tx39h_flush_icache_all;
                __flush_cache_all = tx39h_flush_icache_all;
                flush_cache_mm = (void *) tx39h_flush_icache_all;
                flush_cache_range = (void *) tx39h_flush_icache_all;
                flush_cache_page = (void *) tx39h_flush_icache_all;
                flush_icache_range = (void *) tx39h_flush_icache_all;
                local_flush_icache_range = (void *) tx39h_flush_icache_all;

                flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
                local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
                flush_data_cache_page = (void *) tx39h_flush_icache_all;

                _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;

                shm_align_mask = PAGE_SIZE - 1;

                break;

        case CPU_TX3922:
        case CPU_TX3927:
        default:
                /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
                r3k_have_wired_reg = 1;
                write_c0_wired(0);      /* set 8 on reset... */
                /* board-dependent init code may set WBON */

                __flush_cache_vmap = tx39__flush_cache_vmap;
                __flush_cache_vunmap = tx39__flush_cache_vunmap;

                flush_cache_all = tx39_flush_cache_all;
                __flush_cache_all = tx39___flush_cache_all;
                flush_cache_mm = tx39_flush_cache_mm;
                flush_cache_range = tx39_flush_cache_range;
                flush_cache_page = tx39_flush_cache_page;
                flush_icache_range = tx39_flush_icache_range;
                local_flush_icache_range = tx39_flush_icache_range;

                __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

                flush_cache_sigtramp = tx39_flush_cache_sigtramp;
                local_flush_data_cache_page = local_tx39_flush_data_cache_page;
                flush_data_cache_page = tx39_flush_data_cache_page;

                _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
                _dma_cache_wback = tx39_dma_cache_wback_inv;
                _dma_cache_inv = tx39_dma_cache_inv;
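
                /*
                 * Shared mappings are aligned to the dcache way size
                 * (but at least to a page) so that they cannot create
                 * virtual aliases in the 2-way dcache.
                 */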
                shm_align_mask = max_t(unsigned long,
                                       (dcache_size / current_cpu_data.dcache.ways) - 1,
                                       PAGE_SIZE - 1);

                break;
        }

        current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
        current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

        current_cpu_data.icache.sets =
                current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
        current_cpu_data.dcache.sets =
                current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
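
        /*
         * A dcache way larger than a page means the cache is indexed by
         * virtual address bits outside the page offset, so a physical
         * page can alias at more than one index.
         */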
        if (current_cpu_data.dcache.waysize > PAGE_SIZE)
                current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

        current_cpu_data.icache.waybit = 0;
        current_cpu_data.dcache.waybit = 0;

        printk("Primary instruction cache %ldkB, linesize %d bytes\n",
               icache_size >> 10, current_cpu_data.icache.linesz);
        printk("Primary data cache %ldkB, linesize %d bytes\n",
               dcache_size >> 10, current_cpu_data.dcache.linesz);

        build_clear_page();
        build_copy_page();
        tx39h_flush_icache_all();
}