13 #include <linux/kernel.h>
14 #include <linux/linkage.h>
15 #include <linux/sched.h>
18 #include <linux/module.h>
19 #include <linux/bitops.h>
22 #include <asm/bootinfo.h>
23 #include <asm/cache.h>
26 #include <asm/cpu-features.h>
29 #include <asm/pgtable.h>
31 #include <asm/sections.h>
32 #include <asm/mmu_context.h>
34 #include <asm/cacheflush.h>
35 #include <asm/traps.h>
/*
 * Run a cache-maintenance function on every CPU.
 *
 * Special variant of smp_call_function() for use by cache functions:
 *
 *  o No return value
 *  o collapses to a normal function call on UP kernels
 *  o collapses to a normal function call on systems with a single shared
 *    primary cache (CONFIG_MIPS_MT_SMP / CONFIG_MIPS_MT_SMTC)
 *
 * NOTE(review): the extraction dropped the braces and calls of this
 * function; body reconstructed to match the surviving header and #if line.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	/* Cores with private caches must flush remotely via IPI. */
	smp_call_function(func, info, 1);
#endif
	/* Always flush the local CPU's caches too. */
	func(info);
	preempt_enable();
}
/*
 * Index-type cacheops act on the local core's cache only; on CMP systems
 * another core may own the line, so they are not safe there.
 *
 * NOTE(review): the #else/#endif of this conditional were dropped by the
 * extraction; restored here.
 */
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
73 static void cache_noop(
void) {}
76 .bc_enable = (
void *)cache_noop,
78 .bc_wback_inv = (
void *)cache_noop,
79 .
bc_inv = (
void *)cache_noop
/* Identify R4600 revision from the PRId register (mask off minor rev). */
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * Workaround executed before Hit-type cacheops on R4600 parts:
 *  - V2.x: a dummy uncached load (CKSEG1) flushes the processor's
 *    internal write buffer before the cacheop;
 *  - V1.x: four nops give the pipeline time to settle.
 *
 * NOTE(review): the do { } while (0) wrapper lines were dropped by the
 * extraction; restored so the macro is statement-safe.
 */
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
95 static void (*r4k_blast_dcache_page)(
unsigned long addr);
97 static inline void r4k_blast_dcache_page_dc32(
unsigned long addr)
100 blast_dcache32_page(addr);
103 static inline void r4k_blast_dcache_page_dc64(
unsigned long addr)
106 blast_dcache64_page(addr);
109 static void __cpuinit r4k_blast_dcache_page_setup(
void)
114 r4k_blast_dcache_page = (
void *)cache_noop;
115 else if (dc_lsize == 16)
116 r4k_blast_dcache_page = blast_dcache16_page;
117 else if (dc_lsize == 32)
118 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
119 else if (dc_lsize == 64)
120 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
123 static void (* r4k_blast_dcache_page_indexed)(
unsigned long addr);
125 static void __cpuinit r4k_blast_dcache_page_indexed_setup(
void)
130 r4k_blast_dcache_page_indexed = (
void *)cache_noop;
131 else if (dc_lsize == 16)
132 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
133 else if (dc_lsize == 32)
134 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
135 else if (dc_lsize == 64)
136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
139 static void (* r4k_blast_dcache)(
void);
141 static void __cpuinit r4k_blast_dcache_setup(
void)
146 r4k_blast_dcache = (
void *)cache_noop;
147 else if (dc_lsize == 16)
148 r4k_blast_dcache = blast_dcache16;
149 else if (dc_lsize == 32)
150 r4k_blast_dcache = blast_dcache32;
151 else if (dc_lsize == 64)
152 r4k_blast_dcache = blast_dcache64;
/*
 * Force the following code onto a 2^order-byte boundary by branching over
 * the alignment padding.  Used by the TX49 I-cache index-invalidate
 * workaround so the flush loop itself sits in a known I-cache chunk.
 *
 * NOTE(review): the branch ("b 1f") and label ("1:") lines were dropped
 * by the extraction; restored around the surviving .align directive.
 */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)

#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10)	/* 1KB chunk */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)	/* 2KB chunk */
/*
 * R4600 V1.x I-cache flush must run with interrupts disabled (index
 * cacheop erratum).
 *
 * NOTE(review): the whole body was dropped by the extraction;
 * reconstructed as the standard irq-save wrapper around blast_icache32().
 */
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
174 static inline void tx49_blast_icache32(
void)
185 for (ws = 0; ws < ws_end; ws += ws_inc)
186 for (addr = start + 0x400; addr <
end; addr += 0x400 * 2)
190 for (ws = 0; ws < ws_end; ws += ws_inc)
191 for (addr = start; addr <
end; addr += 0x400 * 2)
/*
 * R4600 V1.x page-indexed I-cache flush must run with interrupts disabled
 * (index cacheop erratum).
 *
 * NOTE(review): braces and the irq-save/restore lines were dropped by the
 * extraction; restored around the surviving call.
 */
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
204 static inline void tx49_blast_icache32_page_indexed(
unsigned long page)
207 unsigned long start =
INDEX_BASE + (page & indexmask);
216 for (ws = 0; ws < ws_end; ws += ws_inc)
217 for (addr = start + 0x400; addr <
end; addr += 0x400 * 2)
221 for (ws = 0; ws < ws_end; ws += ws_inc)
222 for (addr = start; addr <
end; addr += 0x400 * 2)
226 static void (* r4k_blast_icache_page)(
unsigned long addr);
228 static void __cpuinit r4k_blast_icache_page_setup(
void)
233 r4k_blast_icache_page = (
void *)cache_noop;
234 else if (ic_lsize == 16)
235 r4k_blast_icache_page = blast_icache16_page;
236 else if (ic_lsize == 32)
237 r4k_blast_icache_page = blast_icache32_page;
238 else if (ic_lsize == 64)
239 r4k_blast_icache_page = blast_icache64_page;
243 static void (* r4k_blast_icache_page_indexed)(
unsigned long addr);
245 static void __cpuinit r4k_blast_icache_page_indexed_setup(
void)
250 r4k_blast_icache_page_indexed = (
void *)cache_noop;
251 else if (ic_lsize == 16)
252 r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
253 else if (ic_lsize == 32) {
255 r4k_blast_icache_page_indexed =
256 blast_icache32_r4600_v1_page_indexed;
258 r4k_blast_icache_page_indexed =
259 tx49_blast_icache32_page_indexed;
261 r4k_blast_icache_page_indexed =
262 blast_icache32_page_indexed;
263 }
else if (ic_lsize == 64)
264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
267 static void (* r4k_blast_icache)(
void);
269 static void __cpuinit r4k_blast_icache_setup(
void)
274 r4k_blast_icache = (
void *)cache_noop;
275 else if (ic_lsize == 16)
276 r4k_blast_icache = blast_icache16;
277 else if (ic_lsize == 32) {
279 r4k_blast_icache = blast_r4600_v1_icache32;
281 r4k_blast_icache = tx49_blast_icache32;
283 r4k_blast_icache = blast_icache32;
284 }
else if (ic_lsize == 64)
285 r4k_blast_icache = blast_icache64;
288 static void (* r4k_blast_scache_page)(
unsigned long addr);
290 static void __cpuinit r4k_blast_scache_page_setup(
void)
295 r4k_blast_scache_page = (
void *)cache_noop;
296 else if (sc_lsize == 16)
297 r4k_blast_scache_page = blast_scache16_page;
298 else if (sc_lsize == 32)
299 r4k_blast_scache_page = blast_scache32_page;
300 else if (sc_lsize == 64)
301 r4k_blast_scache_page = blast_scache64_page;
302 else if (sc_lsize == 128)
303 r4k_blast_scache_page = blast_scache128_page;
306 static void (* r4k_blast_scache_page_indexed)(
unsigned long addr);
308 static void __cpuinit r4k_blast_scache_page_indexed_setup(
void)
313 r4k_blast_scache_page_indexed = (
void *)cache_noop;
314 else if (sc_lsize == 16)
315 r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
316 else if (sc_lsize == 32)
317 r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
318 else if (sc_lsize == 64)
319 r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
320 else if (sc_lsize == 128)
321 r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
324 static void (* r4k_blast_scache)(
void);
326 static void __cpuinit r4k_blast_scache_setup(
void)
331 r4k_blast_scache = (
void *)cache_noop;
332 else if (sc_lsize == 16)
333 r4k_blast_scache = blast_scache16;
334 else if (sc_lsize == 32)
335 r4k_blast_scache = blast_scache32;
336 else if (sc_lsize == 64)
337 r4k_blast_scache = blast_scache64;
338 else if (sc_lsize == 128)
339 r4k_blast_scache = blast_scache128;
342 static inline void local_r4k___flush_cache_all(
void * args)
344 #if defined(CONFIG_CPU_LOONGSON2)
363 static void r4k___flush_cache_all(
void)
365 r4k_on_each_cpu(local_r4k___flush_cache_all,
NULL);
/*
 * Does this mm hold a live ASID, i.e. could its pages be cached anywhere?
 * With MT (SMP/SMTC) any online CPU may carry a context for the mm; on
 * other configurations only the local CPU's context matters.
 *
 * NOTE(review): the body around the surviving #if line was dropped by the
 * extraction; reconstructed.
 */
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
/*
 * vmap/vunmap cache hooks: aliases only affect the primary D-cache, so a
 * full D-cache blast is sufficient for both directions.
 *
 * NOTE(review): the one-line bodies were dropped by the extraction;
 * restored.
 */
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
393 static inline void local_r4k_flush_cache_range(
void * args)
398 if (!(has_valid_asid(vma->
vm_mm)))
407 unsigned long start,
unsigned long end)
412 r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
415 static inline void local_r4k_flush_cache_mm(
void * args)
419 if (!has_valid_asid(mm))
439 static void r4k_flush_cache_mm(
struct mm_struct *mm)
444 r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
/*
 * NOTE(review): this span is a garbled extraction of the per-page cache
 * flush pair (local_r4k_flush_cache_page + the r4k_flush_cache_page SMP
 * entry point).  Most interior lines -- the args-struct unpacking, the
 * pgd/pud/pmd/pte walk, the highmem/kmap_coherent handling and the
 * surrounding conditionals -- were dropped, leaving only the fragments
 * below.  Recover the complete bodies from the original file; do not
 * hand-patch these fragments.
 */
453 static inline void local_r4k_flush_cache_page(
void *args)
/* fcp_args is presumably a struct flush_cache_page_args * -- TODO confirm */
457 unsigned long addr = fcp_args->
addr;
461 int map_coherent = 0;
/* bail out early when the mm holds no live ASID anywhere */
472 if (!has_valid_asid(mm))
501 addr = (
unsigned long)vaddr;
/* flush the page through D-cache and (if present) S-cache */
505 r4k_blast_dcache_page(addr);
507 r4k_blast_scache_page(addr);
514 drop_mmu_context(mm, cpu);
516 r4k_blast_icache_page(addr);
/* SMP entry point: only the (vma, addr, pfn) signature tail survives */
528 unsigned long addr,
unsigned long pfn)
536 r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
/* Flush one D-cache page on the local CPU (addr smuggled as void *). */
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

/*
 * Flush one D-cache page on all CPUs.  In atomic context an IPI is not
 * allowed, so fall back to flushing only the local CPU.
 *
 * NOTE(review): the in_atomic() condition/else structure was dropped by
 * the extraction; reconstructed around the two surviving call sites.
 */
static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page,
				(void *) addr);
}
557 static inline void local_r4k_flush_icache_range(
unsigned long start,
unsigned long end)
560 if (end - start >= dcache_size) {
564 protected_blast_dcache_range(start, end);
568 if (end - start > icache_size)
571 protected_blast_icache_range(start, end);
574 static inline void local_r4k_flush_icache_range_ipi(
void *args)
577 unsigned long start = fir_args->
start;
578 unsigned long end = fir_args->
end;
580 local_r4k_flush_icache_range(start, end);
583 static void r4k_flush_icache_range(
unsigned long start,
unsigned long end)
590 r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
#ifdef CONFIG_DMA_NONCOHERENT

/*
 * Write back and invalidate the caches covering a DMA buffer.  With an
 * inclusive S-cache, operating on the S-cache suffices; otherwise the
 * primary D-cache must be flushed explicitly, then the board cache.
 *
 * NOTE(review): this DMA pair was heavily gutted by the extraction; the
 * bodies were reconstructed around the surviving blast calls -- confirm
 * against the original file.
 */
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		__sync();
		return;
	}

	/*
	 * No inclusive secondary cache, so the primary D-cache must be
	 * flushed explicitly.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
	__sync();
}

/*
 * Invalidate the caches covering a DMA buffer before the device writes
 * it.  Partial cache lines at either end are written back first: some
 * QED parts (RM5200/RM7000) fault on misaligned hit ops, hence the
 * explicit line-aligned cache_op() calls before the ranged invalidate.
 */
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			unsigned long lsize = cpu_scache_line_size();
			unsigned long almask = ~(lsize - 1);

			cache_op(Hit_Writeback_Inv_SD, addr & almask);
			cache_op(Hit_Writeback_Inv_SD,
				 (addr + size - 1) & almask);
			blast_inv_scache_range(addr, addr + size);
		}
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long almask = ~(lsize - 1);

		R4600_HIT_CACHEOP_WAR_IMPL;
		cache_op(Hit_Writeback_Inv_D, addr & almask);
		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * NOTE(review): garbled extraction of local_r4k_flush_cache_sigtramp().
 * The line-size declarations, the dc/sc/ic-presence guards around each
 * protected_* call, and the MIPS4K_ICACHE_REFILL_WAR asm sequence plus
 * the trailing MIPS_CACHE_SYNC_WAR handling were all dropped.  Restore
 * the full body from the original file; do not hand-patch the fragments.
 */
677 static void local_r4k_flush_cache_sigtramp(
void *
arg)
682 unsigned long addr = (
unsigned long) arg;
/* write back the D-cache line, then the S-cache line, then kill the I-line */
686 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
688 protected_writeback_scache_line(addr & ~(sc_lsize - 1));
690 protected_flush_icache_line(addr & ~(ic_lsize - 1));
713 static void r4k_flush_cache_sigtramp(
unsigned long addr)
715 r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (
void *) addr);
718 static void r4k_flush_icache_all(
void)
729 static inline void local_r4k_flush_kernel_vmap_range(
void *args)
732 unsigned long vaddr = vmra->
vaddr;
733 int size = vmra->
size;
743 blast_dcache_range(vaddr, vaddr + size);
747 static void r4k_flush_kernel_vmap_range(
unsigned long vaddr,
int size)
754 r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
757 static inline void rm7k_erratum31(
void)
759 const unsigned long ic_lsize = 32;
771 "cache\t%1, 0(%0)\n\t"
772 "cache\t%1, 0x1000(%0)\n\t"
773 "cache\t%1, 0x2000(%0)\n\t"
774 "cache\t%1, 0x3000(%0)\n\t"
775 "cache\t%2, 0(%0)\n\t"
776 "cache\t%2, 0x1000(%0)\n\t"
777 "cache\t%2, 0x2000(%0)\n\t"
778 "cache\t%2, 0x3000(%0)\n\t"
779 "cache\t%1, 0(%0)\n\t"
780 "cache\t%1, 0x1000(%0)\n\t"
781 "cache\t%1, 0x2000(%0)\n\t"
782 "cache\t%1, 0x3000(%0)\n\t"
809 "3-way",
"4-way",
"5-way",
"6-way",
"7-way",
"8-way"
817 unsigned long config1;
825 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
830 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
840 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
845 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
854 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
859 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
875 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
880 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
891 icache_size = 1 << (12 + ((config &
R10K_CONF_IC) >> 29));
896 dcache_size = 1 << (12 + ((config &
R10K_CONF_DC) >> 26));
910 config |= 0x00400000
U;
917 icache_size = 1 << (10 + ((config &
CONF_IC) >> 9));
922 dcache_size = 1 << (10 + ((config &
CONF_DC) >> 6));
934 icache_size = 1 << (10 + ((config &
CONF_IC) >> 9));
939 dcache_size = 1 << (10 + ((config &
CONF_DC) >> 6));
951 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
956 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
961 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
968 icache_size = 1 << (12 + ((config &
CONF_IC) >> 9));
976 dcache_size = 1 << (12 + ((config &
CONF_DC) >> 6));
987 panic(
"Don't know how to probe P-caches on this cpu.");
995 if ((lsize = ((config1 >> 19) & 7)))
996 c->
icache.linesz = 2 << lsize;
999 c->
icache.sets = 32 << (((config1 >> 22) + 1) & 7);
1000 c->
icache.ways = 1 + ((config1 >> 16) & 7);
1002 icache_size = c->
icache.sets *
1015 if ((lsize = ((config1 >> 10) & 7)))
1016 c->
dcache.linesz = 2 << lsize;
1019 c->
dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1020 c->
dcache.ways = 1 + ((config1 >> 7) & 7);
1022 dcache_size = c->
dcache.sets *
1041 PAGE_SIZE <= 0x8000)
1042 panic(
"Improper R4000SC processor configuration detected");
1079 alias_74k_erratum(c);
1087 if (c->
dcache.waysize > PAGE_SIZE)
1105 #ifdef CONFIG_CPU_LOONGSON2
1113 printk(
"Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1118 printk(
"Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1119 dcache_size >> 10, way_string[c->
dcache.ways],
1122 "cache aliases" :
"no aliases",
1138 if (config & CONF_SC)
1141 begin = (
unsigned long) &_stext;
1142 begin &= ~((4 * 1024 * 1024) - 1);
1143 end = begin + (4 * 1024 * 1024);
1153 for (addr = begin; addr <
end; addr = (begin + pow2)) {
1154 unsigned long *
p = (
unsigned long *) addr;
1155 __asm__ __volatile__(
"nop" : :
"r" (*p));
1162 __asm__ __volatile__(
"nop; nop; nop; nop;");
1168 pow2 = (128 * 1024);
1169 for (addr = begin + (128 * 1024); addr <
end; addr = begin + pow2) {
1171 __asm__ __volatile__(
"nop; nop; nop; nop;");
#if defined(CONFIG_CPU_LOONGSON2)
/*
 * Loongson-2 has a fixed 512KB, 4-way, 32-byte-line unified secondary
 * cache; no probing needed, just record the geometry.
 *
 * NOTE(review): the body between the header and the surviving pr_info
 * line was dropped by the extraction; reconstructed -- confirm the exact
 * geometry/flags against the original file.
 */
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size /
		(c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		  scache_size >> 10, way_string[c->scache.ways],
		  c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
/*
 * NOTE(review): garbled extraction of setup_scache().  The large
 * CPU-type switch (R4000SC/R4400SC tag-based probing, R10000/R12000
 * config-register decode, the R5000/RM7000/MIPS32 sub-init calls) and
 * the final geometry bookkeeping were dropped; only the fragments below
 * survive.  Restore the full body from the original file; do not
 * hand-patch the fragments.
 */
1209 static void __cpuinit setup_scache(
void)
/* S-cache line size decoded from the config register (64 or 128 bytes) */
1234 c->
scache.linesz = 64 << ((config >> 13) & 1);
1242 #ifdef CONFIG_R5000_CPU_SCACHE
1249 #ifdef CONFIG_RM7000_CPU_SCACHE
1254 #if defined(CONFIG_CPU_LOONGSON2)
1256 loongson2_sc_init();
1268 #ifdef CONFIG_MIPS_CPU_SCACHE
1271 printk(
"MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1277 panic(
"Dunno how to handle MIPS32 / MIPS64 second level cache");
1292 printk(
"Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1318 set_c0_config(1 << 19);
/*
 * NXP PR4450 pipeline barrier: six nops with reordering disabled, used
 * after rewriting the config register.
 *
 * NOTE(review): the closing ".set reorder" line of the asm was dropped by
 * the extraction; restored.
 */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")
1330 static void nxp_pr4450_fixup_config(
void)
1332 unsigned long config0;
1337 config0 &= ~(0x7 | (7 << 25) | (7 << 28));
/* Cache coherency attribute requested on the command line; -1 = unset. */
1345 static int __cpuinitdata cca = -1;
/*
 * NOTE(review): garbled extraction of coherency_setup().  The cca
 * command-line parsing above it, the change_c0_config() call, and the
 * per-CPU-type switch (R4000/R4400 EV write-back fixups, Alchemy od-bit
 * handling, the PR4450 case that reaches the surviving call below) were
 * dropped.  Restore the full body from the original file; do not
 * hand-patch the fragments.
 */
1356 static void __cpuinit coherency_setup(
void)
/* fall back to the platform default CCA when unset/out of range */
1358 if (cca < 0 || cca > 7)
1362 pr_debug(
"Using cache attribute %d\n", cca);
1391 nxp_pr4450_fixup_config();
#if defined(CONFIG_DMA_NONCOHERENT)

/*
 * "coherentio" command-line flag: force coherent-I/O handling even on a
 * DMA-noncoherent configuration.  The coherentio flag itself is declared
 * elsewhere in this file -- presumably just above this span; confirm
 * against the original file.
 *
 * NOTE(review): the body and __setup registration structure were partly
 * dropped by the extraction; reconstructed.
 */
static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 0;
}
__setup("coherentio", setcoherentio);
#endif
/*
 * NOTE(review): garbled extraction of r4k_cache_error_setup().  The body
 * that installs one of the two weak cache-error exception vectors below
 * (presumably via set_uncached_handler(), selecting the SB1 variant on
 * SiByte parts -- confirm against the original file) was dropped.
 * Restore it before building.
 */
1410 static void __cpuinit r4k_cache_error_setup(
void)
1412 extern char __weak except_vec2_generic;
1413 extern char __weak except_vec2_sb1;
1437 r4k_blast_dcache_page_setup();
1438 r4k_blast_dcache_page_indexed_setup();
1439 r4k_blast_dcache_setup();
1440 r4k_blast_icache_page_setup();
1441 r4k_blast_icache_page_indexed_setup();
1442 r4k_blast_icache_setup();
1443 r4k_blast_scache_page_setup();
1444 r4k_blast_scache_page_indexed_setup();
1445 r4k_blast_scache_setup();
1477 #if defined(CONFIG_DMA_NONCOHERENT)
1479 _dma_cache_wback_inv = (
void *)cache_noop;
1480 _dma_cache_wback = (
void *)cache_noop;
1481 _dma_cache_inv = (
void *)cache_noop;
1483 _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
1484 _dma_cache_wback = r4k_dma_cache_wback_inv;
1485 _dma_cache_inv = r4k_dma_cache_inv;
1491 #if !defined(CONFIG_MIPS_CMP)
1492 local_r4k___flush_cache_all(
NULL);