17 #include <linux/kernel.h>
24 #include <asm/mmu_context.h>
25 #include <asm/cacheflush.h>
26 #include <asm/tlbflush.h>
28 #include <asm/pgalloc.h>
29 #include <asm/pgtable.h>
32 #include <asm/cache.h>
33 #include <asm/traps.h>
34 #include <asm/oplib.h>
54 static unsigned int hwbug_bitmask;
62 static pgd_t *srmmu_swapper_pg_dir;
67 const struct sparc32_cachetlb_ops *local_ops;
69 #define FLUSH_BEGIN(mm)
72 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
81 static ctxd_t *srmmu_context_table;
86 static int is_hypersparc;
88 static int srmmu_cache_pagetables;
91 static unsigned long srmmu_nocache_size;
92 static unsigned long srmmu_nocache_end;
95 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
98 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
102 static struct bit_map srmmu_nocache_map;
104 static inline int srmmu_pmd_none(
pmd_t pmd)
105 {
return !(
pmd_val(pmd) & 0xFFFFFFF); }
108 static inline void srmmu_ctxd_set(
ctxd_t *ctxp,
pgd_t *pgdp)
141 return (
pte_t *) pte +
150 static void *__srmmu_get_nocache(
int size,
int align)
172 size, (
int) srmmu_nocache_size,
185 tmp = __srmmu_get_nocache(size, align);
198 vaddr = (
unsigned long)addr;
200 printk(
"Vaddr %lx is smaller than nocache base 0x%lx\n",
204 if (vaddr + size > srmmu_nocache_end) {
205 printk(
"Vaddr %lx is bigger than nocache end 0x%lx\n",
206 vaddr, srmmu_nocache_end);
210 printk(
"Size 0x%x is not a power of 2\n", size);
214 printk(
"Size 0x%x is too small\n", size);
217 if (vaddr & (size - 1)) {
218 printk(
"Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
228 static void srmmu_early_allocate_ptable_skeleton(
unsigned long start,
232 static unsigned long __init probe_memory(
void)
234 unsigned long total = 0;
247 static void __init srmmu_nocache_calcsize(
void)
249 unsigned long sysmemavail = probe_memory() / 1024;
250 int srmmu_nocache_npages;
252 srmmu_nocache_npages =
264 srmmu_nocache_size = srmmu_nocache_npages *
PAGE_SIZE;
268 static void __init srmmu_nocache_init(
void)
270 unsigned int bitmap_bits;
275 unsigned long pteval;
288 init_mm.pgd = srmmu_swapper_pg_dir;
295 while (vaddr < srmmu_nocache_end) {
302 if (srmmu_cache_pagetables)
346 pgtable_page_ctor(page);
354 pgtable_page_dtor(pte);
365 #define NO_CONTEXT -1
374 static struct ctx_list *ctx_list_pool;
381 static inline void remove_from_ctx_list(
struct ctx_list *
entry)
/* Convenience wrappers: link @entry onto the free or used context list.
 * The argument is parenthesized in the expansion per standard macro
 * hygiene, guarding against surprising expressions at call sites. */
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, (entry))
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, (entry))
401 ctxp = ctx_free.next;
402 if (ctxp != &ctx_free) {
403 remove_from_ctx_list(ctxp);
409 ctxp = ctx_used.next;
410 if (ctxp->
ctx_mm == old_mm)
412 if (ctxp == &ctx_used)
413 panic(
"out of mmu contexts");
416 remove_from_ctx_list(ctxp);
423 static inline void free_context(
int context)
427 ctx_old = ctx_list_pool +
context;
428 remove_from_ctx_list(ctx_old);
432 static void __init sparc_context_init(
int numctx)
437 size = numctx *
sizeof(
struct ctx_list);
440 for (ctx = 0; ctx < numctx; ctx++) {
443 clist = (ctx_list_pool +
ctx);
447 ctx_free.next = ctx_free.prev = &ctx_free;
448 ctx_used.next = ctx_used.prev = &ctx_used;
449 for (ctx = 0; ctx < numctx; ctx++)
457 spin_lock(&srmmu_context_spinlock);
458 alloc_context(old_mm, mm);
459 spin_unlock(&srmmu_context_spinlock);
460 srmmu_ctxd_set(&srmmu_context_table[mm->
context], mm->
pgd);
467 hyper_flush_whole_icache();
473 static inline void srmmu_mapioaddr(
unsigned long physaddr,
474 unsigned long virt_addr,
int bus_type)
491 tmp |= (bus_type << 28);
498 unsigned long xva,
unsigned int len)
502 srmmu_mapioaddr(xpa, xva, bus);
509 static inline void srmmu_unmapioaddr(
unsigned long virt_addr)
527 srmmu_unmapioaddr(virt_addr);
551 unsigned long start,
unsigned long end);
559 unsigned long start,
unsigned long end);
568 if ((ctx1 = vma->
vm_mm->context) != -1) {
572 printk(
"flush ctx %02x curr %02x\n", ctx1, cctx);
574 swift_flush_page(page);
575 __asm__ __volatile__(
"sta %%g0, [%0] %1\n\t" : :
582 swift_flush_page(page);
584 __asm__ __volatile__(
"sta %%g0, [%0] %1\n\t" : :
644 static void __init early_pgtable_allocfail(
char *
type)
646 prom_printf(
"inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
650 static void __init srmmu_early_allocate_ptable_skeleton(
unsigned long start,
657 while (start < end) {
660 pmdp = __srmmu_get_nocache(
663 early_pgtable_allocfail(
"pmd");
671 early_pgtable_allocfail(
"pte");
675 if (start > (0xffffffffUL -
PMD_SIZE))
681 static void __init srmmu_allocate_ptable_skeleton(
unsigned long start,
688 while (start < end) {
693 early_pgtable_allocfail(
"pmd");
698 if (srmmu_pmd_none(*pmdp)) {
699 ptep = __srmmu_get_nocache(
PTE_SIZE,
702 early_pgtable_allocfail(
"pte");
706 if (start > (0xffffffffUL -
PMD_SIZE))
713 static inline unsigned long srmmu_probe(
unsigned long vaddr)
720 __asm__ __volatile__(
"lda [%1] %2, %0\n\t" :
734 static void __init srmmu_inherit_prom_mappings(
unsigned long start,
737 unsigned long probed;
744 while (start <= end) {
747 if (start == 0xfef00000)
749 probed = srmmu_probe(start);
780 early_pgtable_allocfail(
"pmd");
788 early_pgtable_allocfail(
"pte");
800 val = &pmdp->
pmdv[
x];
811 #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
814 static void __init do_large_mapping(
unsigned long vaddr,
unsigned long phys_base)
817 unsigned long big_pte;
824 static unsigned long __init map_spbank(
unsigned long vbase,
int sp_entry)
833 if (vstart < min_vaddr || vstart >= max_vaddr)
836 if (vend > max_vaddr || vend < min_vaddr)
839 while (vstart < vend) {
840 do_large_mapping(vstart, pstart);
846 static void __init map_kernel(
void)
854 for (i = 0;
sp_banks[
i].num_bytes != 0; i++) {
855 map_spbank((
unsigned long)
__va(
sp_banks[i].base_addr), i);
861 extern unsigned long bootmem_init(
unsigned long *pages_avail);
871 unsigned long pages_avail;
877 num_contexts = 65536;
882 while (cpunode != 0) {
883 prom_getstring(cpunode,
"device_type", node_str,
sizeof(node_str));
884 if (!
strcmp(node_str,
"cpu")) {
893 prom_printf(
"Something wrong, can't find cpu node in paging_init.\n");
900 srmmu_nocache_calcsize();
901 srmmu_nocache_init();
906 srmmu_context_table = __srmmu_get_nocache(num_contexts *
sizeof(
ctxd_t), num_contexts *
sizeof(
ctxd_t));
925 srmmu_allocate_ptable_skeleton(
927 srmmu_allocate_ptable_skeleton(
PKMAP_BASE, PKMAP_END);
937 sparc_context_init(num_contexts);
942 unsigned long zones_size[MAX_NR_ZONES];
943 unsigned long zholes_size[MAX_NR_ZONES];
944 unsigned long npages;
947 for (znum = 0; znum < MAX_NR_ZONES; znum++)
948 zones_size[znum] = zholes_size[znum] = 0;
952 zones_size[ZONE_DMA] = npages;
953 zholes_size[ZONE_DMA] = npages - pages_avail;
956 zones_size[ZONE_HIGHMEM] = npages;
968 "nocache total\t: %ld\n"
969 "nocache used\t: %d\n",
973 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
987 srmmu_ctxd_set(&srmmu_context_table[mm->
context], srmmu_swapper_pg_dir);
989 spin_lock(&srmmu_context_spinlock);
991 spin_unlock(&srmmu_context_spinlock);
997 static void __init srmmu_is_bad(
void)
999 prom_printf(
"Could not determine SRMMU chip type.\n");
1003 static void __init init_vac_layout(
void)
1011 unsigned long min_line_size = 0x10000000;
1017 if (!
strcmp(node_str,
"cpu")) {
1020 prom_printf(
"can't determine cache-line-size, halting.\n");
1024 if (cache_lines == -1) {
1025 prom_printf(
"can't determine cache-nlines, halting.\n");
1033 if (vac_line_size < min_line_size)
1050 vac_line_size = min_line_size;
1052 printk(
"SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1056 static void __cpuinit poke_hypersparc(
void)
1058 volatile unsigned long clear;
1061 hyper_flush_unconditional_combined();
1070 hyper_clear_all_tags();
1074 hyper_flush_whole_icache();
1079 static const struct sparc32_cachetlb_ops hypersparc_ops = {
1093 static void __init init_hypersparc(
void)
1101 sparc32_cachetlb_ops = &hypersparc_ops;
1131 static const struct sparc32_cachetlb_ops swift_ops = {
1145 #define SWIFT_MASKID_ADDR 0x10003018
1146 static void __init init_swift(
void)
1150 __asm__ __volatile__(
"lda [%1] %2, %0\n\t"
1151 "srl %0, 0x18, %0\n\t" :
1155 switch (swift_rev) {
1195 sparc32_cachetlb_ops = &swift_ops;
1208 static void turbosparc_flush_cache_all(
void)
1211 turbosparc_idflash_clear();
1214 static void turbosparc_flush_cache_mm(
struct mm_struct *mm)
1218 turbosparc_idflash_clear();
1222 static
void turbosparc_flush_cache_range(
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
1225 flush_user_windows();
1226 turbosparc_idflash_clear();
1233 flush_user_windows();
1235 turbosparc_flush_icache();
1236 turbosparc_flush_dcache();
1241 static
void turbosparc_flush_page_to_ram(
unsigned long page)
1243 #ifdef TURBOSPARC_WRITEBACK
1244 volatile unsigned long clear;
1246 if (srmmu_probe(page))
1247 turbosparc_flush_page_cache(page);
1252 static void turbosparc_flush_sig_insns(
struct mm_struct *mm,
unsigned long insn_addr)
1256 static void turbosparc_flush_page_for_dma(
unsigned long page)
1258 turbosparc_flush_dcache();
1261 static void turbosparc_flush_tlb_all(
void)
1263 srmmu_flush_whole_tlb();
1266 static void turbosparc_flush_tlb_mm(
struct mm_struct *mm)
1269 srmmu_flush_whole_tlb();
1273 static
void turbosparc_flush_tlb_range(
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
1276 srmmu_flush_whole_tlb();
1283 srmmu_flush_whole_tlb();
1288 static
void __cpuinit poke_turbosparc(
void)
1291 unsigned long ccreg;
1294 turbosparc_flush_cache_all();
1300 ccreg = turbosparc_get_ccreg();
1302 #ifdef TURBOSPARC_WRITEBACK
1315 switch (ccreg & 7) {
1322 turbosparc_set_ccreg(ccreg);
1329 static const struct sparc32_cachetlb_ops turbosparc_ops = {
1330 .cache_all = turbosparc_flush_cache_all,
1331 .cache_mm = turbosparc_flush_cache_mm,
1332 .cache_page = turbosparc_flush_cache_page,
1333 .cache_range = turbosparc_flush_cache_range,
1334 .tlb_all = turbosparc_flush_tlb_all,
1335 .tlb_mm = turbosparc_flush_tlb_mm,
1336 .tlb_page = turbosparc_flush_tlb_page,
1337 .tlb_range = turbosparc_flush_tlb_range,
1338 .page_to_ram = turbosparc_flush_page_to_ram,
1339 .sig_insns = turbosparc_flush_sig_insns,
1340 .page_for_dma = turbosparc_flush_page_for_dma,
1343 static void __init init_turbosparc(
void)
1347 sparc32_cachetlb_ops = &turbosparc_ops;
1351 static void __cpuinit poke_tsunami(
void)
1355 tsunami_flush_icache();
1356 tsunami_flush_dcache();
1362 static const struct sparc32_cachetlb_ops tsunami_ops = {
1376 static void __init init_tsunami(
void)
1386 sparc32_cachetlb_ops = &tsunami_ops;
1395 static int smp_catch;
1398 unsigned long mxcc_control = mxcc_get_creg();
1402 mxcc_set_creg(mxcc_control);
1413 unsigned long bpreg;
1418 bpreg = viking_get_bpreg();
1420 viking_set_bpreg(bpreg);
1434 static struct sparc32_cachetlb_ops viking_ops = {
1465 static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1480 static void __init init_viking(
void)
1505 srmmu_cache_pagetables = 1;
1508 sparc32_cachetlb_ops = (
const struct sparc32_cachetlb_ops *)
1512 sparc32_cachetlb_ops = (
const struct sparc32_cachetlb_ops *)
1513 &viking_sun4d_smp_ops;
1520 static void __init get_srmmu_type(
void)
1522 unsigned long mreg,
psr;
1523 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1529 mod_typ = (mreg & 0xf0000000) >> 28;
1530 mod_rev = (mreg & 0x0f000000) >> 24;
1531 psr_typ = (psr >> 28) & 0xf;
1532 psr_vers = (psr >> 24) & 0xf;
1556 prom_printf(
"Sparc-Linux Cypress support does not longer exit.\n");
1566 if (psr_typ == 0 && psr_vers == 5) {
1572 if (psr_typ == 0 && psr_vers == 4) {
1579 prom_getstring(cpunode,
"device_type", node_str,
sizeof(node_str));
1580 if (!
strcmp(node_str,
"cpu")) {
1597 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1603 if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1614 static void smp_flush_page_for_dma(
unsigned long page)
1628 xc0((smpfunc_t) local_ops->
tlb_all);
1632 static void smp_flush_cache_mm(
struct mm_struct *mm)
1636 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1638 if (!cpumask_empty(&cpu_mask))
1639 xc1((smpfunc_t) local_ops->
cache_mm, (
unsigned long) mm);
1648 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1650 if (!cpumask_empty(&cpu_mask)) {
1651 xc1((smpfunc_t) local_ops->
tlb_mm, (
unsigned long) mm);
1653 cpumask_copy(mm_cpumask(mm),
1661 unsigned long start,
1668 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1670 if (!cpumask_empty(&cpu_mask))
1672 (
unsigned long) vma, start, end);
1678 unsigned long start,
1685 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1687 if (!cpumask_empty(&cpu_mask))
1689 (
unsigned long) vma, start, end);
1694 static void smp_flush_cache_page(
struct vm_area_struct *vma,
unsigned long page)
1700 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1702 if (!cpumask_empty(&cpu_mask))
1704 (
unsigned long) vma, page);
1715 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1717 if (!cpumask_empty(&cpu_mask))
1718 xc2((smpfunc_t) local_ops->
tlb_page,
1719 (
unsigned long) vma, page);
1724 static void smp_flush_page_to_ram(
unsigned long page)
1738 static void smp_flush_sig_insns(
struct mm_struct *mm,
unsigned long insn_addr)
1741 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1743 if (!cpumask_empty(&cpu_mask))
1745 (
unsigned long) mm, insn_addr);
1749 static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1751 .cache_mm = smp_flush_cache_mm,
1752 .cache_page = smp_flush_cache_page,
1753 .cache_range = smp_flush_cache_range,
1758 .page_to_ram = smp_flush_page_to_ram,
1759 .sig_insns = smp_flush_sig_insns,
1760 .page_for_dma = smp_flush_page_for_dma,
1797 sparc32_cachetlb_ops = (
const struct sparc32_cachetlb_ops *)