25 #include <linux/kernel.h>
26 #include <linux/types.h>
28 #include <linux/string.h>
32 #include <asm/cacheflush.h>
33 #include <asm/pgtable.h>
36 #include <asm/setup.h>
/*
 * CPU erratum / instruction-set predicates consulted while assembling
 * the TLB handlers.  Their bodies fall outside this extract; judging by
 * the call sites below they return non-zero when the workaround or
 * feature applies — TODO confirm against the full file.
 */
60 static inline int r45k_bvahwbug(
void)
/* R4000-class 250MHz erratum predicate (used in build_update_entries). */
66 static inline int r4k_250MHZhwbug(
void)
/* Presumably true when the CPU has single-instruction bit-branch
 * (bbit0/bbit1) instructions — confirm. */
82 static int use_bbit_insns(
void)
/* Presumably true when indexed loads (lwx/ldx) are available — confirm. */
94 static int use_lwx_insns(
void)
103 #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
104 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Cavium Octeon: CVMSEG local memory can serve as handler scratch space. */
105 static bool scratchpad_available(
void)
/*
 * Byte offset of the i-th 8-byte scratch slot, counted down from the
 * top of the CVMSEG region (CVMSEG_SIZE is in 128-byte units).
 * NOTE(review): the -32768 bias presumably recentres the result into
 * the signed 16-bit displacement range of load/store encodings — confirm.
 */
109 static int scratchpad_offset(
int i)
116 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 *
i) - 32768;
/* Non-Octeon stubs (the #else/#endif around them is not visible here). */
119 static bool scratchpad_available(
void)
123 static int scratchpad_offset(
int i)
/* Extra uasm label id used only when huge pages are configured. */
161 #ifdef CONFIG_HUGETLB_PAGE
162 label_tlb_huge_update,
180 #ifdef CONFIG_HUGETLB_PAGE
/*
 * Presumably emits the branch-likely (bgezl) hazard-avoidance sequence;
 * 'instance' pairs each hazard with the matching label below so several
 * independent uses can coexist — body not visible in this extract.
 */
186 static void uasm_bgezl_hazard(
u32 **
p,
struct uasm_reloc **
r,
int instance)
/* Bind the label matching a previously emitted bgezl hazard 'instance'. */
197 static void uasm_bgezl_label(
struct uasm_label **
l,
u32 **
p,
int instance)
/*
 * Dump 'count' generated instruction words at pr_debug level, one
 * "<address>  .word 0x<insn>" line per instruction.
 */
211 static inline void dump_handler(
const u32 *handler,
int count)
218 for (i = 0; i <
count; i++)
219 pr_debug(
"\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
/* Coprocessor-0 registers as (register, select) pairs for UASM_i_MFC0 etc. */
229 #define C0_INDEX 0, 0
230 #define C0_ENTRYLO0 2, 0
231 #define C0_TCBIND 2, 2
232 #define C0_ENTRYLO1 3, 0
233 #define C0_CONTEXT 4, 0
234 #define C0_PAGEMASK 5, 0
235 #define C0_BADVADDR 8, 0
236 #define C0_ENTRYHI 10, 0
238 #define C0_XCONTEXT 20, 0
/*
 * GET_CONTEXT reads the context register holding the faulting-address
 * bits.  The #ifdef/#else selecting between the two variants is not
 * visible in this extract; presumably 64-bit builds use XContext.
 */
241 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
243 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
/* Label/relocation work arrays shared by every handler builder
 * (init-time only: __cpuinitdata). */
257 static struct uasm_label labels[128] __cpuinitdata;
258 static struct uasm_reloc relocs[128] __cpuinitdata;
/*
 * Allocate one of the CPU's free KScratch registers and mark it used in
 * kscratch_used_mask.  Fragment: the selection of bit 'r' out of the
 * candidate mask 'a' is not visible in this extract.
 */
268 static int __cpuinit allocate_kscratch(
void)
271 unsigned int a =
cpu_data[0].kscratch_mask & ~kscratch_used_mask;
280 kscratch_used_mask |= (1 <<
r);
/*
 * Fragment of the work-register setup helper (signature not visible).
 * Without a KScratch register the handler must locate a per-CPU save
 * area, so it reads the CPU id from a configuration-dependent CP0
 * source: register 20 is XContext and register 4 is Context (see the
 * C0_* defines above); the shift extracts the id field — the #else
 * arms pairing these assignments are not visible here.
 */
293 int smp_processor_id_reg;
294 int smp_processor_id_sel;
295 int smp_processor_id_shift;
297 if (scratch_reg > 0) {
307 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
308 smp_processor_id_shift = 51;
309 smp_processor_id_reg = 20;
310 smp_processor_id_sel = 0;
313 smp_processor_id_shift = 25;
314 smp_processor_id_reg = 4;
315 smp_processor_id_sel = 0;
318 smp_processor_id_shift = 26;
319 smp_processor_id_reg = 4;
320 smp_processor_id_sel = 0;
/* Read the selected CPU-id source register into K0. */
324 UASM_i_MFC0(p,
K0, smp_processor_id_reg, smp_processor_id_sel);
/* Undo the save above: reload whatever work registers were stashed
 * (KScratch fast path when scratch_reg > 0). */
345 static void __cpuinit build_restore_work_registers(
u32 **p)
347 if (scratch_reg > 0) {
356 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * Build the R3000-class TLB refill handler.  The emitted code walks the
 * two-level page table (K0 presumably holds the faulting address at
 * this point — the preceding loads are outside this extract):
 *   srl 22 / sll 2    - convert the address into a pgd byte index
 *   addu / lw (k1)    - fetch the pgd entry (page-table pointer)
 *   andi 0xffc / addu / lw - index the pte array, pte lands in k0
 * The entry setup and the tlbwr/rfe epilogue are not visible here.
 */
370 static void __cpuinit build_r3000_tlb_refill_handler(
void)
372 long pgdc = (
long)pgd_current;
375 memset(tlb_handler, 0,
sizeof(tlb_handler));
381 uasm_i_srl(&p,
K0,
K0, 22);
382 uasm_i_sll(&p,
K0,
K0, 2);
383 uasm_i_addu(&p,
K1,
K1,
K0);
385 uasm_i_lw(&p,
K1, 0,
K1);
386 uasm_i_andi(&p,
K0,
K0, 0xffc);
387 uasm_i_addu(&p,
K1,
K1,
K0);
388 uasm_i_lw(&p,
K0, 0,
K1);
/* The hardware exception slot only holds 32 instructions. */
396 if (p > tlb_handler + 32)
397 panic(
"TLB refill handler space exceeded");
399 pr_debug(
"Wrote TLB refill handler (%u instructions).\n",
400 (
unsigned int)(p - tlb_handler));
/*
 * Fragment of build_tlb_write_entry(): each emitted branch-likely
 * hazard is paired with its label through a shared instance number.
 */
505 uasm_bgezl_hazard(p, r, hazard_instance);
507 uasm_bgezl_label(l, p, hazard_instance);
/* Unknown CPU type: refuse to boot rather than run without a handler. */
612 panic(
"No TLB refill handler yet (CPU type: %d)",
624 #ifdef CONFIG_64BIT_PHYS_ADDR
632 #ifdef CONFIG_HUGETLB_PAGE
/*
 * Fragment of build_restore_pagemask(): after a huge-page TLB write,
 * rebuild PM_DEFAULT_MASK in 'tmp' — lui+ori when the mask needs more
 * than 16 bits, a single ori otherwise (the mtc0 installing it and the
 * zero-mask case are outside this extract).  The first copy runs when
 * the scratch register must also be restored, the second when not.
 */
640 if (restore_scratch) {
642 if (PM_DEFAULT_MASK >> 16) {
643 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
644 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
647 }
else if (PM_DEFAULT_MASK) {
648 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
/* Reload $1 (AT) from Octeon scratchpad slot 0. */
658 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
661 if (PM_DEFAULT_MASK >> 16) {
662 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
663 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
666 }
else if (PM_DEFAULT_MASK) {
667 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
/*
 * Huge-page TLB write: build PM_HUGE_MASK in 'tmp' (the mtc0 that
 * installs it into c0_pagemask is outside this extract), write the
 * entry, then restore the default pagemask — and optionally the
 * scratch register — via build_restore_pagemask().
 */
677 static __cpuinit void build_huge_tlb_write_entry(
u32 **p,
685 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
686 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
689 build_tlb_write_entry(p, l, r, wmode);
691 build_restore_pagemask(p, r, tmp,
label_leave, restore_scratch);
/*
 * Emit a test of the huge-page bit in a pmd entry, branching to label
 * 'lid' when it is set.  With bbit instructions the test is a single
 * branch; the fallback sequence is outside this extract.
 */
698 build_is_huge_pte(
u32 **p,
struct uasm_reloc **r,
unsigned int tmp,
699 unsigned int pmd,
int lid)
702 if (use_bbit_insns()) {
/*
 * Fragments of the huge-page helpers: build_huge_update_entries()
 * converts the pte into EntryLo format, and the handler tail (its
 * signature is outside this extract) writes the updated entry back
 * with an indexed TLB write.
 */
710 static __cpuinit void build_huge_update_entries(
u32 **p,
731 build_convert_pte_to_entrylo(p, pte);
755 build_huge_update_entries(p, pte, ptr);
756 build_huge_tlb_write_entry(p, l, r, pte,
tlb_indexed, 0);
/*
 * Fragment of build_get_pmde64(): emit code leaving a pointer to the
 * pmd entry for the faulting address in 'ptr'.  Depending on the
 * configuration the pgd base comes from c0_context
 * (CONFIG_MIPS_PGD_C0_CONTEXT), or from the per-CPU pgd_current[]
 * array indexed by CPU id (SMP); several #else/#endif arms of those
 * conditionals are not visible in this extract.
 */
767 unsigned int tmp,
unsigned int ptr)
769 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
770 long pgdc = (
long)pgd_current;
/* Large VA spaces: detect addresses above the implemented segment bits. */
777 if (check_for_high_segbits) {
796 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
/* Clear the low 23 bits then rebuild a kernel pointer: the
 * ori 0x540 / drotr 11 pair presumably re-forms the cached-kseg
 * upper address bits — TODO confirm against the full file. */
807 uasm_i_dins(p, ptr, 0, 0, 23);
810 uasm_i_ori(p, ptr, ptr, 0x540);
811 uasm_i_drotr(p, ptr, ptr, 11);
813 #elif defined(CONFIG_SMP)
814 # ifdef CONFIG_MIPS_MT_SMTC
/* SMTC keeps the CPU id in TCBind, hence the different shift. */
819 uasm_i_dsrl_safe(p, ptr, ptr, 19);
826 uasm_i_dsrl_safe(p, ptr, ptr, 23);
829 uasm_i_daddu(p, ptr, ptr, tmp);
837 uasm_l_vmalloc_done(l, *p);
/* Index the pgd; with a real pmd level, descend one more level. */
843 uasm_i_daddu(p, ptr, ptr, tmp);
844 #ifndef __PAGETABLE_PMD_FOLDED
846 uasm_i_ld(p, ptr, 0, ptr);
847 uasm_i_dsrl_safe(p, tmp, tmp,
PMD_SHIFT-3);
849 uasm_i_daddu(p, ptr, ptr, tmp);
/*
 * Fragment of build_get_pgd_vmalloc64(): the slow path for vmalloc
 * addresses.  When possible the swapper pgd load is folded into the
 * branch delay slot (single_insn_swpd); addresses beyond the
 * implemented segment bits divert to the large_segbits_fault label.
 */
859 unsigned int bvaddr,
unsigned int ptr,
863 int single_insn_swpd;
864 int did_vmalloc_branch = 0;
868 uasm_l_vmalloc(l, *p);
870 if (mode !=
not_refill && check_for_high_segbits) {
871 if (single_insn_swpd) {
874 did_vmalloc_branch = 1;
880 if (!did_vmalloc_branch) {
893 if (mode !=
not_refill && check_for_high_segbits) {
894 uasm_l_large_segbits_fault(l, *p);
/* Restore $1 (AT) from Octeon scratchpad slot 0 before faulting. */
914 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
/*
 * 32-bit variant: emit code leaving a pointer to the pgd entry for the
 * faulting address in 'ptr'.  On SMP the per-CPU pgd_current slot is
 * located via the CPU id (read from TCBind under SMTC, hence the
 * different shift) — several conditional arms are outside this extract.
 */
928 build_get_pgde32(
u32 **p,
unsigned int tmp,
unsigned int ptr)
930 long pgdc = (
long)pgd_current;
934 #ifdef CONFIG_MIPS_MT_SMTC
940 uasm_i_srl(p, ptr, ptr, 19);
947 uasm_i_srl(p, ptr, ptr, 23);
949 uasm_i_addu(p, ptr, tmp, ptr);
964 uasm_i_addu(p, ptr, ptr, tmp);
/* Fragment of build_adjust_context(): mask off the pte index bits. */
992 uasm_i_andi(p, ctx, ctx, mask);
/*
 * Emit code leaving a pointer to the even/odd pte pair in 'ptr', using
 * the context register via build_adjust_context().
 */
995 static void __cpuinit build_get_ptep(
u32 **p,
unsigned int tmp,
unsigned int ptr)
1025 build_adjust_context(p, tmp);
/*
 * Load the even/odd pte pair and convert both into EntryLo format.
 * 64-bit-physaddr kernels use full 64-bit loads; otherwise two lw's at
 * half-pte offsets fetch the hardware halves.  The interleaved
 * r45k/r4k-250MHz predicates insert erratum workarounds (the emitted
 * workaround instructions themselves are outside this extract).
 */
1029 static void __cpuinit build_update_entries(
u32 **p,
unsigned int tmp,
1036 #ifdef CONFIG_64BIT_PHYS_ADDR
1038 uasm_i_ld(p, tmp, 0, ptep);
1039 uasm_i_ld(p, ptep,
sizeof(
pte_t), ptep);
1051 int pte_off_even =
sizeof(
pte_t) / 2;
1052 int pte_off_odd = pte_off_even +
sizeof(
pte_t);
1055 uasm_i_lw(p, tmp, pte_off_even, ptep);
1057 uasm_i_lw(p, ptep, pte_off_odd, ptep);
1063 if (r45k_bvahwbug())
1064 build_tlb_probe_entry(p);
1067 if (r4k_250MHZhwbug())
1073 if (r4k_250MHZhwbug())
1077 if (r45k_bvahwbug())
1080 if (r4k_250MHZhwbug())
/*
 * Fragment of build_fast_tlb_refill_handler(): scratch-register fast
 * refill path.  A KScratch register (c0_scratch >= 0) or the Octeon
 * scratchpad preserves the extra work register; the vmalloc branch
 * delay slot is opportunistically filled with the pgd-index shift
 * (vmalloc_branch_delay_filled).  With CONFIG_MIPS_PGD_C0_CONTEXT the
 * pgd base is recovered from c0_context via the dins / ori 0x540 /
 * drotr 11 sequence; with lwx/ldx the indexed loads save the explicit
 * daddu.  Many #else arms and the pte/EntryLo epilogue are outside
 * this extract.
 */
1094 unsigned int ptr,
int c0_scratch)
1097 unsigned int even, odd;
1098 int vmalloc_branch_delay_filled = 0;
1104 if (check_for_high_segbits) {
/* Save the clobbered register: KScratch if available, else scratchpad. */
1112 if (c0_scratch >= 0)
1115 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1117 uasm_i_dsrl_safe(p, scratch, tmp,
1121 if (pgd_reg == -1) {
1122 vmalloc_branch_delay_filled = 1;
1124 uasm_i_dins(p, ptr, 0, 0, 23);
1134 if (c0_scratch >= 0)
1137 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1141 uasm_i_dins(p, ptr, 0, 0, 23);
1146 if (pgd_reg == -1) {
1147 vmalloc_branch_delay_filled = 1;
1149 uasm_i_ori(p, ptr, ptr, 0x540);
1150 uasm_i_drotr(p, ptr, ptr, 11);
/* With a folded pmd the pte pointer ends up in 'scratch'. */
1153 #ifdef __PAGETABLE_PMD_FOLDED
1154 #define LOC_PTEP scratch
1156 #define LOC_PTEP ptr
1159 if (!vmalloc_branch_delay_filled)
1161 uasm_i_dsrl_safe(p, scratch, tmp,
PGDIR_SHIFT - 3);
1163 uasm_l_vmalloc_done(l, *p);
1171 if (vmalloc_branch_delay_filled)
1173 uasm_i_dsrl_safe(p, scratch, tmp,
PGDIR_SHIFT - 3);
1175 #ifdef __PAGETABLE_PMD_FOLDED
/* Mask to a pgd index (each entry is 8 bytes, hence << 3). */
1178 uasm_i_andi(p, scratch, scratch, (
PTRS_PER_PGD - 1) << 3);
1180 if (use_lwx_insns()) {
1183 uasm_i_daddu(p, ptr, ptr, scratch);
1184 uasm_i_ld(p, LOC_PTEP, 0, ptr);
1187 #ifndef __PAGETABLE_PMD_FOLDED
/* Three-level tables: repeat the index/load at the pmd level. */
1189 uasm_i_dsrl_safe(p, scratch, tmp,
PMD_SHIFT - 3);
1190 uasm_i_andi(p, scratch, scratch, (
PTRS_PER_PMD - 1) << 3);
1193 if (use_lwx_insns()) {
1196 uasm_i_daddu(p, ptr, ptr, scratch);
1201 build_adjust_context(p, tmp);
1203 #ifdef CONFIG_HUGETLB_PAGE
1210 if (use_lwx_insns())
1216 if (use_lwx_insns()) {
/* Epilogue: restore the saved register before the leave label. */
1240 if (c0_scratch >= 0) {
1243 uasm_l_leave(l, *p);
1247 uasm_l_leave(l, *p);
1248 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1250 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1252 uasm_l_leave(l, *p);
/*
 * Budget of the hardware exception slot used when deciding whether and
 * where to split the (possibly oversized) 64-bit refill handler.
 */
1267 #define MIPS64_REFILL_INSNS 32
/*
 * Build the R4000-class TLB refill handler into tlb_handler[], then
 * copy (and if necessary split) it into final_handler[] so it fits the
 * exception slot.  Uses the scratch-register fast path when a KScratch
 * register or the Octeon scratchpad plus bbit instructions are
 * available; otherwise performs the generic page-table walk.
 */
1269 static void __cpuinit build_r4000_tlb_refill_handler(
void)
1271 u32 *p = tlb_handler;
1275 unsigned int final_len;
1279 memset(tlb_handler, 0,
sizeof(tlb_handler));
1280 memset(labels, 0,
sizeof(labels));
1281 memset(relocs, 0,
sizeof(relocs));
1282 memset(final_handler, 0,
sizeof(final_handler));
1284 if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
1285 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r,
K0,
K1,
/* BCM1250 pass M3 workaround: xor/shift/or sequence masks spurious
 * bits above the 44 implemented segment bits out of the
 * badvaddr/entryhi comparison. */
1295 if (bcm1250_m3_war()) {
1296 unsigned int segbits = 44;
1300 uasm_i_xor(&p,
K0,
K0,
K1);
1301 uasm_i_dsrl_safe(&p,
K1,
K0, 62);
1302 uasm_i_dsrl_safe(&p,
K0,
K0, 12 + 1);
1303 uasm_i_dsll_safe(&p,
K0,
K0, 64 + 12 + 1 - segbits);
1304 uasm_i_or(&p,
K0,
K0,
K1);
/* Generic walk: 64-bit pmd walk or 32-bit pgd walk. */
1310 build_get_pmde64(&p, &l, &r,
K0,
K1);
1312 build_get_pgde32(&p,
K0,
K1);
1315 #ifdef CONFIG_HUGETLB_PAGE
1316 build_is_huge_pte(&p, &r,
K0,
K1, label_tlb_huge_update);
1319 build_get_ptep(&p,
K0,
K1);
1320 build_update_entries(&p,
K0,
K1);
1321 build_tlb_write_entry(&p, &l, &r,
tlb_random);
1322 uasm_l_leave(&l, p);
1325 #ifdef CONFIG_HUGETLB_PAGE
1326 uasm_l_tlb_huge_update(&l, p);
1327 build_huge_update_entries(&p, htlb_info.
huge_pte,
K1);
1333 build_get_pgd_vmalloc64(&p, &l, &r,
K0,
K1, vmalloc_mode);
/* Overflow checks: 32-bit/Loongson2 get a 64-instruction budget. */
1344 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1345 if ((p - tlb_handler) > 64)
1346 panic(
"TLB refill handler space exceeded");
1352 panic(
"TLB refill handler space exceeded");
1358 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1362 final_len = p - tlb_handler;
/* Fits the slot as-is: copy whole. */
1365 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1368 final_len = p - tlb_handler;
/* Too big: split, preferably at the huge-page label so the first part
 * fits the slot and branches to the remainder. */
1370 #if defined(CONFIG_HUGETLB_PAGE)
1371 const enum label_id ls = label_tlb_huge_update;
1379 for (i = 0; i <
ARRAY_SIZE(labels) && labels[
i].lab != ls; i++)
1382 split = labels[
i].addr;
/* Label outside the reachable window: fall back to splitting two
 * instructions before the slot boundary. */
1387 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1388 split < p - MIPS64_REFILL_INSNS)
1397 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1409 f += split - tlb_handler;
1413 uasm_l_split(&l, final_handler);
1419 split, split + 1, f);
1434 pr_debug(
"Wrote TLB refill handler (%u instructions).\n",
/* Instruction budget for each load/store/modify fastpath handler. */
1446 #define FASTPATH_SIZE 128
1451 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * Build the helper invoked on context switch to install a new pgd
 * pointer.  Prefers a dedicated KScratch register (allocate_kscratch());
 * when none is free (pgd_reg == -1) the pgd is packed into c0_context
 * with the dinsm below (presumably paired with the ori/drotr unpacking
 * in build_get_pmde64 — confirm against the full file).
 */
1454 static void __cpuinit build_r4000_setup_pgd(
void)
1458 u32 *p = tlbmiss_handler_setup_pgd;
1462 memset(tlbmiss_handler_setup_pgd, 0,
sizeof(tlbmiss_handler_setup_pgd));
1463 memset(labels, 0,
sizeof(labels));
1464 memset(relocs, 0,
sizeof(relocs));
1466 pgd_reg = allocate_kscratch();
1468 if (pgd_reg == -1) {
1480 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1481 uasm_l_tlbl_goaround1(&l, p);
1490 if (p - tlbmiss_handler_setup_pgd >
ARRAY_SIZE(tlbmiss_handler_setup_pgd))
1491 panic(
"tlbmiss_handler_setup_pgd space exceeded");
1493 pr_debug(
"Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1494 (
unsigned int)(p - tlbmiss_handler_setup_pgd));
1496 dump_handler(tlbmiss_handler_setup_pgd,
/*
 * Emit the pte load for the fastpath handlers: linked load (ll/lld)
 * when the R10000 ll/sc workaround applies, plain lw/ld otherwise;
 * 64-bit forms under CONFIG_64BIT_PHYS_ADDR.
 */
1502 iPTE_LW(
u32 **p,
unsigned int pte,
unsigned int ptr)
1505 # ifdef CONFIG_64BIT_PHYS_ADDR
1507 uasm_i_lld(p, pte, 0, ptr);
1512 # ifdef CONFIG_64BIT_PHYS_ADDR
1514 uasm_i_ld(p, pte, 0, ptr);
/*
 * Emit the matching pte update: or in the caller-supplied 'mode' bits
 * and store back — sc/scd (with a reloc-driven retry on failure) under
 * the R10000 workaround, plain sw/sd otherwise.  When 64-bit ptes are
 * handled with 32-bit operations, the hardware half at sizeof(pte_t)/2
 * is updated separately with the 'hwmode' bits.
 */
1522 iPTE_SW(
u32 **p,
struct uasm_reloc **r,
unsigned int pte,
unsigned int ptr,
1525 #ifdef CONFIG_64BIT_PHYS_ADDR
1529 uasm_i_ori(p, pte, pte, mode);
1531 # ifdef CONFIG_64BIT_PHYS_ADDR
1533 uasm_i_scd(p, pte, 0, ptr);
1538 if (r10000_llsc_war())
1543 # ifdef CONFIG_64BIT_PHYS_ADDR
1546 uasm_i_ll(p, pte,
sizeof(
pte_t) / 2, ptr);
1547 uasm_i_ori(p, pte, pte, hwmode);
1548 uasm_i_sc(p, pte,
sizeof(
pte_t) / 2, ptr);
1551 uasm_i_lw(p, pte, 0, ptr);
1558 # ifdef CONFIG_64BIT_PHYS_ADDR
1560 uasm_i_sd(p, pte, 0, ptr);
1565 # ifdef CONFIG_64BIT_PHYS_ADDR
1567 uasm_i_lw(p, pte,
sizeof(
pte_t) / 2, ptr);
1568 uasm_i_ori(p, pte, pte, hwmode);
1569 uasm_i_sw(p, pte,
sizeof(
pte_t) / 2, ptr);
1570 uasm_i_lw(p, pte, 0, ptr);
/*
 * Fragment of build_pte_present(): branch to 'lid' unless the pte is
 * present.  With bbit instructions no temporary is needed; otherwise
 * the test uses 'scratch', or clobbers 'pte' (which must then be
 * reloaded via iPTE_LW before the write-back paths run).
 */
1583 int pte,
int ptr,
int scratch,
enum label_id lid)
1585 int t = scratch >= 0 ? scratch :
pte;
1588 if (use_bbit_insns()) {
1596 iPTE_LW(p, pte, ptr);
1604 iPTE_LW(p, pte, ptr);
/* Or the caller-supplied 'mode' bits into the pte and store it back. */
1610 build_make_valid(
u32 **p,
struct uasm_reloc **r,
unsigned int pte,
1615 iPTE_SW(p, r, pte, ptr, mode);
/*
 * Fragment of build_pte_writable(): branch to 'lid' unless the pte is
 * writable, reloading the pte when it was used as the temporary.
 */
1624 unsigned int pte,
unsigned int ptr,
int scratch,
1627 int t = scratch >= 0 ? scratch :
pte;
1634 iPTE_LW(p, pte, ptr);
/* Same pattern as build_make_valid: set mode bits and write back. */
1643 build_make_write(
u32 **p,
struct uasm_reloc **r,
unsigned int pte,
1649 iPTE_SW(p, r, pte, ptr, mode);
/*
 * Fragment of build_pte_modifiable(): branch to 'lid' when the pte may
 * not be modified.
 */
1658 unsigned int pte,
unsigned int ptr,
int scratch,
1661 if (use_bbit_insns()) {
1665 int t = scratch >= 0 ? scratch :
pte;
1670 iPTE_LW(p, pte, ptr);
1674 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 fastpath tail: rewrite the indexed TLB slot with the updated
 * pte (the entrylo/tlbwi instructions are outside this extract);
 * c0_epc is read into 'tmp' for the return jump (return sequence also
 * outside this extract).
 */
1686 build_r3000_pte_reload_tlbwi(
u32 **p,
unsigned int pte,
unsigned int tmp)
1689 uasm_i_mfc0(p, tmp,
C0_EPC);
/*
 * As above, but probes the TLB first and takes the
 * r3000_write_probe_fail path when the probe misses.
 */
1702 build_r3000_tlb_reload_write(
u32 **p,
struct uasm_label **l,
1709 uasm_i_mfc0(p, tmp,
C0_EPC);
1713 uasm_l_r3000_write_probe_fail(l, *p);
/*
 * Common head of the R3000 load/store/modify handlers: the same
 * two-level walk as the refill handler (srl/sll pgd index, lw pgd
 * entry, andi/addu/lw pte), leaving the pte in 'pte' and its address
 * in 'ptr'.
 */
1720 build_r3000_tlbchange_handler_head(
u32 **p,
unsigned int pte,
1723 long pgdc = (
long)pgd_current;
1728 uasm_i_srl(p, pte, pte, 22);
1729 uasm_i_sll(p, pte, pte, 2);
1730 uasm_i_addu(p, ptr, ptr, pte);
1732 uasm_i_lw(p, ptr, 0, ptr);
1733 uasm_i_andi(p, pte, pte, 0xffc);
1734 uasm_i_addu(p, ptr, ptr, pte);
1735 uasm_i_lw(p, pte, 0, ptr);
/*
 * Build the R3000 TLB load-miss fastpath: walk the page table, check
 * the pte (check emitted outside this extract), mark it with the
 * valid/accessed mode bits and rewrite the TLB entry; failures reach
 * nopage_tlbl and fall through to the generic fault path.
 */
1739 static void __cpuinit build_r3000_tlb_load_handler(
void)
1746 memset(labels, 0,
sizeof(labels));
1747 memset(relocs, 0,
sizeof(relocs));
1749 build_r3000_tlbchange_handler_head(&p,
K0,
K1);
1752 build_make_valid(&p, &r,
K0,
K1);
1753 build_r3000_tlb_reload_write(&p, &l, &r,
K0,
K1);
1755 uasm_l_nopage_tlbl(&l, p);
1760 panic(
"TLB load handler fastpath space exceeded");
1763 pr_debug(
"Wrote TLB load handler fastpath (%u instructions).\n",
/*
 * Build the R3000 TLB store-miss fastpath: same shape as the load
 * handler but marks the pte writable/dirty via build_make_write().
 */
1769 static void __cpuinit build_r3000_tlb_store_handler(
void)
1776 memset(labels, 0,
sizeof(labels));
1777 memset(relocs, 0,
sizeof(relocs));
1779 build_r3000_tlbchange_handler_head(&p,
K0,
K1);
1782 build_make_write(&p, &r,
K0,
K1);
1783 build_r3000_tlb_reload_write(&p, &l, &r,
K0,
K1);
1785 uasm_l_nopage_tlbs(&l, p);
1790 panic(
"TLB store handler fastpath space exceeded");
1793 pr_debug(
"Wrote TLB store handler fastpath (%u instructions).\n",
/*
 * Build the R3000 TLB modify fastpath: the entry is already in the TLB
 * (tlbp probe unnecessary in the tail, hence pte_reload_tlbwi instead
 * of the reload_write variant used by load/store).
 */
1799 static void __cpuinit build_r3000_tlb_modify_handler(
void)
1806 memset(labels, 0,
sizeof(labels));
1807 memset(relocs, 0,
sizeof(relocs));
1809 build_r3000_tlbchange_handler_head(&p,
K0,
K1);
1812 build_make_write(&p, &r,
K0,
K1);
1813 build_r3000_pte_reload_tlbwi(&p,
K0,
K1);
1815 uasm_l_nopage_tlbm(&l, p);
1820 panic(
"TLB modify handler fastpath space exceeded");
1823 pr_debug(
"Wrote TLB modify handler fastpath (%u instructions).\n",
/*
 * Fragment of build_r4000_tlbchange_handler_head(): locate the pte
 * (64-bit pmd walk or 32-bit pgd walk), divert huge pages, and reload
 * the pte after the smp_pgtable_change label; the probe is emitted
 * here only when the m4kc tlbp workaround does not force a later slot.
 */
1840 build_get_pmde64(p, l, r, wr.
r1, wr.
r2);
1842 build_get_pgde32(p, wr.
r1, wr.
r2);
1845 #ifdef CONFIG_HUGETLB_PAGE
1851 build_is_huge_pte(p, r, wr.
r1, wr.
r2, label_tlb_huge_update);
1861 uasm_l_smp_pgtable_change(l, *p);
1863 iPTE_LW(p, wr.
r1, wr.
r2);
1864 if (!m4kc_tlbp_war())
1865 build_tlb_probe_entry(p);
/*
 * Common tail of the R4000 change handlers: the ori/xori pair with
 * sizeof(pte_t) clears that bit, rounding 'ptr' down to the even pte
 * of the pair; then refill EntryLo0/1, write the entry, restore the
 * work registers and leave.  Non-refill vmalloc faults route through
 * build_get_pgd_vmalloc64(..., not_refill).
 */
1870 build_r4000_tlbchange_handler_tail(
u32 **p,
struct uasm_label **l,
1874 uasm_i_ori(p, ptr, ptr,
sizeof(
pte_t));
1875 uasm_i_xori(p, ptr, ptr,
sizeof(
pte_t));
1876 build_update_entries(p, tmp, ptr);
1878 uasm_l_leave(l, *p);
1879 build_restore_work_registers(p);
1883 build_get_pgd_vmalloc64(p, l, r, tmp, ptr,
not_refill);
/*
 * Build the R4000 TLB load-miss fastpath handler.  After the common
 * head, the pte is checked for presence/validity; the andi-with-
 * sizeof(pte_t) / andi-2 sequences below are the non-bbit fallbacks
 * that select the pte half and flag bit (presumably the RI/XI
 * read-inhibit test — confirm against the full file).  Valid ptes get
 * the mode bits set and the entry rewritten; a parallel sequence
 * handles huge (pmd-level) ptes.  Failures branch to nopage_tlbl,
 * which restores the work registers and exits to the generic fault
 * path (jump outside this extract).
 */
1887 static void __cpuinit build_r4000_tlb_load_handler(
void)
1895 memset(labels, 0,
sizeof(labels));
1896 memset(relocs, 0,
sizeof(relocs));
/* BCM1250 pass M3: mask bits above the 44 implemented segment bits
 * out of the badvaddr/entryhi comparison. */
1898 if (bcm1250_m3_war()) {
1899 unsigned int segbits = 44;
1903 uasm_i_xor(&p,
K0,
K0,
K1);
1904 uasm_i_dsrl_safe(&p,
K1,
K0, 62);
1905 uasm_i_dsrl_safe(&p,
K0,
K0, 12 + 1);
1906 uasm_i_dsll_safe(&p,
K0,
K0, 64 + 12 + 1 - segbits);
1907 uasm_i_or(&p,
K0,
K0,
K1);
1912 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1914 if (m4kc_tlbp_war())
1915 build_tlb_probe_entry(&p);
1922 if (use_bbit_insns()) {
1933 if (use_bbit_insns()) {
1936 uasm_i_andi(&p, wr.
r3, wr.
r2,
sizeof(
pte_t));
1947 if (use_bbit_insns()) {
1950 uasm_l_tlbl_goaround1(&l, p);
1952 uasm_i_andi(&p, wr.
r3, wr.
r3, 2);
1956 uasm_l_tlbl_goaround1(&l, p);
1958 build_make_valid(&p, &r, wr.
r1, wr.
r2);
1959 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.
r1, wr.
r2);
1961 #ifdef CONFIG_HUGETLB_PAGE
/* Huge-page path: reload the pmd-level pte and repeat the checks. */
1966 uasm_l_tlb_huge_update(&l, p);
1967 iPTE_LW(&p, wr.
r1, wr.
r2);
1969 build_tlb_probe_entry(&p);
1976 if (use_bbit_insns()) {
1987 if (use_bbit_insns()) {
1990 uasm_i_andi(&p, wr.
r3, wr.
r2,
sizeof(
pte_t));
2001 if (use_bbit_insns()) {
2004 uasm_i_andi(&p, wr.
r3, wr.
r3, 2);
2007 if (PM_DEFAULT_MASK == 0)
2015 uasm_l_tlbl_goaround2(&l, p);
2018 build_huge_handler_tail(&p, &r, &l, wr.
r1, wr.
r2);
2021 uasm_l_nopage_tlbl(&l, p);
2022 build_restore_work_registers(&p);
2027 panic(
"TLB load handler fastpath space exceeded");
2030 pr_debug(
"Wrote TLB load handler fastpath (%u instructions).\n",
/*
 * Build the R4000 TLB store-miss fastpath handler: verify the pte is
 * writable (check outside this extract), set the write-back mode bits
 * via build_make_write() and rewrite the entry, with a parallel
 * huge-page path; failures reach nopage_tlbs.
 */
2036 static void __cpuinit build_r4000_tlb_store_handler(
void)
2044 memset(labels, 0,
sizeof(labels));
2045 memset(relocs, 0,
sizeof(relocs));
2047 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2049 if (m4kc_tlbp_war())
2050 build_tlb_probe_entry(&p);
2051 build_make_write(&p, &r, wr.
r1, wr.
r2);
2052 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.
r1, wr.
r2);
2054 #ifdef CONFIG_HUGETLB_PAGE
/* Huge-page path: reload the pmd-level pte, or in the mode bits
 * (operand continues outside this extract) and write it back. */
2059 uasm_l_tlb_huge_update(&l, p);
2060 iPTE_LW(&p, wr.
r1, wr.
r2);
2062 build_tlb_probe_entry(&p);
2063 uasm_i_ori(&p, wr.
r1, wr.
r1,
2065 build_huge_handler_tail(&p, &r, &l, wr.
r1, wr.
r2);
2068 uasm_l_nopage_tlbs(&l, p);
2069 build_restore_work_registers(&p);
2074 panic(
"TLB store handler fastpath space exceeded");
2077 pr_debug(
"Wrote TLB store handler fastpath (%u instructions).\n",
/*
 * Build the R4000 TLB modify fastpath handler: same shape as the store
 * handler (entry already present, write permission being upgraded);
 * failures reach nopage_tlbm.
 */
2083 static void __cpuinit build_r4000_tlb_modify_handler(
void)
2091 memset(labels, 0,
sizeof(labels));
2092 memset(relocs, 0,
sizeof(relocs));
2094 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2096 if (m4kc_tlbp_war())
2097 build_tlb_probe_entry(&p);
2099 build_make_write(&p, &r, wr.
r1, wr.
r2);
2100 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.
r1, wr.
r2);
2102 #ifdef CONFIG_HUGETLB_PAGE
/* Huge-page path mirrors the store handler's. */
2107 uasm_l_tlb_huge_update(&l, p);
2108 iPTE_LW(&p, wr.
r1, wr.
r2);
2110 build_tlb_probe_entry(&p);
2111 uasm_i_ori(&p, wr.
r1, wr.
r1,
2113 build_huge_handler_tail(&p, &r, &l, wr.
r1, wr.
r2);
2116 uasm_l_nopage_tlbm(&l, p);
2117 build_restore_work_registers(&p);
2122 panic(
"TLB modify handler fastpath space exceeded");
2125 pr_debug(
"Wrote TLB modify handler fastpath (%u instructions).\n",
/*
 * Fragment of build_tlb_refill_handler(), the per-CPU entry point.
 * run_once guards the one-time build of the shared handlers.  The CPU
 * type dispatch (switch not visible here) selects the R3000- or
 * R4000-style builders and panics for unsupported families; afterwards
 * each generated handler is flushed from the icache (the
 * flush_icache_range call heads for the argument lines below are not
 * visible in this extract).
 */
2138 static int run_once = 0;
2152 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2153 build_r3000_tlb_refill_handler();
2155 build_r3000_tlb_load_handler();
2156 build_r3000_tlb_store_handler();
2157 build_r3000_tlb_modify_handler();
2161 panic(
"No R3000 TLB refill handler");
2167 panic(
"No R6000 TLB refill handler yet");
2171 panic(
"No R8000 TLB refill handler yet");
/* R4000 family: grab a KScratch register once, then build everything. */
2176 scratch_reg = allocate_kscratch();
2177 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2178 build_r4000_setup_pgd();
2180 build_r4000_tlb_load_handler();
2181 build_r4000_tlb_store_handler();
2182 build_r4000_tlb_modify_handler();
2185 build_r4000_tlb_refill_handler();
2192 (
unsigned long)handle_tlbl +
sizeof(handle_tlbl));
2194 (
unsigned long)handle_tlbs +
sizeof(handle_tlbs));
2196 (
unsigned long)handle_tlbm +
sizeof(handle_tlbm));
2197 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * Flush the generated setup_pgd handler from the icache.  The range
 * end previously used sizeof(handle_tlbm) — the wrong array — so the
 * flush covered the wrong extent of memory after
 * tlbmiss_handler_setup_pgd; size the range by the handler's own array.
 */
2199 (
unsigned long)tlbmiss_handler_setup_pgd +
sizeof(tlbmiss_handler_setup_pgd));