#ifndef _ASM_TILE_PGTABLE_64_H
#define _ASM_TILE_PGTABLE_64_H
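
/*
 * Top-level (PGD) layout: the PGD holds PTRS_PER_PGD entries, and each
 * entry spans HV_L1_SPAN bytes of virtual address space.
 */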
#define PGDIR_SHIFT HV_LOG2_L1_SPAN
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
#define PGD_INDEX(va) HV_L0_INDEX(va)
#define SIZEOF_PGD HV_L0_SIZE
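
/*
 * Middle-level (PMD) layout: each PMD entry maps one huge page, so
 * PMD_SHIFT/PMD_SIZE track HPAGE_SHIFT/HPAGE_SIZE.
 */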
#define PMD_SHIFT HPAGE_SHIFT
#define PMD_SIZE HPAGE_SIZE
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
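
/*
 * Bottom-level (PTE) layout: the number of PTEs per page table is set
 * by the difference between the huge page size and the normal page size.
 */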
#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
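
/*
 * The vmalloc region ends where the huge-page VMAP region begins, and
 * the huge VMAP region itself covers a single PGD entry's span.
 */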
#define _VMALLOC_END HUGE_VMAP_BASE
#define VMALLOC_END _VMALLOC_END
#define VMALLOC_START _VMALLOC_START

#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)

#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))

static inline void pud_clear(pud_t *pudp)
{
	__pte_clear(&pudp->pgd);
}
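
/* Extract the page table frame number (ptfn) that a pud entry points at. */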
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
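
/*
 * Kernel virtual address of the pmd page table that a pud entry
 * references.
 */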
#define pud_page_vaddr(pud) \
	(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
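
/* The struct page containing the pmd page table that a pud entry references. */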
#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
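
/* Locate the pmd entry for an address within the pmd table a pud points at. */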
#define pmd_offset(pud, address) \
	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
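
/*
 * Defining pgd_addr_normalize to itself is the usual marker that this
 * architecture supplies its own pgd_addr_normalize() helper.
 */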
#define pgd_addr_normalize pgd_addr_normalize
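
/*
 * Addresses in the hypervisor range, or in the hole between low and
 * high memory, have no pgd entries.
 */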
static inline int pgd_addr_invalid(unsigned long addr)
{
	return addr >= MEM_HV_START ||
		(addr > MEM_LOW_END && addr < MEM_HIGH_START);
}
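
/*
 * The __HAVE_ARCH_* defines below tell the generic pgtable code that
 * this architecture provides its own versions of these pte helpers.
 */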
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#define __HAVE_ARCH_PTEP_SET_WRPROTECT

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return hv_pte(__insn_exch(&ptep->val, 0UL));
}
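
/* pmds and ptes share the same representation, so converting is a no-op. */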
#define pmd_pte(pmd) (pmd)
#define pmdp_ptep(pmdp) (pmdp)
#define pte_pmd(pte) (pte)