#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
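
The bit positions above carve the 64-bit descriptor into a hardware-defined valid bit (bit 0) and software-only bits kept in the ignored upper range (bits 55 and 56); _AT() is just a cast wrapper so the same constants work from both assembly and C. A minimal userspace sketch of the same arithmetic, using a plain uint64_t in place of pteval_t and hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;               /* stand-in for the kernel type */

#define BIT_VALID   ((pteval_t)1 << 0)   /* mirrors PTE_VALID */
#define BIT_DIRTY   ((pteval_t)1 << 55)  /* mirrors PTE_DIRTY */
#define BIT_SPECIAL ((pteval_t)1 << 56)  /* mirrors PTE_SPECIAL */

int main(void)
{
	pteval_t pte = 0;

	pte |= BIT_VALID | BIT_DIRTY;    /* mark the entry present and dirty */

	printf("present=%d dirty=%d special=%d\n",
	       !!(pte & BIT_VALID), !!(pte & BIT_DIRTY), !!(pte & BIT_SPECIAL));
	return 0;
}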

#define VMALLOC_START UL(0xffffff8000000000)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)

#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))

#define FIRST_USER_ADDRESS 0

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

#define _PAGE_DEFAULT (PTE_TYPE_PAGE | PTE_AF)

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)

#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
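
Both families of protection macros are plain bitwise composition: _MOD_PROT() layers extra bits on top of the runtime pgprot_default, while the __PAGE_* variants start from the compile-time _PAGE_DEFAULT. A standalone sketch of that composition, using illustrative placeholder bit positions rather than the real arm64 encodings:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pgprot_t;

/* illustrative placeholder bits, not the real arm64 encodings */
#define P_TYPE_PAGE ((pgprot_t)1 << 1)
#define P_AF        ((pgprot_t)1 << 10)
#define P_USER      ((pgprot_t)1 << 6)
#define P_RDONLY    ((pgprot_t)1 << 7)
#define P_PXN       ((pgprot_t)1 << 53)
#define P_UXN       ((pgprot_t)1 << 54)

#define PAGE_DEFAULT (P_TYPE_PAGE | P_AF)

/* mirrors _MOD_PROT(p, b): take a base value and OR in extra bits */
static pgprot_t mod_prot(pgprot_t base, pgprot_t bits)
{
	return base | bits;
}

int main(void)
{
	pgprot_t page_readonly = mod_prot(PAGE_DEFAULT,
					  P_USER | P_PXN | P_UXN | P_RDONLY);

	printf("PAGE_READONLY-like value: 0x%llx\n",
	       (unsigned long long)page_readonly);
	return 0;
}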

#define __P000 __PAGE_NONE
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC

#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC
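
The __P* entries cover private (MAP_PRIVATE) mappings and the __S* entries shared ones; the three digits in each name are the exec/write/read bits of the requested protection, and private writable mappings deliberately resolve to the read-only COPY entries so the first write faults into copy-on-write. A hedged sketch of how the generic mm code indexes such a table (the names and values below are illustrative, not the kernel's):

#include <stdio.h>

/* illustrative protection identifiers, standing in for pgprot_t values */
enum prot { NONE, READONLY, COPY, SHARED, READONLY_EXEC, COPY_EXEC, SHARED_EXEC };

static const char *const names[] = {
	"NONE", "READONLY", "COPY", "SHARED",
	"READONLY_EXEC", "COPY_EXEC", "SHARED_EXEC",
};

/* index bits follow the generic kernel convention:
 * bit 0 = read, bit 1 = write, bit 2 = exec, bit 3 = shared */
static const enum prot protection_map[16] = {
	/* private: __P000 .. __P111 */
	NONE, READONLY, COPY, COPY,
	READONLY_EXEC, READONLY_EXEC, COPY_EXEC, COPY_EXEC,
	/* shared: __S000 .. __S111 */
	NONE, READONLY, SHARED, SHARED,
	READONLY_EXEC, READONLY_EXEC, SHARED_EXEC, SHARED_EXEC,
};

int main(void)
{
	int read = 1, write = 1, exec = 0, shared = 0;
	int idx = (read << 0) | (write << 1) | (exec << 2) | (shared << 3);

	/* a private read/write mapping resolves to the COW-friendly COPY entry */
	printf("prot = %s\n", names[protection_map[idx]]);
	return 0;
}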

#define ZERO_PAGE(vaddr) (empty_zero_page)

#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
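
pte_pfn() and pfn_pte() are inverses: the page frame number sits in the descriptor's output-address field, so converting either way is a shift by PAGE_SHIFT plus a mask with PHYS_MASK. A standalone round-trip sketch, assuming 4 KiB pages and a 40-bit physical address mask purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT 12                            /* assumed 4 KiB pages */
#define PHYS_MASK  (((pteval_t)1 << 40) - 1)     /* assumed 40-bit PA space */

/* mirrors pfn_pte(): place the frame number in the output-address field */
static pteval_t pfn_to_pteval(uint64_t pfn, pteval_t prot)
{
	return (pfn << PAGE_SHIFT) | prot;
}

/* mirrors pte_pfn(): mask off attribute bits, then shift back down */
static uint64_t pteval_to_pfn(pteval_t pte)
{
	return (pte & PHYS_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pfn = 0x12345;
	pteval_t pte = pfn_to_pteval(pfn, 0x1);      /* 0x1: a valid-style bit */

	assert(pteval_to_pfn(pte) == pfn);           /* round trip preserves the pfn */
	printf("pte=0x%llx pfn=0x%llx\n",
	       (unsigned long long)pte, (unsigned long long)pteval_to_pfn(pte));
	return 0;
}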

#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir, addr) pte_offset_kernel((dir), (addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

#define pte_present(pte) (pte_val(pte) & PTE_VALID)
#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))

#define pte_present_exec_user(pte) ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_USER))

#define PTE_BIT_FUNC(fn, op) static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
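
PTE_BIT_FUNC() is a small code generator: each instantiation expands into an inline helper that applies one compound assignment to the pte value and returns the modified copy; the kernel uses it to produce helpers such as pte_mkdirty() and pte_wrprotect(). A standalone sketch of the expansion pattern with a local pte_t stand-in and illustrative bit values:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pte_t;
#define pte_val(p) ((p).val)

#define MY_DIRTY  ((uint64_t)1 << 55)
#define MY_RDONLY ((uint64_t)1 << 7)

/* same shape as PTE_BIT_FUNC(fn, op) */
#define MY_PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

MY_PTE_BIT_FUNC(mkdirty,   |= MY_DIRTY)    /* expands to pte_mkdirty() */
MY_PTE_BIT_FUNC(wrprotect, |= MY_RDONLY)   /* expands to pte_wrprotect() */
MY_PTE_BIT_FUNC(mkwrite,   &= ~MY_RDONLY)  /* expands to pte_mkwrite() */

int main(void)
{
	pte_t pte = { 0 };

	pte = pte_mkdirty(pte);
	pte = pte_wrprotect(pte);
	printf("val=0x%llx\n", (unsigned long long)pte_val(pte));
	return 0;
}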

#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))

#define __pgprot_modify(prot, mask, bits) __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define __HAVE_ARCH_PTE_SPECIAL

#define pgprot_noncached(prot) __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
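
__pgprot_modify() is a clear-then-set helper: it masks out an entire field and ORs in a replacement, which is how pgprot_noncached() and friends swap only the memory-attribute index while leaving the rest of the descriptor alone. A standalone sketch of the same pattern with a made-up field position:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pgprot_t;

/* illustrative 3-bit attribute-index field at bits [4:2] */
#define ATTRIDX_SHIFT 2
#define ATTRIDX_MASK  ((pgprot_t)7 << ATTRIDX_SHIFT)
#define ATTRIDX(n)    ((pgprot_t)(n) << ATTRIDX_SHIFT)

/* mirrors __pgprot_modify(prot, mask, bits): clear the field, set new bits */
static pgprot_t pgprot_modify(pgprot_t prot, pgprot_t mask, pgprot_t bits)
{
	return (prot & ~mask) | bits;
}

int main(void)
{
	pgprot_t prot = ATTRIDX(4) | 0x1;   /* "normal" memory, plus a valid bit */

	/* switch only the attribute index, e.g. to a device-memory slot */
	prot = pgprot_modify(prot, ATTRIDX_MASK, ATTRIDX(0));

	printf("prot=0x%llx\n", (unsigned long long)prot);
	return 0;
}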

#define __HAVE_PHYS_MEM_ACCESS_PROT

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)

#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & 2))
#define pud_present(pud) (pud_val(pud))

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
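
Each index macro pulls a different slice out of the virtual address: shift down to that level's granularity, then mask by the number of entries in the table. A standalone sketch of the arithmetic, assuming a 4 KiB granule with a three-level layout (PAGE_SHIFT 12, PMD_SHIFT 21, PGDIR_SHIFT 30, 512 entries per table); the real values depend on the configured page size:

#include <stdint.h>
#include <stdio.h>

/* assumed 4 KiB granule, 3-level layout; 512 eight-byte entries per table */
#define PAGE_SHIFT   12
#define PMD_SHIFT    21
#define PGDIR_SHIFT  30
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 512

static unsigned long pgd_index(uint64_t addr) { return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); }
static unsigned long pmd_index(uint64_t addr) { return (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1); }
static unsigned long pte_index(uint64_t addr) { return (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1); }

int main(void)
{
	uint64_t addr = 0x0000007fbeef1234ULL;   /* an arbitrary user address */

	printf("pgd=%lu pmd=%lu pte=%lu offset=0x%llx\n",
	       pgd_index(addr), pmd_index(addr), pte_index(addr),
	       (unsigned long long)(addr & ((1ULL << PAGE_SHIFT) - 1)));
	return 0;
}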

#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)

#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
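
A swapped-out entry keeps bit 0 clear so pte_present() fails, and packs a 6-bit swap type starting at bit 3 with the swap offset above it; encoding and decoding are pure shift-and-mask. A standalone round trip using the same shifts and widths as the macros above:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define SWP_TYPE_SHIFT   3
#define SWP_TYPE_BITS    6
#define SWP_TYPE_MASK    ((1 << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT)

/* mirrors __swp_entry(): pack type and offset into one word */
static uint64_t swp_entry(unsigned type, uint64_t offset)
{
	return ((uint64_t)type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
}

static unsigned swp_type(uint64_t val)   { return (val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK; }
static uint64_t swp_offset(uint64_t val) { return val >> SWP_OFFSET_SHIFT; }

int main(void)
{
	uint64_t e = swp_entry(5, 0xabcdeULL);

	assert(swp_type(e) == 5);
	assert(swp_offset(e) == 0xabcdeULL);
	assert((e & 1) == 0);   /* bit 0 stays clear, so pte_present() is false */
	printf("entry=0x%llx type=%u offset=0x%llx\n",
	       (unsigned long long)e, swp_type(e), (unsigned long long)swp_offset(e));
	return 0;
}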

#define pte_file(pte) (pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)

#define PTE_FILE_MAX_BITS 61
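
Non-linear file ptes use the same trick: PTE_FILE (bit 2) marks the entry while the valid bit stays clear, and the file page offset occupies the 61 bits from bit 3 upward, matching PTE_FILE_MAX_BITS. A minimal round-trip sketch of pte_to_pgoff()/pgoff_to_pte():

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define FILE_BIT ((uint64_t)1 << 2)   /* mirrors PTE_FILE */

/* mirrors pgoff_to_pte(): offset above bit 3, file marker at bit 2 */
static uint64_t pgoff_to_pteval(uint64_t pgoff) { return (pgoff << 3) | FILE_BIT; }

/* mirrors pte_to_pgoff() */
static uint64_t pteval_to_pgoff(uint64_t pte)   { return pte >> 3; }

int main(void)
{
	uint64_t pgoff = 0xdead00ULL;
	uint64_t pte = pgoff_to_pteval(pgoff);

	assert(pte & FILE_BIT);                  /* recognised by pte_file() */
	assert((pte & 1) == 0);                  /* not pte_present() */
	assert(pteval_to_pgoff(pte) == pgoff);
	printf("pte=0x%llx pgoff=0x%llx\n",
	       (unsigned long long)pte, (unsigned long long)pteval_to_pgoff(pte));
	return 0;
}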

#define io_remap_pfn_range(vma, from, pfn, size, prot) remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)