|
#define pgprintk(x...) do { } while (0)

#define rmap_printk(x...) do { } while (0)

#define ASSERT(x) do { } while (0)

#define PTE_PREFETCH_NUM 8

#define PT_FIRST_AVAIL_BITS_SHIFT 10

#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level) (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + ((level) - 1) * PT32_LEVEL_BITS)) - 1))

#define PT32_INDEX(address, level) (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
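
PT64_LEVEL_SHIFT and PT64_INDEX slice an address into 9-bit table indices sitting above the 12-bit page offset; the PT32 variants do the same with 10-bit indices for two-level 32-bit guest paging. A minimal user-space sketch of the 64-bit case (assuming PAGE_SHIFT is 12, as on x86; the sample address is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PT64_LEVEL_BITS 9
#define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

int main(void)
{
	uint64_t gva = 0x7f3a12345000ULL;
	int level;

	/* Levels 4..1 yield the PML4, PDPT, PD and PT indices. */
	for (level = 4; level >= 1; level--)
		printf("level %d: shift %2d, index %3llu\n", level,
		       PT64_LEVEL_SHIFT(level),
		       (unsigned long long)PT64_INDEX(gva, level));
	return 0;
}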
|
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))

#define PT64_DIR_BASE_ADDR_MASK (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT64_LVL_ADDR_MASK(level) (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)) - 1))

#define PT64_LVL_OFFSET_MASK(level) (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK

#define PT32_DIR_BASE_ADDR_MASK (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT32_LVL_ADDR_MASK(level) (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + ((level) - 1) * PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK | PT64_NX_MASK)
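
PT64_BASE_ADDR_MASK keeps bits 51:12 of an spte, stripping both the low flag bits and the high reserved/NX bits, which leaves the host physical address of the page the entry points at. A quick user-space check (the sample spte value is made up):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(uint64_t)(PAGE_SIZE - 1))

int main(void)
{
	uint64_t spte = 0x8000000123456867ULL;	/* made-up spte */

	/* Prints 0x123456000: flag bits 0x867 and bit 63 are stripped. */
	printf("0x%llx\n", (unsigned long long)(spte & PT64_BASE_ADDR_MASK));
	return 0;
}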
|
#define ACC_EXEC_MASK 1

#define ACC_WRITE_MASK PT_WRITABLE_MASK

#define ACC_USER_MASK PT_USER_MASK

#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
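
The ACC_* masks describe the access rights a guest pte grants. ACC_EXEC_MASK is a software-chosen bit, while ACC_WRITE_MASK and ACC_USER_MASK reuse hardware pte bits defined in mmu.h; the bit-1/bit-2 values below restate the x86 R/W and U/S positions so the snippet stands alone:

#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)	/* x86 pte R/W bit */
#define PT_USER_MASK (1ULL << 2)	/* x86 pte U/S bit */
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

int main(void)
{
	unsigned long long access = ACC_ALL;	/* 0x7 */

	/* A read-only mapping drops the write permission. */
	access &= ~ACC_WRITE_MASK;
	printf("access = 0x%llx, writable: %s\n", access,
	       (access & ACC_WRITE_MASK) ? "yes" : "no");
	return 0;
}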
|
#define CREATE_TRACE_POINTS

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

#define PTE_LIST_EXT 3

#define for_each_shadow_entry(_vcpu, _addr, _walker)

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)
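
for_each_shadow_entry and its lockless variant are multi-line helpers (bodies not shown in this listing) that initialize a kvm_shadow_walk_iterator for a guest address and step it one paging level per iteration, from the shadow root down to the leaf. A self-contained user-space analogue of that loop shape (the toy present-bit scheme and all names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PT64_LEVEL_BITS 9
#define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define PRESENT 1ULL

int main(void)
{
	/* Four table levels of 512 entries each, as in hardware. */
	static uint64_t pml4[512], pdpt[512], pd[512], pt[512];
	uint64_t addr = 0x7f3a12345000ULL;
	uint64_t *table = pml4;
	int level;

	/* Wire up one translation; a table links to the next by raw
	 * pointer value, with bit 0 doubling as the present flag
	 * (uint64_t arrays are 8-byte aligned, so bit 0 is free). */
	pml4[PT64_INDEX(addr, 4)] = (uintptr_t)pdpt | PRESENT;
	pdpt[PT64_INDEX(addr, 3)] = (uintptr_t)pd | PRESENT;
	pd[PT64_INDEX(addr, 2)] = (uintptr_t)pt | PRESENT;
	pt[PT64_INDEX(addr, 1)] = 0xabc000ULL | PRESENT;

	/* Descend root to leaf, one entry per level: the loop shape
	 * the shadow-walk iterator provides inside the kernel. */
	for (level = 4; level >= 1; level--) {
		uint64_t entry = table[PT64_INDEX(addr, level)];

		if (!(entry & PRESENT)) {
			printf("not mapped at level %d\n", level);
			return 0;
		}
		if (level == 1) {
			printf("leaf entry: 0x%llx\n",
			       (unsigned long long)entry);
			return 0;
		}
		table = (uint64_t *)(uintptr_t)(entry & ~PRESENT);
	}
	return 0;
}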
|
#define RMAP_RECYCLE_THRESHOLD 1000

#define KVM_PAGE_ARRAY_NR 16

#define for_each_gfn_sp(kvm, sp, gfn, pos)

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)

#define for_each_sp(pvec, sp, parents, i)

#define PTTYPE 64

#define PTTYPE 32
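
The two PTTYPE lines are not a mistake: mmu.c defines PTTYPE to 64, includes paging_tmpl.h, then undefines it, defines it to 32 and includes the header again, so the guest page-table walker is compiled once per guest paging width. A single-file sketch of the same include-the-template-twice technique (paging_tmpl.h really does key off PTTYPE, pt_element_t and FNAME, but the tiny gpte_size function is invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* --- first instantiation: 64-bit guest ptes --- */
#define PTTYPE 64
#define pt_element_t uint64_t
#define FNAME(name) paging64_##name
static size_t FNAME(gpte_size)(void) { return sizeof(pt_element_t); }
#undef pt_element_t
#undef FNAME
#undef PTTYPE

/* --- second instantiation: 32-bit guest ptes --- */
#define PTTYPE 32
#define pt_element_t uint32_t
#define FNAME(name) paging32_##name
static size_t FNAME(gpte_size)(void) { return sizeof(pt_element_t); }
#undef pt_element_t
#undef FNAME
#undef PTTYPE

int main(void)
{
	printf("%zu %zu\n", paging64_gpte_size(), paging32_gpte_size());
	return 0;
}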
|
|
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)

EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask)

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask)

EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes)

void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)

int kvm_age_hva(struct kvm *kvm, unsigned long hva)

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)

void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)

EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page)

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)

EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type)

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)

EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common)
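
kvm_mmu_set_mmio_spte_mask() installs the bit pattern that handle_mmio_page_fault_common() later looks for: MMIO accesses are cached by writing an spte that carries this mask together with the gfn and access bits, so the next fault on that spte can be routed straight to emulation. A user-space sketch of that encode/recognize/decode cycle (the tag bit and helper names are illustrative, loosely mirroring the mark/is-mmio-spte helpers in mmu.c):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT 12

static uint64_t mmio_mask;	/* set once by the vendor module */

static uint64_t make_mmio_spte(uint64_t gfn, unsigned int access)
{
	return mmio_mask | access | (gfn << PAGE_SHIFT);
}

static bool is_mmio_spte(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_mask;
}

int main(void)
{
	mmio_mask = 1ULL << 62;	/* illustrative tag bit */
	uint64_t spte = make_mmio_spte(0xfee00, 0x3);

	/* Recognize the cached MMIO spte and recover the gfn. */
	printf("mmio: %d, gfn: 0x%llx\n", is_mmio_spte(spte),
	       (unsigned long long)((spte & ~mmio_mask) >> PAGE_SHIFT));
	return 0;
}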
|
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)

int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)

EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu)

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)

EXPORT_SYMBOL_GPL(kvm_mmu_reset_context)

int kvm_mmu_load(struct kvm_vcpu *vcpu)

EXPORT_SYMBOL_GPL(kvm_mmu_load)

void kvm_mmu_unload(struct kvm_vcpu *vcpu)

EXPORT_SYMBOL_GPL(kvm_mmu_unload)

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes)

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)

EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt)

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, void *insn, int insn_len)

EXPORT_SYMBOL_GPL(kvm_mmu_page_fault)

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)

EXPORT_SYMBOL_GPL(kvm_mmu_invlpg)

void kvm_enable_tdp(void)

EXPORT_SYMBOL_GPL(kvm_enable_tdp)

void kvm_disable_tdp(void)

EXPORT_SYMBOL_GPL(kvm_disable_tdp)
|
int kvm_mmu_create(struct kvm_vcpu *vcpu)

int kvm_mmu_setup(struct kvm_vcpu *vcpu)

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)

void kvm_mmu_zap_all(struct kvm *kvm)

int kvm_mmu_module_init(void)

unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
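
kvm_mmu_calculate_mmu_pages() sizes the shadow-page budget as a fixed fraction of the guest's total memory-slot pages, with a floor. A sketch of that rule, assuming this kernel era's KVM_PERMILLE_MMU_PAGES of 20 and KVM_MIN_ALLOC_MMU_PAGES of 64 (treat the exact constants as assumptions):

#include <stdio.h>

/* Assumed values of KVM_PERMILLE_MMU_PAGES / KVM_MIN_ALLOC_MMU_PAGES. */
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64

static unsigned int calculate_mmu_pages(unsigned long guest_pages)
{
	unsigned int nr = guest_pages * KVM_PERMILLE_MMU_PAGES / 1000;

	/* Never shrink the budget below the minimum allocation. */
	return nr < KVM_MIN_ALLOC_MMU_PAGES ? KVM_MIN_ALLOC_MMU_PAGES : nr;
}

int main(void)
{
	/* A 4 GiB guest has 1048576 4 KiB pages -> budget of 20971. */
	printf("%u\n", calculate_mmu_pages(1048576UL));
	return 0;
}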
|
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])

EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy)

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)

void kvm_mmu_module_exit(void)
|