|
/* 41-bit physical address mask. */
#define MASK_41 ((unsigned long)0x1ffffffffff)

/*
 * Memory attribute encodings for the PTE/ITIR "ma" field.
 * NOTE(review): values match the ia64 architected attribute encodings
 * (0=WB, 4=UC, 5=UCE, 6=WC, 7=NaTPage) — confirm against the arch manual.
 */
#define VA_MATTR_WB		0x0
#define VA_MATTR_UC		0x4
#define VA_MATTR_UCE		0x5
#define VA_MATTR_WC		0x6
#define VA_MATTR_NATPAGE	0x7

/*
 * Page helpers. PMASK() takes a page size in bytes; PSIZE() takes a
 * page-size shift in bits; CLEARLSB()/PAGEALIGN() clear the low
 * 'nbits'/'ps' bits of an address.
 */
#define PMASK(size)		(~((size) - 1))
#define PSIZE(size)		(1UL << (size))
#define CLEARLSB(ppn, nbits)	(((ppn) >> (nbits)) << (nbits))
#define PAGEALIGN(va, ps)	CLEARLSB(va, ps)

/* PTE bits that are reserved and must read as zero. */
#define PAGE_FLAGS_RV_MASK	(0x2 | (0x3UL << 50) | (((1UL << 11) - 1) << 53))
#define _PAGE_MA_ST		(0x1 << 2)	/* is reserved for software use */

#define ARCH_PAGE_SHIFT		12

/* Tag value marking a VHPT entry invalid. */
#define INVALID_TI_TAG		(1UL << 63)

/* Software-defined bits carried in VTLB PTEs. */
#define VTLB_PTE_P_BIT		0
#define VTLB_PTE_IO_BIT		60
#define VTLB_PTE_IO		(1UL << VTLB_PTE_IO_BIT)
#define VTLB_PTE_P		(1UL << VTLB_PTE_P_BIT)
|
/*
 * One bit per virtual region (va bits 63:61) in _tr_regions marks
 * regions covered by a TR; check/set the bit for the region of _ifa.
 */
#define vcpu_quick_region_check(_tr_regions, _ifa)		\
	(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
#define vcpu_quick_region_set(_tr_regions, _ifa)		\
	do { _tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)

/* Emulation result codes. */
#define IA64_NO_FAULT		0
#define IA64_FAULT		1

/* Register backing store offset, 16-byte aligned above the vmm task. */
#define VMM_RBS_OFFSET		((VMM_TASK_SIZE + 15) & ~15)

/* Guest physical/virtual addressing-mode switch classification. */
#define SW_BAD		0	/* Bad mode transition */
#define SW_V2P		1	/* Physical emulation is activated */
#define SW_P2V		2	/* Exit physical mode emulation */
#define SW_SELF		3	/* No mode transition */
#define SW_NOP		4	/* Mode transition, but without action required */

/* arch.mode_flags bits. */
#define GUEST_IN_PHY		0x1
#define GUEST_PHY_EMUL		0x2

/* The current vcpu pointer is kept in the thread-pointer register. */
#define current_vcpu ((struct kvm_vcpu *)ia64_getreg(_IA64_REG_TP))
|
/* Virtual region number: va bits 63:61. */
#define VRN_SHIFT	61
#define VRN_MASK	0xe000000000000000
#define VRN0		0x0UL
#define VRN1		0x1UL
#define VRN2		0x2UL
#define VRN3		0x3UL
#define VRN4		0x4UL
#define VRN5		0x5UL
#define VRN6		0x6UL
#define VRN7		0x7UL

/* Interrupt masking classification results. */
#define IRQ_NO_MASKED		0
#define IRQ_MASKED_BY_VTPR	1
#define IRQ_MASKED_BY_INSVC	2	/* masked by inservice IRQ */

#define PTA_BASE_SHIFT		15

/* psr.vm bit (guest runs de-privileged under the vmm). */
#define IA64_PSR_VM_BIT		46
#define IA64_PSR_VM		(__IA64_UL(1) << IA64_PSR_VM_BIT)

/* cr.ifs valid bit. */
#define IA64_IFS_V_BIT		63
#define IA64_IFS_V		(__IA64_UL(1) << IA64_IFS_V_BIT)
|
/* Fixed PTE flag combinations for mapping guest physical pages. */
#define PHY_PAGE_UC	(_PAGE_A | _PAGE_D | _PAGE_P | _PAGE_MA_UC | _PAGE_AR_RWX)
#define PHY_PAGE_WB	(_PAGE_A | _PAGE_D | _PAGE_P | _PAGE_MA_WB | _PAGE_AR_RWX)

/* Guest addressing-mode predicates (arch.mode_flags). */
#define is_physical_mode(v)	((v->arch.mode_flags) & GUEST_IN_PHY)
#define is_virtual_mode(v)	(!is_physical_mode(v))

/* Pack psr.it/.dt/.rt into a 3-bit addressing-mode index. */
#define MODE_IND(psr)	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)

/* No-op spinlock stubs. */
#define _vmm_raw_spin_lock(x)	do { } while (0)
#define _vmm_raw_spin_unlock(x)	do { } while (0)

/*
 * NOTE(review): the following expand to nothing as shown here — they look
 * like placeholders, or their bodies were lost upstream; confirm against
 * callers before relying on them.
 */
#define __kvm_pa(x)
#define __kvm_va(x)
#define _REGION_ID(x)
#define _REGION_PAGE_SIZE(x)
#define _REGION_HW_WALKER(x)

/* Accessors for vpd fields and arch-private vcpu fields. */
#define VCPU(_v, _x)		((_v)->arch.vpd->_x)
#define VMX(_v, _x)		((_v)->arch._x)
#define VLSAPIC_INSVC(vcpu, i)	((vcpu)->arch.insvc[i])
#define VLSAPIC_XTP(_v)		VMX(_v, xtp)
|
|
u64 | kvm_get_mpt_entry (u64 gpfn) |
|
void | vmm_spin_lock (vmm_spinlock_t *lock) |
|
void | vmm_spin_unlock (vmm_spinlock_t *lock) |
|
void | vcpu_get_fpreg (struct kvm_vcpu *vcpu, unsigned long reg, struct ia64_fpreg *val) |
|
void | vcpu_set_fpreg (struct kvm_vcpu *vcpu, unsigned long reg, struct ia64_fpreg *val) |
|
u64 | vcpu_get_gr (struct kvm_vcpu *vcpu, unsigned long reg) |
|
void | vcpu_set_gr (struct kvm_vcpu *vcpu, unsigned long reg, u64 val, int nat) |
|
unsigned long | vcpu_get_psr (struct kvm_vcpu *vcpu) |
|
void | vcpu_set_psr (struct kvm_vcpu *vcpu, unsigned long val) |
|
u64 | vcpu_thash (struct kvm_vcpu *vcpu, u64 vadr) |
|
void | vcpu_bsw0 (struct kvm_vcpu *vcpu) |
|
void | thash_vhpt_insert (struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) |
|
struct thash_data * | vhpt_lookup (u64 va) |
|
u64 | guest_vhpt_lookup (u64 iha, u64 *pte) |
|
void | thash_purge_entries (struct kvm_vcpu *v, u64 va, u64 ps) |
|
void | thash_purge_entries_remote (struct kvm_vcpu *v, u64 va, u64 ps) |
|
u64 | translate_phy_pte (u64 *pte, u64 itir, u64 va) |
|
void | thash_purge_and_insert (struct kvm_vcpu *v, u64 pte, u64 itir, u64 ifa, int type) |
|
void | thash_purge_all (struct kvm_vcpu *v) |
|
struct thash_data * | vtlb_lookup (struct kvm_vcpu *v, u64 va, int is_data) |
|
int | vtr_find_overlap (struct kvm_vcpu *vcpu, u64 va, u64 ps, int is_data) |
|
void | vcpu_increment_iip (struct kvm_vcpu *v) |
|
void | vcpu_decrement_iip (struct kvm_vcpu *vcpu) |
|
void | vcpu_pend_interrupt (struct kvm_vcpu *vcpu, u8 vec) |
|
void | vcpu_unpend_interrupt (struct kvm_vcpu *vcpu, u8 vec) |
|
void | data_page_not_present (struct kvm_vcpu *vcpu, u64 vadr) |
|
void | dnat_page_consumption (struct kvm_vcpu *vcpu, u64 vadr) |
|
void | alt_dtlb (struct kvm_vcpu *vcpu, u64 vadr) |
|
void | nested_dtlb (struct kvm_vcpu *vcpu) |
|
void | dvhpt_fault (struct kvm_vcpu *vcpu, u64 vadr) |
|
int | vhpt_enabled (struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref) |
|
void | update_vhpi (struct kvm_vcpu *vcpu, int vec) |
|
int | irq_masked (struct kvm_vcpu *vcpu, int h_pending, int h_inservice) |
|
int | fetch_code (struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) |
|
void | emulate_io_inst (struct kvm_vcpu *vcpu, u64 padr, u64 ma) |
|
void | vmm_transition (struct kvm_vcpu *vcpu) |
|
void | vmm_trampoline (union context *from, union context *to) |
|
int | vmm_entry (void) |
|
u64 | vcpu_get_itc (struct kvm_vcpu *vcpu) |
|
void | vmm_reset_entry (void) |
|
void | kvm_init_vtlb (struct kvm_vcpu *v) |
|
void | kvm_init_vhpt (struct kvm_vcpu *v) |
|
void | thash_init (struct thash_cb *hcb, u64 sz) |
|
void | panic_vm (struct kvm_vcpu *v, const char *fmt,...) |
|
u64 | kvm_gpa_to_mpa (u64 gpa) |
|
u64 | ia64_call_vsa (u64 proc, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) |
|