#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}
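/*
 * Illustrative caller pattern, not code from this file: the vcpu entry
 * path typically reinjects a pending event before considering a new
 * one, along the lines of:
 *
 *	if (kvm_event_needs_reinjection(vcpu))
 *		reinject the queued exception/interrupt/NMI;
 *	else
 *		look for a new injectable event;
 */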
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	/* #BP and #OF are raised by instructions (INT3/INTO). */
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
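/*
 * Rough note: walk_mmu points at nested_mmu while a nested (L2) guest
 * is running, i.e. a gva-to-gpa walk has to go through two layers of
 * translation rather than one.
 */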
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
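/*
 * Illustrative only: the predicates above compose into a paging-mode
 * classifier. This enum and helper are hypothetical (they are not part
 * of this header); they are shown as a sketch of how the checks nest
 * according to the x86 architecture rules.
 */
enum guest_paging_mode {
	GUEST_PAGING_NONE,	/* CR0.PG clear */
	GUEST_PAGING_32BIT,	/* paging on, CR4.PAE clear */
	GUEST_PAGING_PAE,	/* CR4.PAE set, EFER.LMA clear */
	GUEST_PAGING_LONG,	/* EFER.LMA set */
};

static inline enum guest_paging_mode guest_paging_mode(struct kvm_vcpu *vcpu)
{
	if (!is_paging(vcpu))
		return GUEST_PAGING_NONE;
	if (is_long_mode(vcpu))
		return GUEST_PAGING_LONG;
	if (is_pae(vcpu))
		return GUEST_PAGING_PAE;
	return GUEST_PAGING_32BIT;
}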
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
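/*
 * Example values: bit(0) == 0x1, bit(9) == 0x200. The "& 31" mask
 * keeps the shift count inside a 32-bit word, so an out-of-range bit
 * number wraps (bit(35) == bit(3)) instead of invoking undefined
 * behaviour from an oversized shift.
 */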
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}
/*
 * Clear the mmio cache info for the given gva; if gva is ~0ul, all mmio
 * cache info is cleared.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}
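/*
 * Usage sketch (hypothetical caller): together with the match helpers
 * below, the cached translation lets an emulator short-circuit a full
 * page-table walk on the MMIO fast path, roughly:
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva)) {
 *		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT;
 *		*gpa |= (gpa_t)(gva & ~PAGE_MASK);
 *		// cached hit: skip the guest page-table walk
 *	}
 */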
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu,
				       unsigned long gva)
{
	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}
static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

#endif /* ARCH_X86_KVM_X86_H */