#include <linux/types.h>
#include <linux/list.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <asm/signal.h>

#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#define KVM_MMIO_SIZE 8
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MAX_MMIO_FRAGMENTS	2
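/*
 * Error pfns: a valid pfn never has its top 12 bits set, so bits
 * 52..63 are free to encode the error values below.
 */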
#define KVM_PFN_ERR_MASK	(0xfffULL << 52)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 3)
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_BAD;
}
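/*
 * A pfn is "invalid" when it encodes a real translation failure: the
 * gfn is backed by a memslot but could not be mapped to a host pfn.
 * The "noslot" case (no memslot at all) is deliberately excluded.
 */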
static inline bool is_invalid_pfn(pfn_t pfn)
{
	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
}
#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)
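/*
 * Both error hvas live at or above PAGE_OFFSET, which no legitimate
 * userspace address can reach, so a single comparison suffices.
 */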
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}
#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
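/*
 * vcpu->requests bit numbers: producers set them with
 * kvm_make_request() and the vcpu loop consumes them with
 * kvm_check_request(), both defined later in this header.
 */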
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

#define NR_IOBUS_DEVS 1000
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
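/*
 * Sketch of a caller (hypothetical, for illustration): forwarding an
 * emulated 4-byte MMIO store to whichever device claims 'gpa' on the
 * MMIO bus; a non-zero return means no registered device matched.
 *
 *	u32 data = 0xdeadbeef;
 *	if (kvm_io_bus_write(kvm, KVM_MMIO_BUS, gpa, sizeof(data), &data))
 *		report_unhandled_mmio(gpa);	(hypothetical helper)
 */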
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#ifdef CONFIG_HAS_IOMEM
	int mmio_read_completed;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
#ifdef CONFIG_KVM_ASYNC_PF

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
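/*
 * Some of the bitops functions do not support too long bitmaps;
 * this limit keeps a slot's dirty bitmap within what they handle.
 */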
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
#ifdef __KVM_HAVE_IOAPIC
#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
#ifdef CONFIG_KVM_APIC_ARCHITECTURE

#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct mutex resampler_lock;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET

#ifdef CONFIG_HAVE_KVM_IRQCHIP
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
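/*
 * Usage sketch (hypothetical caller): request a clock update on
 * every online vcpu.
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 */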
#define kvm_for_each_memslot(memslot, slots) \
	for (memslot = &slots->memslots[0]; \
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)
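/*
 * Iteration stops at the first slot with npages == 0: memslots are
 * kept sorted so that all in-use slots precede the empty ones.
 */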
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
		   void *data, unsigned long len);
		   void *data, unsigned long len);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}
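/*
 * Architectures that need more than a kzalloc'd struct kvm (e.g. to
 * use vmalloc for a large struct) define __KVM_HAVE_ARCH_VM_ALLOC and
 * provide their own kvm_arch_alloc_vm()/kvm_arch_free_vm().
 */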
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
		int irq_source_id, int level);
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
static inline void kvm_guest_enter(void)
static inline void kvm_guest_exit(void)
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/* Pairs with the smp_wmb() in mmu_notifier_invalidate_range_end(). */
	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
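/*
 * Usage sketch: callers snapshot kvm->mmu_notifier_seq before
 * translating a gfn, then re-check under mmu_lock; a non-zero return
 * means an mmu notifier invalidation ran in between and the fault
 * must be retried with a fresh translation.
 */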
#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
#ifdef CONFIG_HAVE_KVM_EVENTFD

#ifdef CONFIG_HAVE_KVM_IRQCHIP
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	}
	return false;
}
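/*
 * Typical pairing (hypothetical sketch of a vcpu entry loop):
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		flush_guest_tlb(vcpu);		(hypothetical helper)
 *
 * kvm_check_request() both tests and clears the bit, so each request
 * is consumed exactly once.
 */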
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}
#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}