/*
 * audit_printk - emit an audit error message prefixed with the name of the
 * current audit point (kvm->arch.audit_point indexes audit_point_name[]).
 */
31 #define audit_printk(kvm, fmt, args...) \
32 printk(KERN_ERR "audit: (%s) error: " \
33 fmt, audit_point_name[kvm->arch.audit_point], ##args)
/* Fragment of __mmu_spte_walk: recursive walk over a shadow page table. */
/* Run the per-spte callback on the entry at this level. */
45 fn(vcpu, ent + i, level);
/* Recurse only into entries that are present and not leaves. */
47 if (is_shadow_present_pte(ent[i]) &&
48 !is_last_spte(ent[i], level)) {
/* NOTE(review): 'child' is presumably derived from ent[i] in elided lines — confirm. */
52 __mmu_spte_walk(vcpu, child, fn, level - 1);
/* Fragment of mmu_spte_walk: walk sptes starting from each active root. */
68 sp = page_header(root);
/* Four roots are walked here, each starting at level 2 — presumably the
 * PAE root case; confirm against the elided surrounding code. */
73 for (i = 0; i < 4; ++
i) {
78 sp = page_header(root);
79 __mmu_spte_walk(vcpu, sp, fn, 2);
/*
 * walk_all_active_sps - presumably applies @fn to every active shadow page
 * of @kvm; the body is elided from this view — confirm against the full file.
 */
88 static void walk_all_active_sps(
struct kvm *kvm,
sp_handler fn)
/* Fragment of a per-spte mapping audit (apparently audit_mappings). */
103 sp = page_header(
__pa(sptep));
108 "level = %d\n", sp, level);
/* Only present leaf sptes have a final mapping worth auditing. */
113 if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
116 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->
spt);
/* Elided lines presumably translate gfn to pfn before this check — confirm. */
119 if (is_error_pfn(pfn))
/* NOTE(review): "%llxn" looks like a typo for "%llx\n" — confirm upstream. */
125 "ent %llxn", vcpu->
arch.mmu.root_level, pfn,
/*
 * inspect_spte_has_rmap - check that @sptep has a matching reverse-map
 * entry for its gfn. (Several lines are elided from this view.)
 */
129 static void inspect_spte_has_rmap(
struct kvm *kvm,
u64 *sptep)
132 unsigned long *rmapp;
/* Recover the shadow page containing this spte from its physical address. */
136 rev_sp = page_header(
__pa(sptep));
137 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->
spt);
144 (
long int)(sptep - rev_sp->
spt), rev_sp->
gfn);
/* Look up the rmap chain for this gfn at the shadow page's role level. */
149 rmapp = gfn_to_rmap(kvm, gfn, rev_sp->
role.
level);
159 static void audit_sptes_have_rmaps(
struct kvm_vcpu *vcpu,
u64 *sptep,
int level)
161 if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
162 inspect_spte_has_rmap(vcpu->
kvm, sptep);
/*
 * audit_spte_after_sync - per-spte audit callback run around the sync
 * audit point; the body is elided from this view.
 */
165 static void audit_spte_after_sync(
struct kvm_vcpu *vcpu,
u64 *sptep,
int level)
/*
 * check_mappings_rmap - for each rmap-tracked spte of @sp, verify the
 * corresponding rmap entry exists. (Loop header elided from this view.)
 */
174 static void check_mappings_rmap(
struct kvm *kvm,
struct kvm_mmu_page *sp)
/* Skip sptes that are not rmap-tracked mappings. */
182 if (!is_rmap_spte(sp->
spt[i]))
185 inspect_spte_has_rmap(kvm, sp->
spt + i);
/*
 * audit_write_protection - report sptes that are still writable when the
 * page's mappings should be write-protected. (Lines elided from this view.)
 */
189 static void audit_write_protection(
struct kvm *kvm,
struct kvm_mmu_page *sp)
191 unsigned long *rmapp;
/* Walk every spte hanging off this gfn's rmap chain. */
200 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
201 sptep = rmap_get_next(&iter)) {
/* A writable pte here indicates the write-protection invariant was broken. */
202 if (is_writable_pte(*sptep))
204 "mappings: gfn %llx role %x\n",
/*
 * audit_sp - run both per-shadow-page audits: rmap consistency first,
 * then the write-protection check.
 */
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}
215 static void audit_all_active_sps(
struct kvm *kvm)
217 walk_all_active_sps(kvm, audit_sp);
220 static void audit_spte(
struct kvm_vcpu *vcpu,
u64 *sptep,
int level)
222 audit_sptes_have_rmaps(vcpu, sptep, level);
223 audit_mappings(vcpu, sptep, level);
224 audit_spte_after_sync(vcpu, sptep, level);
227 static void audit_vcpu_spte(
struct kvm_vcpu *vcpu)
229 mmu_spte_walk(vcpu, audit_spte);
/* Whether auditing is currently enabled — presumably toggled through the
 * mmu_audit module parameter (see mmu_audit_set below) — confirm. */
232 static bool mmu_audit;
/*
 * __kvm_mmu_audit - record which audit point we are at, then audit all
 * active shadow pages and all sptes of @vcpu. (Some lines elided.)
 */
235 static void __kvm_mmu_audit(
struct kvm_vcpu *vcpu,
int point)
/* Stash the audit point so audit_printk() can name it in messages. */
242 vcpu->
kvm->arch.audit_point =
point;
243 audit_all_active_sps(vcpu->
kvm);
244 audit_vcpu_spte(vcpu);
247 static inline void kvm_mmu_audit(
struct kvm_vcpu *vcpu,
int point)
249 if (static_key_false((&mmu_audit_key)))
250 __kvm_mmu_audit(vcpu, point);
/*
 * mmu_audit_enable - turn auditing on by bumping the static key that
 * gates kvm_mmu_audit(). (Guard lines elided from this view.)
 */
253 static void mmu_audit_enable(
void)
258 static_key_slow_inc(&mmu_audit_key);
/*
 * mmu_audit_disable - turn auditing off by dropping the static-key
 * reference taken in mmu_audit_enable(). (Guard lines elided.)
 */
262 static void mmu_audit_disable(
void)
267 static_key_slow_dec(&mmu_audit_key);
/*
 * mmu_audit_set - kernel_param setter for the mmu_audit parameter;
 * the body is elided from this view.
 */
271 static int mmu_audit_set(
const char *
val,
const struct kernel_param *kp)
/* Wire the setter into the param ops (fragment of the ops struct). */
295 .set = mmu_audit_set,