15 #include <linux/types.h>
17 #include <linux/perf_event.h>
/*
 * NOTE(review): opening line of a static mapping table/struct only —
 * its members and closing brace are not visible in this extract;
 * confirm against the full source.
 */
22 static struct kvm_arch_event_perf_mapping {
/*
 * NOTE(review): signature fragment only — the body is missing from
 * this extract. Presumably tests whether @pmc is a general-purpose
 * (vs fixed) counter; confirm against the full source.
 */
42 static bool pmc_is_gp(
struct kvm_pmc *pmc)
/*
 * NOTE(review): signature fragment only — body missing from this
 * extract. A later fragment uses `counter & pmc_bitmask(pmc)`, so this
 * presumably yields the valid-bit mask for the counter's implemented
 * width; confirm against the full source.
 */
47 static inline u64 pmc_bitmask(
struct kvm_pmc *pmc)
/*
 * NOTE(review): signature fragment only — body missing from this
 * extract. A visible caller below treats a false return as "counter
 * disabled, skip programming".
 */
54 static inline bool pmc_enabled(
struct kvm_pmc *pmc)
/*
 * NOTE(review): two isolated MSR-range checks, apparently from the
 * gp/fixed PMC lookup helpers — the enclosing function bodies are
 * missing from this extract.
 */
/* MSR falls inside the general-purpose counter MSR window. */
63 if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
/* MSR falls inside the fixed counter MSR window. */
71 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
/*
 * NOTE(review): fragments of the two perf overflow callbacks. Both
 * recover the owning kvm_pmc from the perf_event's
 * overflow_handler_context; the interrupting variant then delegates to
 * kvm_perf_overflow(). Interior lines (PMI injection, status-bit
 * updates) are missing from this extract.
 */
106 struct perf_sample_data *
data,
109 struct kvm_pmc *pmc = perf_event->overflow_handler_context;
117 struct kvm_pmc *pmc = perf_event->overflow_handler_context;
120 kvm_perf_overflow(perf_event, data, regs);
/*
 * NOTE(review): tail fragment of a counter read — truncates the raw
 * count to the counter's implemented bit width. The lines computing
 * `counter` are missing from this extract.
 */
149 return counter & pmc_bitmask(pmc);
/*
 * NOTE(review): signature fragment only — body missing from this
 * extract; presumably releases/stops the pmc's backing perf_event.
 */
152 static void stop_counter(
struct kvm_pmc *pmc)
/*
 * NOTE(review): scattered fragments of the counter-(re)programming
 * helper — most of the body is missing from this extract. Visible
 * pieces: a perf_event_attr is built (.size, .exclude_user,
 * .exclude_kernel), the overflow callback is selected by `intr`
 * (kvm_perf_overflow_intr vs kvm_perf_overflow) with the pmc as
 * callback context, and event-creation failure is reported once via
 * printk_once().
 */
162 unsigned config,
bool exclude_user,
bool exclude_kernel,
165 struct perf_event *
event;
168 .size =
sizeof(
attr),
172 .exclude_user = exclude_user,
173 .exclude_kernel = exclude_kernel,
/* PMI-capable counters get the interrupting overflow handler. */
180 intr ? kvm_perf_overflow_intr :
181 kvm_perf_overflow, pmc);
183 printk_once(
"kvm: pmu event creation failed %ld\n",
/*
 * Map a raw (event_select, unit_mask) pair to a generic perf event
 * type by scanning the arch_events[] table.
 * NOTE(review): the loop header and the no-match fallthrough are
 * missing from this extract.
 */
192 static unsigned find_arch_event(
struct kvm_pmu *pmu,
u8 event_select,
198 if (arch_events[i].eventsel == event_select
199 && arch_events[i].unit_mask == unit_mask
206 return arch_events[
i].event_type;
/*
 * Reprogram a general-purpose counter from its EVENTSEL MSR value.
 * NOTE(review): fragmented — interior lines are missing from this
 * extract. Visible: the pin-control bit is ignored (with a one-time
 * warning), the eventsel is translated via find_arch_event(), and the
 * result is handed to reprogram_counter().
 */
209 static void reprogram_gp_counter(
struct kvm_pmc *pmc,
u64 eventsel)
212 u8 event_select, unit_mask;
215 printk_once(
"kvm pmu: pin control bit is ignored\n");
230 config = find_arch_event(&pmc->
vcpu->arch.pmu, event_select,
239 reprogram_counter(pmc, type, config,
/*
 * Reprogram one fixed-function counter from its 4-bit control field:
 * bits 0-1 (en) are the enable/ring selector, bit 3 requests a PMI.
 * NOTE(review): the tail of the function is missing from this extract.
 */
245 static void reprogram_fixed_counter(
struct kvm_pmc *pmc,
u8 en_pmi,
int idx)
247 unsigned en = en_pmi & 0x3;
248 bool pmi = en_pmi & 0x8;
/* Nothing to program if the field is disabled or the counter is off. */
252 if (!en || !pmc_enabled(pmc))
/*
 * Extract the 4-bit enable/PMI control field for fixed counter @idx
 * from a FIXED_CTR_CTRL-style value (4 bits per counter).
 * NOTE(review): braces around the body are not visible in this
 * extract.
 */
262 static inline u8 fixed_en_pmi(
u64 ctrl,
int idx)
264 return (ctrl >> (idx * 4)) & 0xf;
/*
 * Apply a new fixed-counter control value: reprogram each fixed
 * counter with its per-counter 4-bit field.
 * NOTE(review): the loop header (and any skip-if-unchanged check) is
 * missing from this extract.
 */
267 static void reprogram_fixed_counters(
struct kvm_pmu *pmu,
u64 data)
272 u8 en_pmi = fixed_en_pmi(data, i);
273 struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
278 reprogram_fixed_counter(pmc, en_pmi, i);
/*
 * Reprogram the counter at global (bitmap) index @idx, dispatching to
 * the gp path (using the pmc's saved eventsel) or the fixed path.
 * NOTE(review): the branch structure between the visible lines is
 * missing from this extract.
 */
284 static void reprogram_idx(
struct kvm_pmu *pmu,
int idx)
286 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
292 reprogram_gp_counter(pmc, pmc->
eventsel);
295 reprogram_fixed_counter(pmc,
/*
 * React to a write of the global-control MSR by reprogramming
 * counters per bit.
 * NOTE(review): the bit-iteration (presumably over changed enable
 * bits) is missing from this extract; only the per-bit body is
 * visible.
 */
300 static void global_ctrl_changed(
struct kvm_pmu *pmu,
u64 data)
308 reprogram_idx(pmu, bit);
/*
 * NOTE(review): fragments of the MSR-ownership predicate — the gp and
 * global-MSR cases are missing from this extract; only the fixed-PMC
 * lookup tail of the return expression is visible.
 */
313 struct kvm_pmu *pmu = &vcpu->arch.pmu;
326 || get_fixed_pmc(pmu, msr);
/*
 * NOTE(review): fragment of the MSR read path — a counter value is
 * returned via read_pmc(); the surrounding condition and other MSR
 * cases are missing from this extract.
 */
352 (pmc = get_fixed_pmc(pmu, index))) {
353 *data = read_pmc(pmc);
/*
 * NOTE(review): fragments of the MSR write path — most cases and the
 * surrounding switch are missing from this extract. Visible:
 * reserved-bit validation of a fixed-counter-control write (mask
 * 0xfffffffffffff444), a global-control update, direct counter writes
 * (stored as a delta against the live perf count), and reserved-bit
 * validation of an EVENTSEL write (mask 0xffffffff00200000).
 */
372 if (!(data & 0xfffffffffffff444ull)) {
373 reprogram_fixed_counters(pmu, data);
383 global_ctrl_changed(pmu, data);
396 (pmc = get_fixed_pmc(pmu, index))) {
/* Store the new value as an offset from the running perf count. */
398 pmc->
counter += data - read_pmc(pmc);
403 if (!(data & 0xffffffff00200000ull)) {
404 reprogram_gp_counter(pmc, data);
/*
 * NOTE(review): fragments of the RDPMC emulation path — the bounds
 * checks and the fast-mode handling are missing from this extract.
 * Bit 31 of the guest's counter selector requests "fast" mode and
 * bit 30 selects the fixed-counter bank.
 */
415 bool fast_mode = pmc & (1
u << 31);
416 bool fixed = pmc & (1
u << 30);
426 ctr = read_pmc(&counters[pmc]);
/*
 * NOTE(review): fragments of the CPUID-driven PMU sizing code — most
 * of the function is missing from this extract. Visible: a counter
 * bitmask derived from a width field in bits 16-23 of EAX
 * (presumably CPUID leaf 0xA), an event bitmap length from EAX bits
 * 24-31, and a fixed-counter bitmask from a width field in EDX —
 * confirm field meanings against the full source.
 */
457 ((
u64)1 << ((entry->
eax >> 16) & 0xff)) - 1;
458 bitmap_len = (entry->
eax >> 24) & 0xff;
467 ((
u64)1 << ((entry->
edx >> 5) & 0xff)) - 1;
/*
 * NOTE(review): fragments of PMU initialization — the per-counter
 * setup loop is missing from this extract. Visible: the pmu struct is
 * zeroed and its irq_work is wired to trigger_pmi for deferred PMI
 * delivery.
 */
480 memset(pmu, 0,
sizeof(*pmu));
491 init_irq_work(&pmu->
irq_work, trigger_pmi);
/*
 * NOTE(review): fragment of the deferred-event handler — the
 * reprogramming-bitmap iteration surrounding these lines is missing
 * from this extract; each set bit's counter is looked up and
 * reprogrammed.
 */
528 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
535 reprogram_idx(pmu, bit);