9 #include <linux/perf_event.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/ptrace.h>
/* IBS (Instruction-Based Sampling) support: compiled in only when both
 * perf events and AMD CPU support are enabled in the kernel config. */
20 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
/* Bits of the IBS fetch/op control MSRs that a user-supplied event config
 * is allowed to set (checked against ->config_mask in perf_ibs_init). */
27 #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
28 #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
/* NOTE(review): field of an enclosing struct whose declaration is not fully
 * visible in this chunk — presumably a bitmap of valid MSR offsets used by
 * the IRQ handler's rdmsrl loop; TODO confirm against the full file. */
51 unsigned long offset_mask[1];
/* Container for the raw IBS register dump taken in the interrupt handler
 * (fields elided in this chunk; ->caps, ->size, ->regs[], ->data are used
 * below in perf_ibs_handle_irq). */
62 struct perf_ibs_data {
109 *hw_period = (
u64)left;
118 int shift = 64 -
width;
131 new_raw_count) != prev_raw_count)
142 delta = (new_raw_count << shift) - (prev_raw_count << shift);
/* Forward declarations of the two IBS PMU instances (instruction-fetch
 * sampling and op/execution sampling); their initializers appear below. */
151 static struct perf_ibs perf_ibs_fetch;
152 static struct perf_ibs perf_ibs_op;

/*
 * Map a perf event type id back to the IBS PMU instance that registered it.
 * Returns &perf_ibs_fetch or &perf_ibs_op on a match; the fallthrough for
 * an unknown type is elided from this chunk (presumably returns NULL —
 * TODO confirm, callers below do check the result indirectly).
 */
154 static struct perf_ibs *get_ibs_pmu(
int type)
156 if (perf_ibs_fetch.pmu.type == type)
157 return &perf_ibs_fetch;
158 if (perf_ibs_op.pmu.type == type)
182 switch (event->
attr.precise_ip) {
192 switch (event->
attr.type) {
194 switch (event->
attr.config) {
201 switch (event->
attr.config) {
/*
 * ->event_init() callback shared by both IBS PMUs: validate the event's
 * attributes and translate the requested sample period into the hardware's
 * max-count field.  Several statements (error returns, hwc setup) are
 * elided from this chunk.
 */
226 static int perf_ibs_init(
struct perf_event *event)
229 struct perf_ibs *perf_ibs;
/* Resolve which IBS PMU (fetch or op) this event type belongs to. */
233 perf_ibs = get_ibs_pmu(event->
attr.type);
235 config =
event->attr.config;
/* NOTE(review): this path (lines elided) appears to handle precise_ip
 * events by redirecting them to the op PMU — confirm against full file. */
237 perf_ibs = &perf_ibs_op;
238 ret = perf_ibs_precise_event(event, &config);
/* Reject events that were routed to the wrong PMU instance. */
243 if (event->pmu != &perf_ibs->pmu)
/* Reject config bits outside the PMU's allowed mask. */
249 if (config & ~perf_ibs->config_mask)
252 if (hwc->sample_period) {
/* A raw period and an in-config count are mutually exclusive. */
253 if (config & perf_ibs->cnt_mask)
256 if (!event->
attr.sample_freq && hwc->sample_period & 0x0f)
/* The hardware counts in units of 16: clear the low 4 bits and
 * enforce a minimum period of 0x10. */
263 hwc->sample_period &= ~0x0FULL;
264 if (!hwc->sample_period)
265 hwc->sample_period = 0x10;
/* Period encoded in the config's count field: extract it, strip it
 * from the config, and scale by 16 to get the effective period. */
267 max_cnt = config & perf_ibs->cnt_mask;
268 config &= ~perf_ibs->cnt_mask;
269 event->attr.sample_period = max_cnt << 4;
270 hwc->sample_period =
event->attr.sample_period;
273 if (!hwc->sample_period)
/* Seed the sampling state and remember which control MSR to program. */
280 hwc->last_period = hwc->sample_period;
281 local64_set(&hwc->period_left, hwc->sample_period);
283 hwc->config_base = perf_ibs->msr;
/*
 * Compute the next hardware period for an IBS event.  Delegates to
 * perf_event_set_period() with a minimum period of 1<<4 (the hardware's
 * 16-count granularity) and the PMU's maximum period; remaining statements
 * are elided from this chunk.
 */
289 static int perf_ibs_set_period(
struct perf_ibs *perf_ibs,
295 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
/*
 * Extract the current sample count from a fetch-control MSR value
 * (body elided in this chunk).
 */
301 static u64 get_ibs_fetch_count(
u64 config)
/*
 * Extract the current sample count from an op-control MSR value
 * (body elided in this chunk).
 */
306 static u64 get_ibs_op_count(
u64 config)
/*
 * Fold the hardware count held in *config into the perf event's counter.
 * If perf_event_try_update() loses a race (returns false), re-read the
 * control MSR and retry with the fresh count until the update sticks.
 */
320 perf_ibs_event_update(
struct perf_ibs *perf_ibs,
struct perf_event *event,
323 u64 count = perf_ibs->get_count(*config);
330 while (!perf_event_try_update(event, count, 64)) {
331 rdmsrl(event->hw.config_base, *config);
332 count = perf_ibs->get_count(*config);
/*
 * Arm IBS sampling: write the event's base config, the caller-supplied
 * period bits, and the PMU's enable bit into the control MSR in one shot.
 */
336 static inline void perf_ibs_enable_event(
struct perf_ibs *perf_ibs,
339 wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
/*
 * Disarm IBS sampling.  Done as two MSR writes: first clear the count
 * field (keeping the enable bit), then clear the enable bit itself.
 * NOTE(review): the two-step sequence is presumably a hardware-mandated
 * ordering — confirm against the AMD IBS MSR documentation before
 * collapsing into one write.
 */
349 static inline void perf_ibs_disable_event(
struct perf_ibs *perf_ibs,
352 config &= ~perf_ibs->cnt_mask;
353 wrmsrl(hwc->config_base, config);
354 config &= ~perf_ibs->enable_mask;
355 wrmsrl(hwc->config_base, config);
367 struct perf_ibs *perf_ibs =
container_of(event->pmu,
struct perf_ibs,
pmu);
377 perf_ibs_set_period(perf_ibs, hwc, &period);
378 set_bit(IBS_STARTED, pcpu->state);
379 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
/*
 * pmu ->stop() callback: read back the control MSR, mark the per-CPU state
 * as stopping, disable the hardware, and fold the final count into the
 * event.  Several guard statements are elided from this chunk.
 */
384 static void perf_ibs_stop(
struct perf_event *event,
int flags)
387 struct perf_ibs *perf_ibs =
container_of(event->pmu,
struct perf_ibs,
pmu);
388 struct cpu_perf_ibs *pcpu =
this_cpu_ptr(perf_ibs->pcpu);
/* Snapshot the live control MSR before touching it. */
397 rdmsrl(hwc->config_base, config);
/* Flag the stop before disabling so the NMI handler can tell a
 * stopping CPU from a running one. */
400 set_bit(IBS_STOPPING, pcpu->state);
401 perf_ibs_disable_event(perf_ibs, hwc, config);
/* Clear the valid bit so the final update doesn't double-count a
 * sample that the IRQ handler already consumed. */
413 config &= ~perf_ibs->valid_mask;
415 perf_ibs_event_update(perf_ibs, event, &config);
/*
 * pmu ->add() callback: attach the event to this CPU's IBS state
 * (remaining statements elided from this chunk).
 */
419 static int perf_ibs_add(
struct perf_event *event,
int flags)
421 struct perf_ibs *perf_ibs =
container_of(event->pmu,
struct perf_ibs,
pmu);
422 struct cpu_perf_ibs *pcpu =
this_cpu_ptr(perf_ibs->pcpu);
/*
 * pmu ->del() callback: detach the event from this CPU's IBS state
 * (remaining statements elided from this chunk).
 */
437 static void perf_ibs_del(
struct perf_event *event,
int flags)
439 struct perf_ibs *perf_ibs =
container_of(event->pmu,
struct perf_ibs,
pmu);
440 struct cpu_perf_ibs *pcpu =
this_cpu_ptr(perf_ibs->pcpu);
/*
 * pmu ->read() callback.  Intentionally a no-op for IBS: counter state is
 * folded into the event elsewhere (see perf_ibs_event_update callers), so
 * there is nothing to do on an explicit read.
 */
static void perf_ibs_read(struct perf_event *event)
{
}
/* sysfs "format" attributes exposed by the fetch PMU (array terminator and
 * any further entries are elided from this chunk). */
457 static struct attribute *ibs_fetch_format_attrs[] = {
458 &format_attr_rand_en.attr,
/* sysfs "format" attributes for the op PMU; entries elided here (cnt_ctl
 * is appended at runtime in perf_event_ibs_init when supported). */
462 static struct attribute *ibs_op_format_attrs[] = {
/*
 * The instruction-fetch sampling PMU instance.  Wires the shared callbacks
 * (init/start/stop/read) to the fetch-specific masks and count extractor;
 * several initializers (msr, enable/valid/cnt masks, periods) are elided
 * from this chunk.
 */
467 static struct perf_ibs perf_ibs_fetch = {
471 .event_init = perf_ibs_init,
474 .start = perf_ibs_start,
475 .stop = perf_ibs_stop,
476 .read = perf_ibs_read,
479 .config_mask = IBS_FETCH_CONFIG_MASK,
486 .format_attrs = ibs_fetch_format_attrs,
488 .get_count = get_ibs_fetch_count,
/*
 * The op (execution) sampling PMU instance — same shared callbacks as the
 * fetch PMU, with op-specific config mask, attrs, and count extractor;
 * remaining initializers are elided from this chunk.
 */
491 static struct perf_ibs perf_ibs_op = {
495 .event_init = perf_ibs_init,
498 .start = perf_ibs_start,
499 .stop = perf_ibs_stop,
500 .read = perf_ibs_read,
503 .config_mask = IBS_OP_CONFIG_MASK,
510 .format_attrs = ibs_op_format_attrs,
512 .get_count = get_ibs_op_count,
/*
 * Per-PMU NMI handler: consume one IBS sample, fold it into the event,
 * emit a perf sample, and re-arm the hardware.  Large portions (spurious
 * NMI handling, the sample-emission call, throttle handling, the return
 * value) are elided from this chunk — do not modify without the full file.
 */
515 static int perf_ibs_handle_irq(
struct perf_ibs *perf_ibs,
struct pt_regs *iregs)
517 struct cpu_perf_ibs *pcpu =
this_cpu_ptr(perf_ibs->pcpu);
520 struct perf_sample_data
data;
523 struct perf_ibs_data ibs_data;
524 int offset,
size, check_rip, offset_max, throttle = 0;
/* NMI for a PMU that was never started on this CPU: not ours
 * (handling elided). */
528 if (!
test_bit(IBS_STARTED, pcpu->state)) {
538 msr = hwc->config_base;
/* First MSR read carries the valid bit; bail if no sample is latched. */
541 if (!(*buf++ & perf_ibs->valid_mask))
544 config = &ibs_data.regs[0];
545 perf_ibs_event_update(perf_ibs, event, config);
546 perf_sample_data_init(&
data, 0, hwc->last_period);
/* If the period logic says no sample is due, skip emission (elided). */
547 if (!perf_ibs_set_period(perf_ibs, hwc, &period))
550 ibs_data.caps = ibs_caps;
/* Dump the IBS register block: read consecutive MSRs into ibs_data
 * up to offset_max (loop head elided; tail below). */
555 offset_max = perf_ibs->offset_max;
561 rdmsrl(msr + offset, *buf++);
564 perf_ibs->offset_max,
566 }
while (offset < offset_max);
567 ibs_data.size =
sizeof(
u64) * size;
/* RIP handling: default to inexact, then mark exact once the sampled
 * linear IP from ibs_data.regs[1] has been installed. */
571 regs.flags &= ~PERF_EFLAGS_EXACT;
573 set_linear_ip(&
regs, ibs_data.regs[1]);
574 regs.flags |= PERF_EFLAGS_EXACT;
/* Raw sample payload: perf's raw format prepends a u32 size header. */
578 raw.size =
sizeof(
u32) + ibs_data.size;
579 raw.data = ibs_data.data;
/* NOTE(review): elided code decides between these two paths —
 * presumably disable on throttle, otherwise re-arm with the next
 * period; confirm against the full file. */
586 perf_ibs_disable_event(perf_ibs, hwc, *config);
588 perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
600 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
601 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
/*
 * Allocate per-CPU state and register one IBS PMU under the given name.
 * Builds the sysfs attribute groups from ->format_attrs when present.
 * The allocation, registration call, and return paths are elided from
 * this chunk; the ->pcpu reset below appears to be error-path cleanup.
 */
609 static __init int perf_ibs_pmu_init(
struct perf_ibs *perf_ibs,
char *
name)
618 perf_ibs->pcpu = pcpu;
/* Only publish a "format" group if the PMU defines any format attrs. */
621 if (perf_ibs->format_attrs[0]) {
622 memset(&perf_ibs->format_group, 0,
sizeof(perf_ibs->format_group));
623 perf_ibs->format_group.name =
"format";
624 perf_ibs->format_group.attrs = perf_ibs->format_attrs;
626 memset(&perf_ibs->attr_groups, 0,
sizeof(perf_ibs->attr_groups));
627 perf_ibs->attr_groups[0] = &perf_ibs->format_group;
628 perf_ibs->pmu.attr_groups = perf_ibs->attr_groups;
/* NOTE(review): presumably the error path — drop the pcpu pointer after
 * a failed registration/alloc (free elided); confirm with full file. */
633 perf_ibs->pcpu =
NULL;
/*
 * Register both IBS PMUs ("ibs_fetch" and "ibs_op").  The cnt_ctl format
 * attribute is appended to the op attrs at runtime — the capability check
 * guarding it is elided from this chunk.
 */
640 static __init int perf_event_ibs_init(
void)
647 perf_ibs_pmu_init(&perf_ibs_fetch,
"ibs_fetch");
/* Conditionally expose cnt_ctl for the op PMU (guard elided). */
651 *attr++ = &format_attr_cnt_ctl.attr;
653 perf_ibs_pmu_init(&perf_ibs_op,
"ibs_op");
663 static __init int perf_event_ibs_init(
void) {
return 0; }
678 max_level = cpuid_eax(0x80000000);
/*
 * Try to claim the extended APIC LVT entry at @offset for IBS use
 * (body elided in this chunk).
 */
697 static inline int get_eilvt(
int offset)
/*
 * Release a previously claimed extended APIC LVT entry
 * (body elided in this chunk).
 */
702 static inline int put_eilvt(
int offset)
/*
 * Check whether the BIOS programmed a usable IBS LVT offset.  Both error
 * messages carry FW_BUG since an invalid/unavailable offset indicates
 * broken firmware; surrounding logic is elided from this chunk.
 */
710 static inline int ibs_eilvt_valid(
void)
722 pr_err(
FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
/* Offset looked valid but the LVT entry could not be claimed. */
727 if (!get_eilvt(offset)) {
728 pr_err(
FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
/*
 * Program the IBS control register (via PCI config space on each
 * northbridge device) with the chosen LVT offset, then read it back to
 * verify the write took effect.  Device iteration is elided from this
 * chunk.
 */
740 static int setup_ibs_ctl(
int ibs_eilvt_off)
755 pci_write_config_dword(cpu_cfg,
IBSCTL, ibs_eilvt_off
756 | IBSCTL_LVT_OFFSET_VALID);
/* Read-back verification: some hardware may not accept the write. */
757 pci_read_config_dword(cpu_cfg,
IBSCTL, &value);
758 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
761 "IBSCTL = 0x%08x\n", value);
/*
 * Work around missing BIOS setup: search for a free extended LVT offset,
 * program it into IBSCTL, and verify the result.  The search loop and
 * error/cleanup paths are elided from this chunk.
 */
782 static int force_ibs_eilvt_setup(
void)
/* Probe candidate offsets until one can be claimed (loop elided). */
790 if (get_eilvt(offset))
/* All extended LVT entries exhausted — give up. */
795 if (offset == APIC_EILVT_NR_MAX) {
800 ret = setup_ibs_ctl(offset);
804 if (!ibs_eilvt_valid()) {
809 pr_info(
"IBS: LVT offset %d assigned\n", offset);
/*
 * Read the LVT offset currently programmed for IBS; fails (return path
 * elided) when the valid bit is not set in the control value.
 */
819 static inline int get_ibs_lvt_offset(
void)
824 if (!(val & IBSCTL_LVT_OFFSET_VALID))
/*
 * Per-CPU callback (suitable for on_each_cpu-style invocation, hence the
 * unused @dummy): program the local APIC with the IBS LVT offset; warns
 * when no valid offset is available.  APIC setup call elided.
 */
830 static void setup_APIC_ibs(
void *
dummy)
834 offset = get_ibs_lvt_offset();
841 pr_warn(
"perf: IBS APIC setup failed on cpu #%d\n",
/*
 * Per-CPU callback mirroring setup_APIC_ibs(): tear down the IBS LVT
 * entry on this CPU (the APIC masking call is elided from this chunk).
 */
845 static void clear_APIC_ibs(
void *dummy)
849 offset = get_ibs_lvt_offset();
859 setup_APIC_ibs(
NULL);
862 clear_APIC_ibs(
NULL);
/*
 * Driver entry point: detect IBS capabilities, repair the LVT setup if the
 * BIOS did not provide one, and register the IBS PMUs.  Capability checks
 * and notifier registration are elided from this chunk.
 */
871 static __init int amd_ibs_init(
void)
876 caps = __get_ibs_caps();
/* BIOS did not set things up — attempt the manual workaround. */
887 force_ibs_eilvt_setup();
889 if (!ibs_eilvt_valid())
900 ret = perf_event_ibs_init();
903 pr_err(
"Failed to setup IBS, %d\n", ret);