1 #include <linux/perf_event.h>
2 #include <linux/export.h>
3 #include <linux/types.h>
5 #include <linux/slab.h>
17 [
C(RESULT_ACCESS) ] = 0x0040,
18 [
C(RESULT_MISS) ] = 0x0141,
21 [
C(RESULT_ACCESS) ] = 0x0142,
22 [
C(RESULT_MISS) ] = 0,
24 [
C(OP_PREFETCH) ] = {
25 [
C(RESULT_ACCESS) ] = 0x0267,
26 [
C(RESULT_MISS) ] = 0x0167,
31 [
C(RESULT_ACCESS) ] = 0x0080,
32 [
C(RESULT_MISS) ] = 0x0081,
35 [
C(RESULT_ACCESS) ] = -1,
36 [
C(RESULT_MISS) ] = -1,
38 [
C(OP_PREFETCH) ] = {
39 [
C(RESULT_ACCESS) ] = 0x014B,
40 [
C(RESULT_MISS) ] = 0,
45 [
C(RESULT_ACCESS) ] = 0x037D,
46 [
C(RESULT_MISS) ] = 0x037E,
49 [
C(RESULT_ACCESS) ] = 0x017F,
50 [
C(RESULT_MISS) ] = 0,
52 [
C(OP_PREFETCH) ] = {
53 [
C(RESULT_ACCESS) ] = 0,
54 [
C(RESULT_MISS) ] = 0,
59 [
C(RESULT_ACCESS) ] = 0x0040,
60 [
C(RESULT_MISS) ] = 0x0746,
63 [
C(RESULT_ACCESS) ] = 0,
64 [
C(RESULT_MISS) ] = 0,
66 [
C(OP_PREFETCH) ] = {
67 [
C(RESULT_ACCESS) ] = 0,
68 [
C(RESULT_MISS) ] = 0,
73 [
C(RESULT_ACCESS) ] = 0x0080,
74 [
C(RESULT_MISS) ] = 0x0385,
77 [
C(RESULT_ACCESS) ] = -1,
78 [
C(RESULT_MISS) ] = -1,
80 [
C(OP_PREFETCH) ] = {
81 [
C(RESULT_ACCESS) ] = -1,
82 [
C(RESULT_MISS) ] = -1,
87 [
C(RESULT_ACCESS) ] = 0x00c2,
88 [
C(RESULT_MISS) ] = 0x00c3,
91 [
C(RESULT_ACCESS) ] = -1,
92 [
C(RESULT_MISS) ] = -1,
94 [
C(OP_PREFETCH) ] = {
95 [
C(RESULT_ACCESS) ] = -1,
96 [
C(RESULT_MISS) ] = -1,
101 [
C(RESULT_ACCESS) ] = 0xb8e9,
102 [
C(RESULT_MISS) ] = 0x98e9,
105 [
C(RESULT_ACCESS) ] = -1,
106 [
C(RESULT_MISS) ] = -1,
108 [
C(OP_PREFETCH) ] = {
109 [
C(RESULT_ACCESS) ] = -1,
110 [
C(RESULT_MISS) ] = -1,
118 static const u64 amd_perfmon_event_map[] =
130 static u64 amd_pmu_event_map(
int hw_event)
132 return amd_perfmon_event_map[hw_event];
147 if (has_branch_stack(event))
150 if (event->
attr.exclude_host && event->
attr.exclude_guest)
158 else if (event->
attr.exclude_host)
160 else if (event->
attr.exclude_guest)
174 static inline unsigned int amd_get_event_code(
struct hw_perf_event *hwc)
176 return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
181 return (hwc->config & 0xe0) == 0xe0;
188 return nb && nb->
nb_id != -1;
191 static void amd_put_event_constraints(
struct cpu_hw_events *cpuc,
201 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
266 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
279 for (i = 0; i <
max; i++) {
283 if (k == -1 && !nb->
owners[i])
287 if (nb->
owners[i] == event)
294 if (hwc->idx != -1) {
297 }
else if (k != -1) {
323 static struct amd_nb *amd_alloc_nb(
int cpu)
345 static int amd_pmu_cpu_prepare(
int cpu)
354 cpuc->
amd_nb = amd_alloc_nb(cpu);
361 static void amd_pmu_cpu_starting(
int cpu)
380 if (nb->
nb_id == nb_id) {
391 static void amd_pmu_cpu_dead(
int cpu)
416 static struct attribute *amd_format_attr[] = {
417 &format_attr_event.attr,
418 &format_attr_umask.attr,
419 &format_attr_edge.attr,
420 &format_attr_inv.attr,
421 &format_attr_cmask.attr,
/*
 * AMD family 15h counter constraint helpers: bits 7:4 of the event code
 * identify the functional unit an event originates from.  The "a ... b"
 * forms are GCC case-range constants, expanded inside switch statements
 * when selecting per-unit counter constraints.
 */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
503 unsigned int event_code = amd_get_event_code(hwc);
507 switch (event_code) {
509 if (!(hwc->config & 0x0000F000ULL))
511 if (!(hwc->config & 0x00000F00ULL))
513 return &amd_f15_PMC3;
517 return &amd_f15_PMC3;
521 return &amd_f15_PMC3;
523 return &amd_f15_PMC53;
527 switch (event_code) {
534 return &amd_f15_PMC20;
536 return &amd_f15_PMC3;
538 return &amd_f15_PMC30;
541 return &amd_f15_PMC20;
544 return &amd_f15_PMC53;
546 return &amd_f15_PMC50;
551 switch (event_code) {
555 return &amd_f15_PMC0;
556 case 0x0DB ... 0x0DF:
559 return &amd_f15_PMC50;
561 return &amd_f15_PMC20;
577 .disable = x86_pmu_disable_event,
578 .hw_config = amd_pmu_hw_config,
582 .event_map = amd_pmu_event_map,
583 .max_events =
ARRAY_SIZE(amd_perfmon_event_map),
586 .cntval_mask = (1ULL << 48) - 1,
590 .get_event_constraints = amd_get_event_constraints,
591 .put_event_constraints = amd_put_event_constraints,
593 .format_attrs = amd_format_attr,
595 .cpu_prepare = amd_pmu_cpu_prepare,
596 .cpu_starting = amd_pmu_cpu_starting,
597 .cpu_dead = amd_pmu_cpu_dead,
600 static int setup_event_constraints(
void)
607 static int setup_perfctr_core(
void)
609 if (!cpu_has_perfctr_core) {
611 KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
616 KERN_ERR "hw perf events core counters need constraints handler!");
640 setup_event_constraints();
641 setup_perfctr_core();