8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/stddef.h>
11 #include <linux/types.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
16 #include <asm/hardirq.h>
47 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
65 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
81 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
87 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
110 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
122 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
130 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
136 static u64 intel_pmu_event_map(
int hw_event)
138 return intel_perfmon_event_map[hw_event];
/*
 * Sandy Bridge OFF_CORE_RESPONSE MSR bit layout.
 *
 * These bits select request type (demand/prefetch read, RFO, ifetch, ...),
 * supplier/response info and snoop results; they are combined below into
 * the encodings used by the LLC entries of the cache-events table.
 */
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)	/* all remote-supplier bits */
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

/* Request-type groups: demand accesses include the matching LLC prefetches. */
#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

/*
 * Any snoop outcome.  NOTE(review): the trailing SNB_HITM term was lost in
 * extraction and has been restored from the upstream kernel source — confirm.
 */
#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

/* L3 accesses count any response; L3 misses are served by DRAM or non-DRAM. */
#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
198 [
C(OP_PREFETCH) ] = {
212 [
C(OP_PREFETCH) ] = {
226 [
C(RESULT_ACCESS) ] = 0xf1d0,
227 [
C(RESULT_MISS) ] = 0x0151,
230 [
C(RESULT_ACCESS) ] = 0xf2d0,
231 [
C(RESULT_MISS) ] = 0x0851,
233 [
C(OP_PREFETCH) ] = {
234 [
C(RESULT_ACCESS) ] = 0x0,
235 [
C(RESULT_MISS) ] = 0x024e,
240 [
C(RESULT_ACCESS) ] = 0x0,
241 [
C(RESULT_MISS) ] = 0x0280,
244 [
C(RESULT_ACCESS) ] = -1,
245 [
C(RESULT_MISS) ] = -1,
247 [
C(OP_PREFETCH) ] = {
248 [
C(RESULT_ACCESS) ] = 0x0,
249 [
C(RESULT_MISS) ] = 0x0,
255 [
C(RESULT_ACCESS) ] = 0x01b7,
257 [
C(RESULT_MISS) ] = 0x01b7,
261 [
C(RESULT_ACCESS) ] = 0x01b7,
263 [
C(RESULT_MISS) ] = 0x01b7,
265 [
C(OP_PREFETCH) ] = {
267 [
C(RESULT_ACCESS) ] = 0x01b7,
269 [
C(RESULT_MISS) ] = 0x01b7,
274 [
C(RESULT_ACCESS) ] = 0x81d0,
275 [
C(RESULT_MISS) ] = 0x0108,
278 [
C(RESULT_ACCESS) ] = 0x82d0,
279 [
C(RESULT_MISS) ] = 0x0149,
281 [
C(OP_PREFETCH) ] = {
282 [
C(RESULT_ACCESS) ] = 0x0,
283 [
C(RESULT_MISS) ] = 0x0,
288 [
C(RESULT_ACCESS) ] = 0x1085,
289 [
C(RESULT_MISS) ] = 0x0185,
292 [
C(RESULT_ACCESS) ] = -1,
293 [
C(RESULT_MISS) ] = -1,
295 [
C(OP_PREFETCH) ] = {
296 [
C(RESULT_ACCESS) ] = -1,
297 [
C(RESULT_MISS) ] = -1,
302 [
C(RESULT_ACCESS) ] = 0x00c4,
303 [
C(RESULT_MISS) ] = 0x00c5,
306 [
C(RESULT_ACCESS) ] = -1,
307 [
C(RESULT_MISS) ] = -1,
309 [
C(OP_PREFETCH) ] = {
310 [
C(RESULT_ACCESS) ] = -1,
311 [
C(RESULT_MISS) ] = -1,
316 [
C(RESULT_ACCESS) ] = 0x01b7,
317 [
C(RESULT_MISS) ] = 0x01b7,
320 [
C(RESULT_ACCESS) ] = 0x01b7,
321 [
C(RESULT_MISS) ] = 0x01b7,
323 [
C(OP_PREFETCH) ] = {
324 [
C(RESULT_ACCESS) ] = 0x01b7,
325 [
C(RESULT_MISS) ] = 0x01b7,
338 [
C(RESULT_ACCESS) ] = 0x010b,
339 [
C(RESULT_MISS) ] = 0x0151,
342 [
C(RESULT_ACCESS) ] = 0x020b,
343 [
C(RESULT_MISS) ] = 0x0251,
345 [
C(OP_PREFETCH) ] = {
346 [
C(RESULT_ACCESS) ] = 0x014e,
347 [
C(RESULT_MISS) ] = 0x024e,
352 [
C(RESULT_ACCESS) ] = 0x0380,
353 [
C(RESULT_MISS) ] = 0x0280,
356 [
C(RESULT_ACCESS) ] = -1,
357 [
C(RESULT_MISS) ] = -1,
359 [
C(OP_PREFETCH) ] = {
360 [
C(RESULT_ACCESS) ] = 0x0,
361 [
C(RESULT_MISS) ] = 0x0,
367 [
C(RESULT_ACCESS) ] = 0x01b7,
369 [
C(RESULT_MISS) ] = 0x01b7,
377 [
C(RESULT_ACCESS) ] = 0x01b7,
379 [
C(RESULT_MISS) ] = 0x01b7,
381 [
C(OP_PREFETCH) ] = {
383 [
C(RESULT_ACCESS) ] = 0x01b7,
385 [
C(RESULT_MISS) ] = 0x01b7,
390 [
C(RESULT_ACCESS) ] = 0x010b,
391 [
C(RESULT_MISS) ] = 0x0108,
394 [
C(RESULT_ACCESS) ] = 0x020b,
395 [
C(RESULT_MISS) ] = 0x010c,
397 [
C(OP_PREFETCH) ] = {
398 [
C(RESULT_ACCESS) ] = 0x0,
399 [
C(RESULT_MISS) ] = 0x0,
404 [
C(RESULT_ACCESS) ] = 0x01c0,
405 [
C(RESULT_MISS) ] = 0x0185,
408 [
C(RESULT_ACCESS) ] = -1,
409 [
C(RESULT_MISS) ] = -1,
411 [
C(OP_PREFETCH) ] = {
412 [
C(RESULT_ACCESS) ] = -1,
413 [
C(RESULT_MISS) ] = -1,
418 [
C(RESULT_ACCESS) ] = 0x00c4,
419 [
C(RESULT_MISS) ] = 0x03e8,
422 [
C(RESULT_ACCESS) ] = -1,
423 [
C(RESULT_MISS) ] = -1,
425 [
C(OP_PREFETCH) ] = {
426 [
C(RESULT_ACCESS) ] = -1,
427 [
C(RESULT_MISS) ] = -1,
432 [
C(RESULT_ACCESS) ] = 0x01b7,
433 [
C(RESULT_MISS) ] = 0x01b7,
436 [
C(RESULT_ACCESS) ] = 0x01b7,
437 [
C(RESULT_MISS) ] = 0x01b7,
439 [
C(OP_PREFETCH) ] = {
440 [
C(RESULT_ACCESS) ] = 0x01b7,
441 [
C(RESULT_MISS) ] = 0x01b7,
/*
 * Nehalem/Westmere OFF_CORE_RESPONSE MSR bit layout.
 *
 * Low byte selects the request type, high byte the response source;
 * combined below into L3 hit/miss/access encodings for the cache-events
 * table.
 */
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* bit 11 reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

/* Node-locality groups used by the node cache events. */
#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

/* Request-type groups. */
#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

/* L3 response groups: hit in uncore/another core, or miss to (non-)DRAM. */
#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
493 [
C(OP_PREFETCH) ] = {
507 [
C(OP_PREFETCH) ] = {
521 [
C(RESULT_ACCESS) ] = 0x010b,
522 [
C(RESULT_MISS) ] = 0x0151,
525 [
C(RESULT_ACCESS) ] = 0x020b,
526 [
C(RESULT_MISS) ] = 0x0251,
528 [
C(OP_PREFETCH) ] = {
529 [
C(RESULT_ACCESS) ] = 0x014e,
530 [
C(RESULT_MISS) ] = 0x024e,
535 [
C(RESULT_ACCESS) ] = 0x0380,
536 [
C(RESULT_MISS) ] = 0x0280,
539 [
C(RESULT_ACCESS) ] = -1,
540 [
C(RESULT_MISS) ] = -1,
542 [
C(OP_PREFETCH) ] = {
543 [
C(RESULT_ACCESS) ] = 0x0,
544 [
C(RESULT_MISS) ] = 0x0,
550 [
C(RESULT_ACCESS) ] = 0x01b7,
552 [
C(RESULT_MISS) ] = 0x01b7,
560 [
C(RESULT_ACCESS) ] = 0x01b7,
562 [
C(RESULT_MISS) ] = 0x01b7,
564 [
C(OP_PREFETCH) ] = {
566 [
C(RESULT_ACCESS) ] = 0x01b7,
568 [
C(RESULT_MISS) ] = 0x01b7,
573 [
C(RESULT_ACCESS) ] = 0x0f40,
574 [
C(RESULT_MISS) ] = 0x0108,
577 [
C(RESULT_ACCESS) ] = 0x0f41,
578 [
C(RESULT_MISS) ] = 0x010c,
580 [
C(OP_PREFETCH) ] = {
581 [
C(RESULT_ACCESS) ] = 0x0,
582 [
C(RESULT_MISS) ] = 0x0,
587 [
C(RESULT_ACCESS) ] = 0x01c0,
588 [
C(RESULT_MISS) ] = 0x20c8,
591 [
C(RESULT_ACCESS) ] = -1,
592 [
C(RESULT_MISS) ] = -1,
594 [
C(OP_PREFETCH) ] = {
595 [
C(RESULT_ACCESS) ] = -1,
596 [
C(RESULT_MISS) ] = -1,
601 [
C(RESULT_ACCESS) ] = 0x00c4,
602 [
C(RESULT_MISS) ] = 0x03e8,
605 [
C(RESULT_ACCESS) ] = -1,
606 [
C(RESULT_MISS) ] = -1,
608 [
C(OP_PREFETCH) ] = {
609 [
C(RESULT_ACCESS) ] = -1,
610 [
C(RESULT_MISS) ] = -1,
615 [
C(RESULT_ACCESS) ] = 0x01b7,
616 [
C(RESULT_MISS) ] = 0x01b7,
619 [
C(RESULT_ACCESS) ] = 0x01b7,
620 [
C(RESULT_MISS) ] = 0x01b7,
622 [
C(OP_PREFETCH) ] = {
623 [
C(RESULT_ACCESS) ] = 0x01b7,
624 [
C(RESULT_MISS) ] = 0x01b7,
636 [
C(RESULT_ACCESS) ] = 0x0f40,
637 [
C(RESULT_MISS) ] = 0x0140,
640 [
C(RESULT_ACCESS) ] = 0x0f41,
641 [
C(RESULT_MISS) ] = 0x0141,
643 [
C(OP_PREFETCH) ] = {
644 [
C(RESULT_ACCESS) ] = 0x104e,
645 [
C(RESULT_MISS) ] = 0,
650 [
C(RESULT_ACCESS) ] = 0x0080,
651 [
C(RESULT_MISS) ] = 0x0081,
654 [
C(RESULT_ACCESS) ] = -1,
655 [
C(RESULT_MISS) ] = -1,
657 [
C(OP_PREFETCH) ] = {
658 [
C(RESULT_ACCESS) ] = 0,
659 [
C(RESULT_MISS) ] = 0,
664 [
C(RESULT_ACCESS) ] = 0x4f29,
665 [
C(RESULT_MISS) ] = 0x4129,
668 [
C(RESULT_ACCESS) ] = 0x4f2A,
669 [
C(RESULT_MISS) ] = 0x412A,
671 [
C(OP_PREFETCH) ] = {
672 [
C(RESULT_ACCESS) ] = 0,
673 [
C(RESULT_MISS) ] = 0,
678 [
C(RESULT_ACCESS) ] = 0x0f40,
679 [
C(RESULT_MISS) ] = 0x0208,
682 [
C(RESULT_ACCESS) ] = 0x0f41,
683 [
C(RESULT_MISS) ] = 0x0808,
685 [
C(OP_PREFETCH) ] = {
686 [
C(RESULT_ACCESS) ] = 0,
687 [
C(RESULT_MISS) ] = 0,
692 [
C(RESULT_ACCESS) ] = 0x00c0,
693 [
C(RESULT_MISS) ] = 0x1282,
696 [
C(RESULT_ACCESS) ] = -1,
697 [
C(RESULT_MISS) ] = -1,
699 [
C(OP_PREFETCH) ] = {
700 [
C(RESULT_ACCESS) ] = -1,
701 [
C(RESULT_MISS) ] = -1,
706 [
C(RESULT_ACCESS) ] = 0x00c4,
707 [
C(RESULT_MISS) ] = 0x00c5,
710 [
C(RESULT_ACCESS) ] = -1,
711 [
C(RESULT_MISS) ] = -1,
713 [
C(OP_PREFETCH) ] = {
714 [
C(RESULT_ACCESS) ] = -1,
715 [
C(RESULT_MISS) ] = -1,
727 [
C(RESULT_ACCESS) ] = 0x2140,
728 [
C(RESULT_MISS) ] = 0,
731 [
C(RESULT_ACCESS) ] = 0x2240,
732 [
C(RESULT_MISS) ] = 0,
734 [
C(OP_PREFETCH) ] = {
735 [
C(RESULT_ACCESS) ] = 0x0,
736 [
C(RESULT_MISS) ] = 0,
741 [
C(RESULT_ACCESS) ] = 0x0380,
742 [
C(RESULT_MISS) ] = 0x0280,
745 [
C(RESULT_ACCESS) ] = -1,
746 [
C(RESULT_MISS) ] = -1,
748 [
C(OP_PREFETCH) ] = {
749 [
C(RESULT_ACCESS) ] = 0,
750 [
C(RESULT_MISS) ] = 0,
755 [
C(RESULT_ACCESS) ] = 0x4f29,
756 [
C(RESULT_MISS) ] = 0x4129,
759 [
C(RESULT_ACCESS) ] = 0x4f2A,
760 [
C(RESULT_MISS) ] = 0x412A,
762 [
C(OP_PREFETCH) ] = {
763 [
C(RESULT_ACCESS) ] = 0,
764 [
C(RESULT_MISS) ] = 0,
769 [
C(RESULT_ACCESS) ] = 0x2140,
770 [
C(RESULT_MISS) ] = 0x0508,
773 [
C(RESULT_ACCESS) ] = 0x2240,
774 [
C(RESULT_MISS) ] = 0x0608,
776 [
C(OP_PREFETCH) ] = {
777 [
C(RESULT_ACCESS) ] = 0,
778 [
C(RESULT_MISS) ] = 0,
783 [
C(RESULT_ACCESS) ] = 0x00c0,
784 [
C(RESULT_MISS) ] = 0x0282,
787 [
C(RESULT_ACCESS) ] = -1,
788 [
C(RESULT_MISS) ] = -1,
790 [
C(OP_PREFETCH) ] = {
791 [
C(RESULT_ACCESS) ] = -1,
792 [
C(RESULT_MISS) ] = -1,
797 [
C(RESULT_ACCESS) ] = 0x00c4,
798 [
C(RESULT_MISS) ] = 0x00c5,
801 [
C(RESULT_ACCESS) ] = -1,
802 [
C(RESULT_MISS) ] = -1,
804 [
C(OP_PREFETCH) ] = {
805 [
C(RESULT_ACCESS) ] = -1,
806 [
C(RESULT_MISS) ] = -1,
814 if (has_branch_stack(event))
824 static void intel_pmu_disable_all(
void)
837 static void intel_pmu_enable_all(
int added)
871 static void intel_pmu_nhm_workaround(
void)
874 static const unsigned long nhm_magic[4] = {
905 for (i = 0; i < 4; i++) {
911 for (i = 0; i < 4; i++) {
919 for (i = 0; i < 4; i++) {
924 __x86_pmu_enable_event(&event->hw,
931 static void intel_pmu_nhm_enable_all(
int added)
934 intel_pmu_nhm_workaround();
935 intel_pmu_enable_all(added);
938 static inline u64 intel_pmu_get_status(
void)
947 static inline void intel_pmu_ack_status(
u64 ack)
952 static void intel_pmu_disable_fixed(
struct hw_perf_event *hwc)
957 mask = 0xfULL << (idx * 4);
959 rdmsrl(hwc->config_base, ctrl_val);
961 wrmsrl(hwc->config_base, ctrl_val);
964 static void intel_pmu_disable_event(
struct perf_event *event)
982 if (intel_pmu_needs_lbr_smpl(event))
986 intel_pmu_disable_fixed(hwc);
990 x86_pmu_disable_event(event);
996 static void intel_pmu_enable_fixed(
struct hw_perf_event *hwc)
1019 mask = 0xfULL << (idx * 4);
1021 rdmsrl(hwc->config_base, ctrl_val);
1024 wrmsrl(hwc->config_base, ctrl_val);
1027 static void intel_pmu_enable_event(
struct perf_event *event)
1043 if (intel_pmu_needs_lbr_smpl(event))
1046 if (event->
attr.exclude_host)
1048 if (event->
attr.exclude_guest)
1052 intel_pmu_enable_fixed(hwc);
1072 static void intel_pmu_reset(
void)
1075 unsigned long flags;
1086 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1087 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1102 static int intel_pmu_handle_irq(
struct pt_regs *
regs)
1104 struct perf_sample_data
data;
1122 intel_pmu_disable_all();
1124 status = intel_pmu_get_status();
1126 intel_pmu_enable_all(0);
1132 intel_pmu_ack_status(status);
1133 if (++loops > 100) {
1134 WARN_ONCE(1,
"perfevents: irq loop stuck!\n");
1163 perf_sample_data_init(&
data, 0, event->hw.last_period);
1165 if (has_branch_stack(event))
1175 status = intel_pmu_get_status();
1180 intel_pmu_enable_all(0);
1185 intel_bts_constraints(
struct perf_event *event)
1188 unsigned int hw_event, bts_event;
1190 if (event->
attr.freq)
1196 if (
unlikely(hw_event == bts_event && hwc->sample_period == 1))
1202 static int intel_alt_er(
int idx)
1216 static void intel_fixup_er(
struct perf_event *event,
int idx)
1218 event->hw.extra_reg.idx =
idx;
1222 event->hw.config |= 0x01b7;
1226 event->hw.config |= 0x01bb;
1239 __intel_shared_reg_get_constraints(
struct cpu_hw_events *cpuc,
1245 unsigned long flags;
1277 if (idx != reg->
idx)
1278 intel_fixup_er(event, idx);
1302 idx = intel_alt_er(idx);
1303 if (idx != reg->
idx) {
1314 __intel_shared_reg_put_constraints(
struct cpu_hw_events *cpuc,
1346 xreg = &
event->hw.extra_reg;
1348 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1352 breg = &
event->hw.branch_reg;
1354 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1356 __intel_shared_reg_put_constraints(cpuc, xreg);
1370 if ((event->hw.config & c->
cmask) == c->
code)
1383 c = intel_bts_constraints(event);
1391 c = intel_shared_regs_constraints(cpuc, event);
1399 intel_put_shared_regs_event_constraints(
struct cpu_hw_events *cpuc,
1404 reg = &
event->hw.extra_reg;
1406 __intel_shared_reg_put_constraints(cpuc, reg);
1408 reg = &
event->hw.branch_reg;
1410 __intel_shared_reg_put_constraints(cpuc, reg);
1413 static void intel_put_event_constraints(
struct cpu_hw_events *cpuc,
1416 intel_put_shared_regs_event_constraints(cpuc, event);
1419 static void intel_pebs_aliases_core2(
struct perf_event *event)
1440 u64 alt_config =
X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1443 event->hw.config = alt_config;
1447 static void intel_pebs_aliases_snb(
struct perf_event *event)
1468 u64 alt_config =
X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1471 event->hw.config = alt_config;
1475 static int intel_pmu_hw_config(
struct perf_event *event)
1485 if (intel_pmu_needs_lbr_smpl(event)) {
1517 static struct perf_guest_switch_msr *intel_guest_get_msrs(
int *
nr)
1538 static struct perf_guest_switch_msr *core_guest_get_msrs(
int *
nr)
1547 arr[
idx].msr = x86_pmu_config_addr(idx);
1548 arr[
idx].host = arr[
idx].guest = 0;
1553 arr[
idx].host = arr[
idx].guest =
1556 if (event->
attr.exclude_host)
1558 else if (event->
attr.exclude_guest)
1566 static void core_pmu_enable_event(
struct perf_event *event)
1568 if (!event->
attr.exclude_host)
1572 static void core_pmu_enable_all(
int added)
1581 cpuc->
events[idx]->attr.exclude_host)
1596 static struct attribute *intel_arch_formats_attr[] = {
1597 &format_attr_event.attr,
1598 &format_attr_umask.attr,
1599 &format_attr_edge.attr,
1600 &format_attr_pc.attr,
1601 &format_attr_inv.attr,
1602 &format_attr_cmask.attr,
1610 .enable_all = core_pmu_enable_all,
1611 .enable = core_pmu_enable_event,
1612 .disable = x86_pmu_disable_event,
1617 .event_map = intel_pmu_event_map,
1618 .max_events =
ARRAY_SIZE(intel_perfmon_event_map),
1625 .max_period = (1ULL << 31) - 1,
1652 static int intel_pmu_cpu_prepare(
int cpu)
1666 static void intel_pmu_cpu_starting(
int cpu)
1688 if (pc && pc->
core_id == core_id) {
1702 static void intel_pmu_cpu_dying(
int cpu)
1717 static void intel_pmu_flush_branch_stack(
void)
1731 static struct attribute *intel_arch3_formats_attr[] = {
1732 &format_attr_event.attr,
1733 &format_attr_umask.attr,
1734 &format_attr_edge.attr,
1735 &format_attr_pc.attr,
1736 &format_attr_any.attr,
1737 &format_attr_inv.attr,
1738 &format_attr_cmask.attr,
1740 &format_attr_offcore_rsp.attr,
1746 .handle_irq = intel_pmu_handle_irq,
1747 .disable_all = intel_pmu_disable_all,
1748 .enable_all = intel_pmu_enable_all,
1749 .enable = intel_pmu_enable_event,
1750 .disable = intel_pmu_disable_event,
1751 .hw_config = intel_pmu_hw_config,
1755 .event_map = intel_pmu_event_map,
1756 .max_events =
ARRAY_SIZE(intel_perfmon_event_map),
1763 .max_period = (1ULL << 31) - 1,
1777 static __init void intel_clovertown_quirk(
void)
1798 pr_warn(
"PEBS disabled due to CPU errata\n");
1803 static int intel_snb_pebs_broken(
int cpu)
1814 case 6: rev = 0x618;
break;
1815 case 7: rev = 0x70c;
break;
1822 static void intel_snb_check_microcode(
void)
1829 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
1841 pr_info(
"PEBS enabled due to microcode update\n");
1844 pr_info(
"PEBS disabled due to CPU errata, please upgrade microcode\n");
1849 static __init void intel_sandybridge_quirk(
void)
1852 intel_snb_check_microcode();
1865 static __init void intel_arch_events_quirk(
void)
1871 intel_perfmon_event_map[intel_arch_events_map[
bit].id] = 0;
1872 pr_warn(
"CPUID marked event: \'%s\' unavailable\n",
1873 intel_arch_events_map[bit].
name);
1877 static __init void intel_nehalem_quirk(
void)
1882 if (
ebx.split.no_branch_misses_retired) {
1890 ebx.split.no_branch_misses_retired = 0;
1892 pr_info(
"CPU erratum AAJ80 worked around\n");
1925 version = eax.
split.version_id;
2075 pr_cont(
"SandyBridge events, ");
2097 pr_cont(
"IvyBridge events, ");
2105 pr_cont(
"generic architected perfmon v1, ");
2112 pr_cont(
"generic architected perfmon, ");
2118 WARN(1,
KERN_ERR "hw perf events %d > max(%d), clipping!",
2125 WARN(1,
KERN_ERR "hw perf events fixed %d > max(%d), clipping!",