Linux Kernel 3.7.1
perf_event_intel_ds.c
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	PAGE_SIZE

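/*
 * Layouts of the hardware-written PEBS records. Which one applies is
 * chosen at boot by intel_ds_init() below, based on the PEBS format the
 * CPU reports: fmt0 uses pebs_record_core, fmt1 uses pebs_record_nhm.
 */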
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

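/*
 * Point this CPU's IA32_DS_AREA MSR at its debug store so the hardware
 * knows where to deposit BTS and PEBS records; fini_debug_store_on_cpu()
 * clears the MSR again.
 */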
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

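/*
 * PEBS buffer setup. The interrupt threshold is left at a single record
 * (thresh = 1), so every PEBS assist raises a PMI and records are
 * drained one at a time.
 */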
static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh = 1; /* always use a single PEBS record */
	void *buffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
		thresh * x86_pmu.pebs_record_size;

	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

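/*
 * BTS buffer setup: room for BTS_BUFFER_SIZE / BTS_RECORD_SIZE branch
 * records, with the interrupt threshold placed one sixteenth of the
 * buffer below the end so the PMI fires before the buffer overflows.
 */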
static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

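/*
 * The debug_store itself is a small per-CPU control block; the BTS and
 * PEBS buffers allocated above are attached to it through the
 * *_buffer_base fields.
 */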
static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

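/*
 * Tear down the DS machinery on every CPU: clear the IA32_DS_AREA MSRs
 * first, then free the PEBS, BTS and debug_store allocations.
 */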
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

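/*
 * Allocate the per-CPU debug stores and their BTS/PEBS buffers. BTS and
 * PEBS failures are tracked separately: if only one of the two cannot be
 * set up, the other is still activated; only when both fail are the
 * debug stores freed again.
 */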
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

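/*
 * Enable branch tracing in IA32_DEBUGCTL: TR turns on branch recording,
 * BTS steers the records into the DS buffer and BTINT raises an
 * interrupt when the buffer reaches its threshold. The OFF_OS/OFF_USR
 * bits mask out whichever privilege levels the event did not request.
 */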
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

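/*
 * Flush the BTS buffer into the perf ring buffer: each hardware record
 * (from, to, flags) becomes one sample with data.ip = from and
 * data.addr = to, attributed to the event in the fixed BTS counter slot.
 */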
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return 0;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);
	regs.ip = 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size * (top - at)))
		return 1;

	for (; at < top; at++) {
		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
	return 1;
}

/*
 * PEBS
 */
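/*
 * Per-microarchitecture tables of the events (and, via the second
 * argument, the counters) allowed to use PEBS; intel_pebs_constraints()
 * below matches a precise event against the table installed for the
 * running CPU model.
 */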
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &emptyconstraint;
}

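/*
 * Arm PEBS for one event: record the counter's bit in pebs_enabled
 * (written to MSR_IA32_PEBS_ENABLE by the enable/disable paths below)
 * and clear the EVENTSEL interrupt bit, since the PMI is now raised by
 * the PEBS assist rather than by the ordinary counter overflow.
 */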
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

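/*
 * On trap-like PEBS implementations the reported IP is that of the
 * instruction *after* the one that caused the event. To hand out an
 * exact IP, rewind by one instruction: start at the last LBR branch
 * target and decode forward until the instruction just before the
 * sampled IP is reached.
 */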
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PAGE_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	do {
		struct insn insn;
		u8 buf[MAX_INSN_SIZE];
		void *kaddr;

		old_to = to;
		if (!kernel_ip(ip)) {
			int bytes, size = MAX_INSN_SIZE;

			bytes = copy_from_user_nmi(buf, (void __user *)to, size);
			if (bytes != size)
				return 0;

			kaddr = buf;
		} else
			kaddr = (void *)to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, is_64bit);
		insn_get_length(&insn);
		to += insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

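/*
 * Turn a single PEBS record into a perf sample: the interrupt regs are
 * used as a base, IP/BP/SP are overridden from the record, and
 * PERF_EFLAGS_EXACT is set when precise_ip > 1 and the fixup above
 * succeeded.
 */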
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs)
{
	/*
	 * We cast to pebs_record_core since that is a subset of
	 * both formats and we don't use the other fields in this
	 * routine.
	 */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct pebs_record_core *pebs = __pebs;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!intel_pmu_save_and_restart(event))
		return;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * We use the interrupt regs as a base because the PEBS record
	 * does not contain a full regs set, specifically it seems to
	 * lack segment descriptors, which get used by things like
	 * user_mode().
	 *
	 * In the simple case fix up only the IP and BP,SP regs, for
	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
	 */
	regs = *iregs;
	regs.flags = pebs->flags;
	set_linear_ip(&regs, pebs->ip);
	regs.bp = pebs->bp;
	regs.sp = pebs->sp;

	if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
		regs.flags |= PERF_EFLAGS_EXACT;
	else
		regs.flags &= ~PERF_EFLAGS_EXACT;

	if (has_branch_stack(event))
		data.br_stack = &cpuc->lbr_stack;

	if (perf_event_overflow(event, &data, &regs))
		x86_pmu_stop(event, 0);
}

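/*
 * The fmt0 (Core) flavour services a single PEBS-capable counter, PMC0;
 * with the threshold at one record only the most recent entry is
 * consumed.
 */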
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
	at += n - 1;

	__intel_pmu_pebs_event(event, iregs, at);
}

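/*
 * Nehalem and later support PEBS on several counters at once; each
 * record carries a status bitmask of the counters that overflowed, which
 * is used here to map the drained record back to its perf event.
 */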
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct pebs_record_nhm *at, *top;
	struct perf_event *event = NULL;
	u64 status = 0;
	int bit, n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	n = top - at;
	if (n <= 0)
		return;

	/*
	 * Should not happen, we program the threshold at 1 and do not
	 * set a reset value.
	 */
	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);

	for ( ; at < top; at++) {
		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
			event = cpuc->events[bit];
			if (!test_bit(bit, cpuc->active_mask))
				continue;

			WARN_ON_ONCE(!event);

			if (!event->attr.precise_ip)
				continue;

			if (__test_and_set_bit(bit, (unsigned long *)&status))
				continue;

			break;
		}

		if (!event || bit >= x86_pmu.max_pebs_events)
			continue;

		__intel_pmu_pebs_event(event, iregs, at);
	}
}

/*
 * BTS, PEBS probe and setup
 */

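/*
 * Probe for DS support (DTES64), then pick the PEBS record size and
 * drain routine matching the PEBS format the hardware reports.
 */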
void intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		default:
			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}