Linux Kernel  3.7.1
perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  * Copyright (C) 2008 Thomas Gleixner <[email protected]>
5  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  * Copyright (C) 2009 Jaswinder Singh Rajput
7  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <[email protected]>
9  * Copyright (C) 2009 Intel Corporation, <[email protected]>
10  * Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  * For licencing details see kernel-base/COPYING
13  */
14 
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/slab.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27 #include <linux/device.h>
28 
29 #include <asm/apic.h>
30 #include <asm/stacktrace.h>
31 #include <asm/nmi.h>
32 #include <asm/smp.h>
33 #include <asm/alternative.h>
34 #include <asm/timer.h>
35 #include <asm/desc.h>
36 #include <asm/ldt.h>
37 
38 #include "perf_event.h"
39 
40 struct x86_pmu x86_pmu __read_mostly;
41 
42 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
43  .enabled = 1,
44 };
45 
46 u64 __read_mostly hw_cache_event_ids
47  [PERF_COUNT_HW_CACHE_MAX]
48  [PERF_COUNT_HW_CACHE_OP_MAX]
49  [PERF_COUNT_HW_CACHE_RESULT_MAX];
50 u64 __read_mostly hw_cache_extra_regs
51  [PERF_COUNT_HW_CACHE_MAX]
52  [PERF_COUNT_HW_CACHE_OP_MAX]
53  [PERF_COUNT_HW_CACHE_RESULT_MAX];
54 
55 /*
56  * Propagate event elapsed time into the generic event.
57  * Can only be executed on the CPU where the event is active.
58  * Returns the delta events processed.
59  */
60 u64 x86_perf_event_update(struct perf_event *event)
61 {
62  struct hw_perf_event *hwc = &event->hw;
63  int shift = 64 - x86_pmu.cntval_bits;
64  u64 prev_raw_count, new_raw_count;
65  int idx = hwc->idx;
66  s64 delta;
67 
68  if (idx == INTEL_PMC_IDX_FIXED_BTS)
69  return 0;
70 
71  /*
72  * Careful: an NMI might modify the previous event value.
73  *
74  * Our tactic to handle this is to first atomically read and
75  * exchange a new raw count - then add that new-prev delta
76  * count to the generic event atomically:
77  */
78 again:
79  prev_raw_count = local64_read(&hwc->prev_count);
80  rdpmcl(hwc->event_base_rdpmc, new_raw_count);
81 
82  if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
83  new_raw_count) != prev_raw_count)
84  goto again;
85 
86  /*
87  * Now we have the new raw value and have updated the prev
88  * timestamp already. We can now calculate the elapsed delta
89  * (event-)time and add that to the generic event.
90  *
91  * Careful, not all hw sign-extends above the physical width
92  * of the count.
93  */
94  delta = (new_raw_count << shift) - (prev_raw_count << shift);
95  delta >>= shift;
96 
97  local64_add(delta, &event->count);
98  local64_sub(delta, &hwc->period_left);
99 
100  return new_raw_count;
101 }
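
The shift pair above is what makes the delta come out right even when the raw counter wraps: both reads are moved into the top bits, subtracted, and arithmetically shifted back. A user-space sketch of the same arithmetic (the 48-bit width is an assumption for the demo; the real width comes from x86_pmu.cntval_bits):

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>
#include <stdint.h>

static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw, int cntval_bits)
{
	int shift = 64 - cntval_bits;

	/* shift into the top bits, subtract, shift back with sign extension */
	return (((int64_t)(new_raw << shift)) - ((int64_t)(prev_raw << shift))) >> shift;
}

int main(void)
{
	/* a 48-bit counter wrapping from 0xffffffffffff to 0x5 still yields +6 */
	printf("%lld\n", (long long)counter_delta(0xffffffffffffULL, 0x5ULL, 48));
	return 0;
}
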
102 
103 /*
104  * Find and validate any extra registers to set up.
105  */
106 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
107 {
108  struct hw_perf_event_extra *reg;
109  struct extra_reg *er;
110 
111  reg = &event->hw.extra_reg;
112 
113  if (!x86_pmu.extra_regs)
114  return 0;
115 
116  for (er = x86_pmu.extra_regs; er->msr; er++) {
117  if (er->event != (config & er->config_mask))
118  continue;
119  if (event->attr.config1 & ~er->valid_mask)
120  return -EINVAL;
121 
122  reg->idx = er->idx;
123  reg->config = event->attr.config1;
124  reg->reg = er->msr;
125  break;
126  }
127  return 0;
128 }
129 
130 static atomic_t active_events;
131 static DEFINE_MUTEX(pmc_reserve_mutex);
132 
133 #ifdef CONFIG_X86_LOCAL_APIC
134 
135 static bool reserve_pmc_hardware(void)
136 {
137  int i;
138 
139  for (i = 0; i < x86_pmu.num_counters; i++) {
140  if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
141  goto perfctr_fail;
142  }
143 
144  for (i = 0; i < x86_pmu.num_counters; i++) {
145  if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
146  goto eventsel_fail;
147  }
148 
149  return true;
150 
151 eventsel_fail:
152  for (i--; i >= 0; i--)
153  release_evntsel_nmi(x86_pmu_config_addr(i));
154 
155  i = x86_pmu.num_counters;
156 
157 perfctr_fail:
158  for (i--; i >= 0; i--)
159  release_perfctr_nmi(x86_pmu_event_addr(i));
160 
161  return false;
162 }
163 
164 static void release_pmc_hardware(void)
165 {
166  int i;
167 
168  for (i = 0; i < x86_pmu.num_counters; i++) {
169  release_perfctr_nmi(x86_pmu_event_addr(i));
170  release_evntsel_nmi(x86_pmu_config_addr(i));
171  }
172 }
173 
174 #else
175 
176 static bool reserve_pmc_hardware(void) { return true; }
177 static void release_pmc_hardware(void) {}
178 
179 #endif
180 
181 static bool check_hw_exists(void)
182 {
183  u64 val, val_new = ~0;
184  int i, reg, ret = 0;
185 
186  /*
187  * Check to see if the BIOS enabled any of the counters, if so
188  * complain and bail.
189  */
190  for (i = 0; i < x86_pmu.num_counters; i++) {
191  reg = x86_pmu_config_addr(i);
192  ret = rdmsrl_safe(reg, &val);
193  if (ret)
194  goto msr_fail;
195  if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
196  goto bios_fail;
197  }
198 
199  if (x86_pmu.num_counters_fixed) {
200  reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
201  ret = rdmsrl_safe(reg, &val);
202  if (ret)
203  goto msr_fail;
204  for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
205  if (val & (0x03 << i*4))
206  goto bios_fail;
207  }
208  }
209 
210  /*
211  * Read the current value, change it and read it back to see if it
212  * matches, this is needed to detect certain hardware emulators
213  * (qemu/kvm) that don't trap on the MSR access and always return 0s.
214  */
215  reg = x86_pmu_event_addr(0);
216  if (rdmsrl_safe(reg, &val))
217  goto msr_fail;
218  val ^= 0xffffUL;
219  ret = wrmsrl_safe(reg, val);
220  ret |= rdmsrl_safe(reg, &val_new);
221  if (ret || val != val_new)
222  goto msr_fail;
223 
224  return true;
225 
226 bios_fail:
227  /*
228  * We still allow the PMU driver to operate:
229  */
230  printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
231  printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
232 
233  return true;
234 
235 msr_fail:
236  printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
237  printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
238 
239  return false;
240 }
241 
242 static void hw_perf_event_destroy(struct perf_event *event)
243 {
244  if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
245  release_pmc_hardware();
246  release_ds_buffers();
247  mutex_unlock(&pmc_reserve_mutex);
248  }
249 }
250 
251 static inline int x86_pmu_initialized(void)
252 {
253  return x86_pmu.handle_irq != NULL;
254 }
255 
256 static inline int
257 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
258 {
259  struct perf_event_attr *attr = &event->attr;
260  unsigned int cache_type, cache_op, cache_result;
261  u64 config, val;
262 
263  config = attr->config;
264 
265  cache_type = (config >> 0) & 0xff;
266  if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
267  return -EINVAL;
268 
269  cache_op = (config >> 8) & 0xff;
270  if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
271  return -EINVAL;
272 
273  cache_result = (config >> 16) & 0xff;
274  if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
275  return -EINVAL;
276 
277  val = hw_cache_event_ids[cache_type][cache_op][cache_result];
278 
279  if (val == 0)
280  return -ENOENT;
281 
282  if (val == -1)
283  return -EINVAL;
284 
285  hwc->config |= val;
286  attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
287  return x86_pmu_extra_regs(val, event);
288 }
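
For reference, this is the packing that user space performs on its side of the ABI before set_ext_hw_attr() unpacks it above; a minimal sketch using the uapi constants from <linux/perf_event.h>:

/* illustrative sketch, not part of perf_event.c */
#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t hw_cache_config(unsigned type, unsigned op, unsigned result)
{
	/* byte 0: cache, byte 1: operation, byte 2: result */
	return (uint64_t)type | ((uint64_t)op << 8) | ((uint64_t)result << 16);
}

/* e.g. attr.type   = PERF_TYPE_HW_CACHE;
 *      attr.config = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *                                    PERF_COUNT_HW_CACHE_OP_READ,
 *                                    PERF_COUNT_HW_CACHE_RESULT_MISS);
 */
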
289 
290 int x86_setup_perfctr(struct perf_event *event)
291 {
292  struct perf_event_attr *attr = &event->attr;
293  struct hw_perf_event *hwc = &event->hw;
294  u64 config;
295 
296  if (!is_sampling_event(event)) {
297  hwc->sample_period = x86_pmu.max_period;
298  hwc->last_period = hwc->sample_period;
299  local64_set(&hwc->period_left, hwc->sample_period);
300  } else {
301  /*
302  * If we have a PMU initialized but no APIC
303  * interrupts, we cannot sample hardware
304  * events (user-space has to fall back and
305  * sample via a hrtimer based software event):
306  */
307  if (!x86_pmu.apic)
308  return -EOPNOTSUPP;
309  }
310 
311  if (attr->type == PERF_TYPE_RAW)
312  return x86_pmu_extra_regs(event->attr.config, event);
313 
314  if (attr->type == PERF_TYPE_HW_CACHE)
315  return set_ext_hw_attr(hwc, event);
316 
317  if (attr->config >= x86_pmu.max_events)
318  return -EINVAL;
319 
320  /*
321  * The generic map:
322  */
323  config = x86_pmu.event_map(attr->config);
324 
325  if (config == 0)
326  return -ENOENT;
327 
328  if (config == -1LL)
329  return -EINVAL;
330 
331  /*
332  * Branch tracing:
333  */
334  if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
335  !attr->freq && hwc->sample_period == 1) {
336  /* BTS is not supported by this architecture. */
337  if (!x86_pmu.bts_active)
338  return -EOPNOTSUPP;
339 
340  /* BTS is currently only allowed for user-mode. */
341  if (!attr->exclude_kernel)
342  return -EOPNOTSUPP;
343 
344  if (!attr->exclude_guest)
345  return -EOPNOTSUPP;
346  }
347 
348  hwc->config |= config;
349 
350  return 0;
351 }
352 
353 /*
354  * check that branch_sample_type is compatible with
355  * settings needed for precise_ip > 1 which implies
356  * using the LBR to capture ALL taken branches at the
357  * priv levels of the measurement
358  */
359 static inline int precise_br_compat(struct perf_event *event)
360 {
361  u64 m = event->attr.branch_sample_type;
362  u64 b = 0;
363 
364  /* must capture all branches */
365  if (!(m & PERF_SAMPLE_BRANCH_ANY))
366  return 0;
367 
368  m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
369 
370  if (!event->attr.exclude_user)
371  b |= PERF_SAMPLE_BRANCH_USER;
372 
373  if (!event->attr.exclude_kernel)
374  b |= PERF_SAMPLE_BRANCH_KERNEL;
375 
376  /*
377  * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
378  */
379 
380  return m == b;
381 }
382 
383 int x86_pmu_hw_config(struct perf_event *event)
384 {
385  if (event->attr.precise_ip) {
386  int precise = 0;
387 
388  if (!event->attr.exclude_guest)
389  return -EOPNOTSUPP;
390 
391  /* Support for constant skid */
392  if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
393  precise++;
394 
395  /* Support for IP fixup */
396  if (x86_pmu.lbr_nr)
397  precise++;
398  }
399 
400  if (event->attr.precise_ip > precise)
401  return -EOPNOTSUPP;
402  /*
403  * check that PEBS LBR correction does not conflict with
404  * whatever the user is asking with attr->branch_sample_type
405  */
406  if (event->attr.precise_ip > 1) {
407  u64 *br_type = &event->attr.branch_sample_type;
408 
409  if (has_branch_stack(event)) {
410  if (!precise_br_compat(event))
411  return -EOPNOTSUPP;
412 
413  /* branch_sample_type is compatible */
414 
415  } else {
416  /*
417  * user did not specify branch_sample_type
418  *
419  * For PEBS fixups, we capture all
420  * the branches at the priv level of the
421  * event.
422  */
423  *br_type = PERF_SAMPLE_BRANCH_ANY;
424 
425  if (!event->attr.exclude_user)
426  *br_type |= PERF_SAMPLE_BRANCH_USER;
427 
428  if (!event->attr.exclude_kernel)
429  *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
430  }
431  }
432  }
433 
434  /*
435  * Generate PMC IRQs:
436  * (keep 'enabled' bit clear for now)
437  */
438  event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
439 
440  /*
441  * Count user and OS events unless requested not to
442  */
443  if (!event->attr.exclude_user)
444  event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
445  if (!event->attr.exclude_kernel)
446  event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
447 
448  if (event->attr.type == PERF_TYPE_RAW)
449  event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
450 
451  return x86_setup_perfctr(event);
452 }
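
A minimal user-space counterpart, sketching a perf_event_open() call whose attr fields (exclude_kernel, exclude_guest, precise_ip) are the ones x86_pmu_hw_config() validates above; error handling is reduced to the bare minimum and the choices are illustrative only:

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* count user mode only */
	attr.exclude_guest = 1;		/* required with precise_ip by the check above */
	attr.precise_ip = 1;		/* request PEBS-assisted skid reduction */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any cpu */, -1 /* no group */, 0 /* flags */);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}
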
453 
454 /*
455  * Setup the hardware configuration for a given attr_type
456  */
457 static int __x86_pmu_event_init(struct perf_event *event)
458 {
459  int err;
460 
461  if (!x86_pmu_initialized())
462  return -ENODEV;
463 
464  err = 0;
465  if (!atomic_inc_not_zero(&active_events)) {
466  mutex_lock(&pmc_reserve_mutex);
467  if (atomic_read(&active_events) == 0) {
468  if (!reserve_pmc_hardware())
469  err = -EBUSY;
470  else
471  reserve_ds_buffers();
472  }
473  if (!err)
474  atomic_inc(&active_events);
475  mutex_unlock(&pmc_reserve_mutex);
476  }
477  if (err)
478  return err;
479 
480  event->destroy = hw_perf_event_destroy;
481 
482  event->hw.idx = -1;
483  event->hw.last_cpu = -1;
484  event->hw.last_tag = ~0ULL;
485 
486  /* mark unused */
487  event->hw.extra_reg.idx = EXTRA_REG_NONE;
488  event->hw.branch_reg.idx = EXTRA_REG_NONE;
489 
490  return x86_pmu.hw_config(event);
491 }
492 
493 void x86_pmu_disable_all(void)
494 {
495  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
496  int idx;
497 
498  for (idx = 0; idx < x86_pmu.num_counters; idx++) {
499  u64 val;
500 
501  if (!test_bit(idx, cpuc->active_mask))
502  continue;
503  rdmsrl(x86_pmu_config_addr(idx), val);
504  if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
505  continue;
506  val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
507  wrmsrl(x86_pmu_config_addr(idx), val);
508  }
509 }
510 
511 static void x86_pmu_disable(struct pmu *pmu)
512 {
513  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
514 
515  if (!x86_pmu_initialized())
516  return;
517 
518  if (!cpuc->enabled)
519  return;
520 
521  cpuc->n_added = 0;
522  cpuc->enabled = 0;
523  barrier();
524 
525  x86_pmu.disable_all();
526 }
527 
528 void x86_pmu_enable_all(int added)
529 {
530  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
531  int idx;
532 
533  for (idx = 0; idx < x86_pmu.num_counters; idx++) {
534  struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
535 
536  if (!test_bit(idx, cpuc->active_mask))
537  continue;
538 
539  __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
540  }
541 }
542 
543 static struct pmu pmu;
544 
545 static inline int is_x86_event(struct perf_event *event)
546 {
547  return event->pmu == &pmu;
548 }
549 
550 /*
551  * Event scheduler state:
552  *
553  * Assign events iterating over all events and counters, beginning
554  * with events with least weights first. Keep the current iterator
555  * state in struct sched_state.
556  */
557 struct sched_state {
558  int weight;
559  int event; /* event index */
560  int counter; /* counter index */
561  int unassigned; /* number of events to be assigned left */
562  unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
563 };
564 
565 /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
566 #define SCHED_STATES_MAX 2
567 
568 struct perf_sched {
569  int max_weight;
570  int max_events;
571  struct event_constraint **constraints;
572  struct sched_state state;
573  int saved_states;
574  struct sched_state saved[SCHED_STATES_MAX];
575 };
576 
577 /*
578  * Initialize the iterator that runs through all events and counters.
579  */
580 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
581  int num, int wmin, int wmax)
582 {
583  int idx;
584 
585  memset(sched, 0, sizeof(*sched));
586  sched->max_events = num;
587  sched->max_weight = wmax;
588  sched->constraints = c;
589 
590  for (idx = 0; idx < num; idx++) {
591  if (c[idx]->weight == wmin)
592  break;
593  }
594 
595  sched->state.event = idx; /* start with min weight */
596  sched->state.weight = wmin;
597  sched->state.unassigned = num;
598 }
599 
600 static void perf_sched_save_state(struct perf_sched *sched)
601 {
602  if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
603  return;
604 
605  sched->saved[sched->saved_states] = sched->state;
606  sched->saved_states++;
607 }
608 
609 static bool perf_sched_restore_state(struct perf_sched *sched)
610 {
611  if (!sched->saved_states)
612  return false;
613 
614  sched->saved_states--;
615  sched->state = sched->saved[sched->saved_states];
616 
617  /* continue with next counter: */
618  clear_bit(sched->state.counter++, sched->state.used);
619 
620  return true;
621 }
622 
623 /*
624  * Select a counter for the current event to schedule. Return true on
625  * success.
626  */
627 static bool __perf_sched_find_counter(struct perf_sched *sched)
628 {
629  struct event_constraint *c;
630  int idx;
631 
632  if (!sched->state.unassigned)
633  return false;
634 
635  if (sched->state.event >= sched->max_events)
636  return false;
637 
638  c = sched->constraints[sched->state.event];
639 
640  /* Prefer fixed purpose counters */
641  if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
642  idx = INTEL_PMC_IDX_FIXED;
643  for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
644  if (!__test_and_set_bit(idx, sched->state.used))
645  goto done;
646  }
647  }
648  /* Grab the first unused counter starting with idx */
649  idx = sched->state.counter;
650  for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
651  if (!__test_and_set_bit(idx, sched->state.used))
652  goto done;
653  }
654 
655  return false;
656 
657 done:
658  sched->state.counter = idx;
659 
660  if (c->overlap)
661  perf_sched_save_state(sched);
662 
663  return true;
664 }
665 
666 static bool perf_sched_find_counter(struct perf_sched *sched)
667 {
668  while (!__perf_sched_find_counter(sched)) {
669  if (!perf_sched_restore_state(sched))
670  return false;
671  }
672 
673  return true;
674 }
675 
676 /*
677  * Go through all unassigned events and find the next one to schedule.
678  * Take events with the least weight first. Return true on success.
679  */
680 static bool perf_sched_next_event(struct perf_sched *sched)
681 {
682  struct event_constraint *c;
683 
684  if (!sched->state.unassigned || !--sched->state.unassigned)
685  return false;
686 
687  do {
688  /* next event */
689  sched->state.event++;
690  if (sched->state.event >= sched->max_events) {
691  /* next weight */
692  sched->state.event = 0;
693  sched->state.weight++;
694  if (sched->state.weight > sched->max_weight)
695  return false;
696  }
697  c = sched->constraints[sched->state.event];
698  } while (c->weight != sched->state.weight);
699 
700  sched->state.counter = 0; /* start with first counter */
701 
702  return true;
703 }
704 
705 /*
706  * Assign a counter for each event.
707  */
708 int perf_assign_events(struct event_constraint **constraints, int n,
709  int wmin, int wmax, int *assign)
710 {
711  struct perf_sched sched;
712 
713  perf_sched_init(&sched, constraints, n, wmin, wmax);
714 
715  do {
716  if (!perf_sched_find_counter(&sched))
717  break; /* failed */
718  if (assign)
719  assign[sched.state.event] = sched.state.counter;
720  } while (perf_sched_next_event(&sched));
721 
722  return sched.state.unassigned;
723 }
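
A simplified user-space sketch of the same idea, deliberately ignoring the overlap/backtracking handling: place the most constrained events (lowest weight, i.e. fewest allowed counters) first, each on the first free counter its mask allows. NCOUNTERS and the masks are made-up demo values:

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>
#include <stdint.h>

#define NCOUNTERS 4

static int assign_events(const uint64_t *idxmsk, int n, int *assign)
{
	uint64_t used = 0;
	int weight, i, c, done = 0;

	for (weight = 1; weight <= NCOUNTERS && done < n; weight++) {
		for (i = 0; i < n; i++) {
			if (__builtin_popcountll(idxmsk[i]) != weight)
				continue;
			for (c = 0; c < NCOUNTERS; c++) {
				if ((idxmsk[i] & (1ULL << c)) && !(used & (1ULL << c))) {
					used |= 1ULL << c;
					assign[i] = c;
					done++;
					break;
				}
			}
			if (c == NCOUNTERS)
				return -1;	/* could not schedule this event */
		}
	}
	return 0;
}

int main(void)
{
	/* event 0 may use any counter, event 1 only counter 0, event 2 counters 0-1 */
	uint64_t idxmsk[] = { 0xf, 0x1, 0x3 };
	int assign[3];

	if (!assign_events(idxmsk, 3, assign))
		printf("ev0->%d ev1->%d ev2->%d\n", assign[0], assign[1], assign[2]);
	return 0;
}
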
724 
725 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
726 {
727  struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
728  unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
729  int i, wmin, wmax, num = 0;
730  struct hw_perf_event *hwc;
731 
732  bitmap_zero(used_mask, X86_PMC_IDX_MAX);
733 
734  for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
735  c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
736  constraints[i] = c;
737  wmin = min(wmin, c->weight);
738  wmax = max(wmax, c->weight);
739  }
740 
741  /*
742  * fastpath, try to reuse previous register
743  */
744  for (i = 0; i < n; i++) {
745  hwc = &cpuc->event_list[i]->hw;
746  c = constraints[i];
747 
748  /* never assigned */
749  if (hwc->idx == -1)
750  break;
751 
752  /* constraint still honored */
753  if (!test_bit(hwc->idx, c->idxmsk))
754  break;
755 
756  /* not already used */
757  if (test_bit(hwc->idx, used_mask))
758  break;
759 
760  __set_bit(hwc->idx, used_mask);
761  if (assign)
762  assign[i] = hwc->idx;
763  }
764 
765  /* slow path */
766  if (i != n)
767  num = perf_assign_events(constraints, n, wmin, wmax, assign);
768 
769  /*
770  * scheduling failed or is just a simulation,
771  * free resources if necessary
772  */
773  if (!assign || num) {
774  for (i = 0; i < n; i++) {
775  if (x86_pmu.put_event_constraints)
776  x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
777  }
778  }
779  return num ? -EINVAL : 0;
780 }
781 
782 /*
783  * dogrp: true if we must collect sibling events (group)
784  * returns total number of events and error code
785  */
786 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
787 {
788  struct perf_event *event;
789  int n, max_count;
790 
791  max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
792 
793  /* current number of events already accepted */
794  n = cpuc->n_events;
795 
796  if (is_x86_event(leader)) {
797  if (n >= max_count)
798  return -EINVAL;
799  cpuc->event_list[n] = leader;
800  n++;
801  }
802  if (!dogrp)
803  return n;
804 
805  list_for_each_entry(event, &leader->sibling_list, group_entry) {
806  if (!is_x86_event(event) ||
807  event->state <= PERF_EVENT_STATE_OFF)
808  continue;
809 
810  if (n >= max_count)
811  return -EINVAL;
812 
813  cpuc->event_list[n] = event;
814  n++;
815  }
816  return n;
817 }
818 
819 static inline void x86_assign_hw_event(struct perf_event *event,
820  struct cpu_hw_events *cpuc, int i)
821 {
822  struct hw_perf_event *hwc = &event->hw;
823 
824  hwc->idx = cpuc->assign[i];
825  hwc->last_cpu = smp_processor_id();
826  hwc->last_tag = ++cpuc->tags[i];
827 
828  if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
829  hwc->config_base = 0;
830  hwc->event_base = 0;
831  } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
832  hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
833  hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
834  hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
835  } else {
836  hwc->config_base = x86_pmu_config_addr(hwc->idx);
837  hwc->event_base = x86_pmu_event_addr(hwc->idx);
838  hwc->event_base_rdpmc = hwc->idx;
839  }
840 }
841 
842 static inline int match_prev_assignment(struct hw_perf_event *hwc,
843  struct cpu_hw_events *cpuc,
844  int i)
845 {
846  return hwc->idx == cpuc->assign[i] &&
847  hwc->last_cpu == smp_processor_id() &&
848  hwc->last_tag == cpuc->tags[i];
849 }
850 
851 static void x86_pmu_start(struct perf_event *event, int flags);
852 
853 static void x86_pmu_enable(struct pmu *pmu)
854 {
855  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
856  struct perf_event *event;
857  struct hw_perf_event *hwc;
858  int i, added = cpuc->n_added;
859 
860  if (!x86_pmu_initialized())
861  return;
862 
863  if (cpuc->enabled)
864  return;
865 
866  if (cpuc->n_added) {
867  int n_running = cpuc->n_events - cpuc->n_added;
868  /*
869  * apply assignment obtained either from
870  * hw_perf_group_sched_in() or x86_pmu_enable()
871  *
872  * step1: save events moving to new counters
873  * step2: reprogram moved events into new counters
874  */
875  for (i = 0; i < n_running; i++) {
876  event = cpuc->event_list[i];
877  hwc = &event->hw;
878 
879  /*
880  * we can avoid reprogramming counter if:
881  * - assigned same counter as last time
882  * - running on same CPU as last time
883  * - no other event has used the counter since
884  */
885  if (hwc->idx == -1 ||
886  match_prev_assignment(hwc, cpuc, i))
887  continue;
888 
889  /*
890  * Ensure we don't accidentally enable a stopped
891  * counter simply because we rescheduled.
892  */
893  if (hwc->state & PERF_HES_STOPPED)
894  hwc->state |= PERF_HES_ARCH;
895 
896  x86_pmu_stop(event, PERF_EF_UPDATE);
897  }
898 
899  for (i = 0; i < cpuc->n_events; i++) {
900  event = cpuc->event_list[i];
901  hwc = &event->hw;
902 
903  if (!match_prev_assignment(hwc, cpuc, i))
904  x86_assign_hw_event(event, cpuc, i);
905  else if (i < n_running)
906  continue;
907 
908  if (hwc->state & PERF_HES_ARCH)
909  continue;
910 
911  x86_pmu_start(event, PERF_EF_RELOAD);
912  }
913  cpuc->n_added = 0;
914  perf_events_lapic_init();
915  }
916 
917  cpuc->enabled = 1;
918  barrier();
919 
920  x86_pmu.enable_all(added);
921 }
922 
923 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
924 
925 /*
926  * Set the next IRQ period, based on the hwc->period_left value.
927  * To be called with the event disabled in hw:
928  */
929 int x86_perf_event_set_period(struct perf_event *event)
930 {
931  struct hw_perf_event *hwc = &event->hw;
932  s64 left = local64_read(&hwc->period_left);
933  s64 period = hwc->sample_period;
934  int ret = 0, idx = hwc->idx;
935 
936  if (idx == INTEL_PMC_IDX_FIXED_BTS)
937  return 0;
938 
939  /*
940  * If we are way outside a reasonable range then just skip forward:
941  */
942  if (unlikely(left <= -period)) {
943  left = period;
944  local64_set(&hwc->period_left, left);
945  hwc->last_period = period;
946  ret = 1;
947  }
948 
949  if (unlikely(left <= 0)) {
950  left += period;
951  local64_set(&hwc->period_left, left);
952  hwc->last_period = period;
953  ret = 1;
954  }
955  /*
956  * Quirk: certain CPUs dont like it if just 1 hw_event is left:
957  */
958  if (unlikely(left < 2))
959  left = 2;
960 
961  if (left > x86_pmu.max_period)
962  left = x86_pmu.max_period;
963 
964  per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
965 
966  /*
967  * The hw event starts counting from this event offset,
968  * mark it to be able to extract future deltas:
969  */
970  local64_set(&hwc->prev_count, (u64)-left);
971 
972  wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
973 
974  /*
975  * Due to an erratum on certain CPUs we need
976  * a second write to be sure the register
977  * is updated properly
978  */
979  if (x86_pmu.perfctr_second_write) {
980  wrmsrl(hwc->event_base,
981  (u64)(-left) & x86_pmu.cntval_mask);
982  }
983 
984  perf_event_update_userpage(event);
985 
986  return ret;
987 }
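
Why -left is written: an N-bit up-counter programmed with 2^N - left overflows, and raises the PMI, after exactly 'left' further increments, which is how the sample period is realised. A small demo with an assumed 48-bit counter:

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int cntval_bits = 48;
	const uint64_t cntval_mask = (1ULL << cntval_bits) - 1;
	const int64_t left = 100000;		/* events until the next PMI */

	uint64_t programmed = (uint64_t)(-left) & cntval_mask;
	uint64_t events_to_overflow = (cntval_mask + 1) - programmed;

	printf("programmed=%#llx, overflows after %llu events\n",
	       (unsigned long long)programmed,
	       (unsigned long long)events_to_overflow);	/* prints 100000 */
	return 0;
}
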
988 
989 void x86_pmu_enable_event(struct perf_event *event)
990 {
991  if (__this_cpu_read(cpu_hw_events.enabled))
992  __x86_pmu_enable_event(&event->hw,
993  ARCH_PERFMON_EVENTSEL_ENABLE);
994 }
995 
996 /*
997  * Add a single event to the PMU.
998  *
999  * The event is added to the group of enabled events
1000  * but only if it can be scheduled with existing events.
1001  */
1002 static int x86_pmu_add(struct perf_event *event, int flags)
1003 {
1004  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1005  struct hw_perf_event *hwc;
1006  int assign[X86_PMC_IDX_MAX];
1007  int n, n0, ret;
1008 
1009  hwc = &event->hw;
1010 
1011  perf_pmu_disable(event->pmu);
1012  n0 = cpuc->n_events;
1013  ret = n = collect_events(cpuc, event, false);
1014  if (ret < 0)
1015  goto out;
1016 
1017  hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1018  if (!(flags & PERF_EF_START))
1019  hwc->state |= PERF_HES_ARCH;
1020 
1021  /*
1022  * If group events scheduling transaction was started,
1023  * skip the schedulability test here, it will be performed
1024  * at commit time (->commit_txn) as a whole
1025  */
1026  if (cpuc->group_flag & PERF_EVENT_TXN)
1027  goto done_collect;
1028 
1029  ret = x86_pmu.schedule_events(cpuc, n, assign);
1030  if (ret)
1031  goto out;
1032  /*
1033  * copy new assignment, now we know it is possible
1034  * will be used by hw_perf_enable()
1035  */
1036  memcpy(cpuc->assign, assign, n*sizeof(int));
1037 
1038 done_collect:
1039  cpuc->n_events = n;
1040  cpuc->n_added += n - n0;
1041  cpuc->n_txn += n - n0;
1042 
1043  ret = 0;
1044 out:
1045  perf_pmu_enable(event->pmu);
1046  return ret;
1047 }
1048 
1049 static void x86_pmu_start(struct perf_event *event, int flags)
1050 {
1051  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1052  int idx = event->hw.idx;
1053 
1054  if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1055  return;
1056 
1057  if (WARN_ON_ONCE(idx == -1))
1058  return;
1059 
1060  if (flags & PERF_EF_RELOAD) {
1061  WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1062  x86_perf_event_set_period(event);
1063  }
1064 
1065  event->hw.state = 0;
1066 
1067  cpuc->events[idx] = event;
1068  __set_bit(idx, cpuc->active_mask);
1069  __set_bit(idx, cpuc->running);
1070  x86_pmu.enable(event);
1071  perf_event_update_userpage(event);
1072 }
1073 
1074 void perf_event_print_debug(void)
1075 {
1076  u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1077  u64 pebs;
1078  struct cpu_hw_events *cpuc;
1079  unsigned long flags;
1080  int cpu, idx;
1081 
1082  if (!x86_pmu.num_counters)
1083  return;
1084 
1085  local_irq_save(flags);
1086 
1087  cpu = smp_processor_id();
1088  cpuc = &per_cpu(cpu_hw_events, cpu);
1089 
1090  if (x86_pmu.version >= 2) {
1091  rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1092  rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1093  rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1094  rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1095  rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1096 
1097  pr_info("\n");
1098  pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1099  pr_info("CPU#%d: status: %016llx\n", cpu, status);
1100  pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1101  pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1102  pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
1103  }
1104  pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1105 
1106  for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1107  rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1108  rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1109 
1110  prev_left = per_cpu(pmc_prev_left[idx], cpu);
1111 
1112  pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1113  cpu, idx, pmc_ctrl);
1114  pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1115  cpu, idx, pmc_count);
1116  pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1117  cpu, idx, prev_left);
1118  }
1119  for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1120  rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1121 
1122  pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1123  cpu, idx, pmc_count);
1124  }
1125  local_irq_restore(flags);
1126 }
1127 
1128 void x86_pmu_stop(struct perf_event *event, int flags)
1129 {
1130  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1131  struct hw_perf_event *hwc = &event->hw;
1132 
1133  if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1134  x86_pmu.disable(event);
1135  cpuc->events[hwc->idx] = NULL;
1136  WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1137  hwc->state |= PERF_HES_STOPPED;
1138  }
1139 
1140  if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1141  /*
1142  * Drain the remaining delta count out of an event
1143  * that we are disabling:
1144  */
1145  x86_perf_event_update(event);
1146  hwc->state |= PERF_HES_UPTODATE;
1147  }
1148 }
1149 
1150 static void x86_pmu_del(struct perf_event *event, int flags)
1151 {
1152  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1153  int i;
1154 
1155  /*
1156  * If we're called during a txn, we don't need to do anything.
1157  * The events never got scheduled and ->cancel_txn will truncate
1158  * the event_list.
1159  */
1160  if (cpuc->group_flag & PERF_EVENT_TXN)
1161  return;
1162 
1163  x86_pmu_stop(event, PERF_EF_UPDATE);
1164 
1165  for (i = 0; i < cpuc->n_events; i++) {
1166  if (event == cpuc->event_list[i]) {
1167 
1168  if (x86_pmu.put_event_constraints)
1169  x86_pmu.put_event_constraints(cpuc, event);
1170 
1171  while (++i < cpuc->n_events)
1172  cpuc->event_list[i-1] = cpuc->event_list[i];
1173 
1174  --cpuc->n_events;
1175  break;
1176  }
1177  }
1178  perf_event_update_userpage(event);
1179 }
1180 
1181 int x86_pmu_handle_irq(struct pt_regs *regs)
1182 {
1183  struct perf_sample_data data;
1184  struct cpu_hw_events *cpuc;
1185  struct perf_event *event;
1186  int idx, handled = 0;
1187  u64 val;
1188 
1189  cpuc = &__get_cpu_var(cpu_hw_events);
1190 
1191  /*
1192  * Some chipsets need to unmask the LVTPC in a particular spot
1193  * inside the nmi handler. As a result, the unmasking was pushed
1194  * into all the nmi handlers.
1195  *
1196  * This generic handler doesn't seem to have any issues where the
1197  * unmasking occurs so it was left at the top.
1198  */
1199  apic_write(APIC_LVTPC, APIC_DM_NMI);
1200 
1201  for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1202  if (!test_bit(idx, cpuc->active_mask)) {
1203  /*
1204  * Though we deactivated the counter some cpus
1205  * might still deliver spurious interrupts still
1206  * in flight. Catch them:
1207  */
1208  if (__test_and_clear_bit(idx, cpuc->running))
1209  handled++;
1210  continue;
1211  }
1212 
1213  event = cpuc->events[idx];
1214 
1215  val = x86_perf_event_update(event);
1216  if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1217  continue;
1218 
1219  /*
1220  * event overflow
1221  */
1222  handled++;
1223  perf_sample_data_init(&data, 0, event->hw.last_period);
1224 
1225  if (!x86_perf_event_set_period(event))
1226  continue;
1227 
1228  if (perf_event_overflow(event, &data, regs))
1229  x86_pmu_stop(event, 0);
1230  }
1231 
1232  if (handled)
1233  inc_irq_stat(apic_perf_irqs);
1234 
1235  return handled;
1236 }
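
The sign-bit test in the loop above is the overflow check: a counter programmed with -left starts with its top bit set and only clears it once it wraps past zero, so a still-set bit means this counter did not cause the interrupt. A sketch, assuming a 48-bit counter:

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>
#include <stdint.h>

static int counter_overflowed(uint64_t val, int cntval_bits)
{
	return !(val & (1ULL << (cntval_bits - 1)));
}

int main(void)
{
	printf("%d\n", counter_overflowed(0xfffffffe7960ULL, 48)); /* 0: still counting up */
	printf("%d\n", counter_overflowed(0x000000000123ULL, 48)); /* 1: wrapped past zero */
	return 0;
}
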
1237 
1238 void perf_events_lapic_init(void)
1239 {
1240  if (!x86_pmu.apic || !x86_pmu_initialized())
1241  return;
1242 
1243  /*
1244  * Always use NMI for PMU
1245  */
1246  apic_write(APIC_LVTPC, APIC_DM_NMI);
1247 }
1248 
1249 static int __kprobes
1250 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1251 {
1252  if (!atomic_read(&active_events))
1253  return NMI_DONE;
1254 
1255  return x86_pmu.handle_irq(regs);
1256 }
1257 
1258 struct event_constraint emptyconstraint;
1259 struct event_constraint unconstrained;
1260 
1261 static int __cpuinit
1262 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1263 {
1264  unsigned int cpu = (long)hcpu;
1265  struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1266  int ret = NOTIFY_OK;
1267 
1268  switch (action & ~CPU_TASKS_FROZEN) {
1269  case CPU_UP_PREPARE:
1270  cpuc->kfree_on_online = NULL;
1271  if (x86_pmu.cpu_prepare)
1272  ret = x86_pmu.cpu_prepare(cpu);
1273  break;
1274 
1275  case CPU_STARTING:
1276  if (x86_pmu.attr_rdpmc)
1277  set_in_cr4(X86_CR4_PCE);
1278  if (x86_pmu.cpu_starting)
1279  x86_pmu.cpu_starting(cpu);
1280  break;
1281 
1282  case CPU_ONLINE:
1283  kfree(cpuc->kfree_on_online);
1284  break;
1285 
1286  case CPU_DYING:
1287  if (x86_pmu.cpu_dying)
1288  x86_pmu.cpu_dying(cpu);
1289  break;
1290 
1291  case CPU_UP_CANCELED:
1292  case CPU_DEAD:
1293  if (x86_pmu.cpu_dead)
1294  x86_pmu.cpu_dead(cpu);
1295  break;
1296 
1297  default:
1298  break;
1299  }
1300 
1301  return ret;
1302 }
1303 
1304 static void __init pmu_check_apic(void)
1305 {
1306  if (cpu_has_apic)
1307  return;
1308 
1309  x86_pmu.apic = 0;
1310  pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1311  pr_info("no hardware sampling interrupt available.\n");
1312 }
1313 
1314 static struct attribute_group x86_pmu_format_group = {
1315  .name = "format",
1316  .attrs = NULL,
1317 };
1318 
1319 static int __init init_hw_perf_events(void)
1320 {
1321  struct x86_pmu_quirk *quirk;
1322  int err;
1323 
1324  pr_info("Performance Events: ");
1325 
1326  switch (boot_cpu_data.x86_vendor) {
1327  case X86_VENDOR_INTEL:
1328  err = intel_pmu_init();
1329  break;
1330  case X86_VENDOR_AMD:
1331  err = amd_pmu_init();
1332  break;
1333  default:
1334  return 0;
1335  }
1336  if (err != 0) {
1337  pr_cont("no PMU driver, software events only.\n");
1338  return 0;
1339  }
1340 
1341  pmu_check_apic();
1342 
1343  /* sanity check that the hardware exists or is emulated */
1344  if (!check_hw_exists())
1345  return 0;
1346 
1347  pr_cont("%s PMU driver.\n", x86_pmu.name);
1348 
1349  for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1350  quirk->func();
1351 
1352  if (!x86_pmu.intel_ctrl)
1353  x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1354 
1355  perf_events_lapic_init();
1356  register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1357 
1358  unconstrained = (struct event_constraint)
1359  __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1360  0, x86_pmu.num_counters, 0);
1361 
1362  x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1363  x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1364 
1365  pr_info("... version: %d\n", x86_pmu.version);
1366  pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1367  pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1368  pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
1369  pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1370  pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
1371  pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
1372 
1373  perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1374  perf_cpu_notifier(x86_pmu_notifier);
1375 
1376  return 0;
1377 }
1378 early_initcall(init_hw_perf_events);
1379 
1380 static inline void x86_pmu_read(struct perf_event *event)
1381 {
1382  x86_perf_event_update(event);
1383 }
1384 
1385 /*
1386  * Start group events scheduling transaction
1387  * Set the flag to make pmu::enable() not perform the
1388  * schedulability test, it will be performed at commit time
1389  */
1390 static void x86_pmu_start_txn(struct pmu *pmu)
1391 {
1392  perf_pmu_disable(pmu);
1393  __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1394  __this_cpu_write(cpu_hw_events.n_txn, 0);
1395 }
1396 
1397 /*
1398  * Stop group events scheduling transaction
1399  * Clear the flag and pmu::enable() will perform the
1400  * schedulability test.
1401  */
1402 static void x86_pmu_cancel_txn(struct pmu *pmu)
1403 {
1404  __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
1405  /*
1406  * Truncate the collected events.
1407  */
1408  __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1409  __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1410  perf_pmu_enable(pmu);
1411 }
1412 
1413 /*
1414  * Commit group events scheduling transaction
1415  * Perform the group schedulability test as a whole
1416  * Return 0 if success
1417  */
1418 static int x86_pmu_commit_txn(struct pmu *pmu)
1419 {
1420  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1421  int assign[X86_PMC_IDX_MAX];
1422  int n, ret;
1423 
1424  n = cpuc->n_events;
1425 
1426  if (!x86_pmu_initialized())
1427  return -EAGAIN;
1428 
1429  ret = x86_pmu.schedule_events(cpuc, n, assign);
1430  if (ret)
1431  return ret;
1432 
1433  /*
1434  * copy new assignment, now we know it is possible
1435  * will be used by hw_perf_enable()
1436  */
1437  memcpy(cpuc->assign, assign, n*sizeof(int));
1438 
1439  cpuc->group_flag &= ~PERF_EVENT_TXN;
1440  perf_pmu_enable(pmu);
1441  return 0;
1442 }
1443 /*
1444  * a fake_cpuc is used to validate event groups. Due to
1445  * the extra reg logic, we need to also allocate a fake
1446  * per_core and per_cpu structure. Otherwise, group events
1447  * using extra reg may conflict without the kernel being
1448  * able to catch this when the last event gets added to
1449  * the group.
1450  */
1451 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1452 {
1453  kfree(cpuc->shared_regs);
1454  kfree(cpuc);
1455 }
1456 
1457 static struct cpu_hw_events *allocate_fake_cpuc(void)
1458 {
1459  struct cpu_hw_events *cpuc;
1460  int cpu = raw_smp_processor_id();
1461 
1462  cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1463  if (!cpuc)
1464  return ERR_PTR(-ENOMEM);
1465 
1466  /* only needed, if we have extra_regs */
1467  if (x86_pmu.extra_regs) {
1468  cpuc->shared_regs = allocate_shared_regs(cpu);
1469  if (!cpuc->shared_regs)
1470  goto error;
1471  }
1472  cpuc->is_fake = 1;
1473  return cpuc;
1474 error:
1475  free_fake_cpuc(cpuc);
1476  return ERR_PTR(-ENOMEM);
1477 }
1478 
1479 /*
1480  * validate that we can schedule this event
1481  */
1482 static int validate_event(struct perf_event *event)
1483 {
1484  struct cpu_hw_events *fake_cpuc;
1485  struct event_constraint *c;
1486  int ret = 0;
1487 
1488  fake_cpuc = allocate_fake_cpuc();
1489  if (IS_ERR(fake_cpuc))
1490  return PTR_ERR(fake_cpuc);
1491 
1492  c = x86_pmu.get_event_constraints(fake_cpuc, event);
1493 
1494  if (!c || !c->weight)
1495  ret = -EINVAL;
1496 
1497  if (x86_pmu.put_event_constraints)
1498  x86_pmu.put_event_constraints(fake_cpuc, event);
1499 
1500  free_fake_cpuc(fake_cpuc);
1501 
1502  return ret;
1503 }
1504 
1505 /*
1506  * validate a single event group
1507  *
1508  * validation include:
1509  * - check events are compatible with each other
1510  * - events do not compete for the same counter
1511  * - number of events <= number of counters
1512  *
1513  * validation ensures the group can be loaded onto the
1514  * PMU if it was the only group available.
1515  */
1516 static int validate_group(struct perf_event *event)
1517 {
1518  struct perf_event *leader = event->group_leader;
1519  struct cpu_hw_events *fake_cpuc;
1520  int ret = -EINVAL, n;
1521 
1522  fake_cpuc = allocate_fake_cpuc();
1523  if (IS_ERR(fake_cpuc))
1524  return PTR_ERR(fake_cpuc);
1525  /*
1526  * the event is not yet connected with its
1527  * siblings therefore we must first collect
1528  * existing siblings, then add the new event
1529  * before we can simulate the scheduling
1530  */
1531  n = collect_events(fake_cpuc, leader, true);
1532  if (n < 0)
1533  goto out;
1534 
1535  fake_cpuc->n_events = n;
1536  n = collect_events(fake_cpuc, event, false);
1537  if (n < 0)
1538  goto out;
1539 
1540  fake_cpuc->n_events = n;
1541 
1542  ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1543 
1544 out:
1545  free_fake_cpuc(fake_cpuc);
1546  return ret;
1547 }
1548 
1549 static int x86_pmu_event_init(struct perf_event *event)
1550 {
1551  struct pmu *tmp;
1552  int err;
1553 
1554  switch (event->attr.type) {
1555  case PERF_TYPE_RAW:
1556  case PERF_TYPE_HARDWARE:
1557  case PERF_TYPE_HW_CACHE:
1558  break;
1559 
1560  default:
1561  return -ENOENT;
1562  }
1563 
1564  err = __x86_pmu_event_init(event);
1565  if (!err) {
1566  /*
1567  * we temporarily connect event to its pmu
1568  * such that validate_group() can classify
1569  * it as an x86 event using is_x86_event()
1570  */
1571  tmp = event->pmu;
1572  event->pmu = &pmu;
1573 
1574  if (event->group_leader != event)
1575  err = validate_group(event);
1576  else
1577  err = validate_event(event);
1578 
1579  event->pmu = tmp;
1580  }
1581  if (err) {
1582  if (event->destroy)
1583  event->destroy(event);
1584  }
1585 
1586  return err;
1587 }
1588 
1589 static int x86_pmu_event_idx(struct perf_event *event)
1590 {
1591  int idx = event->hw.idx;
1592 
1593  if (!x86_pmu.attr_rdpmc)
1594  return 0;
1595 
1596  if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
1597  idx -= INTEL_PMC_IDX_FIXED;
1598  idx |= 1 << 30;
1599  }
1600 
1601  return idx + 1;
1602 }
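
The value returned here (hardware index + 1, with bit 30 marking fixed counters) ends up in the index field of the event's mmap()ed perf_event_mmap_page; user space subtracts one and feeds it to RDPMC. A sketch of that consumer side, with the seqlock loop and pmc_width sign extension omitted for brevity:

/* illustrative sketch, not part of perf_event.c */
#include <stdint.h>
#include <linux/perf_event.h>

static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

/* pg points at the first page of the event's mmap() area */
static uint64_t read_counter_raw(const volatile struct perf_event_mmap_page *pg)
{
	uint32_t idx = pg->index;

	return idx ? rdpmc(idx - 1) : 0;	/* 0: event not currently on a counter */
}
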
1603 
1604 static ssize_t get_attr_rdpmc(struct device *cdev,
1605  struct device_attribute *attr,
1606  char *buf)
1607 {
1608  return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
1609 }
1610 
1611 static void change_rdpmc(void *info)
1612 {
1613  bool enable = !!(unsigned long)info;
1614 
1615  if (enable)
1616  set_in_cr4(X86_CR4_PCE);
1617  else
1618  clear_in_cr4(X86_CR4_PCE);
1619 }
1620 
1621 static ssize_t set_attr_rdpmc(struct device *cdev,
1622  struct device_attribute *attr,
1623  const char *buf, size_t count)
1624 {
1625  unsigned long val;
1626  ssize_t ret;
1627 
1628  ret = kstrtoul(buf, 0, &val);
1629  if (ret)
1630  return ret;
1631 
1632  if (!!val != !!x86_pmu.attr_rdpmc) {
1633  x86_pmu.attr_rdpmc = !!val;
1634  smp_call_function(change_rdpmc, (void *)val, 1);
1635  }
1636 
1637  return count;
1638 }
1639 
1640 static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
1641 
1642 static struct attribute *x86_pmu_attrs[] = {
1643  &dev_attr_rdpmc.attr,
1644  NULL,
1645 };
1646 
1647 static struct attribute_group x86_pmu_attr_group = {
1648  .attrs = x86_pmu_attrs,
1649 };
1650 
1651 static const struct attribute_group *x86_pmu_attr_groups[] = {
1652  &x86_pmu_attr_group,
1653  &x86_pmu_format_group,
1654  NULL,
1655 };
1656 
1657 static void x86_pmu_flush_branch_stack(void)
1658 {
1659  if (x86_pmu.flush_branch_stack)
1660  x86_pmu.flush_branch_stack();
1661 }
1662 
1663 void perf_check_microcode(void)
1664 {
1665  if (x86_pmu.check_microcode)
1666  x86_pmu.check_microcode();
1667 }
1668 EXPORT_SYMBOL_GPL(perf_check_microcode);
1669 
1670 static struct pmu pmu = {
1671  .pmu_enable = x86_pmu_enable,
1672  .pmu_disable = x86_pmu_disable,
1673 
1674  .attr_groups = x86_pmu_attr_groups,
1675 
1676  .event_init = x86_pmu_event_init,
1677 
1678  .add = x86_pmu_add,
1679  .del = x86_pmu_del,
1680  .start = x86_pmu_start,
1681  .stop = x86_pmu_stop,
1682  .read = x86_pmu_read,
1683 
1684  .start_txn = x86_pmu_start_txn,
1685  .cancel_txn = x86_pmu_cancel_txn,
1686  .commit_txn = x86_pmu_commit_txn,
1687 
1688  .event_idx = x86_pmu_event_idx,
1689  .flush_branch_stack = x86_pmu_flush_branch_stack,
1690 };
1691 
1692 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1693 {
1694  userpg->cap_usr_time = 0;
1695  userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
1696  userpg->pmc_width = x86_pmu.cntval_bits;
1697 
1698  if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1699  return;
1700 
1701  if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1702  return;
1703 
1704  userpg->cap_usr_time = 1;
1705  userpg->time_mult = this_cpu_read(cyc2ns);
1706  userpg->time_shift = CYC2NS_SCALE_FACTOR;
1707  userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1708 }
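
User space combines the time_mult, time_shift and time_offset fields filled in above to turn a TSC reading into nanoseconds; the quotient/remainder split in this sketch is simply one way to avoid overflowing the 64-bit multiply for large cycle counts:

/* illustrative sketch, not part of perf_event.c */
#include <stdint.h>

static uint64_t cycles_to_ns(uint64_t cycles, uint32_t time_mult,
			     uint16_t time_shift, uint64_t time_offset)
{
	uint64_t quot = cycles >> time_shift;
	uint64_t rem  = cycles & (((uint64_t)1 << time_shift) - 1);

	return time_offset + quot * time_mult + ((rem * time_mult) >> time_shift);
}
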
1709 
1710 /*
1711  * callchain support
1712  */
1713 
1714 static int backtrace_stack(void *data, char *name)
1715 {
1716  return 0;
1717 }
1718 
1719 static void backtrace_address(void *data, unsigned long addr, int reliable)
1720 {
1721  struct perf_callchain_entry *entry = data;
1722 
1723  perf_callchain_store(entry, addr);
1724 }
1725 
1726 static const struct stacktrace_ops backtrace_ops = {
1727  .stack = backtrace_stack,
1728  .address = backtrace_address,
1729  .walk_stack = print_context_stack_bp,
1730 };
1731 
1732 void
1733 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1734 {
1735  if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1736  /* TODO: We don't support guest os callchain now */
1737  return;
1738  }
1739 
1740  perf_callchain_store(entry, regs->ip);
1741 
1742  dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1743 }
1744 
1745 static inline int
1746 valid_user_frame(const void __user *fp, unsigned long size)
1747 {
1748  return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1749 }
1750 
1751 static unsigned long get_segment_base(unsigned int segment)
1752 {
1753  struct desc_struct *desc;
1754  int idx = segment >> 3;
1755 
1756  if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1757  if (idx > LDT_ENTRIES)
1758  return 0;
1759 
1760  if (idx > current->active_mm->context.size)
1761  return 0;
1762 
1763  desc = current->active_mm->context.ldt;
1764  } else {
1765  if (idx > GDT_ENTRIES)
1766  return 0;
1767 
1768  desc = __this_cpu_ptr(&gdt_page.gdt[0]);
1769  }
1770 
1771  return get_desc_base(desc + idx);
1772 }
1773 
1774 #ifdef CONFIG_COMPAT
1775 
1776 #include <asm/compat.h>
1777 
1778 static inline int
1779 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1780 {
1781  /* 32-bit process in 64-bit kernel. */
1782  unsigned long ss_base, cs_base;
1783  struct stack_frame_ia32 frame;
1784  const void __user *fp;
1785 
1786  if (!test_thread_flag(TIF_IA32))
1787  return 0;
1788 
1789  cs_base = get_segment_base(regs->cs);
1790  ss_base = get_segment_base(regs->ss);
1791 
1792  fp = compat_ptr(ss_base + regs->bp);
1793  while (entry->nr < PERF_MAX_STACK_DEPTH) {
1794  unsigned long bytes;
1795  frame.next_frame = 0;
1796  frame.return_address = 0;
1797 
1798  bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1799  if (bytes != sizeof(frame))
1800  break;
1801 
1802  if (!valid_user_frame(fp, sizeof(frame)))
1803  break;
1804 
1805  perf_callchain_store(entry, cs_base + frame.return_address);
1806  fp = compat_ptr(ss_base + frame.next_frame);
1807  }
1808  return 1;
1809 }
1810 #else
1811 static inline int
1812 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1813 {
1814  return 0;
1815 }
1816 #endif
1817 
1818 void
1819 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1820 {
1821  struct stack_frame frame;
1822  const void __user *fp;
1823 
1824  if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1825  /* TODO: We don't support guest os callchain now */
1826  return;
1827  }
1828 
1829  /*
1830  * We don't know what to do with VM86 stacks.. ignore them for now.
1831  */
1832  if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
1833  return;
1834 
1835  fp = (void __user *)regs->bp;
1836 
1837  perf_callchain_store(entry, regs->ip);
1838 
1839  if (!current->mm)
1840  return;
1841 
1842  if (perf_callchain_user32(regs, entry))
1843  return;
1844 
1845  while (entry->nr < PERF_MAX_STACK_DEPTH) {
1846  unsigned long bytes;
1847  frame.next_frame = NULL;
1848  frame.return_address = 0;
1849 
1850  bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1851  if (bytes != sizeof(frame))
1852  break;
1853 
1854  if (!valid_user_frame(fp, sizeof(frame)))
1855  break;
1856 
1857  perf_callchain_store(entry, frame.return_address);
1858  fp = frame.next_frame;
1859  }
1860 }
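
The same frame-pointer walk, performed by a process on its own stack, makes the loop above easy to experiment with; it assumes the binary keeps frame pointers (-fno-omit-frame-pointer), otherwise the chain stops immediately, much as the kernel walker would:

/* illustrative sketch, not part of perf_event.c */
#include <stdio.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void dump_own_callchain(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && depth < 16) {
		printf("frame %d: return address %#lx\n", depth, fp->return_address);
		fp = fp->next_frame;
		depth++;
	}
}

int main(void)
{
	dump_own_callchain();
	return 0;
}
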
1861 
1862 /*
1863  * Deal with code segment offsets for the various execution modes:
1864  *
1865  * VM86 - the good olde 16 bit days, where the linear address is
1866  * 20 bits and we use regs->ip + 0x10 * regs->cs.
1867  *
1868  * IA32 - Where we need to look at GDT/LDT segment descriptor tables
1869  * to figure out what the 32bit base address is.
1870  *
1871  * X32 - has TIF_X32 set, but is running in x86_64
1872  *
1873  * X86_64 - CS,DS,SS,ES are all zero based.
1874  */
1875 static unsigned long code_segment_base(struct pt_regs *regs)
1876 {
1877  /*
1878  * If we are in VM86 mode, add the segment offset to convert to a
1879  * linear address.
1880  */
1881  if (regs->flags & X86_VM_MASK)
1882  return 0x10 * regs->cs;
1883 
1884  /*
1885  * For IA32 we look at the GDT/LDT segment base to convert the
1886  * effective IP to a linear address.
1887  */
1888 #ifdef CONFIG_X86_32
1889  if (user_mode(regs) && regs->cs != __USER_CS)
1890  return get_segment_base(regs->cs);
1891 #else
1892  if (test_thread_flag(TIF_IA32)) {
1893  if (user_mode(regs) && regs->cs != __USER32_CS)
1894  return get_segment_base(regs->cs);
1895  }
1896 #endif
1897  return 0;
1898 }
1899 
1900 unsigned long perf_instruction_pointer(struct pt_regs *regs)
1901 {
1902  if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1903  return perf_guest_cbs->get_guest_ip();
1904 
1905  return regs->ip + code_segment_base(regs);
1906 }
1907 
1908 unsigned long perf_misc_flags(struct pt_regs *regs)
1909 {
1910  int misc = 0;
1911 
1912  if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1913  if (perf_guest_cbs->is_user_mode())
1914  misc |= PERF_RECORD_MISC_GUEST_USER;
1915  else
1916  misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1917  } else {
1918  if (user_mode(regs))
1919  misc |= PERF_RECORD_MISC_USER;
1920  else
1921  misc |= PERF_RECORD_MISC_KERNEL;
1922  }
1923 
1924  if (regs->flags & PERF_EFLAGS_EXACT)
1925  misc |= PERF_RECORD_MISC_EXACT_IP;
1926 
1927  return misc;
1928 }
1929 
1930 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
1931 {
1932  cap->version = x86_pmu.version;
1933  cap->num_counters_gp = x86_pmu.num_counters;
1934  cap->num_counters_fixed = x86_pmu.num_counters_fixed;
1935  cap->bit_width_gp = x86_pmu.cntval_bits;
1936  cap->bit_width_fixed = x86_pmu.cntval_bits;
1937  cap->events_mask = (unsigned int)x86_pmu.events_maskl;
1938  cap->events_mask_len = x86_pmu.events_mask_len;
1939 }