Linux Kernel  3.7.1
perf_event.c
1 /*
2  * Hardware performance events for the Alpha.
3  *
4  * We implement HW counts on the EV67 and subsequent CPUs only.
5  *
6  * (C) 2010 Michael J. Cree
7  *
8  * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
9  * ARM code, which are copyright by their respective authors.
10  */
11 
12 #include <linux/perf_event.h>
13 #include <linux/kprobes.h>
14 #include <linux/kernel.h>
15 #include <linux/kdebug.h>
16 #include <linux/mutex.h>
17 #include <linux/init.h>
18 
19 #include <asm/hwrpb.h>
20 #include <linux/atomic.h>
21 #include <asm/irq.h>
22 #include <asm/irq_regs.h>
23 #include <asm/pal.h>
24 #include <asm/wrperfmon.h>
25 #include <asm/hw_irq.h>
26 
27 
28 /* The maximum number of PMCs on any Alpha CPU whatsoever. */
29 #define MAX_HWEVENTS 3
30 #define PMC_NO_INDEX -1
31 
32 /* For tracking PMCs and the hw events they monitor on each CPU. */
33 struct cpu_hw_events {
34  int enabled;
35  /* Number of events scheduled; also number entries valid in arrays below. */
36  int n_events;
37  /* Number events added since last hw_perf_disable(). */
38  int n_added;
39  /* Events currently scheduled. */
40  struct perf_event *event[MAX_HWEVENTS];
41  /* Event type of each scheduled event. */
42  unsigned long evtype[MAX_HWEVENTS];
43  /* Current index of each scheduled event; if not yet determined
44  * contains PMC_NO_INDEX.
45  */
46  int current_idx[MAX_HWEVENTS];
47  /* The active PMCs' config for easy use with wrperfmon(). */
48  unsigned long config;
49  /* The active counters' indices for easy use with wrperfmon(). */
50  unsigned long idx_mask;
51 };
52 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
53 
54 
55 
56 /*
57  * A structure to hold the description of the PMCs available on a particular
58  * type of Alpha CPU.
59  */
60 struct alpha_pmu_t {
61  /* Mapping of the perf system hw event types to indigenous event types */
62  const int *event_map;
63  /* The number of entries in the event_map */
64  int max_events;
65  /* The number of PMCs on this Alpha */
66  int num_pmcs;
67  /*
68  * All PMC counters reside in the IBOX register PCTR. This is the
69  * LSB of the counter.
70  */
71  int pmc_count_shift[MAX_HWEVENTS];
72  /*
73  * The mask that isolates the PMC bits when the LSB of the counter
74  * is shifted to bit 0.
75  */
76  unsigned long pmc_count_mask[MAX_HWEVENTS];
77  /* The maximum period the PMC can count. */
78  unsigned long pmc_max_period[MAX_HWEVENTS];
79  /*
80  * The maximum value that may be written to the counter due to
81  * hardware restrictions is pmc_max_period - pmc_left.
82  */
83  long pmc_left[3];
84  /* Subroutine for allocation of PMCs. Enforces constraints. */
85  int (*check_constraints)(struct perf_event **, unsigned long *, int);
86 };
87 
88 /*
89  * The Alpha CPU PMU description currently in operation. This is set during
90  * the boot process to the specific CPU of the machine.
91  */
92 static const struct alpha_pmu_t *alpha_pmu;
93 
94 
95 #define HW_OP_UNSUPPORTED -1
96 
97 /*
98  * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
99  * follows. Since they are identical we refer to them collectively as the
100  * EV67 henceforth.
101  */
102 
103 /*
104  * EV67 PMC event types
105  *
106  * There is no one-to-one mapping of the possible hw event types to the
107  * actual codes that are used to program the PMCs, hence we introduce our
108  * own hw event type identifiers.
109  */
110 enum ev67_pmc_event_type {
111  EV67_CYCLES = 1,
112  EV67_INSTRUCTIONS,
113  EV67_BCACHEMISS,
114  EV67_MBOXREPLAY,
115  EV67_LAST_ET
116 };
117 #define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
118 
119 
120 /* Mapping of the hw event types to the perf tool interface */
121 static const int ev67_perfmon_event_map[] = {
122  [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
123  [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
124  [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
125  [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
126 };
127 
128 struct ev67_mapping_t {
129  int config;
130  int idx;
131 };
132 
133 /*
134  * The mapping used when only one event is scheduled - these must be in the
135  * same order as the enum ev67_pmc_event_type definition.
136  */
137 static const struct ev67_mapping_t ev67_mapping[] = {
138  {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
139  {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
140  {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
141  {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
142 };
143 
144 
145 /*
146  * Check that a group of events can be simultaneously scheduled on to the
147  * EV67 PMU. Also allocate counter indices and config.
148  */
149 static int ev67_check_constraints(struct perf_event **event,
150  unsigned long *evtype, int n_ev)
151 {
152  int idx0;
153  unsigned long config;
154 
155  idx0 = ev67_mapping[evtype[0]-1].idx;
156  config = ev67_mapping[evtype[0]-1].config;
157  if (n_ev == 1)
158  goto success;
159 
160  BUG_ON(n_ev != 2);
161 
162  if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
163  /* MBOX replay traps must be on PMC 1 */
164  idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
165  /* Only cycles can accompany MBOX replay traps */
166  if (evtype[idx0] == EV67_CYCLES) {
167  config = EV67_PCTR_CYCLES_MBOX;
168  goto success;
169  }
170  }
171 
172  if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
173  /* Bcache misses must be on PMC 1 */
174  idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
175  /* Only instructions can accompany Bcache misses */
176  if (evtype[idx0] == EV67_INSTRUCTIONS) {
177  config = EV67_PCTR_INSTR_BCACHEMISS;
178  goto success;
179  }
180  }
181 
182  if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
183  /* Instructions must be on PMC 0 */
184  idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
185  /* By this point only cycles can accompany instructions */
186  if (evtype[idx0^1] == EV67_CYCLES) {
187  config = EV67_PCTR_INSTR_CYCLES;
188  goto success;
189  }
190  }
191 
192  /* Otherwise, darn it, there is a conflict. */
193  return -1;
194 
195 success:
196  event[0]->hw.idx = idx0;
197  event[0]->hw.config_base = config;
198  if (n_ev == 2) {
199  event[1]->hw.idx = idx0 ^ 1;
200  event[1]->hw.config_base = config;
201  }
202  return 0;
203 }
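A minimal user-space sketch (not part of this file) of one of the few two-event groups that ev67_check_constraints() accepts, cycles plus instructions. The helper name and error handling are illustrative only; perf_event_open() has no libc wrapper and is invoked via syscall().

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	/* pid = 0 (this task), cpu = -1 (any CPU), flags = 0 */
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int open_ev67_group(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	/* exclude_* are left zero: the EV67 cannot filter by CPU mode. */

	attr.config = PERF_COUNT_HW_CPU_CYCLES;		/* maps to EV67_CYCLES */
	leader = perf_open(&attr, -1);

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;	/* maps to EV67_INSTRUCTIONS */
	sibling = perf_open(&attr, leader);

	return (leader >= 0 && sibling >= 0) ? leader : -1;
}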
204 
205 
206 static const struct alpha_pmu_t ev67_pmu = {
207  .event_map = ev67_perfmon_event_map,
208  .max_events = ARRAY_SIZE(ev67_perfmon_event_map),
209  .num_pmcs = 2,
210  .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
211  .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
212  .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
213  .pmc_left = {16, 4, 0},
214  .check_constraints = ev67_check_constraints
215 };
216 
217 
218 
219 /*
220  * Helper routines to ensure that we read/write only the correct PMC bits
221  * when calling the wrperfmon PALcall.
222  */
223 static inline void alpha_write_pmc(int idx, unsigned long val)
224 {
225  val &= alpha_pmu->pmc_count_mask[idx];
226  val <<= alpha_pmu->pmc_count_shift[idx];
227  val |= (1<<idx);
228  wrperfmon(PERFMON_CMD_WRITE, val);
229 }
230 
231 static inline unsigned long alpha_read_pmc(int idx)
232 {
233  unsigned long val;
234 
235  val = wrperfmon(PERFMON_CMD_READ, 0);
236  val >>= alpha_pmu->pmc_count_shift[idx];
237  val &= alpha_pmu->pmc_count_mask[idx];
238  return val;
239 }
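Both counters are packed into the single PCTR value returned by wrperfmon(PERFMON_CMD_READ, 0), so a counter is recovered by shifting its field down and masking. A minimal sketch of that extraction (not part of this file), using the 20-bit field width implied by pmc_max_period below; the shift argument stands in for pmc_count_shift[idx]:

/* Illustrative sketch of the read side of the helpers above. */
static unsigned long pctr_field(unsigned long pctr, int shift)
{
	unsigned long mask = (1UL << 20) - 1;	/* 20-bit counter field */

	return (pctr >> shift) & mask;		/* same operation as alpha_read_pmc() */
}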
240 
241 /* Set a new period to sample over */
242 static int alpha_perf_event_set_period(struct perf_event *event,
243  struct hw_perf_event *hwc, int idx)
244 {
245  long left = local64_read(&hwc->period_left);
246  long period = hwc->sample_period;
247  int ret = 0;
248 
249  if (unlikely(left <= -period)) {
250  left = period;
251  local64_set(&hwc->period_left, left);
252  hwc->last_period = period;
253  ret = 1;
254  }
255 
256  if (unlikely(left <= 0)) {
257  left += period;
258  local64_set(&hwc->period_left, left);
259  hwc->last_period = period;
260  ret = 1;
261  }
262 
263  /*
264  * Hardware restrictions require that the counters must not be
265  * written with values that are too close to the maximum period.
266  */
267  if (unlikely(left < alpha_pmu->pmc_left[idx]))
268  left = alpha_pmu->pmc_left[idx];
269 
270  if (left > (long)alpha_pmu->pmc_max_period[idx])
271  left = alpha_pmu->pmc_max_period[idx];
272 
273  local64_set(&hwc->prev_count, (unsigned long)(-left));
274 
275  alpha_write_pmc(idx, (unsigned long)(-left));
276 
277  perf_event_update_userpage(event);
278 
279  return ret;
280 }
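The counter is armed with the two's complement of the remaining period, so it wraps past zero (and raises a PMI) after exactly that many events. A small self-contained sketch of the arithmetic (not part of this file), reusing the PMC 0 limits from the ev67_pmu description:

#include <stdio.h>

/* Illustrative sketch of the arming arithmetic in alpha_perf_event_set_period(). */
int main(void)
{
	unsigned long max_period = (1UL << 20) - 1;	/* pmc_max_period[0] */
	long min_left = 16;				/* pmc_left[0] */
	long left = 1000;				/* events still to count */

	if (left < min_left)
		left = min_left;
	if (left > (long)max_period)
		left = max_period;

	/* The 20-bit field is written with -left; after 'left' more events
	 * it wraps past zero and the overflow interrupt fires. */
	unsigned long armed = (unsigned long)(-left) & max_period;

	printf("armed = %#lx, overflow after %ld events\n", armed, left);
	return 0;
}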
281 
282 
283 /*
284  * Calculates the count (the 'delta') since the last time the PMC was read.
285  *
286  * As the PMCs' full period can easily be exceeded within the perf system
287  * sampling period we cannot use any high order bits as a guard bit in the
288  * PMCs to detect overflow as is done by other architectures. The code here
289  * calculates the delta on the basis that there is no overflow when ovf is
290  * zero. The value passed via ovf by the interrupt handler corrects for
291  * overflow.
292  *
293  * This can be racy on rare occasions -- a call to this routine can occur
294  * with an overflowed counter just before the PMI service routine is called.
295  * The check for delta negative hopefully always rectifies this situation.
296  */
297 static unsigned long alpha_perf_event_update(struct perf_event *event,
298  struct hw_perf_event *hwc, int idx, long ovf)
299 {
300  long prev_raw_count, new_raw_count;
301  long delta;
302 
303 again:
304  prev_raw_count = local64_read(&hwc->prev_count);
305  new_raw_count = alpha_read_pmc(idx);
306 
307  if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
308  new_raw_count) != prev_raw_count)
309  goto again;
310 
311  delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
312 
313  /* It is possible on very rare occasions that the PMC has overflowed
314  * but the interrupt is yet to come. Detect and fix this situation.
315  */
316  if (unlikely(delta < 0)) {
317  delta += alpha_pmu->pmc_max_period[idx] + 1;
318  }
319 
320  local64_add(delta, &event->count);
321  local64_sub(delta, &hwc->period_left);
322 
323  return new_raw_count;
324 }
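A worked sketch (not part of this file) of the wraparound correction above: if the 20-bit counter was last read at 0xffff0 and has since wrapped to 0x00010 with no PMI accounted for yet (ovf == 0), the raw difference is negative, and adding the counter modulus recovers the true 32 events.

#include <assert.h>

/* Illustrative sketch of the delta computation in alpha_perf_event_update(). */
int main(void)
{
	unsigned long mask = (1UL << 20) - 1;	/* pmc_count_mask / max period */
	long prev = 0xffff0;			/* value at the previous read */
	long now  = 0x00010;			/* counter has since wrapped */
	long ovf  = 0;				/* no interrupt accounted for yet */

	long delta = (now - (long)(prev & mask)) + ovf;
	if (delta < 0)
		delta += (long)mask + 1;	/* add the counter modulus */

	assert(delta == 0x20);			/* 32 events elapsed across the wrap */
	return 0;
}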
325 
326 
327 /*
328  * Collect all HW events into the array event[].
329  */
330 static int collect_events(struct perf_event *group, int max_count,
331  struct perf_event *event[], unsigned long *evtype,
332  int *current_idx)
333 {
334  struct perf_event *pe;
335  int n = 0;
336 
337  if (!is_software_event(group)) {
338  if (n >= max_count)
339  return -1;
340  event[n] = group;
341  evtype[n] = group->hw.event_base;
342  current_idx[n++] = PMC_NO_INDEX;
343  }
344  list_for_each_entry(pe, &group->sibling_list, group_entry) {
345  if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
346  if (n >= max_count)
347  return -1;
348  event[n] = pe;
349  evtype[n] = pe->hw.event_base;
350  current_idx[n++] = PMC_NO_INDEX;
351  }
352  }
353  return n;
354 }
355 
356 
357 
358 /*
359  * Check that a group of events can be simultaneously scheduled on to the PMU.
360  */
361 static int alpha_check_constraints(struct perf_event **events,
362  unsigned long *evtypes, int n_ev)
363 {
364 
365  /* No HW events is possible from hw_perf_group_sched_in(). */
366  if (n_ev == 0)
367  return 0;
368 
369  if (n_ev > alpha_pmu->num_pmcs)
370  return -1;
371 
372  return alpha_pmu->check_constraints(events, evtypes, n_ev);
373 }
374 
375 
376 /*
377  * If new events have been scheduled then update cpuc with the new
378  * configuration. This may involve shifting cycle counts from one PMC to
379  * another.
380  */
381 static void maybe_change_configuration(struct cpu_hw_events *cpuc)
382 {
383  int j;
384 
385  if (cpuc->n_added == 0)
386  return;
387 
388  /* Find counters that are moving to another PMC and update */
389  for (j = 0; j < cpuc->n_events; j++) {
390  struct perf_event *pe = cpuc->event[j];
391 
392  if (cpuc->current_idx[j] != PMC_NO_INDEX &&
393  cpuc->current_idx[j] != pe->hw.idx) {
394  alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
395  cpuc->current_idx[j] = PMC_NO_INDEX;
396  }
397  }
398 
399  /* Assign to counters all unassigned events. */
400  cpuc->idx_mask = 0;
401  for (j = 0; j < cpuc->n_events; j++) {
402  struct perf_event *pe = cpuc->event[j];
403  struct hw_perf_event *hwc = &pe->hw;
404  int idx = hwc->idx;
405 
406  if (cpuc->current_idx[j] == PMC_NO_INDEX) {
407  alpha_perf_event_set_period(pe, hwc, idx);
408  cpuc->current_idx[j] = idx;
409  }
410 
411  if (!(hwc->state & PERF_HES_STOPPED))
412  cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
413  }
414  cpuc->config = cpuc->event[0]->hw.config_base;
415 }
416 
417 
418 
419 /* Schedule perf HW event on to PMU.
420  * - this function is called from outside this module via the pmu struct
421  * returned from perf event initialisation.
422  */
423 static int alpha_pmu_add(struct perf_event *event, int flags)
424 {
425  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
426  struct hw_perf_event *hwc = &event->hw;
427  int n0;
428  int ret;
429  unsigned long irq_flags;
430 
431  /*
432  * The Sparc code has the IRQ disable first followed by the perf
433  * disable, however this can lead to an overflowed counter with the
434  * PMI disabled on rare occasions. The alpha_perf_event_update()
435  * routine should detect this situation by noting a negative delta,
436  * nevertheless we disable the PMCs first to enable a potential
437  * final PMI to occur before we disable interrupts.
438  */
439  perf_pmu_disable(event->pmu);
440  local_irq_save(irq_flags);
441 
442  /* Default to error to be returned */
443  ret = -EAGAIN;
444 
445  /* Insert event on to PMU and if successful modify ret to valid return */
446  n0 = cpuc->n_events;
447  if (n0 < alpha_pmu->num_pmcs) {
448  cpuc->event[n0] = event;
449  cpuc->evtype[n0] = event->hw.event_base;
450  cpuc->current_idx[n0] = PMC_NO_INDEX;
451 
452  if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
453  cpuc->n_events++;
454  cpuc->n_added++;
455  ret = 0;
456  }
457  }
458 
459  hwc->state = PERF_HES_UPTODATE;
460  if (!(flags & PERF_EF_START))
461  hwc->state |= PERF_HES_STOPPED;
462 
463  local_irq_restore(irq_flags);
464  perf_pmu_enable(event->pmu);
465 
466  return ret;
467 }
468 
469 
470 
471 /* Remove a perf HW event from the PMU
472  * - this function is called from outside this module via the pmu struct
473  * returned from perf event initialisation.
474  */
475 static void alpha_pmu_del(struct perf_event *event, int flags)
476 {
477  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
478  struct hw_perf_event *hwc = &event->hw;
479  unsigned long irq_flags;
480  int j;
481 
482  perf_pmu_disable(event->pmu);
483  local_irq_save(irq_flags);
484 
485  for (j = 0; j < cpuc->n_events; j++) {
486  if (event == cpuc->event[j]) {
487  int idx = cpuc->current_idx[j];
488 
489  /* Shift remaining entries down into the existing
490  * slot.
491  */
492  while (++j < cpuc->n_events) {
493  cpuc->event[j - 1] = cpuc->event[j];
494  cpuc->evtype[j - 1] = cpuc->evtype[j];
495  cpuc->current_idx[j - 1] =
496  cpuc->current_idx[j];
497  }
498 
499  /* Absorb the final count and turn off the event. */
500  alpha_perf_event_update(event, hwc, idx, 0);
501  perf_event_update_userpage(event);
502 
503  cpuc->idx_mask &= ~(1UL<<idx);
504  cpuc->n_events--;
505  break;
506  }
507  }
508 
509  local_irq_restore(irq_flags);
510  perf_pmu_enable(event->pmu);
511 }
512 
513 
514 static void alpha_pmu_read(struct perf_event *event)
515 {
516  struct hw_perf_event *hwc = &event->hw;
517 
518  alpha_perf_event_update(event, hwc, hwc->idx, 0);
519 }
520 
521 
522 static void alpha_pmu_stop(struct perf_event *event, int flags)
523 {
524  struct hw_perf_event *hwc = &event->hw;
525  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
526 
527  if (!(hwc->state & PERF_HES_STOPPED)) {
528  cpuc->idx_mask &= ~(1UL<<hwc->idx);
529  hwc->state |= PERF_HES_STOPPED;
530  }
531 
532  if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
533  alpha_perf_event_update(event, hwc, hwc->idx, 0);
534  hwc->state |= PERF_HES_UPTODATE;
535  }
536 
537  if (cpuc->enabled)
538  wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
539 }
540 
541 
542 static void alpha_pmu_start(struct perf_event *event, int flags)
543 {
544  struct hw_perf_event *hwc = &event->hw;
545  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
546 
547  if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
548  return;
549 
550  if (flags & PERF_EF_RELOAD) {
551  WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
552  alpha_perf_event_set_period(event, hwc, hwc->idx);
553  }
554 
555  hwc->state = 0;
556 
557  cpuc->idx_mask |= 1UL<<hwc->idx;
558  if (cpuc->enabled)
559  wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
560 }
561 
562 
563 /*
564  * Check that CPU performance counters are supported.
565  * - currently support EV67 and later CPUs.
566  * - actually some later revisions of the EV6 have the same PMC model as the
567  * EV67 but we don't do sufficiently deep CPU detection to detect them.
568  * Bad luck to the very few people who might have one, I guess.
569  */
570 static int supported_cpu(void)
571 {
572  struct percpu_struct *cpu;
573  unsigned long cputype;
574 
575  /* Get cpu type from HW */
576  cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
577  cputype = cpu->type & 0xffffffff;
578  /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
579  return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
580 }
581 
582 
583 
584 static void hw_perf_event_destroy(struct perf_event *event)
585 {
586  /* Nothing to be done! */
587  return;
588 }
589 
590 
591 
592 static int __hw_perf_event_init(struct perf_event *event)
593 {
594  struct perf_event_attr *attr = &event->attr;
595  struct hw_perf_event *hwc = &event->hw;
596  struct perf_event *evts[MAX_HWEVENTS];
597  unsigned long evtypes[MAX_HWEVENTS];
598  int idx_rubbish_bin[MAX_HWEVENTS];
599  int ev;
600  int n;
601 
602  /* We only support a limited range of HARDWARE event types, with one
603  * further event programmable only via a RAW event type.
604  */
605  if (attr->type == PERF_TYPE_HARDWARE) {
606  if (attr->config >= alpha_pmu->max_events)
607  return -EINVAL;
608  ev = alpha_pmu->event_map[attr->config];
609  } else if (attr->type == PERF_TYPE_HW_CACHE) {
610  return -EOPNOTSUPP;
611  } else if (attr->type == PERF_TYPE_RAW) {
612  ev = attr->config & 0xff;
613  } else {
614  return -EOPNOTSUPP;
615  }
616 
617  if (ev < 0) {
618  return ev;
619  }
620 
621  /* The EV67 does not support mode exclusion */
622  if (attr->exclude_kernel || attr->exclude_user
623  || attr->exclude_hv || attr->exclude_idle) {
624  return -EPERM;
625  }
626 
627  /*
628  * We place the event type in event_base here and leave calculation
629  * of the codes to programme the PMU for alpha_pmu_enable() because
630  * it is only then that we will know which HW events are actually
631  * scheduled on to the PMU. At that point the code to programme the
632  * PMU is put into config_base and the PMC to use is placed into
633  * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
634  * it is yet to be determined.
635  */
636  hwc->event_base = ev;
637 
638  /* Collect events in a group together suitable for calling
639  * alpha_check_constraints() to verify that the group as a whole can
640  * be scheduled on to the PMU.
641  */
642  n = 0;
643  if (event->group_leader != event) {
644  n = collect_events(event->group_leader,
645  alpha_pmu->num_pmcs - 1,
646  evts, evtypes, idx_rubbish_bin);
647  if (n < 0)
648  return -EINVAL;
649  }
650  evtypes[n] = hwc->event_base;
651  evts[n] = event;
652 
653  if (alpha_check_constraints(evts, evtypes, n + 1))
654  return -EINVAL;
655 
656  /* Indicate that PMU config and idx are yet to be determined. */
657  hwc->config_base = 0;
658  hwc->idx = PMC_NO_INDEX;
659 
660  event->destroy = hw_perf_event_destroy;
661 
662  /*
663  * Most architectures reserve the PMU for their use at this point.
664  * As there is no existing mechanism to arbitrate usage and there
665  * appears to be no other user of the Alpha PMU we just assume
666  * that we can use it, hence a NO-OP here.
667  *
668  * Maybe an alpha_reserve_pmu() routine should be implemented but is
669  * anything else ever going to use it?
670  */
671 
672  if (!hwc->sample_period) {
673  hwc->sample_period = alpha_pmu->pmc_max_period[0];
674  hwc->last_period = hwc->sample_period;
675  local64_set(&hwc->period_left, hwc->sample_period);
676  }
677 
678  return 0;
679 }
680 
681 /*
682  * Main entry point to initialise a HW performance event.
683  */
684 static int alpha_pmu_event_init(struct perf_event *event)
685 {
686  int err;
687 
688  /* does not support taken branch sampling */
689  if (has_branch_stack(event))
690  return -EOPNOTSUPP;
691 
692  switch (event->attr.type) {
693  case PERF_TYPE_RAW:
694  case PERF_TYPE_HARDWARE:
695  case PERF_TYPE_HW_CACHE:
696  break;
697 
698  default:
699  return -ENOENT;
700  }
701 
702  if (!alpha_pmu)
703  return -ENODEV;
704 
705  /* Do the real initialisation work. */
706  err = __hw_perf_event_init(event);
707 
708  return err;
709 }
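Events with no PERF_TYPE_HARDWARE alias, such as MBOX replay traps, are requested as raw events; the low byte of attr.config is taken directly as the EV67 event type. A hedged user-space sketch (not part of this file; the value 4 assumes EV67_MBOXREPLAY per the enum above):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative sketch: count MBOX replay traps via a raw event. */
int open_mbox_replay_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 4;	/* EV67_MBOXREPLAY; only the low byte is used */
	/* exclude_* stay zero: mode exclusion returns -EPERM on this PMU. */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}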
710 
711 /*
712  * Main entry point - enable HW performance counters.
713  */
714 static void alpha_pmu_enable(struct pmu *pmu)
715 {
716  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
717 
718  if (cpuc->enabled)
719  return;
720 
721  cpuc->enabled = 1;
722  barrier();
723 
724  if (cpuc->n_events > 0) {
725  /* Update cpuc with information from any new scheduled events. */
726  maybe_change_configuration(cpuc);
727 
728  /* Start counting the desired events. */
729  wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
730  wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
731  wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
732  }
733 }
734 
735 
736 /*
737  * Main entry point - disable HW performance counters.
738  */
739 
740 static void alpha_pmu_disable(struct pmu *pmu)
741 {
742  struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
743 
744  if (!cpuc->enabled)
745  return;
746 
747  cpuc->enabled = 0;
748  cpuc->n_added = 0;
749 
750  wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
751 }
752 
753 static struct pmu pmu = {
754  .pmu_enable = alpha_pmu_enable,
755  .pmu_disable = alpha_pmu_disable,
756  .event_init = alpha_pmu_event_init,
757  .add = alpha_pmu_add,
758  .del = alpha_pmu_del,
759  .start = alpha_pmu_start,
760  .stop = alpha_pmu_stop,
761  .read = alpha_pmu_read,
762 };
763 
764 
765 /*
766  * Main entry point - don't know when this is called but it
767  * obviously dumps debug info.
768  */
769 void perf_event_print_debug(void)
770 {
771  unsigned long flags;
772  unsigned long pcr;
773  int pcr0, pcr1;
774  int cpu;
775 
776  if (!supported_cpu())
777  return;
778 
779  local_irq_save(flags);
780 
781  cpu = smp_processor_id();
782 
783  pcr = wrperfmon(PERFMON_CMD_READ, 0);
784  pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
785  pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
786 
787  pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
788 
789  local_irq_restore(flags);
790 }
791 
792 
793 /*
794  * Performance Monitoring Interrupt Service Routine called when a PMC
795  * overflows. The PMC that overflowed is passed in la_ptr.
796  */
797 static void alpha_perf_event_irq_handler(unsigned long la_ptr,
798  struct pt_regs *regs)
799 {
800  struct cpu_hw_events *cpuc;
801  struct perf_sample_data data;
802  struct perf_event *event;
803  struct hw_perf_event *hwc;
804  int idx, j;
805 
806  __get_cpu_var(irq_pmi_count)++;
807  cpuc = &__get_cpu_var(cpu_hw_events);
808 
809  /* Completely counting through the PMC's period to trigger a new PMC
810  * overflow interrupt while in this interrupt routine is utterly
811  * disastrous! The EV6 and EV67 counters are sufficiently large to
812  * prevent this but to be really sure disable the PMCs.
813  */
814  wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
815 
816  /* la_ptr is the counter that overflowed. */
817  if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
818  /* This should never occur! */
819  irq_err_count++;
820  pr_warning("PMI: silly index %ld\n", la_ptr);
821  wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
822  return;
823  }
824 
825  idx = la_ptr;
826 
827  for (j = 0; j < cpuc->n_events; j++) {
828  if (cpuc->current_idx[j] == idx)
829  break;
830  }
831 
832  if (unlikely(j == cpuc->n_events)) {
833  /* This can occur if the event is disabled right on a PMC overflow. */
834  wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
835  return;
836  }
837 
838  event = cpuc->event[j];
839 
840  if (unlikely(!event)) {
841  /* This should never occur! */
842  irq_err_count++;
843  pr_warning("PMI: No event at index %d!\n", idx);
844  wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
845  return;
846  }
847 
848  hwc = &event->hw;
849  alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
850  perf_sample_data_init(&data, 0, hwc->last_period);
851 
852  if (alpha_perf_event_set_period(event, hwc, idx)) {
853  if (perf_event_overflow(event, &data, regs)) {
854  /* Interrupts coming too quickly; "throttle" the
855  * counter, i.e., disable it for a little while.
856  */
857  alpha_pmu_stop(event, 0);
858  }
859  }
860  wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
861 
862  return;
863 }
864 
865 
866 
867 /*
868  * Init call to initialise performance events at kernel startup.
869  */
870 int __init init_hw_perf_events(void)
871 {
872  pr_info("Performance events: ");
873 
874  if (!supported_cpu()) {
875  pr_cont("No support for your CPU.\n");
876  return 0;
877  }
878 
879  pr_cont("Supported CPU type!\n");
880 
881  /* Override performance counter IRQ vector */
882 
883  perf_irq = alpha_perf_event_irq_handler;
884 
885  /* And set up PMU specification */
886  alpha_pmu = &ev67_pmu;
887 
888  perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
889 
890  return 0;
891 }