#include <linux/perf_event.h>
#include <linux/kernel.h>
#include <asm/irq_regs.h>
#include <asm/hw_irq.h>

#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

#define HW_OP_UNSUPPORTED -1

#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
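/*
 * Map generic perf hardware event IDs onto the EV67 PMU's native
 * event encodings; event types the hardware cannot count are marked
 * HW_OP_UNSUPPORTED.
 */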
static const int ev67_perfmon_event_map[] = {

                                  unsigned long *evtype, int n_ev)

        idx0 = ev67_mapping[evtype[0]-1].idx;
        config = ev67_mapping[evtype[0]-1].config;
        event[0]->hw.idx = idx0;
        event[0]->hw.config_base = config;

        event[1]->hw.idx = idx0 ^ 1;
        event[1]->hw.config_base = config;
        .event_map = ev67_perfmon_event_map,
        .max_events = ARRAY_SIZE(ev67_perfmon_event_map),

        .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
        .pmc_left = {16, 4, 0},
        .check_constraints = ev67_check_constraints,
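/*
 * Note: per .pmc_max_period above, both usable EV67 counters are
 * 20 bits wide ((1UL<<20) - 1), so each can count roughly one
 * million events between overflow interrupts.
 */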
static inline void alpha_write_pmc(int idx, unsigned long val)

static inline unsigned long alpha_read_pmc(int idx)
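/*
 * Arm a PMC so that it overflows after (at most) the remaining
 * sample period: the counter is loaded with the negated remaining
 * count, as illustrated in the example further below.
 */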
static int alpha_perf_event_set_period(struct perf_event *event,

        long period = hwc->sample_period;

                hwc->last_period = period;

                hwc->last_period = period;

        local64_set(&hwc->prev_count, (unsigned long)(-left));

        alpha_write_pmc(idx, (unsigned long)(-left));
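/*
 * Worked example (assuming a 20-bit counter and left == 100):
 * (unsigned long)(-100) truncates to 0xFFF9C in 20 bits, so after
 * exactly 100 counted events the PMC wraps past 0xFFFFF and raises
 * the overflow interrupt that drives sampling.
 */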
static unsigned long alpha_perf_event_update(struct perf_event *event,

        long prev_raw_count, new_raw_count;

        new_raw_count = alpha_read_pmc(idx);

                             new_raw_count) != prev_raw_count)

        delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

        return new_raw_count;
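/*
 * The local64_cmpxchg() test above is the standard lock-free update:
 * if an intervening PMI moved hwc->prev_count between the read and
 * the exchange, the update is retried with fresh values.
 */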
                          struct perf_event *event[], unsigned long *evtype,

        if (!is_software_event(group)) {

                evtype[n] = group->hw.event_base;

                evtype[n] = pe->hw.event_base;
                                   unsigned long *evtypes, int n_ev)
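/*
 * After the PMU has been disabled and the event set has changed,
 * fold each event's hardware count back into the generic count and
 * re-arm its period on the counter it now occupies.
 */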
static void maybe_change_configuration(struct cpu_hw_events *cpuc)

        for (j = 0; j < cpuc->n_events; j++) {

                alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);

        for (j = 0; j < cpuc->n_events; j++) {

                alpha_perf_event_set_period(pe, hwc, idx);
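/*
 * alpha_pmu_add(): tentatively record the new event and its type in
 * cpuc, then keep it only if alpha_check_constraints() accepts the
 * enlarged set.
 */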
        unsigned long irq_flags;

        if (n0 < alpha_pmu->num_pmcs) {

                cpuc->evtype[n0] = event->hw.event_base;

                if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
static void alpha_pmu_del(struct perf_event *event, int flags)

        unsigned long irq_flags;

        for (j = 0; j < cpuc->n_events; j++) {
                if (event == cpuc->event[j]) {

                        while (++j < cpuc->n_events) {

                        alpha_perf_event_update(event, hwc, idx, 0);
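/* Fold the current hardware count into the generic event count. */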
static void alpha_pmu_read(struct perf_event *event)

        alpha_perf_event_update(event, hwc, hwc->idx, 0);

static void alpha_pmu_stop(struct perf_event *event, int flags)

                alpha_perf_event_update(event, hwc, hwc->idx, 0);

static void alpha_pmu_start(struct perf_event *event, int flags)

                alpha_perf_event_set_period(event, hwc, hwc->idx);
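/*
 * CPU probe: only the low 32 bits of the per-CPU type field are
 * significant; the EV67 PMU description is installed at init time
 * when this check passes.
 */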
static int supported_cpu(void)

        cputype = cpu->type & 0xffffffff;
static void hw_perf_event_destroy(struct perf_event *event)

static int __hw_perf_event_init(struct perf_event *event)

        hwc->event_base = ev;

        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   evts, evtypes, idx_rubbish_bin);

                evtypes[n] = hwc->event_base;

        if (alpha_check_constraints(evts, evtypes, n + 1))

        hwc->config_base = 0;

        event->destroy = hw_perf_event_destroy;

        if (!hwc->sample_period) {

                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
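/*
 * Entry point called by the core perf code for each new event:
 * reject what the PMU cannot express, then defer to
 * __hw_perf_event_init() for the hardware-specific setup.
 */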
static int alpha_pmu_event_init(struct perf_event *event)

        if (has_branch_stack(event))

        switch (event->attr.type) {

        err = __hw_perf_event_init(event);

static void alpha_pmu_enable(struct pmu *pmu)

        maybe_change_configuration(cpuc);
static void alpha_pmu_disable(struct pmu *pmu)

static struct pmu pmu = {

        .pmu_disable = alpha_pmu_disable,
        .event_init = alpha_pmu_event_init,
        .add = alpha_pmu_add,
        .del = alpha_pmu_del,
        .start = alpha_pmu_start,
        .stop = alpha_pmu_stop,
        .read = alpha_pmu_read,
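/*
 * The struct pmu above is the interface the core perf layer drives;
 * in the full driver it is registered with perf_pmu_register() once
 * the CPU check below succeeds.
 */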
        if (!supported_cpu())

        pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
static void alpha_perf_event_irq_handler(unsigned long la_ptr,

        struct perf_sample_data data;

        for (j = 0; j < cpuc->n_events; j++) {

                pr_warning("PMI: No event at index %d!\n", idx);

        alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
        perf_sample_data_init(&data, 0, hwc->last_period);

        if (alpha_perf_event_set_period(event, hwc, idx)) {

                        alpha_pmu_stop(event, 0);
        pr_info("Performance events: ");

        if (!supported_cpu()) {
                pr_cont("No support for your CPU.\n");

        pr_cont("Supported CPU type!\n");

        perf_irq = alpha_perf_event_irq_handler;

        alpha_pmu = &ev67_pmu;
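/*
 * A minimal userspace sketch (hypothetical test program, not part of
 * this driver) showing how the code above gets exercised: opening a
 * hardware cycle counter with the perf_event_open() syscall reaches
 * alpha_pmu_event_init() and alpha_pmu_add() via the event map.
 * do_workload() is a placeholder for whatever is being measured.
 *
 *      #include <linux/perf_event.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *
 *      int main(void)
 *      {
 *              struct perf_event_attr attr;
 *              long long count;
 *              int fd;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.size = sizeof(attr);
 *              attr.type = PERF_TYPE_HARDWARE;
 *              attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *              attr.disabled = 1;
 *
 *              // Count cycles for the calling thread on any CPU.
 *              fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *              if (fd < 0)
 *                      return 1;
 *
 *              ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *              do_workload();
 *              ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *              read(fd, &count, sizeof(count));
 *              printf("cycles: %lld\n", count);
 *              close(fd);
 *              return 0;
 *      }
 */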