#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/ptrace.h>
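/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */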
static inline int perf_intr_is_nmi(struct pt_regs *regs)
static void perf_event_interrupt(struct pt_regs *regs);
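/*
 * Read one performance monitor counter (PMC).
 */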
static unsigned long read_pmc(int idx)
        switch (idx) {
        case 0: val = mfpmr(PMRN_PMC0); break;
        case 1: val = mfpmr(PMRN_PMC1); break;
        case 2: val = mfpmr(PMRN_PMC2); break;
        case 3: val = mfpmr(PMRN_PMC3); break;
        }
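/*
 * Write one PMC.
 */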
static void write_pmc(int idx, unsigned long val)
        switch (idx) {
        case 0: mtpmr(PMRN_PMC0, val); break;
        case 1: mtpmr(PMRN_PMC1, val); break;
        case 2: mtpmr(PMRN_PMC2, val); break;
        case 3: mtpmr(PMRN_PMC3, val); break;
        }
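/*
 * Write one local control A register (PMLCA).
 */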
static void write_pmlca(int idx, unsigned long val)
        switch (idx) {
        case 0: mtpmr(PMRN_PMLCA0, val); break;
        case 1: mtpmr(PMRN_PMLCA1, val); break;
        case 2: mtpmr(PMRN_PMLCA2, val); break;
        case 3: mtpmr(PMRN_PMLCA3, val); break;
        }
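/*
 * Write one local control B register (PMLCB).
 */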
static void write_pmlcb(int idx, unsigned long val)
        switch (idx) {
        case 0: mtpmr(PMRN_PMLCB0, val); break;
        case 1: mtpmr(PMRN_PMLCB1, val); break;
        case 2: mtpmr(PMRN_PMLCB2, val); break;
        case 3: mtpmr(PMRN_PMLCB3, val); break;
        }
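/*
 * fsl_emb_pmu_read(): fold the current hardware count into
 * event->count.  prev_count is updated with a cmpxchg loop so that a
 * PMU interrupt landing between the read and the update cannot lose
 * a delta.
 */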
        val = read_pmc(event->hw.idx);
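        /* The counters are only 32 bits wide */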
        delta = (val - prev) & 0xfffffffful;
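/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */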
static void fsl_emb_pmu_disable(struct pmu *pmu)
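        /*
         * Set the 'freeze all counters' bit, and disable
         * interrupts.  The PMU must have frozen the events
         * before this function returns.
         */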
        mtpmr(PMRN_PMGC0, PMGC0_FAC);
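/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */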
static void fsl_emb_pmu_enable(struct pmu *pmu)
        ppc_set_pmu_inuse(cpuhw->n_events != 0);
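        /*
         * Unfreeze the counters: enable the performance monitor
         * interrupt (PMIE) and freeze-counters-on-enabled-condition-
         * or-event (FCECE).
         */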
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
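/*
 * collect_events(): gather the group leader and any sibling events
 * that need a hardware counter, failing if they exceed max_count.
 */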
        if (!is_software_event(group)) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
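/* context locked on entry */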
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
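        /*
         * Allocate counters from top-down, so that restricted-capable
         * counters are kept free as long as possible.
         */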
        for (i = num_counters - 1; i >= 0; i--) {
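        /*
         * The counters interrupt when bit 31 becomes set, so a counter
         * seeded with 0x80000000 - left overflows after 'left' events.
         */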
        val = 0;
        if (event->hw.sample_period) {
                s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
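        /*
         * The upper 32 bits of the translated event config program
         * PMLCB; the PMLCA control bits were computed at init time.
         */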
        write_pmlcb(i, event->hw.config >> 32);
        write_pmlca(i, event->hw.config_base);
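/* context locked on entry */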
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
        int i = event->hw.idx;
        fsl_emb_pmu_read(event);
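/*
 * Restart a stopped event: clear the stopped state and reprogram the
 * PMC from the remaining sample period.
 */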
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
        left = local64_read(&event->hw.period_left);
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;
        fsl_emb_pmu_read(event);
        write_pmc(event->hw.idx, 0);
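/*
 * Release the PMU if this is the last perf_event.
 */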
static void hw_perf_event_destroy(struct perf_event *event)
        /*
         * atomic_add_unless() fails when num_events == 1, i.e. when we
         * may be the last user; do the final decrement and the release
         * under the mutex.
         */
        if (!atomic_add_unless(&num_events, -1, 1)) {
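/*
 * hw_perf_cache_event(): unpack a generic cache event config.  The
 * perf ABI packs these as type | (op << 8) | (result << 16).
 */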
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;
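/*
 * fsl_emb_pmu_event_init(): translate the generic event attributes
 * into a raw event code and precompute the PMLCA control bits.
 */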
static int fsl_emb_pmu_event_init(struct perf_event *event)
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
        case PERF_TYPE_RAW:
                ev = event->attr.config;
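        /*
         * If this is in a group, check if it can go on with all the
         * other hardware events in the group.  We assume the event
         * hasn't been linked into its leader's sibling list at this point.
         */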
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
        for (i = 0; i < n; i++) {
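        /*
         * PMLCA_CE enables the overflow condition (interrupt) for the
         * counter; PMLCA_FCM1 freezes it while the PMM mark bit is set.
         */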
        event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
                (u32)((ev << 16) & PMLCA_EVENT_MASK);
        if (event->attr.exclude_user)
                event->hw.config_base |= PMLCA_FCU;
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
                return -ENOTSUPP;
        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);
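        /*
         * See if we need to reserve the PMU.
         * If no events are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */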
        mtpmr(PMRN_PMGC0, PMGC0_FAC);
        event->destroy = hw_perf_event_destroy;
static struct pmu fsl_emb_pmu = {
        .pmu_enable     = fsl_emb_pmu_enable,
        .pmu_disable    = fsl_emb_pmu_disable,
        .event_init     = fsl_emb_pmu_event_init,
        .add            = fsl_emb_pmu_add,
        .del            = fsl_emb_pmu_del,
        .start          = fsl_emb_pmu_start,
        .stop           = fsl_emb_pmu_stop,
        .read           = fsl_emb_pmu_read,
};
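/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */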
static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
        write_pmc(event->hw.idx, 0);
        delta = (val - prev) & 0xfffffffful;
        event->hw.last_period = event->hw.sample_period;
        if (left < 0x80000000LL)
                val = 0x80000000LL - left;
        write_pmc(event->hw.idx, val);
        struct perf_sample_data data;
        perf_sample_data_init(&data, 0, event->hw.last_period);
        if (perf_event_overflow(event, &data, regs))
                fsl_emb_pmu_stop(event, 0);
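/*
 * perf_event_interrupt(): scan all counters; a counter that has gone
 * negative (bit 31 set) has overflowed, so record it and restart.
 */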
static void perf_event_interrupt(struct pt_regs *regs)
        nmi = perf_intr_is_nmi(regs);
        record_and_restart(event, val, regs);
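        /* PMM will keep counters frozen until we return from the interrupt. */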
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
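/*
 * hw_perf_event_setup(): reset the per-cpu event bookkeeping when a
 * CPU is brought up.
 */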
        memset(cpuhw, 0, sizeof(*cpuhw));
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);