#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>
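
/*
 * sh_pmu points at the architecture-specific PMU descriptor once one
 * has been registered via register_sh_pmu(); everything below bails
 * out until that happens.
 */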
static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}
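
/*
 * From hw_perf_event_destroy(): atomic_add_unless() returns 0 when
 * num_events was already 1, i.e. this is the last event; the (elided)
 * branch body then takes pmc_reserve_mutex, drops the final reference
 * and releases the counter hardware.
 */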
	if (!atomic_add_unless(&num_events, -1, 1)) {
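
/*
 * Translate a PERF_TYPE_HW_CACHE config into a raw hardware event
 * code via the CPU's sh_pmu->cache_events table.
 */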
static int hw_perf_cache_event(int config, int *evp)
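	/*
	 * The generic cache event config packs three fields a byte
	 * apart: cache type in bits 0-7, operation in bits 8-15 and
	 * operation result in bits 16-23.
	 */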
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;
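
/*
 * Validate the attr and derive the raw counter config for a new
 * event; called from sh_pmu_event_init() for the event types the
 * switch below handles.
 */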
static int __hw_perf_event_init(struct perf_event *event)
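	/*
	 * Bail out if no PMU is registered yet. The SH counters also
	 * have no overflow interrupt, so sampling cannot be supported
	 * and a requested sample_period is rejected.
	 */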
	if (!sh_pmu_initialized())
		return -ENODEV;
	if (hwc->sample_period)
		return -EINVAL;

	event->destroy = hw_perf_event_destroy;
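
	/*
	 * PERF_TYPE_RAW uses attr->config (masked) directly,
	 * PERF_TYPE_HW_CACHE goes through hw_perf_cache_event(), and
	 * generic hardware events map through sh_pmu->event_map().
	 */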
	switch (attr->type) {
		err = hw_perf_cache_event(attr->config, &config);
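
/*
 * Fold the current hardware count into event->count. The raw counter
 * value is read and atomically exchanged with hwc->prev_count; if
 * another update raced with us, the cmpxchg fails and the read is
 * retried. With no overflow interrupt on SH, this is the simplest
 * way to keep the count consistent.
 */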
static void sh_perf_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
	u64 prev_raw_count, new_raw_count;
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
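	/*
	 * The shift pair below would sign-extend counters narrower
	 * than 64 bits (shift = 64 - counter width); here shift is 0,
	 * so the subtraction is taken at full width.
	 */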
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
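
	/*
	 * From sh_pmu_stop(): with PERF_EF_UPDATE set, fold the final
	 * count into the event before it is marked PERF_HES_UPTODATE.
	 */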
	sh_perf_event_update(event, &event->hw, idx);
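
/*
 * ->start(): install the event in this CPU's counter slot, clear its
 * PERF_HES_* state and enable the hardware counter. PERF_EF_RELOAD
 * asserts that the saved count is already up to date.
 */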
static void sh_pmu_start(struct perf_event *event, int flags)
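
/*
 * ->del(): stop the counter with a final count update and release
 * the event's slot in the used_mask.
 */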
static void sh_pmu_del(struct perf_event *event, int flags)
	sh_pmu_stop(event, PERF_EF_UPDATE);
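
/*
 * ->add(): claim a free counter slot in the used_mask, mark the
 * event stopped and up to date, and start it immediately when
 * PERF_EF_START is set.
 */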
static int sh_pmu_add(struct perf_event *event, int flags)
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);
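
/* ->read(): fold the current hardware count into the event. */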
static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}
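
/*
 * ->event_init(): branch-stack sampling is not supported; raw,
 * hardware-cache and generic hardware events are handed off to
 * __hw_perf_event_init(), everything else is declined.
 */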
static int sh_pmu_event_init(struct perf_event *event)
	if (has_branch_stack(event))
		return -EOPNOTSUPP;
	switch (event->attr.type) {
		err = __hw_perf_event_init(event);
		if (event->destroy)
			event->destroy(event);
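
/*
 * pmu_enable()/pmu_disable(): toggle all counters around perf core
 * scheduling operations; no-ops until an sh_pmu is registered.
 */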
static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;
	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;
	sh_pmu->disable_all();
}
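
/*
 * The struct pmu handed to perf_pmu_register(); .pmu_enable, .add,
 * .del, .stop and .read are wired up to the remaining sh_pmu_*
 * callbacks in the same way.
 */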
static struct pmu pmu = {
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.start		= sh_pmu_start,
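
/* Reset a CPU's counter bookkeeping as it comes online. */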
static void sh_pmu_setup(int cpu)
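
	/* In the CPU hotplug notifier: the opaque hcpu cookie carries the CPU number. */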
	unsigned int cpu = (long)hcpu;
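
/* register_sh_pmu(): announce the newly registered PMU by name. */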
	pr_info("Performance Events: %s support registered\n", _pmu->name);