#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>

#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
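/*
 * Perf encodes a hardware cache event in attr.config as three byte-wide
 * fields: cache type in bits 0-7, access op in bits 8-15 and op result in
 * bits 16-23, e.g. L1D read misses are PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16). The mapper below unpacks and
 * bounds-checks each field before looking it up in the PMU's cache map.
 */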
armpmu_map_cache_event(const unsigned (*cache_map)
        cache_type = (config >>  0) & 0xff;
        cache_op = (config >>  8) & 0xff;
        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;
        return (int)(config & raw_event_mask);
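/*
 * armpmu_map_event() dispatches on event->attr.type: PERF_TYPE_HARDWARE
 * configs index the PMU's event_map, PERF_TYPE_HW_CACHE configs go through
 * the cache mapper above, and PERF_TYPE_RAW configs are simply masked with
 * the PMU's raw_event_mask.
 */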
                 const unsigned (*cache_map)

        u64 config = event->attr.config;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }
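/*
 * armpmu_event_set_period() programs the hardware counter so that it
 * overflows after "left" events: "left" is first clamped to the PMU's
 * max_period and the counter is then written with -left (truncated to
 * 32 bits here).
 */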
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        if (unlikely(period != hwc->last_period))
                left = period - (hwc->last_period - left);

        hwc->last_period = period;

        hwc->last_period = period;

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
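/*
 * armpmu_event_update() folds the current hardware count into the perf
 * event: it re-reads the counter, publishes the new value with a cmpxchg
 * on prev_count (retrying if an interrupt updated it concurrently), and
 * computes the wrapped delta masked to the counter width (max_period).
 */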
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        return new_raw_count;
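/*
 * armpmu_stop()/armpmu_start() disable and re-enable the event's hardware
 * counter at hwc->idx through the PMU back-end's disable()/enable() hooks.
 */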
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        armpmu->disable(hwc, hwc->idx);
armpmu_start(struct perf_event *event, int flags)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        armpmu->enable(hwc, hwc->idx);
armpmu_del(struct perf_event *event, int flags)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
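/*
 * armpmu_add() asks the back-end for a free counter index with
 * get_event_idx(); if one is available the counter is disabled first and
 * the event is then started with PERF_EF_RELOAD so a fresh period gets
 * programmed.
 */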
armpmu_add(struct perf_event *event, int flags)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();

        idx = armpmu->get_event_idx(hw_events, hwc);

        armpmu->disable(hwc, idx);

        armpmu_start(event, PERF_EF_RELOAD);
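/*
 * Group validation: validate_event() asks the back-end whether a fake copy
 * of the event could still be scheduled, and validate_group() runs the
 * leader, every sibling and the new event against a fake pmu_hw_events
 * whose used_mask starts out empty, so the whole group is known to fit on
 * the hardware at once.
 */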
validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu *leader_pmu = event->group_leader->pmu;

        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        memset(fake_used_mask, 0, sizeof(fake_used_mask));
        fake_pmu.used_mask = fake_used_mask;

        if (!validate_event(&fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(&fake_pmu, event))
                return -EINVAL;
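/*
 * PMU interrupts are funnelled through armpmu_dispatch_irq(): if the
 * platform data provides a handle_irq() wrapper it is invoked with the
 * driver's handler as a callback, so the platform can run its own code
 * around it; otherwise the driver's handler is called directly.
 */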
        struct arm_pmu *armpmu = (struct arm_pmu *) dev;

        if (plat && plat->handle_irq)
                return plat->handle_irq(irq, dev, armpmu->handle_irq);
        else
                return armpmu->handle_irq(irq, dev);
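/*
 * armpmu_reserve_hardware() takes a runtime-PM reference on the PMU's
 * platform device and requests its interrupt, passing armpmu_dispatch_irq
 * to the back-end; armpmu_release_hardware() drops the reference again and
 * is also used to unwind a failed reservation.
 */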
armpmu_release_hardware(struct arm_pmu *armpmu)

        pm_runtime_put_sync(&armpmu->plat_device->dev);
armpmu_reserve_hardware(struct arm_pmu *armpmu)

        pm_runtime_get_sync(&pmu_device->dev);
        err = armpmu->request_irq(armpmu_dispatch_irq);
        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
        }
hw_perf_event_destroy(struct perf_event *event)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
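/*
 * __hw_perf_event_init() maps the event to a hardware encoding, rejects
 * requests for mode exclusion that the back-end cannot filter, stores the
 * mapping in config_base, and for non-sampling events defaults the period
 * to half the maximum counter value so the counter is unlikely to wrap
 * between reads. Events that are part of a group are also validated
 * against the rest of the group.
 */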
__hw_perf_event_init(struct perf_event *event)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        hwc->config_base = 0;

        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EOPNOTSUPP;
        }

        hwc->config_base |= (unsigned long)mapping;

        if (!hwc->sample_period) {
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }
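/*
 * armpmu_event_init() is the struct pmu entry point for new events: branch
 * stack sampling is refused, unmappable events are passed on with -ENOENT,
 * the hardware (IRQ plus runtime-PM reference) is reserved on demand, and a
 * failed __hw_perf_event_init() is unwound via hw_perf_event_destroy().
 */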
static int armpmu_event_init(struct perf_event *event)

        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

                err = armpmu_reserve_hardware(armpmu);

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
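/*
 * armpmu_enable() only starts the hardware when at least one counter is in
 * use (non-zero bitmap_weight() of used_mask); armpmu_disable() simply
 * stops the PMU.
 */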
static void armpmu_enable(struct pmu *pmu)

        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
static void armpmu_disable(struct pmu *pmu)
#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)

static int armpmu_runtime_suspend(struct device *dev)
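/*
 * With CONFIG_PM_RUNTIME the suspend/resume callbacks defer to optional
 * hooks in the PMU platform data. armpmu_init() below fills in the generic
 * struct pmu callbacks that perf core will invoke.
 */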
static void __init armpmu_init(struct arm_pmu *armpmu)

        armpmu->pmu = (struct pmu) {
                .pmu_enable = armpmu_enable,
                .pmu_disable = armpmu_disable,
                .event_init = armpmu_event_init,
                .start = armpmu_start,

        pr_info("enabled with %s PMU driver, %d counters available\n",
                armpmu->name, armpmu->num_events);
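/*
 * Userspace callchains are produced by walking the APCS frame-pointer
 * chain: each struct frame_tail read from user memory contributes its
 * saved lr to the callchain, and the walk stops when the next frame
 * pointer fails to move up the stack or is not word-aligned.
 */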
        perf_callchain_store(entry, buftail.lr);

        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
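/*
 * Kernel callchains reuse the kernel unwinder: a struct stackframe is
 * seeded from the trapped registers (fp/sp/lr/pc) and each frame visited
 * by the walk records fr->pc into the callchain.
 */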
        perf_callchain_store(entry, fr->pc);

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;