#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
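/*
 * Counter-availability masks used by the event tables: bit n set means the
 * event can be scheduled on hardware counter n, so CNTR_EVEN selects the
 * even-numbered counters, CNTR_ODD the odd-numbered ones and CNTR_ALL any
 * counter.
 */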
#define CNTR_EVEN	0x55555555
#define CNTR_ODD	0xaaaaaaaa
#define CNTR_ALL	0xffffffff
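/*
 * With CONFIG_MIPS_MT_SMP an event additionally carries a counting "range":
 * per-TC (T), per-VPE (V) or processor-wide (P), used later when raw events
 * are decoded.
 */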
#ifdef CONFIG_MIPS_MT_SMP

#define C(x) PERF_COUNT_HW_CACHE_##x
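/*
 * Config1.PC advertises the presence of performance counters; the
 * M_PERFCTL_* macros below name fields of the performance counter control
 * registers (counting-mode bits, interrupt enable, event number, and the
 * MT/VPE/TC qualifiers).
 */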
#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#endif
#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)
#define M_PERFCTL_TC			(1 << 30)
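/*
 * Control value used when an event should be counted in every mode:
 * exception level, kernel, supervisor and user, with the overflow interrupt
 * enabled.
 */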
#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)
#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
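/*
 * On MT cores without per-TC counters the hardware counters are shared, so
 * the total number reported by the hardware is split between the virtual
 * CPUs (see counters_total_to_per_cpu() below).
 */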
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif
static unsigned int vpe_shift(void)
static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
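/*
 * When two VPEs share one set of counters, each VPE is given its own half,
 * so the perf counter index is swizzled before the hardware registers are
 * touched (assumption based on the MIPS_TCS_PER_COUNTER mapping above).
 */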
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)

static u64 mipsxx_pmu_read_counter(unsigned int idx)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
	idx = mipsxx_pmu_swizzle_perf_idx(idx);
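/*
 * Counter allocation: bits 8-23 of hwc->event_base hold the mask of
 * hardware counters this event may use; scan from the highest counter
 * downwards and claim the first free one in cpuc->used_mask.
 */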
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
	for (i = mipspmu.num_counters - 1; i >= 0; i--) {

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
static void mipsxx_pmu_disable_event(int idx)
	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
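/*
 * mipspmu_event_set_period(): program the counter so that it overflows
 * after "left" events.  The counter is preset to overflow - left and the
 * same value is remembered in prev_count for the next delta calculation.
 */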
	if (unlikely((left + period) & (1ULL << 63))) {
		hwc->last_period = period;

	} else if (unlikely((left + period) <= period)) {
		hwc->last_period = period;

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);
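/*
 * mipspmu_event_update(): read the hardware counter and fold the delta
 * since prev_count into the perf event count, retrying if prev_count
 * changed underneath us.
 */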
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
	u64 prev_raw_count, new_raw_count;

	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)

	delta = new_raw_count - prev_raw_count;
	mipspmu_event_set_period(event, hwc, hwc->idx);

	mipsxx_pmu_enable_event(hwc, hwc->idx);
static void mipspmu_stop(struct perf_event *event, int flags)
		mipsxx_pmu_disable_event(hwc->idx);

		mipspmu_event_update(event, hwc, hwc->idx);
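/*
 * mipspmu_add(): claim a hardware counter for the event and make sure it
 * starts out disabled; mipspmu_start() then reloads the period and enables
 * counting.
 */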
static int mipspmu_add(struct perf_event *event, int flags)
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);

	mipsxx_pmu_disable_event(idx);

		mipspmu_start(event, PERF_EF_RELOAD);
static void mipspmu_del(struct perf_event *event, int flags)
	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

static void mipspmu_read(struct perf_event *event)
	mipspmu_event_update(event, hwc, hwc->idx);
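/*
 * pmu_enable/pmu_disable callbacks: all counters on this CPU are paused
 * while the perf core reschedules events and resumed afterwards; with
 * CONFIG_MIPS_PERF_SHARED_TC_COUNTERS this also serialises against the
 * sibling thread contexts that share the counters.
 */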
static void mipspmu_enable(struct pmu *pmu)
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	resume_local_counters();

static void mipspmu_disable(struct pmu *pmu)
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int (*save_perf_irq)(void);
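/*
 * Acquire the counter overflow interrupt: request a dedicated IRQ when the
 * platform provides one, otherwise hook mipsxx_pmu_handle_shared_irq() into
 * the CPU interrupt shared with the timer via perf_irq.
 */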
static int mipspmu_get_irq(void)
	if (mipspmu.irq >= 0) {
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  "mips_perf_pmu", NULL);
			pr_warning("Unable to request IRQ%d for MIPS "
				   "performance counters!\n", mipspmu.irq);

		perf_irq = mipsxx_pmu_handle_shared_irq;
		pr_warning("The platform hasn't properly defined its "
			   "interrupt controller.\n");
static void mipspmu_free_irq(void)
	if (mipspmu.irq >= 0)
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
	if (atomic_dec_and_mutex_lock(&active_events,
				      &pmu_reserve_mutex)) {
		on_each_cpu(reset_counters,
			    (void *)(long)mipspmu.num_counters, 1);
static int mipspmu_event_init(struct perf_event *event)
	if (has_branch_stack(event))

	switch (event->attr.type) {

		err = mipspmu_get_irq();

	return __hw_perf_event_init(event);
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
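/*
 * Pack a mips_perf_event into hwc->event_base: event number in bits 0-7,
 * allowed-counter mask in bits 8-23 and, on MT kernels, the counting range
 * in the bits above that.  mipsxx_pmu_alloc_counter() unpacks the counter
 * mask from bits 8-23.
 */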
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)

	return &(*mipspmu.general_event_map)[idx];
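/*
 * Cache events encode the cache type, operation and result in config bits
 * 0-7, 8-15 and 16-23 respectively; the triple indexes
 * mipspmu.cache_event_map.
 */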
	cache_type = (config >> 0) & 0xff;

	cache_op = (config >> 8) & 0xff;

	cache_result = (config >> 16) & 0xff;

	pev = &((*mipspmu.cache_event_map)
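/*
 * validate_group(): dry-run counter allocation against a zeroed fake
 * cpu_hw_events to check that the group leader, its existing siblings and
 * the new event can all be scheduled at the same time.
 */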
static int validate_group(struct perf_event *event)
	struct perf_event *sibling, *leader = event->group_leader;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)

		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
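/*
 * On counter overflow: fold the count into the event, re-arm the sample
 * period and deliver the overflow to the perf core; the counter is disabled
 * if the core asks for the event to be throttled.
 */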
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))

		mipsxx_pmu_disable_event(idx);
static int __n_counters(void)

static int n_counters(void)
	counters = __n_counters();
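/*
 * reset_counters() runs on every CPU and clears the control and count
 * registers of each implemented counter, from the highest one down.
 */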
static void reset_counters(void *arg)
	int counters = (int)(long)arg;
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
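/*
 * Cache event map entries: each { event, mask } pair gives the hardware
 * event number and the counters (CNTR_EVEN/CNTR_ODD/CNTR_ALL) it may be
 * counted on, indexed by cache type, operation and result.
 */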
[C(OP_PREFETCH)] = {
	[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
	[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
[C(OP_PREFETCH)] = {
	[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
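/*
 * For MT cores: pick the MT counting filter for an event.  Events bound to
 * a CPU with a VPE-wide (or narrower) range are restricted to that VPE;
 * processor-wide events, and events not bound to a CPU, count across all
 * thread contexts.
 */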
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
	if (event->cpu >= 0) {
		if (pev->range > V) {
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
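/*
 * __hw_perf_event_init(): translate attr.type/attr.config into a
 * mips_perf_event via the generic, cache or raw mapping, encode it into
 * hwc->event_base, default the sample period to the maximum, and validate
 * that the event's group can be scheduled.
 */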
static int __hw_perf_event_init(struct perf_event *event)
		pev = mipspmu_map_general_event(event->attr.config);
		pev = mipspmu_map_cache_event(event->attr.config);
		pev = mipspmu.map_raw_event(event->attr.config);
		return PTR_ERR(pev);

	check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;
		event->destroy(event);
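/*
 * pause_local_counters()/resume_local_counters(): save each counter's
 * control register with counting disabled and later restore the saved
 * values; used around PMU-wide disable/enable and in the overflow
 * interrupt handler.
 */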
static void pause_local_counters(void)
	int ctr = mipspmu.num_counters;
	unsigned long flags;

		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
static void resume_local_counters(void)
	int ctr = mipspmu.num_counters;

		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
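/*
 * Shared perf/timer interrupt handler: pause the local counters, test the
 * overflow bit of every counter in use, and dispatch overflowed counters
 * through handle_associated_event() before resuming.
 */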
static int mipsxx_pmu_handle_shared_irq(void)
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;

	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS

	perf_sample_data_init(&data, 0, 0);

#define HANDLE_COUNTER(n)						\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	resume_local_counters();
static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}
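/*
 * Raw event decoding helpers: IS_BOTH_COUNTERS_*() marks events that may be
 * counted on either counter of a pair, and IS_RANGE_P/V_*() classify MT
 * events as processor-wide or VPE-wide (everything else is per-TC).
 */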
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)		((r) == 47)

#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)

#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)
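/*
 * mipsxx_pmu_map_raw_event(): the low seven bits of the raw config give the
 * event number; the remaining bit of the low byte selects which counter
 * parity the event can use, and on MT kernels the IS_RANGE_* helpers
 * classify the event as processor-wide (P), VPE-wide (V) or per-TC (T).
 */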
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

		raw_event.cntr_mask =
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;

		raw_event.cntr_mask =
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
			raw_event.range = T;

		raw_event.cntr_mask =
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;

		raw_event.cntr_mask =
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
			raw_event.range = T;

		raw_event.cntr_mask =

	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;
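/*
 * PMU probe and setup: count the hardware counters, share them out per VPE
 * where they are shared, select the overflow interrupt, then fill in
 * mipspmu (name, event maps, counter width and accessors) for the detected
 * core.
 */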
	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);

#ifdef MSC01E_INT_BASE

#ifdef MSC01E_INT_BASE

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
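/*
 * Select the PMU name and the general/cache event maps for the detected
 * core; cores without explicit support report that performance counters
 * are not implemented.
 */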
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;

		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;

		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;

		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;

		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;

		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;

		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;

		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
	mipspmu.num_counters = counters;

		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;

		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");