15 #ifdef CONFIG_CPU_XSCALE
/*
 * Hardware event selector values for the XScale v1 performance
 * monitoring unit.  These numbers are programmed into the event-select
 * fields of the PMU control registers (see the *_EVT_SHFT/_EVT_MASK
 * macros below).
 */
16 enum xscale_perf_types {
17 XSCALE_PERFCTR_ICACHE_MISS = 0x00,
18 XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
19 XSCALE_PERFCTR_DATA_STALL = 0x02,
20 XSCALE_PERFCTR_ITLB_MISS = 0x03,
21 XSCALE_PERFCTR_DTLB_MISS = 0x04,
22 XSCALE_PERFCTR_BRANCH = 0x05,
23 XSCALE_PERFCTR_BRANCH_MISS = 0x06,
24 XSCALE_PERFCTR_INSTRUCTION = 0x07,
25 XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
26 XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
27 XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
28 XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
29 XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
30 XSCALE_PERFCTR_PC_CHANGED = 0x0D,
31 XSCALE_PERFCTR_BCU_REQUEST = 0x10,
32 XSCALE_PERFCTR_BCU_FULL = 0x11,
33 XSCALE_PERFCTR_BCU_DRAIN = 0x12,
34 XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
35 XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
36 XSCALE_PERFCTR_RMW = 0x16,
/*
 * Software-only markers, not hardware events: CCNT requests the
 * dedicated cycle counter (compared against event->config_base in
 * xscale1pmu_get_event_idx), and UNUSED is parked in a counter's
 * event-select field when the counter is disabled.
 */
38 XSCALE_PERFCTR_CCNT = 0xFE,
39 XSCALE_PERFCTR_UNUSED = 0xFF,
/*
 * Counter indices: the dedicated cycle counter first, then the
 * programmable counters.
 * NOTE(review): the COUNTER0..COUNTER3 enumerators and the closing
 * brace fall outside this extracted view.
 */
42 enum xscale_counters {
43 XSCALE_CYCLE_COUNTER = 0,
/*
 * Fragments of the generic-perf-cache-event -> XScale event mapping
 * table (xscale_perf_cache_map).  Only the populated designated
 * initializers are visible here; the enclosing array declaration and
 * the C() macro are defined elsewhere in the file.
 */
67 [
C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
68 [
C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
71 [
C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
72 [
C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
/* L1I: only misses are countable; no access event is wired up. */
82 [
C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
86 [
C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
/* Data TLB misses. */
110 [
C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
114 [
C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
/* Instruction TLB misses. */
124 [
C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
128 [
C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
/*
 * XScale PMNC (performance monitor control register) bits, common to
 * the v1 and v2 PMUs.
 */
#define XSCALE_PMU_ENABLE	0x001	/* enable all counters */
#define XSCALE_PMN_RESET	0x002	/* reset the event counters */
#define XSCALE_CCNT_RESET	0x004	/* reset the cycle counter */
/*
 * BUG FIX: this previously expanded to (CCNT_RESET | PMN_RESET) --
 * identifiers that are not defined anywhere, so any use of the macro
 * failed to compile.  Use the XSCALE_-prefixed names defined above.
 */
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008	/* divider bit; cleared in xscale2pmu_start
					 * (per its name, CCNT ticks every 64
					 * cycles when set -- confirm against
					 * the XScale core manual) */
/*
 * XScale v1 PMNC layout: overflow flags in bits [10:8], interrupt
 * enables in bits [6:4], and the two 8-bit event-select fields at
 * bits [19:12] and [27:20].
 */
171 #define XSCALE1_OVERFLOWED_MASK 0x700
172 #define XSCALE1_CCOUNT_OVERFLOW 0x400
173 #define XSCALE1_COUNT0_OVERFLOW 0x100
174 #define XSCALE1_COUNT1_OVERFLOW 0x200
175 #define XSCALE1_CCOUNT_INT_EN 0x040
176 #define XSCALE1_COUNT0_INT_EN 0x010
177 #define XSCALE1_COUNT1_INT_EN 0x020
178 #define XSCALE1_COUNT0_EVT_SHFT 12
179 #define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
180 #define XSCALE1_COUNT1_EVT_SHFT 20
181 #define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
/*
 * xscale1pmu_read_pmnc: read the v1 performance monitor control
 * register (CP14 c0, op2 0).  On v1 this register also carries the
 * overflow flags and event-select fields.
 * NOTE(review): the storage class/return type and the declaration and
 * return of 'val' are not visible in this extracted chunk.
 */
184 xscale1pmu_read_pmnc(
void)
187 asm volatile(
"mrc p14, 0, %0, c0, c0, 0" :
"=r" (
val));
/* xscale1pmu_write_pmnc: write the v1 PMNC register. */
192 xscale1pmu_write_pmnc(
u32 val)
196 asm volatile(
"mcr p14, 0, %0, c0, c0, 0" : :
"r" (
val));
/*
 * xscale1_pmnc_counter_has_overflowed: test the given counter's
 * overflow flag in a previously read PMNC value; warns once on an
 * out-of-range counter index.
 */
200 xscale1_pmnc_counter_has_overflowed(
unsigned long pmnc,
206 case XSCALE_CYCLE_COUNTER:
207 ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
209 case XSCALE_COUNTER0:
210 ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
212 case XSCALE_COUNTER1:
213 ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
216 WARN_ONCE(1,
"invalid counter number (%d)\n", counter);
/*
 * xscale1pmu_handle_irq: PMU overflow interrupt handler for v1.
 * Freezes the counters (clears the enable bit), returns early if no
 * overflow flag is set, scans every counter for overflows, then
 * re-enables the PMU.
 */
223 xscale1pmu_handle_irq(
int irq_num,
void *
dev)
226 struct perf_sample_data
data;
227 struct pmu_hw_events *cpuc;
/* On v1 reading PMNC also captures the overflow flags. */
237 pmnc = xscale1pmu_read_pmnc();
/* Freeze the counters while we process the overflow(s). */
244 xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
246 if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
252 for (idx = 0; idx < cpu_pmu->num_events; ++
idx) {
259 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
264 perf_sample_data_init(&
data, 0, hwc->last_period);
/* NOTE(review): the condition guarding this disable is not visible
 * in this extracted view. */
269 cpu_pmu->disable(hwc, idx);
/* Restart the PMU. */
277 pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
278 xscale1pmu_write_pmnc(pmnc);
/*
 * NOTE(review): the function header is missing from this view; judging
 * by the xscale1pmu ops table below this is xscale1pmu_enable_event.
 * Builds the interrupt-enable and event-select bits for the chosen
 * counter and merges them into PMNC.
 */
287 struct pmu_hw_events *
events = cpu_pmu->get_hw_events();
290 case XSCALE_CYCLE_COUNTER:
292 evt = XSCALE1_CCOUNT_INT_EN;
294 case XSCALE_COUNTER0:
295 mask = XSCALE1_COUNT0_EVT_MASK;
296 evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
297 XSCALE1_COUNT0_INT_EN;
299 case XSCALE_COUNTER1:
300 mask = XSCALE1_COUNT1_EVT_MASK;
301 evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
302 XSCALE1_COUNT1_INT_EN;
305 WARN_ONCE(1,
"invalid counter number (%d)\n", idx);
310 val = xscale1pmu_read_pmnc();
313 xscale1pmu_write_pmnc(val);
/*
 * NOTE(review): header missing; from the ops table this is
 * xscale1pmu_disable_event.  Clears the counter's interrupt enable and
 * parks its event-select field on XSCALE_PERFCTR_UNUSED.
 */
321 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
324 case XSCALE_CYCLE_COUNTER:
325 mask = XSCALE1_CCOUNT_INT_EN;
328 case XSCALE_COUNTER0:
329 mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
330 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
332 case XSCALE_COUNTER1:
333 mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
334 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
337 WARN_ONCE(1,
"invalid counter number (%d)\n", idx);
342 val = xscale1pmu_read_pmnc();
345 xscale1pmu_write_pmnc(val);
/*
 * xscale1pmu_get_event_idx: allocate a counter for the event.  The
 * cycle-count pseudo-event (XSCALE_PERFCTR_CCNT) gets the dedicated
 * cycle counter; ordinary events take COUNTER1, then COUNTER0.
 */
350 xscale1pmu_get_event_idx(
struct pmu_hw_events *cpuc,
353 if (XSCALE_PERFCTR_CCNT == event->config_base) {
357 return XSCALE_CYCLE_COUNTER;
360 return XSCALE_COUNTER1;
363 return XSCALE_COUNTER0;
/* xscale1pmu_start: set the global enable bit in PMNC. */
370 xscale1pmu_start(
void)
373 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
376 val = xscale1pmu_read_pmnc();
377 val |= XSCALE_PMU_ENABLE;
378 xscale1pmu_write_pmnc(val);
/* xscale1pmu_stop: clear the global enable bit in PMNC. */
383 xscale1pmu_stop(
void)
386 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
389 val = xscale1pmu_read_pmnc();
390 val &= ~XSCALE_PMU_ENABLE;
391 xscale1pmu_write_pmnc(val);
/*
 * xscale1pmu_read_counter: read a counter over CP14 -- CCNT lives in
 * c1, event counters 0 and 1 in c2 and c3.
 */
396 xscale1pmu_read_counter(
int counter)
401 case XSCALE_CYCLE_COUNTER:
402 asm volatile(
"mrc p14, 0, %0, c1, c0, 0" :
"=r" (
val));
404 case XSCALE_COUNTER0:
405 asm volatile(
"mrc p14, 0, %0, c2, c0, 0" :
"=r" (
val));
407 case XSCALE_COUNTER1:
408 asm volatile(
"mrc p14, 0, %0, c3, c0, 0" :
"=r" (
val));
/* xscale1pmu_write_counter: write-side mirror of the accessor above. */
416 xscale1pmu_write_counter(
int counter,
u32 val)
419 case XSCALE_CYCLE_COUNTER:
420 asm volatile(
"mcr p14, 0, %0, c1, c0, 0" : :
"r" (
val));
422 case XSCALE_COUNTER0:
423 asm volatile(
"mcr p14, 0, %0, c2, c0, 0" : :
"r" (
val));
425 case XSCALE_COUNTER1:
426 asm volatile(
"mcr p14, 0, %0, c3, c0, 0" : :
"r" (
val));
/*
 * xscale_map_event: translate a generic perf event to an XScale event
 * number via the mapping tables; 0xFF (== XSCALE_PERFCTR_UNUSED) marks
 * unsupported entries.  Shared by the v1 and v2 PMU descriptors.
 */
431 static int xscale_map_event(
struct perf_event *event)
434 &xscale_perf_cache_map, 0xFF);
/* arm_pmu operations for the XScale v1 PMU. */
437 static struct arm_pmu xscale1pmu = {
439 .handle_irq = xscale1pmu_handle_irq,
440 .enable = xscale1pmu_enable_event,
441 .disable = xscale1pmu_disable_event,
442 .read_counter = xscale1pmu_read_counter,
443 .write_counter = xscale1pmu_write_counter,
444 .get_event_idx = xscale1pmu_get_event_idx,
445 .start = xscale1pmu_start,
446 .stop = xscale1pmu_stop,
447 .map_event = xscale_map_event,
/* The hardware counters are 32 bits wide. */
449 .max_period = (1LLU << 32) - 1,
/* NOTE(review): the body of this init function is not visible here. */
452 static struct arm_pmu *
__devinit xscale1pmu_init(
void)
/*
 * XScale v2 PMU: overflow flags, interrupt enables and the four 8-bit
 * event-select fields live in separate CP14 registers (see the
 * xscale2pmu_* accessors below).
 */
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT3_EVT_SHFT	24
/*
 * BUG FIX: the masks are built from an unsigned 0xffU.  The previous
 * (0xff << 24) left-shifted a signed int into the sign bit, which is
 * undefined behaviour in C; 0xffU << 24 is well defined and produces
 * the same 0xff000000 bit pattern.
 */
#define XSCALE2_COUNT0_EVT_MASK	(0xffU << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_MASK	(0xffU << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_MASK	(0xffU << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_MASK	(0xffU << XSCALE2_COUNT3_EVT_SHFT)
/*
 * xscale2pmu_read_pmnc: read the v2 PMNC (CP14 c0, CRm c1).
 * NOTE(review): return types and local declarations of these accessors
 * are not visible in this extracted chunk.
 */
478 xscale2pmu_read_pmnc(
void)
481 asm volatile(
"mrc p14, 0, %0, c0, c1, 0" :
"=r" (
val));
/* Keep only the meaningful bits: bit 0 (enable), bit 3 (CNT64) and
 * the top byte; everything else is masked off. */
483 return val & 0xff000009;
/* xscale2pmu_write_pmnc: write the v2 PMNC. */
487 xscale2pmu_write_pmnc(
u32 val)
491 asm volatile(
"mcr p14, 0, %0, c0, c1, 0" : :
"r" (
val));
/*
 * Overflow flag status register (CP14 c5, CRm c1).  The handlers write
 * the observed flags back to acknowledge them -- apparently
 * write-to-clear; confirm against the XScale core manual.
 */
495 xscale2pmu_read_overflow_flags(
void)
498 asm volatile(
"mrc p14, 0, %0, c5, c1, 0" :
"=r" (
val));
503 xscale2pmu_write_overflow_flags(
u32 val)
505 asm volatile(
"mcr p14, 0, %0, c5, c1, 0" : :
"r" (
val));
/* Event select register (CP14 c8, CRm c1): four 8-bit selector
 * fields, one per programmable counter. */
509 xscale2pmu_read_event_select(
void)
512 asm volatile(
"mrc p14, 0, %0, c8, c1, 0" :
"=r" (
val));
517 xscale2pmu_write_event_select(
u32 val)
519 asm volatile(
"mcr p14, 0, %0, c8, c1, 0" : :
"r"(
val));
/* Interrupt enable register (CP14 c4, CRm c1). */
523 xscale2pmu_read_int_enable(
void)
526 asm volatile(
"mrc p14, 0, %0, c4, c1, 0" :
"=r" (
val));
531 xscale2pmu_write_int_enable(
u32 val)
533 asm volatile(
"mcr p14, 0, %0, c4, c1, 0" : :
"r" (
val));
/*
 * xscale2_pmnc_counter_has_overflowed: test the given counter's flag
 * in a previously read overflow status word; warns once on an
 * out-of-range counter index.
 */
537 xscale2_pmnc_counter_has_overflowed(
unsigned long of_flags,
538 enum xscale_counters counter)
543 case XSCALE_CYCLE_COUNTER:
544 ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
546 case XSCALE_COUNTER0:
547 ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
549 case XSCALE_COUNTER1:
550 ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
552 case XSCALE_COUNTER2:
553 ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
555 case XSCALE_COUNTER3:
556 ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
559 WARN_ONCE(1,
"invalid counter number (%d)\n", counter);
/*
 * xscale2pmu_handle_irq: v2 overflow interrupt handler.  Unlike v1 the
 * overflow flags live in a dedicated register, which is read and then
 * written back to acknowledge the overflows before scanning the
 * counters.
 */
566 xscale2pmu_handle_irq(
int irq_num,
void *dev)
568 unsigned long pmnc, of_flags;
569 struct perf_sample_data
data;
570 struct pmu_hw_events *cpuc;
/* Freeze the counters while we process the overflow(s). */
575 pmnc = xscale2pmu_read_pmnc();
576 xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
579 of_flags = xscale2pmu_read_overflow_flags();
580 if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
/* Acknowledge the overflows we observed. */
584 xscale2pmu_write_overflow_flags(of_flags);
589 for (idx = 0; idx < cpu_pmu->num_events; ++
idx) {
596 if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
601 perf_sample_data_init(&
data, 0, hwc->last_period);
/* NOTE(review): the condition guarding this disable is not visible
 * in this extracted view. */
606 cpu_pmu->disable(hwc, idx);
/* Restart the PMU. */
614 pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
615 xscale2pmu_write_pmnc(pmnc);
/*
 * NOTE(review): the function header is missing from this view; from
 * the xscale2pmu ops table below this is xscale2pmu_enable_event.
 * Programs the counter's field in the event-select register and sets
 * its interrupt enable.
 */
623 unsigned long flags, ien, evtsel;
624 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
626 ien = xscale2pmu_read_int_enable();
627 evtsel = xscale2pmu_read_event_select();
630 case XSCALE_CYCLE_COUNTER:
631 ien |= XSCALE2_CCOUNT_INT_EN;
633 case XSCALE_COUNTER0:
634 ien |= XSCALE2_COUNT0_INT_EN;
635 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
636 evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
638 case XSCALE_COUNTER1:
639 ien |= XSCALE2_COUNT1_INT_EN;
640 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
641 evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
643 case XSCALE_COUNTER2:
644 ien |= XSCALE2_COUNT2_INT_EN;
645 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
646 evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
648 case XSCALE_COUNTER3:
649 ien |= XSCALE2_COUNT3_INT_EN;
650 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
651 evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
654 WARN_ONCE(1,
"invalid counter number (%d)\n", idx);
659 xscale2pmu_write_event_select(evtsel);
660 xscale2pmu_write_int_enable(ien);
/*
 * NOTE(review): header missing; from the ops table this is
 * xscale2pmu_disable_event.  Masks the counter's interrupt, parks its
 * event field on XSCALE_PERFCTR_UNUSED, and clears any pending
 * overflow flag for it.
 */
667 unsigned long flags, ien, evtsel, of_flags;
668 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
670 ien = xscale2pmu_read_int_enable();
671 evtsel = xscale2pmu_read_event_select();
674 case XSCALE_CYCLE_COUNTER:
675 ien &= ~XSCALE2_CCOUNT_INT_EN;
676 of_flags = XSCALE2_CCOUNT_OVERFLOW;
678 case XSCALE_COUNTER0:
679 ien &= ~XSCALE2_COUNT0_INT_EN;
680 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
681 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
682 of_flags = XSCALE2_COUNT0_OVERFLOW;
684 case XSCALE_COUNTER1:
685 ien &= ~XSCALE2_COUNT1_INT_EN;
686 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
687 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
688 of_flags = XSCALE2_COUNT1_OVERFLOW;
690 case XSCALE_COUNTER2:
691 ien &= ~XSCALE2_COUNT2_INT_EN;
692 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
693 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
694 of_flags = XSCALE2_COUNT2_OVERFLOW;
696 case XSCALE_COUNTER3:
697 ien &= ~XSCALE2_COUNT3_INT_EN;
698 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
699 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
700 of_flags = XSCALE2_COUNT3_OVERFLOW;
703 WARN_ONCE(1,
"invalid counter number (%d)\n", idx);
708 xscale2pmu_write_event_select(evtsel);
709 xscale2pmu_write_int_enable(ien);
710 xscale2pmu_write_overflow_flags(of_flags);
/*
 * xscale2pmu_get_event_idx: try the v1 allocator (cycle counter,
 * COUNTER1, COUNTER0) first, then fall back to the two extra v2
 * counters (COUNTER3, then COUNTER2).
 */
715 xscale2pmu_get_event_idx(
struct pmu_hw_events *cpuc,
718 int idx = xscale1pmu_get_event_idx(cpuc, event);
723 idx = XSCALE_COUNTER3;
725 idx = XSCALE_COUNTER2;
/*
 * xscale2pmu_start: enable the PMU; also clears XSCALE_PMU_CNT64
 * (per its name the CCNT 64-cycle divider -- confirm against the core
 * manual) so the cycle counter runs at full rate.
 */
731 xscale2pmu_start(
void)
734 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
737 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
738 val |= XSCALE_PMU_ENABLE;
739 xscale2pmu_write_pmnc(val);
/* xscale2pmu_stop: clear the global enable bit in PMNC. */
744 xscale2pmu_stop(
void)
747 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
750 val = xscale2pmu_read_pmnc();
751 val &= ~XSCALE_PMU_ENABLE;
752 xscale2pmu_write_pmnc(val);
/*
 * xscale2pmu_read_counter: on v2 CCNT is CP14 c1 with CRm c1, and the
 * four event counters are CP14 c0..c3 with CRm c2.
 */
757 xscale2pmu_read_counter(
int counter)
762 case XSCALE_CYCLE_COUNTER:
763 asm volatile(
"mrc p14, 0, %0, c1, c1, 0" :
"=r" (
val));
765 case XSCALE_COUNTER0:
766 asm volatile(
"mrc p14, 0, %0, c0, c2, 0" :
"=r" (
val));
768 case XSCALE_COUNTER1:
769 asm volatile(
"mrc p14, 0, %0, c1, c2, 0" :
"=r" (
val));
771 case XSCALE_COUNTER2:
772 asm volatile(
"mrc p14, 0, %0, c2, c2, 0" :
"=r" (
val));
774 case XSCALE_COUNTER3:
775 asm volatile(
"mrc p14, 0, %0, c3, c2, 0" :
"=r" (
val));
/* xscale2pmu_write_counter: write-side mirror of the accessor above. */
783 xscale2pmu_write_counter(
int counter,
u32 val)
786 case XSCALE_CYCLE_COUNTER:
787 asm volatile(
"mcr p14, 0, %0, c1, c1, 0" : :
"r" (
val));
789 case XSCALE_COUNTER0:
790 asm volatile(
"mcr p14, 0, %0, c0, c2, 0" : :
"r" (
val));
792 case XSCALE_COUNTER1:
793 asm volatile(
"mcr p14, 0, %0, c1, c2, 0" : :
"r" (
val));
795 case XSCALE_COUNTER2:
796 asm volatile(
"mcr p14, 0, %0, c2, c2, 0" : :
"r" (
val));
798 case XSCALE_COUNTER3:
799 asm volatile(
"mcr p14, 0, %0, c3, c2, 0" : :
"r" (
val));
/* arm_pmu operations for the XScale v2 PMU. */
804 static struct arm_pmu xscale2pmu = {
806 .handle_irq = xscale2pmu_handle_irq,
807 .enable = xscale2pmu_enable_event,
808 .disable = xscale2pmu_disable_event,
809 .read_counter = xscale2pmu_read_counter,
810 .write_counter = xscale2pmu_write_counter,
811 .get_event_idx = xscale2pmu_get_event_idx,
812 .start = xscale2pmu_start,
813 .stop = xscale2pmu_stop,
814 .map_event = xscale_map_event,
/* The hardware counters are 32 bits wide. */
816 .max_period = (1LLU << 32) - 1,
/* NOTE(review): the body of this init function is not visible here. */
819 static struct arm_pmu *
__devinit xscale2pmu_init(
void)
/*
 * NOTE(review): these second *_init declarations are presumably the
 * stubs for the !CONFIG_CPU_XSCALE case; their bodies fall outside
 * this extracted view.
 */
824 static struct arm_pmu *
__devinit xscale1pmu_init(
void)
829 static struct arm_pmu *
__devinit xscale2pmu_init(
void)