10 #include <linux/kernel.h>
14 #include <linux/perf_event.h>
15 #include <asm/processor.h>
/*
 * Per-counter MMIO register addresses: each counter's control (CCBR)
 * and count (PMCTR) register sits sizeof(u32) bytes past the previous
 * one.  The macro argument is parenthesized so that expressions such
 * as PPC_CCBR(i + 1) expand correctly.
 */
#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * (idx)))
#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * (idx)))

/*
 * Counter control (CCBR) bits: CIT is an 11-bit event-select field at
 * bits [16:6]; DUC, CMDS and PPCE are single-bit control flags.
 * NOTE(review): exact flag semantics are per the SH-4A hardware
 * manual — confirm before relying on them.
 */
#define CCBR_CIT_MASK (0x7ff << 6)
#define CCBR_DUC (1 << 3)
#define CCBR_CMDS (1 << 1)
#define CCBR_PPCE (1 << 0)
/*
 * The PMCAT control register lives at a different address on SH-X3
 * parts than on the earlier SH-X/SH-X2 location.  The #else/#endif
 * pairing is restored here: two alternative definitions of PPC_PMCAT
 * cannot legally coexist under a single unterminated #ifdef.
 */
#ifdef CONFIG_CPU_SHX3
#define PPC_PMCAT 0xfc100240
#else
#define PPC_PMCAT 0xfc100080
#endif
45 #define PMCAT_OVF3 (1 << 27)
46 #define PMCAT_CNN3 (1 << 26)
47 #define PMCAT_CLR3 (1 << 25)
48 #define PMCAT_OVF2 (1 << 19)
49 #define PMCAT_CLR2 (1 << 17)
50 #define PMCAT_OVF1 (1 << 11)
51 #define PMCAT_CNN1 (1 << 10)
52 #define PMCAT_CLR1 (1 << 9)
53 #define PMCAT_OVF0 (1 << 3)
54 #define PMCAT_CLR0 (1 << 1)
56 static struct sh_pmu sh4a_pmu;
100 #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
102 static const int sh4a_general_events[] = {
112 #define C(x) PERF_COUNT_HW_CACHE_##x
114 static const int sh4a_cache_events
121 [
C(RESULT_ACCESS) ] = 0x0031,
122 [
C(RESULT_MISS) ] = 0x0032,
125 [
C(RESULT_ACCESS) ] = 0x0039,
126 [
C(RESULT_MISS) ] = 0x003a,
128 [
C(OP_PREFETCH) ] = {
129 [
C(RESULT_ACCESS) ] = 0,
130 [
C(RESULT_MISS) ] = 0,
136 [
C(RESULT_ACCESS) ] = 0x0029,
137 [
C(RESULT_MISS) ] = 0x002a,
140 [
C(RESULT_ACCESS) ] = -1,
141 [
C(RESULT_MISS) ] = -1,
143 [
C(OP_PREFETCH) ] = {
144 [
C(RESULT_ACCESS) ] = 0,
145 [
C(RESULT_MISS) ] = 0,
151 [
C(RESULT_ACCESS) ] = 0x0030,
152 [
C(RESULT_MISS) ] = 0,
155 [
C(RESULT_ACCESS) ] = 0x0038,
156 [
C(RESULT_MISS) ] = 0,
158 [
C(OP_PREFETCH) ] = {
159 [
C(RESULT_ACCESS) ] = 0,
160 [
C(RESULT_MISS) ] = 0,
166 [
C(RESULT_ACCESS) ] = 0x0222,
167 [
C(RESULT_MISS) ] = 0x0220,
170 [
C(RESULT_ACCESS) ] = 0,
171 [
C(RESULT_MISS) ] = 0,
173 [
C(OP_PREFETCH) ] = {
174 [
C(RESULT_ACCESS) ] = 0,
175 [
C(RESULT_MISS) ] = 0,
181 [
C(RESULT_ACCESS) ] = 0,
182 [
C(RESULT_MISS) ] = 0x02a0,
185 [
C(RESULT_ACCESS) ] = -1,
186 [
C(RESULT_MISS) ] = -1,
188 [
C(OP_PREFETCH) ] = {
189 [
C(RESULT_ACCESS) ] = -1,
190 [
C(RESULT_MISS) ] = -1,
196 [
C(RESULT_ACCESS) ] = -1,
197 [
C(RESULT_MISS) ] = -1,
200 [
C(RESULT_ACCESS) ] = -1,
201 [
C(RESULT_MISS) ] = -1,
203 [
C(OP_PREFETCH) ] = {
204 [
C(RESULT_ACCESS) ] = -1,
205 [
C(RESULT_MISS) ] = -1,
211 [
C(RESULT_ACCESS) ] = -1,
212 [
C(RESULT_MISS) ] = -1,
215 [
C(RESULT_ACCESS) ] = -1,
216 [
C(RESULT_MISS) ] = -1,
218 [
C(OP_PREFETCH) ] = {
219 [
C(RESULT_ACCESS) ] = -1,
220 [
C(RESULT_MISS) ] = -1,
225 static int sh4a_event_map(
int event)
227 return sh4a_general_events[
event];
230 static u64 sh4a_pmu_read(
int idx)
235 static void sh4a_pmu_disable(
struct hw_perf_event *hwc,
int idx)
244 static void sh4a_pmu_enable(
struct hw_perf_event *hwc,
int idx)
260 static void sh4a_pmu_disable_all(
void)
264 for (i = 0; i < sh4a_pmu.num_events; i++)
268 static void sh4a_pmu_enable_all(
void)
272 for (i = 0; i < sh4a_pmu.num_events; i++)
276 static struct sh_pmu sh4a_pmu = {
279 .event_map = sh4a_event_map,
280 .max_events =
ARRAY_SIZE(sh4a_general_events),
281 .raw_event_mask = 0x3ff,
282 .cache_events = &sh4a_cache_events,
283 .read = sh4a_pmu_read,
284 .disable = sh4a_pmu_disable,
285 .enable = sh4a_pmu_enable,
286 .disable_all = sh4a_pmu_disable_all,
287 .enable_all = sh4a_pmu_enable_all,
290 static int __init sh4a_pmu_init(
void)
296 pr_notice(
"HW perf events unsupported, software events only.\n");