16 #include <asm/ptrace.h>
17 #include <asm/processor.h>
18 #include <asm/cputable.h>
/*
 * Bit-field constants for decoding oprofile PMU event codes and MMCRA.
 * PMCSEL/UNIT masks and widths describe how event selectors are packed
 * into the MMCR1 image (see power7_marked_instr_event below).
 */
24 #define OPROFILE_PM_PMCSEL_MSK 0xffULL
25 #define OPROFILE_PM_UNIT_SHIFT 60
26 #define OPROFILE_PM_UNIT_MSK 0xfULL
27 #define OPROFILE_MAX_PMC_NUM 3
28 #define OPROFILE_PMSEL_FIELD_WIDTH 8
29 #define OPROFILE_UNIT_FIELD_WIDTH 4
/* MMCRA bit indicating the sampled-instruction address (SIAR) is valid. */
30 #define MMCRA_SIAR_VALID_MASK 0x10000000ULL
/* Nonzero while sampling is active: set in power4_start, cleared in power4_stop. */
34 static int oprofile_running;
/* When set, refine the sampled PC by the MMCRA slot number (see the
 * PC-classification code below); presumably set for the CPU models tested
 * in reg_setup — the assignment itself is not visible in this chunk. */
35 static int use_slot_nums;
/* Bitmask with bit i set when counter i is programmed with a "marked"
 * instruction event (filled in from power7_marked_instr_event). */
41 static u32 cntr_marked_events;
/*
 * Decide which PMCs are counting "marked" instruction events on POWER7,
 * based on the event selectors encoded in the MMCR1 image.  Returns a
 * bitmask with bit 'pmc' set when that counter carries a marked event.
 * NOTE(review): several selector-decoding lines (including the derivation
 * of 'unit' and the switch structure) are elided in this chunk — confirm
 * against the full source.
 */
43 static int power7_marked_instr_event(
u64 mmcr1)
/* Local accumulator shadows the file-scope cntr_marked_events. */
46 int pmc, cntr_marked_events = 0;
/* Walk the four programmable counters (0-based PMC index). */
52 for (pmc = 0; pmc < 4; pmc++) {
66 cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
70 cntr_marked_events |= (pmc == 0) << pmc;
75 cntr_marked_events |= (pmc != 1) << pmc;
79 cntr_marked_events |= 1 << pmc;
/* 'unit' is decoded from the event selector (derivation elided here). */
83 cntr_marked_events |= (unit == 0xd) << pmc;
87 cntr_marked_events |= (pmc >= 2) << pmc;
90 cntr_marked_events |= (unit == 0xd) << pmc;
94 return cntr_marked_events;
/*
 * (power4_reg_setup body fragment — function signature elided in this
 * chunk.)  Cache the user-requested PMU control register images and
 * derive per-counter state from them.
 */
108 mmcr0_val = sys->mmcr0;
109 mmcr1_val = sys->mmcr1;
110 mmcra_val = sys->mmcra;
/* Only POWER7+ gets the marked-event bookkeeping; everyone else records
 * every overflow sample. */
121 if (pvr_version_is(PVR_POWER7p))
122 cntr_marked_events = power7_marked_instr_event(mmcr1_val);
124 cntr_marked_events = 0;
/* Counters overflow when bit 31 sets, so preload with 0x80000000 - count
 * to get an interrupt after 'count' events. */
130 reset_value[i] = 0x80000000UL - ctr[i].
count;
/* Honor the user's kernel-state / problem-state profiling enables by
 * clearing or setting the corresponding freeze bits in MMCR0.  The
 * enclosing if/else conditions are elided in this chunk. */
134 mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
136 mmcr0_val |= MMCR0_KERNEL_DISABLE;
139 mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
141 mmcr0_val |= MMCR0_PROBLEM_DISABLE;
/* CPU models whose MMCRA reports an instruction slot number; presumably
 * sets use_slot_nums — the assignment is elided here, confirm in the
 * full source. */
143 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
144 pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
145 pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
146 pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
/*
 * True on CPU models that require MMCRA_SAMPLE_ENABLE to be set before
 * sampling works (consumed by the MMCRA setup in cpu_setup).  The return
 * statements are elided in this chunk.
 */
164 static inline int mmcra_must_set_sample(
void)
166 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
167 pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
168 pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
/*
 * (power4_cpu_setup body fragment — function signature elided in this
 * chunk.)  Program this CPU's performance-monitor control registers from
 * the images cached by reg_setup.
 */
176 unsigned int mmcr0 = mmcr0_val;
177 unsigned long mmcra = mmcra_val;
/* First write: load the base MMCR0 image before OR-ing in the
 * exception/freeze control bits below. */
183 mtspr(SPRN_MMCR0, mmcr0);
/* Enable PMU exceptions and freeze-on-enabled-condition, plus the FCM1
 * freeze-mode bit (bit semantics per asm/reg.h). */
185 mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
/* Enable overflow conditions for PMC1 and for the remaining PMCs. */
186 mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
187 mtspr(SPRN_MMCR0, mmcr0);
189 mtspr(SPRN_MMCR1, mmcr1_val);
/* Some CPU models only sample when MMCRA_SAMPLE_ENABLE is set. */
191 if (mmcra_must_set_sample())
192 mmcra |= MMCRA_SAMPLE_ENABLE;
193 mtspr(SPRN_MMCRA, mmcra);
/*
 * (power4_start body fragment — function signature and loop header elided
 * in this chunk.)  Arm the counters and begin sampling.
 */
/* Set the performance-monitor mark (PMM) bit in the MSR. */
211 mtmsrd(mfmsr() | MSR_PMM);
/* Enabled counters start from their preload so they overflow after
 * ctr[i].count events; disabled counters are parked at 0.  The
 * surrounding if/else on ctr[i].enabled is elided here. */
215 classic_ctr_write(i, reset_value[i]);
217 classic_ctr_write(i, 0);
221 mmcr0 =
mfspr(SPRN_MMCR0);
/* (MMCR0 bit manipulation elided in this chunk.) */
235 mtspr(SPRN_MMCR0, mmcr0);
237 oprofile_running = 1;
/*
 * Stop sampling on this CPU: read back MMCR0, modify it (the freeze-bit
 * manipulation is elided in this chunk), write it back, and mark
 * oprofile as no longer running.
 */
243 static void power4_stop(
void)
248 mmcr0 =
mfspr(SPRN_MMCR0);
250 mtspr(SPRN_MMCR0, mmcr0);
252 oprofile_running = 0;
/*
 * Dummy "bucket" functions: their symbol addresses are returned as fake
 * PC values so samples taken in the hypervisor, in RTAS, or at
 * unidentifiable kernel addresses land under a recognizable symbol (see
 * the PC-classification code below).  __used prevents the compiler from
 * discarding the otherwise-unreferenced symbols.
 */
260 static void __used hypervisor_bucket(
void)
264 static void __used rtas_bucket(
void)
268 static void __used kernel_unknown_bucket(
void)
/*
 * (PC-classification body fragment — the enclosing function's signature
 * is elided in this chunk.)  Derive the sampled program counter from
 * SIAR, then map addresses we cannot attribute (hypervisor, RTAS,
 * low/real-mode) onto the dummy bucket symbols above.
 */
284 unsigned long pc =
mfspr(SPRN_SIAR);
292 mmcra =
mfspr(SPRN_MMCRA);
/* When slot numbers are in use and sampling is enabled, MMCRA reports
 * which instruction slot in the sampled group triggered the event;
 * refine the PC by that slot (4 bytes per instruction). */
294 if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
295 slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
297 pc += 4 * (slot - 1);
/* Sample hit the hypervisor (rest of the condition elided here).  The
 * bucket symbol's address is dereferenced as data to produce the fake
 * PC value. */
301 if (firmware_has_feature(FW_FEATURE_LPAR) &&
304 return *((
unsigned long *)hypervisor_bucket);
/* Sample hit RTAS firmware (guard condition elided here). */
310 #ifdef CONFIG_PPC_RTAS
314 return *((
unsigned long *)rtas_bucket);
/* Low addresses are real-mode/physical; translate to a kernel virtual
 * address so the sample can be attributed. */
318 if (pc < 0x1000000UL)
319 return (
unsigned long)
__va(pc);
/* Anything else unattributable goes into the unknown-kernel bucket. */
324 return *((
unsigned long *)kernel_unknown_bucket);
/*
 * Classify a sample as kernel (privileged) or user.  When the CPU
 * advertises a sample-privilege bit in MMCRA (oprofile_mmcra_sipr), a
 * clear SIPR bit means the sample was taken in privileged state.  The
 * non-SIPR fallback path and the return statement are elided in this
 * chunk.
 */
329 static int get_kernel(
unsigned long pc,
unsigned long mmcra)
336 is_kernel = ((mmcra &
cur_cpu_spec->oprofile_mmcra_sipr) == 0);
/*
 * Decide whether counter value 'val' should be treated as overflowed.
 * On POWER7, a value within 256 of the 0x80000000 overflow point also
 * counts — NOTE(review): presumably because the PMU exception can be
 * taken while the counter is still just below the threshold; confirm
 * against the full source's comment.  The ordinary overflow test and
 * return statements are elided in this chunk.
 */
342 static bool pmc_overflow(
unsigned long val)
358 if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
/*
 * PMU exception handler: record a sample for each overflowed, enabled
 * counter, then rearm the counters and restore MMCRA/MMCR0.  Large parts
 * of the body (SIAR-validity determination, the sample-recording call,
 * MMCR0 bit fixup, loop headers) are elided in this chunk.
 */
364 static void power4_handle_interrupt(
struct pt_regs *regs,
373 bool siar_valid =
false;
375 mmcra =
mfspr(SPRN_MMCRA);
378 is_kernel = get_kernel(pc, mmcra);
/* Set the performance-monitor mark (PMM) bit while handling the
 * exception. */
381 mtmsrd(mfmsr() | MSR_PMM);
388 val = classic_ctr_read(i);
389 if (pmc_overflow(val)) {
390 if (oprofile_running && ctr[i].
enabled) {
/* Record the sample for marked events only when SIAR is valid;
 * non-marked events are always recorded (condition partially elided
 * here). */
401 (cntr_marked_events & (1 << i)))
402 || !(cntr_marked_events & (1 << i)))
/* Rearm: enabled counters restart from their preload; disabled
 * counters are parked at 0. */
406 classic_ctr_write(i, reset_value[i]);
408 classic_ctr_write(i, 0);
413 mmcr0 =
mfspr(SPRN_MMCR0);
/* Write MMCRA/MMCR0 back to re-enable counting (the MMCR0 bit
 * manipulation between read and write is elided in this chunk). */
426 mtspr(SPRN_MMCRA, mmcra);
434 mtspr(SPRN_MMCR0, mmcr0);
/*
 * (Model-ops initializer fragment — the struct declaration line and the
 * remaining members, e.g. .stop, are elided in this chunk.)  Wires the
 * POWER4-family callbacks into the oprofile model interface.
 */
438 .reg_setup = power4_reg_setup,
439 .cpu_setup = power4_cpu_setup,
440 .start = power4_start,
442 .handle_interrupt = power4_handle_interrupt,