19 #include <linux/slab.h>
35 static int nmi_enabled;
36 static int ctr_running;
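/* event-select assembly: the unit mask goes into bits 8-15 of the control value, followed by the model-specific extra bits */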
51 val |= (counter_config->unit_mask & 0xFF) << 8;
55 val |= counter_config->extra;
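/* profile_exceptions_notify() is the NMI callback; the !nmi_enabled branch lets NMIs that do not belong to oprofile pass through untouched */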
64 static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
68 else if (!nmi_enabled)
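/* snapshot the live counter and control MSRs so nmi_cpu_restore_registers() can put them back at shutdown */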
75 static void nmi_cpu_save_registers(struct op_msrs *msrs)
83 rdmsrl(counters[i].addr, counters[i].saved);
88 rdmsrl(controls[i].addr, controls[i].saved);
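/* per-CPU start/stop callbacks and the nmi_start()/nmi_stop() entry points that are later wired into oprofile_operations */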
92 static void nmi_cpu_start(void *dummy)
101 static int nmi_start(void)
112 static void nmi_cpu_stop(void *dummy)
121 static void nmi_stop(void)
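/* counter multiplexing support; built only with CONFIG_OPROFILE_EVENT_MULTIPLEX, with no-op stubs further down for the disabled case */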
129 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
133 static inline int has_mux(void)
135 return !!model->switch_ctrl;
148 static void nmi_shutdown_mux(void)
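/* reserve a per-CPU multiplex save area, one op_msr slot per virtual counter; setup is aborted if any allocation fails */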
162 static int nmi_setup_mux(void)
164 size_t multiplex_size =
172 per_cpu(cpu_msrs, i).multiplex =
174 if (!per_cpu(cpu_msrs, i).multiplex)
181 static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
197 per_cpu(switch_index, cpu) = 0;
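/* while switching event sets, the physical counters are parked in (and reloaded from) the multiplex[] slot of their current virtual counter */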
200 static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
208 if (counters[i].addr)
209 rdmsrl(counters[i].addr, multiplex[virt].saved);
213 static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
221 if (counters[i].addr)
222 wrmsrl(counters[i].addr, multiplex[virt].saved);
226 static void nmi_cpu_switch(void *dummy)
229 int si = per_cpu(switch_index, cpu);
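/* switch sequence: save the active set, advance the switch index (wrapping back to 0 at the end), reprogram the hardware via model->switch_ctrl(), then reload the new set */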
233 nmi_cpu_save_mpx_registers(msrs);
238 per_cpu(switch_index, cpu) = 0;
240 per_cpu(switch_index, cpu) = si;
242 model->switch_ctrl(model, msrs);
243 nmi_cpu_restore_mpx_registers(msrs);
254 static int nmi_multiplex_on(void)
259 static int nmi_switch_event(void)
263 if (nmi_multiplex_on() < 0)
280 static void mux_clone(int cpu)
286 per_cpu(cpu_msrs, 0).multiplex,
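/* no-op stand-ins used when CONFIG_OPROFILE_EVENT_MULTIPLEX is not configured */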
294 static inline void nmi_shutdown_mux(void) { }
295 static inline int nmi_setup_mux(void) { return 1; }
297 nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
299 static void mux_clone(int cpu) { }
303 static void free_msrs(void)
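/* allocate per-CPU "counters" and "controls" save arrays; free_msrs() above releases them again on failure or shutdown */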
315 static int allocate_msrs(void)
322 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
324 if (!per_cpu(cpu_msrs, i).counters)
326 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
328 if (!per_cpu(cpu_msrs, i).controls)
332 if (!nmi_setup_mux())
342 static void nmi_cpu_setup(void *dummy)
346 nmi_cpu_save_registers(msrs);
349 nmi_cpu_setup_mux(cpu, msrs);
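/* inverse of nmi_cpu_save_registers(): write the saved control MSRs back first, then the counter MSRs */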
355 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
362 if (controls[i].addr)
363 wrmsrl(controls[i].addr, controls[i].saved);
367 if (counters[i].addr)
368 wrmsrl(counters[i].addr, counters[i].saved);
372 static void nmi_cpu_shutdown(void *dummy)
387 nmi_cpu_restore_registers(msrs);
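/* CPU hotplug helpers: set up and start profiling on a CPU coming online, stop and shut it down on one going offline */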
390 static void nmi_cpu_up(void *dummy)
393 nmi_cpu_setup(dummy);
395 nmi_cpu_start(dummy);
398 static void nmi_cpu_down(void *dummy)
403 nmi_cpu_shutdown(dummy);
422 snprintf(buf, sizeof(buf), "%d", i);
439 int cpu = (unsigned long)data;
453 .notifier_call = oprofile_cpu_notifier
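/* nmi_setup() allocates the MSR buffers, registers the hotplug notifier and enables profiling; nmi_shutdown() reverses all of it */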
456 static int nmi_setup(void)
461 if (!allocate_msrs())
498 register_cpu_notifier(&oprofile_cpu_nb);
511 static void nmi_shutdown(void)
516 unregister_cpu_notifier(&oprofile_cpu_nb);
532 static int nmi_suspend(void)
535 if (nmi_enabled == 1)
540 static void nmi_resume(void)
542 if (nmi_enabled == 1)
548 .suspend = nmi_suspend,
551 static void __init init_suspend_resume(void)
556 static void exit_suspend_resume(void)
563 static inline void init_suspend_resume(void) { }
564 static inline void exit_suspend_resume(void) { }
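/* Pentium 4 detection: unsupported models are rejected; otherwise the cpu_type is "i386/p4", or "i386/p4-ht" with two HyperThreading siblings */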
572 if (cpu_model > 6 || cpu_model == 5)
576 *cpu_type = "i386/p4";
582 *cpu_type = "i386/p4";
587 *cpu_type = "i386/p4-ht";
593 printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
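/* the "cpu_type" module parameter can force timer mode or the architectural perfmon driver instead of autodetection */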
604 static int force_cpu_type;
608 if (!strcmp(str, "timer")) {
609 force_cpu_type = timer;
611 } else if (!strcmp(str, "arch_perfmon")) {
622 static int __init ppro_init(char **cpu_type)
627 if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
644 *cpu_type = "i386/ppro";
647 *cpu_type = "i386/pii";
651 *cpu_type = "i386/piii";
655 *cpu_type = "i386/p6_mobile";
658 *cpu_type = "i386/core";
664 *cpu_type = "i386/core_2";
670 *cpu_type = "i386/core_i7";
673 *cpu_type = "i386/atom";
688 char *cpu_type = NULL;
694 if (force_cpu_type == timer)
703 cpu_type = "i386/athlon";
710 cpu_type = "x86-64/hammer";
713 cpu_type = "x86-64/family10";
716 cpu_type = "x86-64/family11h";
719 cpu_type = "x86-64/family12h";
722 cpu_type = "x86-64/family14h";
725 cpu_type = "x86-64/family15h";
742 ppro_init(&cpu_type);
752 if (!cpu_has_arch_perfmon)
756 cpu_type = "i386/arch_perfmon";
766 ops->setup = nmi_setup;
768 ops->start = nmi_start;
769 ops->stop = nmi_stop;
773 ret = model->init(ops);
782 init_suspend_resume();
790 exit_suspend_resume();