28 #include <linux/kernel.h>
29 #include <linux/module.h>
32 #include <linux/sched.h>
34 #include <linux/compiler.h>
36 #include <linux/slab.h>
46 #include <asm/processor.h>
47 #include <asm/cpufeature.h>
/* Log prefix used with pr_* messages in this driver. */
54 #define PFX "acpi-cpufreq: "
/* Masks for the frequency-ID bits of the perf-control MSR value:
 * Intel uses the low 16 bits, AMD hardware P-states use the low 3 bits. */
63 #define INTEL_MSR_RANGE (0xffff)
64 #define AMD_MSR_RANGE (0x7)
/* AMD K7/K8 HWCR bit: when SET, Core Performance Boost is DISabled. */
66 #define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
/* Module parameter: non-zero => after each transition, re-read the
 * hardware to verify the requested P-state was actually reached. */
82 static unsigned int acpi_pstate_strict;
/* boost_supported: the CPU advertises a boost/CPB capability;
 * boost_enabled: the current software-visible boost on/off state. */
83 static bool boost_enabled, boost_supported;
/*
 * boost_state() - report whether boost/turbo is currently enabled on @cpu.
 * Reads a model-specific register as two 32-bit halves (lo/hi) and
 * reassembles them; the two identical reassemblies below presumably sit
 * in the Intel and AMD branches of a vendor switch -- the intervening
 * lines are not visible here, so confirm against the full file.
 */
86 static bool boost_state(
unsigned int cpu)
/* Combine the two 32-bit MSR halves into one 64-bit value. */
94 msr = lo | ((
u64)hi << 32);
98 msr = lo | ((
u64)hi << 32);
/*
 * _store_boost() - shared sysfs "store" backend for the boost knob.
 * Parses @buf as 0/1, bails out when boost is unsupported, when the
 * input is invalid, or when the requested state equals the current one,
 * then pushes the new setting to every online CPU.
 */
139 unsigned long val = 0;
/* Hardware without boost support: nothing to store. */
141 if (!boost_supported)
144 ret = kstrtoul(buf, 10, &val);
/* Only "0" and "1" are meaningful inputs. */
145 if (ret || (val > 1))
/* Requested state already matches the current state: no-op. */
148 if ((val && boost_enabled) || (!val && !boost_enabled))
/* Apply the new boost setting to all online CPUs' MSRs. */
153 boost_set_msrs(val, cpu_online_mask);
158 pr_debug(
"Core Boosting %sabled.\n", val ?
"en" :
"dis");
/* sysfs store handler for the global "boost" attribute: delegates to the
 * shared _store_boost() helper above. */
164 const char *buf,
size_t count)
166 return _store_boost(buf, count);
/* sysfs show handler: report the current boost state as "0"/"1". */
172 return sprintf(buf,
"%u\n", boost_enabled);
/* Legacy per-policy "cpb" attribute (AMD Core Performance Boost), built
 * only with CONFIG_X86_ACPI_CPUFREQ_CPB.  Its store/show handlers share
 * the same backends as the global "boost" attribute. */
179 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
183 return _store_boost(buf, count);
188 return sprintf(buf,
"%u\n", boost_enabled);
/* check_est_cpu() - non-zero when @cpuid supports Intel Enhanced
 * SpeedStep (EST). */
194 static int check_est_cpu(
unsigned int cpuid)
/* check_amd_hwpstate_cpu() - non-zero when @cpuid supports AMD
 * hardware P-states. */
201 static int check_amd_hwpstate_cpu(
unsigned int cpuid)
/* (extract helper) scan the ACPI performance-state table for the entry
 * whose status field matches the raw @value read from hardware. */
216 if (value == perf->
states[i].status)
/* extract_freq() dispatch: MSR-based systems decode via extract_msr(),
 * I/O-port based systems via extract_io(). */
246 return extract_msr(val, data);
248 return extract_io(val, data);
/* do_drv_read()/do_drv_write() - per-CPU callbacks (hence the void *
 * argument) that perform the actual MSR or I/O-port access described by
 * a struct drv_cmd; invoked on a specific CPU via smp_call_function_*(). */
274 static void do_drv_read(
void *_cmd)
295 static void do_drv_write(
void *_cmd)
/* drv_read() - run do_drv_read() on any one CPU in cmd->mask, waiting
 * for completion (last argument 1). */
319 static void drv_read(
struct drv_cmd *cmd)
324 err = smp_call_function_any(cmd->
mask, do_drv_read, cmd, 1);
/* drv_write() - companion writer for the CPUs in cmd->mask (body not
 * fully visible here). */
328 static void drv_write(
struct drv_cmd *cmd)
/*
 * get_cur_val() - read the current raw P-state status value for the
 * first CPU in @mask, choosing the access method from that CPU's
 * cpu_feature field.
 */
339 static u32 get_cur_val(
const struct cpumask *
mask)
347 switch (
per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
/* I/O-port case: the ACPI perf data supplies the port address/width. */
358 perf =
per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
/*
 * get_cur_freq_on_cpu() - ->get() hook: return the current frequency
 * (kHz) of @cpu, decoded from the raw hardware status value.  Compares
 * against a cached value so stale cached data can be refreshed.
 */
374 static unsigned int get_cur_freq_on_cpu(
unsigned int cpu)
378 unsigned int cached_freq;
380 pr_debug(
"get_cur_freq_on_cpu (%d)\n", cpu);
/* Decode the raw hardware value into a table frequency. */
388 freq = extract_freq(get_cur_val(
cpumask_of(cpu)), data);
/* Hardware disagrees with the cache: the saved data is stale. */
389 if (freq != cached_freq) {
/*
 * check_freqs() - used in acpi_pstate_strict mode: poll the hardware
 * (bounded at 100 iterations) until the CPUs in @mask report the
 * requested @freq; the loop exits early on a match.
 */
402 static unsigned int check_freqs(
const struct cpumask *mask,
unsigned int freq,
405 unsigned int cur_freq;
/* Bounded retry loop: re-read and re-decode the current frequency. */
408 for (i = 0; i < 100; i++) {
409 cur_freq = extract_freq(get_cur_val(mask), data);
410 if (cur_freq == freq)
/*
 * acpi_cpufreq_target() - ->target() hook: transition @policy toward
 * @target_freq under @relation.  Looks up the next P-state index, skips
 * the hardware write when already there (except right after resume,
 * when the transition must be replayed), performs the write, optionally
 * verifies it (acpi_pstate_strict), and records the new state.
 */
418 unsigned int target_freq,
unsigned int relation)
425 unsigned int next_perf_state = 0;
429 pr_debug(
"acpi_cpufreq_target %d (%d)\n", target_freq, policy->
cpu);
/* Map target_freq + relation onto a frequency-table index. */
440 relation, &next_state);
/* Already in the requested P-state ... */
447 if (perf->
state == next_perf_state) {
/* ... unless we were just resumed: the hardware may have been
 * reset by firmware, so force the transition anyway. */
449 pr_debug(
"Called after resume, resetting to P%d\n",
453 pr_debug(
"Already at target state (P%d)\n",
/* Record the old frequency (table stores MHz, cpufreq uses kHz). */
487 freqs.old = perf->
states[perf->
state].core_frequency * 1000;
/* Strict mode: confirm the hardware actually reached the new state. */
496 if (acpi_pstate_strict) {
497 if (!check_freqs(cmd.
mask, freqs.new, data)) {
498 pr_debug(
"acpi_cpufreq_target failed (%d)\n",
/* Success: remember which P-state we are now in. */
509 perf->
state = next_perf_state;
/* (acpi_cpufreq_guess_freq) - estimate the current frequency by walking
 * the ACPI state table; falls back to the first (fastest) entry.  Table
 * values are in MHz, hence the * 1000 conversions to kHz. */
533 unsigned long freqn = perf->
states[0].core_frequency * 1000;
537 freqn = perf->
states[i+1].core_frequency * 1000;
/* No table entry matched: default to state 0. */
548 return perf->
states[0].core_frequency * 1000;
/* free_acpi_perf_data() - release the percpu ACPI performance data (and
 * any per-CPU masks) allocated by acpi_cpufreq_early_init(). */
552 static void free_acpi_perf_data(
void)
/*
 * (boost_notify) - CPU hotplug callback keeping boost MSRs coherent:
 * a CPU coming online inherits the current boost_enabled setting, while
 * one going down is set back to boost-allowed (the hardware default).
 */
566 unsigned cpu = (
long)hcpu;
569 cpumask = get_cpu_mask(cpu);
/* CPU coming online: apply the administrator-chosen boost state. */
581 boost_set_msrs(boost_enabled, cpumask);
/* CPU going down: re-enable boost so the hardware default holds. */
586 boost_set_msrs(1, cpumask);
/* Hotplug notifier wiring for the callback above. */
598 .notifier_call = boost_notify,
/*
 * acpi_cpufreq_early_init() - allocate the percpu ACPI performance
 * structures (and per-CPU cpumasks) before driver registration;
 * unwinds with free_acpi_perf_data() on partial failure.
 */
609 static int __init acpi_cpufreq_early_init(
void)
612 pr_debug(
"acpi_cpufreq_early_init\n");
615 if (!acpi_perf_data) {
616 pr_debug(
"Memory allocation error for acpi_perf_data.\n");
/* Allocate each CPU's mask on that CPU's home NUMA node. */
620 if (!zalloc_cpumask_var_node(
/* Partial allocation failed: release everything done so far. */
625 free_acpi_perf_data();
/* Some BIOSes report SW_ANY coordination but actually require all CPUs
 * to switch frequency together.  The DMI entry below matches a
 * known-affected machine; the callback latches the quirk flag. */
642 static int bios_with_sw_any_bug;
646 bios_with_sw_any_bug = 1;
653 .ident =
"Supermicro Server X6DLP",
/*
 * acpi_cpufreq_blacklist() - refuse to drive CPUs with known-fatal
 * errata.  The visible check targets family 15 (Xeon 7100 series,
 * erratum AL30: possible lockup on frequency change).
 */
663 static int acpi_cpufreq_blacklist(
struct cpuinfo_x86 *
c)
671 if ((c->
x86 == 15) &&
675 "Xeon(R) 7100 Errata AL30, processors may "
676 "lock up on frequency changes: disabling "
/*
 * (acpi_cpufreq_cpu_init) - ->init() hook: register ACPI performance
 * data for @policy->cpu, apply coordination quirks, choose the
 * register-access method, build the frequency table, and seed
 * policy->cur with the current frequency.
 */
688 unsigned int valid_states = 0;
689 unsigned int cpu = policy->
cpu;
691 unsigned int result = 0;
/* Evaluated once; the blacklist verdict applies to every CPU. */
695 static int blacklisted;
698 pr_debug(
"acpi_cpufreq_cpu_init\n");
703 blacklisted = acpi_cpufreq_blacklist(c);
/* BIOS quirk: SW_ANY reported but all-CPUs coordination required --
 * widen the affected-CPUs mask to the whole core/package. */
737 if (bios_with_sw_any_bug && cpumask_weight(policy->
cpus) == 1) {
739 cpumask_copy(policy->
cpus, cpu_core_mask(cpu))
/* AMD hardware P-states: cores switch independently, so (unless strict
 * checking needs the wider mask) restrict policy->cpus to this CPU. */
742 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
743 cpumask_clear(policy->
cpus);
744 cpumask_set_cpu(cpu, policy->
cpus);
745 cpumask_copy(policy->
related_cpus, cpu_sibling_mask(cpu));
/* Select the register-access flavour for this CPU. */
770 if (check_est_cpu(cpu)) {
774 if (check_amd_hwpstate_cpu(cpu)) {
/* Transition latency: take the worst case over all P-states
 * (ACPI reports microseconds; * 1000 converts to nanoseconds). */
795 policy->
cpuinfo.transition_latency = 0;
797 if ((perf->
states[i].transition_latency * 1000) >
798 policy->
cpuinfo.transition_latency)
799 policy->
cpuinfo.transition_latency =
800 perf->
states[
i].transition_latency * 1000;
/* Cap implausible BIOS-reported latencies at 20 us. */
805 policy->
cpuinfo.transition_latency > 20 * 1000) {
806 policy->
cpuinfo.transition_latency = 20 * 1000;
808 "P-state transition latency capped at 20 uS\n");
/* Build freq_table, skipping states not slower than the previous
 * valid entry so the table stays strictly descending. */
813 if (i > 0 && perf->
states[i].core_frequency >=
814 data->
freq_table[valid_states-1].frequency / 1000
819 perf->
states[
i].core_frequency * 1000;
/* Sanity check: table's fastest state should match cpuinfo.max_freq. */
829 if (perf->
states[0].core_frequency * 1000 != policy->
cpuinfo.max_freq)
/* Seed the current frequency: estimate it when the hardware cannot be
 * read directly, otherwise read it and install the ->get() hook. */
835 policy->
cur = acpi_cpufreq_guess_freq(data, policy->
cpu);
838 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
839 policy->
cur = get_cur_freq_on_cpu(cpu);
852 pr_debug(
"CPU%u - ACPI performance management activated.\n", cpu);
/* Dump the P-state table; '*' marks the current state. */
854 pr_debug(
" %cP%d: %d MHz, %d mW, %d uS\n",
855 (i == perf->
state ?
'*' :
' '), i,
858 (
u32) perf->
states[i].transition_latency);
/* (acpi_cpufreq_cpu_exit) - ->exit() hook: tear down the per-policy
 * state created by the init hook. */
885 pr_debug(
"acpi_cpufreq_cpu_exit\n");
/* Per-policy sysfs attributes exported by this driver (NULL-terminated;
 * a "cpb" entry may be appended at init time, see acpi_cpufreq_init). */
910 static struct freq_attr *acpi_cpufreq_attr[] = {
/* cpufreq_driver operations table tying together the hooks above. */
917 .verify = acpi_cpufreq_verify,
918 .target = acpi_cpufreq_target,
920 .init = acpi_cpufreq_cpu_init,
921 .exit = acpi_cpufreq_cpu_exit,
922 .resume = acpi_cpufreq_resume,
923 .name =
"acpi-cpufreq",
925 .attr = acpi_cpufreq_attr,
/*
 * acpi_cpufreq_boost_init() - detect boost capability, sync all online
 * CPUs to CPU 0's current hardware boost state, register the hotplug
 * notifier, and expose the global "boost" sysfs file (made read-only
 * when the platform cannot toggle boost).
 */
928 static void __init acpi_cpufreq_boost_init(
void)
936 boost_supported =
true;
/* Use CPU 0's current hardware setting as the initial global state. */
937 boost_enabled = boost_state(0);
/* Force all online CPUs to agree with that state. */
942 boost_set_msrs(boost_enabled, cpu_online_mask);
/* Keep late-arriving CPUs coherent via the hotplug notifier. */
944 register_cpu_notifier(&boost_nb);
/* Toggling unsupported: expose the attribute read-only (0444). */
948 global_boost.
attr.mode = 0444;
954 pr_warn(
PFX "could not register global boost sysfs file\n");
956 pr_debug(
"registered global boost sysfs file\n");
/* acpi_cpufreq_boost_exit() - undo acpi_cpufreq_boost_init():
 * unregister the hotplug notifier (and related boost resources). */
959 static void __exit acpi_cpufreq_boost_exit(
void)
964 unregister_cpu_notifier(&boost_nb);
/*
 * acpi_cpufreq_init() - module entry point: allocate the percpu data,
 * optionally splice the legacy "cpb" attribute into the attribute array
 * for AMD hw-pstate CPUs (CONFIG_X86_ACPI_CPUFREQ_CPB), register the
 * cpufreq driver, and finally initialise boost support.
 */
971 static int __init acpi_cpufreq_init(
void)
980 ret = acpi_cpufreq_early_init();
984 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
991 if (check_amd_hwpstate_cpu(0)) {
994 pr_debug(
"adding sysfs entry for cpb\n");
/* Walk to the NULL terminator of acpi_cpufreq_attr ... */
996 for (iter = acpi_cpufreq_attr; *iter !=
NULL; iter++)
/* ... and append "cpb" only if a free slot remains before the end. */
1000 if (iter[1] ==
NULL)
/* Driver registration failed: release the early allocations. */
1007 free_acpi_perf_data();
1009 acpi_cpufreq_boost_init();
/* acpi_cpufreq_exit() - module teardown: boost cleanup, driver
 * unregistration, and release of the percpu performance data. */
1014 static void __exit acpi_cpufreq_exit(
void)
1018 acpi_cpufreq_boost_exit();
1022 free_acpi_perf_data();
/* Tail of the MODULE_PARM_DESC() for the "acpi_pstate_strict"
 * parameter declared near the top of the file. */
1027 "value 0 or non-zero. non-zero -> strict ACPI checks are "
1028 "performed during frequency changes.");