diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 3235ba1e5b06..4dce10c0ac96 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -184,6 +184,9 @@ extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
 /* Interface to set priority of a cpu */
 void sched_set_itmt_core_prio(int prio, int core_cpu);
 
+/* Interface to enable ITMT settings in the scheduler */
+void sched_set_itmt(void);
+
 /* Interface to notify scheduler that system supports ITMT */
 int sched_set_itmt_support(void);
 
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index b49ac8ecbbd6..be88e98a9966 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -122,7 +122,6 @@ int sched_set_itmt_support(void)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sched_set_itmt_support);
 
 /**
  * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -183,3 +182,17 @@ void sched_set_itmt_core_prio(int prio, int cpu)
 	per_cpu(sched_core_priority, cpu) = prio;
 }
 EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void sched_itmt_work_fn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);
+
+void sched_set_itmt(void)
+{
+	schedule_work(&sched_itmt_work);
+}
+EXPORT_SYMBOL_GPL(sched_set_itmt);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d87f9dc537f0..cffbf66670ad 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -628,35 +628,32 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 #endif
 
 #ifdef CONFIG_ACPI_CPPC_LIB
-static bool cppc_highest_perf_diff;
-static struct cpumask core_prior_mask;
-
-static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
+static u64 get_max_boost_ratio(unsigned int cpu)
 {
 	struct cppc_perf_caps perf_caps;
+	u64 highest_perf, nominal_perf;
 	int ret;
 
+	if (acpi_pstate_strict)
+		return 0;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
+		return 0;
+
 	ret = cppc_get_perf_caps(cpu, &perf_caps);
 	if (ret) {
-		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
-		return;
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+			 cpu, ret);
+		return 0;
 	}
+
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		*highest_perf = amd_get_highest_perf();
+		highest_perf = amd_get_highest_perf();
 	else
-		*highest_perf = perf_caps.highest_perf;
+		highest_perf = perf_caps.highest_perf;
 
-	*nominal_perf = perf_caps.nominal_perf;
-}
-
-static u64 get_max_boost_ratio(unsigned int cpu)
-{
-	u64 highest_perf, nominal_perf;
-
-	if (acpi_pstate_strict)
-		return 0;
-
-	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
+	nominal_perf = perf_caps.nominal_perf;
 
 	if (!highest_perf || !nominal_perf) {
 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -671,23 +668,16 @@ static u64 get_max_boost_ratio(unsigned int cpu)
 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
 }
 
-/* The work item is needed to avoid CPU hotplug locking issues */
-static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
-{
-	sched_set_itmt_support();
-}
-
-static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);
-
-static void cpufreq_set_itmt_prio(int cpu)
+static bool cppc_highest_perf_diff;
+static struct cpumask core_prio_cpumask;
+static void core_set_itmt_prio(int cpu)
 {
-	u64 highest_perf, nominal_perf;
+	u64 highest_perf;
 	static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;
 
-	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
-
+	cppc_get_highest_perf(cpu, &highest_perf);
 	sched_set_itmt_core_prio(highest_perf, cpu);
-	cpumask_set_cpu(cpu, &core_prior_mask);
+	cpumask_set_cpu(cpu, &core_prio_cpumask);
 
 	if (max_highest_perf <= min_highest_perf) {
 		if (highest_perf > max_highest_perf)
@@ -696,25 +686,22 @@ static void cpufreq_set_itmt_prio(int cpu)
 		if (highest_perf < min_highest_perf)
 			min_highest_perf = highest_perf;
 
-		if (max_highest_perf > min_highest_perf) {
-			/*
-			 * This code can be run during CPU online under the
-			 * CPU hotplug locks, so sched_set_itmt_support()
-			 * cannot be called from here. Queue up a work item
-			 * to invoke it.
-			 */
+		if (max_highest_perf > min_highest_perf)
 			cppc_highest_perf_diff = true;
-		}
 	}
-
-	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
-		pr_debug("queue a work to set itmt enabled\n");
-		schedule_work(&sched_itmt_work);
+	if (cppc_highest_perf_diff && cpumask_equal(&core_prio_cpumask, cpu_online_mask)) {
+		/*
+		 * This code can be run during CPU online under the CPU
+		 * hotplug locks, so sched_set_itmt() cannot be called
+		 * from here. Queue a work item to invoke it.
+		 */
+		pr_debug("queue work to set ITMT support and enable it\n");
+		sched_set_itmt();
 	}
 }
 #else
 static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
-static void cpufreq_set_itmt_prio(int cpu) { }
+static void core_set_itmt_prio(int cpu) {}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -791,11 +778,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		pr_info_once("overriding BIOS provided _PSD data\n");
 	}
 #endif
-	if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
-		for_each_cpu(j, policy->cpus) {
-			cpufreq_set_itmt_prio(j);
-		}
-	}
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
+		for_each_cpu(j, policy->cpus)
+			core_set_itmt_prio(j);
 
 	/* capability check */
 	if (perf->state_count <= 1) {
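For reference, a minimal, hypothetical sketch (not part of the patch) of how a cpufreq driver is expected to consume the interfaces above: each CPU's CPPC highest_perf value is registered as its ITMT core priority, and once every online CPU has one, sched_set_itmt() is called, deferring the actual enable to a work item because this path can run under the CPU hotplug locks. The mask itmt_prio_done_mask and the function example_set_itmt_prio() are illustrative names only.

#include <linux/cpumask.h>
#include <asm/topology.h>
#include <acpi/cppc_acpi.h>

/* Illustrative only: tracks which CPUs already have an ITMT priority. */
static struct cpumask itmt_prio_done_mask;

static void example_set_itmt_prio(int cpu)
{
	u64 highest_perf = 0;

	/* The CPPC highest_perf value doubles as the ITMT core priority. */
	if (cppc_get_highest_perf(cpu, &highest_perf))
		return;

	sched_set_itmt_core_prio(highest_perf, cpu);
	cpumask_set_cpu(cpu, &itmt_prio_done_mask);

	/*
	 * Enable ITMT only once all online CPUs have a priority;
	 * sched_set_itmt() schedules sched_itmt_work rather than
	 * calling sched_set_itmt_support() directly, since this
	 * path may run under the CPU hotplug locks.
	 */
	if (cpumask_equal(&itmt_prio_done_mask, cpu_online_mask))
		sched_set_itmt();
}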