[linux-6.6.y] update ITMT support patch for Zhaoxin CPUs #530

Merged
2 changes: 0 additions & 2 deletions arch/x86/kernel/itmt.c
@@ -122,7 +122,6 @@ int sched_set_itmt_support(void)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sched_set_itmt_support);
 
 /**
  * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -182,4 +181,3 @@ void sched_set_itmt_core_prio(int prio, int cpu)
 {
 	per_cpu(sched_core_priority, cpu) = prio;
 }
-EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
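Side note on the two itmt.c hunks above (not part of the patch): removing an EXPORT_SYMBOL_GPL() only affects loadable modules; built-in callers still link normally, but any out-of-tree or tristate user of these symbols no longer resolves. A minimal hypothetical module, for illustration only:

/* hypothetical out-of-tree module, for illustration only */
#include <linux/module.h>
#include <asm/topology.h>	/* declares sched_set_itmt_support() on x86 */

static int __init itmt_demo_init(void)
{
	/*
	 * After this patch the symbol is no longer exported, so a
	 * modular (=m) build of this file fails at modpost with
	 * something like:
	 *   ERROR: modpost: "sched_set_itmt_support" [itmt_demo.ko] undefined!
	 * Built-in callers are unaffected.
	 */
	return sched_set_itmt_support();
}
module_init(itmt_demo_init);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ITMT export illustration");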
100 changes: 54 additions & 46 deletions drivers/cpufreq/acpi-cpufreq.c
@@ -627,36 +627,46 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 }
 #endif
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-static bool cppc_highest_perf_diff;
-static struct cpumask core_prior_mask;
-
-static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void sched_itmt_work_fn(struct work_struct *work)
 {
-	struct cppc_perf_caps perf_caps;
-	int ret;
+	sched_set_itmt_support();
+}
 
-	ret = cppc_get_perf_caps(cpu, &perf_caps);
-	if (ret) {
-		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
-		return;
-	}
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		*highest_perf = amd_get_highest_perf();
-	else
-		*highest_perf = perf_caps.highest_perf;
+static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);
 
-	*nominal_perf = perf_caps.nominal_perf;
+static void sched_set_itmt(void)
+{
+	schedule_work(&sched_itmt_work);
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
 static u64 get_max_boost_ratio(unsigned int cpu)
 {
+	struct cppc_perf_caps perf_caps;
 	u64 highest_perf, nominal_perf;
+	int ret;
 
 	if (acpi_pstate_strict)
 		return 0;
 
-	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
+		return 0;
+
+	ret = cppc_get_perf_caps(cpu, &perf_caps);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+			 cpu, ret);
+		return 0;
+	}
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		highest_perf = amd_get_highest_perf();
+	else
+		highest_perf = perf_caps.highest_perf;
+
+	nominal_perf = perf_caps.nominal_perf;
 
 	if (!highest_perf || !nominal_perf) {
 		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -671,23 +681,22 @@ static u64 get_max_boost_ratio(unsigned int cpu)
 	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
 }
 
-/* The work item is needed to avoid CPU hotplug locking issues */
-static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
-{
-	sched_set_itmt_support();
-}
-
-static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);
+static bool cppc_highest_perf_diff;
+static struct cpumask core_prio_cpumask;
 
-static void cpufreq_set_itmt_prio(int cpu)
+static void core_set_itmt_prio(int cpu)
 {
-	u64 highest_perf, nominal_perf;
+	u64 highest_perf = 0;
+	int ret = 0;
 	static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;
 
-	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
-
+	ret = cppc_get_highest_perf(cpu, &highest_perf);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
+		return;
+	}
 	sched_set_itmt_core_prio(highest_perf, cpu);
-	cpumask_set_cpu(cpu, &core_prior_mask);
+	cpumask_set_cpu(cpu, &core_prio_cpumask);
 
 	if (max_highest_perf <= min_highest_perf) {
 		if (highest_perf > max_highest_perf)
@@ -696,25 +705,24 @@ static void cpufreq_set_itmt_prio(int cpu)
 		if (highest_perf < min_highest_perf)
 			min_highest_perf = highest_perf;
 
-		if (max_highest_perf > min_highest_perf) {
-			/*
-			 * This code can be run during CPU online under the
-			 * CPU hotplug locks, so sched_set_itmt_support()
-			 * cannot be called from here. Queue up a work item
-			 * to invoke it.
-			 */
+		if (max_highest_perf > min_highest_perf)
 			cppc_highest_perf_diff = true;
-		}
 	}
 
-	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
-		pr_debug("queue a work to set itmt enabled\n");
-		schedule_work(&sched_itmt_work);
+	if (cppc_highest_perf_diff && cpumask_equal(&core_prio_cpumask, cpu_online_mask)) {
+		/*
+		 * This code can be run during CPU online under the CPU hotplug locks,
+		 * so sched_set_itmt cannot be called from here.
+		 * queue a work item to invoke it
+		 */
+		pr_debug("queue a work to set itmt support and enable\n");
+		sched_set_itmt();
 	}
 }
 
 #else
 static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
-static void cpufreq_set_itmt_prio(int cpu) { }
+static void core_set_itmt_prio(int cpu) {}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -727,7 +735,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int valid_states = 0;
 	unsigned int result = 0;
 	u64 max_boost_ratio;
-	unsigned int i, j;
+	unsigned int i, j = 0;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
@@ -791,10 +799,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		pr_info_once("overriding BIOS provided _PSD data\n");
 	}
 #endif
+
 	if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
-		for_each_cpu(j, policy->cpus) {
-			cpufreq_set_itmt_prio(j);
-		}
+		for_each_cpu(j, policy->cpus)
+			core_set_itmt_prio(j);
 	}
 
 	/* capability check */
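Taken together, the updated bring-up path is: acpi_cpufreq_cpu_init() runs core_set_itmt_prio() for every CPU in the policy; that ranks the CPU by its CPPC highest-perf value via sched_set_itmt_core_prio() and records it in core_prio_cpumask; once every online CPU has been ranked and at least two priorities differ, the work item defers the actual sched_set_itmt_support() call out from under the hotplug lock. The sketch below (plain user-space C with hypothetical perf values, not kernel code) isolates just the asymmetry test that gates enabling ITMT:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical CPPC highest_perf readings, one per core */
	uint64_t highest_perf[] = { 180, 180, 225, 225 };
	size_t n = sizeof(highest_perf) / sizeof(highest_perf[0]);
	uint64_t max_hp = 0, min_hp = UINT64_MAX;

	/* running max/min over per-core highest_perf, loosely mirroring
	 * what core_set_itmt_prio() accumulates as CPUs come online */
	for (size_t i = 0; i < n; i++) {
		if (highest_perf[i] > max_hp)
			max_hp = highest_perf[i];
		if (highest_perf[i] < min_hp)
			min_hp = highest_perf[i];
	}

	/* ITMT is only worth enabling when core priorities actually differ */
	if (max_hp > min_hp)
		printf("asymmetric cores (min=%llu, max=%llu): enable ITMT\n",
		       (unsigned long long)min_hp, (unsigned long long)max_hp);
	else
		printf("uniform cores: leave ITMT off\n");
	return 0;
}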