Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[linux-6.6.y] update ITMT support patch for Zhaoxin CPUs #530

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions arch/x86/include/asm/topology.h
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,9 @@ extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
/* Interface to set priority of a cpu */
void sched_set_itmt_core_prio(int prio, int core_cpu);

/* Interface to enable ITMT settings in the scheduler */
void sched_set_itmt(void);

/* Interface to notify scheduler that system supports ITMT */
int sched_set_itmt_support(void);

Expand Down
15 changes: 14 additions & 1 deletion arch/x86/kernel/itmt.c
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,6 @@ int sched_set_itmt_support(void)

return 0;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_support);

/**
* sched_clear_itmt_support() - Revoke platform's support of ITMT
Expand Down Expand Up @@ -183,3 +182,17 @@ void sched_set_itmt_core_prio(int prio, int cpu)
per_cpu(sched_core_priority, cpu) = prio;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);

/*
 * The work item is needed to avoid CPU hotplug locking issues:
 * sched_set_itmt_support() cannot be called directly from contexts
 * holding the CPU hotplug locks, so the call is deferred to a
 * workqueue. NOTE(review): locking rationale taken from the original
 * comment and the caller-side comment in acpi-cpufreq.c; confirm
 * against sched_set_itmt_support()'s locking requirements.
 */
static void sched_itmt_work_fn(struct work_struct *work)
{
sched_set_itmt_support();
}

/* Pre-initialized work item that runs sched_itmt_work_fn() when queued. */
static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);

/**
 * sched_set_itmt() - Request that ITMT support be enabled in the scheduler.
 *
 * Queues a work item that invokes sched_set_itmt_support(), allowing
 * callers running under the CPU hotplug locks (e.g. during CPU online)
 * to request ITMT enablement without calling it directly. Repeated
 * calls while the work item is still pending are coalesced by
 * schedule_work().
 */
void sched_set_itmt(void)
{
schedule_work(&sched_itmt_work);
}
EXPORT_SYMBOL_GPL(sched_set_itmt);
87 changes: 37 additions & 50 deletions drivers/cpufreq/acpi-cpufreq.c
Original file line number Diff line number Diff line change
Expand Up @@ -628,35 +628,32 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
static bool cppc_highest_perf_diff;
static struct cpumask core_prior_mask;

static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
static u64 get_max_boost_ratio(unsigned int cpu)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
int ret;

if (acpi_pstate_strict)
return 0;

if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
return 0;

ret = cppc_get_perf_caps(cpu, &perf_caps);
if (ret) {
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
return;
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
cpu, ret);
return 0;
}

if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
*highest_perf = amd_get_highest_perf();
highest_perf = amd_get_highest_perf();
else
*highest_perf = perf_caps.highest_perf;
highest_perf = perf_caps.highest_perf;

*nominal_perf = perf_caps.nominal_perf;
}

static u64 get_max_boost_ratio(unsigned int cpu)
{
u64 highest_perf, nominal_perf;

if (acpi_pstate_strict)
return 0;

cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
nominal_perf = perf_caps.nominal_perf;

if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
Expand All @@ -671,23 +668,16 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}

/* The work item is needed to avoid CPU hotplug locking issues */
static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
{
sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);

static void cpufreq_set_itmt_prio(int cpu)
static bool cppc_highest_perf_diff;
static struct cpumask core_prio_cpumask;
static void core_set_itmt_prio(int cpu)
{
u64 highest_perf, nominal_perf;
u64 highest_perf;
static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;

cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);

cppc_get_highest_perf(cpu, &highest_perf);
sched_set_itmt_core_prio(highest_perf, cpu);
cpumask_set_cpu(cpu, &core_prior_mask);
cpumask_set_cpu(cpu, &core_prio_cpumask);

if (max_highest_perf <= min_highest_perf) {
if (highest_perf > max_highest_perf)
Expand All @@ -696,25 +686,22 @@ static void cpufreq_set_itmt_prio(int cpu)
if (highest_perf < min_highest_perf)
min_highest_perf = highest_perf;

if (max_highest_perf > min_highest_perf) {
/*
* This code can be run during CPU online under the
* CPU hotplug locks, so sched_set_itmt_support()
* cannot be called from here. Queue up a work item
* to invoke it.
*/
if (max_highest_perf > min_highest_perf)
cppc_highest_perf_diff = true;
}
}

if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
pr_debug("queue a work to set itmt enabled\n");
schedule_work(&sched_itmt_work);
if (cppc_highest_perf_diff && cpumask_equal(&core_prio_cpumask, cpu_online_mask)) {
/*
* This code can be run during CPU online under the CPU hotplug locks,
* so sched_set_itmt cannot be called from here.
* queue a work item to invoke it
*/
pr_debug("queue a work to set itmt support and enable\n");
sched_set_itmt();
}
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
static void cpufreq_set_itmt_prio(int cpu) { }
static void core_set_itmt_prio(int cpu) {}
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
Expand Down Expand Up @@ -791,11 +778,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_info_once("overriding BIOS provided _PSD data\n");
}
#endif
if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
for_each_cpu(j, policy->cpus) {
cpufreq_set_itmt_prio(j);
}
}

if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
for_each_cpu(j, policy->cpus)
core_set_itmt_prio(j);

/* capability check */
if (perf->state_count <= 1) {
Expand Down
Loading