alinux: sched: Fix regression caused by nr_uninterruptible
fix #27788368

Per-cgroup nr_uninterruptible tracking leads to a huge performance regression
in hackbench. This patch deletes the nr_uninterruptible-related code for now,
to address the performance regression.

Fixes: 9410d31 ("alinux: cpuacct: Export nr_running & nr_uninterruptible")
Fixes: 36da4fe ("alinux: sched: Maintain "nr_uninterruptible" in runqueue")
Signed-off-by: Yihao Wu <[email protected]>
Acked-by: Shanpei Chen <[email protected]>
Yihao Wu committed May 26, 2020
1 parent 1d6103a commit 1607a48
Showing 5 changed files with 3 additions and 155 deletions.
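
Before the per-file diffs, a note on why the reverted accounting was expensive: the removed hooks updated one counter per cgroup level on every uninterruptible sleep and wakeup, squarely on the scheduler's hottest path, while the counter that survives the revert is a single per-runqueue field. The sketch below is illustrative only; it uses simplified, hypothetical types and function names (toy_cfs_rq, toy_rq, per_cgroup_update, per_rq_update), not the kernel's real data structures, and is meant to show why a wakeup-heavy benchmark such as hackbench is sensitive to the extra hierarchy walk.

/*
 * Illustrative sketch, not kernel code. The removed hooks behaved like
 * per_cgroup_update(): one counter touched per hierarchy level, i.e.
 * O(cgroup depth) on every activate/deactivate. The retained
 * rq->nr_uninterruptible update behaves like per_rq_update(): O(1).
 */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;        /* NULL at the root group */
	unsigned long nr_uninterruptible; /* per-cgroup counter (removed) */
};

struct toy_rq {
	unsigned long nr_uninterruptible; /* per-runqueue counter (kept) */
};

/* Removed scheme: walk every cgroup level on each sleep/wakeup. */
void per_cgroup_update(struct toy_cfs_rq *cfs_rq, long inc)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent)
		cfs_rq->nr_uninterruptible += inc;
}

/* Kept scheme: one counter, one update, constant cost. */
void per_rq_update(struct toy_rq *rq, long inc)
{
	rq->nr_uninterruptible += inc;
}
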
28 changes: 3 additions & 25 deletions kernel/sched/core.c
@@ -744,28 +744,18 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
p->sched_class->dequeue_task(rq, p, flags);
}

-static void update_nr_uninterruptible(struct task_struct *tsk, long inc)
-{
-if (tsk->sched_class->update_nr_uninterruptible)
-tsk->sched_class->update_nr_uninterruptible(tsk, inc);
-}
-
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
-if (task_contributes_to_load(p)) {
-update_nr_uninterruptible(p, -1);
+if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
-}

enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
-if (task_contributes_to_load(p)) {
-update_nr_uninterruptible(p, 1);
+if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
-}

dequeue_task(rq, p, flags);
}
@@ -1700,10 +1690,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
-if (p->sched_contributes_to_load) {
-update_nr_uninterruptible(p, -1);
+if (p->sched_contributes_to_load)
rq->nr_uninterruptible--;
-}

if (wake_flags & WF_MIGRATED)
en_flags |= ENQUEUE_MIGRATED;
@@ -6394,18 +6382,8 @@ void sched_move_task(struct task_struct *tsk)
if (running)
put_prev_task(rq, tsk);

-/* decrease old group */
-if ((!queued && task_contributes_to_load(tsk)) ||
-(tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
-update_nr_uninterruptible(tsk, -1);
-
sched_change_group(tsk, TASK_MOVE_GROUP);

-/* increase new group after change */
-if ((!queued && task_contributes_to_load(tsk)) ||
-(tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
-update_nr_uninterruptible(tsk, 1);
-
if (queued)
enqueue_task(rq, tsk, queue_flags);
if (running)
99 changes: 0 additions & 99 deletions kernel/sched/cpuacct.c
@@ -358,95 +358,6 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
struct task_group, css);
}

-static inline unsigned long nr_uninterruptible(void)
-{
-unsigned long i, sum = 0;
-
-for_each_possible_cpu(i)
-sum += cpu_rq(i)->nr_uninterruptible;
-
-/*
-* Since we read the counters lockless, it might be slightly
-* inaccurate. Do not allow it to go below zero though:
-*/
-if (unlikely((long)sum < 0))
-sum = 0;
-
-return sum;
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
-{
-return tg->cfs_rq[cpu]->throttle_count;
-}
-#else
-static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
-{
-return false;
-}
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static inline bool tg_rt_throttled(struct task_group *tg, int cpu)
-{
-return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted;
-}
-#endif
-
-static unsigned long ca_running(struct cpuacct *ca, int cpu)
-{
-unsigned long nr_running = 0;
-struct cgroup *cgrp = ca->css.cgroup;
-struct task_group *tg;
-
-/* Make sure it is only called for non-root cpuacct */
-if (ca == &root_cpuacct)
-return 0;
-
-rcu_read_lock();
-tg = cgroup_tg(cgrp);
-if (unlikely(!tg))
-goto out;
-
-if (!tg_cfs_throttled(tg, cpu))
-nr_running += tg->cfs_rq[cpu]->h_nr_running;
-#ifdef CONFIG_RT_GROUP_SCHED
-if (!tg_rt_throttled(tg, cpu))
-nr_running += tg->rt_rq[cpu]->rt_nr_running;
-#endif
-/* SCHED_DEADLINE doesn't support cgroup yet */
-
-out:
-rcu_read_unlock();
-return nr_running;
-}
-
-static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu)
-{
-unsigned long nr = 0;
-struct cgroup *cgrp = ca->css.cgroup;
-struct task_group *tg;
-
-/* Make sure it is only called for non-root cpuacct */
-if (ca == &root_cpuacct)
-return nr;
-
-rcu_read_lock();
-tg = cgroup_tg(cgrp);
-if (unlikely(!tg))
-goto out_rcu_unlock;
-
-nr = tg->cfs_rq[cpu]->nr_uninterruptible;
-#ifdef CONFIG_RT_GROUP_SCHED
-nr += tg->rt_rq[cpu]->nr_uninterruptible;
-#endif
-
-out_rcu_unlock:
-rcu_read_unlock();
-return nr;
-}
-
void cgroup_idle_start(struct sched_entity *se)
{
unsigned long flags;
@@ -625,7 +536,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
u64 user, nice, system, idle, iowait, irq, softirq, steal, guest;
u64 nr_migrations = 0;
struct cpuacct_alistats *alistats;
-unsigned long nr_run = 0, nr_uninter = 0;
int cpu;

user = nice = system = idle = iowait =
@@ -656,8 +566,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)

alistats = per_cpu_ptr(ca->alistats, cpu);
nr_migrations += alistats->nr_migrations;
-nr_run += ca_running(ca, cpu);
-nr_uninter += ca_uninterruptible(ca, cpu);
}
} else {
struct kernel_cpustat *kcpustat;
@@ -677,9 +585,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
alistats = per_cpu_ptr(ca->alistats, cpu);
nr_migrations += alistats->nr_migrations;
}
-
-nr_run = nr_running();
-nr_uninter = nr_uninterruptible();
}

seq_printf(sf, "user %lld\n", nsec_to_clock_t(user));
@@ -692,10 +597,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal));
seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest));

-seq_printf(sf, "nr_running %lld\n", (u64)nr_run);
-if ((long) nr_uninter < 0)
-nr_uninter = 0;
-seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter);
seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations);

return 0;
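
A side note on the cpuacct code removed above: the deleted nr_uninterruptible() helper summed per-CPU counters without locking and clamped the result at zero, as its comment explains. The clamp is needed because a task can go to sleep on one CPU and be woken on another, so increments and decrements land on different per-CPU counters and a racy snapshot of the sum can transiently come out negative. A minimal sketch of that pattern, with hypothetical names and a fixed toy CPU count rather than the kernel API:

#define TOY_NR_CPUS 4

/*
 * Each per-CPU counter may individually go negative: a sleep accounted
 * on CPU A may be undone by a wakeup handled on CPU B.
 */
static long toy_counter[TOY_NR_CPUS];

unsigned long toy_nr_uninterruptible(void)
{
	long sum = 0;
	int i;

	/* Lockless sum: the snapshot may be slightly skewed. */
	for (i = 0; i < TOY_NR_CPUS; i++)
		sum += toy_counter[i];

	/* A skewed snapshot can dip below zero; never report that. */
	return sum < 0 ? 0 : (unsigned long)sum;
}
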
11 changes: 0 additions & 11 deletions kernel/sched/fair.c
@@ -10311,16 +10311,6 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
return rr_interval;
}

-#ifdef CONFIG_SCHED_SLI
-static void update_nr_uninterruptible_fair(struct task_struct *p, long inc)
-{
-struct sched_entity *se = &p->se;
-
-for_each_sched_entity(se)
-cfs_rq_of(se)->nr_uninterruptible += inc;
-}
-#endif
-
/*
* All the scheduling class methods:
*/
@@ -10364,7 +10354,6 @@ const struct sched_class fair_sched_class = {
#endif

#ifdef CONFIG_SCHED_SLI
-.update_nr_uninterruptible = update_nr_uninterruptible_fair,
.update_nr_iowait = update_nr_iowait_fair,
#endif
};
14 changes: 0 additions & 14 deletions kernel/sched/rt.c
@@ -2374,16 +2374,6 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
return 0;
}

-#ifdef CONFIG_SCHED_SLI
-static void update_nr_uninterruptible_rt(struct task_struct *p, long inc)
-{
-struct sched_rt_entity *se = &p->rt;
-
-for_each_sched_rt_entity(se)
-rt_rq_of_se(se)->nr_uninterruptible += inc;
-}
-#endif
-
const struct sched_class rt_sched_class = {
.next = &fair_sched_class,
.enqueue_task = enqueue_task_rt,
@@ -2414,10 +2404,6 @@ const struct sched_class rt_sched_class = {
.switched_to = switched_to_rt,

.update_curr = update_curr_rt,
-
-#ifdef CONFIG_SCHED_SLI
-.update_nr_uninterruptible = update_nr_uninterruptible_rt,
-#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
6 changes: 0 additions & 6 deletions kernel/sched/sched.h
@@ -575,8 +575,6 @@ struct cfs_rq {
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

-unsigned long nr_uninterruptible;
-
ALI_HOTFIX_RESERVE(1)
ALI_HOTFIX_RESERVE(2)
ALI_HOTFIX_RESERVE(3)
@@ -627,8 +625,6 @@ struct rt_rq {
struct rq *rq;
struct task_group *tg;
#endif
-
-unsigned long nr_uninterruptible;
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
@@ -1675,8 +1671,6 @@ struct sched_class {
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_change_group)(struct task_struct *p, int type);
#endif
-
-void (*update_nr_uninterruptible)(struct task_struct *p, long inc);
void (*update_nr_iowait)(struct task_struct *p, long inc);
};

