sched/fair: Merge for each idle cpu loop of ILB

[ Upstream commit 7a82e5f52a3506bc35a4dc04d53ad2c9daf82e7f ]

Remove the special case that handles this_cpu outside the for_each_cpu()
loop when running the ILB. Instead, use for_each_cpu_wrap() and start with
the next cpu after this_cpu, so the loop finishes with this_cpu itself.
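
As a minimal illustration of the resulting visit order (plain userspace C,
not kernel code: the bitmask, NR_CPUS and the modulo scan are stand-ins for
struct cpumask and the real for_each_cpu_wrap()):

	#include <stdio.h>

	#define NR_CPUS 8

	int main(void)
	{
		unsigned int idle_mask = 0xb5;	/* CPUs 0,2,4,5,7 idle (made-up example) */
		int this_cpu = 4;
		int i, cpu;

		/* Wrap-around scan starting at this_cpu + 1, like for_each_cpu_wrap(). */
		for (i = 0; i < NR_CPUS; i++) {
			cpu = (this_cpu + 1 + i) % NR_CPUS;
			if (!(idle_mask & (1u << cpu)))
				continue;
			/* visits 5, 7, 0, 2 and this_cpu (4) last */
			printf("balance cpu %d\n", cpu);
		}
		return 0;
	}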

update_nohz_stats() is now used for this_cpu too and will prevent
unnecessary updates. We no longer need a special case to update
nohz.next_balance for this_cpu, because the loop now handles it like every
other CPU.
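
For context on why reusing it for this_cpu is cheap: update_nohz_stats()
returns early when there is no blocked load to decay or when the last
update is recent. A paraphrased sketch of its shape around this series (not
a drop-in copy; exact checks vary between kernel versions):

	static bool update_nohz_stats(struct rq *rq)
	{
		unsigned int cpu = rq->cpu;

		/* Nothing to decay on this rq: skip the update entirely. */
		if (!rq->has_blocked_load)
			return false;

		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
			return false;

		/* Updated recently enough: keep the stats, do no work now. */
		if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
			return true;

		update_blocked_averages(cpu);

		return rq->has_blocked_load;
	}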

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-5-vincent.guittot@linaro.org
Stable-dep-of: ff47a0acfcce ("sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy")
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10430,22 +10430,9 @@ out:
 	 * When the cpu is attached to null domain for ex, it will not be
 	 * updated.
 	 */
-	if (likely(update_next_balance)) {
+	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
 
-#ifdef CONFIG_NO_HZ_COMMON
-		/*
-		 * If this CPU has been elected to perform the nohz idle
-		 * balance. Other idle CPUs have already rebalanced with
-		 * nohz_idle_balance() and nohz.next_balance has been
-		 * updated accordingly. This CPU is now running the idle load
-		 * balance for itself and we need to update the
-		 * nohz.next_balance accordingly.
-		 */
-		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
-			nohz.next_balance = rq->next_balance;
-#endif
-	}
 }
 
 static inline int on_null_domain(struct rq *rq)
@@ -10791,8 +10778,12 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	 */
 	smp_mb();
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
+	/*
+	 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
+	 * chance for other idle cpu to pull load.
+	 */
+	for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
+		if (!idle_cpu(balance_cpu))
 			continue;
 
 		/*
@@ -10838,15 +10829,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 
-	/* Newly idle CPU doesn't need an update */
-	if (idle != CPU_NEWLY_IDLE) {
-		update_blocked_averages(this_cpu);
-		has_blocked_load |= this_rq->has_blocked_load;
-	}
-
-	if (flags & NOHZ_BALANCE_KICK)
-		rebalance_domains(this_rq, CPU_IDLE);
-
 	WRITE_ONCE(nohz.next_blocked,
 		   now + msecs_to_jiffies(LOAD_AVG_PERIOD));
 