sched/fair: Trigger the update of blocked load on newly idle cpu
[ Upstream commit c6f886546cb8a38617cdbe755fe50d3acd2463e4 ]

Instead of waking up a random and already idle CPU, we can take advantage
of this_cpu being about to enter idle to run the ILB and update the
blocked load.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-7-vincent.guittot@linaro.org
Stable-dep-of: ff47a0acfcce ("sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 9ae9714a14
commit ab620a407a

4 changed files with 35 additions and 4 deletions
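The change boils down to a per-CPU flag handoff: nohz_newidle_balance() publishes NOHZ_NEWILB_KICK with an atomic OR instead of kicking a remote idle CPU, and do_idle() consumes that bit with an atomic fetch-and-clear via nohz_run_idle_balance() just before this CPU enters idle. As a rough illustration of that pattern, here is a minimal userspace sketch using C11 atomics (not kernel code; REQ_NEWILB, request_blocked_load_update() and run_idle_balance() are made-up stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel's NOHZ_NEWILB_KICK bit. */
    #define REQ_NEWILB (1u << 2)

    static atomic_uint nohz_flags; /* per-CPU in the kernel; one global here */

    /* Roughly where nohz_newidle_balance() runs: publish the request. */
    static void request_blocked_load_update(void)
    {
        atomic_fetch_or(&nohz_flags, REQ_NEWILB);
    }

    /* Roughly where do_idle() calls nohz_run_idle_balance(): consume it. */
    static void run_idle_balance(void)
    {
        unsigned int old = atomic_fetch_and(&nohz_flags, ~REQ_NEWILB);

        /* Act only if the request was still pending on this CPU. */
        if (old == REQ_NEWILB)
            printf("updating blocked load locally\n");
    }

    int main(void)
    {
        request_blocked_load_update();
        run_idle_balance();  /* runs the update: the request was pending  */
        run_idle_balance();  /* does nothing: the request was consumed    */
        return 0;
    }

In the kernel the consume side uses atomic_fetch_andnot() and also checks need_resched(), as the kernel/sched/fair.c hunk below shows.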
kernel/sched/core.c
@@ -789,7 +789,7 @@ static void nohz_csd_func(void *info)
 	/*
 	 * Release the rq::nohz_csd.
 	 */
-	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
+	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
 	WARN_ON(!(flags & NOHZ_KICK_MASK));
 
 	rq->idle_balance = idle_cpu(cpu);
kernel/sched/fair.c
@@ -10864,6 +10864,24 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	return true;
 }
 
+/*
+ * Check if we need to run the ILB for updating blocked load before entering
+ * idle state.
+ */
+void nohz_run_idle_balance(int cpu)
+{
+	unsigned int flags;
+
+	flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
+
+	/*
+	 * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
+	 * (ie NOHZ_STATS_KICK set) and will do the same.
+	 */
+	if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
+		_nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE);
+}
+
 static void nohz_newidle_balance(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu;
@@ -10885,10 +10903,10 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	/*
-	 * Blocked load of idle CPUs need to be updated.
-	 * Kick an ILB to update statistics.
+	 * Set the need to trigger ILB in order to update blocked load
+	 * before entering idle state.
 	 */
-	kick_ilb(NOHZ_STATS_KICK);
+	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
 }
 
 #else /* !CONFIG_NO_HZ_COMMON */
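A note on the gate in nohz_run_idle_balance() above: it is an equality test rather than a bit test. The blocked-load pass runs only when NOHZ_NEWILB_KICK was the sole pending flag; if a kick bit is also set, a SCHED_SOFTIRQ-driven ILB is already queued and will refresh the blocked load anyway, and need_resched() bails out when the CPU is not actually heading into idle. A standalone sketch of that test, reusing the bit values from the kernel/sched/sched.h hunk further down (the helper name is illustrative):

    #include <assert.h>
    #include <stdbool.h>

    #define NOHZ_STATS_KICK  (1u << 1)
    #define NOHZ_NEWILB_KICK (1u << 2)

    /* Mirrors the "flags == NOHZ_NEWILB_KICK" test, ignoring need_resched(). */
    static bool newilb_only(unsigned int flags)
    {
        return flags == NOHZ_NEWILB_KICK;
    }

    int main(void)
    {
        assert(newilb_only(NOHZ_NEWILB_KICK));                     /* run locally   */
        assert(!newilb_only(NOHZ_NEWILB_KICK | NOHZ_STATS_KICK));  /* softirq ILB   */
        assert(!newilb_only(0));                                   /* nothing to do */
        return 0;
    }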
kernel/sched/idle.c
@@ -263,6 +263,12 @@ exit_idle:
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+
+	/*
+	 * Check if we need to update blocked load
+	 */
+	nohz_run_idle_balance(cpu);
+
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
kernel/sched/sched.h
@@ -2348,9 +2348,11 @@ extern void cfs_bandwidth_usage_dec(void);
 #ifdef CONFIG_NO_HZ_COMMON
 #define NOHZ_BALANCE_KICK_BIT	0
 #define NOHZ_STATS_KICK_BIT	1
+#define NOHZ_NEWILB_KICK_BIT	2
 
 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
+#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
 
 #define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
 
@@ -2361,6 +2363,11 @@ extern void nohz_balance_exit_idle(struct rq *rq);
 static inline void nohz_balance_exit_idle(struct rq *rq) { }
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void nohz_run_idle_balance(int cpu);
+#else
+static inline void nohz_run_idle_balance(int cpu) { }
+#endif
 
 #ifdef CONFIG_SMP
 static inline
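One more observation on the flag layout above: NOHZ_NEWILB_KICK sits on bit 2 and is deliberately left out of NOHZ_KICK_MASK, so a pending newidle request by itself does not count as an ILB kick, while nohz_csd_func() (see the kernel/sched/core.c hunk) still clears it together with the kick bits when an ILB is actually kicked. A quick standalone check of those relationships, using the same values as the hunk:

    #include <assert.h>

    /* Same values as the sched.h hunk (BIT(n) == 1 << n). */
    #define NOHZ_BALANCE_KICK (1u << 0)
    #define NOHZ_STATS_KICK   (1u << 1)
    #define NOHZ_NEWILB_KICK  (1u << 2)
    #define NOHZ_KICK_MASK    (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

    int main(void)
    {
        /* A lone newidle request never satisfies the kick mask... */
        assert((NOHZ_NEWILB_KICK & NOHZ_KICK_MASK) == 0);

        /* ...but clearing "NOHZ_KICK_MASK | NOHZ_NEWILB_KICK" wipes all three bits. */
        unsigned int pending = NOHZ_STATS_KICK | NOHZ_NEWILB_KICK;
        pending &= ~(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK);
        assert(pending == 0);

        return 0;
    }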