From 0e4add4b718d6b1191585768a6d6d35aa4d0d4f7 Mon Sep 17 00:00:00 2001 From: ztc1997 Date: Sat, 13 Apr 2024 23:04:30 +0800 Subject: [PATCH] schedhorizon: Sync with schedutil --- kernel/sched/cpufreq_schedhorizon.c | 122 ++++++++++++++++++++++++---- 1 file changed, 107 insertions(+), 15 deletions(-) diff --git a/kernel/sched/cpufreq_schedhorizon.c b/kernel/sched/cpufreq_schedhorizon.c index 4e846712c..d85d10e7b 100644 --- a/kernel/sched/cpufreq_schedhorizon.c +++ b/kernel/sched/cpufreq_schedhorizon.c @@ -19,7 +19,8 @@ static u64 default_up_delay[] = {0}; struct sugov_tunables { struct gov_attr_set attr_set; - unsigned int rate_limit_us; + unsigned int up_rate_limit_us; + unsigned int down_rate_limit_us; unsigned int *efficient_freq; int nefficient_freq; u64 *up_delay; @@ -35,7 +36,9 @@ struct sugov_policy { raw_spinlock_t update_lock; /* For shared policies */ u64 last_freq_update_time; - s64 freq_update_delay_ns; + s64 min_rate_limit_ns; + s64 up_rate_delay_ns; + s64 down_rate_delay_ns; unsigned int next_freq; unsigned int cached_raw_freq; u64 first_hp_request_time; @@ -151,9 +154,33 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) if (sg_policy->work_in_progress) return true; + /* No need to recalculate next freq for min_rate_limit_us + * at least. However we might still decide to further rate + * limit once frequency change direction is decided, according + * to the separate rate limits. 
+ */ + delta_ns = time - sg_policy->last_freq_update_time; - return delta_ns >= sg_policy->freq_update_delay_ns; + return delta_ns >= sg_policy->min_rate_limit_ns; +} + +static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time, + unsigned int next_freq) +{ + s64 delta_ns; + + delta_ns = time - sg_policy->last_freq_update_time; + + if (next_freq > sg_policy->next_freq && + delta_ns < sg_policy->up_rate_delay_ns) + return true; + + if (next_freq < sg_policy->next_freq && + delta_ns < sg_policy->down_rate_delay_ns) + return true; + + return false; } static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, @@ -166,6 +193,9 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); } + if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) + return false; + sg_policy->next_freq = next_freq; sg_policy->last_freq_update_time = time; @@ -219,6 +249,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, struct cpufreq_policy *policy = sg_policy->policy; unsigned int freq = arch_scale_freq_invariant() ? policy->cpuinfo.max_freq : policy->cur; + unsigned int idx, l_freq, h_freq; unsigned long next_freq = 0; trace_android_vh_map_util_freq(util, freq, max, &next_freq, policy, @@ -234,7 +265,21 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, return sg_policy->next_freq; sg_policy->cached_raw_freq = freq; - return cpufreq_driver_resolve_freq(policy, freq); + l_freq = cpufreq_driver_resolve_freq(policy, freq); + idx = cpufreq_frequency_table_target(policy, freq, CPUFREQ_RELATION_H); + h_freq = policy->freq_table[idx].frequency; + h_freq = clamp(h_freq, policy->min, policy->max); + if (l_freq <= h_freq || l_freq == policy->min) + return l_freq; + + /* + * Use the frequency step below if the calculated frequency is <20% + * higher than it. 
+ */ + if (mult_frac(100, freq - h_freq, l_freq - h_freq) < 20) + return h_freq; + + return l_freq; } /* @@ -588,15 +633,32 @@ static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr return container_of(attr_set, struct sugov_tunables, attr_set); } -static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) +static DEFINE_MUTEX(min_rate_lock); + +static void update_min_rate_limit_ns(struct sugov_policy *sg_policy) +{ + mutex_lock(&min_rate_lock); + sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns, + sg_policy->down_rate_delay_ns); + mutex_unlock(&min_rate_lock); +} + +static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); - return sprintf(buf, "%u\n", tunables->rate_limit_us); + return sprintf(buf, "%u\n", tunables->up_rate_limit_us); } -static ssize_t -rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count) +static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf) +{ + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); + + return sprintf(buf, "%u\n", tunables->down_rate_limit_us); +} + +static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set, + const char *buf, size_t count) { struct sugov_tunables *tunables = to_sugov_tunables(attr_set); struct sugov_policy *sg_policy; @@ -605,10 +667,33 @@ rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count if (kstrtouint(buf, 10, &rate_limit_us)) return -EINVAL; - tunables->rate_limit_us = rate_limit_us; + tunables->up_rate_limit_us = rate_limit_us; - list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) - sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC; + list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { + sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC; + update_min_rate_limit_ns(sg_policy); + } + + return count; +} + 
+static ssize_t +down_rate_limit_us_store(struct gov_attr_set *attr_set, + const char *buf, size_t count) +{ + struct sugov_tunables *tunables = to_sugov_tunables(attr_set); + struct sugov_policy *sg_policy; + unsigned int rate_limit_us; + + if (kstrtouint(buf, 10, &rate_limit_us)) + return -EINVAL; + + tunables->down_rate_limit_us = rate_limit_us; + + list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { + sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC; + update_min_rate_limit_ns(sg_policy); + } return count; } @@ -683,12 +768,14 @@ static ssize_t up_delay_store(struct gov_attr_set *attr_set, return count; } -static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us); +static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us); +static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us); static struct governor_attr efficient_freq = __ATTR_RW(efficient_freq); static struct governor_attr up_delay = __ATTR_RW(up_delay); static struct attribute *sugov_attrs[] = { - &rate_limit_us.attr, + &up_rate_limit_us.attr, + &down_rate_limit_us.attr, &efficient_freq.attr, &up_delay.attr, NULL @@ -854,7 +941,8 @@ static int sugov_init(struct cpufreq_policy *policy) goto stop_kthread; } - tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy); + tunables->up_rate_limit_us = 500; + tunables->down_rate_limit_us = 1000; tunables->efficient_freq = default_efficient_freq; tunables->nefficient_freq = ARRAY_SIZE(default_efficient_freq); tunables->up_delay = default_up_delay; @@ -917,7 +1005,11 @@ static int sugov_start(struct cpufreq_policy *policy) struct sugov_policy *sg_policy = policy->governor_data; unsigned int cpu; - sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC; + sg_policy->up_rate_delay_ns = + sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC; + sg_policy->down_rate_delay_ns = + sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC; + 
update_min_rate_limit_ns(sg_policy); sg_policy->last_freq_update_time = 0; sg_policy->next_freq = 0; sg_policy->work_in_progress = false;