diff --git a/kernel/events/core.c b/kernel/events/core.c
index 479fe1fa5..507cc9bff 100755
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -419,7 +419,7 @@ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' */
 
 #define DEFAULT_MAX_SAMPLE_RATE 100000
 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
-#define DEFAULT_CPU_TIME_MAX_PERCENT 5
+#define DEFAULT_CPU_TIME_MAX_PERCENT 25
 
 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
 
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 752a27d82..2067080bb 100755
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -197,7 +197,7 @@ void sched_autogroup_exit(struct signal_struct *sig)
 
 static int __init setup_autogroup(char *str)
 {
-	sysctl_sched_autogroup_enabled = 1;
+	sysctl_sched_autogroup_enabled = 0;
 
 	return 1;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6bc998de3..9fb453a77 100755
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(sysctl_sched_features);
 #if !defined(CONFIG_ANDROID)
 const_debug unsigned int sysctl_sched_nr_migrate = NR_CPUS;
 #else
-const_debug unsigned int sysctl_sched_nr_migrate = NR_CPUS;
+const_debug unsigned int sysctl_sched_nr_migrate = 32;
 #endif
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23ac79c64..da4d52e2a 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -39,9 +39,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_runtime);
  *
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_latency = 4000000ULL;
+unsigned int sysctl_sched_latency = 5000000ULL;
 EXPORT_SYMBOL_GPL(sysctl_sched_latency);
-static unsigned int normalized_sysctl_sched_latency = 4000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 5000000ULL;
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -54,16 +54,16 @@ static unsigned int normalized_sysctl_sched_latency = 4000000ULL;
  *
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
  */
-enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  *
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 500000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
 EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
-static unsigned int normalized_sysctl_sched_min_granularity = 500000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
@@ -74,7 +74,7 @@ static unsigned int sched_nr_latency = 8;
  * After fork, child runs first. If set to 0 (default) then
  * parent will (try to) run first.
  */
-unsigned int sysctl_sched_child_runs_first __read_mostly = 1;
+unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -85,10 +85,10 @@ unsigned int sysctl_sched_child_runs_first __read_mostly = 1;
  *
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
-static unsigned int normalized_sysctl_sched_wakeup_granularity = 2000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
-unsigned int __read_mostly sysctl_sched_migration_cost = 5000000UL;
+unsigned int __read_mostly sysctl_sched_migration_cost = 500000UL;
 
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index c301bc1cc..f1bf5e12d 100755
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -49,8 +49,7 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-#define SCHED_FEAT_TTWU_QUEUE 1
-
+SCHED_FEAT(TTWU_QUEUE, true)
 
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
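
Note on the features.h hunk: the change replaces a one-off #define with an entry in the kernel's SCHED_FEAT() x-macro table. features.h is included more than once with different definitions of SCHED_FEAT() (see kernel/sched/sched.h and kernel/sched/core.c), so a single entry both names a feature bit and sets its default. Below is a minimal standalone sketch of that pattern; the FEATURES() list macro and the EXAMPLE_OFF entry are stand-ins invented for this sketch, not the kernel's actual include mechanics.

/*
 * Standalone sketch of the SCHED_FEAT() x-macro pattern, modeled on
 * kernel/sched/sched.h and kernel/sched/core.c. The FEATURES() list
 * macro stands in for re-including features.h; entries are illustrative.
 */
#include <stdio.h>

#define FEATURES(SCHED_FEAT)			\
	SCHED_FEAT(NONTASK_CAPACITY, 1)		\
	SCHED_FEAT(TTWU_QUEUE, 1)		\
	SCHED_FEAT(EXAMPLE_OFF, 0)

/* First expansion: one enum constant (bit index) per feature. */
#define SCHED_FEAT_ENUM(name, enabled) __SCHED_FEAT_##name,
enum {
	FEATURES(SCHED_FEAT_ENUM)
	__SCHED_FEAT_NR,
};
#undef SCHED_FEAT_ENUM

/* Second expansion: OR the default-enabled features into one bitmask. */
#define SCHED_FEAT_BIT(name, enabled) ((1UL << __SCHED_FEAT_##name) * (enabled)) |
static unsigned long sysctl_sched_features = FEATURES(SCHED_FEAT_BIT) 0UL;
#undef SCHED_FEAT_BIT

/* Runtime check, analogous to the kernel's sched_feat(x) helper. */
#define sched_feat(x) (!!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)))

int main(void)
{
	printf("TTWU_QUEUE:  %d\n", sched_feat(TTWU_QUEUE));
	printf("EXAMPLE_OFF: %d\n", sched_feat(EXAMPLE_OFF));
	return 0;
}

Because the default lands in the sysctl_sched_features bitmask rather than a compile-time #define, the feature stays togglable at runtime on CONFIG_SCHED_DEBUG kernels, e.g. via echo NO_TTWU_QUEUE into the sched_features debugfs file (its exact path varies by kernel version).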
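To sanity-check the new defaults on a running kernel, the affected knobs are readable through procfs on kernels of this vintage (the sched_* entries need CONFIG_SCHED_DEBUG and moved under /sys/kernel/debug/sched/ in v5.13+). A quick sketch, assuming the pre-v5.13 /proc/sys/kernel/ paths:

/*
 * Print the scheduler/perf defaults this patch touches. Assumes the
 * pre-v5.13 /proc/sys/kernel/ paths; on newer kernels the sched_*
 * knobs live under /sys/kernel/debug/sched/ instead.
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%-50s <unavailable>\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-50s %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/sched_latency_ns");
	show("/proc/sys/kernel/sched_min_granularity_ns");
	show("/proc/sys/kernel/sched_wakeup_granularity_ns");
	show("/proc/sys/kernel/sched_migration_cost_ns");
	show("/proc/sys/kernel/sched_child_runs_first");
	show("/proc/sys/kernel/sched_nr_migrate");
	show("/proc/sys/kernel/sched_tunable_scaling");
	show("/proc/sys/kernel/sched_autogroup_enabled");
	show("/proc/sys/kernel/perf_cpu_time_max_percent");
	return 0;
}

Note that with sysctl_sched_tunable_scaling set back to SCHED_TUNABLESCALING_LOG, the latency and granularity values are rescaled by 1 + ilog(ncpus) at boot, so the runtime readings will exceed the normalized defaults in the patch on multi-core systems.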