treewide: use power efficient workingqueues
(cherry picked from commit 8ddf75b4fb1d7b54a795c1dc70bf480a5f049603)
(cherry picked from commit dbf96ce6987d4361b4135124b81cb40b269366c5)
(cherry picked from commit 3291d145fade85cef2830b9d28fe1c90e154ba9c)
This commit is contained in:
parent
7f393c3513
commit
58659caf1a
4 changed files with 7 additions and 7 deletions
|
@@ -2200,12 +2200,12 @@ static void wakeup_dirtytime_writeback(struct work_struct *w)
 		wb_wakeup(wb);
 	}
 	rcu_read_unlock();
-	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+	queue_delayed_work(system_power_efficient_wq, &dirtytime_work, dirtytime_expire_interval * HZ);
 }
 
 static int __init start_dirtytime_writeback(void)
 {
-	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+	queue_delayed_work(system_power_efficient_wq, &dirtytime_work, dirtytime_expire_interval * HZ);
 	return 0;
 }
 __initcall(start_dirtytime_writeback);
|
|
@@ -661,7 +661,7 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
 	++head->current_record_no;
 
 	spin_unlock(&log->rl_lock);
-	schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16));
+	queue_delayed_work(system_power_efficient_wq, &log->ml_wakeup_work, msecs_to_jiffies(16));
 }
 
 static int validate_hash_tree(struct backing_file_context *bfc, struct file *f,
|
|
|
@@ -448,7 +448,7 @@ static void psi_avgs_work(struct work_struct *work)
 	group->avg_next_update = update_averages(group, now);
 
 	if (nonidle) {
-		schedule_delayed_work(dwork, nsecs_to_jiffies(
+		queue_delayed_work(system_power_efficient_wq, dwork, nsecs_to_jiffies(
 			group->avg_next_update - now) + 1);
 	}
|
@@ -815,7 +815,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
 	psi_schedule_poll_work(group, 1, false);
 
 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
-		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
+		queue_delayed_work(system_power_efficient_wq, &group->avgs_work, PSI_FREQ);
 }
 
 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
|
|
|
@@ -43,7 +43,7 @@ __page_reporting_request(struct page_reporting_dev_info *prdev)
 	 * now we are limiting this to running no more than once every
 	 * couple of seconds.
 	 */
-	schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+	queue_delayed_work(system_power_efficient_wq, &prdev->work, PAGE_REPORTING_DELAY);
 }
 
 /* notify prdev of free page reporting request */
|
@@ -306,7 +306,7 @@ err_out:
 	 */
 	state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
 	if (state == PAGE_REPORTING_REQUESTED)
-		schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
+		queue_delayed_work(system_power_efficient_wq, &prdev->work, PAGE_REPORTING_DELAY);
 }
 
 static DEFINE_MUTEX(page_reporting_mutex);
|
Loading…
Reference in a new issue