ANDROID: sched/rt: Add support for rt sync wakeups

Some rt tasks undergo sync wakeups. Currently, these tasks are placed on
other, often sleeping or otherwise idle CPUs, which must then be brought
out of idle, costing power unnecessarily. Since a sync wakeup hints that
the waker is about to sleep, the waker's CPU is usually the better
target; add support for honoring that hint for rt tasks.

Bug: 157906395
Change-Id: I48864d0847bbe4f7813c842032880ad3f3b8b06b
Signed-off-by: J. Avila <elavila@google.com>
Committed by J. Avila on 2020-07-14 22:03:38 +00:00
parent 9a86bea7c3 · commit da5f3cd378
3 changed files with 43 additions and 1 deletion
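For context, a "sync" wakeup is a hint from the waker that it is about to
sleep, so the wakee can run on the waker's CPU instead of pulling an idle
CPU out of a low-power state. Inside the kernel the hint travels as
WF_SYNC, which the *_sync wait-queue helpers set. A minimal sketch of a
driver-style waker issuing a sync wakeup (the wait queue and surrounding
code are illustrative context, not part of this patch):

	/* Sketch: a producer that wakes its consumer synchronously.
	 * wake_up_interruptible_sync() propagates WF_SYNC down the
	 * wakeup path; this patch translates that into
	 * ENQUEUE_WAKEUP_SYNC for rt wakees.
	 */
	static DECLARE_WAIT_QUEUE_HEAD(consumer_wq);	/* hypothetical */
	static bool data_ready;

	static void producer_publish(void)
	{
		data_ready = true;
		/* The producer is about to block, so the consumer can
		 * run right here on this CPU. */
		wake_up_interruptible_sync(&consumer_wq);
	}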

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c

@@ -2627,6 +2627,9 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 {
 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
 
+	if (wake_flags & WF_SYNC)
+		en_flags |= ENQUEUE_WAKEUP_SYNC;
+
 	lockdep_assert_held(&rq->lock);
 
 	if (p->sched_contributes_to_load)

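ttwu_do_activate() only translates the flag; it is consumed when the task
is enqueued through its scheduling class. A condensed sketch of the
existing call chain, for orientation (not part of the diff):

	/* try_to_wake_up(p, state, wake_flags)   wake_flags may carry WF_SYNC
	 *   ttwu_queue(p, cpu, wake_flags)
	 *     ttwu_do_activate(rq, p, wake_flags, ...)
	 *       en_flags |= ENQUEUE_WAKEUP_SYNC;    <- this patch
	 *       activate_task(rq, p, en_flags)
	 *         enqueue_task(rq, p, flags)
	 *           p->sched_class->enqueue_task(rq, p, flags)
	 *             = enqueue_task_rt() below for SCHED_FIFO/SCHED_RR
	 */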
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c

@@ -1372,6 +1372,27 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 	enqueue_top_rt_rq(&rq->rt);
 }
 
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+					bool sync)
+{
+	/*
+	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
+	 * and force it to run for a likely small time after the RT wakee is
+	 * done. So, only honor RT sync wakeups from RT wakers.
+	 */
+	return sync && task_has_rt_policy(rq->curr) &&
+	       p->prio <= rq->rt.highest_prio.next &&
+	       rq->rt.rt_nr_running <= 2;
+}
+#else
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+					bool sync)
+{
+	return 0;
+}
+#endif
+
 /*
  * Adding/removing a task to/from a priority array:
  */
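The predicate bundles three checks: the wakeup must be sync, the waker
itself must be rt (a CFS waker would just be preempted, as the comment
explains), and the wakee must both beat the next-highest queued rt task
(lower prio values mean higher rt priority) and land on an rq holding at
most two rt tasks. A userspace mock with asserted scenarios (plain fields
stand in for the rq state; purely illustrative):

	#include <assert.h>
	#include <stdbool.h>

	struct mock_rq {
		bool curr_is_rt;	/* task_has_rt_policy(rq->curr) */
		int highest_prio_next;	/* rq->rt.highest_prio.next */
		int rt_nr_running;	/* rq->rt.rt_nr_running */
	};

	static bool mock_should_honor(const struct mock_rq *rq,
				      int wakee_prio, bool sync)
	{
		return sync && rq->curr_is_rt &&
		       wakee_prio <= rq->highest_prio_next &&
		       rq->rt_nr_running <= 2;
	}

	int main(void)
	{
		struct mock_rq rt_waker = { true, 90, 1 };
		struct mock_rq cfs_waker = { false, 90, 1 };
		struct mock_rq busy_rq = { true, 90, 3 };

		/* rt waker, lightly loaded rq: honor the hint. */
		assert(mock_should_honor(&rt_waker, 50, true));
		/* CFS waker: the rt wakee would preempt it, so don't. */
		assert(!mock_should_honor(&cfs_waker, 50, true));
		/* Too many rt tasks already on the rq: fall back. */
		assert(!mock_should_honor(&busy_rq, 50, true));
		return 0;
	}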
@@ -1379,13 +1400,15 @@ static void
 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
+	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
 
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
+	    !should_honor_rt_sync(rq, p, sync))
 		enqueue_pushable_task(rq, p);
 }
@@ -1462,9 +1485,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	struct rq *this_cpu_rq;
 	bool test;
 	int target_cpu = -1;
 	bool may_not_preempt;
+	bool sync = !!(flags & WF_SYNC);
+	int this_cpu;
 
 	trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
 					    flags, &target_cpu);
@@ -1479,6 +1505,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	rcu_read_lock();
 	curr = READ_ONCE(rq->curr); /* unlocked access */
+	this_cpu = smp_processor_id();
+	this_cpu_rq = cpu_rq(this_cpu);
 
 	/*
 	 * If the current task on @p's runqueue is a softirq task,
@@ -1516,6 +1544,15 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	       (unlikely(rt_task(curr)) &&
 		(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
 
+	/*
+	 * Respect the sync flag as long as the task can run on this CPU.
+	 */
+	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
+	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
+		cpu = this_cpu;
+		goto out_unlock;
+	}
+
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);

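One way to observe the placement change from userspace (a hedged sketch:
it needs root for SCHED_FIFO, a pipe is used because pipe writes go
through the kernel's sync-poll wakeup helpers, and actual placement still
depends on affinity and load):

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	static int pipefd[2];

	static void *consumer(void *arg)
	{
		char c;
		read(pipefd[0], &c, 1);		/* blocks until woken */
		printf("consumer woke on CPU %d\n", sched_getcpu());
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_attr_t attr;
		struct sched_param sp = { .sched_priority = 50 };

		pipe(pipefd);

		/* Both threads SCHED_FIFO so the waker is an rt task. */
		pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
		pthread_attr_init(&attr);
		pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
		pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
		pthread_attr_setschedparam(&attr, &sp);
		pthread_create(&t, &attr, consumer, NULL);

		sleep(1);			/* let the consumer block */
		printf("producer on CPU %d\n", sched_getcpu());
		write(pipefd[1], "x", 1);	/* sync wakeup of consumer */

		pthread_join(t, NULL);
		return 0;
	}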
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h

@@ -1785,6 +1785,8 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_MIGRATED	0x00
 #endif
 
+#define ENQUEUE_WAKEUP_SYNC	0x80
+
 #define RETRY_TASK		((void *)-1UL)
 
 struct sched_class {
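The 0x80 value is simply the next free bit in the ENQUEUE_* flag space.
For reference, the neighboring flags in sched.h of kernels from this era
(reproduced from v5.4-vintage sources; verify against your tree):

	/* ENQUEUE_WAKEUP       0x01
	 * ENQUEUE_RESTORE      0x02
	 * ENQUEUE_MOVE         0x04
	 * ENQUEUE_NOCLOCK      0x08
	 * ENQUEUE_HEAD         0x10
	 * ENQUEUE_REPLENISH    0x20
	 * ENQUEUE_MIGRATED     0x40  (0x00 when !CONFIG_SMP)
	 * ENQUEUE_WAKEUP_SYNC  0x80  <- added by this patch
	 */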