ANDROID: sched: avoid placing RT threads on cores handling softirqs
In certain audio use cases, scheduling RT threads on cores that are
handling softirqs can lead to glitches. Prevent this behavior.

Bug: 31501544
Bug: 168521633
Change-Id: I99dd7aaa12c11270b28dbabea484bcc8fb8ba0c1
Signed-off-by: John Dias <joaodias@google.com>
[elavila: Port to mainline, amend commit text]
Signed-off-by: J. Avila <elavila@google.com>
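In outline, the patch filters softirq-busy CPUs out of the RT wake/push CPU search, and retries without the filter if that leaves no candidate, so a low-priority target is still found and priority inversion is avoided. Below is a minimal user-space sketch of that two-pass selection; the bitmask, the busy_with_softirq[] table, and the lowest_mask()/pick_cpu() helpers are illustrative stand-ins, not the kernel's cpumask API.

/* rt_pick.c - user-space model of the patch's two-pass candidate filtering. */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

/* Stand-in for task_may_not_preempt(): CPUs 0, 2, 3 are softirq-busy. */
static const bool busy_with_softirq[NR_CPUS] = { true, false, true, true };

/* Stand-in for __cpupri_find(): CPUs 0-2 currently run the lowest prio. */
static unsigned int lowest_mask(void)
{
	return 0x7u;
}

static int pick_cpu(bool drop_nopreempts)
{
	unsigned int mask = lowest_mask();
	int cpu;

	/* First pass: drop CPUs whose current task may not preempt. */
	if (drop_nopreempts)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (busy_with_softirq[cpu])
				mask &= ~(1u << cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;	/* mask emptied: caller retries without the filter */
}

int main(void)
{
	int cpu = pick_cpu(true);

	/* The retry mirrors the goto retry in cpupri_find_fitness(). */
	if (cpu < 0)
		cpu = pick_cpu(false);
	printf("selected CPU %d\n", cpu);	/* prints: selected CPU 1 */
	return 0;
}

With the sample data, the first pass drops CPUs 0 and 2 and selects the softirq-free CPU 1; had every candidate been softirq-busy, the second pass would simply return the original lowest-priority CPU.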
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1211,6 +1211,17 @@ config SCHED_AUTOGROUP
 	  desktop applications. Task group autogeneration is currently based
 	  upon task session.
 
+config RT_SOFTINT_OPTIMIZATION
+	bool "Improve RT scheduling during long softint execution"
+	depends on ARM64
+	depends on SMP
+	default n
+	help
+	  Enable an optimization which tries to avoid placing RT tasks on CPUs
+	  occupied by nonpreemptible tasks, such as a long softint, or CPUs
+	  which may soon block preemptions, such as a CPU running a ksoftirq
+	  thread which handles slow softints.
+
 config SYSFS_DEPRECATED
 	bool "Enable deprecated sysfs features to support old userspace tools"
 	depends on SYSFS
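For reference, the new option can be switched on with a one-line defconfig fragment; this assumes an arm64 SMP kernel, since the option depends on both:

CONFIG_RT_SOFTINT_OPTIMIZATION=y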
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -41,8 +41,29 @@ static int convert_prio(int prio)
 	return cpupri;
 }
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/**
+ * drop_nopreempt_cpus - remove likely nonpreemptible cpus from the mask
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+		if (task_may_not_preempt(task, cpu)) {
+			cpumask_clear_cpu(cpu, lowest_mask);
+		}
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+#endif
+
 static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
-				struct cpumask *lowest_mask, int idx)
+				struct cpumask *lowest_mask, int idx,
+				bool drop_nopreempts)
 {
 	struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
 	int skip = 0;
@@ -79,6 +100,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
 	if (lowest_mask) {
 		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+		if (drop_nopreempts)
+			drop_nopreempt_cpus(lowest_mask);
+#endif
+
 		/*
 		 * We have to ensure that we have at least one bit
 		 * still set in the array, since the map could have
@@ -123,12 +149,16 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
 {
 	int task_pri = convert_prio(p->prio);
 	int idx, cpu;
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+retry:
+#endif
 	for (idx = 0; idx < task_pri; idx++) {
 
-		if (!__cpupri_find(cp, p, lowest_mask, idx))
+		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
 			continue;
 
 		if (!lowest_mask || !fitness_fn)
@@ -150,6 +180,17 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
 		return 1;
 	}
 
+	/*
+	 * If we can't find any non-preemptible cpu's, retry so we can
+	 * find the lowest priority target and avoid priority inversion.
+	 */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
+#endif
+
 	/*
 	 * If we failed to find a fitting lowest_mask, kick off a new search
 	 * but without taking into account any fitness criteria this time.
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1436,6 +1436,21 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a softirq or is likely to block preemptions soon because
+ * it is a ksoftirq thread.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
+		task == cpu_ksoftirqd;
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
@@ -1443,6 +1458,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	struct rq *rq;
 	bool test;
 	int target_cpu = -1;
+	bool may_not_preempt;
 
 	trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
 					    flags, &target_cpu);
@@ -1459,7 +1475,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1484,9 +1505,10 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	 * requirement of the task - which is only important on heterogeneous
 	 * systems like big.LITTLE.
 	 */
-	test = curr &&
-	       unlikely(rt_task(curr)) &&
-	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	test = (curr && (may_not_preempt ||
+		(unlikely(rt_task(curr)) &&
+		(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
 
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);
@@ -1499,11 +1521,14 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 			goto out_unlock;
 
 		/*
-		 * Don't bother moving it if the destination CPU is
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the destination CPU is
 		 * not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2671,3 +2671,15 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
 
 void swake_up_all_locked(struct swait_queue_head *q);
 void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+#else
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	return false;
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
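A note on the test in task_may_not_preempt(): preempt_count & SOFTIRQ_MASK is non-zero both while a CPU is serving a softirq and while softirqs are merely disabled (local_bh_disable() adds to the same byte), so the filter errs on the side of treating either state as non-preemptible. A stand-alone sketch of the bit test follows, with the mask values copied from include/linux/preempt.h for illustration; may_not_preempt() here mirrors only the bitmask half of the patch's check, not the ksoftirqd comparison.

#include <stdio.h>

/* Bit layout of preempt_count, per include/linux/preempt.h. */
#define PREEMPT_MASK	0x000000ffU	/* preempt_disable() nesting   */
#define SOFTIRQ_MASK	0x0000ff00U	/* softirq serving + disabling */
#define HARDIRQ_MASK	0x000f0000U	/* hardirq nesting             */
#define SOFTIRQ_OFFSET	0x00000100U	/* added while serving softirq */

static int may_not_preempt(unsigned int preempt_count)
{
	return (preempt_count & SOFTIRQ_MASK) != 0;
}

int main(void)
{
	unsigned int pc = 0;

	printf("idle task:       %d\n", may_not_preempt(pc));	/* 0 */
	pc += SOFTIRQ_OFFSET;			/* CPU enters softirq */
	printf("serving softirq: %d\n", may_not_preempt(pc));	/* 1 */
	return 0;
}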