Revert "Revert "ANDROID: sched: avoid migrating when softint on tgt cpu should be short""
This reverts commit 4196c1dafc, as the
merge conflicts have been resolved.
Bug: 31752786
Bug: 168521633
Change-Id: I6cb3fc698d567e03c67e2c4373ce75cc71cdfe9c
Signed-off-by: John Dias <joaodias@google.com>
[elavila: Amend commit text for AOSP, port to mainline]
Signed-off-by: J. Avila <elavila@google.com>
[ashayj@codeaurora.org: update usage of __IRQ_STAT and minor conflicts]
Signed-off-by: Ashay Jaiswal <ashayj@codeaurora.org>
Signed-off-by: Shaleen Agrawal <shalagra@codeaurora.org>
include/linux/interrupt.h
@@ -529,6 +529,12 @@ enum
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+                           (1 << NET_RX_SOFTIRQ) | \
+                           (1 << BLOCK_SOFTIRQ) | \
+                           (1 << IRQ_POLL_SOFTIRQ) | \
+                           (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
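For readers skimming the diff: each softirq number is a bit position, so a single AND against LONG_SOFTIRQ_MASK tests whether any potentially long handler class is present in a mask. A minimal standalone sketch of the arithmetic (userspace, bit positions copied from the kernel's enum; the program is illustrative, not kernel code):

#include <stdio.h>

/* Softirq bit positions, mirroring the kernel's enum above. */
enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
       BLOCK_SOFTIRQ, IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ,
       SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, RCU_SOFTIRQ, NR_SOFTIRQS };

#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
                           (1 << NET_RX_SOFTIRQ) | \
                           (1 << BLOCK_SOFTIRQ) | \
                           (1 << IRQ_POLL_SOFTIRQ) | \
                           (1 << TASKLET_SOFTIRQ))

int main(void)
{
        /* Hypothetical snapshot: TIMER and NET_RX handlers in flight. */
        unsigned int active = (1u << TIMER_SOFTIRQ) | (1u << NET_RX_SOFTIRQ);

        /* One AND decides if any potentially long softirq is running. */
        if (active & LONG_SOFTIRQ_MASK)
                printf("long softirq in flight; prefer another CPU\n");
        else
                printf("only short softirqs; CPU is fine for an RT task\n");
        return 0;
}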
@@ -555,6 +561,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
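The DECLARE_PER_CPU above is the header half of the usual per-CPU split: exactly one .c file provides the matching DEFINE_PER_CPU (kernel/softirq.c, in a later hunk), and any file including the header can then read the variable. A hedged reminder of the pattern, with hypothetical names (example_mask and read_mask_of() are not in the patch):

/* Header side: makes the symbol visible to all includers. */
DECLARE_PER_CPU(__u32, example_mask);

/* Exactly one .c file: allocates the per-CPU storage. */
DEFINE_PER_CPU(__u32, example_mask);

/* Any reader, on any CPU, can then take a snapshot: */
static __u32 read_mask_of(int cpu)
{
        return per_cpu(example_mask, cpu);
}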
kernel/sched/rt.c
@@ -1441,15 +1441,20 @@ static int find_lowest_rq(struct task_struct *task);
 #ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
  */
 bool
 task_may_not_preempt(struct task_struct *task, int cpu)
 {
+       __u32 softirqs = per_cpu(active_softirqs, cpu) |
+                        local_softirq_pending();
+
        struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
-       return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-               task == cpu_ksoftirqd;
+       return ((softirqs & LONG_SOFTIRQ_MASK) &&
+               (task == cpu_ksoftirqd ||
+                task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 #endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
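This diff only defines task_may_not_preempt(); its call sites in the RT placement path are not part of these hunks. As a hedged sketch of how a CPU-selection loop could consult it (select_first_preemptible_cpu() and its loop are hypothetical, not the actual find_lowest_rq() code):

/* Hypothetical caller, NOT the real scheduler code: skip CPUs whose
 * current task is inside, or about to enter, a long softirq section. */
static int select_first_preemptible_cpu(const struct cpumask *candidates)
{
        int cpu;

        for_each_cpu(cpu, candidates) {
                if (task_may_not_preempt(cpu_curr(cpu), cpu))
                        continue;       /* softirq would delay the RT wakeup */
                return cpu;
        }
        return -1;      /* caller falls back to normal placement */
}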
kernel/softirq.c
@@ -63,6 +63,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
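The comment above is the key design note: remote readers take an unsynchronized snapshot of another CPU's mask, and a stale answer only makes the migration hint conservative or slightly late, never unsafe. The read side, reduced to a hypothetical helper (cpu_handling_long_softirq() is not in the patch):

/* Hypothetical read side: a plain, unsynchronized load of another
 * CPU's in-flight softirq mask. Staleness is tolerated by design. */
static bool cpu_handling_long_softirq(int cpu)
{
        return per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK;
}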
@@ -541,6 +548,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);
+       __this_cpu_write(active_softirqs, pending);
 
        local_irq_enable();
 
@@ -570,6 +578,7 @@ restart:
                pending >>= softirq_bit;
        }
 
+       __this_cpu_write(active_softirqs, 0);
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
            __this_cpu_read(ksoftirqd) == current)
                rcu_softirq_qs();
 
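Taken together, the two __do_softirq() hunks bracket the handler loop: the snapshot of pending softirqs is published before handlers run and cleared once the loop drains. A condensed, hypothetical sketch of where the writes sit (kernel context assumed; handler dispatch and the restart budget are elided):

/* Hypothetical condensation of __do_softirq(), not the full function. */
static void do_softirq_lifecycle_sketch(void)
{
        __u32 pending = local_softirq_pending();

restart:
        set_softirq_pending(0);
        /* Publish: these softirqs are now being handled on this CPU. */
        __this_cpu_write(active_softirqs, pending);

        local_irq_enable();
        /* ... invoke softirq_vec[bit].action for each set bit ... */
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending)
                goto restart;   /* real code also checks a time/count budget */

        /* Publish: nothing is in flight anymore. */
        __this_cpu_write(active_softirqs, 0);
}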