Revert "Revert "ANDROID: sched: avoid migrating when softint on tgt cpu should be short""

This reverts commit 4196c1dafc, as the
merge conflicts have been resolved.

Bug: 31752786
Bug: 168521633
Change-Id: I6cb3fc698d567e03c67e2c4373ce75cc71cdfe9c
Signed-off-by: John Dias <joaodias@google.com>
[elavila: Amend commit text for AOSP, port to mainline]
Signed-off-by: J. Avila <elavila@google.com>
[ashayj@codeaurora.org: update usage of __IRQ_STAT and minor conflicts]
Signed-off-by: Ashay Jaiswal <ashayj@codeaurora.org>
Signed-off-by: Shaleen Agrawal <shalagra@codeaurora.org>
Author:    John Dias <joaodias@google.com>
Date:      2016-10-05 15:11:40 -07:00
Committed: Todd Kjos
Parent:    272fc0a73b
Commit:    027f8bd863

3 changed files with 25 additions and 4 deletions
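
For orientation, the three files below cooperate as follows: include/linux/interrupt.h gains LONG_SOFTIRQ_MASK and a declaration of the per-CPU active_softirqs mask, kernel/softirq.c publishes that mask while __do_softirq() runs handlers, and kernel/sched/rt.c strengthens task_may_not_preempt() so a CPU is reported as a bad migration target only while long softint work is actually in flight there. A minimal sketch of how an RT CPU-selection path might consume the predicate; pick_rt_target_cpu() and its fallback policy are hypothetical, invented for illustration, while task_may_not_preempt() and cpu_curr() are real kernel symbols:

/* Hypothetical consumer of task_may_not_preempt(); not part of this
 * commit. If the best candidate CPU is busy with long softint work,
 * fall back to the waking CPU rather than queue behind the softirq. */
static int pick_rt_target_cpu(struct task_struct *p, int best_cpu, int this_cpu)
{
	if (best_cpu >= 0 &&
	    task_may_not_preempt(cpu_curr(best_cpu), best_cpu))
		return this_cpu;	/* avoid waiting on softint handling */
	return best_cpu;
}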

--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h

@@ -529,6 +529,12 @@ enum
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)   | \
+			   (1 << NET_RX_SOFTIRQ)   | \
+			   (1 << BLOCK_SOFTIRQ)    | \
+			   (1 << IRQ_POLL_SOFTIRQ) | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -555,6 +561,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
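
The mask above encodes which softirqs are considered potentially long-running (networking, block, IRQ polling, tasklets), in contrast to the short, bounded ones (timers, scheduler, RCU). A self-contained userspace rendering of the bit test, with the enum order mirroring the softirq_to_name table in kernel/softirq.c; illustration only, not kernel code:

#include <stdio.h>

/* Mirrors the softirq index order in include/linux/interrupt.h */
enum {
	HI_SOFTIRQ = 0, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ, IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ, RCU_SOFTIRQ, NR_SOFTIRQS
};

#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)   | \
			   (1 << NET_RX_SOFTIRQ)   | \
			   (1 << BLOCK_SOFTIRQ)    | \
			   (1 << IRQ_POLL_SOFTIRQ) | \
			   (1 << TASKLET_SOFTIRQ))

int main(void)
{
	unsigned int active = 1u << NET_RX_SOFTIRQ;	/* being handled now */
	unsigned int pending = 1u << TIMER_SOFTIRQ;	/* queued behind it */
	unsigned int softirqs = active | pending;

	/* Timer softirqs are short; only the net-rx bit trips the mask. */
	printf("long softint in flight: %s\n",
	       (softirqs & LONG_SOFTIRQ_MASK) ? "yes" : "no");
	return 0;
}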

--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c

@@ -1441,15 +1441,20 @@ static int find_lowest_rq(struct task_struct *task);
 
 #ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
  */
 bool
 task_may_not_preempt(struct task_struct *task, int cpu)
 {
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 local_softirq_pending();
 	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
-	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-	       task == cpu_ksoftirqd;
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 #endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
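
Both halves of the new condition must hold for a CPU to be skipped: a potentially long softirq is active or pending there, and the running task either is ksoftirqd or has entered softirq context (nonzero SOFTIRQ_MASK bits in its preempt_count). A worked userspace example of the combined decision; the constants are illustrative, with SOFTIRQ_MASK taking its usual preempt_count layout value (bits 8-15):

#include <stdbool.h>
#include <stdio.h>

#define SOFTIRQ_MASK 0x0000ff00u	/* softirq byte of preempt_count */

int main(void)
{
	unsigned int softirqs = 1u << 3;	/* NET_RX active on target */
	unsigned int long_mask = 0x7cu;		/* NET_TX..TASKLET bits */
	unsigned int preempt_count = 0x100u;	/* one softirq level entered */
	bool is_ksoftirqd = false;

	bool skip_cpu = (softirqs & long_mask) &&
			(is_ksoftirqd || (preempt_count & SOFTIRQ_MASK));
	printf("task_may_not_preempt -> %s\n", skip_cpu ? "true" : "false");
	return 0;
}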

--- a/kernel/softirq.c
+++ b/kernel/softirq.c

@@ -63,6 +63,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -541,6 +548,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
@@ -570,6 +578,7 @@ restart:
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
 	    __this_cpu_read(ksoftirqd) == current)
 		rcu_softirq_qs();
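
The two writes bracket the handler loop: the full pending mask is published before interrupts are re-enabled and handlers start, and cleared once the restart loop has drained. Per the comment added at the top of the file, the mask is deliberately unsynchronized; a remote reader in task_may_not_preempt() may observe a stale value, and the worst case is one suboptimal placement decision rather than a correctness problem. Condensed shape of the bookkeeping, as a simplified sketch rather than the verbatim kernel loop:

/* Simplified sketch of __do_softirq()'s new bookkeeping; plain per-CPU
 * stores suffice because readers tolerate approximate answers. */
static void do_softirq_sketch(void)
{
	__u32 pending = local_softirq_pending();

	set_softirq_pending(0);
	__this_cpu_write(active_softirqs, pending);	/* handlers in flight */
	local_irq_enable();

	/* ... invoke softirq_vec[nr].action for each bit set in pending ... */

	__this_cpu_write(active_softirqs, 0);		/* drained */
	local_irq_disable();
}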