diff --git a/kernel/freezer.c b/kernel/freezer.c
index 45ab36ffd0e7..9c34194db56f 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -11,6 +11,7 @@
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/mmu_context.h>
 
 /* total number of freezing conditions in effect */
 atomic_t system_freezing_cnt = ATOMIC_INIT(0);
@@ -146,9 +147,16 @@ bool freeze_task(struct task_struct *p)
 void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
+	const struct cpumask *mask = task_cpu_possible_mask(p);
 
 	spin_lock_irqsave(&freezer_lock, flags);
-	if (frozen(p))
+	/*
+	 * Wake up frozen tasks. On asymmetric systems where tasks cannot
+	 * run on all CPUs, ttwu() may have deferred a wakeup generated
+	 * before thaw_secondary_cpus() had completed so we generate
+	 * additional wakeups here for tasks in the PF_FREEZER_SKIP state.
+	 */
+	if (frozen(p) || (frozen_or_skipped(p) && mask != cpu_possible_mask))
 		wake_up_process(p);
 	spin_unlock_irqrestore(&freezer_lock, flags);
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 607c64aabdd6..227c054eee24 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4059,6 +4059,19 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	if (!ttwu_state_match(p, state, &success))
 		goto unlock;
 
+#ifdef CONFIG_FREEZER
+	/*
+	 * If we're going to wake up a thread which may be frozen, then
+	 * we can only do so if we have an active CPU which is capable of
+	 * running it. This may not be the case when resuming from suspend,
+	 * as the secondary CPUs may not yet be back online. See __thaw_task()
+	 * for the actual wakeup.
+	 */
+	if (unlikely(frozen_or_skipped(p)) &&
+	    !cpumask_intersects(cpu_active_mask, task_cpu_possible_mask(p)))
+		goto unlock;
+#endif
+
 	trace_sched_waking(p);
 
 	/*