ANDROID: Sched: Export scheduler symbols needed by vendor modules

Export internal scheduler symbols so that vendor modules can implement
scheduler-based value-adds.

Bug: 173725277
Change-Id: I021f09097dfc1480abcc998cc8e05e75b2ee828b
Signed-off-by: Shaleen Agrawal <shalagra@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Author:     Shaleen Agrawal
Date:       2020-10-26 18:23:50 -07:00
Committer:  Todd Kjos
Parent:     3216403849
Commit:     5a920a6503

8 changed files with 10 additions and 1 deletion
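
For context, EXPORT_SYMBOL_GPL()/EXPORT_PER_CPU_SYMBOL_GPL() is what lets a GPL-licensed loadable module link against a core-kernel symbol; without the exports below, modpost/insmod fails with an "undefined symbol" error. A minimal, purely hypothetical vendor module (all names illustrative, not part of this commit) that relies on one of these exports might look like this:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example only; not part of this commit. */
#include <linux/module.h>
#include <linux/sched/task.h>	/* extern rwlock_t tasklist_lock */
#include <linux/spinlock.h>

static int __init vendor_sched_init(void)
{
	/*
	 * This reference resolves at module load time only because
	 * kernel/fork.c now does EXPORT_SYMBOL_GPL(tasklist_lock).
	 */
	read_lock(&tasklist_lock);
	read_unlock(&tasklist_lock);
	return 0;
}
module_init(vendor_sched_init);

static void __exit vendor_sched_exit(void)
{
}
module_exit(vendor_sched_exit);

MODULE_LICENSE("GPL");

Because these are _GPL exports, the consuming module must also declare a GPL-compatible MODULE_LICENSE().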

drivers/base/arch_topology.c

@@ -33,6 +33,7 @@ __weak bool arch_freq_counters_available(const struct cpumask *cpus)
 	return false;
 }
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(freq_scale);
 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 			     unsigned long max_freq)
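
As an illustration, a module could read the per-cpu frequency scale the scheduler applies to CPU capacity; <linux/arch_topology.h> already declares freq_scale (and a topology_get_freq_scale() accessor), so only the export was missing. A hedged sketch with a hypothetical helper name:

#include <linux/arch_topology.h>
#include <linux/percpu.h>

/* Frequency scale (0..SCHED_CAPACITY_SCALE) currently applied to @cpu;
 * resolvable from a GPL module because of the export above. */
static unsigned long vendor_cpu_freq_scale(int cpu)
{
	return per_cpu(freq_scale, cpu);
}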

kernel/cgroup/cgroup.c

@@ -4166,6 +4166,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
 		return next;
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(css_next_child);
 /**
  * css_next_descendant_pre - find the next descendant for pre-order walk
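
css_next_child() is the primitive behind the css_for_each_child() iterator in <linux/cgroup.h>, so exporting it lets a GPL module walk a cgroup's direct children. A hedged sketch (helper name hypothetical):

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

/* Count the direct children of @parent. css_for_each_child() expands to
 * css_next_child() calls and must run under rcu_read_lock() (or with
 * cgroup_mutex held). */
static int vendor_count_children(struct cgroup_subsys_state *parent)
{
	struct cgroup_subsys_state *child;
	int n = 0;

	rcu_read_lock();
	css_for_each_child(child, parent)
		n++;
	rcu_read_unlock();
	return n;
}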

kernel/fork.c

@@ -139,6 +139,7 @@ static const char * const resident_page_types[] = {
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
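
The typical reason a module needs tasklist_lock is to hold the reader side while walking the global task list. A hedged sketch (helper name hypothetical):

#include <linux/printk.h>
#include <linux/sched/signal.h>	/* for_each_process() */
#include <linux/sched/task.h>	/* tasklist_lock */

/* Log every task in the system under the tasklist reader lock. */
static void vendor_dump_tasks(void)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	for_each_process(p)
		pr_debug("pid=%d comm=%s\n", p->pid, p->comm);
	read_unlock(&tasklist_lock);
}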

kernel/irq_work.c

@@ -111,7 +111,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	return true;
 #endif /* CONFIG_SMP */
 }
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 bool irq_work_needs_cpu(void)
 {
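
Exporting irq_work_queue_on() lets a module run a callback in hard-IRQ context on a CPU of its choosing. A hedged sketch (names hypothetical):

#include <linux/irq_work.h>

static void vendor_ipi_cb(struct irq_work *work)
{
	/* Runs in hard-IRQ context on the CPU the work was queued on. */
}

/* Initialized once, e.g. from module init. */
static struct irq_work vendor_ipi_work;

static void vendor_ipi_setup(void)
{
	init_irq_work(&vendor_ipi_work, vendor_ipi_cb);
}

/* Ask @cpu to run the callback; callable from any context. */
static bool vendor_kick_cpu(int cpu)
{
	return irq_work_queue_on(&vendor_ipi_work, cpu);
}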

kernel/kthread.c

@@ -455,6 +455,7 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 {
 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
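
With kthread_bind_mask() available, a module can restrict a freshly created kthread to an arbitrary CPU mask before it ever runs (the call is only legal before the thread's first wake-up). A hedged sketch (names hypothetical):

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>

/* Create a worker thread, pin it to @mask, then let it run. */
static struct task_struct *vendor_run_pinned(int (*fn)(void *), void *data,
					     const struct cpumask *mask)
{
	struct task_struct *t;

	t = kthread_create(fn, data, "vendor_worker");
	if (IS_ERR(t))
		return t;

	kthread_bind_mask(t, mask);	/* before the thread has ever run */
	wake_up_process(t);
	return t;
}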

kernel/sched/core.c

@@ -201,6 +201,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 			cpu_relax();
 	}
 }
+EXPORT_SYMBOL_GPL(__task_rq_lock);
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.

@@ -7086,7 +7087,9 @@ int in_sched_functions(unsigned long addr)
  * Every task in system belongs to this group at bootup.
  */
 struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
 LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
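
The types behind these symbols (struct rq, struct rq_flags, struct task_group) live in the scheduler's private header kernel/sched/sched.h, so any module use assumes building against that internal, non-stable header, as Android vendor scheduler modules commonly do. Under that assumption, a heavily hedged sketch of taking a task's runqueue lock (helper name hypothetical); root_task_group and task_groups would similarly let such a module walk the task-group list:

/* Assumes kernel/sched/sched.h is on the module's include path. */
#include "sched.h"

/* __task_rq_lock() requires the caller to already hold p->pi_lock. */
static void vendor_peek_rq(struct task_struct *p)
{
	struct rq_flags rf;
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	rq = __task_rq_lock(p, &rf);
	/* ... inspect rq/p state while both locks are held ... */
	__task_rq_unlock(rq, &rf);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}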

kernel/sched/cputime.c

@@ -19,6 +19,7 @@
  * compromise in place of having locks on each irq in account_system_time.
  */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
 static int sched_clock_irqtime;
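
struct irqtime is likewise defined in kernel/sched/sched.h (under CONFIG_IRQ_TIME_ACCOUNTING), so the same internal-header assumption applies. A hedged sketch of reading the IRQ time accounted to a CPU (helper name hypothetical):

/* Assumes kernel/sched/sched.h is on the include path and
 * CONFIG_IRQ_TIME_ACCOUNTING=y. */
#include "sched.h"

/* Nanoseconds charged to hard+soft IRQ context on @cpu so far.
 * irq_time_read() is a static inline in sched.h that dereferences the
 * per-cpu cpu_irqtime data, which is why the export is needed. */
static u64 vendor_cpu_irq_time_ns(int cpu)
{
	return irq_time_read(cpu);
}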

kernel/softirq.c

@@ -55,6 +55,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
 /*
  * active_softirqs -- per cpu, a mask of softirqs that are being handled,
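
Finally, exporting the per-cpu ksoftirqd pointer lets a module recognize a CPU's softirq daemon, e.g. to special-case it in scheduling-related decisions. A small hedged sketch (helper name hypothetical):

#include <linux/interrupt.h>	/* DECLARE_PER_CPU(..., ksoftirqd) */
#include <linux/percpu.h>
#include <linux/sched.h>

/* True if @p is the softirq daemon of @cpu. */
static bool vendor_is_ksoftirqd(struct task_struct *p, int cpu)
{
	return p == per_cpu(ksoftirqd, cpu);
}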