Revert "Revert "ANDROID: Sched: Add restricted vendor hooks for scheduler""
This reverts commit 627a494ef3.
Revert the commit to add the vendor hooks back; this is needed to
facilitate various scheduler value-adds.
Bug: 200103201
Change-Id: Ifbd7d235f863e01848c22b8185c4ec94da8c862c
Signed-off-by: Ashay Jaiswal <quic_ashayj@quicinc.com>
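For readers unfamiliar with the mechanism: each DECLARE_RESTRICTED_HOOK() in the diff below generates a register_trace_android_rvh_<name>() helper that vendor code can attach a handler to; restricted hooks register once and cannot be unregistered, so they are intended for built-in or never-unloaded vendor modules. The following is a minimal sketch of how a vendor module might hook android_rvh_sched_fork; the handler and module names are illustrative and not part of this patch.

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/sched.h>

/* First argument is the hook's private data pointer (NULL below);
 * the remaining arguments mirror TP_PROTO(struct task_struct *p). */
static void vendor_sched_fork_handler(void *unused, struct task_struct *p)
{
	/* Vendor-specific per-task setup would go here; keep it short and
	 * non-sleeping, since these hooks fire in scheduler hot paths. */
	pr_debug("vendor sched_fork hook: comm=%s pid=%d\n", p->comm, p->pid);
}

static int __init vendor_sched_hooks_init(void)
{
	return register_trace_android_rvh_sched_fork(vendor_sched_fork_handler,
						     NULL);
}
module_init(vendor_sched_hooks_init);
MODULE_LICENSE("GPL");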
@@ -105,6 +105,21 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_task_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up_success);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_wake_up_new_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_new_task_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_flush_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tick_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_starting);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_can_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_map_util_freq);
@@ -124,6 +124,67 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
const struct cpumask *new_mask, int *ret),
TP_ARGS(p, cpus_requested, new_mask, ret), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_set_task_cpu,
TP_PROTO(struct task_struct *p, unsigned int new_cpu),
TP_ARGS(p, new_cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up_success,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_wake_up_new_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_new_task_stats,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_flush_task,
TP_PROTO(struct task_struct *prev),
TP_ARGS(prev), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
TP_PROTO(struct rq *rq),
TP_ARGS(rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
TP_ARGS(prev, next, rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
TP_PROTO(int cpu),
TP_ARGS(cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_dying,
TP_PROTO(int cpu),
TP_ARGS(cpu), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
TP_PROTO(struct task_struct *curr, int cpu, s64 delta),
TP_ARGS(curr, cpu, delta), 1);

struct sched_entity;
DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
TP_PROTO(struct sched_entity *se, u64 vruntime),
TP_ARGS(se, vruntime), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_update_cpu_capacity,
TP_PROTO(int cpu, unsigned long *capacity),
TP_ARGS(cpu, capacity), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
TP_ARGS(p, rq, need_update), 1);

struct cgroup_taskset;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
TP_PROTO(struct cgroup_taskset *tset),
@@ -3065,6 +3065,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.nr_migrations++;
rseq_migrate(p);
perf_event_task_migrate(p);
trace_android_rvh_set_task_cpu(p, new_cpu);
}

__set_task_cpu(p, new_cpu);
@@ -4113,6 +4114,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);

trace_android_rvh_try_to_wake_up(p);

cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
@@ -4132,8 +4135,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
unlock:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out:
if (success)
if (success) {
trace_android_rvh_try_to_wake_up_success(p);
ttwu_stat(p, task_cpu(p), wake_flags);
}
preempt_enable();

return success;
@@ -4364,6 +4369,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;

trace_android_rvh_sched_fork(p);

__sched_fork(clone_flags, p);
/*
* We mark the process as NEW here. This guarantees that
@@ -4477,6 +4484,8 @@ void wake_up_new_task(struct task_struct *p)
struct rq_flags rf;
struct rq *rq;

trace_android_rvh_wake_up_new_task(p);

raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
@@ -4495,6 +4504,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
post_init_entity_util_avg(p);
trace_android_rvh_new_task_stats(p);

activate_task(rq, p, ENQUEUE_NOCLOCK);
trace_sched_wakeup_new(p);
@@ -4883,6 +4893,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* task and put them back on the free list.
*/
kprobe_flush_task(prev);
trace_android_rvh_flush_task(prev);

/* Task is done with its stack. */
put_task_stack(prev);
@@ -5241,6 +5252,7 @@ void scheduler_tick(void)

rq_lock(rq, &rf);

trace_android_rvh_tick_entry(rq);
update_rq_clock(rq);
thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
@@ -6293,6 +6305,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
rq->last_seen_need_resched_ns = 0;
#endif

trace_android_rvh_schedule(prev, next, rq);
if (likely(prev != next)) {
rq->nr_switches++;
/*
@@ -9171,6 +9184,7 @@ int sched_cpu_starting(unsigned int cpu)
sched_core_cpu_starting(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
trace_android_rvh_sched_cpu_starting(cpu);
return 0;
}
@@ -9244,6 +9258,8 @@ int sched_cpu_dying(unsigned int cpu)
}
rq_unlock_irqrestore(rq, &rf);

trace_android_rvh_sched_cpu_dying(cpu);

calc_load_migrate(rq);
update_max_interval();
hrtick_clear(rq);
@@ -4,6 +4,7 @@
*/
#include <linux/cpufreq_times.h>
#include "sched.h"
#include <trace/hooks/sched.h>

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -74,6 +75,8 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);

trace_android_rvh_account_irq(curr, cpu, delta);
}

static u64 irqtime_tick_accounted(u64 maxtime)
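As an illustration of the accounting-style hooks, a handler for android_rvh_account_irq only needs to match the TP_PROTO shown above; the per-CPU counter and names below are hypothetical, not part of this patch.

#include <linux/percpu.h>
#include <linux/sched.h>
#include <trace/hooks/sched.h>

static DEFINE_PER_CPU(u64, vendor_irq_time_ns);

/* Mirrors TP_PROTO(struct task_struct *curr, int cpu, s64 delta). */
static void vendor_account_irq_handler(void *unused, struct task_struct *curr,
				       int cpu, s64 delta)
{
	/* Accumulate IRQ/softirq time per CPU; this runs in the IRQ time
	 * accounting path, so keep it cheap and non-sleeping. */
	this_cpu_add(vendor_irq_time_ns, delta);
}

Registration follows the same register_trace_android_rvh_account_irq() pattern as the sched_fork example near the top.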
@@ -4120,7 +4120,10 @@ static inline int task_fits_capacity(struct task_struct *p, long capacity)

static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
if (!static_branch_unlikely(&sched_asym_cpucapacity))
bool need_update = true;

trace_android_rvh_update_misfit_status(p, rq, &need_update);
if (!static_branch_unlikely(&sched_asym_cpucapacity) || !need_update)
return;

if (!p || p->nr_cpus_allowed == 1) {
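The need_update out-parameter added here lets a registered handler veto the generic misfit bookkeeping. A hedged sketch of such a handler follows; the policy and names are made up for illustration, and struct rq is only passed through, never dereferenced.

#include <linux/sched.h>
#include <trace/hooks/sched.h>

/* Mirrors TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update). */
static void vendor_update_misfit_handler(void *unused, struct task_struct *p,
					 struct rq *rq, bool *need_update)
{
	/* Example policy: leave pinned kernel threads out of misfit
	 * tracking; real vendor code would apply its own heuristic. */
	if (p && (p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1)
		*need_update = false;
}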
@@ -4220,6 +4223,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
thresh >>= 1;

vruntime -= thresh;
trace_android_rvh_place_entity(se, vruntime);
}

/* ensure we never gain time by being placed backwards. */
@@ -8485,6 +8489,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
if (!capacity)
capacity = 1;

trace_android_rvh_update_cpu_capacity(cpu, &capacity);
cpu_rq(cpu)->cpu_capacity = capacity;
trace_sched_cpu_capacity_tp(cpu_rq(cpu));
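Likewise, android_rvh_update_cpu_capacity passes the capacity by pointer so a handler can adjust it before it is stored in cpu_rq(cpu)->cpu_capacity. A minimal sketch, with an invented floor value purely for illustration:

#include <trace/hooks/sched.h>

/* Mirrors TP_PROTO(int cpu, unsigned long *capacity). */
static void vendor_update_cpu_capacity_handler(void *unused, int cpu,
					       unsigned long *capacity)
{
	/* Illustrative policy: never report less than a small floor so a
	 * heavily throttled CPU still attracts some background load. */
	if (*capacity < 64)
		*capacity = 64;
}

It is registered once via register_trace_android_rvh_update_cpu_capacity(), in the same way as the earlier examples.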