ANDROID: sched: Add vendor hook for util-update related functions

Vendors may need to implement their own util tracking.

Bug: 201260585
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I973902e6ff82a85ecd029ac5a78692d629df1ebe
Author:  Rick Yiu
Date:    2022-01-07 18:31:06 +08:00
Parent:  54fc960b31
Commit:  e8762ad424
4 changed files with 41 additions and 5 deletions

--- a/drivers/android/vendor_hooks.c
+++ b/drivers/android/vendor_hooks.c

@@ -361,3 +361,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_free);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);

--- a/include/trace/hooks/sched.h
+++ b/include/trace/hooks/sched.h

@@ -416,6 +416,27 @@ DECLARE_HOOK(android_vh_dup_task_struct,
 DECLARE_HOOK(android_vh_account_task_time,
 	TP_PROTO(struct task_struct *p, struct rq *rq, int user_tick),
 	TP_ARGS(p, rq, user_tick));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_detach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
+	TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(now, cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq), 1);
 
 /* macro versions of hooks are no longer required */
 #endif /* _TRACE_HOOK_SCHED_H */
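For reference, a vendor module attaches to one of these restricted hooks via the register_trace_android_rvh_*() symbol that the hook machinery generates (this is what the EXPORT_TRACEPOINT_SYMBOL_GPL entries in vendor_hooks.c above make available to modules). A minimal sketch, where the probe and init function names are illustrative only:

#include <linux/module.h>
#include <trace/hooks/sched.h>

/*
 * Illustrative probe: invoked from update_load_avg() before the
 * scheduler's own PELT bookkeeping; the first argument is the cookie
 * passed at registration time.
 */
static void vendor_update_load_avg(void *data, u64 now,
				   struct cfs_rq *cfs_rq,
				   struct sched_entity *se)
{
	/* vendor-specific util tracking would go here */
}

static int __init vendor_util_init(void)
{
	/* Restricted hooks register once and cannot be unregistered. */
	return register_trace_android_rvh_update_load_avg(
			vendor_update_load_avg, NULL);
}
module_init(vendor_util_init);
MODULE_LICENSE("GPL");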

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -3807,6 +3807,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	else
 		se->avg.load_sum = 1;
 
+	trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
+
 	enqueue_load_avg(cfs_rq, se);
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
@@ -3836,6 +3838,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	 */
 	u32 divider = get_pelt_divider(&cfs_rq->avg);
 
+	trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
+
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
@@ -3872,6 +3876,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
+	trace_android_rvh_update_load_avg(now, cfs_rq, se);
+
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
@@ -3943,6 +3949,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
 	sync_entity_load_avg(se);
 
+	trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
+
 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
 	++cfs_rq->removed.nr;
 	cfs_rq->removed.util_avg	+= se->avg.util_avg;
@@ -8332,6 +8340,8 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 	bool decayed = false;
 	int cpu = cpu_of(rq);
 
+	trace_android_rvh_update_blocked_fair(rq);
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
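Each trace_android_rvh_*() call above fires before the scheduler's own util/load bookkeeping, so a registered probe observes the entity state at that point. As a hedged illustration (the names and the simple accounting scheme are hypothetical; real vendor code would also have to mirror PELT decay), the attach/detach pair could maintain a vendor-side utilization total:

#include <linux/atomic.h>
#include <linux/sched.h>

/* Hypothetical running total of util_avg across attached entities. */
static atomic_long_t vendor_util_total = ATOMIC_LONG_INIT(0);

static void vendor_attach_probe(void *data, struct cfs_rq *cfs_rq,
				struct sched_entity *se)
{
	atomic_long_add(se->avg.util_avg, &vendor_util_total);
}

static void vendor_detach_probe(void *data, struct cfs_rq *cfs_rq,
				struct sched_entity *se)
{
	atomic_long_sub(se->avg.util_avg, &vendor_util_total);
}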

--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c

@@ -180,8 +180,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *          = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int
-___update_load_sum(u64 now, struct sched_avg *sa,
+int ___update_load_sum(u64 now, struct sched_avg *sa,
 		  unsigned long load, unsigned long runnable, int running)
 {
 	u64 delta;
@@ -232,6 +231,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(___update_load_sum);
 
 /*
  * When syncing *_avg with *_sum, we must take into account the current
@@ -257,8 +257,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
  * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
  * if it's more convenient.
  */
-static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load)
+void ___update_load_avg(struct sched_avg *sa, unsigned long load)
 {
 	u32 divider = get_pelt_divider(sa);
 
@@ -269,6 +268,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
 	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
 }
+EXPORT_SYMBOL_GPL(___update_load_avg);
 
 /*
  * sched_entity:
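The pelt.c hunks drop static __always_inline from ___update_load_sum()/___update_load_avg() and export both, so a vendor module can reuse the stock PELT accumulation on a sched_avg it owns. A sketch under that assumption (the declarations are repeated here because pelt.c's internal header is not visible to modules; vendor_pelt_update() is a hypothetical name):

#include <linux/sched.h>

/* Now-exported PELT helpers, with signatures as in kernel/sched/pelt.c. */
extern int ___update_load_sum(u64 now, struct sched_avg *sa,
			      unsigned long load, unsigned long runnable,
			      int running);
extern void ___update_load_avg(struct sched_avg *sa, unsigned long load);

/*
 * Hypothetical: account time up to 'now' (ns) as runnable and running
 * on a vendor-owned sched_avg, and refresh the *_avg fields only when
 * ___update_load_sum() reports that a PELT period boundary was crossed.
 */
static void vendor_pelt_update(struct sched_avg *sa, u64 now)
{
	if (___update_load_sum(now, sa, 1, 1, 1))
		___update_load_avg(sa, 1);
}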