ANDROID: sched: Add vendor hook for util-update related functions
Vendors may need to implement their own util tracking.

Bug: 201260585
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I973902e6ff82a85ecd029ac5a78692d629df1ebe
Rickyiu committed Mar 29, 2022
1 parent ec7c9ea commit e3356ca
Showing 4 changed files with 41 additions and 5 deletions.
5 changes: 5 additions & 0 deletions drivers/android/vendor_hooks.c
@@ -388,3 +388,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh___get_user_pages_remote);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_user_pages);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_internal_get_user_pages_fast);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pin_user_pages);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
20 changes: 20 additions & 0 deletions include/trace/hooks/sched.h
@@ -405,6 +405,26 @@ DECLARE_HOOK(android_vh_setscheduler_uclamp,
 	TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value),
 	TP_ARGS(tsk, clamp_id, value));
 
+DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_detach_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
+	TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(now, cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_blocked_fair,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq), 1);
+
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_SCHED_H */
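Each DECLARE_RESTRICTED_HOOK() above generates a matching register_trace_android_rvh_<name>() function that a vendor module calls to attach a probe. Restricted vendor hooks in the Android common kernel are registered once and cannot be unregistered, so the registering module must never be unloaded. A minimal sketch of such a module follows, assuming the standard tracepoint probe convention (the first argument is the private-data pointer passed at registration); the handler and module names are illustrative, not part of this commit:

/* Hypothetical vendor module attaching to one of the new hooks. */
#include <linux/module.h>
#include <trace/hooks/sched.h>

static void vendor_attach_entity_load_avg(void *unused, struct cfs_rq *cfs_rq,
					  struct sched_entity *se)
{
	/* Fold se->avg into vendor-private util tracking here. */
}

static int __init vendor_util_init(void)
{
	/* Restricted hooks cannot be unregistered; never unload this module. */
	return register_trace_android_rvh_attach_entity_load_avg(
			vendor_attach_entity_load_avg, NULL);
}
module_init(vendor_util_init);
MODULE_LICENSE("GPL");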
10 changes: 10 additions & 0 deletions kernel/sched/fair.c
@@ -3763,6 +3763,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 			div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
 	}
 
+	trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
+
 	enqueue_load_avg(cfs_rq, se);
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
@@ -3786,6 +3788,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
+
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -3825,6 +3829,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
 	decayed |= propagate_entity_load_avg(se);
 
+	trace_android_rvh_update_load_avg(now, cfs_rq, se);
+
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
 
 		/*
@@ -3896,6 +3902,8 @@ static void remove_entity_load_avg(struct sched_entity *se)
 
 	sync_entity_load_avg(se);
 
+	trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
+
 	raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
 	++cfs_rq->removed.nr;
 	cfs_rq->removed.util_avg += se->avg.util_avg;
@@ -8108,6 +8116,8 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 	bool decayed = false;
 	int cpu = cpu_of(rq);
 
+	trace_android_rvh_update_blocked_fair(rq);
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
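As the call sites above show, the attach/detach hooks fire before the CFS aggregates are changed, android_rvh_update_load_avg fires after update_cfs_rq_load_avg() and load propagation, and android_rvh_update_blocked_fair fires at the top of __update_blocked_fair(). All of these paths run under the runqueue lock, so probes must be short and must not sleep. A hedged sketch of probes for the two update-path hooks (handler names are illustrative):

#include <trace/hooks/sched.h>

/* Runs under the rq lock; "now" is the PELT clock used to decay the signals. */
static void vendor_update_load_avg(void *unused, u64 now,
				   struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* Sample the freshly decayed se->avg.util_avg here. */
}

/* Called before the blocked load of rq's cfs_rq hierarchy is re-decayed. */
static void vendor_update_blocked_fair(void *unused, struct rq *rq)
{
}

static int vendor_register_update_probes(void)
{
	int ret;

	ret = register_trace_android_rvh_update_load_avg(vendor_update_load_avg,
							 NULL);
	if (ret)
		return ret;
	return register_trace_android_rvh_update_blocked_fair(
			vendor_update_blocked_fair, NULL);
}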
11 changes: 6 additions & 5 deletions kernel/sched/pelt.c
@@ -30,6 +30,7 @@
 
 int pelt_load_avg_period = PELT32_LOAD_AVG_PERIOD;
 int pelt_load_avg_max = PELT32_LOAD_AVG_MAX;
+EXPORT_SYMBOL_GPL(pelt_load_avg_max);
 const u32 *pelt_runnable_avg_yN_inv = pelt32_runnable_avg_yN_inv;
 
 static int __init set_pelt(char *str)
@@ -216,9 +217,8 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *          = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int
-___update_load_sum(u64 now, struct sched_avg *sa,
-		  unsigned long load, unsigned long runnable, int running)
+int ___update_load_sum(u64 now, struct sched_avg *sa,
+		       unsigned long load, unsigned long runnable, int running)
 {
 	u64 delta;
 
@@ -268,6 +268,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(___update_load_sum);
 
 /*
  * When syncing *_avg with *_sum, we must take into account the current
@@ -293,8 +294,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
  * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
  * if it's more convenient.
  */
-static __always_inline void
-___update_load_avg(struct sched_avg *sa, unsigned long load)
+void ___update_load_avg(struct sched_avg *sa, unsigned long load)
 {
 	u32 divider = get_pelt_divider(sa);
 
@@ -305,6 +305,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
 	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
 }
+EXPORT_SYMBOL_GPL(___update_load_avg);
 
 /*
  * sched_entity:
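These pelt.c changes support vendor util tracking directly: ___update_load_sum() and ___update_load_avg() lose static __always_inline so they can be exported with EXPORT_SYMBOL_GPL(), and pelt_load_avg_max is exported alongside them, letting a module reuse the kernel's PELT accumulation math for its own signal. Below is a sketch under the assumption that the module declares the prototypes itself (no in-tree header is shown exposing them here); all vendor-side names are illustrative:

#include <linux/sched.h>	/* struct sched_avg */

/* Assumed extern declarations mirroring the now-exported functions above. */
extern int ___update_load_sum(u64 now, struct sched_avg *sa,
			      unsigned long load, unsigned long runnable,
			      int running);
extern void ___update_load_avg(struct sched_avg *sa, unsigned long load);

static struct sched_avg vendor_avg;	/* per-CPU in a real implementation */

/*
 * Accumulate a running/not-running sample with the scheduler's own
 * geometric series; mirrors how pelt.c drives signals like rq->avg_rt.
 */
static void vendor_track_util(u64 now, int running)
{
	if (___update_load_sum(now, &vendor_avg, running, running, running))
		___update_load_avg(&vendor_avg, 1);
}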
