sched/core: Skip sched_core_fork/free() when core sched is disabled
As __put_task_struct() and copy_process() are hot-path functions, the
unconditional calls to sched_core_fork()/sched_core_free() add overhead
when core sched is disabled, so skip them when core sched is disabled.

Signed-off-by: Cruz Zhao <CruzZhao@linux.alibaba.com>
Cruz Zhao authored and intel-lab-lkp committed Apr 24, 2022
1 parent a658353 commit 211b986
Showing 3 changed files with 16 additions and 13 deletions.
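
For context, here is a minimal, illustrative sketch (not part of this patch; the key and helper names are hypothetical) of the jump-label pattern the sched_core_disabled() check relies on. While the static key is false, static_branch_unlikely() is patched to a straight-line no-op, so the check costs essentially nothing on the hot path:

#include <linux/jump_label.h>

/* Hypothetical key and work function, named for illustration only. */
DEFINE_STATIC_KEY_FALSE(my_feature_enabled);

static void expensive_feature_work(void)
{
	/* stands in for the real per-task work */
}

static inline bool my_feature_disabled(void)
{
	/* Compiles to a patched no-op branch while the key is false. */
	return !static_branch_unlikely(&my_feature_enabled);
}

static void hot_path(void)
{
	/* The call is skipped almost for free when the feature is off. */
	if (!my_feature_disabled())
		expensive_feature_work();
}

The key itself would be toggled elsewhere with static_branch_enable()/static_branch_disable().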
10 changes: 10 additions & 0 deletions include/linux/sched.h
@@ -2397,9 +2397,19 @@ extern void sched_core_free(struct task_struct *tsk);
 extern void sched_core_fork(struct task_struct *p);
 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
 				unsigned long uaddr);
+DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
+static inline bool sched_core_disabled(void)
+{
+	return !static_branch_unlikely(&__sched_core_enabled);
+}
+
 #else
 static inline void sched_core_free(struct task_struct *tsk) { }
 static inline void sched_core_fork(struct task_struct *p) { }
+static inline bool sched_core_disabled(void)
+{
+	return true;
+}
 #endif
 
 #endif
9 changes: 6 additions & 3 deletions kernel/fork.c
@@ -845,7 +845,8 @@ void __put_task_struct(struct task_struct *tsk)
 	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 	put_signal_struct(tsk->signal);
-	sched_core_free(tsk);
+	if (!sched_core_disabled())
+		sched_core_free(tsk);
 	free_task(tsk);
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
@@ -2383,7 +2384,8 @@ static __latent_entropy struct task_struct *copy_process(
 
 	klp_copy_process(p);
 
-	sched_core_fork(p);
+	if (!sched_core_disabled())
+		sched_core_fork(p);
 
 	spin_lock(&current->sighand->siglock);
 
@@ -2471,7 +2473,8 @@ static __latent_entropy struct task_struct *copy_process(
 	return p;
 
 bad_fork_cancel_cgroup:
-	sched_core_free(p);
+	if (!sched_core_disabled())
+		sched_core_free(p);
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p, args);
10 changes: 0 additions & 10 deletions kernel/sched/sched.h
@@ -1150,11 +1150,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return !static_branch_unlikely(&__sched_core_enabled);
-}
-
 /*
  * Be careful with this function; not for general use. The return value isn't
  * stable unless you actually hold a relevant rq->__lock.
@@ -1252,11 +1247,6 @@ static inline bool sched_core_enabled(struct rq *rq)
 	return false;
 }
 
-static inline bool sched_core_disabled(void)
-{
-	return true;
-}
-
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
