Skip to content

Commit

Permalink
sched: Make const-safe
Browse files Browse the repository at this point in the history
With a modified container_of() that preserves constness, the compiler
finds some variables which should have been const and some functions
which should have had their arguments marked as const.  No change to
generated code.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
  • Loading branch information
Matthew Wilcox (Oracle) authored and intel-lab-lkp committed Dec 9, 2022
1 parent d6962c4 commit de0d969
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 15 deletions.
8 changes: 5 additions & 3 deletions kernel/sched/core.c
Expand Up @@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p)
static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
Expand All @@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
*/

/* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
static inline bool prio_less(const struct task_struct *a,
const struct task_struct *b, bool in_fi)
{

int pa = __task_prio(a), pb = __task_prio(b);
Expand All @@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
return false;
}

static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
static inline bool __sched_core_less(const struct task_struct *a,
const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
Expand Down
16 changes: 9 additions & 7 deletions kernel/sched/fair.c
Expand Up @@ -453,7 +453,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
return se->parent;
}
Expand Down Expand Up @@ -580,8 +580,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}

static inline bool entity_before(struct sched_entity *a,
struct sched_entity *b)
static inline bool entity_before(const struct sched_entity *a,
const struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
Expand Down Expand Up @@ -11716,7 +11716,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
Expand All @@ -11741,11 +11742,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}

bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi)
{
struct rq *rq = task_rq(a);
struct sched_entity *sea = &a->se;
struct sched_entity *seb = &b->se;
const struct sched_entity *sea = &a->se;
const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;
Expand Down
11 changes: 6 additions & 5 deletions kernel/sched/sched.h
Expand Up @@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)

#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
Expand All @@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
/*
* Tells if entity @a should preempt entity @b.
*/
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
const struct sched_dl_entity *b)
{
return dl_entity_is_special(a) ||
dl_time_before(a->deadline, b->deadline);
Expand Down Expand Up @@ -1236,7 +1236,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}

/* Declared here, defined in fair.c; compares tasks by CFS priority for
 * core scheduling.  Tasks are only read, hence the const qualifiers. */
bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool fi);

/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
Expand Down Expand Up @@ -1415,7 +1416,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
return se->cfs_rq;
}
Expand Down

0 comments on commit de0d969

Please sign in to comment.