@@ -1360,9 +1360,9 @@ static bool scx_dsq_priq_less(struct rb_node *node_a,
 				 const struct rb_node *node_b)
 {
 	const struct task_struct *a =
-		container_of(node_a, struct task_struct, scx.dsq_node.priq);
+		container_of(node_a, struct task_struct, scx.dsq_priq);
 	const struct task_struct *b =
-		container_of(node_b, struct task_struct, scx.dsq_node.priq);
+		container_of(node_b, struct task_struct, scx.dsq_priq);
 
 	return time_before64(a->scx.dsq_vtime, b->scx.dsq_vtime);
 }
@@ -1378,9 +1378,9 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 {
 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
 
-	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_node.list));
-	WARN_ON_ONCE((p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) ||
-		     !RB_EMPTY_NODE(&p->scx.dsq_node.priq));
+	WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node));
+	WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) ||
+		     !RB_EMPTY_NODE(&p->scx.dsq_priq));
 
 	if (!is_local) {
 		raw_spin_lock(&dsq->lock);
@@ -1419,21 +1419,21 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 			scx_ops_error("DSQ ID 0x%016llx already had FIFO-enqueued tasks",
 				      dsq->id);
 
-		p->scx.dsq_node.flags |= SCX_TASK_DSQ_ON_PRIQ;
-		rb_add(&p->scx.dsq_node.priq, &dsq->priq, scx_dsq_priq_less);
+		p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ;
+		rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less);
 
 		/*
 		 * Find the previous task and insert after it on the list so
 		 * that @dsq->list is vtime ordered.
 		 */
-		rbp = rb_prev(&p->scx.dsq_node.priq);
+		rbp = rb_prev(&p->scx.dsq_priq);
 		if (rbp) {
 			struct task_struct *prev =
 				container_of(rbp, struct task_struct,
-					     scx.dsq_node.priq);
-			list_add(&p->scx.dsq_node.list, &prev->scx.dsq_node.list);
+					     scx.dsq_priq);
+			list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node);
 		} else {
-			list_add(&p->scx.dsq_node.list, &dsq->list);
+			list_add(&p->scx.dsq_list.node, &dsq->list);
 		}
 	} else {
 		/* a FIFO DSQ shouldn't be using PRIQ enqueuing */
@@ -1442,9 +1442,9 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 				      dsq->id);
 
 		if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT))
-			list_add(&p->scx.dsq_node.list, &dsq->list);
+			list_add(&p->scx.dsq_list.node, &dsq->list);
 		else
-			list_add_tail(&p->scx.dsq_node.list, &dsq->list);
+			list_add_tail(&p->scx.dsq_list.node, &dsq->list);
 	}
 
 	dsq_mod_nr(dsq, 1);
@@ -1487,18 +1487,18 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
 static void task_unlink_from_dsq(struct task_struct *p,
 				 struct scx_dispatch_q *dsq)
 {
-	if (p->scx.dsq_node.flags & SCX_TASK_DSQ_ON_PRIQ) {
-		rb_erase(&p->scx.dsq_node.priq, &dsq->priq);
-		RB_CLEAR_NODE(&p->scx.dsq_node.priq);
-		p->scx.dsq_node.flags &= ~SCX_TASK_DSQ_ON_PRIQ;
+	if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) {
+		rb_erase(&p->scx.dsq_priq, &dsq->priq);
+		RB_CLEAR_NODE(&p->scx.dsq_priq);
+		p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ;
 	}
 
-	list_del_init(&p->scx.dsq_node.list);
+	list_del_init(&p->scx.dsq_list.node);
 }
 
 static bool task_linked_on_dsq(struct task_struct *p)
 {
-	return !list_empty(&p->scx.dsq_node.list);
+	return !list_empty(&p->scx.dsq_list.node);
 }
 
 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
@@ -1523,8 +1523,8 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	raw_spin_lock(&dsq->lock);
 
 	/*
-	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_node
-	 * can't change underneath us.
+	 * Now that we hold @dsq->lock, @p->holding_cpu and @p->scx.dsq_* can't
+	 * change underneath us.
 	 */
 	if (p->scx.holding_cpu < 0) {
 		/* @p must still be on @dsq, dequeue */
@@ -2034,7 +2034,7 @@ static void consume_local_task(struct rq *rq, struct scx_dispatch_q *dsq,
 	/* @dsq is locked and @p is on this rq */
 	WARN_ON_ONCE(p->scx.holding_cpu >= 0);
 	task_unlink_from_dsq(p, dsq);
-	list_add_tail(&p->scx.dsq_node.list, &rq->scx.local_dsq.list);
+	list_add_tail(&p->scx.dsq_list.node, &rq->scx.local_dsq.list);
 	dsq_mod_nr(dsq, -1);
 	dsq_mod_nr(&rq->scx.local_dsq, 1);
 	p->scx.dsq = &rq->scx.local_dsq;
@@ -2109,7 +2109,7 @@ static bool consume_dispatch_q(struct rq *rq, struct rq_flags *rf,
 
 	raw_spin_lock(&dsq->lock);
 
-	list_for_each_entry(p, &dsq->list, scx.dsq_node.list) {
+	list_for_each_entry(p, &dsq->list, scx.dsq_list.node) {
 		struct rq *task_rq = task_rq(p);
 
 		if (rq == task_rq) {
@@ -2628,7 +2628,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p)
 static struct task_struct *first_local_task(struct rq *rq)
 {
 	return list_first_entry_or_null(&rq->scx.local_dsq.list,
-					struct task_struct, scx.dsq_node.list);
+					struct task_struct, scx.dsq_list.node);
 }
 
 static struct task_struct *pick_next_task_scx(struct rq *rq)
@@ -3309,8 +3309,8 @@ void init_scx_entity(struct sched_ext_entity *scx)
 	 */
 	memset(scx, 0, offsetof(struct sched_ext_entity, tasks_node));
 
-	INIT_LIST_HEAD(&scx->dsq_node.list);
-	RB_CLEAR_NODE(&scx->dsq_node.priq);
+	INIT_LIST_HEAD(&scx->dsq_list.node);
+	RB_CLEAR_NODE(&scx->dsq_priq);
 	scx->sticky_cpu = -1;
 	scx->holding_cpu = -1;
 	INIT_LIST_HEAD(&scx->runnable_node);
@@ -4160,7 +4160,7 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
 		  jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies));
 	dump_line(s, "  scx_state/flags=%u/0x%x dsq_flags=0x%x ops_state/qseq=%lu/%lu",
 		  scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
-		  p->scx.dsq_node.flags, ops_state & SCX_OPSS_STATE_MASK,
+		  p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
 		  ops_state >> SCX_OPSS_QSEQ_SHIFT);
 	dump_line(s, "  sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu",
 		  p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf,
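
For reference, a minimal sketch of the sched_ext_entity layout this rename implies: the old embedded dsq_node aggregate (list/priq/flags) is split into a dsq_list wrapper carrying the FIFO linkage plus standalone dsq_priq and dsq_flags fields. The wrapper type name, member types, and ordering below are assumptions for illustration, not the exact upstream definitions.

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct scx_dispatch_q;				/* DSQ a task can be queued on */

/* hypothetical wrapper matching the p->scx.dsq_list.node accesses above */
struct scx_dsq_list_node {
	struct list_head	node;		/* linkage on a DSQ's FIFO list */
};

struct sched_ext_entity {
	struct scx_dispatch_q	*dsq;
	struct scx_dsq_list_node dsq_list;	/* was dsq_node.list */
	struct rb_node		dsq_priq;	/* was dsq_node.priq; vtime-ordered PRIQ node */
	u32			dsq_flags;	/* was dsq_node.flags, e.g. SCX_TASK_DSQ_ON_PRIQ */
	u64			dsq_vtime;	/* key compared by scx_dsq_priq_less() */
	/* ... remaining members untouched by this diff ... */
};

With the fields split out, the FIFO linkage sits in its own small struct while the rbtree node and flags live directly in the entity, so call sites such as task_unlink_from_dsq() and init_scx_entity() address each piece independently instead of reaching through dsq_node.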