@@ -490,7 +490,7 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
 	action[len++] = info->hwlrca_hi;
 
 	for (i = 1; i < q->width; ++i) {
-		struct xe_lrc *lrc = q->lrc + i;
+		struct xe_lrc *lrc = q->lrc[i];
 
 		action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
 		action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
@@ -527,7 +527,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct xe_lrc *lrc = q->lrc;
+	struct xe_lrc *lrc = q->lrc[0];
 	struct guc_ctxt_registration_info info;
 
 	xe_assert(xe, !exec_queue_registered(q));
@@ -586,7 +586,7 @@ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
 	unsigned int sleep_period_ms = 1;
 
 #define AVAILABLE_SPACE \
@@ -614,7 +614,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
 	u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
 
 	if (wq_wait_for_space(q, wq_space_until_wrap(q)))
@@ -634,7 +634,7 @@ static void wq_item_append(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
 #define WQ_HEADER_SIZE 4	/* Includes 1 LRC address too */
 	u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
 	u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
@@ -650,12 +650,12 @@ static void wq_item_append(struct xe_exec_queue *q)
 
 	wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
 		FIELD_PREP(WQ_LEN_MASK, len_dw);
-	wqi[i++] = xe_lrc_descriptor(q->lrc);
+	wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
 	wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
-		FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
+		FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
 	wqi[i++] = 0;
 	for (j = 1; j < q->width; ++j) {
-		struct xe_lrc *lrc = q->lrc + j;
+		struct xe_lrc *lrc = q->lrc[j];
 
 		wqi[i++] = lrc->ring.tail / sizeof(u64);
 	}
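Note (a reading aid that only restates what the hunk above assembles): the multi-LRC work-queue item is WQ_HEADER_SIZE (4) header dwords plus one ring-tail dword per child LRC, which is exactly wqi_size = (q->width + WQ_HEADER_SIZE - 1) * sizeof(u32). The dword layout written into wqi[] is:

	dw0:  FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | FIELD_PREP(WQ_LEN_MASK, len_dw)
	dw1:  xe_lrc_descriptor(q->lrc[0])
	dw2:  FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64))
	dw3:  0
	dw4+: q->lrc[j]->ring.tail / sizeof(u64), one dword for each j = 1 .. q->width - 1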
@@ -670,7 +670,7 @@ static void wq_item_append(struct xe_exec_queue *q)
 
 	xe_device_wmb(xe);
 
-	map = xe_lrc_parallel_map(q->lrc);
+	map = xe_lrc_parallel_map(q->lrc[0]);
 	parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
 }
 
@@ -679,7 +679,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct xe_lrc *lrc = q->lrc;
+	struct xe_lrc *lrc = q->lrc[0];
 	u32 action[3];
 	u32 g2h_len = 0;
 	u32 num_g2h = 0;
@@ -1236,7 +1236,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
			    get_submit_wq(guc),
-			    q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
+			    q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
			    q->name, gt_to_xe(q->gt)->drm.dev);
 	if (err)
@@ -1464,7 +1464,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
 				ban = true;
 			}
 		} else if (xe_exec_queue_is_lr(q) &&
-			   (xe_lrc_ring_head(q->lrc) != xe_lrc_ring_tail(q->lrc))) {
+			   (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
 			ban = true;
 		}
 
@@ -1529,7 +1529,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
 
 		trace_xe_exec_queue_resubmit(q);
 		for (i = 0; i < q->width; ++i)
-			xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
+			xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
 		xe_sched_resubmit_jobs(sched);
 	}
 
@@ -1775,7 +1775,7 @@ guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
 {
 	struct xe_guc *guc = exec_queue_to_guc(q);
 	struct xe_device *xe = guc_to_xe(guc);
-	struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
 	int i;
 
 	snapshot->guc.wqi_head = q->guc->wqi_head;
@@ -1855,7 +1855,7 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
 
 	if (snapshot->lrc) {
 		for (i = 0; i < q->width; ++i) {
-			struct xe_lrc *lrc = q->lrc + i;
+			struct xe_lrc *lrc = q->lrc[i];
 
 			snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
 		}
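For context on the hunks as a whole: every change is the same mechanical substitution. q->lrc becomes q->lrc[0] for the first LRC, q->lrc + i becomes q->lrc[i], and '.' member access becomes '->'. That pattern implies the lrc member of struct xe_exec_queue changed from an array of xe_lrc structs to an array of xe_lrc pointers. Below is a minimal compilable sketch of that shape only, not the real driver headers: u32, width, and the ring fields are stand-ins, and only the before/after type of lrc is the point.

typedef unsigned int u32;	/* stand-in for the kernel's u32 */

struct xe_lrc {
	struct {
		u32 tail;	/* read as lrc->ring.tail in the hunks above */
		u32 size;	/* read as q->lrc[0]->ring.size in guc_exec_queue_init() */
	} ring;
};

/* Before: LRCs embedded in the queue, so element i is q->lrc + i and
 * members are reached with '.', e.g. q->lrc[i].ring.tail.
 */
struct xe_exec_queue_before {
	u32 width;			/* number of LRCs (engine width) */
	struct xe_lrc lrc[];		/* flexible array of structs */
};

/* After: the queue stores pointers, so element i is q->lrc[i] and
 * members are reached with '->', e.g. q->lrc[i]->ring.tail.
 */
struct xe_exec_queue_after {
	u32 width;
	struct xe_lrc *lrc[];		/* flexible array of pointers */
};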