@@ -67,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)

 	/* Possibly override number of tile based on configuration register */
 	if (!xe->info.skip_mtcfg) {
-		struct xe_gt *gt = xe_root_mmio_gt(xe);
+		struct xe_mmio *mmio = xe_root_tile_mmio(xe);
 		u8 tile_count;
 		u32 mtcfg;

 		/*
 		 * Although the per-tile mmio regs are not yet initialized, this
-		 * is fine as it's going to the root gt, that's guaranteed to be
-		 * initialized earlier in xe_mmio_init()
+		 * is fine as it's going to the root tile's mmio, that's
+		 * guaranteed to be initialized earlier in xe_mmio_init()
 		 */
-		mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+		mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
 		tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

 		if (tile_count < xe->info.tile_count) {
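
The hunk above reads MTCFG as a 64-bit value and decodes the tile count from a bit field, where the hardware encodes "tiles - 1". A minimal standalone sketch of that decode, using a hypothetical mask since the driver's real TILE_COUNT definition lives in its register headers:

```c
/*
 * Standalone sketch of the TILE_COUNT decode. The real driver uses the
 * kernel's REG_FIELD_GET(); HYP_TILE_COUNT_MASK below is hypothetical,
 * chosen only to illustrate the "field value + 1" encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define HYP_TILE_COUNT_MASK 0x0000000fu /* hypothetical field position */

static uint8_t decode_tile_count(uint64_t mtcfg)
{
	/* FIELD_GET-style extraction: shift the masked bits down to bit 0 */
	uint32_t field = (mtcfg & HYP_TILE_COUNT_MASK) >> __builtin_ctz(HYP_TILE_COUNT_MASK);

	/* hardware encodes "number of tiles - 1", hence the +1 */
	return field + 1;
}

int main(void)
{
	printf("tiles = %u\n", decode_tile_count(0x1)); /* prints "tiles = 2" */
	return 0;
}
```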
@@ -187,116 +187,111 @@ int xe_mmio_init(struct xe_device *xe)
 	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
 }

-static void mmio_flush_pending_writes(struct xe_gt *gt)
+static void mmio_flush_pending_writes(struct xe_mmio *mmio)
 {
 #define DUMMY_REG_OFFSET	0x130030
-	struct xe_tile *tile = gt_to_tile(gt);
 	int i;

-	if (tile->xe->info.platform != XE_LUNARLAKE)
+	if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
 		return;

 	/* 4 dummy writes */
 	for (i = 0; i < 4; i++)
-		writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+		writel(0, mmio->regs + DUMMY_REG_OFFSET);
 }

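For readers following the refactor: the new parameter type is dereferenced as mmio->regs, mmio->tile and mmio->sriov_vf_gt throughout this patch. A minimal sketch of the state struct xe_mmio must therefore carry, inferred from this diff alone (the real definition lives in the driver headers and may hold more, e.g. the region size):

```c
/* Sketch only; named _sketch to avoid claiming this is the real layout. */
struct xe_mmio_sketch {
	struct xe_tile *tile;      /* owning tile, used for asserts and logging */
	void __iomem *regs;        /* iomapped base of this tile's register range */
	struct xe_gt *sriov_vf_gt; /* non-NULL when running as an SR-IOV VF;
				    * routes non-VF-accessible registers through
				    * the VF accessors instead of raw MMIO */
};
```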
-u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
 {
-	struct xe_tile *tile = gt_to_tile(gt);
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
 	u8 val;

 	/* Wa_15015404425 */
-	mmio_flush_pending_writes(gt);
+	mmio_flush_pending_writes(mmio);

-	val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
-	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+	val = readb(mmio->regs + addr);
+	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

 	return val;
 }

-u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
 {
-	struct xe_tile *tile = gt_to_tile(gt);
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
 	u16 val;

 	/* Wa_15015404425 */
-	mmio_flush_pending_writes(gt);
+	mmio_flush_pending_writes(mmio);

-	val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
-	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+	val = readw(mmio->regs + addr);
+	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

 	return val;
 }

-void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
 {
-	struct xe_tile *tile = gt_to_tile(gt);
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

-	trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
+	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

-	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
-		xe_gt_sriov_vf_write32(gt, reg, val);
+	if (!reg.vf && mmio->sriov_vf_gt)
+		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
 	else
-		writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+		writel(val, mmio->regs + addr);
 }

-u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
 {
-	struct xe_tile *tile = gt_to_tile(gt);
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
 	u32 val;

 	/* Wa_15015404425 */
-	mmio_flush_pending_writes(gt);
+	mmio_flush_pending_writes(mmio);

-	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
-		val = xe_gt_sriov_vf_read32(gt, reg);
+	if (!reg.vf && mmio->sriov_vf_gt)
+		val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
 	else
-		val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+		val = readl(mmio->regs + addr);

-	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+	trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));

 	return val;
 }

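Note that the function bodies below still call the unprefixed accessors (e.g. xe_mmio_read32(mmio, reg) inside the rmw helper), so the unprefixed names presumably survive as thin wrappers over the renamed __xe_mmio_*() symbols. A hypothetical sketch of what such header-side wrappers could look like; the mechanism in the actual tree may differ (for instance, to also accept GT pointers during the transition):

```c
/*
 * Hypothetical xe_mmio.h-side wrappers, inferred from call sites in this
 * diff; not confirmed by the patch itself.
 */
#define xe_mmio_read32(p, reg)		__xe_mmio_read32(p, reg)
#define xe_mmio_write32(p, reg, val)	__xe_mmio_write32(p, reg, val)
```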
-u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
 {
 	u32 old, reg_val;

-	old = xe_mmio_read32(gt, reg);
+	old = xe_mmio_read32(mmio, reg);
 	reg_val = (old & ~clr) | set;
-	xe_mmio_write32(gt, reg, reg_val);
+	xe_mmio_write32(mmio, reg, reg_val);

 	return old;
 }

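A usage sketch for the read-modify-write helper: clear one bit and set another in a single call while capturing the pre-modification value. The register and masks are hypothetical:

```c
/* HYP_CTRL_REG and the bit choices are made up for illustration. */
static void example_toggle(struct xe_mmio *mmio)
{
	const struct xe_reg HYP_CTRL_REG = { .addr = 0x1234 };
	u32 old;

	/* old = reg; reg = (reg & ~BIT(0)) | BIT(1); */
	old = xe_mmio_rmw32(mmio, HYP_CTRL_REG, BIT(0), BIT(1));

	/* 'old' can be kept around to restore the register later */
	(void)old;
}
```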
-int xe_mmio_write32_and_verify(struct xe_gt *gt,
-			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
+int __xe_mmio_write32_and_verify(struct xe_mmio *mmio,
+				 struct xe_reg reg, u32 val, u32 mask, u32 eval)
 {
 	u32 reg_val;

-	xe_mmio_write32(gt, reg, val);
-	reg_val = xe_mmio_read32(gt, reg);
+	xe_mmio_write32(mmio, reg, val);
+	reg_val = xe_mmio_read32(mmio, reg);

 	return (reg_val & mask) != eval ? -EINVAL : 0;
 }

-bool xe_mmio_in_range(const struct xe_gt *gt,
-		      const struct xe_mmio_range *range,
-		      struct xe_reg reg)
+bool __xe_mmio_in_range(const struct xe_mmio *mmio,
+			const struct xe_mmio_range *range,
+			struct xe_reg reg)
 {
-	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

 	return range && addr >= range->start && addr <= range->end;
 }

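A usage sketch for the range check, e.g. gating special handling of certain register offsets; the bounds are hypothetical:

```c
/* The range below is invented; real ranges come from the hardware docs. */
static bool example_needs_fixup(struct xe_mmio *mmio, struct xe_reg reg)
{
	static const struct xe_mmio_range hyp_range = {
		.start = 0x8000,
		.end   = 0x8fff,
	};

	/* true iff reg's (adjusted) offset falls inside [start, end] */
	return xe_mmio_in_range(mmio, &hyp_range, reg);
}
```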
 /**
  * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
- * @gt: MMIO target GT
+ * @mmio: MMIO target
  * @reg: register to read value from
  *
  * Although Intel GPUs have some 64-bit registers, the hardware officially
@@ -316,36 +311,36 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
  *
  * Returns the value of the 64-bit register.
  */
-u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
 {
 	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
 	u32 ldw, udw, oldudw, retries;

-	reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
-	reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+	reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+	reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);

 	/* we shouldn't adjust just one register address */
-	xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
+	xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);

-	oldudw = xe_mmio_read32(gt, reg_udw);
+	oldudw = xe_mmio_read32(mmio, reg_udw);
 	for (retries = 5; retries; --retries) {
-		ldw = xe_mmio_read32(gt, reg);
-		udw = xe_mmio_read32(gt, reg_udw);
+		ldw = xe_mmio_read32(mmio, reg);
+		udw = xe_mmio_read32(mmio, reg_udw);

 		if (udw == oldudw)
 			break;

 		oldudw = udw;
 	}

-	xe_gt_WARN(gt, retries == 0,
-		   "64-bit read of %#x did not stabilize\n", reg.addr);
+	drm_WARN(&mmio->tile->xe->drm, retries == 0,
+		 "64-bit read of %#x did not stabilize\n", reg.addr);

 	return (u64)udw << 32 | ldw;
 }

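A usage sketch for the coherent 64-bit read: sampling a hypothetical free-running counter whose upper dword may carry between the two 32-bit reads; the retry loop above absorbs exactly that race:

```c
/* HYP_COUNTER_LDW is invented; pass the low-dword offset of the pair. */
static u64 example_read_counter(struct xe_mmio *mmio)
{
	const struct xe_reg HYP_COUNTER_LDW = { .addr = 0x5678 };

	/* reads 0x5678 (low dword) and 0x567c (high dword) coherently */
	return xe_mmio_read64_2x32(mmio, HYP_COUNTER_LDW);
}
```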
-static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
-			    u32 *out_val, bool atomic, bool expect_match)
+static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+			      u32 *out_val, bool atomic, bool expect_match)
 {
 	ktime_t cur = ktime_get_raw();
 	const ktime_t end = ktime_add_us(cur, timeout_us);
@@ -355,7 +350,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
 	bool check;

 	for (;;) {
-		read = xe_mmio_read32(gt, reg);
+		read = xe_mmio_read32(mmio, reg);

 		check = (read & mask) == val;
 		if (!expect_match)
@@ -381,7 +376,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
 	}

 	if (ret != 0) {
-		read = xe_mmio_read32(gt, reg);
+		read = xe_mmio_read32(mmio, reg);

 		check = (read & mask) == val;
 		if (!expect_match)
@@ -399,7 +394,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v

 /**
  * xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
  * @reg: register to read value from
  * @mask: mask to be applied to the value read from the register
  * @val: desired value after applying the mask
@@ -416,15 +411,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
  * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
  * it is possible that this function succeeds even after @timeout_us has passed.
  */
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
-		   u32 *out_val, bool atomic)
+int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+		     u32 *out_val, bool atomic)
 {
-	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+	return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
 }

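A usage sketch for the wait helper: poll a hypothetical status register for a ready bit with a 10ms timeout from a sleepable context (atomic = false). On a failed wait the function presumably reports -ETIMEDOUT, following the usual kernel convention, with the last observed value in *out_val:

```c
/* HYP_STATUS_REG and the ready bit are invented for illustration. */
static int example_wait_ready(struct xe_mmio *mmio)
{
	const struct xe_reg HYP_STATUS_REG = { .addr = 0x9abc };
	u32 val;
	int ret;

	/* wait until (reg & BIT(0)) == BIT(0), i.e. the ready bit is set */
	ret = xe_mmio_wait32(mmio, HYP_STATUS_REG, BIT(0), BIT(0),
			     10 * USEC_PER_MSEC, &val, false);
	if (ret)
		return ret; /* 'val' holds the last value read before giving up */

	return 0;
}
```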
 /**
  * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
  * @reg: register to read value from
  * @mask: mask to be applied to the value read from the register
  * @val: value not to be matched after applying the mask
@@ -435,8 +430,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
  * This function works exactly like xe_mmio_wait32() with the exception that
  * @val is expected not to be matched.
  */
-int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
-		       u32 *out_val, bool atomic)
+int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+			 u32 *out_val, bool atomic)
 {
-	return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
+	return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
 }
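
And the inverted form: wait for a hypothetical busy bit to clear, i.e. for the masked value to stop matching BIT(0). Judging from the helper above, out_val can plausibly be left NULL when the final value is not needed:

```c
/* HYP_BUSY_REG is invented for illustration. */
static int example_wait_idle(struct xe_mmio *mmio)
{
	const struct xe_reg HYP_BUSY_REG = { .addr = 0xdef0 };

	return xe_mmio_wait32_not(mmio, HYP_BUSY_REG, BIT(0), BIT(0),
				  10 * USEC_PER_MSEC, NULL, false);
}
```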