Skip to content
/ linux Public

Commit 26a4032

Browse files
mattrope authored and Sasha Levin committed
drm/xe: Switch MMIO interface to take xe_mmio instead of xe_gt
[ Upstream commit a84590c ] Since much of the MMIO register access done by the driver is to non-GT registers, use of 'xe_gt' in these interfaces has been a long-standing design flaw that's been hard to disentangle. To avoid a flag day across the whole driver, munge the function names and add temporary compatibility macros with the original function names that can accept either the new xe_mmio or the old xe_gt structure as a parameter. This will allow us to slowly convert parts of the driver over to the new interface independently. Signed-off-by: Matt Roper <matthew.d.roper@intel.com> Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20240910234719.3335472-54-matthew.d.roper@intel.com Stable-dep-of: 4a9b4e1 ("drm/xe/mmio: Avoid double-adjust in 64-bit reads") Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent f5508f1 commit 26a4032

File tree

3 files changed

+126
-88
lines changed

3 files changed

+126
-88
lines changed

drivers/gpu/drm/xe/xe_mmio.c

Lines changed: 63 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -67,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
6767

6868
/* Possibly override number of tile based on configuration register */
6969
if (!xe->info.skip_mtcfg) {
70-
struct xe_gt *gt = xe_root_mmio_gt(xe);
70+
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
7171
u8 tile_count;
7272
u32 mtcfg;
7373

7474
/*
7575
* Although the per-tile mmio regs are not yet initialized, this
76-
* is fine as it's going to the root gt, that's guaranteed to be
77-
* initialized earlier in xe_mmio_init()
76+
* is fine as it's going to the root tile's mmio, that's
77+
* guaranteed to be initialized earlier in xe_mmio_init()
7878
*/
79-
mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
79+
mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
8080
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
8181

8282
if (tile_count < xe->info.tile_count) {
@@ -187,116 +187,111 @@ int xe_mmio_init(struct xe_device *xe)
187187
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
188188
}
189189

190-
static void mmio_flush_pending_writes(struct xe_gt *gt)
190+
static void mmio_flush_pending_writes(struct xe_mmio *mmio)
191191
{
192192
#define DUMMY_REG_OFFSET 0x130030
193-
struct xe_tile *tile = gt_to_tile(gt);
194193
int i;
195194

196-
if (tile->xe->info.platform != XE_LUNARLAKE)
195+
if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
197196
return;
198197

199198
/* 4 dummy writes */
200199
for (i = 0; i < 4; i++)
201-
writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
200+
writel(0, mmio->regs + DUMMY_REG_OFFSET);
202201
}
203202

204-
u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
203+
u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
205204
{
206-
struct xe_tile *tile = gt_to_tile(gt);
207-
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
205+
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
208206
u8 val;
209207

210208
/* Wa_15015404425 */
211-
mmio_flush_pending_writes(gt);
209+
mmio_flush_pending_writes(mmio);
212210

213-
val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
214-
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
211+
val = readb(mmio->regs + addr);
212+
trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
215213

216214
return val;
217215
}
218216

219-
u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
217+
u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
220218
{
221-
struct xe_tile *tile = gt_to_tile(gt);
222-
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
219+
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
223220
u16 val;
224221

225222
/* Wa_15015404425 */
226-
mmio_flush_pending_writes(gt);
223+
mmio_flush_pending_writes(mmio);
227224

228-
val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
229-
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
225+
val = readw(mmio->regs + addr);
226+
trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
230227

231228
return val;
232229
}
233230

234-
void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
231+
void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
235232
{
236-
struct xe_tile *tile = gt_to_tile(gt);
237-
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
233+
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
238234

239-
trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
235+
trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
240236

241-
if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
242-
xe_gt_sriov_vf_write32(gt, reg, val);
237+
if (!reg.vf && mmio->sriov_vf_gt)
238+
xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
243239
else
244-
writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
240+
writel(val, mmio->regs + addr);
245241
}
246242

247-
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
243+
u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
248244
{
249-
struct xe_tile *tile = gt_to_tile(gt);
250-
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
245+
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
251246
u32 val;
252247

253248
/* Wa_15015404425 */
254-
mmio_flush_pending_writes(gt);
249+
mmio_flush_pending_writes(mmio);
255250

256-
if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
257-
val = xe_gt_sriov_vf_read32(gt, reg);
251+
if (!reg.vf && mmio->sriov_vf_gt)
252+
val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
258253
else
259-
val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
254+
val = readl(mmio->regs + addr);
260255

261-
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
256+
trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
262257

263258
return val;
264259
}
265260

266-
u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
261+
u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
267262
{
268263
u32 old, reg_val;
269264

270-
old = xe_mmio_read32(gt, reg);
265+
old = xe_mmio_read32(mmio, reg);
271266
reg_val = (old & ~clr) | set;
272-
xe_mmio_write32(gt, reg, reg_val);
267+
xe_mmio_write32(mmio, reg, reg_val);
273268

274269
return old;
275270
}
276271

277-
int xe_mmio_write32_and_verify(struct xe_gt *gt,
278-
struct xe_reg reg, u32 val, u32 mask, u32 eval)
272+
int __xe_mmio_write32_and_verify(struct xe_mmio *mmio,
273+
struct xe_reg reg, u32 val, u32 mask, u32 eval)
279274
{
280275
u32 reg_val;
281276

282-
xe_mmio_write32(gt, reg, val);
283-
reg_val = xe_mmio_read32(gt, reg);
277+
xe_mmio_write32(mmio, reg, val);
278+
reg_val = xe_mmio_read32(mmio, reg);
284279

285280
return (reg_val & mask) != eval ? -EINVAL : 0;
286281
}
287282

288-
bool xe_mmio_in_range(const struct xe_gt *gt,
289-
const struct xe_mmio_range *range,
290-
struct xe_reg reg)
283+
bool __xe_mmio_in_range(const struct xe_mmio *mmio,
284+
const struct xe_mmio_range *range,
285+
struct xe_reg reg)
291286
{
292-
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
287+
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
293288

294289
return range && addr >= range->start && addr <= range->end;
295290
}
296291

297292
/**
298293
* xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
299-
* @gt: MMIO target GT
294+
* @mmio: MMIO target
300295
* @reg: register to read value from
301296
*
302297
* Although Intel GPUs have some 64-bit registers, the hardware officially
@@ -316,36 +311,36 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
316311
*
317312
* Returns the value of the 64-bit register.
318313
*/
319-
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
314+
u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
320315
{
321316
struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
322317
u32 ldw, udw, oldudw, retries;
323318

324-
reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
325-
reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
319+
reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
320+
reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
326321

327322
/* we shouldn't adjust just one register address */
328-
xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
323+
xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
329324

330-
oldudw = xe_mmio_read32(gt, reg_udw);
325+
oldudw = xe_mmio_read32(mmio, reg_udw);
331326
for (retries = 5; retries; --retries) {
332-
ldw = xe_mmio_read32(gt, reg);
333-
udw = xe_mmio_read32(gt, reg_udw);
327+
ldw = xe_mmio_read32(mmio, reg);
328+
udw = xe_mmio_read32(mmio, reg_udw);
334329

335330
if (udw == oldudw)
336331
break;
337332

338333
oldudw = udw;
339334
}
340335

341-
xe_gt_WARN(gt, retries == 0,
342-
"64-bit read of %#x did not stabilize\n", reg.addr);
336+
drm_WARN(&mmio->tile->xe->drm, retries == 0,
337+
"64-bit read of %#x did not stabilize\n", reg.addr);
343338

344339
return (u64)udw << 32 | ldw;
345340
}
346341

347-
static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
348-
u32 *out_val, bool atomic, bool expect_match)
342+
static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
343+
u32 *out_val, bool atomic, bool expect_match)
349344
{
350345
ktime_t cur = ktime_get_raw();
351346
const ktime_t end = ktime_add_us(cur, timeout_us);
@@ -355,7 +350,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
355350
bool check;
356351

357352
for (;;) {
358-
read = xe_mmio_read32(gt, reg);
353+
read = xe_mmio_read32(mmio, reg);
359354

360355
check = (read & mask) == val;
361356
if (!expect_match)
@@ -381,7 +376,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
381376
}
382377

383378
if (ret != 0) {
384-
read = xe_mmio_read32(gt, reg);
379+
read = xe_mmio_read32(mmio, reg);
385380

386381
check = (read & mask) == val;
387382
if (!expect_match)
@@ -399,7 +394,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
399394

400395
/**
401396
* xe_mmio_wait32() - Wait for a register to match the desired masked value
402-
* @gt: MMIO target GT
397+
* @mmio: MMIO target
403398
* @reg: register to read value from
404399
* @mask: mask to be applied to the value read from the register
405400
* @val: desired value after applying the mask
@@ -416,15 +411,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
416411
* @timeout_us for different reasons, specially in non-atomic contexts. Thus,
417412
* it is possible that this function succeeds even after @timeout_us has passed.
418413
*/
419-
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
420-
u32 *out_val, bool atomic)
414+
int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
415+
u32 *out_val, bool atomic)
421416
{
422-
return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
417+
return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
423418
}
424419

425420
/**
426421
* xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
427-
* @gt: MMIO target GT
422+
* @mmio: MMIO target
428423
* @reg: register to read value from
429424
* @mask: mask to be applied to the value read from the register
430425
* @val: value not to be matched after applying the mask
@@ -435,8 +430,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
435430
* This function works exactly like xe_mmio_wait32() with the exception that
436431
* @val is expected not to be matched.
437432
*/
438-
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
439-
u32 *out_val, bool atomic)
433+
int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
434+
u32 *out_val, bool atomic)
440435
{
441-
return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
436+
return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
442437
}

drivers/gpu/drm/xe/xe_mmio.h

Lines changed: 59 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -14,25 +14,67 @@ struct xe_reg;
1414
int xe_mmio_init(struct xe_device *xe);
1515
int xe_mmio_probe_tiles(struct xe_device *xe);
1616

17-
u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
18-
u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
19-
void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
20-
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
21-
u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
22-
int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
23-
bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
24-
25-
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
26-
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
27-
u32 *out_val, bool atomic);
28-
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
29-
u32 *out_val, bool atomic);
30-
31-
static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
17+
/*
18+
* Temporary transition helper for xe_gt -> xe_mmio conversion. Allows
19+
* continued usage of xe_gt as a parameter to MMIO operations which now
20+
* take an xe_mmio structure instead. Will be removed once the driver-wide
21+
* conversion is complete.
22+
*/
23+
#define __to_xe_mmio(ptr) \
24+
_Generic(ptr, \
25+
const struct xe_gt *: (&((const struct xe_gt *)(ptr))->mmio), \
26+
struct xe_gt *: (&((struct xe_gt *)(ptr))->mmio), \
27+
const struct xe_mmio *: (ptr), \
28+
struct xe_mmio *: (ptr))
29+
30+
u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
31+
#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
32+
33+
u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
34+
#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
35+
36+
void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
37+
#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
38+
39+
u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
40+
#define xe_mmio_read32(p, reg) __xe_mmio_read32(__to_xe_mmio(p), reg)
41+
42+
u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
43+
#define xe_mmio_rmw32(p, reg, clr, set) __xe_mmio_rmw32(__to_xe_mmio(p), reg, clr, set)
44+
45+
int __xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg,
46+
u32 val, u32 mask, u32 eval);
47+
#define xe_mmio_write32_and_verify(p, reg, val, mask, eval) \
48+
__xe_mmio_write32_and_verify(__to_xe_mmio(p), reg, val, mask, eval)
49+
50+
bool __xe_mmio_in_range(const struct xe_mmio *mmio,
51+
const struct xe_mmio_range *range, struct xe_reg reg);
52+
#define xe_mmio_in_range(p, range, reg) __xe_mmio_in_range(__to_xe_mmio(p), range, reg)
53+
54+
u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
55+
#define xe_mmio_read64_2x32(p, reg) __xe_mmio_read64_2x32(__to_xe_mmio(p), reg)
56+
57+
int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
58+
u32 timeout_us, u32 *out_val, bool atomic);
59+
#define xe_mmio_wait32(p, reg, mask, val, timeout_us, out_val, atomic) \
60+
__xe_mmio_wait32(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
61+
62+
int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
63+
u32 val, u32 timeout_us, u32 *out_val, bool atomic);
64+
#define xe_mmio_wait32_not(p, reg, mask, val, timeout_us, out_val, atomic) \
65+
__xe_mmio_wait32_not(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
66+
67+
static inline u32 __xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
3268
{
33-
if (addr < gt->mmio.adj_limit)
34-
addr += gt->mmio.adj_offset;
69+
if (addr < mmio->adj_limit)
70+
addr += mmio->adj_offset;
3571
return addr;
3672
}
73+
#define xe_mmio_adjusted_addr(p, addr) __xe_mmio_adjusted_addr(__to_xe_mmio(p), addr)
74+
75+
static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
76+
{
77+
return &xe->tiles[0].mmio;
78+
}
3779

3880
#endif

drivers/gpu/drm/xe/xe_trace.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include "xe_vm.h"
2222

2323
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
24+
#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
2425
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
2526
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
2627

@@ -342,12 +343,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
342343
);
343344

344345
TRACE_EVENT(xe_reg_rw,
345-
TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
346+
TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
346347

347-
TP_ARGS(gt, write, reg, val, len),
348+
TP_ARGS(mmio, write, reg, val, len),
348349

349350
TP_STRUCT__entry(
350-
__string(dev, __dev_name_gt(gt))
351+
__string(dev, __dev_name_tile(mmio->tile))
351352
__field(u64, val)
352353
__field(u32, reg)
353354
__field(u16, write)

0 commit comments

Comments (0)