Skip to content

Commit 72d4796

Browse files
committed
drm/xe/pxp/uapi: Add userspace and LRC support for PXP-using queues
Userspace is required to mark a queue as using PXP to guarantee that the PXP instructions will work. In addition to managing the PXP sessions, when a PXP queue is created the driver will set the relevant bits in its context control register. On submission of a valid PXP queue, the driver will validate all encrypted objects mapped to the VM to ensure they were encrypted with the current key. v2: Remove pxp_types include outside of PXP code (Jani), better comments and code cleanup (John) v3: split the internal PXP management to a separate patch for ease of review. re-order ioctl checks to always return -EINVAL if parameters are invalid, rebase on msix changes. Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Cc: John Harrison <John.C.Harrison@Intel.com> Reviewed-by: John Harrison <John.C.Harrison@Intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20250129174140.948829-9-daniele.ceraolospurio@intel.com
1 parent f8caa80 commit 72d4796

File tree

10 files changed

+153
-14
lines changed

10 files changed

+153
-14
lines changed

drivers/gpu/drm/xe/regs/xe_engine_regs.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,7 @@
132132
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
133133

134134
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
135+
#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
135136
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
136137
#define CTX_CTRL_RUN_ALONE REG_BIT(7)
137138
#define CTX_CTRL_INDIRECT_RING_STATE_ENABLE REG_BIT(4)

drivers/gpu/drm/xe/xe_exec_queue.c

Lines changed: 54 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
#include "xe_ring_ops_types.h"
2626
#include "xe_trace.h"
2727
#include "xe_vm.h"
28+
#include "xe_pxp.h"
2829

2930
enum xe_exec_queue_sched_prop {
3031
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
@@ -38,6 +39,8 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
3839

3940
static void __xe_exec_queue_free(struct xe_exec_queue *q)
4041
{
42+
if (xe_exec_queue_uses_pxp(q))
43+
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
4144
if (q->vm)
4245
xe_vm_put(q->vm);
4346

@@ -113,6 +116,21 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
113116
{
114117
struct xe_vm *vm = q->vm;
115118
int i, err;
119+
u32 flags = 0;
120+
121+
/*
122+
* PXP workloads executing on RCS or CCS must run in isolation (i.e. no
123+
* other workload can use the EUs at the same time). On MTL this is done
124+
* by setting the RUNALONE bit in the LRC, while starting on Xe2 there
125+
* is a dedicated bit for it.
126+
*/
127+
if (xe_exec_queue_uses_pxp(q) &&
128+
(q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
129+
if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
130+
flags |= XE_LRC_CREATE_PXP;
131+
else
132+
flags |= XE_LRC_CREATE_RUNALONE;
133+
}
116134

117135
if (vm) {
118136
err = xe_vm_lock(vm, true);
@@ -121,7 +139,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
121139
}
122140

123141
for (i = 0; i < q->width; ++i) {
124-
q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
142+
q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
125143
if (IS_ERR(q->lrc[i])) {
126144
err = PTR_ERR(q->lrc[i]);
127145
goto err_unlock;
@@ -166,6 +184,19 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
166184
if (err)
167185
goto err_post_alloc;
168186

187+
/*
188+
* We can only add the queue to the PXP list after the init is complete,
189+
* because the PXP termination can call exec_queue_kill and that will
190+
* go bad if the queue is only half-initialized. This means that we
191+
* can't do it when we handle the PXP extension in __xe_exec_queue_alloc
192+
* and we need to do it here instead.
193+
*/
194+
if (xe_exec_queue_uses_pxp(q)) {
195+
err = xe_pxp_exec_queue_add(xe->pxp, q);
196+
if (err)
197+
goto err_post_alloc;
198+
}
199+
169200
return q;
170201

171202
err_post_alloc:
@@ -254,6 +285,9 @@ void xe_exec_queue_destroy(struct kref *ref)
254285
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
255286
struct xe_exec_queue *eq, *next;
256287

288+
if (xe_exec_queue_uses_pxp(q))
289+
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
290+
257291
xe_exec_queue_last_fence_put_unlocked(q);
258292
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
259293
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
@@ -409,13 +443,30 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
409443
return 0;
410444
}
411445

446+
static int
447+
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
448+
{
449+
if (value == DRM_XE_PXP_TYPE_NONE)
450+
return 0;
451+
452+
/* we only support HWDRM sessions right now */
453+
if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
454+
return -EINVAL;
455+
456+
if (!xe_pxp_is_enabled(xe->pxp))
457+
return -ENODEV;
458+
459+
return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
460+
}
461+
412462
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
413463
struct xe_exec_queue *q,
414464
u64 value);
415465

416466
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
417467
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
418468
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
469+
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
419470
};
420471

421472
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -435,7 +486,8 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
435486
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
436487
XE_IOCTL_DBG(xe, ext.pad) ||
437488
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
438-
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
489+
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
490+
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
439491
return -EINVAL;
440492

441493
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));

drivers/gpu/drm/xe/xe_exec_queue.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,11 @@ static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
5757
return q->width > 1;
5858
}
5959

60+
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
61+
{
62+
return q->pxp.type;
63+
}
64+
6065
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
6166

6267
bool xe_exec_queue_ring_full(struct xe_exec_queue *q);

drivers/gpu/drm/xe/xe_exec_queue_types.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,8 @@ struct xe_exec_queue {
132132

133133
/** @pxp: PXP info tracking */
134134
struct {
135+
/** @pxp.type: PXP session type used by this queue */
136+
u8 type;
135137
/** @pxp.link: link into the list of PXP exec queues */
136138
struct list_head link;
137139
} pxp;

drivers/gpu/drm/xe/xe_execlist.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
269269

270270
port->hwe = hwe;
271271

272-
port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
272+
port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
273273
if (IS_ERR(port->lrc)) {
274274
err = PTR_ERR(port->lrc);
275275
goto err;

drivers/gpu/drm/xe/xe_lrc.c

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -883,7 +883,8 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
883883
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
884884

885885
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
886-
struct xe_vm *vm, u32 ring_size, u16 msix_vec)
886+
struct xe_vm *vm, u32 ring_size, u16 msix_vec,
887+
u32 init_flags)
887888
{
888889
struct xe_gt *gt = hwe->gt;
889890
struct xe_tile *tile = gt_to_tile(gt);
@@ -979,6 +980,16 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
979980
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
980981
}
981982

983+
if (init_flags & XE_LRC_CREATE_RUNALONE)
984+
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
985+
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
986+
_MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
987+
988+
if (init_flags & XE_LRC_CREATE_PXP)
989+
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
990+
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
991+
_MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
992+
982993
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
983994

984995
if (xe->info.has_asid && vm)
@@ -1021,14 +1032,15 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
10211032
* @vm: The VM (address space)
10221033
* @ring_size: LRC ring size
10231034
* @msix_vec: MSI-X interrupt vector (for platforms that support it)
1035+
* @flags: LRC initialization flags
10241036
*
10251037
* Allocate and initialize the Logical Ring Context (LRC).
10261038
*
10271039
* Return pointer to created LRC upon success and an error pointer
10281040
* upon failure.
10291041
*/
10301042
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
1031-
u32 ring_size, u16 msix_vec)
1043+
u32 ring_size, u16 msix_vec, u32 flags)
10321044
{
10331045
struct xe_lrc *lrc;
10341046
int err;
@@ -1037,7 +1049,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
10371049
if (!lrc)
10381050
return ERR_PTR(-ENOMEM);
10391051

1040-
err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
1052+
err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
10411053
if (err) {
10421054
kfree(lrc);
10431055
return ERR_PTR(err);

drivers/gpu/drm/xe/xe_lrc.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,10 @@ struct xe_lrc_snapshot {
4242
#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
4343
#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
4444

45+
#define XE_LRC_CREATE_RUNALONE 0x1
46+
#define XE_LRC_CREATE_PXP 0x2
4547
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
46-
u32 ring_size, u16 msix_vec);
48+
u32 ring_size, u16 msix_vec, u32 flags);
4749
void xe_lrc_destroy(struct kref *ref);
4850

4951
/**

drivers/gpu/drm/xe/xe_pxp.c

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include "xe_pxp.h"
77

88
#include <drm/drm_managed.h>
9+
#include <uapi/drm/xe_drm.h>
910

1011
#include "xe_device_types.h"
1112
#include "xe_exec_queue.h"
@@ -47,7 +48,7 @@ bool xe_pxp_is_supported(const struct xe_device *xe)
4748
return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
4849
}
4950

50-
static bool pxp_is_enabled(const struct xe_pxp *pxp)
51+
bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
5152
{
5253
return pxp;
5354
}
@@ -249,7 +250,7 @@ void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
249250
{
250251
struct xe_pxp *pxp = xe->pxp;
251252

252-
if (!pxp_is_enabled(pxp)) {
253+
if (!xe_pxp_is_enabled(pxp)) {
253254
drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
254255
return;
255256
}
@@ -424,6 +425,27 @@ static int __pxp_start_arb_session(struct xe_pxp *pxp)
424425
return ret;
425426
}
426427

428+
/**
429+
* xe_pxp_exec_queue_set_type - Mark a queue as using PXP
430+
* @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
431+
* @q: the queue to mark as using PXP
432+
* @type: the type of PXP session this queue will use
433+
*
434+
* Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
435+
*/
436+
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
437+
{
438+
if (!xe_pxp_is_enabled(pxp))
439+
return -ENODEV;
440+
441+
/* we only support HWDRM sessions right now */
442+
xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
443+
444+
q->pxp.type = type;
445+
446+
return 0;
447+
}
448+
427449
static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
428450
{
429451
spin_lock_irq(&pxp->queues.lock);
@@ -449,9 +471,12 @@ int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
449471
{
450472
int ret = 0;
451473

452-
if (!pxp_is_enabled(pxp))
474+
if (!xe_pxp_is_enabled(pxp))
453475
return -ENODEV;
454476

477+
/* we only support HWDRM sessions right now */
478+
xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
479+
455480
/*
456481
* Runtime suspend kills PXP, so we take a reference to prevent it from
457482
* happening while we have active queues that use PXP
@@ -589,7 +614,7 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
589614
{
590615
bool need_pm_put = false;
591616

592-
if (!pxp_is_enabled(pxp))
617+
if (!xe_pxp_is_enabled(pxp))
593618
return;
594619

595620
spin_lock_irq(&pxp->queues.lock);
@@ -599,6 +624,8 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
599624
need_pm_put = true;
600625
}
601626

627+
q->pxp.type = DRM_XE_PXP_TYPE_NONE;
628+
602629
spin_unlock_irq(&pxp->queues.lock);
603630

604631
if (need_pm_put)

drivers/gpu/drm/xe/xe_pxp.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@ struct xe_device;
1212
struct xe_exec_queue;
1313
struct xe_pxp;
1414

15-
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xF /* TODO: move to uapi */
16-
1715
bool xe_pxp_is_supported(const struct xe_device *xe);
16+
bool xe_pxp_is_enabled(const struct xe_pxp *pxp);
1817

1918
int xe_pxp_init(struct xe_device *xe);
2019
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir);
2120

21+
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type);
2222
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
2323
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
2424

include/uapi/drm/xe_drm.h

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1114,6 +1114,24 @@ struct drm_xe_vm_bind {
11141114
/**
11151115
* struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
11161116
*
1117+
* This ioctl supports setting the following properties via the
1118+
* %DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension, which uses the
1119+
* generic @drm_xe_ext_set_property struct:
1120+
*
1121+
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY - set the queue priority.
1122+
* CAP_SYS_NICE is required to set a value above normal.
1123+
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE - set the queue timeslice
1124+
* duration in microseconds.
1125+
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE - set the type of PXP session
1126+
* this queue will be used with. Valid values are listed in enum
1127+
* drm_xe_pxp_session_type. %DRM_XE_PXP_TYPE_NONE is the default behavior, so
1128+
* there is no need to explicitly set that. When a queue of type
1129+
* %DRM_XE_PXP_TYPE_HWDRM is created, the PXP default HWDRM session
1130+
* (%XE_PXP_HWDRM_DEFAULT_SESSION) will be started, if isn't already running.
1131+
* Given that going into a power-saving state kills PXP HWDRM sessions,
1132+
* runtime PM will be blocked while queues of this type are alive.
1133+
* All PXP queues will be killed if a PXP invalidation event occurs.
1134+
*
11171135
* The example below shows how to use @drm_xe_exec_queue_create to create
11181136
* a simple exec_queue (no parallel submission) of class
11191137
* &DRM_XE_ENGINE_CLASS_RENDER.
@@ -1137,7 +1155,7 @@ struct drm_xe_exec_queue_create {
11371155
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
11381156
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
11391157
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
1140-
1158+
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
11411159
/** @extensions: Pointer to the first extension struct, if any */
11421160
__u64 extensions;
11431161

@@ -1756,6 +1774,26 @@ struct drm_xe_oa_stream_info {
17561774
__u64 reserved[3];
17571775
};
17581776

1777+
/**
1778+
* enum drm_xe_pxp_session_type - Supported PXP session types.
1779+
*
1780+
* We currently only support HWDRM sessions, which are used for protected
1781+
* content that ends up being displayed, but the HW supports multiple types, so
1782+
* we might extend support in the future.
1783+
*/
1784+
enum drm_xe_pxp_session_type {
1785+
/** @DRM_XE_PXP_TYPE_NONE: PXP not used */
1786+
DRM_XE_PXP_TYPE_NONE = 0,
1787+
/**
1788+
* @DRM_XE_PXP_TYPE_HWDRM: HWDRM sessions are used for content that ends
1789+
* up on the display.
1790+
*/
1791+
DRM_XE_PXP_TYPE_HWDRM = 1,
1792+
};
1793+
1794+
/* ID of the protected content session managed by Xe when PXP is active */
1795+
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
1796+
17591797
#if defined(__cplusplus)
17601798
}
17611799
#endif

0 commit comments

Comments
 (0)