Skip to content

Commit 844150c

Browse files
author
Thomas Hellström
committed
drm/xe: Convert pinned suspend eviction for exhaustive eviction
Pinned suspend eviction and preparation for eviction validate system memory for eviction buffers. Do that under a validation exclusive lock to avoid interfering with other processes validating system graphics memory. v2: - Avoid gotos from within xe_validation_guard(). - Adapt to signature change of xe_validation_guard(). Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20250908101246.65025-14-thomas.hellstrom@linux.intel.com
1 parent 1f15417 commit 844150c

File tree

1 file changed

+103
-81
lines changed

1 file changed

+103
-81
lines changed

drivers/gpu/drm/xe/xe_bo.c

Lines changed: 103 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -1141,43 +1141,47 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
11411141
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
11421142
{
11431143
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1144-
struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
1144+
struct xe_validation_ctx ctx;
1145+
struct drm_exec exec;
11451146
struct xe_bo *backup;
11461147
int ret = 0;
11471148

1148-
xe_bo_lock(bo, false);
1149+
xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1150+
ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1151+
drm_exec_retry_on_contention(&exec);
1152+
xe_assert(xe, !ret);
1153+
xe_assert(xe, !bo->backup_obj);
11491154

1150-
xe_assert(xe, !bo->backup_obj);
1155+
/*
1156+
* Since this is called from the PM notifier we might have raced with
1157+
* someone unpinning this after we dropped the pinned list lock and
1158+
* grabbing the above bo lock.
1159+
*/
1160+
if (!xe_bo_is_pinned(bo))
1161+
break;
11511162

1152-
/*
1153-
* Since this is called from the PM notifier we might have raced with
1154-
* someone unpinning this after we dropped the pinned list lock and
1155-
* grabbing the above bo lock.
1156-
*/
1157-
if (!xe_bo_is_pinned(bo))
1158-
goto out_unlock_bo;
1163+
if (!xe_bo_is_vram(bo))
1164+
break;
11591165

1160-
if (!xe_bo_is_vram(bo))
1161-
goto out_unlock_bo;
1166+
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1167+
break;
11621168

1163-
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1164-
goto out_unlock_bo;
1169+
backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1170+
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1171+
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1172+
XE_BO_FLAG_PINNED, &exec);
1173+
if (IS_ERR(backup)) {
1174+
drm_exec_retry_on_contention(&exec);
1175+
ret = PTR_ERR(backup);
1176+
xe_validation_retry_on_oom(&ctx, &ret);
1177+
break;
1178+
}
11651179

1166-
backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1167-
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1168-
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1169-
XE_BO_FLAG_PINNED, exec);
1170-
if (IS_ERR(backup)) {
1171-
ret = PTR_ERR(backup);
1172-
goto out_unlock_bo;
1180+
backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1181+
ttm_bo_pin(&backup->ttm);
1182+
bo->backup_obj = backup;
11731183
}
11741184

1175-
backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1176-
ttm_bo_pin(&backup->ttm);
1177-
bo->backup_obj = backup;
1178-
1179-
out_unlock_bo:
1180-
xe_bo_unlock(bo);
11811185
return ret;
11821186
}
11831187

@@ -1203,57 +1207,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
12031207
return 0;
12041208
}
12051209

1206-
/**
1207-
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1208-
* @bo: The buffer object to move.
1209-
*
1210-
* On successful completion, the object memory will be moved to system memory.
1211-
*
1212-
* This is needed for special handling of pinned VRAM objects during
1213-
* suspend-resume.
1214-
*
1215-
* Return: 0 on success. Negative error code on failure.
1216-
*/
1217-
int xe_bo_evict_pinned(struct xe_bo *bo)
1210+
static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
12181211
{
1219-
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1220-
struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
1221-
struct xe_bo *backup = bo->backup_obj;
1222-
bool backup_created = false;
1212+
struct xe_device *xe = xe_bo_device(bo);
12231213
bool unmap = false;
12241214
int ret = 0;
12251215

1226-
xe_bo_lock(bo, false);
1227-
1228-
if (WARN_ON(!bo->ttm.resource)) {
1229-
ret = -EINVAL;
1230-
goto out_unlock_bo;
1231-
}
1232-
1233-
if (WARN_ON(!xe_bo_is_pinned(bo))) {
1234-
ret = -EINVAL;
1235-
goto out_unlock_bo;
1236-
}
1237-
1238-
if (!xe_bo_is_vram(bo))
1239-
goto out_unlock_bo;
1240-
1241-
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1242-
goto out_unlock_bo;
1243-
1244-
if (!backup) {
1245-
backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1246-
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1247-
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1248-
XE_BO_FLAG_PINNED, exec);
1249-
if (IS_ERR(backup)) {
1250-
ret = PTR_ERR(backup);
1251-
goto out_unlock_bo;
1252-
}
1253-
backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1254-
backup_created = true;
1255-
}
1256-
12571216
if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
12581217
struct xe_migrate *migrate;
12591218
struct dma_fence *fence;
@@ -1286,7 +1245,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
12861245
if (iosys_map_is_null(&bo->vmap)) {
12871246
ret = xe_bo_vmap(bo);
12881247
if (ret)
1289-
goto out_backup;
1248+
goto out_vunmap;
12901249
unmap = true;
12911250
}
12921251

@@ -1296,15 +1255,78 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
12961255

12971256
if (!bo->backup_obj)
12981257
bo->backup_obj = backup;
1299-
1300-
out_backup:
1258+
out_vunmap:
13011259
xe_bo_vunmap(backup);
1302-
if (ret && backup_created)
1303-
xe_bo_put(backup);
1304-
out_unlock_bo:
1260+
out_backup:
13051261
if (unmap)
13061262
xe_bo_vunmap(bo);
1307-
xe_bo_unlock(bo);
1263+
1264+
return ret;
1265+
}
1266+
1267+
/**
1268+
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1269+
* @bo: The buffer object to move.
1270+
*
1271+
* On successful completion, the object memory will be moved to system memory.
1272+
*
1273+
* This is needed for special handling of pinned VRAM objects during
1274+
* suspend-resume.
1275+
*
1276+
* Return: 0 on success. Negative error code on failure.
1277+
*/
1278+
int xe_bo_evict_pinned(struct xe_bo *bo)
1279+
{
1280+
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1281+
struct xe_validation_ctx ctx;
1282+
struct drm_exec exec;
1283+
struct xe_bo *backup = bo->backup_obj;
1284+
bool backup_created = false;
1285+
int ret = 0;
1286+
1287+
xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1288+
ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1289+
drm_exec_retry_on_contention(&exec);
1290+
xe_assert(xe, !ret);
1291+
1292+
if (WARN_ON(!bo->ttm.resource)) {
1293+
ret = -EINVAL;
1294+
break;
1295+
}
1296+
1297+
if (WARN_ON(!xe_bo_is_pinned(bo))) {
1298+
ret = -EINVAL;
1299+
break;
1300+
}
1301+
1302+
if (!xe_bo_is_vram(bo))
1303+
break;
1304+
1305+
if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1306+
break;
1307+
1308+
if (!backup) {
1309+
backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
1310+
xe_bo_size(bo),
1311+
DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1312+
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1313+
XE_BO_FLAG_PINNED, &exec);
1314+
if (IS_ERR(backup)) {
1315+
drm_exec_retry_on_contention(&exec);
1316+
ret = PTR_ERR(backup);
1317+
xe_validation_retry_on_oom(&ctx, &ret);
1318+
break;
1319+
}
1320+
backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1321+
backup_created = true;
1322+
}
1323+
1324+
ret = xe_bo_evict_pinned_copy(bo, backup);
1325+
}
1326+
1327+
if (ret && backup_created)
1328+
xe_bo_put(backup);
1329+
13081330
return ret;
13091331
}
13101332

0 commit comments

Comments
 (0)