Skip to content
Permalink
Browse files
drm/i915: Use to_root_gt() to refer to the root tile
Starting from a patch from Matt, to_root_gt() returns a
reference to the root tile in order to abstract the root tile
away from the callers.

Since the root tile is identified as tile '0', the id is embedded
in the name, so that i915->gt becomes i915->gt0.

The renaming has been mostly done with the following command and
some manual fixes.

sed -i -e 's/\&i915\->gt\./\&to_root_gt(i915)\->/g' \
	-e 's/\&dev_priv\->gt\./\&to_root_gt(dev_priv)\->/g' \
	-e 's/\&dev_priv\->gt/to_root_gt(dev_priv)/g' \
	-e 's/\&i915\->gt/to_root_gt(i915)/g' \
	-e 's/dev_priv\->gt\./to_root_gt(dev_priv)\->/g' \
	-e 's/i915\->gt\./to_root_gt(i915)\->/g' \
	`find drivers/gpu/drm/i915/ -name *.[ch]`

Two small changes have been added to this commit:

 1. intel_reset_gpu() in intel_display.c retrieves the gt from
    to_scanout_gt()
 2. in set_scheduler_caps() the gt is taken from the engine and
    not from i915.

Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
  • Loading branch information
Andi Shyti authored and intel-lab-lkp committed Nov 21, 2021
1 parent 72be923 commit 068a75571292e317e35752c1b078605dda122741
Show file tree
Hide file tree
Showing 73 changed files with 271 additions and 255 deletions.
@@ -818,7 +818,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, true);
intel_rps_mark_interactive(&to_root_gt(dev_priv)->rps, true);
state->rps_interactive = true;
}

@@ -852,7 +852,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
return;

if (state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, false);
intel_rps_mark_interactive(&to_root_gt(dev_priv)->rps, false);
state->rps_interactive = false;
}

@@ -124,6 +124,11 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);

static struct intel_gt *to_scanout_gt(struct drm_i915_private *i915)
{
return i915->ggtt.vm.gt;
}

/**
* intel_update_watermarks - update FIFO watermark values based on current modes
* @dev_priv: i915 device
@@ -838,7 +843,7 @@ __intel_display_resume(struct drm_device *dev,
/*
 * Whether a full GPU reset on this platform also clobbers display
 * state (per INTEL_INFO), in which case the reset must be coordinated
 * with the modeset code. The reset capability is queried on the GT
 * that drives the scanout.
 *
 * NOTE: the diff residue showing both the old (&dev_priv->gt) and new
 * (to_scanout_gt(dev_priv)) call has been resolved to the post-patch
 * form only.
 */
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
		intel_has_gpu_reset(to_scanout_gt(dev_priv)));
}

void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
@@ -857,14 +862,14 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
return;

/* We have a modeset vs reset deadlock, defensively unbreak it. */
set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
set_bit(I915_RESET_MODESET, &to_root_gt(dev_priv)->reset.flags);
smp_mb__after_atomic();
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
wake_up_bit(&to_root_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
drm_dbg_kms(&dev_priv->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
intel_gt_set_wedged(&dev_priv->gt);
intel_gt_set_wedged(to_root_gt(dev_priv));
}

/*
@@ -915,7 +920,7 @@ void intel_display_finish_reset(struct drm_i915_private *dev_priv)
return;

/* reset doesn't touch the display */
if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
if (!test_bit(I915_RESET_MODESET, &to_root_gt(dev_priv)->reset.flags))
return;

state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -953,7 +958,7 @@ void intel_display_finish_reset(struct drm_i915_private *dev_priv)
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);

clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
clear_bit_unlock(I915_RESET_MODESET, &to_root_gt(dev_priv)->reset.flags);
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
@@ -8550,19 +8555,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
prepare_to_wait(bit_waitqueue(&to_root_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);


if (i915_sw_fence_done(&intel_state->commit_ready) ||
test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
test_bit(I915_RESET_MODESET, &to_root_gt(dev_priv)->reset.flags))
break;

schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
finish_wait(bit_waitqueue(&to_root_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset);
}
@@ -258,7 +258,7 @@ intel_dpt_create(struct intel_framebuffer *fb)

vm = &dpt->vm;

vm->gt = &i915->gt;
vm->gt = to_root_gt(i915);
vm->i915 = i915;
vm->dma = i915->drm.dev;
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
@@ -1382,7 +1382,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;

engine = dev_priv->gt.engine[RCS0];
engine = to_root_gt(dev_priv)->engine[RCS0];
if (!engine || !engine->kernel_context)
return;

@@ -1727,7 +1727,7 @@ static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);

return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
return intel_pxp_key_check(&to_root_gt(i915)->pxp, obj, false) == 0;
}

static bool pxp_is_borked(struct drm_i915_gem_object *obj)
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
* colateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(&i915->gt))
if (!intel_has_reset_engine(to_root_gt(i915)))
return -ENODEV;

pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
@@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,

if (!protected) {
pc->uses_protected_content = false;
} else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
} else if (!intel_pxp_is_enabled(&to_root_gt(i915)->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

if (!intel_pxp_is_active(&i915->gt.pxp))
ret = intel_pxp_start(&i915->gt.pxp);
if (!intel_pxp_is_active(&to_root_gt(i915)->pxp))
ret = intel_pxp_start(&to_root_gt(i915)->pxp);
}

return ret;
@@ -571,7 +571,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
intel_engine_mask_t prev_mask;

/* FIXME: This is NIY for execlists */
if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
if (!(intel_uc_uses_guc_submission(&to_root_gt(i915)->uc)))
return -ENODEV;

if (get_user(slot, &ext->engine_index))
@@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
sseu = &pc->legacy_rcs_sseu;
}

ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
ret = i915_gem_user_to_context_sseu(to_root_gt(i915), &user_sseu, sseu);
if (ret)
return ret;

@@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
const struct intel_gt *gt = &ctx->i915->gt;
const struct intel_gt *gt = to_root_gt(ctx->i915);
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
enum intel_engine_id id;
@@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
* colateral damage, and we should not pretend we can by
* exposing the interface.
*/
if (!intel_has_reset_engine(&ctx->i915->gt))
if (!intel_has_reset_engine(to_root_gt(ctx->i915)))
return -ENODEV;

i915_gem_context_clear_persistence(ctx);
@@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;

ppgtt = i915_ppgtt_create(&i915->gt, 0);
ppgtt = i915_ppgtt_create(to_root_gt(i915), 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;

ppgtt = i915_ppgtt_create(&i915->gt, 0);
ppgtt = i915_ppgtt_create(to_root_gt(i915), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);

@@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;

ret = intel_gt_terminally_wedged(&i915->gt);
ret = intel_gt_terminally_wedged(to_root_gt(i915));
if (ret)
return ret;

@@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
if (ext.flags)
return -EINVAL;

if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
if (!intel_pxp_is_enabled(&to_root_gt(ext_data->i915)->pxp))
return -ENODEV;

ext_data->flags |= I915_BO_PROTECTED;
@@ -2287,9 +2287,9 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}

/*
 * Number of video (VCS/VDBOX) engines on the device, counted from the
 * root GT's engine mask.
 *
 * The parameter lost its const qualifier in the original patch because
 * to_root_gt() takes a non-const i915 pointer. The diff residue showing
 * both the old and new signature/return lines has been resolved to the
 * post-patch form only.
 */
static int num_vcs_engines(struct drm_i915_private *i915)
{
	return hweight_long(VDBOX_MASK(to_root_gt(i915)));
}

/*
@@ -645,7 +645,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto insert;

/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
err = intel_gt_retire_requests_timeout(to_root_gt(i915), MAX_SCHEDULE_TIMEOUT,
NULL);
if (err)
goto err;
@@ -19,6 +19,7 @@
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct scatterlist *sg;
struct sg_table *st;
dma_addr_t dma;
@@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst += PAGE_SIZE;
}

intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
intel_gt_chipset_flush(to_root_gt(i915));

/* We're no longer struct page backed */
obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
@@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
{
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;

err = i915_gem_object_wait(obj,
@@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
return -EFAULT;

drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
intel_gt_chipset_flush(to_root_gt(i915));

i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
@@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
intel_gt_suspend_prepare(&i915->gt);
intel_gt_suspend_prepare(to_root_gt(i915));

i915_gem_drain_freed_objects(i915);
}
@@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/

intel_gt_suspend_late(&i915->gt);
intel_gt_suspend_late(to_root_gt(i915));

spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
@@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
intel_gt_resume(&i915->gt);
intel_gt_resume(to_root_gt(i915));

ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
GEM_WARN_ON(ret);
@@ -153,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
*/
if (shrink & I915_SHRINK_ACTIVE)
/* Retire requests to unpin all idle contexts */
intel_gt_retire_requests(&i915->gt);
intel_gt_retire_requests(to_root_gt(i915));

/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
{
const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = to_i915(dev);
struct i915_gem_context *ctx;
unsigned long idx;
long ret;

/* ABI: return -EIO if already wedged */
ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
ret = intel_gt_terminally_wedged(to_root_gt(i915));
if (ret)
return ret;

@@ -166,7 +166,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
enum i915_cache_level src_level, dst_level;
int ret;

if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
if (!to_root_gt(i915)->migrate.context || intel_gt_is_wedged(to_root_gt(i915)))
return ERR_PTR(-EINVAL);

/* With fail_gpu_migration, we always perform a GPU clear. */
@@ -179,8 +179,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
!I915_SELFTEST_ONLY(fail_gpu_migration))
return ERR_PTR(-EINVAL);

intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
intel_engine_pm_get(to_root_gt(i915)->migrate.context->engine);
ret = intel_context_migrate_clear(to_root_gt(i915)->migrate.context, NULL,
dst_st->sgl, dst_level,
i915_ttm_gtt_binds_lmem(dst_mem),
0, &rq);
@@ -192,8 +192,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
return ERR_CAST(src_rsgt);

src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_copy(i915->gt.migrate.context,
intel_engine_pm_get(to_root_gt(i915)->migrate.context->engine);
ret = intel_context_migrate_copy(to_root_gt(i915)->migrate.context,
NULL, src_rsgt->table.sgl,
src_level,
i915_ttm_gtt_binds_lmem(bo->resource),
@@ -204,7 +204,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
i915_refct_sgt_put(src_rsgt);
}

intel_engine_pm_put(i915->gt.migrate.context->engine);
intel_engine_pm_put(to_root_gt(i915)->migrate.context->engine);

if (ret && rq) {
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
@@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
if (!dev_priv->gt.vm->has_read_only)
if (!to_root_gt(dev_priv)->vm->has_read_only)
return -ENODEV;
}

@@ -1705,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;

ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
ppgtt = i915_ppgtt_create(to_root_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1747,7 +1747,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
return 0;
}

if (intel_gt_is_wedged(&i915->gt))
if (intel_gt_is_wedged(to_root_gt(i915)))
return 0;

return i915_live_subtests(tests, i915);

0 comments on commit 068a755

Please sign in to comment.