drm/scheduler: Scheduler priority fixes (v2)
[ Upstream commit e2d732f ]

Remove DRM_SCHED_PRIORITY_LOW, as it was used
in only one place (amdgpu_to_sched_priority(), which
now returns DRM_SCHED_PRIORITY_MIN instead).

Rename DRM_SCHED_PRIORITY_MAX to DRM_SCHED_PRIORITY_COUNT
and set it apart by a blank line, as it represents the
(total) count of said priorities and is used as such in
loops throughout the code. (With 0-based indexing, the
one-past-the-last value is the count.)
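
For illustration, a minimal standalone sketch (toy PRIO_* names,
not the kernel's enumerators) of why a trailing 0-based
enumerator serves as both the array size and the loop bound:

    #include <stdio.h>

    /* Toy mirror of the layout: PRIO_COUNT is not a real
     * priority, it is the number of real priorities.
     */
    enum prio { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_KERNEL, PRIO_COUNT };

    int main(void)
    {
            int num_jobs[PRIO_COUNT] = { 0 };       /* COUNT sizes the array */
            int i;

            for (i = PRIO_MIN; i < PRIO_COUNT; i++) /* ...and bounds the loop */
                    printf("rq %d: %d jobs\n", i, num_jobs[i]);
            return 0;
    }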

Remove the redundant word HIGH from the priority names,
and rename *KERNEL* to *HIGH*, since that is what it
really means: high.

v2: Add back KERNEL, and replace the separate SW and HW
    levels with a single HIGH between NORMAL and KERNEL.
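
The v2 ordering can be pinned down at compile time; a standalone
sketch, again with toy names standing in for the
DRM_SCHED_PRIORITY_* enumerators defined in the hunk below:

    /* HIGH now sits between NORMAL and KERNEL, and the trailing
     * enumerator counts the four real levels.
     */
    enum prio { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_KERNEL, PRIO_COUNT };

    _Static_assert(PRIO_NORMAL < PRIO_HIGH && PRIO_HIGH < PRIO_KERNEL,
                   "HIGH sits between NORMAL and KERNEL");
    _Static_assert(PRIO_COUNT == 4, "four real priority levels");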

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Luben Tuikov authored and gregkh committed Nov 5, 2020
1 parent e2578eb commit c1918dd
Showing 8 changed files with 18 additions and 16 deletions.
4 changes: 2 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                       enum drm_sched_priority priority)
 {
-        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
                 return -EINVAL;
 
         /* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
 {
         switch (prio) {
-        case DRM_SCHED_PRIORITY_HIGH_HW:
+        case DRM_SCHED_PRIORITY_HIGH:
         case DRM_SCHED_PRIORITY_KERNEL:
                 return AMDGPU_GFX_PIPE_PRIO_HIGH;
         default:
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
         int i;
 
         /* Signal all jobs not yet scheduled */
-        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                 struct drm_sched_rq *rq = &sched->sched_rq[i];
 
                 if (!rq)
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                         &ring->sched;
         }
 
-        for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
                 atomic_set(&ring->num_jobs[i], 0);
 
         return 0;
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -243,7 +243,7 @@ struct amdgpu_ring {
         bool                    has_compute_vm_bug;
         bool                    no_scheduler;
 
-        atomic_t                num_jobs[DRM_SCHED_PRIORITY_MAX];
+        atomic_t                num_jobs[DRM_SCHED_PRIORITY_COUNT];
         struct mutex            priority_mutex;
         /* protected by priority_mutex */
         int                     priority;
6 changes: 3 additions & 3 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
 {
         switch (amdgpu_priority) {
         case AMDGPU_CTX_PRIORITY_VERY_HIGH:
-                return DRM_SCHED_PRIORITY_HIGH_HW;
+                return DRM_SCHED_PRIORITY_HIGH;
         case AMDGPU_CTX_PRIORITY_HIGH:
-                return DRM_SCHED_PRIORITY_HIGH_SW;
+                return DRM_SCHED_PRIORITY_HIGH;
         case AMDGPU_CTX_PRIORITY_NORMAL:
                 return DRM_SCHED_PRIORITY_NORMAL;
         case AMDGPU_CTX_PRIORITY_LOW:
         case AMDGPU_CTX_PRIORITY_VERY_LOW:
-                return DRM_SCHED_PRIORITY_LOW;
+                return DRM_SCHED_PRIORITY_MIN;
         case AMDGPU_CTX_PRIORITY_UNSET:
                 return DRM_SCHED_PRIORITY_UNSET;
         default:
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2101,7 +2101,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                 ring = adev->mman.buffer_funcs_ring;
                 sched = &ring->sched;
                 r = drm_sched_entity_init(&adev->mman.entity,
-                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
+                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
                                           1, NULL);
                 if (r) {
                         DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
4 changes: 2 additions & 2 deletions drivers/gpu/drm/scheduler/sched_main.c
@@ -625,7 +625,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
                 return NULL;
 
         /* Kernel run queue has higher priority than normal run queue*/
-        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
                 if (entity)
                         break;
@@ -852,7 +852,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
         sched->name = name;
         sched->timeout = timeout;
         sched->hang_limit = hang_limit;
-        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
         init_waitqueue_head(&sched->wake_up_worker);
12 changes: 7 additions & 5 deletions include/drm/gpu_scheduler.h
@@ -33,14 +33,16 @@
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
 enum drm_sched_priority {
         DRM_SCHED_PRIORITY_MIN,
-        DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
         DRM_SCHED_PRIORITY_NORMAL,
-        DRM_SCHED_PRIORITY_HIGH_SW,
-        DRM_SCHED_PRIORITY_HIGH_HW,
+        DRM_SCHED_PRIORITY_HIGH,
         DRM_SCHED_PRIORITY_KERNEL,
-        DRM_SCHED_PRIORITY_MAX,
+
+        DRM_SCHED_PRIORITY_COUNT,
         DRM_SCHED_PRIORITY_INVALID = -1,
         DRM_SCHED_PRIORITY_UNSET = -2
 };
@@ -274,7 +276,7 @@ struct drm_gpu_scheduler {
         uint32_t                        hw_submission_limit;
         long                            timeout;
         const char                      *name;
-        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
+        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
         wait_queue_head_t               wake_up_worker;
         wait_queue_head_t               job_scheduled;
         atomic_t                        hw_rq_count;
