Skip to content

Commit

Permalink
drm/nouveau: wait for the exclusive fence after the shared ones v2
Browse files Browse the repository at this point in the history
commit 67f7430 upstream.

Always waiting for the exclusive fence resulted in some performance
regressions. So try to wait for the shared fences first, then the
exclusive fence should always be signaled already.

v2: fix incorrectly placed "(", add some comment why we do this.

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Stefan Fritsch <sf@sfritsch.de>
Tested-by: Dan Moulding <dmoulding@me.com>
Acked-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Cc: <stable@vger.kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20211209102335.18321-1-christian.koenig@amd.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
ChristianKoenigAMD authored and gregkh committed Jan 5, 2022
1 parent e631093 commit 2ee1296
Showing 1 changed file with 15 additions and 13 deletions.
28 changes: 15 additions & 13 deletions drivers/gpu/drm/nouveau/nouveau_fence.c
Expand Up @@ -353,15 +353,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e

if (ret)
return ret;
}

fobj = dma_resv_shared_list(resv);
fence = dma_resv_excl_fence(resv);
fobj = NULL;
} else {
fobj = dma_resv_shared_list(resv);
}

if (fence) {
/* Waiting for the exclusive fence first causes performance regressions
* under some circumstances. So manually wait for the shared ones first.
*/
for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;

fence = rcu_dereference_protected(fobj->shared[i],
dma_resv_held(resv));

f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
Expand All @@ -373,20 +380,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e

if (must_wait)
ret = dma_fence_wait(fence, intr);

return ret;
}

if (!exclusive || !fobj)
return ret;

for (i = 0; i < fobj->shared_count && !ret; ++i) {
fence = dma_resv_excl_fence(resv);
if (fence) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;

fence = rcu_dereference_protected(fobj->shared[i],
dma_resv_held(resv));

f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
Expand All @@ -398,6 +398,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e

if (must_wait)
ret = dma_fence_wait(fence, intr);

return ret;
}

return ret;
Expand Down

0 comments on commit 2ee1296

Please sign in to comment.