dma-buf: fix and rework dma_buf_poll
Daniel pointed me towards this function, and there are multiple obvious problems
in the implementation.

First of all, the retry loop is not working as intended. In general, the retry
only makes sense if you grab the reference first and then check for the retry.
Second, we skipped checking the exclusive fence when shared fences were present.
And last, the whole implementation was unnecessarily complex and rather hard to
understand, which could lead to unexpected behavior of the IOCTL.
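
Concretely, the get-then-check ordering the rework adopts looks like this (a
minimal sketch lifted from the new code further down; the no-fence case simply
falls through):

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fence = dma_resv_excl_fence(resv);
	if (fence) {
		/* Grab the reference first ... */
		fence = dma_fence_get_rcu(fence);
		/* ... and only then decide whether a retry is needed. */
		if (!fence || read_seqcount_retry(&resv->seq, seq)) {
			dma_fence_put(fence);	/* NULL-safe */
			rcu_read_unlock();
			goto retry;
		}
	}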

Fix all this by reworking the implementation from scratch.

Only mildly tested and needs a thoughtful review of the code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Christian König authored and intel-lab-lkp committed Jun 17, 2021
1 parent c7d4c1f commit dfa9f2ec4c082b73e644e2c565e58e2291f94463
Showing 2 changed files with 54 additions and 80 deletions.
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -72,7 +72,7 @@ static void dma_buf_release(struct dentry *dentry)
 	 * If you hit this BUG() it means someone dropped their ref to the
 	 * dma-buf while still having pending operation to the buffer.
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dmabuf->ops->release(dmabuf);
 
@@ -206,12 +206,15 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
+	struct dma_buf_poll_cb_t *dcb;
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
 	struct dma_resv_list *fobj;
-	struct dma_fence *fence_excl;
-	__poll_t events;
 	unsigned shared_count, seq;
+	struct dma_fence *fence;
+	__poll_t events;
+	int r, i;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
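
For orientation: both callback slots are armed with the same completion
callback. Around the time of this series, dma_buf_poll_cb in the same file
looks roughly like the sketch below (shown for context, not part of this diff;
details may differ):

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	/* Report the events recorded in dcb->active and free the slot */
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}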
@@ -225,99 +228,70 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
+	dcb = events & EPOLLOUT ? &dmabuf->cb_out : &dmabuf->cb_in;
+
+	/* Only queue a new one if we are not still waiting for the old one */
+	spin_lock_irq(&dmabuf->poll.lock);
+	if (dcb->active)
+		events = 0;
+	else
+		dcb->active = events;
+	spin_unlock_irq(&dmabuf->poll.lock);
+	if (!events)
+		return 0;
+
 retry:
 	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
 
 	fobj = rcu_dereference(resv->fence);
-	if (fobj)
+	if (fobj && events & EPOLLOUT)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = dma_resv_excl_fence(resv);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
-
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
-
-		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
-		spin_unlock_irq(&dmabuf->poll.lock);
-
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
-		}
-	}
-
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
-
-		/* Only queue a new callback if no event has fired yet */
-		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active)
-			events &= ~EPOLLOUT;
-		else
-			dcb->active = EPOLLOUT;
-		spin_unlock_irq(&dmabuf->poll.lock);
-
-		if (!(events & EPOLLOUT))
-			goto out;
-
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
-			dma_fence_put(fence);
-		}
-
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
-			dma_buf_poll_cb(NULL, &dcb->cb);
-	}
-
+
+	for (i = 0; i < shared_count; ++i) {
+		fence = rcu_dereference(fobj->shared[i]);
+		fence = dma_fence_get_rcu(fence);
+		if (!fence || read_seqcount_retry(&resv->seq, seq)) {
+			/* Concurrent modify detected, force re-check */
+			dma_fence_put(fence);
+			rcu_read_unlock();
+			goto retry;
+		}
+
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		dma_fence_put(fence);
+		if (!r) {
+			/* Callback queued */
+			events = 0;
+			goto out;
+		}
+	}
+
+	fence = dma_resv_excl_fence(resv);
+	if (fence) {
+		fence = dma_fence_get_rcu(fence);
+		if (!fence || read_seqcount_retry(&resv->seq, seq)) {
+			/* Concurrent modify detected, force re-check */
+			dma_fence_put(fence);
+			rcu_read_unlock();
+			goto retry;
+		}
+
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		dma_fence_put(fence);
+		if (!r) {
+			/* Callback queued */
+			events = 0;
+			goto out;
+		}
+	}
+
+	/* No callback queued, wake up any additional waiters. */
+	dma_buf_poll_cb(NULL, &dcb->cb);
+
 out:
 	rcu_read_unlock();
 	return events;
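
The resulting semantics: polling for EPOLLIN arms cb_in on the exclusive fence
only, while EPOLLOUT arms cb_out on all fences, shared and exclusive. A
hypothetical userspace sketch (dmabuf_wait and dmabuf_fd are illustrative
names, not kernel or libc API):

#include <poll.h>

/* Hypothetical helper: block until the dma-buf is idle enough for the
 * requested access. POLLOUT waits for all fences (write access), POLLIN
 * only for the exclusive fence (read access). */
static int dmabuf_wait(int dmabuf_fd, short events)
{
	struct pollfd pfd = { .fd = dmabuf_fd, .events = events };

	return poll(&pfd, 1, -1);	/* -1: wait without timeout */
}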
@@ -562,8 +536,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -329,7 +329,7 @@ struct dma_buf {
 		wait_queue_head_t *poll;
 
 		__poll_t active;
-	} cb_excl, cb_shared;
+	} cb_in, cb_out;
 };
 
 /**
