Skip to content
Permalink
Browse files
DONOTMERGE: dma-buf: Get rid of dma_fence_get_rcu_safe
This helper existed to handle the weird corner-cases caused by using
SLAB_TYPESAFE_BY_RCU for backing dma_fence.  Now that no one is using
that anymore (i915 was the only real user), dma_fence_get_rcu is
sufficient.  The one slightly annoying thing we have to deal with here
is that dma_fence_get_rcu_safe did an rcu_dereference as well as a
SLAB_TYPESAFE_BY_RCU-safe dma_fence_get_rcu.  This means each call site
ends up being 3 lines instead of 1.

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Christian König <christian.koenig@amd.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
  • Loading branch information
jekstrand authored and intel-lab-lkp committed Jun 16, 2021
1 parent c889567 commit d718e3dba487fc068d793f6220ac2508c98d0eef
Show file tree
Hide file tree
Showing 8 changed files with 23 additions and 59 deletions.
@@ -15,15 +15,17 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
* dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
* @chain: chain node to get the previous node from
*
* Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
* chain node.
* Use rcu_dereference and dma_fence_get_rcu to get a reference to the
* previous fence of the chain node.
*/
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
struct dma_fence *prev;

rcu_read_lock();
prev = dma_fence_get_rcu_safe(&chain->prev);
prev = rcu_dereference(chain->prev);
if (prev)
prev = dma_fence_get_rcu(prev);
rcu_read_unlock();
return prev;
}
@@ -376,7 +376,9 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
dst_list = NULL;
}

new = dma_fence_get_rcu_safe(&src->fence_excl);
new = rcu_dereference(src->fence_excl);
if (new)
new = dma_fence_get_rcu(new);
rcu_read_unlock();

src_list = dma_resv_shared_list(dst);
@@ -161,7 +161,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
struct dma_fence *old;

rcu_read_lock();
old = dma_fence_get_rcu_safe(ptr);
old = rcu_dereference(*ptr);
if (old)
old = dma_fence_get_rcu(old);
rcu_read_unlock();

if (old) {
@@ -103,7 +103,9 @@ i915_active_fence_get(struct i915_active_fence *active)
struct dma_fence *fence;

rcu_read_lock();
fence = dma_fence_get_rcu_safe(&active->fence);
fence = rcu_dereference(active->fence);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();

return fence;
@@ -351,7 +351,9 @@ int i915_vma_wait_for_bind(struct i915_vma *vma)
struct dma_fence *fence;

rcu_read_lock();
fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
fence = rcu_dereference(vma->active.excl.fence);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
if (fence) {
err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
@@ -105,7 +105,9 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct dma_fence *fence;

rcu_read_lock();
fence = dma_fence_get_rcu_safe(&syncobj->fence);
fence = rcu_dereference(syncobj->fence);
if (fence)
fence = dma_fence_get_rcu(syncobj->fence);
rcu_read_unlock();

return fence;
@@ -307,56 +307,6 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
return NULL;
}

/**
 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Function returns NULL if no refcount could be obtained, or the fence.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
	do {
		struct dma_fence *fence;

		fence = rcu_dereference(*fencep);
		if (!fence)
			return NULL;

		/* Refcount already hit zero: the fence is mid-free (and may
		 * be reallocated by a SLAB_TYPESAFE_BY_RCU slab), so reload
		 * the pointer and retry rather than returning a dead fence.
		 */
		if (!dma_fence_get_rcu(fence))
			continue;

		/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
		 * provides a full memory barrier upon success (such as now).
		 * This is paired with the write barrier from assigning
		 * to the __rcu protected fence pointer so that if that
		 * pointer still matches the current fence, we know we
		 * have successfully acquired a reference to it. If it no
		 * longer matches, we are holding a reference to some other
		 * reallocated pointer. This is possible if the allocator
		 * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
		 * fence remains valid for the RCU grace period, but it
		 * may be reallocated. When using such allocators, we are
		 * responsible for ensuring the reference we get is to
		 * the right fence, as below.
		 */
		if (fence == rcu_access_pointer(*fencep))
			return rcu_pointer_handoff(fence);

		/* Pointer changed under us: the reference we took is to a
		 * stale (possibly reallocated) fence. Drop it and retry.
		 */
		dma_fence_put(fence);
	} while (1);
}

#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
@@ -248,7 +248,9 @@ dma_resv_get_excl_unlocked(struct dma_resv *obj)
return NULL;

rcu_read_lock();
fence = dma_fence_get_rcu_safe(&obj->fence_excl);
fence = rcu_dereference(obj->fence_excl);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();

return fence;

0 comments on commit d718e3d

Please sign in to comment.