Skip to content

Commit

Permalink
linux/dma-fence.h: Allow fences to be signaled after their refcount going down to zero
Browse files Browse the repository at this point in the history

as long as their .release callback hasn't called dma_fence_free() yet. These
semantics are not documented anywhere, and are probably just an
implementation detail, but the vmwgfx driver heavily relies on them.

Sorry riastradh@, this is unfortunate but circumventing this (admittedly
healthy) assertion requires a major code reorganization of the driver. We
really can't fight against the upstream on this matter.
  • Loading branch information
depressed-pho committed Jul 15, 2023
1 parent 0a65674 commit af88505
Showing 1 changed file with 33 additions and 31 deletions.
64 changes: 33 additions & 31 deletions sys/external/bsd/drm2/linux/linux_dma_fence.c
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,18 @@ linux_dma_fences_fini(void)
spin_lock_destroy(&dma_fence_stub.lock);
}

/*
 * dma_fence_assert_alive(fence)
 *
 *	Assert that fence hasn't been destroyed, i.e. that its storage is
 *	still valid to touch.  Unlike dma_fence_referenced_p, this makes
 *	no claim about the reference count: a fence whose refcount has
 *	dropped to zero but whose .release callback has not yet called
 *	dma_fence_free() still passes (behavior the vmwgfx driver relies
 *	on -- see the commit message).
 *
 *	Compiles away unless DIAGNOSTIC (__diagused / KASSERTMSG are
 *	no-ops otherwise).
 */
static inline void __diagused
dma_fence_assert_alive(struct dma_fence *fence)
{
/*
 * Two separate assertions so the panic message is more telling:
 * the first fires when f_magic was stamped BAD (presumably done at
 * destruction time -- confirm against dma_fence_destroy/free in this
 * file), i.e. a use-after-free; the second catches a fence that was
 * never initialized or whose magic was corrupted (neither GOOD nor
 * BAD).  Order matters: checking BAD first distinguishes the two.
 */
KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
}

/*
* dma_fence_referenced_p(fence)
*
Expand All @@ -158,8 +170,7 @@ static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);

return kref_referenced_p(&fence->refcount);
}
Expand Down Expand Up @@ -206,8 +217,7 @@ dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, uint64_t context, uint64_t seqno)
{

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);
KASSERT(kref_read(&fence->refcount) == 0 ||
kref_read(&fence->refcount) == 1);
KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
Expand Down Expand Up @@ -345,10 +355,8 @@ bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
dma_fence_assert_alive(a);
dma_fence_assert_alive(b);
KASSERTMSG(a->context == b->context, "incommensurate fences"
": %"PRIu64" @ %p =/= %"PRIu64" @ %p",
a->context, a, b->context, b);
Expand Down Expand Up @@ -382,8 +390,7 @@ dma_fence_get(struct dma_fence *fence)
if (fence == NULL)
return NULL;

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);

kref_get(&fence->refcount);
return fence;
Expand All @@ -401,8 +408,7 @@ dma_fence_get_rcu(struct dma_fence *fence)
{

__insn_barrier();
KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);
if (!kref_get_unless_zero(&fence->refcount))
return NULL;
return fence;
Expand Down Expand Up @@ -498,7 +504,7 @@ dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
bool already_enabled;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);
KASSERT(spin_is_locked(fence->lock));

/* Determine whether signalling was enabled, and enable it. */
Expand Down Expand Up @@ -544,7 +550,7 @@ dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
{
int ret;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

/* Optimistically try to skip the lock if it's already signalled. */
if (atomic_load_relaxed(&fence->flags) &
Expand Down Expand Up @@ -590,7 +596,7 @@ dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
bool onqueue;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

spin_lock(fence->lock);
onqueue = fcb->fcb_onqueue;
Expand All @@ -616,7 +622,7 @@ void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

spin_lock(fence->lock);
if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
Expand All @@ -638,7 +644,7 @@ dma_fence_is_signaled(struct dma_fence *fence)
{
bool signaled;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

spin_lock(fence->lock);
signaled = dma_fence_is_signaled_locked(fence);
Expand All @@ -657,7 +663,7 @@ bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);
KASSERT(spin_is_locked(fence->lock));

/* Check whether we already set the signalled bit. */
Expand Down Expand Up @@ -691,8 +697,7 @@ void
dma_fence_set_error(struct dma_fence *fence, int error)
{

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);
KASSERT((atomic_load_relaxed(&fence->flags) &
(1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0);
KASSERTMSG(error >= -ELAST, "%d", error);
Expand All @@ -714,8 +719,7 @@ dma_fence_get_status(struct dma_fence *fence)
{
int ret;

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);

spin_lock(fence->lock);
if (!dma_fence_is_signaled_locked(fence)) {
Expand Down Expand Up @@ -745,7 +749,7 @@ dma_fence_signal(struct dma_fence *fence)
{
int ret;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

spin_lock(fence->lock);
ret = dma_fence_signal_locked(fence);
Expand All @@ -765,7 +769,7 @@ dma_fence_signal_locked(struct dma_fence *fence)
{
struct dma_fence_cb *fcb, *next;

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);
KASSERT(spin_is_locked(fence->lock));

/* If it's been signalled, fail; otherwise set the signalled bit. */
Expand Down Expand Up @@ -808,7 +812,7 @@ wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

KASSERT(dma_fence_referenced_p(fence));
dma_fence_assert_alive(fence);

mutex_enter(&cb->common->lock);
cb->common->done = true;
Expand Down Expand Up @@ -841,7 +845,7 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,

/* Optimistically check whether any are signalled. */
for (i = 0; i < nfences; i++) {
KASSERT(dma_fence_referenced_p(fences[i]));
dma_fence_assert_alive(fences[i]);
if (dma_fence_is_signaled(fences[i])) {
if (ip)
*ip = i;
Expand Down Expand Up @@ -873,7 +877,7 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
*/
for (i = 0; i < nfences; i++) {
cb[i].common = &common;
KASSERT(dma_fence_referenced_p(fences[i]));
dma_fence_assert_alive(fences[i]);
ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
&wait_any_cb);
if (ret) {
Expand Down Expand Up @@ -1158,8 +1162,7 @@ bool
__dma_fence_signal(struct dma_fence *fence)
{

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);

if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return false;
Expand All @@ -1179,8 +1182,7 @@ __dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
struct dma_fence_cb *fcb, *next;

KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
dma_fence_assert_alive(fence);

spin_lock(fence->lock);

Expand Down

0 comments on commit af88505

Please sign in to comment.