locking/core: Remove cpu_relax_lowlatency() users
With the s390 special case of a yielding cpu_relax() implementation gone,
we can now remove all users of cpu_relax_lowlatency() and replace them
with cpu_relax().
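
All of the conversions below share the same shape: a loop that polls some
condition and issues the relax hint in its body. As a minimal sketch of
that pattern (the helper and flag are illustrative, not taken from the
patch):

	/*
	 * Illustrative spin-wait only.  On x86, cpu_relax() emits the
	 * PAUSE instruction; with the s390 yielding behaviour gone, the
	 * hint is similarly cheap on every architecture.
	 */
	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <asm/processor.h>	/* cpu_relax() */

	static void spin_until_set(int *flag)
	{
		while (!READ_ONCE(*flag))
			cpu_relax();
	}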

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1477386195-32736-5-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
borntraeger authored and Ingo Molnar committed Nov 16, 2016
Commit f2f09a4, parent 22b6430
Showing 8 changed files with 16 additions and 16 deletions.
drivers/gpu/drm/i915/i915_gem_request.c (1 addition, 1 deletion)
@@ -723,7 +723,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 		if (busywait_stop(timeout_us, cpu))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	} while (!need_resched());
 
 	return false;

drivers/vhost/net.c (2 additions, 2 deletions)
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 		endtime = busy_clock() + vq->busyloop_timeout;
 		while (vhost_can_busy_poll(vq->dev, endtime) &&
 		       vhost_vq_avail_empty(vq->dev, vq))
-			cpu_relax_lowlatency();
+			cpu_relax();
 		preempt_enable();
 		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 				      out_num, in_num, NULL, NULL);
@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 		while (vhost_can_busy_poll(&net->dev, endtime) &&
 		       !sk_has_rx_data(sk) &&
 		       vhost_vq_avail_empty(&net->dev, vq))
-			cpu_relax_lowlatency();
+			cpu_relax();
 
 		preempt_enable();
 
kernel/locking/mcs_spinlock.h (2 additions, 2 deletions)
@@ -28,7 +28,7 @@ struct mcs_spinlock {
 #define arch_mcs_spin_lock_contended(l)					\
 do {									\
 	while (!(smp_load_acquire(l)))					\
-		cpu_relax_lowlatency();					\
+		cpu_relax();						\
 } while (0)
 #endif
 
@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 			return;
 		/* Wait until the next pointer is set */
 		while (!(next = READ_ONCE(node->next)))
-			cpu_relax_lowlatency();
+			cpu_relax();
 	}
 
 	/* Pass lock to next waiter. */

kernel/locking/mutex.c (2 additions, 2 deletions)
@@ -369,7 +369,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 			break;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	rcu_read_unlock();
 
@@ -492,7 +492,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	if (!waiter)

kernel/locking/osq_lock.c (3 additions, 3 deletions)
@@ -75,7 +75,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 			break;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	return next;
@@ -122,7 +122,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		if (need_resched())
 			goto unqueue;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	return true;
 
@@ -148,7 +148,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		if (smp_load_acquire(&node->locked))
 			return true;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 
 		/*
 		 * Or we race against a concurrent unqueue()'s step-B, in which

kernel/locking/qrwlock.c (3 additions, 3 deletions)
@@ -54,7 +54,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
+		cpu_relax();
 		cnts = atomic_read_acquire(&lock->cnts);
 	}
 }
@@ -130,7 +130,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -141,7 +141,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 					_QW_LOCKED) == _QW_WAITING))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 unlock:
 	arch_spin_unlock(&lock->wait_lock);

kernel/locking/rwsem-xadd.c (2 additions, 2 deletions)
@@ -368,7 +368,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 			return false;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	rcu_read_unlock();
 out:
@@ -423,7 +423,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	osq_unlock(&sem->osq);
done:

lib/lockref.c (1 addition, 1 deletion)
@@ -20,7 +20,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax_lowlatency();						\
+		cpu_relax();							\
 	}									\
 } while (0)
 
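The lockref hunk shows only the tail of the CMPXCHG_LOOP() macro. As a
rough user-space analogue of that retry loop (hypothetical names
throughout; C11 atomics stand in for the kernel's cmpxchg64_relaxed(),
and the real macro also gives up once the spinlock half of the word is
taken):

	/* Sketch of a cmpxchg retry loop in the style of lib/lockref.c:
	 * snapshot the combined word, apply the update, and on a failed
	 * compare-and-swap pause before trying again. */
	#include <stdatomic.h>
	#include <stdbool.h>

	struct lockref_demo {
		_Atomic unsigned long lock_count;	/* lock and count in one word */
	};

	static inline void relax(void)
	{
	#if defined(__x86_64__) || defined(__i386__)
		__builtin_ia32_pause();		/* PAUSE, as x86's cpu_relax() */
	#endif
	}

	static bool lockref_get_demo(struct lockref_demo *lr)
	{
		unsigned long old = atomic_load_explicit(&lr->lock_count,
							 memory_order_relaxed);
		for (;;) {	/* the kernel loop also exits if the lock is held */
			unsigned long new = old + 1;	/* the CODE step: bump the count */

			if (atomic_compare_exchange_weak_explicit(&lr->lock_count,
					&old, new, memory_order_relaxed,
					memory_order_relaxed))
				return true;	/* the SUCCESS step */
			relax();	/* cpu_relax() equivalent between attempts */
		}
	}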
