follow-up fixups for atomic_t spinlocks #17611
@@ -100,7 +100,6 @@ XioConnection::XioConnection(XioMessenger *m, XioConnection::type _type,
     in_seq(),
     cstate(this)
 {
-  pthread_spin_init(&sp, PTHREAD_PROCESS_PRIVATE);
   set_peer_type(peer.name.type());
   set_peer_addr(peer.addr);

@@ -160,7 +159,7 @@ void XioConnection::send_keepalive_or_ack(bool ack, const utime_t *tp)
 {
   /* If con is not in READY state, we need to queue the request */
   if (cstate.session_state.read() != XioConnection::UP) {
-    pthread_spin_lock(&sp);
+    std::lock_guad<ceph::util::spinlock> lg(sp);
Review comment: typo?

Review comment: Yes, and that makes it worrisome that it compiled on both my local system and here... hmm...

Review comment: with-xio is set to OFF by default, so cmake -DWITH_XIO=ON maybe?
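For reference, the corrected line is presumably just the fixed spelling (lock_guard), keeping the ceph::util::spinlock type used elsewhere in this diff:

    std::lock_guard<ceph::util::spinlock> lg(sp);  // lock_guard, not lock_guad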
     if (cstate.session_state.read() != XioConnection::UP) {
       if (ack) {
         outgoing.ack = true;
@@ -169,10 +168,8 @@ void XioConnection::send_keepalive_or_ack(bool ack, const utime_t *tp)
     else {
       outgoing.keepalive = true;
     }
-    pthread_spin_unlock(&sp);
     return;
   }
-  pthread_spin_unlock(&sp);
 }

 send_keepalive_or_ack_internal(ack, tp);

@@ -612,7 +609,7 @@ void XioConnection::msg_release_fail(struct xio_msg *msg, int code)
 int XioConnection::flush_out_queues(uint32_t flags) {
   XioMessenger* msgr = static_cast<XioMessenger*>(get_messenger());
   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_lock(&sp);
+    sp.lock();

   if (outgoing.keepalive) {
     outgoing.keepalive = false;

@@ -637,7 +634,7 @@ int XioConnection::flush_out_queues(uint32_t flags) {
     msgr->_send_message_impl(m, this);
   }
   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&sp);
+    sp.unlock();
   return 0;
 }

@@ -647,7 +644,7 @@ int XioConnection::discard_out_queues(uint32_t flags)
   XioSubmit::Queue deferred_q;

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_lock(&sp);
+    sp.lock();

   /* the two send queues contain different objects:
    * - anything on the mqueue is a Message

@@ -662,7 +659,7 @@ int XioConnection::discard_out_queues(uint32_t flags)
   outgoing.keepalive = outgoing.ack = false;

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&sp);
+    sp.unlock();

   // mqueue
   while (!disc_q.empty()) {

@@ -700,11 +697,11 @@ int XioConnection::discard_out_queues(uint32_t flags)
 int XioConnection::adjust_clru(uint32_t flags)
 {
   if (flags & CState::OP_FLAG_LOCKED)
-    pthread_spin_unlock(&sp);
+    sp.unlock();

   XioMessenger* msgr = static_cast<XioMessenger*>(get_messenger());
   msgr->conns_sp.lock();
-  pthread_spin_lock(&sp);
+  sp.lock();

   if (cstate.flags & CState::FLAG_MAPPED) {
     XioConnection::ConnList::iterator citer =

@@ -716,7 +713,7 @@ int XioConnection::adjust_clru(uint32_t flags)
   msgr->conns_sp.unlock();

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&sp);
+    sp.unlock();

   return 0;
 }

@@ -742,7 +739,7 @@ void XioConnection::mark_down()
 int XioConnection::_mark_down(uint32_t flags)
 {
   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_lock(&sp);
+    sp.lock();

   // per interface comment, we only stage a remote reset if the
   // current policy required it

@@ -756,7 +753,7 @@ int XioConnection::_mark_down(uint32_t flags)
   discard_out_queues(flags|CState::OP_FLAG_LOCKED);

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&sp);
+    sp.unlock();

   return 0;
 }

@@ -769,28 +766,28 @@ void XioConnection::mark_disposable()
 int XioConnection::_mark_disposable(uint32_t flags)
 {
   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_lock(&sp);
+    sp.lock();

   cstate.policy.lossy = true;

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&sp);
+    sp.unlock();

   return 0;
 }

 int XioConnection::CState::state_up_ready(uint32_t flags)
 {
   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_lock(&xcon->sp);
+    xcon->sp.lock();

   xcon->flush_out_queues(flags|CState::OP_FLAG_LOCKED);

   session_state = session_states::UP;
   startup_state = session_startup_states::READY;

   if (! (flags & CState::OP_FLAG_LOCKED))
-    pthread_spin_unlock(&xcon->sp);
+    xcon->sp.unlock();

   return (0);
 }

@@ -806,20 +803,20 @@ int XioConnection::CState::state_discon()
 int XioConnection::CState::state_flow_controlled(uint32_t flags)
 {
   if (! (flags & OP_FLAG_LOCKED))
-    pthread_spin_lock(&xcon->sp);
+    xcon->sp.lock();

   session_state = session_states::FLOW_CONTROLLED;

   if (! (flags & OP_FLAG_LOCKED))
-    pthread_spin_unlock(&xcon->sp);
+    xcon->sp.unlock();

   return (0);
 }

 int XioConnection::CState::state_fail(Message* m, uint32_t flags)
 {
   if (! (flags & OP_FLAG_LOCKED))
-    pthread_spin_lock(&xcon->sp);
+    xcon->sp.lock();

Review comment: If we're going to pass a 'locked' or 'unlocked' flag around, then it's better to just pass a reference to a unique_lock. This might be more involved than you want to do right now, though. On the other hand, it's one of those "we're touching it doing cleanups anyway, we may as well get all the obvious, relevant ones done" sorta' things. And since a lock plus a flag stating whether it is held are basically a unique_lock, except wandering around separately, not exception safe, and not as idiomatic, I'd kind of like to replace use of the LOCKED flag with unique_lock.

Review comment: Yeah, TBH I'm a bit sick today and thus taking the lazy route exactly as you surmised. ;-)
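A minimal standalone sketch of the unique_lock approach suggested above, assuming only that the lock type is BasicLockable (lock()/unlock()); the Spinlock struct and state_fail_locked name are illustrative stand-ins, not the actual Ceph API:

    #include <atomic>
    #include <mutex>

    // Illustrative stand-in for ceph::util::spinlock: a minimal
    // BasicLockable spinlock built on std::atomic_flag.
    struct Spinlock {
      std::atomic_flag f = ATOMIC_FLAG_INIT;
      void lock()   { while (f.test_and_set(std::memory_order_acquire)) {} }
      void unlock() { f.clear(std::memory_order_release); }
    };

    // The caller passes a unique_lock that either owns the lock or not,
    // instead of a flags word carrying OP_FLAG_LOCKED.
    int state_fail_locked(std::unique_lock<Spinlock>& lk)
    {
      if (!lk.owns_lock())
        lk.lock();             // acquire only if the caller has not

      // ... advance state, drop queued msgs, adjust LRU ...

      return 0;                // lk releases in the caller's scope,
                               // even if an exception unwinds past us
    }

    int main()
    {
      Spinlock sp;
      {
        // Unlocked caller: defer the acquire to the callee.
        std::unique_lock<Spinlock> lk(sp, std::defer_lock);
        state_fail_locked(lk);
      }   // lk unlocks automatically here
      {
        // Already-locked caller: the callee sees owns_lock() == true.
        std::unique_lock<Spinlock> lk(sp);
        state_fail_locked(lk);
      }
    }

Compared with the LOCKED-flag pattern in this diff, the lock and the knowledge of whether it is held travel together in one object, so an early return or exception can no longer leak a held spinlock.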
   // advance to state FAIL, drop queued, msgs, adjust LRU
   session_state = session_states::DISCONNECTED;

@@ -831,7 +828,7 @@ int XioConnection::CState::state_fail(Message* m, uint32_t flags)
   xcon->disconnect();

   if (! (flags & OP_FLAG_LOCKED))
-    pthread_spin_unlock(&xcon->sp);
+    xcon->sp.unlock();

   // notify ULP
   XioMessenger* msgr = static_cast<XioMessenger*>(xcon->get_messenger());

Review comment: The power of libraries!

Review comment: Brought a tear to my eye, it did!

Review comment: Mind renaming this to gen_rand_number, to keep it consistent with the rgw naming style (like https://github.com/ceph/ceph/blob/master/src/rgw/rgw_common.cc#L830)?