Garbage collect (req_id, seq_id) pairs

commit dbda322ce36c97b123567c393c0ed7d8faee78a9 (1 parent: dd563fb)
@rescrv authored
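
This commit adds cluster-wide garbage collection of acknowledged (region, sequence-number) pairs. Every 30 seconds a SIGALRM-driven pass has each region's point leader compute the lowest sequence number still in flight and broadcast that bound to every server in a new CHAIN_GC message; a new garbage-collector thread on each daemon then deletes the acked entries below the bound from the disk layer.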
common/configuration.cc (12 lines changed)
@@ -90,6 +90,18 @@ configuration :: version() const
return m_version;
}
+void
+configuration :: get_all_addresses(std::vector<std::pair<server_id, po6::net::location> >* addrs)
+{
+ addrs->resize(m_addresses_by_server_id.size());
+
+ for (size_t i = 0; i < m_addresses_by_server_id.size(); ++i)
+ {
+ (*addrs)[i].first = server_id(m_addresses_by_server_id[i].first);
+ (*addrs)[i].second = m_addresses_by_server_id[i].second;
+ }
+}
+
po6::net::location
configuration :: get_address(const server_id& id) const
{
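
For context, a minimal caller-side sketch of the new accessor (hypothetical caller; cfg is assumed to be a configuration instance), mirroring how the retransmitter consumes it later in this commit:

    std::vector<std::pair<hyperdex::server_id, po6::net::location> > members;
    cfg.get_all_addresses(&members);

    for (size_t i = 0; i < members.size(); ++i)
    {
        // members[i].first identifies the server; members[i].second is its address
    }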
common/configuration.h (1 line changed)
@@ -62,6 +62,7 @@ class configuration
// membership metadata
public:
+ void get_all_addresses(std::vector<std::pair<server_id, po6::net::location> >* addrs);
po6::net::location get_address(const server_id& id) const;
region_id get_region_id(const virtual_server_id& id) const;
server_id get_server_id(const virtual_server_id& id) const;
common/counter_map.cc (9 lines changed)
@@ -74,6 +74,15 @@ counter_map :: adopt(const std::vector<region_id>& ris)
tmp.swap(m_counters);
}
+void
+counter_map :: peek(std::map<region_id, uint64_t>* ris)
+{
+ for (size_t i = 0; i < m_counters.size(); ++i)
+ {
+ (*ris)[m_counters[i].first] = m_counters[i].second;
+ }
+}
+
bool
counter_map :: lookup(const region_id& ri, uint64_t* count)
{
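
Like adopt and take_max, peek requires external synchronization (see the header comment below). A minimal sketch of the intended call pattern, mirroring the retransmitter later in this commit (m_lock is a hypothetical mutex standing in for whatever guards the map):

    std::map<hyperdex::region_id, uint64_t> lower_bounds;

    {
        po6::threads::mutex::hold hold(&m_lock); // external synchronization
        m_counters.peek(&lower_bounds);          // snapshot every (region, counter) pair
    }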
common/counter_map.h (6 lines changed)
@@ -29,14 +29,15 @@
#define hyperdex_common_counter_map_h_
// STL
+#include <map>
#include <utility>
#include <vector>
// HyperDex
#include "common/ids.h"
-// The only thread-safe call is "lookup". "adopt" and "take_max" both require
-// external synchronization.
+// The only thread-safe call is "lookup". "adopt", "peek", and "take_max" all
+// require external synchronization.
namespace hyperdex
{
@@ -49,6 +50,7 @@ class counter_map
public:
void adopt(const std::vector<region_id>& ris);
+ void peek(std::map<region_id, uint64_t>* ris);
bool lookup(const region_id& ri, uint64_t* count);
bool take_max(const region_id& ri, uint64_t count);
common/network_msgtype.cc (1 line changed)
@@ -52,6 +52,7 @@ hyperdex :: operator << (std::ostream& lhs, const network_msgtype& rhs)
STRINGIFY(CHAIN_OP);
STRINGIFY(CHAIN_SUBSPACE);
STRINGIFY(CHAIN_ACK);
+ STRINGIFY(CHAIN_GC);
STRINGIFY(XFER_OP);
STRINGIFY(XFER_ACK);
STRINGIFY(CONFIGMISMATCH);
common/network_msgtype.h (1 line changed)
@@ -60,6 +60,7 @@ enum network_msgtype
CHAIN_OP = 64,
CHAIN_SUBSPACE = 65,
CHAIN_ACK = 66,
+ CHAIN_GC = 67,
XFER_OP = 80,
XFER_ACK = 81,
daemon/communication.cc (62 lines changed)
@@ -25,6 +25,8 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
+#define __STDC_LIMIT_MACROS
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@@ -199,6 +201,63 @@ communication :: send_client(const virtual_server_id& from,
bool
communication :: send(const virtual_server_id& from,
+ const server_id& to,
+ network_msgtype msg_type,
+ std::auto_ptr<e::buffer> msg)
+{
+ assert(msg->size() >= HYPERDEX_HEADER_SIZE_VV);
+
+ if (m_daemon->m_us != m_daemon->m_config.get_server_id(from))
+ {
+ return false;
+ }
+
+ uint8_t mt = static_cast<uint8_t>(msg_type);
+ uint8_t flags = 1;
+ virtual_server_id vto(UINT64_MAX);
+ msg->pack_at(BUSYBEE_HEADER_SIZE) << mt << flags << m_daemon->m_config.version() << vto.get() << from.get();
+
+ if (to == server_id())
+ {
+ return false;
+ }
+
+#ifdef HD_LOG_ALL_MESSAGES
+ LOG(INFO) << "SEND " << from << "->" << to << " " << msg_type << " " << msg->hex();
+#endif
+
+ if (to == m_daemon->m_us)
+ {
+ m_busybee->deliver(to.get(), msg);
+ }
+ else
+ {
+ busybee_returncode rc = m_busybee->send(to.get(), msg);
+
+ switch (rc)
+ {
+ case BUSYBEE_SUCCESS:
+ break;
+ case BUSYBEE_DISRUPTED:
+ handle_disruption(to.get());
+ return false;
+ case BUSYBEE_SHUTDOWN:
+ case BUSYBEE_POLLFAILED:
+ case BUSYBEE_ADDFDFAIL:
+ case BUSYBEE_TIMEOUT:
+ case BUSYBEE_EXTERNAL:
+ case BUSYBEE_INTERRUPTED:
+ default:
+ LOG(ERROR) << "BusyBee unexpectedly returned " << rc;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+communication :: send(const virtual_server_id& from,
const virtual_server_id& vto,
network_msgtype msg_type,
std::auto_ptr<e::buffer> msg)
@@ -428,7 +487,8 @@ communication :: recv(server_id* from,
}
bool from_valid = true;
- bool to_valid = m_daemon->m_us == m_daemon->m_config.get_server_id(virtual_server_id(vidt));
+ bool to_valid = m_daemon->m_us == m_daemon->m_config.get_server_id(*vto) ||
+ *vto == virtual_server_id(UINT64_MAX);
// If this is a virtual-virtual message
if ((flags & 0x1))
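
The new overload addresses a concrete server rather than a virtual server, so it packs UINT64_MAX into the "virt to" slot as a sentinel, and the recv() change above now accepts that sentinel as a valid destination. Read off the pack_at expression, the wire header after BusyBee's own header lays out as follows (a layout sketch, not code from the commit):

    uint8_t  msg_type;       // e.g. CHAIN_GC
    uint8_t  flags;          // bit 0 set: the sender is a virtual server
    uint64_t config_version; // m_daemon->m_config.version()
    uint64_t virt_to;        // UINT64_MAX: addressed to the server itself
    uint64_t virt_from;      // from.get()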
daemon/communication.h (5 lines changed)
@@ -59,6 +59,7 @@
+ sizeof(uint64_t) /*config version*/ \
+ sizeof(uint64_t) /*virt to*/ \
+ sizeof(uint64_t) /*virt from*/)
+#define HYPERDEX_HEADER_SIZE_VS HYPERDEX_HEADER_SIZE_VV
namespace hyperdex
{
@@ -96,6 +97,10 @@ class communication
network_msgtype msg_type,
std::auto_ptr<e::buffer> msg);
bool send(const virtual_server_id& from,
+ const server_id& to,
+ network_msgtype msg_type,
+ std::auto_ptr<e::buffer> msg);
+ bool send(const virtual_server_id& from,
const virtual_server_id& to,
network_msgtype msg_type,
std::auto_ptr<e::buffer> msg);
daemon/coordinator_link.cc (9 lines changed)
@@ -213,6 +213,7 @@ coordinator_link :: register_id(server_id us, const po6::net::location& bind_to)
}
extern bool s_continue;
+extern bool s_alarm;
bool
coordinator_link :: wait_for_config(configuration* config)
@@ -222,6 +223,14 @@ coordinator_link :: wait_for_config(configuration* config)
while (s_continue)
{
+ if (s_alarm)
+ {
+ alarm(30);
+ s_alarm = false;
+ m_daemon->m_repl.trip_periodic();
+ need_to_backoff = false;
+ }
+
if (need_to_backoff)
{
LOG(ERROR) << "connection to the coordinator failed; retrying in " << retry / 1000000. << " milliseconds";
daemon/daemon.cc (50 lines changed)
@@ -47,6 +47,7 @@
using hyperdex::daemon;
bool s_continue = true;
+bool s_alarm = false;
static void
exit_on_signal(int /*signum*/)
@@ -56,6 +57,12 @@ exit_on_signal(int /*signum*/)
}
static void
+handle_alarm(int /*signum*/)
+{
+ s_alarm = true;
+}
+
+static void
dummy(int /*signum*/)
{
}
@@ -132,12 +139,33 @@ daemon :: run(bool daemonize,
return EXIT_FAILURE;
}
+ if (!install_signal_handler(SIGALRM, handle_alarm))
+ {
+ std::cerr << "could not install SIGALRM handler; exiting" << std::endl;
+ return EXIT_FAILURE;
+ }
+
if (!install_signal_handler(SIGUSR1, dummy))
{
std::cerr << "could not install SIGUSR1 handler; exiting" << std::endl;
return EXIT_FAILURE;
}
+ sigset_t ss;
+
+ if (sigfillset(&ss) < 0)
+ {
+ PLOG(ERROR) << "sigfillset";
+ return EXIT_FAILURE;
+ }
+
+ if (pthread_sigmask(SIG_BLOCK, &ss, NULL) != 0) // returns an error number, not -1
+ {
+ PLOG(ERROR) << "could not block signals";
+ return EXIT_FAILURE;
+ }
+
+ alarm(30);
google::LogToStderr();
bool saved = false;
server_id saved_us;
@@ -362,6 +390,9 @@ daemon :: loop()
case CHAIN_ACK:
process_chain_ack(from, vfrom, vto, msg, up);
break;
+ case CHAIN_GC:
+ process_chain_gc(from, vfrom, vto, msg, up);
+ break;
case XFER_OP:
process_xfer_op(from, vfrom, vto, msg, up);
break;
@@ -632,6 +663,25 @@ daemon :: process_chain_subspace(server_id,
}
void
+daemon :: process_chain_gc(server_id,
+ virtual_server_id vfrom,
+ virtual_server_id,
+ std::auto_ptr<e::buffer> msg,
+ e::unpacker up)
+{
+ uint64_t seq_id;
+
+ if ((up >> seq_id).error())
+ {
+ LOG(WARNING) << "unpack of CHAIN_GC failed; here's some hex: " << msg->hex();
+ return;
+ }
+
+ region_id ri = m_config.get_region_id(vfrom);
+ m_repl.chain_gc(ri, seq_id);
+}
+
+void
daemon :: process_chain_ack(server_id,
virtual_server_id vfrom,
virtual_server_id vto,
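
Together with the coordinator_link change above, the alarm wiring forms a 30-second periodic loop; the sketch below summarizes the control flow (assuming the main thread unblocks signals elsewhere, which this diff does not show):

    // alarm(30) fires SIGALRM        -> handle_alarm() sets s_alarm = true
    // wait_for_config() sees s_alarm -> re-arms alarm(30), calls m_repl.trip_periodic()
    // trip_periodic()                -> sets m_need_retransmit, wakes the retransmitter
    // retransmitter()                -> resends committable ops, broadcasts CHAIN_GC bounds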
daemon/daemon.h (1 line changed)
@@ -81,6 +81,7 @@ class daemon
void process_chain_op(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
void process_chain_subspace(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
void process_chain_ack(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
+ void process_chain_gc(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
void process_xfer_op(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
void process_xfer_ack(server_id from, virtual_server_id vfrom, virtual_server_id vto, std::auto_ptr<e::buffer> msg, e::unpacker up);
daemon/datalayer.cc (63 lines changed)
@@ -450,6 +450,7 @@ datalayer :: put(const region_id& ri,
if (seq_id != 0)
{
char abacking[ACKED_BUF_SIZE];
+ seq_id = UINT64_MAX - seq_id;
encode_acked(ri, reg_id, seq_id, abacking);
leveldb::Slice akey(abacking, ACKED_BUF_SIZE);
leveldb::Slice aval("", 0);
@@ -531,6 +532,7 @@ datalayer :: del(const region_id& ri,
if (seq_id != 0)
{
char abacking[ACKED_BUF_SIZE];
+ seq_id = UINT64_MAX - seq_id;
encode_acked(ri, reg_id, seq_id, abacking);
leveldb::Slice akey(abacking, ACKED_BUF_SIZE);
leveldb::Slice aval("", 0);
@@ -854,6 +856,63 @@ datalayer :: max_seq_id(const region_id& reg_id,
}
void
+datalayer :: clear_acked(const region_id& reg_id,
+ uint64_t seq_id)
+{
+ leveldb::ReadOptions opts;
+ opts.fill_cache = false;
+ opts.verify_checksums = true;
+ opts.snapshot = NULL;
+ std::auto_ptr<leveldb::Iterator> it(m_db->NewIterator(opts));
+ char abacking[ACKED_BUF_SIZE];
+ encode_acked(region_id(0), reg_id, 0, abacking);
+ it->Seek(leveldb::Slice(abacking, ACKED_BUF_SIZE));
+ encode_acked(region_id(0), region_id(reg_id.get() + 1), 0, abacking);
+ leveldb::Slice upper_bound(abacking, ACKED_BUF_SIZE);
+
+ while (it->Valid() &&
+ it->key().compare(upper_bound) < 0)
+ {
+ region_id tmp_ri;
+ region_id tmp_reg_id;
+ uint64_t tmp_seq_id;
+ datalayer::returncode rc = decode_acked(e::slice(it->key().data(), it->key().size()),
+ &tmp_ri, &tmp_reg_id, &tmp_seq_id);
+ tmp_seq_id = UINT64_MAX - tmp_seq_id;
+
+ if (rc == SUCCESS &&
+ tmp_reg_id == reg_id &&
+ tmp_seq_id < seq_id)
+ {
+ leveldb::WriteOptions wopts;
+ wopts.sync = false;
+ leveldb::Status st = m_db->Delete(wopts, it->key());
+
+ if (st.ok() || st.IsNotFound())
+ {
+ // WOOT!
+ }
+ else if (st.IsCorruption())
+ {
+ LOG(ERROR) << "corruption at the disk layer: could not delete "
+ << reg_id << " " << seq_id << ": desc=" << st.ToString();
+ }
+ else if (st.IsIOError())
+ {
+ LOG(ERROR) << "IO error at the disk layer: could not delete "
+ << reg_id << " " << seq_id << ": desc=" << st.ToString();
+ }
+ else
+ {
+ LOG(ERROR) << "LevelDB returned an unknown error that we don't know how to handle";
+ }
+ }
+
+ it->Next();
+ }
+}
+
+void
datalayer :: encode_key(const region_id& ri,
const e::slice& key,
std::vector<char>* kbacking,
@@ -986,9 +1045,9 @@ datalayer :: encode_acked(const region_id& ri, /*region we saw an ack for*/
{
char* ptr = buf;
ptr = e::pack8be('a', ptr);
- ptr = e::pack64be(ri.get(), ptr);
ptr = e::pack64be(reg_id.get(), ptr);
ptr = e::pack64be(seq_id, ptr);
+ ptr = e::pack64be(ri.get(), ptr);
}
datalayer::returncode
@@ -1007,9 +1066,9 @@ datalayer :: decode_acked(const e::slice& key,
uint64_t _reg_id;
const uint8_t* ptr = key.data();
ptr = e::unpack8be(ptr, &_p);
- ptr = e::unpack64be(ptr, &_ri);
ptr = e::unpack64be(ptr, &_reg_id);
ptr = e::unpack64be(ptr, seq_id);
+ ptr = e::unpack64be(ptr, &_ri);
*ri = region_id(_ri);
*reg_id = region_id(_reg_id);
return _p == 'a' ? SUCCESS : BAD_ENCODING;
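
Two things change in the acked-key encoding. First, reg_id now precedes ri, so all acks for one region are contiguous and clear_acked can range-scan them from a single Seek. Second, callers now store the complement UINT64_MAX - seq_id, so under big-endian packing the largest seq_id in a region sorts first (presumably so max_seq_id can find it with one seek). A sketch of the resulting on-disk key, with the complement folded in:

    char key[ACKED_BUF_SIZE];                    // 1 + 8 + 8 + 8 bytes
    char* ptr = key;
    ptr = e::pack8be('a', ptr);                  // key-space prefix for acks
    ptr = e::pack64be(reg_id.get(), ptr);        // primary sort: the region we manage
    ptr = e::pack64be(UINT64_MAX - seq_id, ptr); // complemented: newest seq_id first
    ptr = e::pack64be(ri.get(), ptr);            // region the ack was seen for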
daemon/datalayer.h (3 lines changed)
@@ -136,6 +136,9 @@ class datalayer
uint64_t seq_id);
void max_seq_id(const region_id& reg_id,
uint64_t* seq_id);
+ // Clear acked entries whose sequence numbers are less than seq_id
+ void clear_acked(const region_id& reg_id,
+ uint64_t seq_id);
private:
class search_filter;
daemon/replication_manager.cc (140 lines changed)
@@ -66,12 +66,16 @@ replication_manager :: replication_manager(daemon* d)
: m_daemon(d)
, m_keyholder_locks(1024)
, m_keyholders(16)
+ , m_counters()
+ , m_shutdown(true)
, m_retransmitter(std::tr1::bind(&replication_manager::retransmitter, this))
, m_block_retransmitter()
, m_wakeup_retransmitter(&m_block_retransmitter)
, m_need_retransmit(false)
- , m_shutdown(true)
- , m_counters()
+ , m_garbage_collector(std::tr1::bind(&replication_manager::garbage_collector, this))
+ , m_block_garbage_collector()
+ , m_wakeup_garbage_collector(&m_block_garbage_collector)
+ , m_lower_bounds()
{
}
@@ -83,7 +87,10 @@ replication_manager :: ~replication_manager() throw ()
bool
replication_manager :: setup()
{
+ po6::threads::mutex::hold holdr(&m_block_retransmitter);
+ po6::threads::mutex::hold holdg(&m_block_garbage_collector);
m_retransmitter.start();
+ m_garbage_collector.start();
m_shutdown = false;
return true;
}
@@ -529,6 +536,22 @@ replication_manager :: chain_ack(const virtual_server_id& from,
CLEANUP_KEYHOLDER(ri, key, kh);
}
+void
+replication_manager :: chain_gc(const region_id& reg_id, uint64_t seq_id)
+{
+ po6::threads::mutex::hold hold(&m_block_garbage_collector);
+ m_wakeup_garbage_collector.broadcast();
+ m_lower_bounds.push_back(std::make_pair(reg_id, seq_id));
+}
+
+void
+replication_manager :: trip_periodic()
+{
+ po6::threads::mutex::hold hold(&m_block_retransmitter);
+ m_wakeup_retransmitter.broadcast();
+ m_need_retransmit = true;
+}
+
uint64_t
replication_manager :: hash(const keypair& kp)
{
@@ -981,6 +1004,12 @@ replication_manager :: retransmitter()
}
std::set<region_id> region_cache;
+ std::map<region_id, uint64_t> seq_id_lower_bounds;
+
+ {
+ po6::threads::mutex::hold hold(&m_block_retransmitter);
+ m_counters.peek(&seq_id_lower_bounds);
+ }
for (keyholder_map_t::iterator it = m_keyholders.begin();
it != m_keyholders.end(); it.next())
@@ -1024,22 +1053,126 @@ replication_manager :: retransmitter()
kh->resend_committable(this, us, key);
move_operations_between_queues(us, ri, *sc, key, kh);
+
+ if (m_daemon->m_config.is_point_leader(us))
+ {
+ uint64_t min_id = kh->min_seq_id();
+ std::map<region_id, uint64_t>::iterator lb = seq_id_lower_bounds.find(ri);
+
+ if (lb == seq_id_lower_bounds.end())
+ {
+ seq_id_lower_bounds.insert(std::make_pair(ri, min_id));
+ }
+ else
+ {
+ lb->second = std::min(lb->second, min_id);
+ }
+ }
}
m_daemon->m_comm.wake_one();
+
+ po6::threads::mutex::hold hold(&m_block_retransmitter);
+ std::vector<std::pair<server_id, po6::net::location> > cluster_members;
+ m_daemon->m_config.get_all_addresses(&cluster_members);
+
+ for (std::map<region_id, uint64_t>::iterator it = seq_id_lower_bounds.begin();
+ it != seq_id_lower_bounds.end(); ++it)
+ {
+ // look up and check again: the lock was released and re-acquired since the bounds were computed
+ virtual_server_id us = m_daemon->m_config.get_virtual(it->first, m_daemon->m_us);
+
+ if (us == virtual_server_id() || !m_daemon->m_config.is_point_leader(us))
+ {
+ continue;
+ }
+
+ size_t sz = HYPERDEX_HEADER_SIZE_VV
+ + sizeof(uint64_t);
+
+ for (size_t i = 0; i < cluster_members.size(); ++i)
+ {
+ std::auto_ptr<e::buffer> msg(e::buffer::create(sz));
+ msg->pack_at(HYPERDEX_HEADER_SIZE_VS) << it->second;
+ m_daemon->m_comm.send(us, cluster_members[i].first, CHAIN_GC, msg);
+ }
+ }
}
LOG(INFO) << "retransmitter thread shutting down";
}
void
+replication_manager :: garbage_collector()
+{
+ LOG(INFO) << "garbage collector thread started";
+ sigset_t ss;
+
+ if (sigfillset(&ss) < 0)
+ {
+ PLOG(ERROR) << "sigfillset";
+ return;
+ }
+
+ if (pthread_sigmask(SIG_BLOCK, &ss, NULL) != 0) // returns an error number, not -1
+ {
+ PLOG(ERROR) << "could not block signals";
+ return;
+ }
+
+ while (true)
+ {
+ std::list<std::pair<region_id, uint64_t> > lower_bounds;
+
+ {
+ po6::threads::mutex::hold hold(&m_block_garbage_collector);
+
+ while (m_lower_bounds.empty() && !m_shutdown)
+ {
+ m_wakeup_garbage_collector.wait();
+ }
+
+ if (m_shutdown)
+ {
+ break;
+ }
+
+ lower_bounds.swap(m_lower_bounds);
+ }
+
+ // sort so that we scan disk sequentially
+ lower_bounds.sort();
+
+ while (!lower_bounds.empty())
+ {
+ region_id reg_id = lower_bounds.front().first;
+ uint64_t seq_id = lower_bounds.front().second;
+
+ // Clear through seq_id - 1 rather than seq_id to stay on the safe side of
+ // any off-by-one: at worst it leaves a little extra state lying around, and
+ // it is guaranteed to be at least as correct as collecting through seq_id.
+ if (seq_id > 0)
+ {
+ m_daemon->m_data.clear_acked(reg_id, seq_id - 1);
+ }
+
+ lower_bounds.pop_front();
+ }
+ }
+
+ LOG(INFO) << "garbage collector thread shutting down";
+}
+
+void
replication_manager :: shutdown()
{
bool is_shutdown;
{
- po6::threads::mutex::hold hold(&m_block_retransmitter);
+ po6::threads::mutex::hold holdr(&m_block_retransmitter);
+ po6::threads::mutex::hold holdg(&m_block_garbage_collector);
m_wakeup_retransmitter.broadcast();
+ m_wakeup_garbage_collector.broadcast();
is_shutdown = m_shutdown;
m_shutdown = true;
}
@@ -1047,5 +1180,6 @@ replication_manager :: shutdown()
if (!is_shutdown)
{
m_retransmitter.join();
+ m_garbage_collector.join();
}
}
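
End to end, the new protocol for a region ri runs as follows (a summary sketch of the code above):

    // each retransmitter pass, on ri's point leader:
    //   bound = min(m_counters entry for ri, min_seq_id() of each of ri's keyholders)
    //   for every server s in the cluster: send(us, s, CHAIN_GC, bound)
    // on every daemon that receives CHAIN_GC(ri, bound):
    //   chain_gc() queues (ri, bound) and wakes the garbage collector
    //   garbage_collector() sorts the queue and calls m_data.clear_acked(ri, bound - 1)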
daemon/replication_manager.h (14 lines changed)
@@ -29,6 +29,7 @@
#define hyperdex_daemon_replication_manager_h_
// STL
+#include <list>
#include <memory>
#include <tr1/unordered_map>
@@ -119,6 +120,8 @@ class replication_manager
uint64_t seq_id,
uint64_t version,
const e::slice& key);
+ void chain_gc(const region_id& reg_id, uint64_t seq_id);
+ void trip_periodic();
private:
class pending;
@@ -175,20 +178,25 @@ class replication_manager
const server_id& client,
uint64_t nonce,
network_returncode ret);
- // Retransmit functions
+ // thread functions
void retransmitter();
+ void garbage_collector();
void shutdown();
private:
daemon* m_daemon;
e::striped_lock<po6::threads::mutex> m_keyholder_locks;
keyholder_map_t m_keyholders;
+ counter_map m_counters;
+ bool m_shutdown;
po6::threads::thread m_retransmitter;
po6::threads::mutex m_block_retransmitter;
po6::threads::cond m_wakeup_retransmitter;
bool m_need_retransmit;
- bool m_shutdown;
- counter_map m_counters;
+ po6::threads::thread m_garbage_collector;
+ po6::threads::mutex m_block_garbage_collector;
+ po6::threads::cond m_wakeup_garbage_collector;
+ std::list<std::pair<region_id, uint64_t> > m_lower_bounds;
};
} // namespace hyperdex
daemon/replication_manager_keyholder.cc (30 lines changed)
@@ -26,6 +26,7 @@
// POSSIBILITY OF SUCH DAMAGE.
// HyperDex
+#include "daemon/daemon.h"
#include "daemon/replication_manager_keyholder.h"
#include "daemon/replication_manager_pending.h"
@@ -148,6 +149,29 @@ replication_manager :: keyholder :: max_seq_id() const
return ret;
}
+uint64_t
+replication_manager :: keyholder :: min_seq_id() const
+{
+ uint64_t ret = UINT64_MAX; // seed high so the std::min calls below can lower it (0 would pin the result to 0)
+
+ if (!m_committable.empty())
+ {
+ ret = std::min(ret, m_committable.front().second->seq_id);
+ }
+
+ if (!m_blocked.empty())
+ {
+ ret = std::min(ret, m_blocked.front().second->seq_id);
+ }
+
+ if (!m_deferred.empty())
+ {
+ ret = std::min(ret, m_deferred.front().second->seq_id);
+ }
+
+ return ret;
+}
+
bool
replication_manager :: keyholder :: has_committable_ops() const
{
@@ -296,6 +320,12 @@ replication_manager :: keyholder :: resend_committable(replication_manager* rm,
for (committable_list_t::iterator it = m_committable.begin();
it != m_committable.end(); ++it)
{
+ // skip messages already sent under the current configuration version
+ if (it->second->sent_config_version == rm->m_daemon->m_config.version())
+ {
+ continue;
+ }
+
it->second->sent = virtual_server_id();
it->second->sent_config_version = 0;
rm->send_message(us, true, it->first, key, it->second);
daemon/replication_manager_keyholder.h (1 line changed)
@@ -49,6 +49,7 @@ class hyperdex::replication_manager::keyholder
e::intrusive_ptr<pending> get_by_version(uint64_t version) const;
uint64_t version_on_disk() const;
uint64_t max_seq_id() const;
+ uint64_t min_seq_id() const;
public:
bool has_committable_ops() const;