Merge branch 'branch-18'

* branch-18:
  MB-5085 Fix getLogger()->log format string errors in ep-engine

Change-Id: I3a496b280bd3bf87ec32b75d66d6f75ce068bf6c
commit e151367dc3e9b11adb73a6c163c66dd34b97c8fa (parents: 328dcd8, 1d06858)
Authored by Gerrit
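
The hunks below fall into a few recurring patterns: 64-bit checkpoint ids and sizes were paired with %d, caller-built std::stringstream messages were passed as the format string itself, one message had been split in two by a stray comma, and one call was missing the argument for its %d. As a rough illustration of the first pattern (a standalone sketch with hypothetical values, not ep-engine code), pairing a uint64_t with %d reads the wrong width off the varargs list, which is undefined behavior; the ll length modifier or the <cinttypes> macros keep the specifier and the argument in agreement:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t checkpointId = 12345;   // hypothetical values
        uint16_t vbucketId = 7;          // promoted to int, so %d is fine here

        // Wrong: %d expects int, but a uint64_t is passed (undefined behavior).
        // printf("Checkpoint %d for vbucket %d\n", checkpointId, vbucketId);

        // As in this commit: the ll modifier matches a 64-bit unsigned argument.
        printf("Checkpoint %llu for vbucket %d\n",
               (unsigned long long)checkpointId, vbucketId);

        // Fully portable alternative from <cinttypes>.
        printf("Checkpoint %" PRIu64 " for vbucket %d\n", checkpointId, vbucketId);
        return 0;
    }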
26 checkpoint.cc
@@ -38,7 +38,7 @@ class CheckpointConfigChangeListener : public ValueChangedListener {
Checkpoint::~Checkpoint() {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Checkpoint %d for vbucket %d is purged from memory.\n",
+ "Checkpoint %llu for vbucket %d is purged from memory.\n",
checkpointId, vbucketId);
stats.memOverhead.decr(memorySize());
assert(stats.memOverhead.get() < GIGANTOR);
@@ -144,7 +144,7 @@ size_t Checkpoint::mergePrevCheckpoint(Checkpoint *pPrevCheckpoint) {
std::list<queued_item>::reverse_iterator rit = pPrevCheckpoint->rbegin();
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Collapse the checkpoint %d into the checkpoint %d for vbucket %d.\n",
+ "Collapse the checkpoint %llu into the checkpoint %llu for vbucket %d.\n",
pPrevCheckpoint->getId(), checkpointId, vbucketId);
for (; rit != pPrevCheckpoint->rend(); ++rit) {
@@ -223,7 +223,7 @@ uint64_t CheckpointManager::getLastClosedCheckpointId() {
void CheckpointManager::setOpenCheckpointId_UNLOCKED(uint64_t id) {
if (checkpointList.size() > 0) {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Set the current open checkpoint id to %d for vbucket %d.\n",
+ "Set the current open checkpoint id to %llu for vbucket %d.\n",
id, vbucketId);
checkpointList.back()->setId(id);
// Update the checkpoint_start item with the new Id.
@@ -240,7 +240,7 @@ bool CheckpointManager::addNewCheckpoint_UNLOCKED(uint64_t id) {
}
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Create a new open checkpoint %d for vbucket %d.\n",
+ "Create a new open checkpoint %llu for vbucket %d.\n",
id, vbucketId);
Checkpoint *checkpoint = new Checkpoint(stats, id, vbucketId, opened);
@@ -272,7 +272,7 @@ bool CheckpointManager::closeOpenCheckpoint_UNLOCKED(uint64_t id) {
}
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Close the open checkpoint %d for vbucket %d\n", id, vbucketId);
+ "Close the open checkpoint %llu for vbucket %d\n", id, vbucketId);
// This item represents the end of the current open checkpoint and is sent to the slave node.
queued_item qi = createCheckpointItem(id, vbucketId, queue_op_checkpoint_end);
@@ -328,7 +328,7 @@ bool CheckpointManager::registerTAPCursor(const std::string &name, uint64_t chec
if (!found) {
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Checkpoint %d for vbucket %d doesn't exist in memory. "
+ "Checkpoint %llu for vbucket %d doesn't exist in memory. "
"Set the cursor with the name \"%s\" to the open checkpoint.\n",
checkpointId, vbucketId, name.c_str());
it = --(checkpointList.end());
@@ -342,8 +342,8 @@ bool CheckpointManager::registerTAPCursor(const std::string &name, uint64_t chec
std::list<queued_item>::iterator curr;
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Checkpoint %d for vbucket %d exists in memory. "
- "Set the cursor with the name \"%s\" to the checkpoint %d\n",
+ "Checkpoint %llu for vbucket %d exists in memory. "
+ "Set the cursor with the name \"%s\" to the checkpoint %llu\n",
checkpointId, vbucketId, name.c_str(), checkpointId);
if (!alwaysFromBeginning &&
@@ -716,7 +716,7 @@ uint64_t CheckpointManager::getAllItemsForPersistence(std::vector<queued_item> &
persistenceCursor.offset = numItems;
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Grab %d items through the persistence cursor from vbucket %d.\n",
+ "Grab %ld items through the persistence cursor from vbucket %d.\n",
items.size(), vbucketId);
return checkpoint_id;
@@ -736,7 +736,7 @@ uint64_t CheckpointManager::getAllItemsForTAPConnection(const std::string &name,
it->second.offset = numItems;
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Grab %d items through the tap cursor with name \"%s\" from vbucket %d.\n",
+ "Grab %ld items through the tap cursor with name \"%s\" from vbucket %d.\n",
items.size(), name.c_str(), vbucketId);
return checkpointId;
@@ -1155,7 +1155,7 @@ bool CheckpointConfig::validateCheckpointMaxItemsParam(size_t checkpoint_max_ite
ss << "New checkpoint_max_items param value " << checkpoint_max_items
<< " is not ranged between the min allowed value " << MIN_CHECKPOINT_ITEMS
<< " and max value " << MAX_CHECKPOINT_ITEMS;
- getLogger()->log(EXTENSION_LOG_WARNING, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, NULL, "%s\n", ss.str().c_str());
return false;
}
return true;
@@ -1168,7 +1168,7 @@ bool CheckpointConfig::validateCheckpointPeriodParam(size_t checkpoint_period) {
ss << "New checkpoint_period param value " << checkpoint_period
<< " is not ranged between the min allowed value " << MIN_CHECKPOINT_PERIOD
<< " and max value " << MAX_CHECKPOINT_PERIOD;
- getLogger()->log(EXTENSION_LOG_WARNING, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, NULL, "%s\n", ss.str().c_str());
return false;
}
return true;
@@ -1181,7 +1181,7 @@ bool CheckpointConfig::validateMaxCheckpointsParam(size_t max_checkpoints) {
ss << "New max_checkpoints param value " << max_checkpoints
<< " is not ranged between the min allowed value " << DEFAULT_MAX_CHECKPOINTS
<< " and max value " << MAX_CHECKPOINTS_UPPER_BOUND;
- getLogger()->log(EXTENSION_LOG_WARNING, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, NULL, "%s\n", ss.str().c_str());
return false;
}
return true;
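
The three validateCheckpoint* hunks above route the assembled message through a fixed "%s\n" format instead of passing it as the format string. A minimal sketch of why that matters (hypothetical message, not ep-engine code): any '%' that ends up in the text would otherwise be interpreted as a conversion specifier with no matching argument, which is undefined behavior.

    #include <cstdio>
    #include <sstream>

    int main() {
        std::ostringstream ss;
        ss << "New checkpoint_max_items param value rejected (90% of limit)";

        // Risky: the message itself becomes the format string, and the "%"
        // inside it asks printf for an argument that was never passed.
        // printf(ss.str().c_str());

        // Safe, as in this commit: a literal format, message passed as data.
        printf("%s\n", ss.str().c_str());
        return 0;
    }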
2  checkpoint_remover.cc
@@ -35,7 +35,7 @@ class CheckpointVisitor : public VBucketVisitor {
stats.itemsRemovedFromCheckpoints.incr(removed);
if (removed > 0) {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Removed %d closed unreferenced checkpoints from VBucket %d.\n",
+ "Removed %ld closed unreferenced checkpoints from VBucket %d.\n",
removed, currentBucket->getId());
}
removed = 0;
16 ep.cc
@@ -362,7 +362,7 @@ class VBucketDeletionCallback : public DispatcherCallback {
d.snooze(t, 10);
rv = true;
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Reschedule to delete the chunk %d of vbucket %d from disk\n",
+ "Reschedule to delete the chunk %ld of vbucket %d from disk\n",
chunk_num, vbucket);
break;
case vbucket_del_invalid:
@@ -415,7 +415,7 @@ EventuallyPersistentStore::EventuallyPersistentStore(EventuallyPersistentEngine
bgFetchDelay(0)
{
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Storage props: c=%d/r=%d/rw=%d\n",
+ "Storage props: c=%ld/r=%ld/rw=%ld\n",
storageProperties.maxConcurrency(),
storageProperties.maxReaders(),
storageProperties.maxWriters());
@@ -1226,7 +1226,7 @@ void EventuallyPersistentStore::completeBGFetch(const std::string &key,
std::stringstream ss;
ss << "Completed a background fetch, now at " << bgFetchQueue.get()
<< std::endl;
- getLogger()->log(EXTENSION_LOG_DEBUG, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "%s\n", ss.str().c_str());
// Go find the data
RememberingCallback<GetValue> gcb;
@@ -1282,7 +1282,7 @@ void EventuallyPersistentStore::bgFetch(const std::string &key,
std::stringstream ss;
ss << "Queued a background fetch, now at " << bgFetchQueue.get()
<< std::endl;
- getLogger()->log(EXTENSION_LOG_DEBUG, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "%s\n", ss.str().c_str());
roDispatcher->schedule(dcb, NULL, Priority::BgFetcherPriority, bgFetchDelay);
}
@@ -1870,7 +1870,7 @@ std::queue<queued_item>* EventuallyPersistentStore::beginFlush() {
stats.flusher_todo.set(writing.size());
stats.queue_size.set(queue_size);
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Flushing %d items with %d still in queue\n",
+ "Flushing %ld items with %ld still in queue\n",
writing.size(), queue_size);
rv = &writing;
}
@@ -2543,7 +2543,7 @@ bool EventuallyPersistentStore::warmupFromLog(const std::map<std::pair<uint16_t,
warmupTask->setEstimatedItemCount(harvester.total());
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Completed log read in %s with %d entries\n",
+ "Completed log read in %s with %ld entries\n",
hrtime2text(end1 - start).c_str(), harvester.total());
harvester.apply(&cb, &warmupLogCallback);
@@ -2551,7 +2551,7 @@ bool EventuallyPersistentStore::warmupFromLog(const std::map<std::pair<uint16_t,
hrtime_t end2(gethrtime());
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Completed repopulation from log in %dms\n",
+ "Completed repopulation from log in %llums\n",
((end2 - end1) / 1000000));
// Anything left in the "loading" map at this point is uncommitted.
@@ -2559,7 +2559,7 @@ bool EventuallyPersistentStore::warmupFromLog(const std::map<std::pair<uint16_t,
harvester.getUncommitted(uitems);
if (uitems.size() > 0) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
- "%d items were uncommitted in the mutation log file. "
+ "%ld items were uncommitted in the mutation log file. "
"Deleting them from the underlying data store.\n",
uitems.size());
std::vector<mutation_log_uncommitted_t>::iterator uit = uitems.begin();
6 ep_engine.cc
@@ -1902,7 +1902,7 @@ ENGINE_ERROR_CODE EventuallyPersistentEngine::tapNotify(const void *cookie,
} else {
ret = ENGINE_DISCONNECT;
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
- "%s Error processing checkpoint %d. Force disconnect\n",
+ "%s Error processing checkpoint %llu. Force disconnect\n",
connection->logHeader(), checkpointId);
}
} else {
@@ -3439,7 +3439,7 @@ ENGINE_ERROR_CODE EventuallyPersistentEngine::observe(const void *cookie,
protocol_binary_response_status rv;
stats.observeCalls++;
- getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "observe %s %ld %s %d",
+ getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "observe %s %llu %s %d",
key.c_str(), cas, obs_set.c_str(), expiration);
rv = getObserveRegistry().observeKey(key, cas, vbucket, expiration, obs_set);
if (rv == PROTOCOL_BINARY_RESPONSE_ETMPFAIL) {
@@ -3459,7 +3459,7 @@ ENGINE_ERROR_CODE EventuallyPersistentEngine::unobserve(const void *cookie,
std::string obs_set,
ADD_RESPONSE response) {
stats.unobserveCalls++;
- getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "unobserve %s %ld %s",
+ getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "unobserve %s %llu %s",
key.c_str(), cas, obs_set.c_str());
getObserveRegistry().unobserveKey(key, cas, vbucket, obs_set);
return sendResponse(response, NULL, 0, NULL, 0, NULL, 0, 0, 0, 0, cookie);
2  flusher.hh
@@ -65,7 +65,7 @@ public:
}
if (rejectQueue != NULL) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
- "Flusher being destroyed with %d tasks in the reject queue\n",
+ "Flusher being destroyed with %ld tasks in the reject queue\n",
rejectQueue->size());
delete rejectQueue;
}
6 item_pager.cc
@@ -73,13 +73,13 @@ class PagingVisitor : public VBucketVisitor {
if (numEjected() > 0) {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Paged out %d values\n", numEjected());
+ "Paged out %ld values\n", numEjected());
}
size_t num_expired = expired.size();
if (num_expired > 0) {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Purged %d expired items\n", num_expired);
+ "Purged %ld expired items\n", num_expired);
}
totalEjected += (ejected + num_expired);
@@ -239,7 +239,7 @@ bool InvalidItemDbPager::callback(Dispatcher &d, TaskId t) {
else {
d.snooze(t, 10);
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Reschedule to delete the old chunk of vbucket %d with",
+ "Reschedule to delete the old chunk of vbucket %d with"
" the version %d from disk\n",
vbid, vb_version);
}
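
The item_pager.cc hunk above drops a stray comma that had split one message across two arguments: the first literal alone was the format string, so its %d consumed the pointer to the second literal and the real arguments were never formatted. A small sketch of the difference (illustrative only, plain printf instead of the engine's logger):

    #include <cstdio>

    int main() {
        int vbid = 3, vb_version = 2;   // hypothetical values

        // With the comma, "... %d with" is the whole format string; its %d
        // consumes the pointer to the second literal (undefined behavior)
        // and vbid/vb_version never reach the output.
        // printf("Reschedule to delete the old chunk of vbucket %d with",
        //        " the version %d from disk\n", vbid, vb_version);

        // Without the comma, the adjacent literals concatenate into a single
        // format string and both arguments are consumed as intended.
        printf("Reschedule to delete the old chunk of vbucket %d with"
               " the version %d from disk\n",
               vbid, vb_version);
        return 0;
    }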
4 kvstore.cc
@@ -92,7 +92,7 @@ size_t KVStore::warmup(MutationLog &lf,
size_t total = harvester.total();
estimate.callback(total);
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Completed log read in %s with %d entries\n",
+ "Completed log read in %s with %ld entries\n",
hrtime2text(end - start).c_str(), total);
WarmupCookie cookie(this, cb);
@@ -101,7 +101,7 @@ size_t KVStore::warmup(MutationLog &lf,
end = gethrtime();
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Populated log in %s with (l: %d, s: %d, e: %d)",
+ "Populated log in %s with (l: %ld, s: %ld, e: %ld)",
hrtime2text(end - start).c_str(),
cookie.loaded, cookie.skipped, cookie.error);
18 mc-kvstore/mc-engine.cc
@@ -382,7 +382,7 @@ class TapResponseHandler: public BinaryPacketHandler {
~TapResponseHandler() {
if (keysOnly) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
- "Preloaded %zu keys (with metadata)",
+ "Preloaded %ld keys (with metadata)",
num);
}
}
@@ -703,7 +703,7 @@ bool MemcachedEngine::connect() {
msg << hptr << ":";
}
msg << port << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, msg.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", msg.str().c_str());
configurationError = true;
ainfo = NULL;
return false;
@@ -732,7 +732,7 @@ bool MemcachedEngine::connect() {
msg << hptr << ":";
}
msg << port << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, msg.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", msg.str().c_str());
configurationError = true;
return false;
}
@@ -752,7 +752,7 @@ void MemcachedEngine::ensureConnection()
<< configuration.getCouchHost().c_str() << ":"
<< configuration.getCouchPort() << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, rv.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", rv.str().c_str());
while (!connect()) {
if (shutdown) {
return ;
@@ -771,7 +771,7 @@ void MemcachedEngine::ensureConnection()
rv << "Failed to connect to: \""
<< configuration.getCouchHost().c_str() << ":"
<< configuration.getCouchPort() << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, rv.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", rv.str().c_str());
usleep(5000);
// we might have new configuration parameters...
@@ -781,7 +781,7 @@ void MemcachedEngine::ensureConnection()
rv << "Connection refused: \""
<< configuration.getCouchHost().c_str() << ":"
<< configuration.getCouchPort() << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, rv.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", rv.str().c_str());
usleep(configuration.getCouchReconnectSleeptime());
}
}
@@ -789,7 +789,7 @@ void MemcachedEngine::ensureConnection()
rv << "Connected to mccouch: \""
<< configuration.getCouchHost().c_str() << ":"
<< configuration.getCouchPort() << "\"";
- getLogger()->log(EXTENSION_LOG_WARNING, this, rv.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, this, "%s\n", rv.str().c_str());
}
}
@@ -822,7 +822,7 @@ bool MemcachedEngine::waitForWritable()
} else if ((waitTime += timeout) >= configuration.getCouchResponseTimeout()) {
// Poll failed due to timeouts multiple times and is above timeout threshold.
getLogger()->log(EXTENSION_LOG_INFO, this,
- "No response for mccouch in %zu seconds. Resetting connection.",
+ "No response for mccouch in %ld seconds. Resetting connection.",
waitTime);
resetConnection();
}
@@ -1068,7 +1068,7 @@ bool MemcachedEngine::waitForReadable()
} else if ((waitTime += timeout) >= configuration.getCouchResponseTimeout()) {
// Poll failed due to timeouts multiple times and is above timeout threshold.
getLogger()->log(EXTENSION_LOG_INFO, this,
- "No response for mccouch in %d seconds. Resetting connection.",
+ "No response for mccouch in %ld seconds. Resetting connection.",
waitTime);
resetConnection();
}
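
Several hunks in this file and elsewhere print size_t values (item counts, wait times) with %ld. For reference, a short standalone sketch of the trade-off: %zu is the C99/C++11 specifier that always matches size_t, while %ld lines up only where long and size_t have the same width (as on common LP64 platforms), so an explicit cast keeps them in agreement everywhere:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> items(42);     // hypothetical container

        // %zu always matches size_t.
        printf("Grab %zu items\n", items.size());

        // %ld matches only where long is as wide as size_t; casting makes
        // the pairing explicit and portable.
        printf("Grab %ld items\n", (long)items.size());
        return 0;
    }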
6 mutation_log_compactor.cc
@@ -30,7 +30,7 @@ class LogCompactionVisitor : public VBucketVisitor {
mutationLog.commit1();
mutationLog.commit2();
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Mutation log compactor: Dumped %d items from VBucket %d "
+ "Mutation log compactor: Dumped %ld items from VBucket %d "
"into a new mutation log file.\n",
numItemsLogged, currentBucket->getId());
totalItemsLogged += numItemsLogged;
@@ -41,7 +41,7 @@ class LogCompactionVisitor : public VBucketVisitor {
void complete() {
update();
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Mutation log compactor: Completed by dumping total %d items "
+ "Mutation log compactor: Completed by dumping total %ld items "
"into a new mutation log file.\n", totalItemsLogged);
}
@@ -84,7 +84,7 @@ bool MutationLogCompactor::callback(Dispatcher &d, TaskId t) {
new_log.setSyncConfig(mutationLog.getSyncConfig());
LogCompactionVisitor compact_visitor(new_log, stats);
- epStore->visit(compact_visitor);
+ epStore->visit(compact_visitor);
mutationLog.replaceWith(new_log);
} catch (MutationLog::ReadException e) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
2  sqlite-kvstore/sqlite-kvstore.cc
@@ -414,7 +414,7 @@ size_t StrategicSqlite3::warmup(MutationLog &lf,
estimate.callback(total);
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "Completed log read in %s with %d entries\n",
+ "Completed log read in %s with %ld entries\n",
hrtime2text(end - start).c_str(), total);
start = gethrtime();
4 sqlite-kvstore/sqlite-strategies.cc
@@ -542,7 +542,7 @@ void ShardedMultiTableSqliteStrategy::destroyInvalidTables(bool destroyOnlyOne)
static_cast<int>(i), it->c_str());
execute(buf);
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Removed the temp table kv_%d.%s\n", i, it->c_str());
+ "Removed the temp table kv_%ld.%s\n", i, it->c_str());
if (destroyOnlyOne) {
return;
}
@@ -695,7 +695,7 @@ void ShardedByVBucketSqliteStrategy::destroyInvalidTables(bool destroyOnlyOne) {
static_cast<int>(i), it->c_str());
execute(buf);
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "Removed the temp table kv_%d.%s\n", i, it->c_str());
+ "Removed the temp table kv_%ld.%s\n", i, it->c_str());
if (destroyOnlyOne) {
return;
}
15 tapconnection.cc
@@ -263,7 +263,7 @@ void TapProducer::setBackfillAge(uint64_t age, bool reconnect) {
if (flags & TAP_CONNECT_FLAG_BACKFILL) {
backfillAge = age;
getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
- "%s Backfill age set to %d\n",
+ "%s Backfill age set to %llu\n",
logHeader(), age);
}
}
@@ -297,7 +297,7 @@ void TapProducer::setVBucketFilter(const std::vector<uint16_t> &vbuckets)
ss << logHeader() << ": Changing the vbucket filter from "
<< vbucketFilter << " to "
<< filter << " (diff: " << diff << ")" << std::endl;
- getLogger()->log(EXTENSION_LOG_DEBUG, NULL,
+ getLogger()->log(EXTENSION_LOG_DEBUG, NULL, "%s\n",
ss.str().c_str());
vbucketFilter = filter;
@@ -893,7 +893,8 @@ ENGINE_ERROR_CODE TapProducer::processAck(uint32_t s,
ss << "TAP takeover is completed. ";
}
ss << "Disconnecting tap stream <" << getName() << ">";
- getLogger()->log(EXTENSION_LOG_WARNING, NULL, ss.str().c_str());
+ getLogger()->log(EXTENSION_LOG_WARNING, NULL, "%s\n",
+ ss.str().c_str());
setDisconnect(true);
expiryTime = 0;
@@ -1377,7 +1378,7 @@ bool TapConsumer::processCheckpointCommand(tap_event_t event, uint16_t vbucket,
if (vb->getState() == vbucket_state_active &&
!engine.getCheckpointConfig().isInconsistentSlaveCheckpoint()) {
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "%s Checkpoint %d ignored because vbucket %d is in active state\n",
+ "%s Checkpoint %llu ignored because vbucket %d is in active state\n",
logHeader(), checkpointId, vbucket);
return true;
}
@@ -1387,7 +1388,7 @@ bool TapConsumer::processCheckpointCommand(tap_event_t event, uint16_t vbucket,
case TAP_CHECKPOINT_START:
{
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "%s Received checkpoint_start message with id %d for vbucket %d\n",
+ "%s Received checkpoint_start message with id %llu for vbucket %d\n",
logHeader(), checkpointId, vbucket);
if (vb->isBackfillPhase() && checkpointId > 0) {
setBackfillPhase(false, vbucket);
@@ -1405,7 +1406,7 @@ bool TapConsumer::processCheckpointCommand(tap_event_t event, uint16_t vbucket,
break;
case TAP_CHECKPOINT_END:
getLogger()->log(EXTENSION_LOG_INFO, NULL,
- "%s Received checkpoint_end message with id %d for vbucket %d\n",
+ "%s Received checkpoint_end message with id %llu for vbucket %d\n",
logHeader(), checkpointId, vbucket);
ret = vb->checkpointManager.closeOpenCheckpoint(checkpointId);
break;
@@ -1480,7 +1481,7 @@ queued_item TapProducer::nextFgFetched_UNLOCKED(bool &shouldPause) {
if (!vb || (vb->getState() == vbucket_state_dead && !doTakeOver)) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
"%s Skip vbucket %d checkpoint queue as it's in invalid state.\n",
- logHeader());
+ logHeader(), vbid);
++invalid_count;
continue;
}
2  vbucket.hh
@@ -118,7 +118,7 @@ public:
~VBucket() {
if (!pendingOps.empty()) {
getLogger()->log(EXTENSION_LOG_WARNING, NULL,
- "Have %d pending ops while destroying vbucket\n",
+ "Have %ld pending ops while destroying vbucket\n",
pendingOps.size());
}
stats.memOverhead.decr(sizeof(VBucket) + ht.memorySize() + sizeof(CheckpointManager));
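
Mismatches like the ones fixed in this commit can be flagged at compile time when the logging function is declared with GCC/Clang's format attribute; the diff does not show whether ep-engine's logger descriptor carries such an annotation, so the following is only a generic sketch of the technique:

    #include <cstdarg>
    #include <cstdio>

    // The attribute marks argument 1 as a printf-style format string with the
    // variadic arguments starting at position 2, so -Wformat can warn when a
    // %d is paired with a 64-bit value or an argument is missing.
    __attribute__((format(printf, 1, 2)))
    void log_message(const char *fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main() {
        unsigned long long checkpointId = 5;   // hypothetical value
        log_message("Checkpoint %llu purged\n", checkpointId);   // OK
        // log_message("Checkpoint %d purged\n", checkpointId);  // -Wformat warning
        return 0;
    }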
