Remove unneeded logging statements.
yaron-zilliqa committed Sep 25, 2023
1 parent f4d8624 commit d622040
Showing 18 changed files with 21 additions and 272 deletions.
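
Every file in this diff follows the same pattern: temporary "BZ"-prefixed LOG_GENERAL(WARNING, ...) diagnostics are deleted outright, and the few messages worth keeping are demoted to INFO. For orientation, below is a minimal sketch of a severity-gated, stream-style logging macro with the same calling convention as LOG_GENERAL; the macro body and the g_minLevel threshold are assumptions for illustration, not Zilliqa's actual implementation.

    #include <iostream>
    #include <sstream>

    // Hypothetical stand-in for Zilliqa's LOG_GENERAL: the first argument is a
    // severity level, the second an ostream expression chained with <<. The
    // real macro also records scope/file/line; this sketch only shows why
    // LOG_GENERAL(INFO, "x: " << x) parses and how severities can be filtered.
    enum LogLevel { INFO = 0, WARNING = 1, FATAL = 2 };
    static LogLevel g_minLevel = INFO;  // assumed runtime threshold

    #define LOG_GENERAL(level, msg)                           \
      do {                                                    \
        if ((level) >= g_minLevel) {                          \
          std::ostringstream os_;                             \
          os_ << msg; /* msg may itself contain << chains */  \
          std::cerr << #level << ": " << os_.str() << '\n';   \
        }                                                     \
      } while (0)

    int main() {
      int m_numForConsensus = 7;
      LOG_GENERAL(INFO, "Number of consensus responses: " << m_numForConsensus);
    }
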
19 changes: 0 additions & 19 deletions src/libConsensus/ConsensusBackup.cpp
@@ -112,10 +112,6 @@ bool ConsensusBackup::ProcessMessageAnnounce(const zbytes& announcement,
   // Initial checks
   // ==============

-  LOG_GENERAL(WARNING,
-              "BZ ConsensusBackup::ProcessMessageAnnounce I'm in state: "
-                  << GetStateString());
-
   if (!CheckState(PROCESS_ANNOUNCE)) {
     return false;
   }
@@ -364,9 +360,6 @@ bool ConsensusBackup::ProcessMessageChallengeCore(
 bool ConsensusBackup::ProcessMessageChallenge(const zbytes& challenge,
                                               unsigned int offset) {
   LOG_MARKER();
-  LOG_GENERAL(WARNING,
-              "BZ ConsensusBackup::ProcessMessageChallenge I'm in state: "
-                  << GetStateString());
   return ProcessMessageChallengeCore(challenge, offset, PROCESS_CHALLENGE,
                                      RESPONSE, RESPONSE_DONE, "Challenge");
 }
@@ -561,9 +554,6 @@ ConsensusBackup::ConsensusBackup(
   span.SetAttribute("consensus.node_id", static_cast<uint64_t>(m_myID));
   span.SetAttribute("consensus.block_number", m_blockNumber);
   TracedIds::GetInstance().SetConsensusSpanIds(span.GetIds());
-
-  LOG_GENERAL(WARNING, "BZ ConsensusBackup::ConsensusBackup I'm in state: "
-                           << GetStateString());
 }

 ConsensusBackup::~ConsensusBackup() {}
@@ -577,32 +567,23 @@ bool ConsensusBackup::ProcessMessage(const zbytes& message, unsigned int offset,

   bool result = false;

-  LOG_GENERAL(WARNING,
-              "BZ ConsensusBackup::Audit I'm in state: " << GetStateString());
-
   switch (message.at(offset)) {
     case ConsensusMessageType::ANNOUNCE:
-      LOG_GENERAL(INFO, "BZ Processing ANNOUNCE message at backup");
       result = ProcessMessageAnnounce(message, offset + 1);
       break;
     case ConsensusMessageType::CONSENSUSFAILURE:
-      LOG_GENERAL(INFO, "BZ Processing CONSENSUSFAILURE message at backup");
       result = ProcessMessageConsensusFailure(message, offset + 1);
       break;
     case ConsensusMessageType::CHALLENGE:
-      LOG_GENERAL(INFO, "BZ Processing CHALLENGE message at backup");
       result = ProcessMessageChallenge(message, offset + 1);
       break;
     case ConsensusMessageType::COLLECTIVESIG:
-      LOG_GENERAL(INFO, "BZ Processing COLLECTIVESIG message at backup");
       result = ProcessMessageCollectiveSig(message, offset + 1);
       break;
     case ConsensusMessageType::FINALCHALLENGE:
-      LOG_GENERAL(INFO, "BZ Processing FINALCHALLENGE message at backup");
       result = ProcessMessageFinalChallenge(message, offset + 1);
       break;
     case ConsensusMessageType::FINALCOLLECTIVESIG:
-      LOG_GENERAL(INFO, "BZ Processing FINALCOLLECTIVESIG message at backup");
       result = ProcessMessageFinalCollectiveSig(message, offset + 1);
       break;
     default:
23 changes: 1 addition & 22 deletions src/libConsensus/ConsensusLeader.cpp
@@ -1007,13 +1007,8 @@ ConsensusLeader::ConsensusLeader(
   m_state = INITIAL;
   zil::local::variables.SetConsensusState(int(m_state));
   // m_numForConsensus = (floor(TOLERANCE_FRACTION * (pubkeys.size() - 1)) + 1);
-  LOG_GENERAL(
-      WARNING,
-      "BZ Determining number of consensus responses for committee size: "
-          << committee.size());
   m_numForConsensus = ConsensusCommon::NumForConsensus(committee.size());
-  LOG_GENERAL(WARNING,
-              "BZ Number of consensus responses : " << m_numForConsensus);
+  LOG_GENERAL(INFO, "Number of consensus responses: " << m_numForConsensus);
   m_numForConsensusFailure = committee.size() - m_numForConsensus;

   m_nodeCommitFailureHandlerFunc = std::move(nodeCommitFailureHandlerFunc);
@@ -1110,15 +1105,10 @@ bool ConsensusLeader::StartConsensus(
     for (auto const& i : m_committee) {
       peer.push_back(i.second);
     }
-    LOG_GENERAL(WARNING,
-                "BZ Sending ConsensusMessageType::ANNOUNCE from leader");
     zil::p2p::GetInstance().SendMessage(
         peer, announcement_message, zil::p2p::START_BYTE_NORMAL, true, true);
   }

-  LOG_GENERAL(WARNING, "BZ ConsensusLeader::StartConsensus I'm in state: "
-                           << GetStateString());
-
   if (m_numOfSubsets > 1) {
     // Start timer for accepting commits
     // =================================
@@ -1164,28 +1154,20 @@ bool ConsensusLeader::ProcessMessage(const zbytes& message, unsigned int offset,

   bool result = false;

-  LOG_GENERAL(WARNING, "BZ ConsensusLeader::ProcessMessage I'm in state: "
-                           << GetStateString());
-
   switch (message.at(offset)) {
     case ConsensusMessageType::COMMIT:
-      LOG_GENERAL(INFO, "BZ Processing COMMIT message at leader");
       result = ProcessMessageCommit(message, offset + 1, from);
       break;
    case ConsensusMessageType::COMMITFAILURE:
-      LOG_GENERAL(INFO, "BZ Processing COMMITFAILURE message at leader");
       result = ProcessMessageCommitFailure(message, offset + 1, from);
       break;
    case ConsensusMessageType::RESPONSE:
-      LOG_GENERAL(INFO, "BZ Processing RESPONSE message at leader");
       result = ProcessMessageResponse(message, offset + 1, from);
       break;
    case ConsensusMessageType::FINALCOMMIT:
-      LOG_GENERAL(INFO, "BZ Processing FINALCOMMIT message at leader");
       result = ProcessMessageFinalCommit(message, offset + 1, from);
       break;
    case ConsensusMessageType::FINALRESPONSE:
-      LOG_GENERAL(INFO, "BZ Processing FINALRESPONSE message at leader");
       result = ProcessMessageFinalResponse(message, offset + 1, from);
       break;
    default:
@@ -1201,9 +1183,6 @@ void ConsensusLeader::Audit() {

   lock_guard<mutex> g(m_mutex);

-  LOG_GENERAL(WARNING,
-              "BZ ConsensusLeader::Audit I'm in state: " << GetStateString());
-
   for (unsigned int subsetID = 0; subsetID < m_consensusSubsets.size();
        subsetID++) {
     ConsensusSubset& subset = m_consensusSubsets.at(subsetID);
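
The first hunk in ConsensusLeader.cpp above touches the quorum computation. The commented-out formula left in place, floor(TOLERANCE_FRACTION * (n - 1)) + 1, suggests how ConsensusCommon::NumForConsensus derives the response threshold; that function's body is not part of this commit, so the sketch below is a hypothetical reconstruction from that comment, not the actual implementation.

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Assumed tolerance of roughly two thirds; the real constant lives in
    // Zilliqa's configuration.
    static const double TOLERANCE_FRACTION = 0.667;

    // Hypothetical reconstruction of ConsensusCommon::NumForConsensus, based
    // only on the commented-out formula visible in ConsensusLeader.cpp.
    uint32_t NumForConsensus(uint32_t committeeSize) {
      return static_cast<uint32_t>(
          std::floor(TOLERANCE_FRACTION * (committeeSize - 1)) + 1);
    }

    int main() {
      // For a committee of 10: floor(0.667 * 9) + 1 = 7 responses reach
      // consensus, leaving 10 - 7 = 3 as m_numForConsensusFailure.
      std::cout << NumForConsensus(10) << '\n';  // prints 7
    }
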
23 changes: 4 additions & 19 deletions src/libDirectoryService/DSBlockPostProcessing.cpp
@@ -319,11 +319,6 @@ void DirectoryService::UpdateDSCommitteeComposition() {
   LOG_MARKER();
   std::lock_guard<mutex> g(m_mediator.m_mutexDSCommittee);

-  const bool leader = m_mediator.m_ds->GetConsensusMyID() ==
-                      m_mediator.m_ds->m_consensusLeaderID;
-  LOG_GENERAL(WARNING, "BZ UpdateDSCommitteeComposition enter, I am leader? : "
-                           << (leader ? "true" : "false"));
-
   UpdateDSCommitteeCompositionCore(m_mediator.m_selfKey.second,
                                    *m_mediator.m_DSCommittee,
                                    m_mediator.m_dsBlockChain.GetLastBlock());
@@ -337,8 +332,6 @@ void DirectoryService::StartNextTxEpoch() {
     return;
   }

-  LOG_GENERAL(WARNING, "BZ DirectoryService::StartNextTxEpoch() enter");
-
   LOG_MARKER();

   {
@@ -371,12 +364,9 @@ void DirectoryService::StartNextTxEpoch() {

   m_mediator.m_node->m_myShardMembers = m_mediator.m_DSCommittee;

-  LOG_GENERAL(
-      WARNING,
-      "BZ: DirectoryService::StartNextTxEpoch Setting myShardMembers to: ");
+  LOG_GENERAL(INFO, "Setting myShardMembers to: ");
   for (const auto& kv : *m_mediator.m_node->m_myShardMembers) {
-    LOG_GENERAL(WARNING, "BZ: DirectoryService::StartNextTxEpoch IP: "
-                             << kv.second.GetPrintableIPAddress());
+    LOG_GENERAL(INFO, "IP: " << kv.second.GetPrintableIPAddress());
   }

   LOG_EPOCH(INFO, m_mediator.m_currentEpochNum, "DS shard:");
@@ -457,8 +447,6 @@ void DirectoryService::StartFirstTxEpoch() {

   LOG_MARKER();

-  LOG_GENERAL(WARNING, "BZ DirectoryService::StartFirstTxEpoch() enter");
-
   {
     lock_guard<mutex> g(m_mutexAllPOW);
     m_allPoWs.clear();
@@ -494,12 +482,9 @@ void DirectoryService::StartFirstTxEpoch() {
     lock_guard<mutex> g(m_mediator.m_node->m_mutexShardMember);
     m_mediator.m_node->m_myShardMembers = m_mediator.m_DSCommittee;

-    LOG_GENERAL(
-        WARNING,
-        "BZ: DirectoryService::StartFirstTxEpoch Setting myShardMembers to: ");
+    LOG_GENERAL(INFO, "Setting myShardMembers to: ");
     for (const auto& kv : *m_mediator.m_node->m_myShardMembers) {
-      LOG_GENERAL(WARNING, "BZ: DirectoryService::StartFirstTxEpoch IP: "
-                               << kv.second.GetPrintableIPAddress());
+      LOG_GENERAL(INFO, "IP: " << kv.second.GetPrintableIPAddress());
     }

     LOG_EPOCH(INFO, m_mediator.m_currentEpochNum, "DS shard:");
17 changes: 0 additions & 17 deletions src/libDirectoryService/DSBlockPreProcessing.cpp
@@ -167,10 +167,6 @@ void DirectoryService::ComputeMembersInShard(
     const PubKey& key = kv.second;
     m_shards.emplace_back(key, m_allPoWConns.at(key), m_mapNodeReputation[key]);
   }
-  LOG_GENERAL(
-      INFO,
-      "BZ: DirectoryService::ComputeMembersInShard finished, shard shize is: "
-          << m_shards.size());
 }

 void DirectoryService::InjectPoWForDSNode(
@@ -849,10 +845,6 @@ bool DirectoryService::RunConsensusOnDSBlockWhenDSPrimary() {
   LOG_EPOCH(INFO, m_mediator.m_currentEpochNum,
             "I am the leader DS node. Creating DS block.");

-  LOG_GENERAL(WARNING, "BZ RunConsensusOnDSBlockWhenDSPrimary, committee size: "
-                           << m_mediator.m_DSCommittee->size()
-                           << ", shard size: " << std::size(m_shards));
-
   lock(m_mutexPendingDSBlock, m_mutexAllPoWConns);
   lock_guard<mutex> g(m_mutexPendingDSBlock, adopt_lock);
   lock_guard<mutex> g2(m_mutexAllPoWConns, adopt_lock);
@@ -1089,11 +1081,6 @@ bool DirectoryService::RunConsensusOnDSBlockWhenDSPrimary() {
         dst, offset, consensusID, blockNumber, blockHash, leaderID, leaderKey,
         *m_pendingDSBlock, m_shards, m_allPoWs, dsWinnerPoWs, messageToCosign);
   };
-  LOG_GENERAL(WARNING,
-              "BZ RunConsensusOnDSBlockWhenDSPrimary, before announcement "
-              "starts, committee size: "
-                  << m_mediator.m_DSCommittee->size()
-                  << ", shard size: " << std::size(m_shards));
   cl->StartConsensus(announcementGeneratorFunc, nullptr, BROADCAST_GOSSIP_MODE);
   return true;
 }
@@ -1270,10 +1257,6 @@ bool DirectoryService::RunConsensusOnDSBlockWhenDSBackup() {
     return true;
   }

-  LOG_GENERAL(WARNING, "BZ RunConsensusOnDSBlockWhenDSBackup, committee size: "
-                           << m_mediator.m_DSCommittee->size()
-                           << ", shard size: " << std::size(m_shards));
-
 #ifdef VC_TEST_VC_PRECHECK_1
   uint64_t dsCurBlockNum =
       m_mediator.m_dsBlockChain.GetLastBlock().GetHeader().GetBlockNum();
18 changes: 0 additions & 18 deletions src/libDirectoryService/DSComposition.cpp
@@ -40,9 +40,6 @@ void UpdateDSCommitteeCompositionCore(const PubKey& selfKeyPub,
   const auto& NewDSMembers = dsblock.GetHeader().GetDSPoWWinners();
   unsigned int NumWinners = NewDSMembers.size();

-  LOG_GENERAL(WARNING, "BZ UpdateDSCommitteeCompositionCore enter, winners: "
-                           << NumWinners);
-
   // Get the vector of all non-performant nodes to be removed.
   const auto& removeDSNodePubkeys = dsblock.GetHeader().GetDSRemovePubKeys();

@@ -75,9 +72,6 @@ void UpdateDSCommitteeCompositionCore(const PubKey& selfKeyPub,
       dsComm.emplace_back(*it);
       dsComm.erase(it);

-      LOG_GENERAL(WARNING, "BZ Pushing node to the end: "
-                               << it->second.GetPrintableIPAddress());
-
       continue;
     }

@@ -90,31 +84,23 @@ void UpdateDSCommitteeCompositionCore(const PubKey& selfKeyPub,
         // Peer() is required because my own node's network information is
         // zeroed out.
         dsComm.emplace_front(selfKeyPub, Peer());
-        LOG_GENERAL(WARNING, "BZ Myself Pushing non-guard to front: "
-                                 << DSPowWinner.second.GetPrintableIPAddress());
       } else {
         // Calculate the position to insert the current winner.
         it = dsComm.begin() + (Guard::GetInstance().GetNumOfDSGuard());
         // Place my node's information in front of the DS Committee Community
         // Nodes.
-        LOG_GENERAL(WARNING, "BZ Myself Pushing guard to proper position: "
-                                 << DSPowWinner.second.GetPrintableIPAddress());
         dsComm.emplace(it, selfKeyPub, Peer());
       }
     } else {
       if (!GUARD_MODE) {
         // Place the current winner node's information in front of the DS
         // Committee.
         dsComm.emplace_front(DSPowWinner);
-        LOG_GENERAL(WARNING, "BZ Other Pushing non-guard to front: "
-                                 << DSPowWinner.second.GetPrintableIPAddress());
       } else {
         // Calculate the position to insert the current winner.
         it = dsComm.begin() + (Guard::GetInstance().GetNumOfDSGuard());
         // Place the winner's information in front of the DS Committee Community
         // Nodes.
-        LOG_GENERAL(WARNING, "BZ Other Pushing guard to proper position: "
-                                 << DSPowWinner.second.GetPrintableIPAddress());
         dsComm.emplace(it, DSPowWinner);
       }
     }
@@ -146,14 +132,10 @@ void UpdateDSCommitteeCompositionCore(const PubKey& selfKeyPub,
   }

   if (LOOKUP_NODE_MODE && !bStoreDSCommittee) {
-    LOG_GENERAL(WARNING, "BZ Adding ejected node: "
-                             << dsComm.back().second.GetPrintableIPAddress());
     minerInfo.m_dsNodesEjected.emplace_back(dsComm.back().first);
   }

   // Remove this node from blacklist if it exists
-  LOG_GENERAL(WARNING, "BZ Removing from dsComm node: "
-                           << dsComm.back().second.GetPrintableIPAddress());
   Peer& p = dsComm.back().second;
   Blacklist::GetInstance().Remove({p.GetIpAddress(),p.GetListenPortHost(),p.GetNodeIndentifier()});
   dsComm.pop_back();
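
For context, the hunks above all sit inside the DS-committee rotation: each PoW winner is inserted at the front of dsComm (or just after the DS guards in guard mode), and the member at the back is expired with pop_back. A compact sketch of that front-in, back-out rotation over a std::deque follows; Member and RotateCommittee are simplified stand-ins, not Zilliqa's types.

    #include <deque>
    #include <iostream>
    #include <string>
    #include <utility>

    // Simplified stand-in for the (PubKey, Peer) pairs held in dsComm.
    using Member = std::pair<std::string /*pubkey*/, std::string /*peer*/>;

    // Insert one PoW winner and expire the oldest member, mirroring the
    // emplace_front/emplace/pop_back pattern in
    // UpdateDSCommitteeCompositionCore. guardCount models
    // Guard::GetInstance().GetNumOfDSGuard(): when non-zero, winners are
    // inserted after the guard nodes instead of at the very front.
    void RotateCommittee(std::deque<Member>& dsComm, const Member& winner,
                         size_t guardCount = 0) {
      if (guardCount == 0) {
        dsComm.emplace_front(winner);
      } else {
        dsComm.emplace(dsComm.begin() + guardCount, winner);
      }
      dsComm.pop_back();  // the node at the back leaves the committee
    }

    int main() {
      std::deque<Member> dsComm{{"k1", "1.1.1.1"}, {"k2", "2.2.2.2"}};
      RotateCommittee(dsComm, {"k3", "3.3.3.3"});
      for (const auto& m : dsComm)
        std::cout << m.first << " @ " << m.second << '\n';  // k3 then k1
    }
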
10 changes: 0 additions & 10 deletions src/libDirectoryService/DirectoryService.cpp
@@ -199,7 +199,6 @@ bool DirectoryService::ProcessSetPrimary(
   // Note: This function should only be invoked during bootstrap sequence
   // Message = [Primary node IP] [Primary node port]
   LOG_MARKER();
-  LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 1");

   if (m_mediator.m_currentEpochNum > 1) {
     // TODO: Get the IP address of who send this message, and deduct its
@@ -265,7 +264,6 @@ bool DirectoryService::ProcessSetPrimary(
     // Load the DS committee, with my own peer set to dummy
     m_mediator.m_lookup->SetDSCommitteInfo(true);
   }
-  LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 2");
   // Lets start the gossip as earliest as possible
   if (BROADCAST_GOSSIP_MODE) {
     VectorOfNode peers;
@@ -315,7 +313,6 @@ bool DirectoryService::ProcessSetPrimary(
     Guard::GetInstance().AddDSGuardToBlacklistExcludeList(
         *m_mediator.m_DSCommittee);
   }
-  LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 3");
   SetConsensusLeaderID(0);
   if (m_mediator.m_currentEpochNum > 1) {
     LOG_GENERAL(WARNING, "ProcessSetPrimary called in epoch "
@@ -344,12 +341,10 @@ bool DirectoryService::ProcessSetPrimary(
                   << "][" << std::setw(6) << std::left << m_consensusMyID
                   << "] DSBK");
   }
-  LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 4");
   if ((m_consensusMyID < POW_PACKET_SENDERS) ||
       (primary == m_mediator.m_selfPeer)) {
     m_powSubmissionWindowExpired = false;
     LOG_GENERAL(INFO, "m_consensusMyID: " << m_consensusMyID);
-    LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 11");
     LOG_EPOCH(INFO, m_mediator.m_currentEpochNum,
               "Waiting " << POW_WINDOW_IN_SECONDS
                          << " seconds, accepting PoW submissions...");
@@ -361,7 +356,6 @@ bool DirectoryService::ProcessSetPrimary(
       this->SendPoWPacketSubmissionToOtherDSComm();
     };
     DetachedFunction(1, func);
-    LOG_GENERAL(WARNING, "BZ: ProcessSetPrimary 22");
     LOG_EPOCH(INFO, m_mediator.m_currentEpochNum,
               "Waiting " << POWPACKETSUBMISSION_WINDOW_IN_SECONDS
                          << " seconds, accepting PoW submissions packet from "
@@ -743,8 +737,6 @@ void DirectoryService::StartNewDSEpochConsensus(bool isRejoin) {
   m_mediator.m_consensusID = 0;
   m_mediator.m_node->SetConsensusLeaderID(0);

-  LOG_GENERAL(WARNING, "BZ Starting StartNewDSEpochConsensus consensus");
-
   CleanFinalBlockConsensusBuffer();

   m_mediator.m_node->CleanCreatedTransaction();
@@ -1195,8 +1187,6 @@ bool DirectoryService::Execute(const zbytes& message, unsigned int offset,
     LOG_EPOCH(WARNING, m_mediator.m_currentEpochNum, "Ignore DS message");
     return false;
   }
-  LOG_GENERAL(WARNING,
-              "BZ Dispatching DS msg type: " << hex << (unsigned int)ins_byte);
   if (ins_byte < ins_handlers_count) {
     result =
         (this->*ins_handlers[ins_byte])(message, offset + 1, from, startByte);
