Remove logs (#4202)
jowlee committed Oct 28, 2022
1 parent 2d0daf3 commit c359bc7
Showing 12 changed files with 25 additions and 76 deletions.
8 changes: 0 additions & 8 deletions discovery-provider/src/api/v1/metrics.py
@@ -113,11 +113,7 @@ class CachedRouteMetrics(Resource):
@cache(ttl_sec=5)
def get(self):
args = metrics_route_parser.parse_args()
logger.info(
f"getting cached route metrics at {args.get('start_time')} before parsing"
)
start_time = parse_unix_epoch_param_non_utc(args.get("start_time"))
logger.info(f"getting cached route metrics at {start_time} UTC")
deduped_metrics = get_redis_route_metrics(start_time)
summed_metrics = get_summed_unique_metrics(start_time)
metrics = {"deduped": deduped_metrics, "summed": summed_metrics}
@@ -135,11 +131,7 @@ class CachedAppMetrics(Resource):
@cache(ttl_sec=5)
def get(self):
args = metrics_route_parser.parse_args()
logger.info(
f"getting cached app metrics at {args.get('start_time')} before parsing"
)
start_time = parse_unix_epoch_param_non_utc(args.get("start_time"))
logger.info(f"getting cached app metrics at {start_time.now()} UTC")
metrics = get_redis_app_metrics(start_time)
response = success_response(metrics)
return response
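The two hunks above drop the before/after-parse logging around parse_unix_epoch_param_non_utc; the second removed line also called .now() on the parsed datetime, so it effectively logged the current time rather than the requested start_time. A minimal sketch of what an epoch-parsing helper of this shape could look like (hypothetical; the real helper is defined elsewhere in the API utils, not in this diff):

import datetime

def parse_unix_epoch_param_non_utc(value, default=0):
    # Interpret the query parameter as seconds since the Unix epoch and
    # return a naive datetime. The fallback and naming are assumptions.
    if value is None:
        value = default
    return datetime.datetime.utcfromtimestamp(int(value))

# Example: parse_unix_epoch_param_non_utc("1666915200") -> datetime.datetime(2022, 10, 28, 0, 0)
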
12 changes: 1 addition & 11 deletions discovery-provider/src/app.py
@@ -348,14 +348,6 @@ def default(self, o):
return app


def delete_last_scanned_eth_block_redis(redis_inst):
logger.info("index_eth.py | deleting existing redis scanned block on start")
redis_inst.delete(eth_indexing_last_scanned_block_key)
logger.info(
"index_eth.py | successfully deleted existing redis scanned block on start"
)


def configure_celery(celery, test_config=None):
database_url = shared_config["db"]["url"]
database_url_read_replica = shared_config["db"]["url_read_replica"]
@@ -537,9 +529,6 @@ def configure_celery(celery, test_config=None):
eth_abi_values,
)

# Clear last scanned redis block on startup
delete_last_scanned_eth_block_redis(redis_inst)

# Initialize Anchor Indexer
anchor_program_indexer = AnchorProgramIndexer(
shared_config["solana"]["anchor_data_program_id"],
@@ -558,6 +547,7 @@
eth_manager.init_contracts()

# Clear existing locks used in tasks if present
redis_inst.delete(eth_indexing_last_scanned_block_key)
redis_inst.delete("disc_prov_lock")
redis_inst.delete("network_peers_lock")
redis_inst.delete("update_metrics_lock")
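Rather than a dedicated delete_last_scanned_eth_block_redis helper with its own start/finish logs, the scanned-block checkpoint is now cleared alongside the task locks on startup. Since redis DEL accepts multiple keys, the whole cleanup could even be a single round trip; a sketch assuming redis_inst is a standard redis-py client and that eth_indexing_last_scanned_block_key resolves to the string shown below:

import redis

redis_inst = redis.Redis(host="localhost", port=6379)  # assumed connection parameters

# Clear the eth indexer checkpoint and the existing task locks in one DEL.
redis_inst.delete(
    "eth_indexing_last_scanned_block",  # assumed value of eth_indexing_last_scanned_block_key
    "disc_prov_lock",
    "network_peers_lock",
    "update_metrics_lock",
)
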
12 changes: 0 additions & 12 deletions discovery-provider/src/solana/solana_client_manager.py
@@ -46,15 +46,9 @@ def handle_get_sol_tx_info(client: Client, index: int):
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Fetching tx {tx_sig} {endpoint}"
)
tx_info: ConfirmedTransaction = client.get_transaction(
tx_sig, encoding
)
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Finished fetching tx {tx_sig} {endpoint}"
)
if tx_info["result"] is not None:
return tx_info
except Exception as e:
@@ -93,17 +87,11 @@ def handle_get_signatures_for_address(client: Client, index: int):
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Fetching {before} {endpoint}"
)
transactions: ConfirmedSignatureForAddressResponse = (
client.get_signatures_for_address(
account, before, until, limit, Commitment("finalized")
)
)
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Finished fetching {before} {endpoint}"
)
return transactions
except Exception as e:
logger.error(
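Both removed pairs of lines bracketed a single RPC call with "Fetching…" / "Finished fetching…" info logs inside a retry loop, so every healthy request produced two log lines. A sketch of the retry shape that remains, mirroring the surrounding code and assuming the dict-style RPC responses returned by the solana-py version pinned in this repo:

import logging
from solana.rpc.api import Client

logger = logging.getLogger(__name__)

def get_sol_tx_info_with_retries(client: Client, tx_sig: str, encoding: str = "json", retries: int = 5):
    # Retry the fetch until a populated result comes back, logging only failures.
    num_retries = retries
    while num_retries > 0:
        try:
            tx_info = client.get_transaction(tx_sig, encoding)
            if tx_info["result"] is not None:
                return tx_info
        except Exception:
            logger.exception(f"Error fetching tx {tx_sig}")
        num_retries -= 1
    raise Exception(f"Failed to fetch tx {tx_sig} after {retries} attempts")
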
6 changes: 0 additions & 6 deletions discovery-provider/src/tasks/index.py
@@ -463,9 +463,6 @@ def get_contract_type_for_tx(tx_type_to_grouped_lists_map, tx, tx_receipt):
)
break

logger.info(
f"index.py | checking returned {contract_type} vs {tx_target_contract_address}"
)
return contract_type


@@ -654,9 +651,6 @@ def index_blocks(self, db, blocks_list):
txs_grouped_by_type[TRACK_FACTORY],
txs_grouped_by_type[ENTITY_MANAGER],
)
logger.info(
f"index.py | index_blocks - fetch_metadata in {time.time() - fetch_metadata_start_time}s"
)
# Record the time this took in redis
duration_ms = round(
(time.time() - fetch_metadata_start_time) * 1000
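The removed fetch_metadata timing log was redundant with the duration that index_blocks already records in redis immediately afterwards. A sketch of that record-the-duration pattern as a reusable context manager (the helper and the redis key name are illustrative, not part of this diff):

import logging
import time
from contextlib import contextmanager

logger = logging.getLogger(__name__)

@contextmanager
def record_duration(redis_client, key):
    # Time the wrapped block, store the elapsed milliseconds in redis,
    # and keep the chatty progress message at debug level.
    start = time.time()
    try:
        yield
    finally:
        duration_ms = round((time.time() - start) * 1000)
        redis_client.set(key, duration_ms)
        logger.debug(f"index.py | index_blocks - fetch_metadata in {duration_ms}ms")
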
2 changes: 1 addition & 1 deletion discovery-provider/src/tasks/index_eth.py
@@ -71,7 +71,7 @@ def index_eth_transfer_events(db, redis_inst):
# Run the scan
result, total_chunks_scanned = scanner.scan(start_block, end_block)

logger.info(
logger.debug(
"index_eth.py | Reached end block for eth transfer events... saving events to database"
)
scanner.save(end_block)
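Here the message is kept but demoted from info to debug, so it disappears from production output (where the effective level is typically INFO) yet can still be switched on when tracing the eth scanner. A standalone illustration of the level behaviour using basicConfig, not the service's actual logging setup:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("src.tasks.index_eth")  # module-style name, for illustration

# Suppressed: DEBUG is below the configured INFO threshold.
logger.debug("index_eth.py | Reached end block for eth transfer events... saving events to database")

# Emitted once an operator opts in to DEBUG on the root logger.
logging.getLogger().setLevel(logging.DEBUG)
logger.debug("index_eth.py | Reached end block for eth transfer events... saving events to database")
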
12 changes: 6 additions & 6 deletions discovery-provider/src/tasks/index_metrics.py
@@ -171,7 +171,7 @@ def get_metrics(endpoint, start_time):
route_metrics_endpoint = (
f"{endpoint}/v1/metrics/routes/cached?start_time={start_time}"
)
logger.info(f"route metrics request to: {route_metrics_endpoint}")
logger.debug(f"route metrics request to: {route_metrics_endpoint}")
route_metrics_response = requests.get(route_metrics_endpoint, timeout=10)
if route_metrics_response.status_code != 200:
raise Exception(
@@ -182,7 +182,7 @@ def get_metrics(endpoint, start_time):
app_metrics_endpoint = (
f"{endpoint}/v1/metrics/apps/cached?start_time={start_time}"
)
logger.info(f"app metrics request to: {app_metrics_endpoint}")
logger.debug(f"app metrics request to: {app_metrics_endpoint}")
app_metrics_response = requests.get(app_metrics_endpoint, timeout=10)
if app_metrics_response.status_code != 200:
raise Exception(
@@ -266,7 +266,7 @@ def consolidate_metrics_from_other_nodes(self, db, redis):
start_time = int(start_time_obj.timestamp())
new_route_metrics, new_app_metrics = get_metrics(node, start_time)

logger.info(
logger.debug(
f"did attempt to receive route and app metrics from {node} at {start_time_obj} ({start_time})"
)

@@ -293,13 +293,13 @@ def consolidate_metrics_from_other_nodes(self, db, redis):
db, end_time, summed_unique_daily_count, summed_unique_monthly_count
)

logger.info(f"visited node timestamps: {visited_node_timestamps}")
logger.debug(f"visited node timestamps: {visited_node_timestamps}")


def get_historical_metrics(node):
try:
endpoint = f"{node}/v1/metrics/aggregates/historical"
logger.info(f"historical metrics request to: {endpoint}")
logger.debug(f"historical metrics request to: {endpoint}")
response = requests.get(endpoint, timeout=10)
if response.status_code != 200:
raise Exception(
@@ -368,7 +368,7 @@ def synchronize_all_node_metrics(self, db):
all_other_nodes = get_all_other_nodes()[0]
for node in all_other_nodes:
historical_metrics = get_historical_metrics(node)
logger.info(f"got historical metrics from {node}: {historical_metrics}")
logger.debug(f"got historical metrics from {node}: {historical_metrics}")
if historical_metrics:
update_route_metrics_count(
daily_route_metrics, historical_metrics["routes"]["daily"]
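Each request this task makes to a peer node already has an explicit failure path (a non-200 status raises), so the request URLs are now logged at debug instead of info. A sketch of the per-node fetch in get_metrics, assuming the cached-metrics endpoints wrap their payload in a {"data": ...} success envelope:

import requests

def get_cached_route_metrics(endpoint: str, start_time: int) -> dict:
    # Hit the peer's cached route-metrics endpoint and fail loudly on a bad status.
    url = f"{endpoint}/v1/metrics/routes/cached?start_time={start_time}"
    response = requests.get(url, timeout=10)
    if response.status_code != 200:
        raise Exception(
            f"Query to cached route metrics endpoint {url} failed with status {response.status_code}"
        )
    return response.json()["data"]  # envelope field is an assumption
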
7 changes: 1 addition & 6 deletions discovery-provider/src/tasks/index_network_peers.py
@@ -43,12 +43,7 @@ def update_network_peers(self):
have_lock = update_lock.acquire(blocking=False)
if have_lock:
# An object returned from web3 chain queries
peers_from_ethereum = retrieve_peers_from_eth_contracts(self)
logger.info(
f"index_network_peers.py | Peers from eth-contracts: {peers_from_ethereum}"
)
# Combine the set of known peers from ethereum and within local database
all_peers = peers_from_ethereum
all_peers = retrieve_peers_from_eth_contracts(self)

logger.info(f"index_network_peers.py | All known peers {all_peers}")
peers_list = list(all_peers)
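The intermediate peers_from_ethereum variable and its dump of the eth-contract peer set are gone; the task now assigns the result of retrieve_peers_from_eth_contracts directly and keeps the single "All known peers" summary. The surrounding update_lock is a redis lock; a sketch of that non-blocking pattern with redis-py (the key name and timeout are assumptions):

import redis

redis_client = redis.Redis()  # assumed connection

update_lock = redis_client.lock("network_peers_lock", timeout=7200)
have_lock = update_lock.acquire(blocking=False)
if have_lock:
    try:
        pass  # refresh the peer set from the eth contracts here
    finally:
        update_lock.release()
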
4 changes: 2 additions & 2 deletions discovery-provider/src/tasks/index_rewards_manager.py
@@ -230,7 +230,7 @@ def process_batch_sol_reward_manager_txs(
):
"""Validates that the transfer instruction is consistent with DB and inserts ChallengeDisbursement DB entries"""
try:
logger.error(f"index_reward_manager | {reward_manager_txs}")
logger.info(f"index_reward_manager | {reward_manager_txs}")
eth_recipients = [
tx["transfer_instruction"]["eth_recipient"]
for tx in reward_manager_txs
@@ -399,7 +399,7 @@ def get_transaction_signatures(
for tx_info in transactions_array:
tx_sig = tx_info["signature"]
tx_slot = tx_info["slot"]
logger.info(
logger.debug(
f"index_rewards_manager.py | Processing tx={tx_sig} | slot={tx_slot}"
)
if tx_info["slot"] > latest_processed_slot:
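The first change corrects a severity mix-up (the raw reward-manager payload was logged at error level on the happy path); the second demotes the per-transaction progress line to debug. The loop around it only queues transactions from slots newer than the last processed slot, roughly:

def filter_new_signatures(transactions_array, latest_processed_slot):
    # Keep only signatures whose slot is beyond the last slot already indexed.
    batch = []
    for tx_info in transactions_array:
        if tx_info["slot"] > latest_processed_slot:
            batch.append(tx_info["signature"])
    return batch
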
7 changes: 1 addition & 6 deletions discovery-provider/src/tasks/index_spl_token.py
@@ -138,8 +138,6 @@ def get_latest_slot(db):
latest_slot = highest_slot_query[0]

# Return None if not yet cached

logger.info(f"index_spl_token.py | returning {latest_slot} for highest slot")
return latest_slot


@@ -353,7 +351,7 @@ def process_spl_token_tx(
else:
# handle initial case where there is no stored latest processed slot and start from current
if latest_processed_slot is None:
logger.info("index_spl_token.py | setting from none")
logger.debug("index_spl_token.py | setting from none")
transaction_signature_batch = transactions_array
intersection_found = True
else:
@@ -394,7 +392,6 @@ def process_spl_token_tx(
page_count = page_count + 1

transaction_signatures.reverse()
logger.info("index_spl_token.py | intersection found")
totals = {"user_ids": 0, "root_accts": 0, "token_accts": 0}
solana_logger.end_time("fetch_batches")
solana_logger.start_time("parse_batches")
@@ -448,8 +445,6 @@ def index_spl_token(self):
if have_lock:
logger.info("index_spl_token.py | Acquired lock")
process_spl_token_tx(solana_client_manager, db, redis)
# else:
# logger.info("index_spl_token.py | Failed to acquire lock")
except Exception as e:
logger.error("index_spl_token.py | Fatal error in main loop", exc_info=True)
raise e
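Besides the removed slot and "intersection found" logs, the dead commented-out else branch for the lock is dropped. The surrounding paging loop walks backwards through signature pages until it reaches a slot that has already been processed; a sketch of that shape, with fetch_page standing in for the client-manager call (it is assumed to return a list of {"signature": ..., "slot": ...} dicts, newest first):

def collect_new_signature_batches(fetch_page, latest_processed_slot):
    transaction_signatures = []
    last_tx_signature = None
    intersection_found = False
    while not intersection_found:
        transactions_array = fetch_page(last_tx_signature)
        if not transactions_array:
            break
        if latest_processed_slot is None:
            # No checkpoint yet: take the current page and stop paging.
            batch = [tx["signature"] for tx in transactions_array]
            intersection_found = True
        else:
            batch = []
            for tx_info in transactions_array:
                if tx_info["slot"] > latest_processed_slot:
                    batch.append(tx_info["signature"])
                else:
                    intersection_found = True
        transaction_signatures.append(batch)
        last_tx_signature = transactions_array[-1]["signature"]
    # Oldest batches first, matching the reverse() in process_spl_token_tx.
    transaction_signatures.reverse()
    return transaction_signatures
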
6 changes: 2 additions & 4 deletions discovery-provider/src/tasks/index_user_bank.py
@@ -68,7 +68,6 @@
# Message formatted as follows:
# EthereumAddress = [214, 237, 135, 129, 143, 240, 221, 138, 97, 84, 199, 236, 234, 175, 81, 23, 114, 209, 118, 39]
def parse_eth_address_from_msg(msg: str):
logger.info(f"index_user_bank.py {msg}")
res = re.findall(r"\[.*?\]", msg)
# Remove brackets
inner_res = res[0][1:-1]
@@ -102,7 +101,6 @@ def get_tx_in_db(session: Session, tx_sig: str) -> bool:
session.query(UserBankTx).filter(UserBankTx.signature == tx_sig)
).count()
exists = tx_sig_db_count > 0
# logger.info(f"index_user_bank.py | {tx_sig} exists={exists}")
return exists


@@ -376,7 +374,7 @@ def parse_user_bank_transaction(
timestamp = tx_info["result"]["blockTime"]
parsed_timestamp = datetime.datetime.utcfromtimestamp(timestamp)

logger.info(
logger.debug(
f"index_user_bank.py | parse_user_bank_transaction |\
{tx_slot}, {tx_sig} | {tx_info} | {parsed_timestamp}"
)
@@ -439,7 +437,7 @@ def process_user_bank_txs():
for tx_info in transactions_array:
tx_sig = tx_info["signature"]
tx_slot = tx_info["slot"]
logger.info(
logger.debug(
f"index_user_bank.py | Processing tx={tx_sig} | slot={tx_slot}"
)
if tx_info["slot"] > latest_processed_slot:
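parse_eth_address_from_msg no longer echoes every message it is asked to parse; it still pulls the bracketed byte list out of the message. A completion sketch of that parser (the 0x-hex return shape is an assumption based on how the address is used downstream):

import re

def parse_eth_address_from_msg(msg: str) -> str:
    # e.g. msg = "EthereumAddress = [214, 237, 135, ...]" per the comment above the function
    res = re.findall(r"\[.*?\]", msg)
    # Remove brackets, split into byte values, and render as a hex address.
    inner_res = res[0][1:-1]
    address_bytes = bytes(int(b) for b in inner_res.split(","))
    return "0x" + address_bytes.hex()
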
5 changes: 1 addition & 4 deletions discovery-provider/src/utils/eth_contracts_helpers.py
@@ -21,17 +21,14 @@ def fetch_cnode_info(sp_id, sp_factory_instance, redis):
sp_id_key = get_cn_sp_id_key(sp_id)
sp_info_cached = get_json_cached_key(redis, sp_id_key)
if sp_info_cached:
logger.info(
f"eth_contract_helpers.py | Found cached value for spID={sp_id} - {sp_info_cached}"
)
return sp_info_cached

cn_endpoint_info = sp_factory_instance.functions.getServiceEndpointInfo(
content_node_service_type, sp_id
).call()
set_json_cached_key(redis, sp_id_key, cn_endpoint_info, cnode_info_redis_ttl_s)
logger.info(
f"eth_contract_helpers.py | Configured redis {sp_id_key} - {cn_endpoint_info} - TTL {cnode_info_redis_ttl_s}"
f"eth_contract_helpers.py | set cache valud for sp_id: {sp_id_key} - {cn_endpoint_info} - TTL {cnode_info_redis_ttl_s}"
)
return cn_endpoint_info

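fetch_cnode_info keeps one log line: the cache-hit message goes away and the cache-write message is reworded, since the read/write pair already tells the story. The helpers it relies on follow a simple JSON-in-redis-with-TTL pattern; a sketch under the assumption of a standard redis-py client (the real get_json_cached_key / set_json_cached_key live in src/utils):

import json
import redis

def get_json_cached_key(redis_client: redis.Redis, key: str):
    # Return the cached JSON value, or None on a miss.
    cached = redis_client.get(key)
    return json.loads(cached) if cached else None

def set_json_cached_key(redis_client: redis.Redis, key: str, value, ttl_s: int):
    # Store the contract response as JSON and let redis expire it after ttl_s seconds.
    redis_client.set(key, json.dumps(value), ex=ttl_s)
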
20 changes: 10 additions & 10 deletions discovery-provider/src/utils/redis_metrics.py
@@ -122,7 +122,7 @@ def persist_summed_unique_counts(
.first()
)
if day_unique_record:
logger.info(
logger.debug(
f"summed unique count record for day {day} before update: {day_unique_record.summed_count}"
)
day_unique_record.summed_count = max(
@@ -139,7 +139,7 @@ def persist_summed_unique_counts(
.first()
)
if month_unique_record:
logger.info(
logger.debug(
f"summed unique count record for month {month} before update: \
{month_unique_record.summed_count}"
)
@@ -163,9 +163,9 @@ def persist_route_metrics(
.first()
)
if day_unique_record:
logger.info(
logger.debug(
f"unique count record for day {day} before adding new unique count \
{unique_daily_count}: {day_unique_record.count}"
{unique_daily_count}: {day_unique_record.count} + "
)
day_unique_record.count += unique_daily_count
logger.info(
@@ -187,7 +187,7 @@ def persist_route_metrics(
.first()
)
if day_total_record:
logger.info(
logger.debug(
f"total count record for day {day} before adding new total count \
{count}: {day_total_record.count}"
)
Expand All @@ -211,7 +211,7 @@ def persist_route_metrics(
.first()
)
if month_unique_record:
logger.info(
logger.debug(
f"unique count record for month {month} before adding new unique count \
{unique_monthly_count}: {month_unique_record.count}"
)
@@ -236,7 +236,7 @@ def persist_route_metrics(
.first()
)
if month_total_record:
logger.info(
logger.debug(
f"total count record for month {month} before adding new total count \
{count}: {month_total_record.count}"
)
@@ -267,7 +267,7 @@ def persist_app_metrics(db, day, month, app_count):
.first()
)
if day_record:
logger.info(
logger.debug(
f"daily app record for day {day} and application {application_name} \
before adding new count {count}: {day_record.count}"
)
@@ -295,7 +295,7 @@ def persist_app_metrics(db, day, month, app_count):
.first()
)
if month_record:
logger.info(
logger.debug(
f"monthly app record for month {month} and application {application_name} \
before adding new count {count}: {month_record.count}"
)
@@ -328,7 +328,7 @@ def merge_metrics(metrics, end_time, metric_type, db):
Persist metrics in the database
"""
logger.info(f"about to merge {metric_type} metrics: {len(metrics)} new entries")
logger.debug(f"about to merge {metric_type} metrics: {len(metrics)} new entries")
day = end_time.split(":")[0]
month = f"{day[:7]}/01"

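Every persist_* function in this module follows the same read-modify-write upsert: look up the aggregate row for the day or month, log its previous count, then bump it or insert a new row. With the "before update" values demoted to debug, only the post-update summaries remain at info. A sketch of the upsert shape with a hypothetical SQLAlchemy model (the real metrics models live in src/models):

from sqlalchemy import Column, Date, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class DailyTotalRouteMetric(Base):
    # Hypothetical stand-in for the aggregate route-metrics model.
    __tablename__ = "daily_total_route_metrics_sketch"
    timestamp = Column(Date, primary_key=True)
    count = Column(Integer, nullable=False, default=0)

def upsert_daily_total(session, day, count):
    # Bump the existing row for the day if present, otherwise insert a new one.
    record = (
        session.query(DailyTotalRouteMetric)
        .filter(DailyTotalRouteMetric.timestamp == day)
        .first()
    )
    if record:
        record.count += count
    else:
        session.add(DailyTotalRouteMetric(timestamp=day, count=count))
    session.commit()
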
