Update logging format
ayeowch committed Feb 24, 2015
1 parent bb0b17c commit 3948038
Showing 5 changed files with 48 additions and 48 deletions.
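
Every hunk below makes the same mechanical change: logging calls that eagerly built their message with str.format() now pass a %-style format string and its arguments straight to the logging call, so the logging module interpolates them lazily, only if the record survives level filtering. A minimal sketch of the before/after pattern (illustrative only, not code from this repository):

import logging

logging.basicConfig(level=logging.INFO)
elapsed = 42.7

# Before: the message is built immediately, even if the record
# would be discarded by the log level.
logging.info("Elapsed: {}".format(elapsed))

# After: logging stores the arguments and interpolates them only
# when the record is actually emitted.
logging.info("Elapsed: %d", elapsed)

One side effect worth noting: %d renders a float such as elapsed as an integer ("Elapsed: 42"), whereas the old {} placeholder printed the full float.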
export.py (10 changes: 5 additions & 5 deletions)
@@ -85,11 +85,11 @@ def export_nodes(nodes, timestamp):
rows.append(row)
end = time.time()
elapsed = end - start
logging.info("Elapsed: {}".format(elapsed))
logging.info("Elapsed: %d", elapsed)

dump = os.path.join(SETTINGS['export_dir'], "{}.json".format(timestamp))
open(dump, 'w').write(json.dumps(rows, encoding="latin-1"))
logging.info("Wrote {}".format(dump))
logging.info("Wrote %s", dump)


def init_settings(argv):
@@ -125,7 +125,7 @@ def main(argv):
filename=SETTINGS['logfile'],
filemode='w')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))

pubsub = REDIS_CONN.pubsub()
pubsub.subscribe('resolve')
@@ -134,9 +134,9 @@ def main(argv):
# and GeoIP data for all reachable nodes.
if msg['channel'] == 'resolve' and msg['type'] == 'message':
timestamp = int(msg['data']) # From ping.py's 'snapshot' message
logging.info("Timestamp: {}".format(timestamp))
logging.info("Timestamp: %d", timestamp)
nodes = REDIS_CONN.smembers('opendata')
logging.info("Nodes: {}".format(len(nodes)))
logging.info("Nodes: %d", len(nodes))
export_nodes(nodes, timestamp)
REDIS_CONN.publish('export', timestamp)

pcap.py (18 changes: 9 additions & 9 deletions)
@@ -100,13 +100,13 @@ def cache_messages(self):
try:
(msg, _data) = self.serializer.deserialize_msg(_data)
except (HeaderTooShortError, PayloadTooShortError) as err:
logging.debug("{}: {}".format(stream_id, err))
logging.debug("%s: %s", stream_id, err)
try:
_data += data.next()
except StopIteration:
break
except ProtocolError as err:
logging.debug("{}: {}".format(stream_id, err))
logging.debug("%s: %s", stream_id, err)
try:
_data = data.next()
except StopIteration:
@@ -142,7 +142,7 @@ def extract_streams(self):
timestamp = int(timestamp * 1000) # in ms
self.streams[stream_id].put(
(tcp_pkt.seq, (timestamp, tcp_pkt)))
logging.info("Streams: {}".format(len(self.streams)))
logging.info("Streams: %d", len(self.streams))

def cache_message(self, node, timestamp, msg):
"""
@@ -160,7 +160,7 @@ def cache_message(self, node, timestamp, msg):
self.redis_pipe.set("lastblockhash", inv['hash'])
elif (timestamp - int(rkey_ms)) / 1000 > SETTINGS['ttl']:
# Ignore block inv first seen more than 3 hours ago
logging.debug("Skip: {}".format(key))
logging.debug("Skip: %s", key)
continue
self.redis_pipe.zadd(key, timestamp, node)
self.redis_pipe.expire(key, SETTINGS['ttl'])
@@ -180,7 +180,7 @@ def cache_rtt(self):
if len(timestamps) > 1:
rtt_key = "rtt:{}".format(':'.join(key.split(":")[1:-1]))
rtt = int(timestamps[1]) - int(timestamps[0]) # pong - ping
logging.debug("{}: {}".format(rtt_key, rtt))
logging.debug("%s: %d", rtt_key, rtt)
self.redis_pipe.lpush(rtt_key, rtt)
self.redis_pipe.ltrim(rtt_key, 0, SETTINGS['rtt_count'] - 1)
self.redis_pipe.expire(rtt_key, SETTINGS['ttl'])
@@ -210,16 +210,16 @@ def cron():
logging.warning(err)
continue

logging.info("Loading: {}".format(dump))
logging.info("Loading: %s", dump)

start = time.time()
cache = Cache(filepath=dump)
cache.cache_messages()
end = time.time()
elapsed = end - start

logging.info("Dump: {} ({} messages)".format(dump, cache.count))
logging.info("Elapsed: {}".format(elapsed))
logging.info("Dump: %s (%d messages)", dump, cache.count)
logging.info("Elapsed: %d", elapsed)

os.remove(dump)

@@ -259,7 +259,7 @@ def main(argv):
filename=SETTINGS['logfile'],
filemode='a')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))

cron()

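The lazy style pays off most in hot paths such as pcap.py's cache_messages() above, where logging.debug() runs for every deserialized message: at log level INFO, logging.debug("%s: %s", stream_id, err) returns before doing any string work at all. A quick demonstration of that short-circuit (the Expensive class is a hypothetical stand-in, not part of this codebase):

import logging

class Expensive(object):
    """Counts how many times it is actually rendered to a string."""
    calls = 0

    def __str__(self):
        Expensive.calls += 1
        return "expensive"

logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out

logging.debug("value: {}".format(Expensive()))  # eager: __str__ runs anyway
logging.debug("value: %s", Expensive())         # lazy: __str__ never runs

assert Expensive.calls == 1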
ping.py (22 changes: 11 additions & 11 deletions)
@@ -93,7 +93,7 @@ def keepalive(self):
self.send_bestblockhash()
self.send_addr()
except socket.error as err:
logging.debug("Closing {} ({})".format(self.node, err))
logging.debug("Closing %s (%s)", self.node, err)
break

# Sink received messages to flush them off socket buffer
@@ -102,7 +102,7 @@ def keepalive(self):
except socket.timeout:
pass
except (ProtocolError, ConnectionError, socket.error) as err:
logging.debug("Closing {} ({})".format(self.node, err))
logging.debug("Closing %s (%s)", self.node, err)
break
gevent.sleep(0.3)

@@ -180,7 +180,7 @@ def task():
node = (address, port)

if REDIS_CONN.sadd('open', node) == 0:
logging.debug("Connection exists: {}".format(node))
logging.debug("Connection exists: %s", node)
return

handshake_msgs = []
@@ -196,7 +196,7 @@ def task():
conn.open()
handshake_msgs = conn.handshake()
except (ProtocolError, ConnectionError, socket.error) as err:
logging.debug("Closing {} ({})".format(node, err))
logging.debug("Closing %s (%s)", node, err)
conn.close()

if len(handshake_msgs) == 0:
@@ -233,28 +233,28 @@ def cron(pool):
if len(nodes) == 0:
continue

logging.info("New snapshot: {}".format(new_snapshot))
logging.info("New snapshot: %s", new_snapshot)
snapshot = new_snapshot

logging.info("Nodes: {}".format(len(nodes)))
logging.info("Nodes: %d", len(nodes))

reachable_nodes = set_reachable(nodes)
logging.info("New reachable nodes: {}".format(reachable_nodes))
logging.info("New reachable nodes: %d", reachable_nodes)

# Allow connections to stabilize before publishing snapshot
gevent.sleep(SETTINGS['socket_timeout'])
REDIS_CONN.publish('snapshot', int(time.time()))

connections = REDIS_CONN.scard('open')
logging.info("Connections: {}".format(connections))
logging.info("Connections: %d", connections)

set_bestblockhash()

for _ in xrange(min(REDIS_CONN.scard('reachable'), pool.free_count())):
pool.spawn(task)

workers = SETTINGS['workers'] - pool.free_count()
logging.info("Workers: {}".format(workers))
logging.info("Workers: %d", workers)

gevent.sleep(SETTINGS['cron_delay'])

@@ -323,7 +323,7 @@ def set_bestblockhash():
nodes = REDIS_CONN.zcard('inv:2:{}'.format(lastblockhash))
if nodes >= reachable_nodes / 2.0:
REDIS_CONN.set('bestblockhash', lastblockhash)
logging.info("bestblockhash: {}".format(lastblockhash))
logging.info("bestblockhash: %s", lastblockhash)


def init_settings(argv):
@@ -371,7 +371,7 @@ def main(argv):
filename=SETTINGS['logfile'],
filemode='a')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))

if SETTINGS['master']:
logging.info("Removing all keys")
resolve.py (22 changes: 11 additions & 11 deletions)
@@ -95,17 +95,17 @@ def resolve_addresses(self):
idx += 1
self.redis_pipe.execute()

logging.info("GeoIP: {}".format(len(self.resolved['geoip'])))
logging.info("GeoIP: %d", len(self.resolved['geoip']))
self.resolve_geoip()

logging.info("Hostname: {}".format(len(self.resolved['hostname'])))
logging.info("Hostname: %d", len(self.resolved['hostname']))
self.resolve_hostname()

self.cache_resolved()

end = time.time()
elapsed = end - start
logging.info("Elapsed: {}".format(elapsed))
logging.info("Elapsed: %d", elapsed)

def cache_resolved(self):
"""
@@ -118,8 +118,8 @@ def cache_resolved(self):
key = 'resolve:{}'.format(address)
self.redis_pipe.hset(key, 'geoip', geoip)
self.redis_pipe.expire(key, SETTINGS['ttl'])
logging.debug("{} geoip: {}".format(key, geoip))
logging.info("GeoIP: {} resolved".format(resolved))
logging.debug("%s geoip: %s", key, geoip)
logging.info("GeoIP: %d resolved", resolved)

resolved = 0
for address, hostname in self.resolved['hostname'].iteritems():
@@ -128,8 +128,8 @@ def cache_resolved(self):
key = 'resolve:{}'.format(address)
self.redis_pipe.hset(key, 'hostname', hostname)
self.redis_pipe.expire(key, SETTINGS['ttl'])
logging.debug("{} hostname: {}".format(key, hostname))
logging.info("Hostname: {} resolved".format(resolved))
logging.debug("%s hostname: %s", key, hostname)
logging.info("Hostname: %d resolved", resolved)

self.redis_pipe.execute()

@@ -167,7 +167,7 @@ def raw_hostname(address):
try:
hostname = socket.gethostbyaddr(address)[0]
except (socket.gaierror, socket.herror) as err:
logging.debug("{}: {}".format(address, err))
logging.debug("%s: %s", address, err)
return hostname


@@ -241,7 +241,7 @@ def main(argv):
filename=SETTINGS['logfile'],
filemode='w')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))

pubsub = REDIS_CONN.pubsub()
pubsub.subscribe('snapshot')
@@ -250,9 +250,9 @@
# connection with nodes from a new snapshot.
if msg['channel'] == 'snapshot' and msg['type'] == 'message':
timestamp = int(msg['data'])
logging.info("Timestamp: {}".format(timestamp))
logging.info("Timestamp: %d", timestamp)
nodes = REDIS_CONN.smembers('opendata')
logging.info("Nodes: {}".format(len(nodes)))
logging.info("Nodes: %d", len(nodes))
addresses = set([eval(node)[0] for node in nodes])
resolve = Resolve(addresses=addresses)
resolve.resolve_addresses()
seeder.py (24 changes: 12 additions & 12 deletions)
@@ -95,12 +95,12 @@ def save_zone_file(self):
"""
Saves A and AAAA records in DNS zone file.
"""
logging.info("A records: {}".format(len(self.a_records)))
logging.info("AAAA records: {}".format(len(self.aaaa_records)))
logging.info("A records: %d", len(self.a_records))
logging.info("AAAA records: %d", len(self.aaaa_records))
random.shuffle(self.a_records)
random.shuffle(self.aaaa_records)
serial = str(self.now)
logging.debug("Serial: {}".format(serial))
logging.debug("Serial: %s", serial)
template = open(SETTINGS['template'], "r").read()
template = template.replace("1413235952", serial)
content = "".join([
@@ -146,7 +146,7 @@ def get_min_height(self):
min_height = SETTINGS['min_height']
else:
min_height = int(min_height)
logging.info("Min. height: {}".format(min_height))
logging.info("Min. height: %d", min_height)
return min_height

def get_min_age(self):
@@ -157,10 +157,10 @@ def get_min_age(self):
"""
min_age = SETTINGS['min_age']
oldest = self.now - min(self.nodes, key=operator.itemgetter(4))[4]
logging.info("Longest uptime: {}".format(oldest))
logging.info("Longest uptime: %d", oldest)
if oldest < min_age:
min_age = oldest - (0.01 * oldest) # Max. 1% newer than oldest
logging.info("Min. age: {}".format(min_age))
logging.info("Min. age: %d", min_age)
return min_age

def is_blocked(self, address):
@@ -171,7 +171,7 @@ def is_blocked(self, address):
return False
for network in self.blocklist:
if ip_address(address) in network:
logging.debug("Blocked: {}".format(address))
logging.debug("Blocked: %s", address)
return True
return False

@@ -194,9 +194,9 @@ def update_blocklist(self):
network = line.split(";")[0].strip()
self.blocklist.add(ip_network(unicode(network)))
else:
logging.warning("HTTP{}: {} ({})".format(
response.status_code, url, response.content))
logging.debug("Blocklist entries: {}".format(len(self.blocklist)))
logging.warning("HTTP%d: %s (%s)",
response.status_code, url, response.content)
logging.debug("Blocklist entries: %d", len(self.blocklist))
self.blocklist_timestamp = self.now


@@ -208,7 +208,7 @@ def cron():
while True:
time.sleep(5)
dump = max(glob.iglob("{}/*.json".format(SETTINGS['export_dir'])))
logging.info("Dump: {}".format(dump))
logging.info("Dump: %s", dump)
seeder.export_nodes(dump)


@@ -249,7 +249,7 @@ def main(argv):
filename=SETTINGS['logfile'],
filemode='w')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))

cron()

