
XXX (temporarily?) removed meaningful names

git-svn-id: svn://forre.st/undns@1219 470744a7-cac9-478e-843e-5ec1b25c69e8
1 parent 28ca319 commit 50016a9d5d2ccdd737213f2adf1b915c729d6873 forrest committed Apr 10, 2011
Showing with 180 additions and 489 deletions.
  1. +33 −0 db.py
  2. +57 −71 packet.py
  3. +90 −407 server.py
  4. +0 −11 util.py
33 db.py
@@ -1,4 +1,7 @@
+import bsddb
import random
+import atexit
+import weakref
class CachingDictWrapper(object):
def __init__(self, inner, cache_size=10000):
@@ -95,3 +98,33 @@ def __contains__(self, item):
def __iter__(self):
return iter(self._inner)
+
+class ValueDictWrapper(object):
+ def __init__(self, inner):
+ self._inner = inner
+ def __len__(self):
+ return len(self._inner)
+ def __getitem__(self, key):
+ return self._decode(key, self._inner[key])
+ def __setitem__(self, key, value):
+ self._inner[key] = self._encode(key, value)
+ def __contains__(self, key):
+ return key in self._inner
+ def __iter__(self):
+ return iter(self._inner)
+ def keys(self):
+ return self._inner.keys()
+ def iteritems(self):
+ for k, v in self._inner.iteritems():
+ yield k, self._decode(k, v)
+
+def try_sync(db_weakref):
+ db = db_weakref()
+ if db is None:
+ return
+ db.sync()
+
+def safe_open_db(filename):
+ db = bsddb.hashopen(filename)
+ atexit.register(try_sync, weakref.ref(db))
+ return db
128 packet.py
@@ -3,13 +3,11 @@
import zlib
from Crypto.PublicKey import RSA
-import twisted.names.common, twisted.names.client, twisted.names.dns, twisted.names.server, twisted.names.error, twisted.names.authority
-del twisted
-from twisted import names
+from twisted.names import authority, common
import util
-class BindStringAuthority(names.authority.BindAuthority):
+class BindStringAuthority(authority.BindAuthority):
def __init__(self, contents, origin):
-names.common.ResolverBase.__init__(self)
+common.ResolverBase.__init__(self)
self.origin = origin
@@ -19,47 +17,9 @@ def __init__(self, contents, origin):
self.parseLines(lines)
self._cache = {}
-class Packet(object):
- @classmethod
- def from_binary(cls, x, address=None, address_hash=None):
- d = json.loads(zlib.decompress(x))
- return cls(util.tuple_to_key(d['public_key']), d['zone_file'], d['signature'], address, address_hash)
-
- def __init__(self, public_key, zone_file, signature, address=None, address_hash=None):
- if public_key.has_private():
- raise ValueError("key not public")
- if not public_key.verify(hashlib.sha1(zone_file).digest(), signature):
- raise ValueError("signature invalid")
-
- self._public_key = public_key
- self._zone_file = zone_file
- self._signature = signature
-
- self._address = util.key_to_address(self._public_key)
- self._address_hash = hashlib.sha1(self._address).digest()
- self._zone = BindStringAuthority(self._zone_file.encode('utf8'), self._address + '.')
-
- if address is not None and self.get_address() != address:
- raise ValueError("address not correct")
- if address_hash is not None and self.get_address_hash() != address_hash:
- raise ValueError("address hash not correct")
-
- def to_binary(self):
- return zlib.compress(json.dumps(dict(public_key=util.key_to_tuple(self._public_key), zone_file=self._zone_file, signature=self._signature)))
-
- def get_address(self):
- return self._address
-
- def get_address_hash(self):
- return self._address_hash
+class DomainKey(object):
+ "All that is needed to control a domain"
- def get_zone_file(self):
- return self._zone_file
-
- def get_zone(self):
- return self._zone
-
-class PrivateKey(object):
@classmethod
def generate(cls, rng):
return cls(RSA.generate(1024, rng))
@@ -80,55 +40,81 @@ def to_binary(self):
def get_address(self):
return util.key_to_address(self._private_key.publickey())
- def encode(self, zone_file, rng):
- return Packet(self._private_key.publickey(), zone_file, self._private_key.sign(hashlib.sha1(zone_file).digest(), rng))
+ def encode(self, record, rng):
+ return DomainPacket(self._private_key.publickey(), record, self._private_key.sign(record.get_hash(), rng))
-class TheirIdentity(object):
+class DomainPacket(object):
+ "All that is needed to securely convey a DomainRecord"
+
@classmethod
def from_binary(cls, x):
- return cls(util.tuple_to_key(json.loads(zlib.decompress(x))))
+ d = json.loads(zlib.decompress(x))
+ return cls(util.tuple_to_key(d['public_key']), DomainRecord.from_obj(d['record']), d['signature'])
- def __init__(self, public_key):
+ def __init__(self, public_key, record, signature):
if public_key.has_private():
raise ValueError("key not public")
self._public_key = public_key
+ self._record = record
+ self._signature = signature
+
+ self._address = util.key_to_address(self._public_key)
+ self._address_hash = util.hash_address(self._address).digest()
+ self._zone = None
def to_binary(self):
- return zlib.compress(json.dumps(util.key_to_tuple(self._public_key)))
+ return zlib.compress(json.dumps(dict(public_key=util.key_to_tuple(self._public_key), record=self._record.to_obj(), signature=self._signature)))
- def get_id(self):
- return util.hash_address(util.key_to_string(self._public_key))
+ def verify_signature(self):
+ return self._public_key.verify(self._record.get_hash(), self._signature)
+
+ def get_address(self):
+ return self._address
- def verify(self, data, signature):
- return self._public_key.verify(util.hash_sign(data), signature)
+ def get_address_hash(self):
+ return self._address_hash
+
+ def get_record(self):
+ return self._record
-class MyIdentity(object):
- @classmethod
- def generate(cls, rng):
- return cls(RSA.generate(1024, rng))
+class DomainRecord(object):
+ "Information about a domain"
@classmethod
- def from_binary(cls, x):
- return cls(util.tuple_to_key(json.loads(zlib.decompress(x))))
+ def from_obj(cls, (zone_file, start_time, end_time)):
+ return cls(zone_file, start_time, end_time)
- def __init__(self, private_key):
- if not private_key.has_private():
- raise ValueError("key not private")
+ def __init__(self, zone_file, start_time, end_time):
+ assert isinstance(zone_file, unicode)
+ assert isinstance(start_time, (int, long))
+ assert isinstance(end_time, (int, long))
- self._private_key = private_key
+ self._zone_file = zone_file
+ self._start_time = start_time
+ self._end_time = end_time
+
+ def to_obj(self):
+ return (self._zone_file, self._start_time, self._end_time)
def to_binary(self):
- return zlib.compress(json.dumps(util.key_to_tuple(self._private_key)))
+ return json.dumps(dict(zone_file=self._zone_file, start_time=self._start_time, end_time=self._end_time))
+
+ def get_zone_file(self):
+ return self._zone_file
+
+ def get_zone(self, address):
+ assert not address.endswith('.')
+ return BindStringAuthority(self._zone_file.encode('utf8'), address + '.')
- def get_id(self):
- return util.hash_address(util.key_to_string(self._private_key.publickey()))
+ def get_start_time(self):
+ return self._start_time
- def to_binary_public(self):
- return zlib.compress(json.dumps(util.key_to_tuple(self._private_key.publickey())))
+ def get_end_time(self):
+ return self._end_time
- def sign(self, data, rng):
- return self._private_key.sign(util.hash_sign(data), rng)
+ def get_hash(self):
+ return util.hash_sign(self.to_binary()).digest()
if __name__ == '__main__':
from Crypto import Random
497 server.py
@@ -11,9 +11,10 @@
import traceback
import json
import itertools
-import sqlite3
import time
-import bsddb
+import math
+
+from Crypto import Random
+rng = Random.new().read
import twisted.names.common, twisted.names.client, twisted.names.dns, twisted.names.server, twisted.names.error, twisted.names.authority
del twisted
@@ -37,10 +38,10 @@
parser = argparse.ArgumentParser(description=name)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument("-a", "--authoritative-dns", metavar="PORT",
- help="run an authoritative dns server on PORT; you likely don't want this - this is for _the_ public nameserver",
+ help="run a TCP+UDP authoritative dns server on PORT; you likely don't want this - this is for _the_ public nameserver",
type=int, action="append", default=[], dest="authoritative_dns_ports")
parser.add_argument("-r", "--recursive-dns", metavar="PORT",
- help="run a recursive dns server on PORT; you likely do want this - this is for clients",
+ help="run a TCP+UDP recursive dns server on PORT; you likely do want this - this is for clients",
type=int, action="append", default=[], dest="recursive_dns_ports")
parser.add_argument("-d", "--dht-port", metavar="PORT",
help="use UDP port PORT to connect to other DHT nodes and listen for connections (if not specified a random high port is chosen)",
@@ -86,82 +87,26 @@ def median(x):
right = len(y)//2
return (y[left] + y[right])/2
-def do_work(x, difficulty, stop_flag):
- d = "[%s, " % json.dumps(x)
- h = util.hash_difficulty(d)
- count = 0
- for i in itertools.count(random.randrange(2**63)):
- count += 1
- if count > 1000:
- return None
- if stop_flag[0]:
- return None
- d2 = "%i]" % i
- h2 = h.copy()
- h2.update(d2)
- if int(h2.hexdigest(), 16) % difficulty == 0:
- return d + d2
-
def sleep(t):
d = defer.Deferred()
reactor.callLater(t, d.callback, None)
return d
-GENESIS_DIFFICULTY = 100
-LOOKBEHIND = 20
-
-class Block(object):
- @classmethod
- def generate(cls, previous_hash, pos, timestamp, total_difficulty, message, difficulty):
- contents = (previous_hash, pos, timestamp, total_difficulty, message, difficulty)
- stop_flag = [False]
- def abort(d):
- stop_flag[0] = True
- d = defer.Deferred(abort)
- t = threads.deferToThread(do_work, contents, difficulty, stop_flag)
- def f(result):
- if isinstance(result, failure.Failure):
- return result
- if stop_flag[0]:
- return
- if result is None:
- return
- d.callback(cls(result))
- t.addBoth(f)
- return d
- def __init__(self, data):
- self.data = data
- self.hash_difficulty = int(util.hash_difficulty(data).hexdigest(), 16)
- self.hash_id = hashlib.sha1(self.data).hexdigest()
- (self.previous_hash, self.pos, self.timestamp, self.total_difficulty, self.message, self.difficulty), self.nonce = json.loads(data)
- if isinstance(self.previous_hash, unicode):
- self.previous_hash = str(self.previous_hash)
- def is_valid(self):
- if self.previous_hash is None:
- a
- else:
- a
-
-class BlockDictWrapper(object):
- def __init__(self, inner):
- self._inner = inner
- def __len__(self):
- return len(self._inner)
- def __getitem__(self, key):
- block = Block(self._inner[key])
- if block.hash_id != key:
- print "warning: invalid block in db!"
- self._inner[key]
- raise KeyError()
- return block
- def __setitem__(self, key, value):
- if value.hash_id != key:
- raise ValueError("invalid block insertion")
- self._inner[key] = value.data
- def __contains__(self, key):
- return key in self._inner
- def __iter__(self):
- return iter(self._inner)
+
+class DomainKeyDictWrapper(db.ValueDictWrapper):
+ def _encode(self, addr, domain_key):
+ assert addr == domain_key.get_address()
+ return domain_key.to_binary()
+ def _decode(self, addr, binary):
+ domain_key = packet.PrivateKey.from_binary(binary)
+ assert addr == domain_key.get_address()
+ return domain_key
+
+class JSONWrapper(db.ValueDictWrapper):
+ def _encode(self, addr, content):
+ return json.dumps(content)
+ def _decode(self, addr, binary):
+ return json.loads(binary)
class UnDNSNode(node.Node):
@property
@@ -174,340 +119,81 @@ def peers(self):
def __init__(self, *args, **kwargs):
node.Node.__init__(self, *args, **kwargs)
-
- blocks_db = bsddb.hashopen(db_prefix + '.blocks2')
- task.LoopingCall(blocks_db.sync).start(5)
- self.blocks = db.CachingDictWrapper(BlockDictWrapper(blocks_db))
-
- verified_db = bsddb.hashopen(db_prefix + '.verified')
- task.LoopingCall(verified_db.sync).start(5)
- self.verified = db.CachingSetWrapper(db.SetDictWrapper(verified_db))
-
- self.referrers = {} # hash -> list of hashes
- self.requests_in_progress = set()
-
- #self.best_block = max(
- # itertools.chain(
- # [None],
- # (self.blocks[block_hash] for block_hash in self.blocks if block_hash in self.verified),
- # ),
- # key=lambda block: 0 if block is None else block.total_difficulty,
- #)
- self.best_block = None
- self.best_block_callbacks = []
-
- try:
- self.best_block = self.blocks[open(db_prefix + '.best').read()]
- except:
- traceback.print_exc()
+ self.domains = DomainKeyDictWrapper(db.safe_open_db(db_prefix + '.domains'))
+ self.entries = JSONWrapper(db.safe_open_db(db_prefix + '.entries'))
+ self.clock_deltas = {} # contact -> (time, offset)
+ self.clock_offset = 0
def joinNetwork(self, *args, **kwargs):
node.Node.joinNetwork(self, *args, **kwargs)
self._joinDeferred.addCallback(lambda _: reactor.callLater(0, self.joined))
def joined(self):
+ self.time_task()
self.push_task()
- self.try_to_do_something()
+
+ def get_my_time(self):
+ return time.time() - self.clock_offset
+
+ @defer.inlineCallbacks
+ def time_task(self):
+ while True:
+ t_send = time.time()
+ requests = [(peer, peer.get_time().addCallback(lambda res: (time.time(), res))) for peer in self.peers]
+ results = []
+ self.clock_deltas[None] = (t_send, t_send)
+ for peer, request in requests:
+ try:
+ t_recv, response = yield request
+ t = .5 * (t_send + t_recv)
+ self.clock_deltas[(peer.id, peer.address)] = (t, float(response))
+ except:
+ traceback.print_exc()
+ continue
+
+ print self.clock_deltas
+ self.clock_offset = median(mine - theirs for mine, theirs in self.clock_deltas.itervalues())
+ print self.clock_offset
+
+ yield sleep(random.expovariate(1/10))
@defer.inlineCallbacks
def push_task(self):
while True:
- def x(resp):
- print resp
- for peer in self.peers:
- peer.ping().addCallback(x)
- #for packet in packets:
- # print "publishing", packet.get_address()
- # n.iterativeStore(packet.get_address_hash(), packet.to_binary())
+ for addr, (zone_file, ttl) in self.entries.iteritems():
+ self.push_addr(addr, zone_file, ttl)
yield sleep(random.expovariate(1/6))
- @node.rpcmethod
- def store(self, key, value, originalPublisherID=None, age=0, **kwargs):
- print "store", (self, key, value, originalPublisherID, age, kwargs)
-
- packet.Packet.from_binary(value, address_hash=key) # will throw an exception if not valid
-
- return node.Node.store(self, key, value, originalPublisherID, age, **kwargs)
-
- @node.rpcmethod
- def handle_new_block(self, block_data, _rpcNodeID, _rpcNodeContact):
+ def push_addr(self, addr, zone_file, ttl):
+ print "publishing", addr, (zone_file, ttl)
try:
- self.received_block(Block(block_data), _rpcNodeContact)
+ key = self.domains[addr]
except:
- traceback.print_exc()
+ print "MISSING KEY FOR", addr
+ return
+ t = self.get_my_time()
+ record = packet.DomainRecord(zone_file, int(t - 5), int(math.ceil(t + ttl + 5)))
+ pkt = key.encode(record, rng)
+ self.iterativeStore(pkt.get_address_hash(), pkt.to_binary())
@node.rpcmethod
- def handle_new_request(self, request_data):
- request = Request(request_data)
- if request.hash_id in self.requests:
- return
+ def store(self, key, value, originalPublisherID=None, age=0, **kwargs):
+ print "store", (self, key, value, originalPublisherID, age, kwargs)
- # check
+ pkt = packet.DomainPacket.from_binary(value)
+ if pkt.get_address_hash() != key:
+ raise ValueError("invalid address hash")
+ record = pkt.get_record()
+ if not record.get_start_time() < self.get_my_time() < record.get_end_time():
+ raise ValueError("invalid time range")
+ if not pkt.verify_signature():
+ raise ValueError("invalid signature")
- self.requests[request.hash_id] = request
-
- def check_request(self, request):
- if request.hash_difficulty % request.difficulty != 0:
- return False
-
- @node.rpcmethod
- def get_block(self, block_hash):
- block_data = None
- if block_hash in self.blocks:
- block = self.blocks[block_hash]
- assert block.hash_id == block_hash
- block_data = block.data
- return block_data
-
- @node.rpcmethod
- def get_blocks(self, block_hash, n):
- result = []
- while True:
- if block_hash in self.requests_in_progress:
- break
- try:
- block = self.blocks[block_hash]
- except KeyError:
- break
- result.append(block.data)
- if len(result) >= n:
- break
- block_hash = block.previous_hash
- if block_hash is None:
- break
- return result
-
- @node.rpcmethod
- def get_best_block_hash(self):
- return None if self.best_block is None else self.best_block.hash_id
+ return node.Node.store(self, key, value, originalPublisherID, age, **kwargs)
@node.rpcmethod
def get_time(self):
return time.time()
-
- def say(self, *x):
- print " " * (self.port%120), self.port, ' '.join(map(str,x))
-
- @defer.inlineCallbacks
- def try_to_do_something(self):
- while True:
- previous_block = self.best_block
- if previous_block is None:
- previous_hash = None
- pos = 0
- timestamp = int(time.time()*1000)
- message = {self.port: 1} # XXX insert self.requests
- difficulty = GENESIS_DIFFICULTY
- total_difficulty = 0 + difficulty
- else:
- previous_hash = previous_block.hash_id
- pos = previous_block.pos + 1
- timestamp = max(previous_block.timestamp, int(time.time()*1000))
- message = dict((int(k), int(v)) for k, v in previous_block.message.iteritems()) # XXX insert self.requests
- message[self.port] = message.get(self.port, 0) + 1
-
- if pos < 25:
- difficulty = GENESIS_DIFFICULTY
- else:
- cur = previous_block
- for i in xrange(LOOKBEHIND):
- if cur.previous_hash is None:
- break
- cur = self.blocks[cur.previous_hash]
-
- # want each block to take 10 seconds
- difficulty_sum = previous_block.total_difficulty - cur.total_difficulty
- dt = previous_block.timestamp - cur.timestamp
- if dt == 0:
- dt = 1
- difficulty = difficulty_sum * 10000 // dt
-
- total_difficulty = previous_block.total_difficulty + difficulty
-
- d = Block.generate(previous_hash, pos, timestamp, total_difficulty, message, difficulty)
- def abort(d=d):
- if not d.called:
- d.cancel()
- self.best_block_callbacks.append(abort)
- reactor.callLater(5, abort) # update timestamp
-
- try:
- result = yield d
- except defer.CancelledError:
- self.say("cancelled")
- continue # we aborted because of a new longest chain
- if result is None:
- continue
-
- self.say("generated", result.pos, result.message, result.difficulty, self.received_block(result))
-
- def received_block(self, block, from_node=None, depth=0):
- try:
- print block.data
- if block.hash_id in self.verified:
- return "already verified"
-
- if block.hash_difficulty % block.difficulty != 0:
- return "invalid nonce"
-
- if block.timestamp > (time.time() + 60 * 10) * 1000:
- return "block is from the future!"
-
- # this needs to change ... it should compare against all blocks, not the best verified block
- #if self.best_block is not None and block.pos < self.best_block.pos - 16:
- # return "you lose"
-
- if block.previous_hash is None:
- if block.pos != 0:
- return "not first"
-
- if block.difficulty != GENESIS_DIFFICULTY:
- return "genesis difficulty"
-
- if block.total_difficulty != block.difficulty:
- return "genesis total_difficulty"
-
- self.blocks[block.hash_id] = block
- self.referrers.setdefault(block.previous_hash, set()).add(block.hash_id)
- self.say("g_received", block.pos, block.message)
- self.verified_block(block, from_node, depth=depth + 1)
- return
- if block.previous_hash not in self.verified:
- self.blocks[block.hash_id] = block
- self.referrers.setdefault(block.previous_hash, set()).add(block.hash_id)
- self.say("h_received", block.pos, block.message)
-
- b = block
- while True:
- print 1
- assert b.previous_hash is not None, b.__dict__
- if b.previous_hash not in self.blocks:
- print .5
- if from_node is None:
- if not self.peers:
- print 2
- return
- from_node = random.choice(self.peers)
- def got_block(datas):
- print datas
- self.requests_in_progress.remove(b.previous_hash)
- for data in reversed(datas):
- block2 = Block(data)
- try:
- self.received_block(block2, from_node)
- except:
- traceback.print_exc()
- def got_error(fail):
- print fail
- self.requests_in_progress.remove(b.previous_hash)
- if b.previous_hash in self.requests_in_progress:
- print 3
- print "not requesting!", block.pos
- return "waiting on other request ..."
- print 4
- print "requesting", b.previous_hash
- self.requests_in_progress.add(b.previous_hash)
- from_node.get_blocks(b.previous_hash, 100).addCallbacks(got_block, got_error)
- return "waiting on block.."
- b = self.blocks[b.previous_hash]
- return
- else:
- previous_block = self.blocks[block.previous_hash]
-
- if block.pos != previous_block.pos + 1:
- return "pos needs to advance by 1"
-
- if block.timestamp < previous_block.timestamp:
- return "timestamp must not decrease"
-
- if block.total_difficulty != previous_block.total_difficulty + block.difficulty:
- return "genesis total_difficulty"
-
- if block.pos < 25:
- difficulty = GENESIS_DIFFICULTY
- else:
- cur = self.blocks[block.previous_hash]
- for i in xrange(LOOKBEHIND):
- if cur.previous_hash is None:
- break
- cur = self.blocks[cur.previous_hash]
-
- # want each block to take 10 seconds
- difficulty_sum = previous_block.total_difficulty - cur.total_difficulty
- dt = previous_block.timestamp - cur.timestamp
- if dt == 0:
- dt = 1
- difficulty = difficulty_sum * 10000 // dt
-
- if block.difficulty != difficulty:
- return "difficulty must follow pattern (%i != %i)" % (block.difficulty, difficulty)
-
- self.blocks[block.hash_id] = block
- self.referrers.setdefault(block.previous_hash, set()).add(block.hash_id)
- self.say("i_received", block.pos, block.difficulty, block.timestamp, block.message)
- self.verified_block(block, depth=depth + 1)
- except:
- traceback.print_exc()
-
- def verified_block(self, block, from_node=None, depth=0):
- assert block.previous_hash is None or block.previous_hash in self.verified
- assert block.hash_id in self.blocks
-
- self.verified.add(block.hash_id)
- self.say("verified", block.pos, block.message)
-
- for referring_block_hash_id in self.referrers.pop(block.hash_id, set()):
- referring_block = self.blocks[referring_block_hash_id]
- # no from_node here because we might send the newly released block back
- if depth > 100:
- reactor.callLater(0, self.received_block, referring_block)
- else:
- self.received_block(referring_block, depth=depth+1)
-
- for peer in self.peers:
- if peer == from_node:
- continue
- self.say("spreading to", peer.port)
- peer.handle_new_block(block.data).addErrback(lambda fail: None)
-
- if self.best_block is None or block.total_difficulty > self.best_block.total_difficulty:
- self.say("new best", block.pos, block.message)
- self.best_block = block
-
- open(db_prefix + '.best', 'w').write(self.best_block.hash_id)
- self.blocks._inner._inner.sync()
-
- cbs = self.best_block_callbacks
- self.best_block_callbacks = []
- for cb in cbs:
- cb()
-
- def try_to_verify(self, block):
- assert block.hash_id in self.blocks
-
- if block.previous_hash is None:
- if block.pos != 0:
- return "only first block doesn't need a reference"
-
- if block.difficulty != GENESIS_DIFFICULTY:
- return "genesis difficulty"
-
- if block.total_difficulty != block.difficulty:
- return "genesis total_difficulty"
-
- self.blocks[block.hash_id] = block
- self.referrers.setdefault(block.previous_hash, set()).add(block.hash_id)
- self.say("g_received", block.pos, block.message)
- self.verified_block(block, from_node, depth=depth + 1)
- a
- else:
- if block.pos <= 0:
- return "invalid position"
-
- a
-
- def __del__(self):
- print "DELETED"
n = UnDNSNode(udpPort=port, dataStore=dataStore)
n.joinNetwork(knownNodes)
@@ -576,31 +262,28 @@ def lineReceived(self, line):
def rpc_help(self):
return "hi!"
- def rpc_get_graph(self):
- self = n
- res = []
- if self.best_block is not None:
- cur = self.best_block
- while True:
- res.append((cur.timestamp, cur.difficulty))
- if cur.previous_hash is None:
- break
- cur = self.blocks[cur.previous_hash]
- return "{" + ', '.join("{%i, %i}" % x for x in res[::-1]) + "}"
+ def rpc_list_domains(self):
+ return n.domains.keys()
- def rpc_register(self, name, contents, ttl):
- # update time and expiry time are different
- a
+ def rpc_register(self):
+ domain_key = packet.PrivateKey.generate(rng)
+ addr = domain_key.get_address()
+ n.domains[addr] = domain_key
+ return addr
- def rpc_update(self, name, contents, ttl):
- a
+ def rpc_update(self, addr, contents, ttl):
+ if addr not in n.domains:
+ return "don't have key"
+ n.entries[addr] = (contents, ttl)
+ n.push_addr(addr, contents, ttl)
- def rpc_transfer(self, name, dest):
- # change key, contents remain
- a
+ def rpc_get(self, addr):
+ return n.entries[addr]
- def rpc_drop(self, name):
- a
+ def rpc_drop(self, addr):
+ del n.domains[addr]
+ if addr in n.entries:
+ del n.entries[addr]
rpc_factory = protocol.ServerFactory()
rpc_factory.protocol = RPCProtocol
11 util.py
@@ -149,17 +149,6 @@ def hexdigest(self):
hash_class.digest_size = len(hash_class().digest())
return hash_class
-# for block difficulty checking
-# not limited
-# if this is broken, people have an advantage in generating blocks, possibly compromising the network
-# interleaving the bits of two hash algorithms prevents breaking of one algorithm from resulting in more than a 2x speed gain
-hash_difficulty = make_hash_class([whirlpool, sha256], interleave=True)
-
-# for block references
-# limited by storage space for chain
-# if this is broken, people can change the history of the block chain
-hash_block = make_hash_class([ripemd160, sha256], interleave=False)
-
# for signing
# if this is broken, people can forge DHT node IDs and gain control over randomly-generated addresses by generating messages that fit a past signature
hash_sign = make_hash_class([whirlpool, ripemd160, sha256], interleave=False)
