From 5aef6ab7a25cf9d95a1d9e64216bb16e93d96c49 Mon Sep 17 00:00:00 2001
From: nsa
Date: Thu, 21 Sep 2017 09:57:09 -0400
Subject: [PATCH 01/23] sphinx: replay protection and garbage collector

This commit introduces persistent replay protection against the reuse
of shared secrets from prior HTLCs. A new data structure, DecayedLog,
was created that stores all shared secrets in boltdb and persists them
across restarts. Additionally, it comes with a set of tests that assert
its persistence guarantees. DecayedLog adheres to the newly created
PersistLog interface. DecayedLog also comes with a garbage collector
that removes expired shared secrets from the backing boltdb.
---
 bench_test.go                 |  21 ++-
 cmd/main.go                   |   2 +-
 glide.lock                    |  18 +-
 glide.yaml                    |   7 +
 persistlog/decayedlog.go      | 236 ++++++++++++++++++++++++
 persistlog/decayedlog_test.go | 330 ++++++++++++++++++++++++++++++++++
 persistlog/interface.go       |  26 +++
 sphinx.go                     |  67 ++++---
 sphinx_test.go                |  42 ++++-
 9 files changed, 702 insertions(+), 47 deletions(-)
 create mode 100644 persistlog/decayedlog.go
 create mode 100644 persistlog/decayedlog_test.go
 create mode 100644 persistlog/interface.go

diff --git a/bench_test.go b/bench_test.go
index 3755db2..1cbbc77 100644
--- a/bench_test.go
+++ b/bench_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/roasbeef/btcd/btcec"
+	"github.com/lightningnetwork/lightning-onion/persistlog"
 )
 
 var (
@@ -55,7 +56,14 @@ func BenchmarkPathPacketConstruction(b *testing.B) {
 
 func BenchmarkProcessPacket(b *testing.B) {
 	b.StopTimer()
-	path, _, sphinxPacket, err := newTestRoute(1)
+
+	// Create the DecayedLog object
+	d := persistlog.DecayedLog{}
+	if err := d.Start(); err != nil {
+		b.Fatalf("unable to start channeldb")
+	}
+
+	path, _, sphinxPacket, err := newTestRoute(1, d)
 	if err != nil {
 		b.Fatalf("unable to create test route: %v", err)
 	}
@@ -65,15 +73,10 @@ func BenchmarkProcessPacket(b *testing.B) {
 	var (
 		pkt *ProcessedPacket
 	)
-	for i := 0; i < b.N; i++ {
-		pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil)
-		if err != nil {
-			b.Fatalf("unable to process packet: %v", err)
-		}
 
-		b.StopTimer()
-		path[0].seenSecrets = make(map[[sharedSecretSize]byte]struct{})
-		b.StartTimer()
+	pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil)
+	if err != nil {
+		b.Fatalf("unable to process packet: %v", err)
 	}
 
 	p = pkt
diff --git a/cmd/main.go b/cmd/main.go
index 60e0d71..0f7b454 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -76,7 +76,7 @@ func main() {
 		}
 		privkey, _ := btcec.PrivKeyFromBytes(btcec.S256(), binKey)
 
-		s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params)
+		s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params, nil)
 
 		var packet sphinx.OnionPacket
 		err = packet.Decode(bytes.NewBuffer(binMsg))
diff --git a/glide.lock b/glide.lock
index f73c478..3104318 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,29 +1,35 @@
-hash: edf51fbb3ee6f3e3f9e39d5ffc33739e8a2817f6c990042840594e867a8bc94a
-updated: 2017-06-24T21:22:50.234773431+03:00
+hash: e37d8986cb181b5a89c7cd46611858aa1edd67e52f16f521d92a479c2f1841c9
+updated: 2017-09-21T01:35:04.66549614-04:00
 imports:
 - name: github.com/aead/chacha20
   version: d31a916ded42d1640b9d89a26f8abd53cc96790c
   subpackages:
   - chacha
-- name: github.com/btcsuite/fastsha256
-  version: 637e656429416087660c84436a2a035d69d54e2e
+- name: github.com/boltdb/bolt
+  version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8
 - name: github.com/btcsuite/golangcrypto
   version: 53f62d9b43e87a6c56975cf862af7edf33a8d0df
   subpackages:
   - ripemd160
 - name: github.com/go-errors/errors
   version: 8fa88b06e5974e97fbf9899a7f86a344bfd1f105
+- name: github.com/lightningnetwork/lnd
+  version: a314e661bd1fd4ed2aeeb14db5f2d68b1b424e2b
+  subpackages:
+  - chainntnfs
+  - channeldb
 - name: github.com/roasbeef/btcd
-  version: 707a14a79daeb2440fe92feaeceb0fae68ab3e9b
+  version: 4b411f0e78f1faa96ae650d886170d018c1835bf
   subpackages:
   - btcec
   - chaincfg
   - chaincfg/chainhash
   - wire
 - name: github.com/roasbeef/btcutil
-  version: d347e49b656d2a7f6d06cc9e2daebc5acded5728
+  version: 1584022350b4400b511beab6a013f0189adeef40
   subpackages:
   - base58
+  - bech32
 - name: golang.org/x/crypto
   version: 459e26527287adbc2adcc5d0d49abff9a5f315a7
   subpackages:
diff --git a/glide.yaml b/glide.yaml
index 059d502..c058d8e 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -1,11 +1,18 @@
 package: github.com/lightningnetwork/lightning-onion
 import:
+- package: github.com/boltdb/bolt
+  version: ^1.2.1
 - package: github.com/aead/chacha20
   version: d31a916ded42d1640b9d89a26f8abd53cc96790c
+- package: github.com/lightningnetwork/lnd
+  subpackages:
+  - chainntnfs
+  - channeldb
 - package: github.com/roasbeef/btcd
   subpackages:
   - btcec
   - chaincfg
+  - wire
 - package: github.com/roasbeef/btcutil
 - package: golang.org/x/crypto
   subpackages:
diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go
new file mode 100644
index 0000000..5a21d92
--- /dev/null
+++ b/persistlog/decayedlog.go
@@ -0,0 +1,236 @@
+package persistlog
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"github.com/boltdb/bolt"
+	"github.com/lightningnetwork/lnd/chainntnfs"
+	"github.com/lightningnetwork/lnd/channeldb"
+	"sync"
+)
+
+const (
+	// defaultDbDirectory is the default directory where our decayed log
+	// will store our (sharedHash, CLTV expiry height) key-value pairs.
+	defaultDbDirectory = "sharedsecret"
+
+	// sharedHashSize is the size in bytes of the keys we will be storing
+	// in the DecayedLog. It represents the first 20 bytes of a truncated
+	// sha-256 hash of a secret generated by ECDH.
+	sharedHashSize = 20
+
+	// sharedSecretSize is the size in bytes of the shared secrets.
+	sharedSecretSize = 32
+)
+
+var (
+	// sharedHashBucket is a bucket which houses all the first sharedHashSize
+	// bytes of a received HTLC's hashed shared secret and the HTLC's
+	// expiry block height.
+	sharedHashBucket = []byte("shared-hash")
+)
+
+// DecayedLog implements the PersistLog interface. It stores the first
+// sharedHashSize bytes of a sha256-hashed shared secret along with a node's
+// CLTV value. It is a decaying log meaning there will be a garbage collector
+// to collect entries which are expired according to their stored CLTV value
+// and the current block height. DecayedLog wraps channeldb for simplicity, but
+// must batch writes to the database to decrease write contention.
+type DecayedLog struct {
+	db       *channeldb.DB
+	wg       sync.WaitGroup
+	quit     chan (struct{})
+	Notifier chainntnfs.ChainNotifier
+}
+
+// garbageCollector deletes entries from sharedHashBucket whose expiry height
+// has already passed. This function MUST be run as a goroutine.
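+// It blocks on block epoch notifications from the configured ChainNotifier
+// and, for each new block, removes every entry whose stored CLTV expiry
+// height lies below the new tip height.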
+func (d *DecayedLog) garbageCollector() error { + defer d.wg.Done() + + epochClient, err := d.Notifier.RegisterBlockEpochNtfn() + if err != nil { + return fmt.Errorf("Unable to register for epoch "+ + "notification: %v", err) + } + defer epochClient.Cancel() + +outer: + for { + select { + case epoch, ok := <-epochClient.Epochs: + if !ok { + return fmt.Errorf("Epoch client shutting " + + "down") + } + + var expiredCltv [][]byte + err := d.db.View(func(tx *bolt.Tx) error { + // Grab the shared hash bucket + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return fmt.Errorf("sharedHashBucket " + + "is nil") + } + + sharedHashes.ForEach(func(k, v []byte) error { + cltv := uint32(binary.BigEndian.Uint32(v)) + if uint32(epoch.Height) > cltv { + // Store expired hash in array + expiredCltv = append(expiredCltv, k) + } + return nil + }) + + return nil + }) + if err != nil { + return fmt.Errorf("Error viewing channeldb: "+ + "%v", err) + } + + // Delete every item in array + for _, hash := range expiredCltv { + err = d.Delete(hash) + if err != nil { + return fmt.Errorf("Unable to delete"+ + "expired secret: %v", err) + } + } + + case <-d.quit: + break outer + } + } + + return nil +} + +// A compile time check to see if DecayedLog adheres to the PersistLog +// interface. +var _ PersistLog = (*DecayedLog)(nil) + +// HashSharedSecret Sha-256 hashes the shared secret and returns the first +// sharedHashSize bytes of the hash. +func HashSharedSecret(sharedSecret [sharedSecretSize]byte) [sharedHashSize]byte { + // Sha256 hash of sharedSecret + h := sha256.New() + h.Write(sharedSecret[:]) + + var sharedHash [sharedHashSize]byte + + // Copy bytes to sharedHash + copy(sharedHash[:], h.Sum(nil)[:sharedHashSize]) + return sharedHash +} + +// Delete removes a key-pair from the +// sharedHashBucket. +func (d *DecayedLog) Delete(hash []byte) error { + return d.db.Update(func(tx *bolt.Tx) error { + sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) + if err != nil { + return fmt.Errorf("Unable to created sharedHashes bucket:"+ + " %v", err) + } + + return sharedHashes.Delete(hash) + }) +} + +// Get retrieves the CLTV value of a processed HTLC given the first 20 bytes +// of the Sha-256 hash of the shared secret used during sphinx processing. +func (d *DecayedLog) Get(hash []byte) ( + uint32, error) { + var value uint32 + + err := d.db.View(func(tx *bolt.Tx) error { + // Grab the shared hash bucket which stores the mapping from + // truncated sha-256 hashes of shared secrets to CLTV values. + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return fmt.Errorf("sharedHashes is nil, could " + + "not retrieve CLTV value") + } + + // If the sharedHash is found, we use it to find the associated + // CLTV in the sharedHashBucket. + valueBytes := sharedHashes.Get(hash) + if valueBytes == nil { + return nil + } + + value = uint32(binary.BigEndian.Uint32(valueBytes)) + + return nil + }) + if err != nil { + return value, err + } + + return value, nil +} + +// Put stores a key-pair into the +// sharedHashBucket. 
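+// The stored value is the 4-byte big-endian CLTV expiry height of the HTLC,
+// which the garbage collector compares against incoming block epochs.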
+func (d *DecayedLog) Put(hash []byte, + value uint32) error { + return d.db.Batch(func(tx *bolt.Tx) error { + var scratch [4]byte + + sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) + if err != nil { + return fmt.Errorf("Unable to create bucket sharedHashes:"+ + " %v", err) + } + + // Store value into scratch + binary.BigEndian.PutUint32(scratch[:], value) + + return sharedHashes.Put(hash, scratch[:]) + }) +} + +// Start opens the database we will be using to store hashed shared secrets. +// It also starts the garbage collector in a goroutine to remove stale +// database entries. +func (d *DecayedLog) Start() error { + // Create the quit channel + d.quit = make(chan struct{}) + + // Open the channeldb for use. + var err error + if d.db, err = channeldb.Open(defaultDbDirectory); err != nil { + return fmt.Errorf("Could not open channeldb: %v", err) + } + + err = d.db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(sharedHashBucket) + if err != nil { + return fmt.Errorf("Unable to create bucket sharedHashes:"+ + " %v", err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Could not create sharedHashes") + } + + // Start garbage collector. + if d.Notifier != nil { + d.wg.Add(1) + go d.garbageCollector() + } + + return nil +} + +// Stop halts the garbage collector and closes channeldb. +func (d *DecayedLog) Stop() { + // Stop garbage collector. + close(d.quit) + + // Close channeldb. + d.db.Close() +} diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go new file mode 100644 index 0000000..3d95392 --- /dev/null +++ b/persistlog/decayedlog_test.go @@ -0,0 +1,330 @@ +package persistlog + +import ( + "crypto/sha256" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/roasbeef/btcd/btcec" + "github.com/roasbeef/btcd/chaincfg/chainhash" + "github.com/roasbeef/btcd/wire" + "testing" + "time" +) + +var ( + // Bytes of a private key + key = [32]byte{ + 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, + } +) + +type mockNotifier struct { + confChannel chan *chainntnfs.TxConfirmation + epochChan chan *chainntnfs.BlockEpoch +} + +func (m *mockNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) { + return &chainntnfs.BlockEpochEvent{ + Epochs: m.epochChan, + Cancel: func() {}, + }, nil +} + +func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, numConfs, + heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { + return nil, nil +} + +func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, + heightHint uint32) (*chainntnfs.SpendEvent, error) { + return nil, nil +} + +func (m *mockNotifier) Start() error { + return nil +} + +func (m *mockNotifier) Stop() error { + return nil +} + +// generateSharedSecret generates a shared secret given a public key and a +// private key. It is directly copied from sphinx.go. +func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) [32]byte { + s := &btcec.PublicKey{} + x, y := btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + s.X = x + s.Y = y + + return sha256.Sum256(s.SerializeCompressed()) +} + +// TestDecayedLogGarbageCollector tests the ability of the garbage collector +// to delete expired cltv values every time a block is received. Expired cltv +// values are cltv values that are <= current block height. 
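+// The mock chain notifier defined above lets the test publish block epochs
+// to the garbage collector deterministically.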
+func TestDecayedLogGarbageCollector(t *testing.T) { + // Random (EXPIRED) cltv value + cltv := uint32(200390) + + // Create the MockNotifier which triggers the garbage collector + MockNotifier := &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch, 1), + } + + // Create a DecayedLog object + d := DecayedLog{Notifier: MockNotifier} + + // Open the channeldb (start the garbage collector) + err := d.Start() + if err != nil { + t.Fatalf("Unable to start / open DecayedLog") + } + defer d.Stop() + + // Create a new private key on elliptic curve secp256k1 + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Fatalf("Unable to create new private key") + } + + // Generate a public key from the key bytes + _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) + + // Generate a shared secret with the public and private keys we made + secret := generateSharedSecret(testPub, priv) + + // Create the hashedSecret given the shared secret we just generated. + // This is the first 20 bytes of the Sha-256 hash of the shared secret. + // This is used as a key to retrieve the cltv value. + hashedSecret := HashSharedSecret(secret) + + // Store in the sharedHashBucket. + err = d.Put(hashedSecret[:], cltv) + if err != nil { + t.Fatalf("Unable to store in channeldb") + } + + // Send Block notification to garbage collector. The garbage collector + // should remove the entry we just added to sharedHashBucket as it is + // now expired. + MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(cltv + 1), + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Assert that hashedSecret is not in the sharedHashBucket + val, err := d.Get(hashedSecret[:]) + if err != nil { + t.Fatalf("Delete failed - received an error upon Get") + } + + if val != 0 { + t.Fatalf("cltv was not deleted") + } +} + +// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the nested +// sharedHashBucket and then deletes it and finally asserts that we can no +// longer retrieve it. +func TestDecayedLogInsertionAndDeletion(t *testing.T) { + // Random cltv value + cltv := uint32(503928) + + // Create a DecayedLog object + d := DecayedLog{} + + // Open the channeldb + err := d.Start() + if err != nil { + t.Fatalf("Unable to start / open DecayedLog") + } + defer d.Stop() + + // Create a new private key on elliptic curve secp256k1 + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Fatalf("Unable to create new private key") + } + + // Generate a public key from the key bytes + _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) + + // Generate a shared secret with the public and private keys we made + secret := generateSharedSecret(testPub, priv) + + // Create the hashedSecret given the shared secret we just generated. + // This is the first 20 bytes of the Sha-256 hash of the shared secret. + // This is used as a key to retrieve the cltv value. + hashedSecret := HashSharedSecret(secret) + + // Store in the sharedHashBucket. + err = d.Put(hashedSecret[:], cltv) + if err != nil { + t.Fatalf("Unable to store in channeldb") + } + + // Delete hashedSecret from the sharedHashBucket. 
+	err = d.Delete(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to delete from channeldb")
+	}
+
+	// Assert that hashedSecret is not in the sharedHashBucket
+	val, err := d.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Delete failed - received the wrong error message")
+	}
+
+	if val != 0 {
+		t.Fatalf("cltv was not deleted")
+	}
+
+}
+
+// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started,
+// a cltv value is stored in the sharedHashBucket, and then the DecayedLog
+// is stopped. The DecayedLog is then started up again and we test that the
+// cltv value is indeed still stored in the sharedHashBucket. We then delete
+// the cltv value and check that the deletion persists upon restart.
+func TestDecayedLogStartAndStop(t *testing.T) {
+	// Random cltv value
+	cltv := uint32(909020)
+
+	// Create a DecayedLog object
+	d := DecayedLog{}
+
+	// Open the channeldb
+	err := d.Start()
+	if err != nil {
+		t.Fatalf("Unable to start / open DecayedLog")
+	}
+	defer d.Stop()
+
+	// Create a new private key on elliptic curve secp256k1
+	priv, err := btcec.NewPrivateKey(btcec.S256())
+	if err != nil {
+		t.Fatalf("Unable to create new private key")
+	}
+
+	// Generate a public key from the key bytes
+	_, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:])
+
+	// Generate a shared secret with the public and private keys we made
+	secret := generateSharedSecret(testPub, priv)
+
+	// Create the hashedSecret given the shared secret we just generated.
+	// This is the first 20 bytes of the Sha-256 hash of the shared secret.
+	// This is used as a key to retrieve the cltv value.
+	hashedSecret := HashSharedSecret(secret)
+
+	// Store in the sharedHashBucket.
+	err = d.Put(hashedSecret[:], cltv)
+	if err != nil {
+		t.Fatalf("Unable to store in channeldb")
+	}
+
+	// Shutdown the DecayedLog's channeldb
+	d.Stop()
+
+	// Startup the DecayedLog's channeldb
+	err = d.Start()
+	if err != nil {
+		t.Fatalf("Unable to start / open DecayedLog")
+	}
+
+	// Retrieve the stored cltv value given the hashedSecret key.
+	value, err := d.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to retrieve from channeldb")
+	}
+
+	// Check that the original cltv value matches the retrieved cltv
+	// value.
+	if cltv != value {
+		t.Fatalf("Value retrieved doesn't match value stored")
+	}
+
+	// Delete hashedSecret from sharedHashBucket
+	err = d.Delete(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to delete from channeldb")
+	}
+
+	// Shutdown the DecayedLog's channeldb
+	d.Stop()
+
+	// Startup the DecayedLog's channeldb
+	err = d.Start()
+	if err != nil {
+		t.Fatalf("Unable to start / open DecayedLog")
+	}
+
+	// Assert that hashedSecret is not in the sharedHashBucket
+	val, err := d.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Delete failed - received the wrong error message")
+	}
+
+	if val != 0 {
+		t.Fatalf("cltv was not deleted")
+	}
+
+}
+
+// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it
+// via the nested sharedHashBucket and finally asserts that the original stored
+// and retrieved cltv values are equal.
+func TestDecayedLogStorageAndRetrieval(t *testing.T) { + // Random cltv value + cltv := uint32(302930) + + // Create a DecayedLog object + d := DecayedLog{} + + // Open the channeldb + err := d.Start() + if err != nil { + t.Fatalf("Unable to start / open DecayedLog") + } + defer d.Stop() + + // Create a new private key on elliptic curve secp256k1 + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Fatalf("Unable to create new private key") + } + + // Generate a public key from the key bytes + _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) + + // Generate a shared secret with the public and private keys we made + secret := generateSharedSecret(testPub, priv) + + // Create the hashedSecret given the shared secret we just generated. + // This is the first 20 bytes of the Sha-256 hash of the shared secret. + // This is used as a key to retrieve the cltv value. + hashedSecret := HashSharedSecret(secret) + + // Store in the sharedHashBucket + err = d.Put(hashedSecret[:], cltv) + if err != nil { + t.Fatalf("Unable to store in channeldb") + } + + // Retrieve the stored cltv value given the hashedSecret key. + value, err := d.Get(hashedSecret[:]) + if err != nil { + t.Fatalf("Unable to retrieve from channeldb") + } + + // If the original cltv value does not match the value retrieved, + // then the test failed. + if cltv != value { + t.Fatalf("Value retrieved doesn't match value stored") + } + +} diff --git a/persistlog/interface.go b/persistlog/interface.go new file mode 100644 index 0000000..fb8a24f --- /dev/null +++ b/persistlog/interface.go @@ -0,0 +1,26 @@ +package persistlog + +// PersistLog is an interface that defines a new on-disk data structure that +// contains a persistent log. The interface is general to allow implementations +// near-complete autonomy. All of these calls should be safe for concurrent +// access. +type PersistLog interface { + // Delete deletes an entry from the persistent log given []byte + Delete([]byte) error + + // Get retrieves an entry from the persistent log given a []byte. It + // returns the value stored and an error if one occurs. + Get([]byte) (uint32, error) + + // Put stores an entry into the persistent log given a []byte and an + // accompanying purposefully general type. It returns an error if one + // occurs. + Put([]byte, uint32) error + + // Start starts up the on-disk persistent log. It returns an error if + // one occurs. + Start() error + + // Stop safely stops the on-disk persistent log. + Stop() +} diff --git a/sphinx.go b/sphinx.go index 96027c4..9c87755 100644 --- a/sphinx.go +++ b/sphinx.go @@ -9,9 +9,10 @@ import ( "io" "io/ioutil" "math/big" - "sync" "github.com/aead/chacha20" + "github.com/lightningnetwork/lightning-onion/persistlog" + "github.com/lightningnetwork/lnd/chainntnfs" "github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/chaincfg" "github.com/roasbeef/btcutil" @@ -620,20 +621,23 @@ type Router struct { onionKey *btcec.PrivateKey - sync.RWMutex - - seenSecrets map[[sharedSecretSize]byte]struct{} + d persistlog.DecayedLog } // NewRouter creates a new instance of a Sphinx onion Router given the node's // currently advertised onion private key, and the target Bitcoin network. 
-func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params) *Router { +func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, + chainNotifier chainntnfs.ChainNotifier) *Router { var nodeID [addressSize]byte copy(nodeID[:], btcutil.Hash160(nodeKey.PubKey().SerializeCompressed())) // Safe to ignore the error here, nodeID is 20 bytes. nodeAddr, _ := btcutil.NewAddressPubKeyHash(nodeID[:], net) + d := persistlog.DecayedLog{ + Notifier: chainNotifier, + } + return &Router{ nodeID: nodeID, nodeAddr: nodeAddr, @@ -647,10 +651,22 @@ func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params) *Router { }, // TODO(roasbeef): replace instead with bloom filter? // * https://moderncrypto.org/mail-archive/messaging/2015/001911.html - seenSecrets: make(map[[sharedSecretSize]byte]struct{}), + d: d, } } +// Start starts / opens the DecayedLog's channeldb and its accompanying +// garbage collector goroutine. +func (r *Router) Start() error { + return r.d.Start() +} + +// Stop stops / closes the DecayedLog's channeldb and its accompanying +// garbage collector goroutine. +func (r *Router) Stop() { + r.d.Stop() +} + // ProcessOnionPacket processes an incoming onion packet which has been forward // to the target Sphinx router. If the encoded ephemeral key isn't on the // target Elliptic Curve, then the packet is rejected. Similarly, if the @@ -673,12 +689,14 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P // In order to mitigate replay attacks, if we've seen this particular // shared secret before, cease processing and just drop this forwarding // message. - r.RLock() - if _, ok := r.seenSecrets[sharedSecret]; ok { - r.RUnlock() + hashedSecret := persistlog.HashSharedSecret(sharedSecret) + cltv, err := r.d.Get(hashedSecret[:]) + if err != nil { + return nil, err + } + if cltv != 0 { return nil, ErrReplayedPacket } - r.RUnlock() // Using the derived shared secret, ensure the integrity of the routing // information by checking the attached MAC without leaking timing @@ -689,18 +707,6 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P return nil, ErrInvalidOnionHMAC } - // The MAC checks out, mark this current shared secret as processed in - // order to mitigate future replay attacks. We need to check to see if - // we already know the secret again since a replay might have happened - // while we were checking the MAC. - r.Lock() - if _, ok := r.seenSecrets[sharedSecret]; ok { - r.RUnlock() - return nil, ErrReplayedPacket - } - r.seenSecrets[sharedSecret] = struct{}{} - r.Unlock() - // Attach the padding zeroes in order to properly strip an encryption // layer off the routing info revealing the routing information for the // next hop. @@ -722,6 +728,23 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P return nil, err } + // The MAC checks out, mark this current shared secret as processed in + // order to mitigate future replay attacks. We need to check to see if + // we already know the secret again since a replay might have happened + // while we were checking the MAC and decoding the HopData. + cltv, err = r.d.Get(hashedSecret[:]) + if err != nil { + return nil, err + } + if cltv != 0 { + return nil, ErrReplayedPacket + } + + err = r.d.Put(hashedSecret[:], hopData.OutgoingCltv) + if err != nil { + return nil, err + } + // With the necessary items extracted, we'll copy of the onion packet // for the next node, snipping off our per-hop data. 
var nextMixHeader [routingInfoSize]byte diff --git a/sphinx_test.go b/sphinx_test.go index 1595dfe..8b98dc9 100644 --- a/sphinx_test.go +++ b/sphinx_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lightning-onion/persistlog" "github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/chaincfg" ) @@ -86,7 +87,7 @@ var ( "baaa7d63ad64199f4664813b955cff954949076dcf" ) -func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) { +func newTestRoute(numHops int, d persistlog.DecayedLog) ([]*Router, *[]HopData, *OnionPacket, error) { nodes := make([]*Router, numHops) // Create numHops random sphinx nodes. @@ -97,7 +98,8 @@ func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) { " random key for sphinx node: %v", err) } - nodes[i] = NewRouter(privKey, &chaincfg.MainNetParams) + nodes[i] = NewRouter(privKey, &chaincfg.MainNetParams, nil) + nodes[i].d = d } // Gather all the pub keys in the path. @@ -111,7 +113,9 @@ func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) { hopsData = append(hopsData, HopData{ Realm: 0x00, ForwardAmount: uint64(i), - OutgoingCltv: uint32(i), + OutgoingCltv: uint32(i + 1), + // This is used to avoid a CLTV of 0, which is + // considered a non-CLTV value by DecayedLog. }) copy(hopsData[i].NextAddress[:], bytes.Repeat([]byte{byte(i)}, 8)) } @@ -178,7 +182,11 @@ func TestBolt4Packet(t *testing.T) { } func TestSphinxCorrectness(t *testing.T) { - nodes, hopDatas, fwdMsg, err := newTestRoute(NumMaxHops) + d := persistlog.DecayedLog{} + if err := d.Start(); err != nil { + t.Fatalf("unable to start channeldb") + } + nodes, hopDatas, fwdMsg, err := newTestRoute(NumMaxHops, d) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } @@ -240,8 +248,11 @@ func TestSphinxSingleHop(t *testing.T) { // We'd like to test the proper behavior of the correctness of onion // packet processing for "single-hop" payments which bare a full onion // packet. - - nodes, _, fwdMsg, err := newTestRoute(1) + d := persistlog.DecayedLog{} + if err := d.Start(); err != nil { + t.Fatalf("unable to start channeldb") + } + nodes, _, fwdMsg, err := newTestRoute(1, d) if err != nil { t.Fatalf("unable to create test route: %v", err) } @@ -264,7 +275,12 @@ func TestSphinxSingleHop(t *testing.T) { func TestSphinxNodeRelpay(t *testing.T) { // We'd like to ensure that the sphinx node itself rejects all replayed // packets which share the same shared secret. - nodes, _, fwdMsg, err := newTestRoute(NumMaxHops) + d := persistlog.DecayedLog{} + if err := d.Start(); err != nil { + t.Fatalf("unable to start channeldb") + } + + nodes, _, fwdMsg, err := newTestRoute(NumMaxHops, d) if err != nil { t.Fatalf("unable to create test route: %v", err) } @@ -285,7 +301,11 @@ func TestSphinxNodeRelpay(t *testing.T) { func TestSphinxAssocData(t *testing.T) { // We want to make sure that the associated data is considered in the // HMAC creation - nodes, _, fwdMsg, err := newTestRoute(5) + d := persistlog.DecayedLog{} + if err := d.Start(); err != nil { + t.Fatalf("unable to start channeldb") + } + nodes, _, fwdMsg, err := newTestRoute(5, d) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } @@ -299,7 +319,11 @@ func TestSphinxAssocData(t *testing.T) { func TestSphinxEncodeDecode(t *testing.T) { // Create some test data with a randomly populated, yet valid onion // forwarding message. 
- _, _, fwdMsg, err := newTestRoute(5) + d := persistlog.DecayedLog{} + if err := d.Start(); err != nil { + t.Fatalf("unable to start channeldb") + } + _, _, fwdMsg, err := newTestRoute(5, d) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } From f0fc2e7019cde418237f7a7a3c8f564074bdb802 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 21 Sep 2017 22:42:59 -0400 Subject: [PATCH 02/23] formatting: correctly pass DecayedLog, changed Put This commit fixes previously incorrect passing of DecayedLog to newTestRoute in bench_test.go, sphinx_test.go, and in sphinx.go. A pointer to DecayedLog is used instead. Additionally, serialization of the CLTV value is moved out of the Put function's boltdb.Batch call. --- bench_test.go | 4 ++-- persistlog/decayedlog.go | 11 ++++++----- sphinx.go | 4 ++-- sphinx_test.go | 12 ++++++------ 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/bench_test.go b/bench_test.go index 1cbbc77..123ebd9 100644 --- a/bench_test.go +++ b/bench_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" - "github.com/roasbeef/btcd/btcec" "github.com/lightningnetwork/lightning-onion/persistlog" + "github.com/roasbeef/btcd/btcec" ) var ( @@ -58,7 +58,7 @@ func BenchmarkProcessPacket(b *testing.B) { b.StopTimer() // Create the DecayedLog object - d := persistlog.DecayedLog{} + d := &persistlog.DecayedLog{} if err := d.Start(); err != nil { b.Fatalf("unable to start channeldb") } diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go index 5a21d92..07cc704 100644 --- a/persistlog/decayedlog.go +++ b/persistlog/decayedlog.go @@ -176,18 +176,19 @@ func (d *DecayedLog) Get(hash []byte) ( // sharedHashBucket. func (d *DecayedLog) Put(hash []byte, value uint32) error { - return d.db.Batch(func(tx *bolt.Tx) error { - var scratch [4]byte + var scratch [4]byte + + // Store value into scratch + binary.BigEndian.PutUint32(scratch[:], value) + + return d.db.Batch(func(tx *bolt.Tx) error { sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) if err != nil { return fmt.Errorf("Unable to create bucket sharedHashes:"+ " %v", err) } - // Store value into scratch - binary.BigEndian.PutUint32(scratch[:], value) - return sharedHashes.Put(hash, scratch[:]) }) } diff --git a/sphinx.go b/sphinx.go index 9c87755..5be0c67 100644 --- a/sphinx.go +++ b/sphinx.go @@ -621,7 +621,7 @@ type Router struct { onionKey *btcec.PrivateKey - d persistlog.DecayedLog + d *persistlog.DecayedLog } // NewRouter creates a new instance of a Sphinx onion Router given the node's @@ -634,7 +634,7 @@ func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, // Safe to ignore the error here, nodeID is 20 bytes. nodeAddr, _ := btcutil.NewAddressPubKeyHash(nodeID[:], net) - d := persistlog.DecayedLog{ + d := &persistlog.DecayedLog{ Notifier: chainNotifier, } diff --git a/sphinx_test.go b/sphinx_test.go index 8b98dc9..1931c9b 100644 --- a/sphinx_test.go +++ b/sphinx_test.go @@ -87,7 +87,7 @@ var ( "baaa7d63ad64199f4664813b955cff954949076dcf" ) -func newTestRoute(numHops int, d persistlog.DecayedLog) ([]*Router, *[]HopData, *OnionPacket, error) { +func newTestRoute(numHops int, d *persistlog.DecayedLog) ([]*Router, *[]HopData, *OnionPacket, error) { nodes := make([]*Router, numHops) // Create numHops random sphinx nodes. 
@@ -182,7 +182,7 @@
 }
 
 func TestSphinxCorrectness(t *testing.T) {
-	d := persistlog.DecayedLog{}
+	d := &persistlog.DecayedLog{}
 	if err := d.Start(); err != nil {
 		t.Fatalf("unable to start channeldb")
 	}
@@ -248,7 +248,7 @@ func TestSphinxSingleHop(t *testing.T) {
 	// We'd like to test the proper behavior of the correctness of onion
 	// packet processing for "single-hop" payments which bare a full onion
 	// packet.
-	d := persistlog.DecayedLog{}
+	d := &persistlog.DecayedLog{}
 	if err := d.Start(); err != nil {
 		t.Fatalf("unable to start channeldb")
 	}
@@ -275,7 +275,7 @@ func TestSphinxNodeRelpay(t *testing.T) {
 	// We'd like to ensure that the sphinx node itself rejects all replayed
 	// packets which share the same shared secret.
-	d := persistlog.DecayedLog{}
+	d := &persistlog.DecayedLog{}
 	if err := d.Start(); err != nil {
 		t.Fatalf("unable to start channeldb")
 	}
 
@@ -301,7 +301,7 @@ func TestSphinxAssocData(t *testing.T) {
 	// We want to make sure that the associated data is considered in the
 	// HMAC creation
-	d := persistlog.DecayedLog{}
+	d := &persistlog.DecayedLog{}
 	if err := d.Start(); err != nil {
 		t.Fatalf("unable to start channeldb")
 	}
 
@@ -319,7 +319,7 @@ func TestSphinxEncodeDecode(t *testing.T) {
 	// Create some test data with a randomly populated, yet valid onion
 	// forwarding message.
-	d := persistlog.DecayedLog{}
+	d := &persistlog.DecayedLog{}
 	if err := d.Start(); err != nil {
 		t.Fatalf("unable to start channeldb")
 	}

From 9de21d422611470f0dcae92c8106590aa58af24d Mon Sep 17 00:00:00 2001
From: nsa
Date: Thu, 12 Oct 2017 02:12:37 -0400
Subject: [PATCH 03/23] persistlog: fix garbage collector, Get, and tests

This commit reverts some of the work of the previous commit. It fixes
the garbage collector to sweep expired entries by decrementing each
stored CLTV for every block notification it receives. It also fixes the
Get method to return math.MaxUint32 when no CLTV value is stored for a
hash. Tests were also cleaned up and fixed.
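
For reference, the replay check in ProcessOnionPacket now treats
math.MaxUint32 as the "never seen" sentinel, along the lines of the
sphinx.go hunk below:

    hashedSecret := persistlog.HashSharedSecret(sharedSecret)
    cltv, err := r.d.Get(hashedSecret[:])
    if err != nil {
        return nil, err
    }
    if cltv != math.MaxUint32 {
        // An entry exists, so this shared secret was seen before.
        return nil, ErrReplayedPacket
    }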
--- bench_test.go | 24 ++++++------- glide.lock | 16 +++++++-- persistlog/decayedlog.go | 45 ++++++++++++++++------- persistlog/decayedlog_test.go | 52 ++++++++++++++++----------- persistlog/interface.go | 2 +- sphinx.go | 9 ++--- sphinx_test.go | 67 ++++++++++++++++++----------------- 7 files changed, 132 insertions(+), 83 deletions(-) diff --git a/bench_test.go b/bench_test.go index 123ebd9..e146854 100644 --- a/bench_test.go +++ b/bench_test.go @@ -4,7 +4,6 @@ import ( "bytes" "testing" - "github.com/lightningnetwork/lightning-onion/persistlog" "github.com/roasbeef/btcd/btcec" ) @@ -56,27 +55,28 @@ func BenchmarkPathPacketConstruction(b *testing.B) { func BenchmarkProcessPacket(b *testing.B) { b.StopTimer() - - // Create the DecayedLog object - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - b.Fatalf("unable to start channeldb") - } - - path, _, sphinxPacket, err := newTestRoute(1, d) + path, _, sphinxPacket, err := newTestRoute(1) if err != nil { b.Fatalf("unable to create test route: %v", err) } b.ReportAllocs() + path[0].d.Start("0") + defer shutdown("0", path[0].d) b.StartTimer() var ( pkt *ProcessedPacket ) + for i := 0; i < b.N; i++ { + pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil) + if err != nil { + b.Fatalf("unable to process packet: %v", err) + } - pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil) - if err != nil { - b.Fatalf("unable to process packet: %v", err) + b.StopTimer() + shutdown("0", path[0].d) + path[0].d.Start("0") + b.StartTimer() } p = pkt diff --git a/glide.lock b/glide.lock index 3104318..106189c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: e37d8986cb181b5a89c7cd46611858aa1edd67e52f16f521d92a479c2f1841c9 -updated: 2017-09-21T01:35:04.66549614-04:00 +hash: 02ca2623123671074881b4e61e5b0bc0805530e1a68ae23bf7a5d67e04b7b326 +updated: 2017-10-11T17:39:22.162242846-04:00 imports: - name: github.com/aead/chacha20 version: d31a916ded42d1640b9d89a26f8abd53cc96790c @@ -7,6 +7,8 @@ imports: - chacha - name: github.com/boltdb/bolt version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8 +- name: github.com/btcsuite/btclog + version: 84c8d2346e9fc8c7b947e243b9c24e6df9fd206a - name: github.com/btcsuite/golangcrypto version: 53f62d9b43e87a6c56975cf862af7edf33a8d0df subpackages: @@ -18,6 +20,8 @@ imports: subpackages: - chainntnfs - channeldb + - lnwire + - shachain - name: github.com/roasbeef/btcd version: 4b411f0e78f1faa96ae650d886170d018c1835bf subpackages: @@ -34,6 +38,14 @@ imports: version: 459e26527287adbc2adcc5d0d49abff9a5f315a7 subpackages: - ripemd160 +- name: golang.org/x/sys + version: b6e1ae21643682ce023deb8d152024597b0e9bb4 + subpackages: + - unix +- name: google.golang.org/grpc + version: b3ddf786825de56a4178401b7e174ee332173b66 + subpackages: + - codes testImports: - name: github.com/davecgh/go-spew version: 346938d642f2ec3594ed81d874461961cd0faa76 diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go index 07cc704..e01344f 100644 --- a/persistlog/decayedlog.go +++ b/persistlog/decayedlog.go @@ -7,6 +7,7 @@ import ( "github.com/boltdb/bolt" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "math" "sync" ) @@ -59,13 +60,14 @@ func (d *DecayedLog) garbageCollector() error { outer: for { select { - case epoch, ok := <-epochClient.Epochs: + case _, ok := <-epochClient.Epochs: if !ok { return fmt.Errorf("Epoch client shutting " + "down") } var expiredCltv [][]byte + validCltv := make(map[string]uint32) err := d.db.View(func(tx *bolt.Tx) error { // Grab the shared 
hash bucket sharedHashes := tx.Bucket(sharedHashBucket) @@ -76,9 +78,13 @@ outer: sharedHashes.ForEach(func(k, v []byte) error { cltv := uint32(binary.BigEndian.Uint32(v)) - if uint32(epoch.Height) > cltv { + cltv-- + if cltv == 0 { // Store expired hash in array expiredCltv = append(expiredCltv, k) + } else { + // Store valid in map + validCltv[string(k)] = cltv } return nil }) @@ -94,11 +100,20 @@ outer: for _, hash := range expiredCltv { err = d.Delete(hash) if err != nil { - return fmt.Errorf("Unable to delete"+ + return fmt.Errorf("Unable to delete "+ "expired secret: %v", err) } } + // Update decremented CLTV's via validCltv + for hash, cltv := range validCltv { + err = d.Put([]byte(hash), cltv) + if err != nil { + return fmt.Errorf("Unable to decrement "+ + "cltv value: %v", err) + } + } + case <-d.quit: break outer } @@ -141,9 +156,9 @@ func (d *DecayedLog) Delete(hash []byte) error { // Get retrieves the CLTV value of a processed HTLC given the first 20 bytes // of the Sha-256 hash of the shared secret used during sphinx processing. -func (d *DecayedLog) Get(hash []byte) ( - uint32, error) { - var value uint32 +func (d *DecayedLog) Get(hash []byte) (uint32, error) { + // math.MaxUint32 is returned when Get did not retrieve a value. + var value uint32 = math.MaxUint32 err := d.db.View(func(tx *bolt.Tx) error { // Grab the shared hash bucket which stores the mapping from @@ -172,10 +187,9 @@ func (d *DecayedLog) Get(hash []byte) ( return value, nil } -// Put stores a key-pair into the -// sharedHashBucket. -func (d *DecayedLog) Put(hash []byte, - value uint32) error { +// Put stores a shared secret hash as the key and a slice consisting of the +// current blockheight and the outgoing CLTV value +func (d *DecayedLog) Put(hash []byte, value uint32) error { var scratch [4]byte @@ -196,13 +210,20 @@ func (d *DecayedLog) Put(hash []byte, // Start opens the database we will be using to store hashed shared secrets. // It also starts the garbage collector in a goroutine to remove stale // database entries. -func (d *DecayedLog) Start() error { +func (d *DecayedLog) Start(dbDir string) error { // Create the quit channel d.quit = make(chan struct{}) + var directory string + if dbDir == "" { + directory = defaultDbDirectory + } else { + directory = dbDir + } + // Open the channeldb for use. var err error - if d.db, err = channeldb.Open(defaultDbDirectory); err != nil { + if d.db, err = channeldb.Open(directory); err != nil { return fmt.Errorf("Could not open channeldb: %v", err) } diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go index 3d95392..c0cbdc5 100644 --- a/persistlog/decayedlog_test.go +++ b/persistlog/decayedlog_test.go @@ -6,6 +6,8 @@ import ( "github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/chaincfg/chainhash" "github.com/roasbeef/btcd/wire" + "math" + "os" "testing" "time" ) @@ -61,12 +63,19 @@ func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) [32]byte return sha256.Sum256(s.SerializeCompressed()) } +// shutdown stops the DecayedLog and deletes the folder enclosing the +// temporary channel database. +func shutdown(d *DecayedLog) { + os.RemoveAll("tempdir") + d.Stop() +} + // TestDecayedLogGarbageCollector tests the ability of the garbage collector // to delete expired cltv values every time a block is received. Expired cltv // values are cltv values that are <= current block height. 
func TestDecayedLogGarbageCollector(t *testing.T) { - // Random (EXPIRED) cltv value - cltv := uint32(200390) + // Random (TO-BE-EXPIRED) cltv value + cltv := uint32(2) // Create the MockNotifier which triggers the garbage collector MockNotifier := &mockNotifier{ @@ -77,11 +86,11 @@ func TestDecayedLogGarbageCollector(t *testing.T) { d := DecayedLog{Notifier: MockNotifier} // Open the channeldb (start the garbage collector) - err := d.Start() + err := d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } - defer d.Stop() + defer shutdown(&d) // Create a new private key on elliptic curve secp256k1 priv, err := btcec.NewPrivateKey(btcec.S256()) @@ -110,7 +119,10 @@ func TestDecayedLogGarbageCollector(t *testing.T) { // should remove the entry we just added to sharedHashBucket as it is // now expired. MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(cltv + 1), + Height: int32(101), + } + MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(102), } // Wait for database write (GC is in a goroutine) @@ -122,7 +134,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) { t.Fatalf("Delete failed - received an error upon Get") } - if val != 0 { + if val != math.MaxUint32 { t.Fatalf("cltv was not deleted") } } @@ -132,17 +144,17 @@ func TestDecayedLogGarbageCollector(t *testing.T) { // longer retrieve it. func TestDecayedLogInsertionAndDeletion(t *testing.T) { // Random cltv value - cltv := uint32(503928) + cltv := uint32(5) // Create a DecayedLog object d := DecayedLog{} // Open the channeldb - err := d.Start() + err := d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } - defer d.Stop() + defer shutdown(&d) // Create a new private key on elliptic curve secp256k1 priv, err := btcec.NewPrivateKey(btcec.S256()) @@ -179,7 +191,7 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) { t.Fatalf("Delete failed - received the wrong error message") } - if val != 0 { + if val != math.MaxUint32 { t.Fatalf("cltv was not deleted") } @@ -192,17 +204,17 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) { // the cltv value and check that it persists upon startup. func TestDecayedLogStartAndStop(t *testing.T) { // Random cltv value - cltv := uint32(909020) + cltv := uint32(6) // Create a DecayedLog object d := DecayedLog{} // Open the channeldb - err := d.Start() + err := d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } - defer d.Stop() + defer shutdown(&d) // Create a new private key on elliptic curve secp256k1 priv, err := btcec.NewPrivateKey(btcec.S256()) @@ -231,7 +243,7 @@ func TestDecayedLogStartAndStop(t *testing.T) { d.Stop() // Startup the DecayedLog's channeldb - err = d.Start() + err = d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } @@ -258,7 +270,7 @@ func TestDecayedLogStartAndStop(t *testing.T) { d.Stop() // Startup the DecayedLog's channeldb - err = d.Start() + err = d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } @@ -266,10 +278,10 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Assert that hashedSecret is not in the sharedHashBucket val, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Delete failed - received the wrong error message") + t.Fatalf("Delete failed") } - if val != 0 { + if val != math.MaxUint32 { t.Fatalf("cltv was not deleted") } @@ -280,17 +292,17 @@ func TestDecayedLogStartAndStop(t *testing.T) { // and retrieved cltv values are equal. 
func TestDecayedLogStorageAndRetrieval(t *testing.T) { // Random cltv value - cltv := uint32(302930) + cltv := uint32(7) // Create a DecayedLog object d := DecayedLog{} // Open the channeldb - err := d.Start() + err := d.Start("tempdir") if err != nil { t.Fatalf("Unable to start / open DecayedLog") } - defer d.Stop() + defer shutdown(&d) // Create a new private key on elliptic curve secp256k1 priv, err := btcec.NewPrivateKey(btcec.S256()) diff --git a/persistlog/interface.go b/persistlog/interface.go index fb8a24f..5c380f6 100644 --- a/persistlog/interface.go +++ b/persistlog/interface.go @@ -19,7 +19,7 @@ type PersistLog interface { // Start starts up the on-disk persistent log. It returns an error if // one occurs. - Start() error + Start(string) error // Stop safely stops the on-disk persistent log. Stop() diff --git a/sphinx.go b/sphinx.go index 5be0c67..a7c5c3f 100644 --- a/sphinx.go +++ b/sphinx.go @@ -8,10 +8,11 @@ import ( "encoding/binary" "io" "io/ioutil" + "math" "math/big" + "github.com/Crypt-iQ/lightning-onion/persistlog" "github.com/aead/chacha20" - "github.com/lightningnetwork/lightning-onion/persistlog" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/chaincfg" @@ -658,7 +659,7 @@ func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, // Start starts / opens the DecayedLog's channeldb and its accompanying // garbage collector goroutine. func (r *Router) Start() error { - return r.d.Start() + return r.d.Start("") } // Stop stops / closes the DecayedLog's channeldb and its accompanying @@ -694,7 +695,7 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P if err != nil { return nil, err } - if cltv != 0 { + if cltv != math.MaxUint32 { return nil, ErrReplayedPacket } @@ -736,7 +737,7 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P if err != nil { return nil, err } - if cltv != 0 { + if cltv != math.MaxUint32 { return nil, ErrReplayedPacket } diff --git a/sphinx_test.go b/sphinx_test.go index 1931c9b..9c58899 100644 --- a/sphinx_test.go +++ b/sphinx_test.go @@ -5,12 +5,14 @@ import ( "encoding/hex" "fmt" "reflect" + "strconv" "testing" + "github.com/Crypt-iQ/lightning-onion/persistlog" "github.com/davecgh/go-spew/spew" - "github.com/lightningnetwork/lightning-onion/persistlog" "github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/chaincfg" + "os" ) // BOLT 4 Test Vectors @@ -87,7 +89,7 @@ var ( "baaa7d63ad64199f4664813b955cff954949076dcf" ) -func newTestRoute(numHops int, d *persistlog.DecayedLog) ([]*Router, *[]HopData, *OnionPacket, error) { +func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) { nodes := make([]*Router, numHops) // Create numHops random sphinx nodes. @@ -99,7 +101,6 @@ func newTestRoute(numHops int, d *persistlog.DecayedLog) ([]*Router, *[]HopData, } nodes[i] = NewRouter(privKey, &chaincfg.MainNetParams, nil) - nodes[i].d = d } // Gather all the pub keys in the path. @@ -113,9 +114,7 @@ func newTestRoute(numHops int, d *persistlog.DecayedLog) ([]*Router, *[]HopData, hopsData = append(hopsData, HopData{ Realm: 0x00, ForwardAmount: uint64(i), - OutgoingCltv: uint32(i + 1), - // This is used to avoid a CLTV of 0, which is - // considered a non-CLTV value by DecayedLog. 
+ OutgoingCltv: uint32(i), }) copy(hopsData[i].NextAddress[:], bytes.Repeat([]byte{byte(i)}, 8)) } @@ -181,12 +180,15 @@ func TestBolt4Packet(t *testing.T) { } } +// shutdown deletes the temporary directory that the test database uses +// and handles closing the database. +func shutdown(dir string, d *persistlog.DecayedLog) { + os.RemoveAll(dir) + d.Stop() +} + func TestSphinxCorrectness(t *testing.T) { - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - t.Fatalf("unable to start channeldb") - } - nodes, hopDatas, fwdMsg, err := newTestRoute(NumMaxHops, d) + nodes, hopDatas, fwdMsg, err := newTestRoute(NumMaxHops) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } @@ -194,6 +196,11 @@ func TestSphinxCorrectness(t *testing.T) { // Now simulate the message propagating through the mix net eventually // reaching the final destination. for i := 0; i < len(nodes); i++ { + // Start each node's DecayedLog and defer shutdown + var tempDir = strconv.Itoa(i) + nodes[i].d.Start(tempDir) + defer shutdown(tempDir, nodes[i].d) + hop := nodes[i] t.Logf("Processing at hop: %v \n", i) @@ -248,15 +255,16 @@ func TestSphinxSingleHop(t *testing.T) { // We'd like to test the proper behavior of the correctness of onion // packet processing for "single-hop" payments which bare a full onion // packet. - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - t.Fatalf("unable to start channeldb") - } - nodes, _, fwdMsg, err := newTestRoute(1, d) + + nodes, _, fwdMsg, err := newTestRoute(1) if err != nil { t.Fatalf("unable to create test route: %v", err) } + // Start the DecayedLog and defer shutdown + nodes[0].d.Start("0") + defer shutdown("0", nodes[0].d) + // Simulating a direct single-hop payment, send the sphinx packet to // the destination node, making it process the packet fully. processedPacket, err := nodes[0].ProcessOnionPacket(fwdMsg, nil) @@ -275,16 +283,15 @@ func TestSphinxSingleHop(t *testing.T) { func TestSphinxNodeRelpay(t *testing.T) { // We'd like to ensure that the sphinx node itself rejects all replayed // packets which share the same shared secret. - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - t.Fatalf("unable to start channeldb") - } - - nodes, _, fwdMsg, err := newTestRoute(NumMaxHops, d) + nodes, _, fwdMsg, err := newTestRoute(NumMaxHops) if err != nil { t.Fatalf("unable to create test route: %v", err) } + // Start the DecayedLog and defer shutdown + nodes[0].d.Start("0") + defer shutdown("0", nodes[0].d) + // Allow the node to process the initial packet, this should proceed // without any failures. 
if _, err := nodes[0].ProcessOnionPacket(fwdMsg, nil); err != nil { @@ -301,15 +308,15 @@ func TestSphinxNodeRelpay(t *testing.T) { func TestSphinxAssocData(t *testing.T) { // We want to make sure that the associated data is considered in the // HMAC creation - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - t.Fatalf("unable to start channeldb") - } - nodes, _, fwdMsg, err := newTestRoute(5, d) + nodes, _, fwdMsg, err := newTestRoute(5) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } + // Start the DecayedLog and defer shutdown + nodes[0].d.Start("0") + defer shutdown("0", nodes[0].d) + if _, err := nodes[0].ProcessOnionPacket(fwdMsg, []byte("somethingelse")); err == nil { t.Fatalf("we should fail when associated data changes") } @@ -319,11 +326,7 @@ func TestSphinxAssocData(t *testing.T) { func TestSphinxEncodeDecode(t *testing.T) { // Create some test data with a randomly populated, yet valid onion // forwarding message. - d := &persistlog.DecayedLog{} - if err := d.Start(); err != nil { - t.Fatalf("unable to start channeldb") - } - _, _, fwdMsg, err := newTestRoute(5, d) + _, _, fwdMsg, err := newTestRoute(5) if err != nil { t.Fatalf("unable to create random onion packet: %v", err) } From 56b02b98f3794df420f18c7f4249be1b690998b0 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 12 Oct 2017 02:26:17 -0400 Subject: [PATCH 04/23] small cltv fix --- persistlog/decayedlog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go index e01344f..09fb1bd 100644 --- a/persistlog/decayedlog.go +++ b/persistlog/decayedlog.go @@ -78,11 +78,11 @@ outer: sharedHashes.ForEach(func(k, v []byte) error { cltv := uint32(binary.BigEndian.Uint32(v)) - cltv-- if cltv == 0 { // Store expired hash in array expiredCltv = append(expiredCltv, k) } else { + cltv-- // Store valid in map validCltv[string(k)] = cltv } From 22d2642e964aba24a1fb6f579dfbf560adba4c8f Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 12 Oct 2017 02:28:31 -0400 Subject: [PATCH 05/23] small test fix --- persistlog/decayedlog_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go index c0cbdc5..2f3d39f 100644 --- a/persistlog/decayedlog_test.go +++ b/persistlog/decayedlog_test.go @@ -124,6 +124,9 @@ func TestDecayedLogGarbageCollector(t *testing.T) { MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ Height: int32(102), } + MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(103), + } // Wait for database write (GC is in a goroutine) time.Sleep(500 * time.Millisecond) From df442be83baf805cb62e1e874d40892297100628 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 12 Oct 2017 13:59:39 -0400 Subject: [PATCH 06/23] persistlog: persistent GC, cleaned tests, batched writes This commit introduces changes to the garbage collector that cause it to batch all writes to the backend boltdb. The garbage collector's code is much cleaner now and all the logic is contained in the Batch call. Additionally, persistence was added to the garbage collector so that even on DecayedLog shutdown, the garbage collector will still delete expired entries based on their CLTV. Tests were cleaned up and a new test for GC persistence was added. 
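
Each value stored under a shared hash is now 8 bytes: bytes [0:4] hold
the remaining CLTV delta (big endian) and bytes [4:8] hold the block
height the garbage collector last observed. A sketch of the encoding
used in the hunks below:

    var scratch [8]byte
    binary.BigEndian.PutUint32(scratch[:4], cltv)     // blocks remaining
    binary.BigEndian.PutUint32(scratch[4:8], height)  // last GC height

An entry is reaped once its counter hits zero or, after a restart, once
the chain has advanced further than the remaining delta recorded before
shutdown (lastHeight != 0 && height-lastHeight > cltv).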
--- persistlog/decayedlog.go | 122 +++++++++------- persistlog/decayedlog_test.go | 262 +++++++++++++++++----------------- 2 files changed, 205 insertions(+), 179 deletions(-) diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go index 09fb1bd..b5cf996 100644 --- a/persistlog/decayedlog.go +++ b/persistlog/decayedlog.go @@ -4,17 +4,18 @@ import ( "crypto/sha256" "encoding/binary" "fmt" + "math" + "sync" + "github.com/boltdb/bolt" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" - "math" - "sync" ) const ( // defaultDbDirectory is the default directory where our decayed log - // will store our (sharedHash, CLTV expiry height) key-value pairs. - defaultDbDirectory = "sharedsecret" + // will store our (sharedHash, CLTV) key-value pairs. + defaultDbDirectory = "sharedhashes" // sharedHashSize is the size in bytes of the keys we will be storing // in the DecayedLog. It represents the first 20 bytes of a truncated @@ -26,9 +27,9 @@ const ( ) var ( - // sharedHashBucket is a bucket which houses all the first sharedHashSize - // bytes of a received HTLC's hashed shared secret and the HTLC's - // expiry block height. + // sharedHashBucket is a bucket which houses the first sharedHashSize + // bytes of a received HTLC's hashed shared secret as the key and the HTLC's + // CLTV expiry as the value. sharedHashBucket = []byte("shared-hash") ) @@ -36,8 +37,8 @@ var ( // sharedHashSize bytes of a sha256-hashed shared secret along with a node's // CLTV value. It is a decaying log meaning there will be a garbage collector // to collect entries which are expired according to their stored CLTV value -// and the current block height. DecayedLog wraps channeldb for simplicity, but -// must batch writes to the database to decrease write contention. +// and the current block height. DecayedLog wraps channeldb for simplicity and +// batches writes to the database to decrease write contention. type DecayedLog struct { db *channeldb.DB wg sync.WaitGroup @@ -60,15 +61,13 @@ func (d *DecayedLog) garbageCollector() error { outer: for { select { - case _, ok := <-epochClient.Epochs: + case epoch, ok := <-epochClient.Epochs: if !ok { return fmt.Errorf("Epoch client shutting " + "down") } - var expiredCltv [][]byte - validCltv := make(map[string]uint32) - err := d.db.View(func(tx *bolt.Tx) error { + err := d.db.Batch(func(tx *bolt.Tx) error { // Grab the shared hash bucket sharedHashes := tx.Bucket(sharedHashBucket) if sharedHashes == nil { @@ -77,15 +76,53 @@ outer: } sharedHashes.ForEach(func(k, v []byte) error { - cltv := uint32(binary.BigEndian.Uint32(v)) + // The CLTV value in question. + cltv := uint32(binary.BigEndian.Uint32(v[:4])) + // The last recorded block height that + // the garbage collector received. + lastHeight := uint32(binary.BigEndian.Uint32(v[4:8])) + if cltv == 0 { - // Store expired hash in array - expiredCltv = append(expiredCltv, k) + // This CLTV just expired. We + // must delete it from the db. + err := sharedHashes.Delete(k) + if err != nil { + return err + } + } else if lastHeight != 0 && uint32(epoch.Height) - lastHeight > cltv { + // This CLTV just expired or + // expired in the past but the + // garbage collector was not + // running and therefore could + // not handle it. We delete it + // from the db now. + err := sharedHashes.Delete(k) + if err != nil { + return err + } } else { + // The CLTV is still valid. We + // decrement the CLTV value and + // store the new CLTV value along + // with the current block height. 
+ var scratch [8]byte + + // Store decremented CLTV in + // scratch[:4] cltv-- - // Store valid in map - validCltv[string(k)] = cltv + binary.BigEndian.PutUint32(scratch[:4], cltv) + + // Store current blockheight in + // scratch[4:8] + binary.BigEndian.PutUint32(scratch[4:8], uint32(epoch.Height)) + + // Store + err := sharedHashes.Put(k, scratch[:]) + if err != nil { + return err + } } + return nil }) @@ -96,24 +133,6 @@ outer: "%v", err) } - // Delete every item in array - for _, hash := range expiredCltv { - err = d.Delete(hash) - if err != nil { - return fmt.Errorf("Unable to delete "+ - "expired secret: %v", err) - } - } - - // Update decremented CLTV's via validCltv - for hash, cltv := range validCltv { - err = d.Put([]byte(hash), cltv) - if err != nil { - return fmt.Errorf("Unable to decrement "+ - "cltv value: %v", err) - } - } - case <-d.quit: break outer } @@ -140,10 +159,10 @@ func HashSharedSecret(sharedSecret [sharedSecretSize]byte) [sharedHashSize]byte return sharedHash } -// Delete removes a key-pair from the +// Delete removes a key-pair from the // sharedHashBucket. func (d *DecayedLog) Delete(hash []byte) error { - return d.db.Update(func(tx *bolt.Tx) error { + return d.db.Batch(func(tx *bolt.Tx) error { sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) if err != nil { return fmt.Errorf("Unable to created sharedHashes bucket:"+ @@ -154,29 +173,30 @@ func (d *DecayedLog) Delete(hash []byte) error { }) } -// Get retrieves the CLTV value of a processed HTLC given the first 20 bytes -// of the Sha-256 hash of the shared secret used during sphinx processing. +// Get retrieves the CLTV of a processed HTLC given the first 20 bytes of the +// Sha-256 hash of the shared secret. func (d *DecayedLog) Get(hash []byte) (uint32, error) { // math.MaxUint32 is returned when Get did not retrieve a value. + // This was chosen because it's not feasible for a CLTV to be this high. var value uint32 = math.MaxUint32 err := d.db.View(func(tx *bolt.Tx) error { // Grab the shared hash bucket which stores the mapping from - // truncated sha-256 hashes of shared secrets to CLTV values. + // truncated sha-256 hashes of shared secrets to CLTV's. sharedHashes := tx.Bucket(sharedHashBucket) if sharedHashes == nil { return fmt.Errorf("sharedHashes is nil, could " + "not retrieve CLTV value") } - // If the sharedHash is found, we use it to find the associated - // CLTV in the sharedHashBucket. + // Retrieve the bytes which represents the CLTV + blockheight. valueBytes := sharedHashes.Get(hash) if valueBytes == nil { return nil } - value = uint32(binary.BigEndian.Uint32(valueBytes)) + // The first 4 bytes represent the CLTV, store it in value. + value = uint32(binary.BigEndian.Uint32(valueBytes[:4])) return nil }) @@ -187,14 +207,14 @@ func (d *DecayedLog) Get(hash []byte) (uint32, error) { return value, nil } -// Put stores a shared secret hash as the key and a slice consisting of the -// current blockheight and the outgoing CLTV value -func (d *DecayedLog) Put(hash []byte, value uint32) error { - - var scratch [4]byte +// Put stores a shared secret hash as the key and the CLTV as the value. +func (d *DecayedLog) Put(hash []byte, cltv uint32) error { + // The CLTV will be stored into scratch and then stored into the + // sharedHashBucket. 
+ var scratch [8]byte // Store value into scratch - binary.BigEndian.PutUint32(scratch[:], value) + binary.BigEndian.PutUint32(scratch[:4], cltv) return d.db.Batch(func(tx *bolt.Tx) error { sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) @@ -236,7 +256,7 @@ func (d *DecayedLog) Start(dbDir string) error { return nil }) if err != nil { - return fmt.Errorf("Could not create sharedHashes") + return err } // Start garbage collector. diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go index 2f3d39f..897b7ba 100644 --- a/persistlog/decayedlog_test.go +++ b/persistlog/decayedlog_test.go @@ -2,14 +2,19 @@ package persistlog import ( "crypto/sha256" - "github.com/lightningnetwork/lnd/chainntnfs" - "github.com/roasbeef/btcd/btcec" - "github.com/roasbeef/btcd/chaincfg/chainhash" - "github.com/roasbeef/btcd/wire" "math" "os" "testing" "time" + + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/roasbeef/btcd/btcec" + "github.com/roasbeef/btcd/chaincfg/chainhash" + "github.com/roasbeef/btcd/wire" +) + +const ( + cltv uint32 = 5 ) var ( @@ -63,39 +68,34 @@ func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) [32]byte return sha256.Sum256(s.SerializeCompressed()) } -// shutdown stops the DecayedLog and deletes the folder enclosing the -// temporary channel database. -func shutdown(d *DecayedLog) { - os.RemoveAll("tempdir") - d.Stop() -} - -// TestDecayedLogGarbageCollector tests the ability of the garbage collector -// to delete expired cltv values every time a block is received. Expired cltv -// values are cltv values that are <= current block height. -func TestDecayedLogGarbageCollector(t *testing.T) { - // Random (TO-BE-EXPIRED) cltv value - cltv := uint32(2) - - // Create the MockNotifier which triggers the garbage collector - MockNotifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch, 1), +// startup sets up the DecayedLog and possibly the garbage collector. +func startup(notifier bool) (*DecayedLog, *mockNotifier, [sharedHashSize]byte, error) { + var d DecayedLog + var MockNotifier *mockNotifier + var hashedSecret [sharedHashSize]byte + if notifier { + // Create the MockNotifier which triggers the garbage collector + MockNotifier = &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch, 1), + } + + // Initialize the DecayedLog object + d = DecayedLog{Notifier: MockNotifier} + } else { + // Initialize the DecayedLog object + d = DecayedLog{} } - // Create a DecayedLog object - d := DecayedLog{Notifier: MockNotifier} - // Open the channeldb (start the garbage collector) err := d.Start("tempdir") if err != nil { - t.Fatalf("Unable to start / open DecayedLog") + return nil, nil, hashedSecret, err } - defer shutdown(&d) // Create a new private key on elliptic curve secp256k1 priv, err := btcec.NewPrivateKey(btcec.S256()) if err != nil { - t.Fatalf("Unable to create new private key") + return nil, nil, hashedSecret, err } // Generate a public key from the key bytes @@ -107,25 +107,43 @@ func TestDecayedLogGarbageCollector(t *testing.T) { // Create the hashedSecret given the shared secret we just generated. // This is the first 20 bytes of the Sha-256 hash of the shared secret. // This is used as a key to retrieve the cltv value. - hashedSecret := HashSharedSecret(secret) + hashedSecret = HashSharedSecret(secret) + + return &d, MockNotifier, hashedSecret, nil +} + +// shutdown stops the DecayedLog and deletes the folder enclosing the +// temporary channel database. 
+func shutdown(d *DecayedLog) { + os.RemoveAll("tempdir") + d.Stop() +} + +// TestDecayedLogGarbageCollector tests the ability of the garbage collector +// to delete expired cltv values every time a block is received. Expired cltv +// values are cltv values that are <= current block height. +func TestDecayedLogGarbageCollector(t *testing.T) { + t.Parallel() + + d, notifier, hashedSecret, err := startup(true) + if err != nil { + t.Fatalf("Unable to start up DecayedLog: %v", err) + } + defer shutdown(d) // Store in the sharedHashBucket. err = d.Put(hashedSecret[:], cltv) if err != nil { - t.Fatalf("Unable to store in channeldb") + t.Fatalf("Unable to store in channeldb: %v", err) } - // Send Block notification to garbage collector. The garbage collector - // should remove the entry we just added to sharedHashBucket as it is - // now expired. - MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(101), - } - MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(102), - } - MockNotifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(103), + // Send block notifications to garbage collector. The garbage collector + // should remove the entry we just added to sharedHashBucket as it will + // expire by the 6th block notification. + for i := 0; i < 6; i++ { + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(100 + i), + } } // Wait for database write (GC is in a goroutine) @@ -134,7 +152,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) { // Assert that hashedSecret is not in the sharedHashBucket val, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Delete failed - received an error upon Get") + t.Fatalf("Delete failed - received an error upon Get: %v", err) } if val != math.MaxUint32 { @@ -142,56 +160,88 @@ func TestDecayedLogGarbageCollector(t *testing.T) { } } -// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the nested -// sharedHashBucket and then deletes it and finally asserts that we can no -// longer retrieve it. -func TestDecayedLogInsertionAndDeletion(t *testing.T) { - // Random cltv value - cltv := uint32(5) - - // Create a DecayedLog object - d := DecayedLog{} +// TestDecayedLogPersistentGarbageCollector tests the persistence property of +// the garbage collector. A block will be sent to the garbage collector, the +// garbage collector will be shut down, and then a much later block will be sent +// (past the expiry of our test CLTV) that causes the in the sharedHashBucket + if err = d.Put(hashedSecret[:], cltv); err != nil { + t.Fatalf("Unable to store in channeldb: %v", err) + } + + // Send a single block notification to the garbage collector. + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(100), + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Shut down DecayedLog and the garbage collector along with it. + d.Stop() + + // Start the DecayedLog again. + if err = d.Start("tempdir"); err != nil { + t.Fatalf("Unable to restart DecayedLog: %v", err) + } + + // Send a much later block notification to the garbage collector. 
+ notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(150), + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Assert that hashedSecret is not in the sharedHashBucket + val, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Unable to create new private key") + t.Fatalf("Delete failed - received an error upon Get: %v", err) } - // Generate a public key from the key bytes - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) + if val != math.MaxUint32 { + t.Fatalf("cltv was not deleted") + } +} - // Generate a shared secret with the public and private keys we made - secret := generateSharedSecret(testPub, priv) +// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the nested +// sharedHashBucket and then deletes it and finally asserts that we can no +// longer retrieve it. +func TestDecayedLogInsertionAndDeletion(t *testing.T) { + t.Parallel() - // Create the hashedSecret given the shared secret we just generated. - // This is the first 20 bytes of the Sha-256 hash of the shared secret. - // This is used as a key to retrieve the cltv value. - hashedSecret := HashSharedSecret(secret) + d, _, hashedSecret, err := startup(false) + if err != nil { + t.Fatalf("Unable to start up DecayedLog: %v", err) + } + defer shutdown(d) // Store in the sharedHashBucket. err = d.Put(hashedSecret[:], cltv) if err != nil { - t.Fatalf("Unable to store in channeldb") + t.Fatalf("Unable to store in channeldb: %v", err) } // Delete hashedSecret from the sharedHashBucket. err = d.Delete(hashedSecret[:]) if err != nil { - t.Fatalf("Unable to delete from channeldb") + t.Fatalf("Unable to delete from channeldb: %v", err) } // Assert that hashedSecret is not in the sharedHashBucket val, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Delete failed - received the wrong error message") + t.Fatalf("Delete failed - received the wrong error message: %v", err) } if val != math.MaxUint32 { @@ -206,40 +256,18 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) { // cltv value is indeed still stored in the sharedHashBucket. We then delete // the cltv value and check that it persists upon startup. func TestDecayedLogStartAndStop(t *testing.T) { - // Random cltv value - cltv := uint32(6) - - // Create a DecayedLog object - d := DecayedLog{} - - // Open the channeldb - err := d.Start("tempdir") - if err != nil { - t.Fatalf("Unable to start / open DecayedLog") - } - defer shutdown(&d) + t.Parallel() - // Create a new private key on elliptic curve secp256k1 - priv, err := btcec.NewPrivateKey(btcec.S256()) + d, _, hashedSecret, err := startup(false) if err != nil { - t.Fatalf("Unable to create new private key") + t.Fatalf("Unable to start up DecayedLog: %v", err) } - - // Generate a public key from the key bytes - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - - // Generate a shared secret with the public and private keys we made - secret := generateSharedSecret(testPub, priv) - - // Create the hashedSecret given the shared secret we just generated. - // This is the first 20 bytes of the Sha-256 hash of the shared secret. - // This is used as a key to retrieve the cltv value. - hashedSecret := HashSharedSecret(secret) + defer shutdown(d) // Store in the sharedHashBucket. 
err = d.Put(hashedSecret[:], cltv) if err != nil { - t.Fatalf("Unable to store in channeldb") + t.Fatalf("Unable to store in channeldb: %v", err) } // Shutdown the DecayedLog's channeldb @@ -248,13 +276,13 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Startup the DecayedLog's channeldb err = d.Start("tempdir") if err != nil { - t.Fatalf("Unable to start / open DecayedLog") + t.Fatalf("Unable to start / open DecayedLog: %v", err) } // Retrieve the stored cltv value given the hashedSecret key. value, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Unable to retrieve from channeldb") + t.Fatalf("Unable to retrieve from channeldb: %v", err) } // Check that the original cltv value matches the retrieved cltv @@ -266,7 +294,7 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Delete hashedSecret from sharedHashBucket err = d.Delete(hashedSecret[:]) if err != nil { - t.Fatalf("Unable to delete from channeldb") + t.Fatalf("Unable to delete from channeldb: %v", err) } // Shutdown the DecayedLog's channeldb @@ -275,13 +303,13 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Startup the DecayedLog's channeldb err = d.Start("tempdir") if err != nil { - t.Fatalf("Unable to start / open DecayedLog") + t.Fatalf("Unable to start / open DecayedLog: %v", err) } // Assert that hashedSecret is not in the sharedHashBucket val, err := d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Delete failed") + t.Fatalf("Delete failed: %v", err) } if val != math.MaxUint32 { @@ -294,46 +322,24 @@ func TestDecayedLogStartAndStop(t *testing.T) { // via the nested sharedHashBucket and finally asserts that the original stored // and retrieved cltv values are equal. func TestDecayedLogStorageAndRetrieval(t *testing.T) { - // Random cltv value - cltv := uint32(7) - - // Create a DecayedLog object - d := DecayedLog{} - - // Open the channeldb - err := d.Start("tempdir") - if err != nil { - t.Fatalf("Unable to start / open DecayedLog") - } - defer shutdown(&d) + t.Parallel() - // Create a new private key on elliptic curve secp256k1 - priv, err := btcec.NewPrivateKey(btcec.S256()) + d, _, hashedSecret, err := startup(false) if err != nil { - t.Fatalf("Unable to create new private key") + t.Fatalf("Unable to start up DecayedLog: %v", err) } - - // Generate a public key from the key bytes - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - - // Generate a shared secret with the public and private keys we made - secret := generateSharedSecret(testPub, priv) - - // Create the hashedSecret given the shared secret we just generated. - // This is the first 20 bytes of the Sha-256 hash of the shared secret. - // This is used as a key to retrieve the cltv value. - hashedSecret := HashSharedSecret(secret) + defer shutdown(d) // Store in the sharedHashBucket err = d.Put(hashedSecret[:], cltv) if err != nil { - t.Fatalf("Unable to store in channeldb") + t.Fatalf("Unable to store in channeldb: %v", err) } // Retrieve the stored cltv value given the hashedSecret key. 
 	value, err := d.Get(hashedSecret[:])
 	if err != nil {
-		t.Fatalf("Unable to retrieve from channeldb")
+		t.Fatalf("Unable to retrieve from channeldb: %v", err)
 	}
 
 	// If the original cltv value does not match the value retrieved,

From a287245278e5d89448e5355a0166251791c44ff9 Mon Sep 17 00:00:00 2001
From: nsa
Date: Thu, 12 Oct 2017 14:09:54 -0400
Subject: [PATCH 07/23] Off by 1 & gofmt

---
 persistlog/decayedlog.go | 2 +-
 sphinx_test.go           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go
index b5cf996..c90fad2 100644
--- a/persistlog/decayedlog.go
+++ b/persistlog/decayedlog.go
@@ -89,7 +89,7 @@ outer:
 					if err != nil {
 						return err
 					}
-				} else if lastHeight != 0 && uint32(epoch.Height) - lastHeight > cltv {
+				} else if lastHeight != 0 && uint32(epoch.Height)-lastHeight > cltv {
 					// This CLTV just expired or
 					// expired in the past but the
 					// garbage collector was not
diff --git a/sphinx_test.go b/sphinx_test.go
index 9c58899..245cbe3 100644
--- a/sphinx_test.go
+++ b/sphinx_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
+	"os"
 	"reflect"
 	"strconv"
 	"testing"
@@ -12,7 +13,6 @@ import (
 	"github.com/davecgh/go-spew/spew"
 	"github.com/roasbeef/btcd/btcec"
 	"github.com/roasbeef/btcd/chaincfg"
-	"os"
 )
 
 // BOLT 4 Test Vectors

From 3466252e8ce55a8c4d760c3edbef28394d9c44f5 Mon Sep 17 00:00:00 2001
From: nsa
Date: Thu, 19 Oct 2017 04:50:02 -0400
Subject: [PATCH 08/23] persistlog: reverting incorrect GC, test changes

This commit reverts incorrect garbage collector and test changes from
previous commits. Specifically, boltdb's Delete function should not have
been called inside a ForEach loop, as this is undefined behavior. The
CLTVs were being treated as relative timelocks instead of absolute
timelocks. Finally, t.Parallel() should not have been used in conjunction
with boltdb.
---
 persistlog/decayedlog.go      | 74 ++++++++++++-----------------------
 persistlog/decayedlog_test.go | 69 ++++++++++++++++----------------
 2 files changed, 62 insertions(+), 81 deletions(-)

diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go
index c90fad2..eeb7e57 100644
--- a/persistlog/decayedlog.go
+++ b/persistlog/decayedlog.go
@@ -75,57 +75,35 @@ outer:
 					"is nil")
 				}
 
+				var expiredCltv [][]byte
 				sharedHashes.ForEach(func(k, v []byte) error {
 					// The CLTV value in question.
-					cltv := uint32(binary.BigEndian.Uint32(v[:4]))
-					// The last recorded block height that
-					// the garbage collector received.
-					lastHeight := uint32(binary.BigEndian.Uint32(v[4:8]))
-
-					if cltv == 0 {
-						// This CLTV just expired. We
-						// must delete it from the db.
-						err := sharedHashes.Delete(k)
-						if err != nil {
-							return err
-						}
-					} else if lastHeight != 0 && uint32(epoch.Height)-lastHeight > cltv {
-						// This CLTV just expired or
-						// expired in the past but the
-						// garbage collector was not
-						// running and therefore could
-						// not handle it. We delete it
-						// from the db now.
-						err := sharedHashes.Delete(k)
-						if err != nil {
-							return err
-						}
-					} else {
-						// The CLTV is still valid. We
-						// decrement the CLTV value and
-						// store the new CLTV value along
-						// with the current block height.
- var scratch [8]byte - - // Store decremented CLTV in - // scratch[:4] - cltv-- - binary.BigEndian.PutUint32(scratch[:4], cltv) - - // Store current blockheight in - // scratch[4:8] - binary.BigEndian.PutUint32(scratch[4:8], uint32(epoch.Height)) - - // Store - err := sharedHashes.Put(k, scratch[:]) - if err != nil { - return err - } + cltv := uint32(binary.BigEndian.Uint32(v)) + + // Current blockheight + height := uint32(epoch.Height) + + if cltv < height { + // This CLTV is expired. We must + // add it to an array which we'll + // loop over and delete every + // hash contained from the db. + expiredCltv = append(expiredCltv, k) } return nil }) + // Delete every item in the array. This must + // be done explicitly outside of the ForEach + // function for safety reasons. + for _, hash := range expiredCltv { + err := sharedHashes.Delete(hash) + if err != nil { + return err + } + } + return nil }) if err != nil { @@ -189,14 +167,14 @@ func (d *DecayedLog) Get(hash []byte) (uint32, error) { "not retrieve CLTV value") } - // Retrieve the bytes which represents the CLTV + blockheight. + // Retrieve the bytes which represents the CLTV valueBytes := sharedHashes.Get(hash) if valueBytes == nil { return nil } // The first 4 bytes represent the CLTV, store it in value. - value = uint32(binary.BigEndian.Uint32(valueBytes[:4])) + value = uint32(binary.BigEndian.Uint32(valueBytes)) return nil }) @@ -211,10 +189,10 @@ func (d *DecayedLog) Get(hash []byte) (uint32, error) { func (d *DecayedLog) Put(hash []byte, cltv uint32) error { // The CLTV will be stored into scratch and then stored into the // sharedHashBucket. - var scratch [8]byte + var scratch [4]byte // Store value into scratch - binary.BigEndian.PutUint32(scratch[:4], cltv) + binary.BigEndian.PutUint32(scratch[:], cltv) return d.db.Batch(func(tx *bolt.Tx) error { sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go index 897b7ba..7ae7217 100644 --- a/persistlog/decayedlog_test.go +++ b/persistlog/decayedlog_test.go @@ -14,7 +14,7 @@ import ( ) const ( - cltv uint32 = 5 + cltv uint32 = 100000 ) var ( @@ -121,10 +121,8 @@ func shutdown(d *DecayedLog) { // TestDecayedLogGarbageCollector tests the ability of the garbage collector // to delete expired cltv values every time a block is received. Expired cltv -// values are cltv values that are <= current block height. +// values are cltv values that are < current block height. func TestDecayedLogGarbageCollector(t *testing.T) { - t.Parallel() - d, notifier, hashedSecret, err := startup(true) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) @@ -137,37 +135,52 @@ func TestDecayedLogGarbageCollector(t *testing.T) { t.Fatalf("Unable to store in channeldb: %v", err) } + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + // Send block notifications to garbage collector. The garbage collector - // should remove the entry we just added to sharedHashBucket as it will - // expire by the 6th block notification. - for i := 0; i < 6; i++ { - notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(100 + i), - } + // should remove the entry by block 100001. 
+ + // Send block 100000 + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: 100000, + } + + // Assert that hashedSecret is still in the sharedHashBucket + val, err := d.Get(hashedSecret[:]) + if err != nil { + t.Fatalf("Get failed - received an error upon Get: %v", err) + } + + if val != cltv { + t.Fatalf("GC incorrectly deleted CLTV") + } + + // Send block 100001 (expiry block) + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: 100001, } // Wait for database write (GC is in a goroutine) time.Sleep(500 * time.Millisecond) // Assert that hashedSecret is not in the sharedHashBucket - val, err := d.Get(hashedSecret[:]) + val, err = d.Get(hashedSecret[:]) if err != nil { - t.Fatalf("Delete failed - received an error upon Get: %v", err) + t.Fatalf("Get failed - received an error upon Get: %v", err) } if val != math.MaxUint32 { - t.Fatalf("cltv was not deleted") + t.Fatalf("CLTV was not deleted") } } // TestDecayedLogPersistentGarbageCollector tests the persistence property of -// the garbage collector. A block will be sent to the garbage collector, the -// garbage collector will be shut down, and then a much later block will be sent -// (past the expiry of our test CLTV) that causes the pair to be deleted even +// on GC restarts. func TestDecayedLogPersistentGarbageCollector(t *testing.T) { - t.Parallel() - d, notifier, hashedSecret, err := startup(true) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) @@ -179,11 +192,6 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { t.Fatalf("Unable to store in channeldb: %v", err) } - // Send a single block notification to the garbage collector. - notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(100), - } - // Wait for database write (GC is in a goroutine) time.Sleep(500 * time.Millisecond) @@ -195,9 +203,10 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { t.Fatalf("Unable to restart DecayedLog: %v", err) } - // Send a much later block notification to the garbage collector. + // Send a block notification to the garbage collector that expires + // the stored CLTV. notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(150), + Height: int32(100001), } // Wait for database write (GC is in a goroutine) @@ -214,12 +223,10 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { } } -// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the nested +// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the // sharedHashBucket and then deletes it and finally asserts that we can no // longer retrieve it. func TestDecayedLogInsertionAndDeletion(t *testing.T) { - t.Parallel() - d, _, hashedSecret, err := startup(false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) @@ -256,8 +263,6 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) { // cltv value is indeed still stored in the sharedHashBucket. We then delete // the cltv value and check that it persists upon startup. func TestDecayedLogStartAndStop(t *testing.T) { - t.Parallel() - d, _, hashedSecret, err := startup(false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) @@ -322,8 +327,6 @@ func TestDecayedLogStartAndStop(t *testing.T) { // via the nested sharedHashBucket and finally asserts that the original stored // and retrieved cltv values are equal. 
func TestDecayedLogStorageAndRetrieval(t *testing.T) { - t.Parallel() - d, _, hashedSecret, err := startup(false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) From efd8a7898e942aa66ac673381f0ba98192bc8dcf Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Fri, 12 Jan 2018 15:17:02 -0800 Subject: [PATCH 09/23] persistlog: move to main sphinx package --- persistlog/decayedlog.go | 256 ------------------------ persistlog/decayedlog_test.go | 354 ---------------------------------- persistlog/interface.go | 26 --- 3 files changed, 636 deletions(-) delete mode 100644 persistlog/decayedlog.go delete mode 100644 persistlog/decayedlog_test.go delete mode 100644 persistlog/interface.go diff --git a/persistlog/decayedlog.go b/persistlog/decayedlog.go deleted file mode 100644 index eeb7e57..0000000 --- a/persistlog/decayedlog.go +++ /dev/null @@ -1,256 +0,0 @@ -package persistlog - -import ( - "crypto/sha256" - "encoding/binary" - "fmt" - "math" - "sync" - - "github.com/boltdb/bolt" - "github.com/lightningnetwork/lnd/chainntnfs" - "github.com/lightningnetwork/lnd/channeldb" -) - -const ( - // defaultDbDirectory is the default directory where our decayed log - // will store our (sharedHash, CLTV) key-value pairs. - defaultDbDirectory = "sharedhashes" - - // sharedHashSize is the size in bytes of the keys we will be storing - // in the DecayedLog. It represents the first 20 bytes of a truncated - // sha-256 hash of a secret generated by ECDH. - sharedHashSize = 20 - - // sharedSecretSize is the size in bytes of the shared secrets. - sharedSecretSize = 32 -) - -var ( - // sharedHashBucket is a bucket which houses the first sharedHashSize - // bytes of a received HTLC's hashed shared secret as the key and the HTLC's - // CLTV expiry as the value. - sharedHashBucket = []byte("shared-hash") -) - -// DecayedLog implements the PersistLog interface. It stores the first -// sharedHashSize bytes of a sha256-hashed shared secret along with a node's -// CLTV value. It is a decaying log meaning there will be a garbage collector -// to collect entries which are expired according to their stored CLTV value -// and the current block height. DecayedLog wraps channeldb for simplicity and -// batches writes to the database to decrease write contention. -type DecayedLog struct { - db *channeldb.DB - wg sync.WaitGroup - quit chan (struct{}) - Notifier chainntnfs.ChainNotifier -} - -// garbageCollector deletes entries from sharedHashBucket whose expiry height -// has already past. This function MUST be run as a goroutine. -func (d *DecayedLog) garbageCollector() error { - defer d.wg.Done() - - epochClient, err := d.Notifier.RegisterBlockEpochNtfn() - if err != nil { - return fmt.Errorf("Unable to register for epoch "+ - "notification: %v", err) - } - defer epochClient.Cancel() - -outer: - for { - select { - case epoch, ok := <-epochClient.Epochs: - if !ok { - return fmt.Errorf("Epoch client shutting " + - "down") - } - - err := d.db.Batch(func(tx *bolt.Tx) error { - // Grab the shared hash bucket - sharedHashes := tx.Bucket(sharedHashBucket) - if sharedHashes == nil { - return fmt.Errorf("sharedHashBucket " + - "is nil") - } - - var expiredCltv [][]byte - sharedHashes.ForEach(func(k, v []byte) error { - // The CLTV value in question. - cltv := uint32(binary.BigEndian.Uint32(v)) - - // Current blockheight - height := uint32(epoch.Height) - - if cltv < height { - // This CLTV is expired. We must - // add it to an array which we'll - // loop over and delete every - // hash contained from the db. 
- expiredCltv = append(expiredCltv, k) - } - - return nil - }) - - // Delete every item in the array. This must - // be done explicitly outside of the ForEach - // function for safety reasons. - for _, hash := range expiredCltv { - err := sharedHashes.Delete(hash) - if err != nil { - return err - } - } - - return nil - }) - if err != nil { - return fmt.Errorf("Error viewing channeldb: "+ - "%v", err) - } - - case <-d.quit: - break outer - } - } - - return nil -} - -// A compile time check to see if DecayedLog adheres to the PersistLog -// interface. -var _ PersistLog = (*DecayedLog)(nil) - -// HashSharedSecret Sha-256 hashes the shared secret and returns the first -// sharedHashSize bytes of the hash. -func HashSharedSecret(sharedSecret [sharedSecretSize]byte) [sharedHashSize]byte { - // Sha256 hash of sharedSecret - h := sha256.New() - h.Write(sharedSecret[:]) - - var sharedHash [sharedHashSize]byte - - // Copy bytes to sharedHash - copy(sharedHash[:], h.Sum(nil)[:sharedHashSize]) - return sharedHash -} - -// Delete removes a key-pair from the -// sharedHashBucket. -func (d *DecayedLog) Delete(hash []byte) error { - return d.db.Batch(func(tx *bolt.Tx) error { - sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) - if err != nil { - return fmt.Errorf("Unable to created sharedHashes bucket:"+ - " %v", err) - } - - return sharedHashes.Delete(hash) - }) -} - -// Get retrieves the CLTV of a processed HTLC given the first 20 bytes of the -// Sha-256 hash of the shared secret. -func (d *DecayedLog) Get(hash []byte) (uint32, error) { - // math.MaxUint32 is returned when Get did not retrieve a value. - // This was chosen because it's not feasible for a CLTV to be this high. - var value uint32 = math.MaxUint32 - - err := d.db.View(func(tx *bolt.Tx) error { - // Grab the shared hash bucket which stores the mapping from - // truncated sha-256 hashes of shared secrets to CLTV's. - sharedHashes := tx.Bucket(sharedHashBucket) - if sharedHashes == nil { - return fmt.Errorf("sharedHashes is nil, could " + - "not retrieve CLTV value") - } - - // Retrieve the bytes which represents the CLTV - valueBytes := sharedHashes.Get(hash) - if valueBytes == nil { - return nil - } - - // The first 4 bytes represent the CLTV, store it in value. - value = uint32(binary.BigEndian.Uint32(valueBytes)) - - return nil - }) - if err != nil { - return value, err - } - - return value, nil -} - -// Put stores a shared secret hash as the key and the CLTV as the value. -func (d *DecayedLog) Put(hash []byte, cltv uint32) error { - // The CLTV will be stored into scratch and then stored into the - // sharedHashBucket. - var scratch [4]byte - - // Store value into scratch - binary.BigEndian.PutUint32(scratch[:], cltv) - - return d.db.Batch(func(tx *bolt.Tx) error { - sharedHashes, err := tx.CreateBucketIfNotExists(sharedHashBucket) - if err != nil { - return fmt.Errorf("Unable to create bucket sharedHashes:"+ - " %v", err) - } - - return sharedHashes.Put(hash, scratch[:]) - }) -} - -// Start opens the database we will be using to store hashed shared secrets. -// It also starts the garbage collector in a goroutine to remove stale -// database entries. -func (d *DecayedLog) Start(dbDir string) error { - // Create the quit channel - d.quit = make(chan struct{}) - - var directory string - if dbDir == "" { - directory = defaultDbDirectory - } else { - directory = dbDir - } - - // Open the channeldb for use. 
- var err error - if d.db, err = channeldb.Open(directory); err != nil { - return fmt.Errorf("Could not open channeldb: %v", err) - } - - err = d.db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(sharedHashBucket) - if err != nil { - return fmt.Errorf("Unable to create bucket sharedHashes:"+ - " %v", err) - } - return nil - }) - if err != nil { - return err - } - - // Start garbage collector. - if d.Notifier != nil { - d.wg.Add(1) - go d.garbageCollector() - } - - return nil -} - -// Stop halts the garbage collector and closes channeldb. -func (d *DecayedLog) Stop() { - // Stop garbage collector. - close(d.quit) - - // Close channeldb. - d.db.Close() -} diff --git a/persistlog/decayedlog_test.go b/persistlog/decayedlog_test.go deleted file mode 100644 index 7ae7217..0000000 --- a/persistlog/decayedlog_test.go +++ /dev/null @@ -1,354 +0,0 @@ -package persistlog - -import ( - "crypto/sha256" - "math" - "os" - "testing" - "time" - - "github.com/lightningnetwork/lnd/chainntnfs" - "github.com/roasbeef/btcd/btcec" - "github.com/roasbeef/btcd/chaincfg/chainhash" - "github.com/roasbeef/btcd/wire" -) - -const ( - cltv uint32 = 100000 -) - -var ( - // Bytes of a private key - key = [32]byte{ - 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, - 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, - 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, - 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, - } -) - -type mockNotifier struct { - confChannel chan *chainntnfs.TxConfirmation - epochChan chan *chainntnfs.BlockEpoch -} - -func (m *mockNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) { - return &chainntnfs.BlockEpochEvent{ - Epochs: m.epochChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, numConfs, - heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { - return nil, nil -} - -func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, - heightHint uint32) (*chainntnfs.SpendEvent, error) { - return nil, nil -} - -func (m *mockNotifier) Start() error { - return nil -} - -func (m *mockNotifier) Stop() error { - return nil -} - -// generateSharedSecret generates a shared secret given a public key and a -// private key. It is directly copied from sphinx.go. -func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) [32]byte { - s := &btcec.PublicKey{} - x, y := btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - s.X = x - s.Y = y - - return sha256.Sum256(s.SerializeCompressed()) -} - -// startup sets up the DecayedLog and possibly the garbage collector. 
-func startup(notifier bool) (*DecayedLog, *mockNotifier, [sharedHashSize]byte, error) { - var d DecayedLog - var MockNotifier *mockNotifier - var hashedSecret [sharedHashSize]byte - if notifier { - // Create the MockNotifier which triggers the garbage collector - MockNotifier = &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch, 1), - } - - // Initialize the DecayedLog object - d = DecayedLog{Notifier: MockNotifier} - } else { - // Initialize the DecayedLog object - d = DecayedLog{} - } - - // Open the channeldb (start the garbage collector) - err := d.Start("tempdir") - if err != nil { - return nil, nil, hashedSecret, err - } - - // Create a new private key on elliptic curve secp256k1 - priv, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - return nil, nil, hashedSecret, err - } - - // Generate a public key from the key bytes - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) - - // Generate a shared secret with the public and private keys we made - secret := generateSharedSecret(testPub, priv) - - // Create the hashedSecret given the shared secret we just generated. - // This is the first 20 bytes of the Sha-256 hash of the shared secret. - // This is used as a key to retrieve the cltv value. - hashedSecret = HashSharedSecret(secret) - - return &d, MockNotifier, hashedSecret, nil -} - -// shutdown stops the DecayedLog and deletes the folder enclosing the -// temporary channel database. -func shutdown(d *DecayedLog) { - os.RemoveAll("tempdir") - d.Stop() -} - -// TestDecayedLogGarbageCollector tests the ability of the garbage collector -// to delete expired cltv values every time a block is received. Expired cltv -// values are cltv values that are < current block height. -func TestDecayedLogGarbageCollector(t *testing.T) { - d, notifier, hashedSecret, err := startup(true) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret[:], cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Send block notifications to garbage collector. The garbage collector - // should remove the entry by block 100001. - - // Send block 100000 - notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: 100000, - } - - // Assert that hashedSecret is still in the sharedHashBucket - val, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Get failed - received an error upon Get: %v", err) - } - - if val != cltv { - t.Fatalf("GC incorrectly deleted CLTV") - } - - // Send block 100001 (expiry block) - notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: 100001, - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Assert that hashedSecret is not in the sharedHashBucket - val, err = d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Get failed - received an error upon Get: %v", err) - } - - if val != math.MaxUint32 { - t.Fatalf("CLTV was not deleted") - } -} - -// TestDecayedLogPersistentGarbageCollector tests the persistence property of -// the garbage collector. The garbage collector will be restarted immediately and -// a block that expires the stored CLTV value will be sent to the ChainNotifier. -// We test that this causes the pair to be deleted even -// on GC restarts. 
-func TestDecayedLogPersistentGarbageCollector(t *testing.T) { - d, notifier, hashedSecret, err := startup(true) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(d) - - // Store in the sharedHashBucket - if err = d.Put(hashedSecret[:], cltv); err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Shut down DecayedLog and the garbage collector along with it. - d.Stop() - - // Start the DecayedLog again. - if err = d.Start("tempdir"); err != nil { - t.Fatalf("Unable to restart DecayedLog: %v", err) - } - - // Send a block notification to the garbage collector that expires - // the stored CLTV. - notifier.epochChan <- &chainntnfs.BlockEpoch{ - Height: int32(100001), - } - - // Wait for database write (GC is in a goroutine) - time.Sleep(500 * time.Millisecond) - - // Assert that hashedSecret is not in the sharedHashBucket - val, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Delete failed - received an error upon Get: %v", err) - } - - if val != math.MaxUint32 { - t.Fatalf("cltv was not deleted") - } -} - -// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the -// sharedHashBucket and then deletes it and finally asserts that we can no -// longer retrieve it. -func TestDecayedLogInsertionAndDeletion(t *testing.T) { - d, _, hashedSecret, err := startup(false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret[:], cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Delete hashedSecret from the sharedHashBucket. - err = d.Delete(hashedSecret[:]) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } - - // Assert that hashedSecret is not in the sharedHashBucket - val, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Delete failed - received the wrong error message: %v", err) - } - - if val != math.MaxUint32 { - t.Fatalf("cltv was not deleted") - } - -} - -// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started, -// a cltv value is stored in the sharedHashBucket, and then it the DecayedLog -// is stopped. The DecayedLog is then started up again and we test that the -// cltv value is indeed still stored in the sharedHashBucket. We then delete -// the cltv value and check that it persists upon startup. -func TestDecayedLogStartAndStop(t *testing.T) { - d, _, hashedSecret, err := startup(false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(d) - - // Store in the sharedHashBucket. - err = d.Put(hashedSecret[:], cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Shutdown the DecayedLog's channeldb - d.Stop() - - // Startup the DecayedLog's channeldb - err = d.Start("tempdir") - if err != nil { - t.Fatalf("Unable to start / open DecayedLog: %v", err) - } - - // Retrieve the stored cltv value given the hashedSecret key. - value, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } - - // Check that the original cltv value matches the retrieved cltv - // value. 
- if cltv != value { - t.Fatalf("Value retrieved doesn't match value stored") - } - - // Delete hashedSecret from sharedHashBucket - err = d.Delete(hashedSecret[:]) - if err != nil { - t.Fatalf("Unable to delete from channeldb: %v", err) - } - - // Shutdown the DecayedLog's channeldb - d.Stop() - - // Startup the DecayedLog's channeldb - err = d.Start("tempdir") - if err != nil { - t.Fatalf("Unable to start / open DecayedLog: %v", err) - } - - // Assert that hashedSecret is not in the sharedHashBucket - val, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Delete failed: %v", err) - } - - if val != math.MaxUint32 { - t.Fatalf("cltv was not deleted") - } - -} - -// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it -// via the nested sharedHashBucket and finally asserts that the original stored -// and retrieved cltv values are equal. -func TestDecayedLogStorageAndRetrieval(t *testing.T) { - d, _, hashedSecret, err := startup(false) - if err != nil { - t.Fatalf("Unable to start up DecayedLog: %v", err) - } - defer shutdown(d) - - // Store in the sharedHashBucket - err = d.Put(hashedSecret[:], cltv) - if err != nil { - t.Fatalf("Unable to store in channeldb: %v", err) - } - - // Retrieve the stored cltv value given the hashedSecret key. - value, err := d.Get(hashedSecret[:]) - if err != nil { - t.Fatalf("Unable to retrieve from channeldb: %v", err) - } - - // If the original cltv value does not match the value retrieved, - // then the test failed. - if cltv != value { - t.Fatalf("Value retrieved doesn't match value stored") - } - -} diff --git a/persistlog/interface.go b/persistlog/interface.go deleted file mode 100644 index 5c380f6..0000000 --- a/persistlog/interface.go +++ /dev/null @@ -1,26 +0,0 @@ -package persistlog - -// PersistLog is an interface that defines a new on-disk data structure that -// contains a persistent log. The interface is general to allow implementations -// near-complete autonomy. All of these calls should be safe for concurrent -// access. -type PersistLog interface { - // Delete deletes an entry from the persistent log given []byte - Delete([]byte) error - - // Get retrieves an entry from the persistent log given a []byte. It - // returns the value stored and an error if one occurs. - Get([]byte) (uint32, error) - - // Put stores an entry into the persistent log given a []byte and an - // accompanying purposefully general type. It returns an error if one - // occurs. - Put([]byte, uint32) error - - // Start starts up the on-disk persistent log. It returns an error if - // one occurs. - Start(string) error - - // Stop safely stops the on-disk persistent log. 
-	Stop()
-}

From e0cb955519f19c0a3e93d5b2363253d3d79951ca Mon Sep 17 00:00:00 2001
From: Conner Fromknecht
Date: Fri, 12 Jan 2018 15:25:18 -0800
Subject: [PATCH 10/23] decayedlog+decayedlog_test: clean up decayedlog and
 add PutBatch

---
 decayedlog.go      | 442 +++++++++++++++++++++++++++++++++++++++++++
 decayedlog_test.go | 336 ++++++++++++++++++++++++++++++
 2 files changed, 778 insertions(+)
 create mode 100644 decayedlog.go
 create mode 100644 decayedlog_test.go

diff --git a/decayedlog.go b/decayedlog.go
new file mode 100644
index 0000000..3b986a9
--- /dev/null
+++ b/decayedlog.go
@@ -0,0 +1,442 @@
+package sphinx
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+
+	"github.com/boltdb/bolt"
+	"github.com/lightningnetwork/lnd/chainntnfs"
+)
+
+const (
+	// defaultDbDirectory is the default directory where our decayed log
+	// will store our (sharedHash, CLTV) key-value pairs.
+	defaultDbDirectory = "sharedhashes"
+
+	// dbPermissions sets the database permissions to be readable and
+	// writable only by the user.
+	dbPermissions = 0600
+
+	// sharedHashSize is the size in bytes of the keys we will be storing
+	// in the DecayedLog. It represents the first 20 bytes of a truncated
+	// sha-256 hash of a secret generated by ECDH.
+	sharedHashSize = 20
+)
+
+var (
+	// sharedHashBucket is a bucket which houses the first sharedHashSize
+	// bytes of a received HTLC's hashed shared secret as the key and the
+	// HTLC's CLTV expiry as the value.
+	sharedHashBucket = []byte("shared-hash")
+
+	// batchReplayBucket is a bucket that maps batch identifiers to
+	// serialized ReplaySets. This is used to give idempotency in the event
+	// that a batch is processed more than once.
+	batchReplayBucket = []byte("batch-replay")
+)
+
+// HashPrefix is a statically sized, 20-byte array containing the prefix
+// of a Hash256, and is used to detect duplicate sphinx packets.
+type HashPrefix [sharedHashSize]byte
+
+var (
+	// ErrDecayedLogInit is used to indicate a decayed log failed to create
+	// the proper bucketing structure on startup.
+	ErrDecayedLogInit = errors.New("unable to initialize decayed log")
+
+	// ErrDecayedLogCorrupted signals that the anticipated bucketing
+	// structure has diverged since initialization.
+	ErrDecayedLogCorrupted = errors.New("decayed log structure corrupted")
+)
+
+// ReplayLog is an interface that defines a new on-disk data structure that
+// contains a persistent log to enable strong replay protection. The interface
+// is general to allow implementations near-complete autonomy. All of these
+// calls should be safe for concurrent access.
+type ReplayLog interface {
+	// Start starts up the on-disk persistent log. It returns an error if
+	// one occurs.
+	Start() error
+
+	// Stop safely stops the on-disk persistent log.
+	Stop() error
+
+	// Get retrieves an entry from the persistent log given a []byte. It
+	// returns the value stored and an error if one occurs.
+	Get([]byte) (uint32, error)
+
+	// Put stores an entry into the persistent log given a []byte and an
+	// accompanying purposefully general type. It returns an error if the
+	// provided hash prefix already exists in the log.
+	Put(*HashPrefix, uint32) error
+
+	// PutBatch stores a batch of entries, returning the replay set
+	// containing the indices of any entries that were already present in
+	// the log. It returns an error if one occurs.
+	PutBatch(*Batch) (*ReplaySet, error)
+
+	// Delete deletes an entry from the persistent log given []byte
+	Delete([]byte) error
+}
+
+// DecayedLog implements the ReplayLog interface. It stores the first
+// sharedHashSize bytes of a sha256-hashed shared secret along with a node's
+// CLTV value. It is a decaying log meaning there will be a garbage collector
+// to collect entries which are expired according to their stored CLTV value
+// and the current block height. DecayedLog wraps boltdb for simplicity and
+// batches writes to the database to decrease write contention.
+type DecayedLog struct {
+	started int32
+	stopped int32
+
+	dbPath string
+
+	db *bolt.DB
+
+	notifier chainntnfs.ChainNotifier
+
+	wg   sync.WaitGroup
+	quit chan struct{}
+}
+
+// NewDecayedLog creates a new DecayedLog, which caches recently seen hashed
+// shared secrets. Entries are evicted as their cltv expires using block
+// epochs from the given notifier.
+func NewDecayedLog(dbPath string,
+	notifier chainntnfs.ChainNotifier) *DecayedLog {
+
+	// Use default path for log database
+	if dbPath == "" {
+		dbPath = defaultDbDirectory
+	}
+
+	return &DecayedLog{
+		dbPath:   dbPath,
+		notifier: notifier,
+		quit:     make(chan struct{}),
+	}
+}
+
+// Start opens the database we will be using to store hashed shared secrets.
+// It also starts the garbage collector in a goroutine to remove stale
+// database entries.
+func (d *DecayedLog) Start() error {
+	if !atomic.CompareAndSwapInt32(&d.started, 0, 1) {
+		return nil
+	}
+
+	// Open the boltdb for use.
+	var err error
+	if d.db, err = bolt.Open(d.dbPath, dbPermissions, nil); err != nil {
+		return fmt.Errorf("Could not open boltdb: %v", err)
+	}
+
+	// Initialize the primary buckets used by the decayed log.
+	if err := d.initBuckets(); err != nil {
+		return err
+	}
+
+	// Start garbage collector.
+	if d.notifier != nil {
+		epochClient, err := d.notifier.RegisterBlockEpochNtfn()
+		if err != nil {
+			return fmt.Errorf("Unable to register for epoch "+
+				"notifications: %v", err)
+		}
+
+		d.wg.Add(1)
+		go d.garbageCollector(epochClient)
+	}
+
+	return nil
+}
+
+// initBuckets initializes the primary buckets used by the decayed log, namely
+// the shared hash bucket and the batch replay bucket.
+func (d *DecayedLog) initBuckets() error {
+	return d.db.Update(func(tx *bolt.Tx) error {
+		_, err := tx.CreateBucketIfNotExists(sharedHashBucket)
+		if err != nil {
+			return ErrDecayedLogInit
+		}
+
+		_, err = tx.CreateBucketIfNotExists(batchReplayBucket)
+		if err != nil {
+			return ErrDecayedLogInit
+		}
+
+		return nil
+	})
+}
+
+// Stop halts the garbage collector and closes boltdb.
+func (d *DecayedLog) Stop() error {
+	if !atomic.CompareAndSwapInt32(&d.stopped, 0, 1) {
+		return nil
+	}
+
+	// Stop garbage collector.
+	close(d.quit)
+
+	d.wg.Wait()
+
+	// Close boltdb.
+	d.db.Close()
+
+	return nil
+}
+
+// garbageCollector deletes entries from sharedHashBucket whose expiry height
+// has already passed. This function MUST be run as a goroutine.
+func (d *DecayedLog) garbageCollector(epochClient *chainntnfs.BlockEpochEvent) {
+	defer d.wg.Done()
+	defer epochClient.Cancel()
+
+	for {
+		select {
+		case epoch, ok := <-epochClient.Epochs:
+			if !ok {
+				// Block epoch was canceled, shutting down.
+				sphxLog.Infof("Block epoch canceled, " +
+					"decaying hash log shutting down")
+				return
+			}
+
+			// Perform a bout of garbage collection using the
+			// epoch's block height.
+			height := uint32(epoch.Height)
+			if err := d.gcExpiredHashes(height); err != nil {
+				sphxLog.Errorf("unable to expire hashes at "+
+					"height=%d", height)
+			}
+
+		case <-d.quit:
+			// Received shutdown request.
+ sphxLog.Infof("Decaying hash log received " + + "shutdown request") + return + } + } +} + +// gcExpiredHashes purges the decaying log of all entries whose CLTV expires +// below the provided height. +func (d *DecayedLog) gcExpiredHashes(height uint32) error { + return d.db.Batch(func(tx *bolt.Tx) error { + // Grab the shared hash bucket + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return fmt.Errorf("sharedHashBucket " + + "is nil") + } + + var expiredCltv [][]byte + if err := sharedHashes.ForEach(func(k, v []byte) error { + // Deserialize the CLTV value for this entry. + cltv := uint32(binary.BigEndian.Uint32(v)) + + if cltv < height { + // This CLTV is expired. We must add it to an + // array which we'll loop over and delete every + // hash contained from the db. + expiredCltv = append(expiredCltv, k) + } + + return nil + }); err != nil { + return err + } + + // Delete every item in the array. This must + // be done explicitly outside of the ForEach + // function for safety reasons. + for _, hash := range expiredCltv { + err := sharedHashes.Delete(hash) + if err != nil { + return err + } + } + + return nil + }) +} + +// hashSharedSecret Sha-256 hashes the shared secret and returns the first +// sharedHashSize bytes of the hash. +func hashSharedSecret(sharedSecret *Hash256) HashPrefix { + // Sha256 hash of sharedSecret + h := sha256.New() + h.Write(sharedSecret[:]) + + var sharedHash HashPrefix + + // Copy bytes to sharedHash + copy(sharedHash[:], h.Sum(nil)) + return sharedHash +} + +// Delete removes a key-pair from the +// sharedHashBucket. +func (d *DecayedLog) Delete(hash []byte) error { + return d.db.Batch(func(tx *bolt.Tx) error { + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return ErrDecayedLogCorrupted + } + + return sharedHashes.Delete(hash) + }) +} + +// Get retrieves the CLTV of a processed HTLC given the first 20 bytes of the +// Sha-256 hash of the shared secret. +func (d *DecayedLog) Get(hash []byte) (uint32, error) { + // math.MaxUint32 is returned when Get did not retrieve a value. + // This was chosen because it's not feasible for a CLTV to be this high. + var value uint32 = math.MaxUint32 + + err := d.db.View(func(tx *bolt.Tx) error { + // Grab the shared hash bucket which stores the mapping from + // truncated sha-256 hashes of shared secrets to CLTV's. + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return fmt.Errorf("sharedHashes is nil, could " + + "not retrieve CLTV value") + } + + // Retrieve the bytes which represents the CLTV + valueBytes := sharedHashes.Get(hash) + if valueBytes == nil { + return nil + } + + // The first 4 bytes represent the CLTV, store it in value. + value = uint32(binary.BigEndian.Uint32(valueBytes)) + + return nil + }) + if err != nil { + return value, err + } + + return value, nil +} + +// Put stores a shared secret hash as the key and the CLTV as the value. +func (d *DecayedLog) Put(hash *HashPrefix, cltv uint32) error { + // Optimisitically serialize the cltv value into the scratch buffer. + var scratch [4]byte + binary.BigEndian.PutUint32(scratch[:], cltv) + + return d.db.Batch(func(tx *bolt.Tx) error { + sharedHashes := tx.Bucket(sharedHashBucket) + if sharedHashes == nil { + return ErrDecayedLogCorrupted + } + + // Check to see if this hash prefix has been recorded before. If + // a value is found, this packet is being replayed. 
+		valueBytes := sharedHashes.Get(hash[:])
+		if valueBytes != nil {
+			return ErrReplayedPacket
+		}
+
+		return sharedHashes.Put(hash[:], scratch[:])
+	})
+}
+
+// PutBatch accepts a pending batch of hashed secret entries to write to disk.
+// Each hashed secret is inserted with a corresponding time value, dictating
+// when the entry will be evicted from the log.
+// NOTE: This method enforces idempotency by writing the replay set obtained
+// from the first attempt for a particular batch ID, and returning the decoded
+// value on subsequent calls. For the indices of the replay set to be aligned
+// properly, the batch MUST be constructed identically to the first attempt;
+// pruning entries will cause the indices to become invalid.
+func (d *DecayedLog) PutBatch(b *Batch) (*ReplaySet, error) {
+	// Since batched boltdb txns may be executed multiple times before
+	// succeeding, we will create a new replay set for each invocation to
+	// avoid any side-effects. If the txn is successful, this replay set
+	// will be merged with the replay set computed during batch
+	// construction to generate the complete replay set. If this batch was
+	// previously processed, the replay set will be deserialized from disk.
+	var replays *ReplaySet
+	if err := d.db.Batch(func(tx *bolt.Tx) error {
+		sharedHashes := tx.Bucket(sharedHashBucket)
+		if sharedHashes == nil {
+			return ErrDecayedLogCorrupted
+		}
+
+		// Load the batch replay bucket, which will be used to either
+		// retrieve the result of previously processing this batch, or
+		// to write the result of this operation.
+		batchReplayBkt := tx.Bucket(batchReplayBucket)
+		if batchReplayBkt == nil {
+			return ErrDecayedLogCorrupted
+		}
+
+		// Check for the existence of this batch's id in the replay
+		// bucket. If a non-nil value is found, this indicates that we
+		// have already processed this batch before. We deserialize the
+		// resulting replay set and return it to ensure calls to
+		// PutBatch are idempotent.
+		replayBytes := batchReplayBkt.Get(b.id)
+		if replayBytes != nil {
+			replays = &ReplaySet{}
+			return replays.Decode(bytes.NewReader(replayBytes))
+		}
+
+		// The CLTV will be stored into scratch and then stored into
+		// the sharedHashBucket.
+		var scratch [4]byte
+
+		replays = NewReplaySet()
+		for seqNum, entry := range b.entries {
+			// Retrieve the bytes which represent the CLTV
+			valueBytes := sharedHashes.Get(entry.hashPrefix[:])
+			if valueBytes != nil {
+				replays.Add(seqNum)
+				continue
+			}
+
+			// Serialize the cltv value and write an entry keyed by
+			// the hash prefix.
+			binary.BigEndian.PutUint32(scratch[:], entry.cltv)
+			err := sharedHashes.Put(entry.hashPrefix[:], scratch[:])
+			if err != nil {
+				return err
+			}
+		}
+
+		// Merge the replay set computed from checking the on-disk
+		// entries with the in-batch replays computed during this
+		// batch's construction.
+		replays.Merge(b.replaySet)
+
+		// Write the replay set under the batch identifier to the batch
+		// replays bucket. This can be used during recovery to test (1)
+		// that a particular batch was successfully processed and (2)
+		// recover the indexes of the adds that were rejected as
+		// replays.
+		var replayBuf bytes.Buffer
+		if err := replays.Encode(&replayBuf); err != nil {
+			return err
+		}
+
+		return batchReplayBkt.Put(b.id, replayBuf.Bytes())
+	}); err != nil {
+		return nil, err
+	}
+
+	b.replaySet = replays
+	b.isCommitted = true
+
+	return replays, nil
+}
+
+// A compile time check to see if DecayedLog adheres to the ReplayLog
+// interface.
+var _ ReplayLog = (*DecayedLog)(nil) diff --git a/decayedlog_test.go b/decayedlog_test.go new file mode 100644 index 0000000..91fcc63 --- /dev/null +++ b/decayedlog_test.go @@ -0,0 +1,336 @@ +package sphinx + +import ( + "math" + "testing" + "time" + + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/roasbeef/btcd/btcec" + "github.com/roasbeef/btcd/chaincfg/chainhash" + "github.com/roasbeef/btcd/wire" +) + +const ( + cltv uint32 = 100000 +) + +var ( + // Bytes of a private key + key = [32]byte{ + 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, + } +) + +type mockNotifier struct { + confChannel chan *chainntnfs.TxConfirmation + epochChan chan *chainntnfs.BlockEpoch +} + +func (m *mockNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) { + return &chainntnfs.BlockEpochEvent{ + Epochs: m.epochChan, + Cancel: func() {}, + }, nil +} + +func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, numConfs, + heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { + return nil, nil +} + +func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, + heightHint uint32) (*chainntnfs.SpendEvent, error) { + return nil, nil +} + +func (m *mockNotifier) Start() error { + return nil +} + +func (m *mockNotifier) Stop() error { + return nil +} + +// startup sets up the DecayedLog and possibly the garbage collector. +func startup(notifier bool) (ReplayLog, *mockNotifier, HashPrefix, error) { + var log ReplayLog + var chainNotifier *mockNotifier + var hashedSecret HashPrefix + if notifier { + + // Create the MockNotifier which triggers the garbage collector + chainNotifier = &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch, 1), + } + + // Initialize the DecayedLog object + log = NewDecayedLog("tempdir", chainNotifier) + } else { + // Initialize the DecayedLog object + log = NewDecayedLog("tempdir", nil) + } + + // Open the channeldb (start the garbage collector) + err := log.Start() + if err != nil { + return nil, nil, hashedSecret, err + } + + // Create a new private key on elliptic curve secp256k1 + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + return nil, nil, hashedSecret, err + } + + // Generate a public key from the key bytes + _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) + + // Generate a shared secret with the public and private keys we made + secret := generateSharedSecret(testPub, priv) + + // Create the hashedSecret given the shared secret we just generated. + // This is the first 20 bytes of the Sha-256 hash of the shared secret. + // This is used as a key to retrieve the cltv value. + hashedSecret = hashSharedSecret(&secret) + + return log, chainNotifier, hashedSecret, nil +} + +// TestDecayedLogGarbageCollector tests the ability of the garbage collector +// to delete expired cltv values every time a block is received. Expired cltv +// values are cltv values that are < current block height. +func TestDecayedLogGarbageCollector(t *testing.T) { + d, notifier, hashedSecret, err := startup(true) + if err != nil { + t.Fatalf("Unable to start up DecayedLog: %v", err) + } + defer shutdown("tempdir", d) + + // Store in the sharedHashBucket. 
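+	// With cltv = 100000, the entry should survive the notification for
+	// block 100000 and be garbage collected once block 100001 is seen.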
+ err = d.Put(&hashedSecret, cltv) + if err != nil { + t.Fatalf("Unable to store in channeldb: %v", err) + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Send block notifications to garbage collector. The garbage collector + // should remove the entry by block 100001. + + // Send block 100000 + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: 100000, + } + + // Assert that hashedSecret is still in the sharedHashBucket + val, err := d.Get(hashedSecret[:]) + if err != nil { + t.Fatalf("Get failed - received an error upon Get: %v", err) + } + + if val != cltv { + t.Fatalf("GC incorrectly deleted CLTV") + } + + // Send block 100001 (expiry block) + notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: 100001, + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Assert that hashedSecret is not in the sharedHashBucket + val, err = d.Get(hashedSecret[:]) + if err != nil { + t.Fatalf("Get failed - received an error upon Get: %v", err) + } + + if val != math.MaxUint32 { + t.Fatalf("CLTV was not deleted") + } +} + +// TestDecayedLogPersistentGarbageCollector tests the persistence property of +// the garbage collector. The garbage collector will be restarted immediately and +// a block that expires the stored CLTV value will be sent to the ChainNotifier. +// We test that this causes the pair to be deleted even +// on GC restarts. +func TestDecayedLogPersistentGarbageCollector(t *testing.T) { + d, _, hashedSecret, err := startup(true) + if err != nil { + t.Fatalf("Unable to start up DecayedLog: %v", err) + } + defer shutdown("tempdir", d) + + // Store in the sharedHashBucket + if err = d.Put(&hashedSecret, cltv); err != nil { + t.Fatalf("Unable to store in channeldb: %v", err) + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Shut down DecayedLog and the garbage collector along with it. + d.Stop() + + d2, notifier2, hashedSecret2, err := startup(true) + if err != nil { + t.Fatalf("Unable to restart DecayedLog: %v", err) + } + defer shutdown("tempdir", d2) + + // Send a block notification to the garbage collector that expires + // the stored CLTV. + notifier2.epochChan <- &chainntnfs.BlockEpoch{ + Height: int32(100001), + } + + // Wait for database write (GC is in a goroutine) + time.Sleep(500 * time.Millisecond) + + // Assert that hashedSecret is not in the sharedHashBucket + val, err := d2.Get(hashedSecret2[:]) + if err != nil { + t.Fatalf("Delete failed - received an error upon Get: %v", err) + } + + if val != math.MaxUint32 { + t.Fatalf("cltv was not deleted") + } +} + +// TestDecayedLogInsertionAndRetrieval inserts a cltv value into the +// sharedHashBucket and then deletes it and finally asserts that we can no +// longer retrieve it. +func TestDecayedLogInsertionAndDeletion(t *testing.T) { + d, _, hashedSecret, err := startup(false) + if err != nil { + t.Fatalf("Unable to start up DecayedLog: %v", err) + } + defer shutdown("tempdir", d) + + // Store in the sharedHashBucket. + err = d.Put(&hashedSecret, cltv) + if err != nil { + t.Fatalf("Unable to store in channeldb: %v", err) + } + + // Delete hashedSecret from the sharedHashBucket. 
+	err = d.Delete(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to delete from channeldb: %v", err)
+	}
+
+	// Assert that hashedSecret is not in the sharedHashBucket
+	val, err := d.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Get failed - received an error upon Get: %v", err)
+	}
+
+	if val != math.MaxUint32 {
+		t.Fatalf("cltv was not deleted")
+	}
+}
+
+// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started,
+// a cltv value is stored in the sharedHashBucket, and then the DecayedLog is
+// stopped. The DecayedLog is then started up again and we test that the cltv
+// value is indeed still stored in the sharedHashBucket. We then delete the
+// cltv value and check that the deletion also persists across restarts.
+func TestDecayedLogStartAndStop(t *testing.T) {
+	d, _, hashedSecret, err := startup(false)
+	if err != nil {
+		t.Fatalf("Unable to start up DecayedLog: %v", err)
+	}
+	defer shutdown("tempdir", d)
+
+	// Store in the sharedHashBucket.
+	err = d.Put(&hashedSecret, cltv)
+	if err != nil {
+		t.Fatalf("Unable to store in channeldb: %v", err)
+	}
+
+	// Shutdown the DecayedLog's channeldb
+	d.Stop()
+
+	d2, _, _, err := startup(false)
+	if err != nil {
+		t.Fatalf("Unable to restart DecayedLog: %v", err)
+	}
+	defer shutdown("tempdir", d2)
+
+	// Retrieve the stored cltv value given the hashedSecret key.
+	value, err := d2.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to retrieve from channeldb: %v", err)
+	}
+
+	// Check that the original cltv value matches the retrieved cltv
+	// value.
+	if cltv != value {
+		t.Fatalf("Value retrieved doesn't match value stored")
+	}
+
+	// Delete the original hashedSecret from the sharedHashBucket.
+	err = d2.Delete(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to delete from channeldb: %v", err)
+	}
+
+	// Shutdown the DecayedLog's channeldb
+	d2.Stop()
+
+	d3, _, _, err := startup(false)
+	if err != nil {
+		t.Fatalf("Unable to restart DecayedLog: %v", err)
+	}
+	defer shutdown("tempdir", d3)
+
+	// Assert that the original hashedSecret is still absent from the
+	// sharedHashBucket after the restart.
+	val, err := d3.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Get failed: %v", err)
+	}
+
+	if val != math.MaxUint32 {
+		t.Fatalf("cltv was not deleted")
+	}
+}
+
+// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it
+// via the nested sharedHashBucket and finally asserts that the original stored
+// and retrieved cltv values are equal.
+func TestDecayedLogStorageAndRetrieval(t *testing.T) {
+	d, _, hashedSecret, err := startup(false)
+	if err != nil {
+		t.Fatalf("Unable to start up DecayedLog: %v", err)
+	}
+	defer shutdown("tempdir", d)
+
+	// Store in the sharedHashBucket
+	err = d.Put(&hashedSecret, cltv)
+	if err != nil {
+		t.Fatalf("Unable to store in channeldb: %v", err)
+	}
+
+	// Retrieve the stored cltv value given the hashedSecret key.
+	value, err := d.Get(hashedSecret[:])
+	if err != nil {
+		t.Fatalf("Unable to retrieve from channeldb: %v", err)
+	}
+
+	// If the original cltv value does not match the value retrieved, then
+	// the test failed.
+ if cltv != value { + t.Fatalf("Value retrieved doesn't match value stored") + } + +} From 542572ca00284c6458b3d246c6b8b65f3739c99e Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Fri, 12 Jan 2018 16:07:19 -0800 Subject: [PATCH 11/23] obfuscation: refactor to use Hash256 in fn signatures --- obfuscation.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/obfuscation.go b/obfuscation.go index 2d74d93..d7f902b 100644 --- a/obfuscation.go +++ b/obfuscation.go @@ -13,7 +13,7 @@ import ( // onionEncrypt obfuscates the data with compliance with BOLT#4. As we use a // stream cipher, calling onionEncrypt on an already encrypted piece of data // will decrypt it. -func onionEncrypt(sharedSecret [sha256.Size]byte, data []byte) []byte { +func onionEncrypt(sharedSecret *Hash256, data []byte) []byte { p := make([]byte, len(data)) @@ -27,7 +27,7 @@ func onionEncrypt(sharedSecret [sha256.Size]byte, data []byte) []byte { // OnionErrorEncrypter is a struct that's used to implement onion error // encryption as defined within BOLT0004. type OnionErrorEncrypter struct { - sharedSecret [sha256.Size]byte + sharedSecret Hash256 } // NewOnionErrorEncrypter creates new instance of the onion encryper backed by @@ -59,14 +59,14 @@ func NewOnionErrorEncrypter(router *Router, // failure and its origin. func (o *OnionErrorEncrypter) EncryptError(initial bool, data []byte) []byte { if initial { - umKey := generateKey("um", o.sharedSecret) + umKey := generateKey("um", &o.sharedSecret) hash := hmac.New(sha256.New, umKey[:]) hash.Write(data) h := hash.Sum(nil) data = append(h, data...) } - return onionEncrypt(o.sharedSecret, data) + return onionEncrypt(&o.sharedSecret, data) } // Encode writes the encrypter's shared secret to the provided io.Writer. @@ -180,7 +180,7 @@ func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (*btcec.PublicK var ( sender *btcec.PublicKey msg []byte - dummySecret [sha256.Size]byte + dummySecret Hash256 ) copy(dummySecret[:], bytes.Repeat([]byte{1}, 32)) @@ -188,7 +188,7 @@ func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (*btcec.PublicK // away an timing information pertaining to the position in the route // that the error emanated from. for i := 0; i < NumMaxHops; i++ { - var sharedSecret [sha256.Size]byte + var sharedSecret Hash256 // If we've already found the sender, then we'll use our dummy // secret to continue decryption attempts to fill out the rest @@ -202,7 +202,7 @@ func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (*btcec.PublicK // With the shared secret, we'll now strip off a layer of // encryption from the encrypted error payload. - encryptedData = onionEncrypt(sharedSecret, encryptedData) + encryptedData = onionEncrypt(&sharedSecret, encryptedData) // Next, we'll need to separate the data, from the MAC itself // so we can reconstruct and verify it. @@ -211,7 +211,7 @@ func (o *OnionErrorDecrypter) DecryptError(encryptedData []byte) (*btcec.PublicK // With the data split, we'll now re-generate the MAC using its // specified key. 
-	umKey := generateKey("um", sharedSecret)
+	umKey := generateKey("um", &sharedSecret)
 	h := hmac.New(sha256.New, umKey[:])
 	h.Write(data)
 
From 67a4f4a6b7b6989b7d04217a8fb99d3be1b2efea Mon Sep 17 00:00:00 2001
From: Conner Fromknecht
Date: Fri, 12 Jan 2018 16:33:09 -0800
Subject: [PATCH 12/23] batch: adds Batch helper object for transaction
 construction

---
 batch.go | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 batch.go

diff --git a/batch.go b/batch.go
new file mode 100644
index 0000000..8e87a8c
--- /dev/null
+++ b/batch.go
@@ -0,0 +1,89 @@
+package sphinx
+
+import "errors"
+
+// ErrAlreadyCommitted signals that an entry could not be added to the
+// batch because it has already been persisted.
+var ErrAlreadyCommitted = errors.New("cannot add to batch after committing")
+
+// Batch is an object used to incrementally construct a set of entries to add
+// to the replay log. After construction is completed, it can be added to the
+// log using the PutBatch method.
+type Batch struct {
+	// isCommitted denotes whether or not this batch has been successfully
+	// written to disk.
+	isCommitted bool
+
+	// id is a unique, caller-chosen identifier for this batch.
+	id []byte
+
+	// entries stores the set of all potential entries that might get
+	// written to the replay log. Some entries may be skipped after
+	// examining the on-disk content at the time of commit.
+	entries map[uint16]batchEntry
+
+	// replayCache is an in-memory lookup-table, which stores the hash
+	// prefix of entries already added to this batch. This allows a quick
+	// mechanism for intra-batch duplicate detection.
+	replayCache map[HashPrefix]struct{}
+
+	// replaySet contains the sequence numbers of all entries that were
+	// detected as replays. The set is finalized upon writing the batch to
+	// disk, and merges replays detected by the replay cache and on-disk
+	// replay log.
+	replaySet *ReplaySet
+}
+
+// NewBatch initializes an object for constructing a set of entries to
+// atomically add to a replay log. Batches are identified by a byte slice,
+// which allows the caller to safely process the same batch twice and get an
+// idempotent result.
+func NewBatch(id []byte) *Batch {
+	return &Batch{
+		id:          id,
+		entries:     make(map[uint16]batchEntry),
+		replayCache: make(map[HashPrefix]struct{}),
+		replaySet:   NewReplaySet(),
+	}
+}
+
+// Put inserts a hash-prefix/CLTV pair into the current batch. This method only
+// returns an error in the event that the batch was already committed to disk.
+// Decisions regarding whether or not a particular sequence number is a replay
+// are ultimately reported via the batch's ReplaySet after committing to disk.
+func (b *Batch) Put(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error {
+	// Abort if this batch was already written to disk.
+	if b.isCommitted {
+		return ErrAlreadyCommitted
+	}
+
+	// Check to see if this hash prefix is already included in this batch.
+	// If so, we will opportunistically mark this index as replayed.
+	if _, ok := b.replayCache[*hashPrefix]; ok {
+		b.replaySet.Add(seqNum)
+		return nil
+	}
+
+	// Otherwise, this is a distinct hash prefix for this batch. Add it to
+	// our list of entries that we will try to write to disk. Each of these
+	// entries will be checked again during the commit to see if any other
+	// on-disk entries contain the same hash prefix.
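+	// Note: sequence numbers are assumed to be unique within a batch;
+	// reusing a seqNum here would overwrite the previously staged entry.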
+	b.entries[seqNum] = batchEntry{
+		hashPrefix: *hashPrefix,
+		cltv:       cltv,
+	}
+
+	// Finally, add this hash prefix to our in-memory replay cache; this
+	// will be consulted upon further adds to check for duplicates in the
+	// same batch.
+	b.replayCache[*hashPrefix] = struct{}{}
+
+	return nil
+}
+
+// batchEntry is a tuple of a secret's hash prefix and the corresponding CLTV
+// at which the onion blob from which the secret was derived expires.
+type batchEntry struct {
+	hashPrefix HashPrefix
+	cltv       uint32
+}
From 814a002bdd7605d010f6307d354566a6932c0640 Mon Sep 17 00:00:00 2001
From: Conner Fromknecht
Date: Fri, 12 Jan 2018 16:34:01 -0800
Subject: [PATCH 13/23] replay_set: adds ReplaySet to store outcome of batched
 processing

---
 replay_set.go | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 replay_set.go

diff --git a/replay_set.go b/replay_set.go
new file mode 100644
index 0000000..a631a53
--- /dev/null
+++ b/replay_set.go
@@ -0,0 +1,81 @@
+package sphinx
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// ReplaySet is a data structure used to efficiently record the occurrence of
+// replays, identified by sequence number, when processing a Batch. Its primary
+// functionality includes set construction, membership queries, and merging of
+// replay sets.
+type ReplaySet struct {
+	replays map[uint16]struct{}
+}
+
+// NewReplaySet initializes an empty replay set.
+func NewReplaySet() *ReplaySet {
+	return &ReplaySet{
+		replays: make(map[uint16]struct{}),
+	}
+}
+
+// Size returns the number of elements in the replay set.
+func (rs *ReplaySet) Size() int {
+	return len(rs.replays)
+}
+
+// Add inserts the provided index into the replay set.
+func (rs *ReplaySet) Add(idx uint16) {
+	rs.replays[idx] = struct{}{}
+}
+
+// Contains queries the contents of the replay set for membership of a
+// particular index.
+func (rs *ReplaySet) Contains(idx uint16) bool {
+	_, ok := rs.replays[idx]
+	return ok
+}
+
+// Merge adds the contents of the provided replay set to the receiver's set.
+func (rs *ReplaySet) Merge(rs2 *ReplaySet) {
+	for seqNum := range rs2.replays {
+		rs.Add(seqNum)
+	}
+}
+
+// Encode serializes the replay set into an io.Writer suitable for storage. The
+// replay set can be recovered using Decode.
+func (rs *ReplaySet) Encode(w io.Writer) error {
+	for seqNum := range rs.replays {
+		err := binary.Write(w, binary.BigEndian, seqNum)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Decode reconstructs a replay set given an io.Reader. The underlying data is
+// assumed to be even in length; otherwise decoding fails.
+func (rs *ReplaySet) Decode(r io.Reader) error {
+	for {
+		// seqNum provides a buffer to read the next uint16 index.
+		var seqNum uint16
+
+		err := binary.Read(r, binary.BigEndian, &seqNum)
+		switch err {
+		case nil:
+			// Successful read, proceed.
+		case io.EOF:
+			return nil
+		default:
+			// Can return ErrShortBuffer or ErrUnexpectedEOF.
+			return err
+		}
+
+		// Add this decoded sequence number to the set.
+ rs.Add(seqNum) + } +} From d1efa2dca3245be003f4092e2c67fc70bf64f72f Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Fri, 12 Jan 2018 16:34:47 -0800 Subject: [PATCH 14/23] bench_test: use minor API modifications to ReplayLog --- bench_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bench_test.go b/bench_test.go index e146854..a06b39f 100644 --- a/bench_test.go +++ b/bench_test.go @@ -60,8 +60,8 @@ func BenchmarkProcessPacket(b *testing.B) { b.Fatalf("unable to create test route: %v", err) } b.ReportAllocs() - path[0].d.Start("0") - defer shutdown("0", path[0].d) + path[0].log.Start() + defer shutdown("0", path[0].log) b.StartTimer() var ( @@ -74,8 +74,8 @@ func BenchmarkProcessPacket(b *testing.B) { } b.StopTimer() - shutdown("0", path[0].d) - path[0].d.Start("0") + shutdown("0", path[0].log) + path[0].log.Start() b.StartTimer() } From e926602dbaa7c2b71318e1c5c32e8a62227f6e7d Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Fri, 12 Jan 2018 16:54:54 -0800 Subject: [PATCH 15/23] sphinx: adds batched processing of onion pkts via Tx --- sphinx.go | 206 ++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 153 insertions(+), 53 deletions(-) diff --git a/sphinx.go b/sphinx.go index a7c5c3f..9dba3cb 100644 --- a/sphinx.go +++ b/sphinx.go @@ -11,7 +11,6 @@ import ( "math" "math/big" - "github.com/Crypt-iQ/lightning-onion/persistlog" "github.com/aead/chacha20" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/roasbeef/btcd/btcec" @@ -70,6 +69,10 @@ const ( baseVersion = 0 ) +// Hash256 is a statically sized, 32-byte array, typically containing +// the output of a SHA256 hash. +type Hash256 [sha256.Size]byte + var ( // paddingBytes are the padding bytes used to fill out the remainder of the // unused portion of the per-hop payload. @@ -207,14 +210,14 @@ func (hd *HopData) Decode(r io.Reader) error { // generateSharedSecrets by the given nodes pubkeys, generates the shared // secrets. func generateSharedSecrets(paymentPath []*btcec.PublicKey, - sessionKey *btcec.PrivateKey) [][sha256.Size]byte { + sessionKey *btcec.PrivateKey) []Hash256 { // Each hop performs ECDH with our ephemeral key pair to arrive at a // shared secret. Additionally, each hop randomizes the group element // for the next hop by multiplying it by the blinding factor. This way // we only need to transmit a single group element, and hops can't link // a session back to us if they have several nodes in the path. numHops := len(paymentPath) - hopSharedSecrets := make([][sha256.Size]byte, numHops) + hopSharedSecrets := make([]Hash256, numHops) // Compute the triplet for the first hop outside of the main loop. // Within the loop each new triplet will be computed recursively based @@ -301,8 +304,8 @@ func NewOnionPacket(paymentPath []*btcec.PublicKey, sessionKey *btcec.PrivateKey // We'll derive the two keys we need for each hop in order to: // generate our stream cipher bytes for the mixHeader, and // calculate the MAC over the entire constructed packet. - rhoKey := generateKey("rho", hopSharedSecrets[i]) - muKey := generateKey("mu", hopSharedSecrets[i]) + rhoKey := generateKey("rho", &hopSharedSecrets[i]) + muKey := generateKey("mu", &hopSharedSecrets[i]) // The HMAC for the final hop is simply zeroes. This allows the // last hop to recognize that it is the destination for a @@ -379,13 +382,13 @@ func rightShift(slice []byte, num int) { // "filler" bytes produced by this function at the last hop. 
Using this // methodology, the size of the field stays constant at each hop. func generateHeaderPadding(key string, numHops int, hopSize int, - sharedSecrets [][sharedSecretSize]byte) []byte { + sharedSecrets []Hash256) []byte { filler := make([]byte, (numHops-1)*hopSize) for i := 1; i < numHops; i++ { totalFillerSize := ((NumMaxHops - i) + 1) * hopSize - streamKey := generateKey(key, sharedSecrets[i-1]) + streamKey := generateKey(key, &sharedSecrets[i-1]) streamBytes := generateCipherStream(streamKey, numStreamBytes) xor(filler, filler, streamBytes[totalFillerSize:totalFillerSize+i*hopSize]) @@ -486,7 +489,7 @@ func xor(dst, a, b []byte) int { // construction/processing based off of the denoted keyType. Within Sphinx // various keys are used within the same onion packet for padding generation, // MAC generation, and encryption/decryption. -func generateKey(keyType string, sharedKey [sharedSecretSize]byte) [keyLen]byte { +func generateKey(keyType string, sharedKey *Hash256) [keyLen]byte { mac := hmac.New(sha256.New, []byte(keyType)) mac.Write(sharedKey[:]) h := mac.Sum(nil) @@ -517,12 +520,14 @@ func generateCipherStream(key [keyLen]byte, numBytes uint) []byte { // computeBlindingFactor for the next hop given the ephemeral pubKey and // sharedSecret for this hop. The blinding factor is computed as the // sha-256(pubkey || sharedSecret). -func computeBlindingFactor(hopPubKey *btcec.PublicKey, hopSharedSecret []byte) [sha256.Size]byte { +func computeBlindingFactor(hopPubKey *btcec.PublicKey, + hopSharedSecret []byte) Hash256 { + sha := sha256.New() sha.Write(hopPubKey.SerializeCompressed()) sha.Write(hopSharedSecret) - var hash [sha256.Size]byte + var hash Hash256 copy(hash[:], sha.Sum(nil)) return hash } @@ -547,7 +552,7 @@ func blindBaseElement(blindingFactor []byte) *btcec.PublicKey { // key. We then take the _entire_ point generated by the ECDH operation, // serialize that using a compressed format, then feed the raw bytes through a // single SHA256 invocation. The resulting value is the shared secret. -func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) [32]byte { +func generateSharedSecret(pub *btcec.PublicKey, priv *btcec.PrivateKey) Hash256 { s := &btcec.PublicKey{} x, y := btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes()) s.X = x @@ -622,12 +627,12 @@ type Router struct { onionKey *btcec.PrivateKey - d *persistlog.DecayedLog + log ReplayLog } // NewRouter creates a new instance of a Sphinx onion Router given the node's // currently advertised onion private key, and the target Bitcoin network. -func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, +func NewRouter(dbPath string, nodeKey *btcec.PrivateKey, net *chaincfg.Params, chainNotifier chainntnfs.ChainNotifier) *Router { var nodeID [addressSize]byte copy(nodeID[:], btcutil.Hash160(nodeKey.PubKey().SerializeCompressed())) @@ -635,10 +640,6 @@ func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, // Safe to ignore the error here, nodeID is 20 bytes. nodeAddr, _ := btcutil.NewAddressPubKeyHash(nodeID[:], net) - d := &persistlog.DecayedLog{ - Notifier: chainNotifier, - } - return &Router{ nodeID: nodeID, nodeAddr: nodeAddr, @@ -652,20 +653,20 @@ func NewRouter(nodeKey *btcec.PrivateKey, net *chaincfg.Params, }, // TODO(roasbeef): replace instead with bloom filter? // * https://moderncrypto.org/mail-archive/messaging/2015/001911.html - d: d, + log: NewDecayedLog(dbPath, chainNotifier), } } // Start starts / opens the DecayedLog's channeldb and its accompanying // garbage collector goroutine. 
func (r *Router) Start() error { - return r.d.Start("") + return r.log.Start() } // Stop stops / closes the DecayedLog's channeldb and its accompanying // garbage collector goroutine. func (r *Router) Stop() { - r.d.Stop() + r.log.Stop() } // ProcessOnionPacket processes an incoming onion packet which has been forward @@ -677,28 +678,46 @@ func (r *Router) Stop() { // In the case of a successful packet processing, and ProcessedPacket struct is // returned which houses the newly parsed packet, along with instructions on // what to do next. -func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*ProcessedPacket, error) { - dhKey := onionPkt.EphemeralKey - routeInfo := onionPkt.RoutingInfo - headerMac := onionPkt.HeaderMAC +func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, + assocData []byte, incomingCltv uint32) (*ProcessedPacket, error) { + // Compute the shared secret for this onion packet. sharedSecret, err := r.generateSharedSecret(onionPkt.EphemeralKey) if err != nil { return nil, err } - // In order to mitigate replay attacks, if we've seen this particular - // shared secret before, cease processing and just drop this forwarding - // message. - hashedSecret := persistlog.HashSharedSecret(sharedSecret) - cltv, err := r.d.Get(hashedSecret[:]) + // Additionally, compute the hash prefix of the shared secret, which + // will serve as an identifier for detecting replayed packets. + hashPrefix := hashSharedSecret(&sharedSecret) + + // Continue to optimistically process this packet, deferring replay + // protection until the end to reduce the penalty of multiple IO + // operations. + packet, err := processOnionPacket(onionPkt, &sharedSecret, assocData) if err != nil { return nil, err } - if cltv != math.MaxUint32 { - return nil, ErrReplayedPacket + + // Atomically compare this hash prefix with the contents of the on-disk + // log, persisting it only if this entry was not detected as a replay. + if err := r.log.Put(&hashPrefix, incomingCltv); err != nil { + return nil, err } + return packet, nil +} + +// processOnionPacket performs the primary key derivation and handling of onion +// packets. The processed packets returned from this method should only be used +// if the packet was not flagged as a replayed packet. +func processOnionPacket(onionPkt *OnionPacket, + sharedSecret *Hash256, assocData []byte) (*ProcessedPacket, error) { + + dhKey := onionPkt.EphemeralKey + routeInfo := onionPkt.RoutingInfo + headerMac := onionPkt.HeaderMAC + // Using the derived shared secret, ensure the integrity of the routing // information by checking the attached MAC without leaking timing // information. @@ -711,9 +730,14 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, assocData []byte) (*P // Attach the padding zeroes in order to properly strip an encryption // layer off the routing info revealing the routing information for the // next hop. + streamBytes := generateCipherStream( + generateKey("rho", sharedSecret), + numStreamBytes, + ) + headerWithPadding := append(routeInfo[:], + bytes.Repeat([]byte{0}, hopDataSize)...) + var hopInfo [numStreamBytes]byte - streamBytes := generateCipherStream(generateKey("rho", sharedSecret), numStreamBytes) - headerWithPadding := append(routeInfo[:], bytes.Repeat([]byte{0}, hopDataSize)...) 
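+	// XOR-ing with the "rho" cipher stream strips our layer of encryption,
+	// while the appended zero bytes become the obfuscated filler that
+	// keeps the header length constant for the next hop.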
 	xor(hopInfo[:], headerWithPadding, streamBytes)
 
 	// Randomize the DH group element for the next hop using the
@@ -729,23 +753,6 @@
 		return nil, err
 	}
 
-	// The MAC checks out, mark this current shared secret as processed in
-	// order to mitigate future replay attacks. We need to check to see if
-	// we already know the secret again since a replay might have happened
-	// while we were checking the MAC and decoding the HopData.
-	cltv, err = r.d.Get(hashedSecret[:])
-	if err != nil {
-		return nil, err
-	}
-	if cltv != math.MaxUint32 {
-		return nil, ErrReplayedPacket
-	}
-
-	err = r.d.Put(hashedSecret[:], hopData.OutgoingCltv)
-	if err != nil {
-		return nil, err
-	}
-
 	// With the necessary items extracted, we'll copy of the onion packet
 	// for the next node, snipping off our per-hop data.
 	var nextMixHeader [routingInfoSize]byte
@@ -761,7 +768,7 @@
 	// However if the uncovered 'nextMac' is all zeroes, then this
 	// indicates that we're the final hop in the route.
 	var action ProcessCode = MoreHops
-	if bytes.Compare(bytes.Repeat([]byte{0x00}, hmacSize), hopData.HMAC[:]) == 0 {
+	if bytes.Compare(zeroHMAC[:], hopData.HMAC[:]) == 0 {
 		action = ExitNode
 	}
 
@@ -773,9 +780,9 @@
 }
 
 // generateSharedSecret generates the shared secret by given ephemeral key.
-func (r *Router) generateSharedSecret(dhKey *btcec.PublicKey) ([sha256.Size]byte,
+func (r *Router) generateSharedSecret(dhKey *btcec.PublicKey) (Hash256,
 	error) {
-	var sharedSecret [sha256.Size]byte
+	var sharedSecret Hash256
 
 	// Ensure that the public key is on our curve.
 	if !btcec.S256().IsOnCurve(dhKey.X, dhKey.Y) {
@@ -786,3 +793,96 @@
 	sharedSecret = generateSharedSecret(dhKey, r.onionKey)
 	return sharedSecret, nil
 }
+
+// Tx is a transaction consisting of a number of sphinx packets to be
+// atomically written to the replay log. This structure helps to coordinate
+// construction of the underlying Batch object, and to ensure that the result
+// of the processing is idempotent.
+type Tx struct {
+	// batch is the set of packets to be incrementally processed and
+	// ultimately committed in this transaction.
+	batch *Batch
+
+	// router is a reference to the sphinx router that created this
+	// transaction. Committing this transaction will utilize this router's
+	// replay log.
+	router *Router
+
+	// packets contains a potentially sparse list of optimistically
+	// processed packets for this batch. The contents of a particular index
+	// should only be accessed if the index is *not* included in the replay
+	// set and the packet did not fail any other stage of the processing.
+	packets []ProcessedPacket
+}
+
+// BeginTxn creates a new transaction that can later be committed back to the
+// sphinx router's replay log.
+//
+// NOTE: The nels parameter should represent the maximum number of entries
+// that could be added to the batch; using sequence numbers that match or
+// exceed this value could result in an out-of-bounds panic.
+func (r *Router) BeginTxn(id []byte, nels int) *Tx {
+	return &Tx{
+		batch:   NewBatch(id),
+		router:  r,
+		packets: make([]ProcessedPacket, nels),
+	}
+}
+
+// ProcessOnionPacket processes an incoming onion packet which has been
+// forwarded
+// to the target Sphinx router. If the encoded ephemeral key isn't on the
+// target Elliptic Curve, then the packet is rejected. Similarly, if the
+// derived shared secret has been seen before, the packet is rejected. Finally,
+// if the MAC doesn't check out, the packet is again rejected.
+//
+// In the case of successful packet processing, a ProcessedPacket struct is
+// returned which houses the newly parsed packet, along with instructions on
+// what to do next.
+func (t *Tx) ProcessOnionPacket(seqNum uint16, onionPkt *OnionPacket,
+	assocData []byte, incomingCltv uint32) error {
+
+	// Compute the shared secret for this onion packet.
+	sharedSecret, err := t.router.generateSharedSecret(
+		onionPkt.EphemeralKey)
+	if err != nil {
+		return err
+	}
+
+	// Additionally, compute the hash prefix of the shared secret, which
+	// will serve as an identifier for detecting replayed packets.
+	hashPrefix := hashSharedSecret(&sharedSecret)
+
+	// Continue to optimistically process this packet, deferring replay
+	// protection until the end to reduce the penalty of multiple IO
+	// operations.
+	packet, err := processOnionPacket(onionPkt, &sharedSecret, assocData)
+	if err != nil {
+		return err
+	}
+
+	// Add the hash prefix to the pending batch of shared secrets that will
+	// be written later via Commit().
+	err = t.batch.Put(seqNum, &hashPrefix, incomingCltv)
+	if err != nil {
+		return err
+	}
+
+	// If we successfully added this packet to the batch, cache the
+	// processed packet within the Tx, which can be accessed after
+	// committing if this sequence number does not appear in the replay
+	// set.
+	t.packets[seqNum] = *packet
+
+	return nil
+}
+
+// Commit writes this transaction's batch of sphinx packets to the replay log,
+// performing a final check against the log for replays.
+func (t *Tx) Commit() ([]ProcessedPacket, *ReplaySet, error) {
+	if t.batch.isCommitted {
+		return t.packets, t.batch.replaySet, nil
+	}
+
+	rs, err := t.router.log.PutBatch(t.batch)
+
+	return t.packets, rs, err
+}
From cb6fb5964903afee95fc2cb4f23f344aee245415 Mon Sep 17 00:00:00 2001
From: Conner Fromknecht
Date: Fri, 12 Jan 2018 16:55:32 -0800
Subject: [PATCH 16/23] sphinx_test: adds batched processing unit tests

---
 sphinx_test.go | 167 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 155 insertions(+), 12 deletions(-)

diff --git a/sphinx_test.go b/sphinx_test.go
index 245cbe3..6d9d577 100644
--- a/sphinx_test.go
+++ b/sphinx_test.go
@@ -9,7 +9,6 @@ import (
 	"strconv"
 	"testing"
 
-	"github.com/Crypt-iQ/lightning-onion/persistlog"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/roasbeef/btcd/btcec"
 	"github.com/roasbeef/btcd/chaincfg"
@@ -100,7 +99,9 @@ func newTestRoute(numHops int) ([]*Router, *[]HopData, *OnionPacket, error) {
 			" random key for sphinx node: %v", err)
 		}
 
-		nodes[i] = NewRouter(privKey, &chaincfg.MainNetParams, nil)
+		dbPath := strconv.Itoa(i)
+
+		nodes[i] = NewRouter(dbPath, privKey, &chaincfg.MainNetParams, nil)
 	}
 
 	// Gather all the pub keys in the path.
@@ -182,7 +183,7 @@ func TestBolt4Packet(t *testing.T) {
 
 // shutdown deletes the temporary directory that the test database uses
 // and handles closing the database.
-func shutdown(dir string, d *persistlog.DecayedLog) {
+func shutdown(dir string, d ReplayLog) {
 	os.RemoveAll(dir)
 	d.Stop()
 }
@@ -197,9 +198,9 @@ func TestSphinxCorrectness(t *testing.T) {
 	// reaching the final destination.
for i := 0; i < len(nodes); i++ { // Start each node's DecayedLog and defer shutdown - var tempDir = strconv.Itoa(i) - nodes[i].d.Start(tempDir) - defer shutdown(tempDir, nodes[i].d) + tempDir := strconv.Itoa(i) + nodes[i].log.Start() + defer shutdown(tempDir, nodes[i].log) hop := nodes[i] @@ -262,8 +263,8 @@ func TestSphinxSingleHop(t *testing.T) { } // Start the DecayedLog and defer shutdown - nodes[0].d.Start("0") - defer shutdown("0", nodes[0].d) + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) // Simulating a direct single-hop payment, send the sphinx packet to // the destination node, making it process the packet fully. @@ -289,8 +290,8 @@ func TestSphinxNodeRelpay(t *testing.T) { } // Start the DecayedLog and defer shutdown - nodes[0].d.Start("0") - defer shutdown("0", nodes[0].d) + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) // Allow the node to process the initial packet, this should proceed // without any failures. @@ -305,6 +306,148 @@ func TestSphinxNodeRelpay(t *testing.T) { } } +func TestSphinxNodeRelpaySameBatch(t *testing.T) { + // We'd like to ensure that the sphinx node itself rejects all replayed + // packets which share the same shared secret. + nodes, _, fwdMsg, err := newTestRoute(NumMaxHops) + if err != nil { + t.Fatalf("unable to create test route: %v", err) + } + + // Start the DecayedLog and defer shutdown + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) + + tx := nodes[0].BeginTxn([]byte("0"), 2) + + // Allow the node to process the initial packet, this should proceed + // without any failures. + if err := tx.ProcessOnionPacket(0, fwdMsg, nil); err != nil { + t.Fatalf("unable to process sphinx packet: %v", err) + } + + // Now, force the node to process the packet a second time, this call + // should not fail, even though the batch has internally recorded this + // as a duplicate. + err = tx.ProcessOnionPacket(1, fwdMsg, nil) + if err != nil { + t.Fatalf("adding duplicate sphinx packet to batch should not "+ + "result in an error, instead got: %v", err) + } + + // Commit the batch to disk, then we will inspect the replay set to + // ensure the duplicate entry was properly included. + _, replaySet, err := tx.Commit() + if err != nil { + t.Fatalf("unable to commit batch of sphinx packets: %v", err) + } + + if replaySet.Contains(0) { + t.Fatalf("index 0 was not expected to be in replay set") + } + + if !replaySet.Contains(1) { + t.Fatalf("expected replay set to contain duplicate packet " + + "at index 1") + } +} + +func TestSphinxNodeRelpayLaterBatch(t *testing.T) { + // We'd like to ensure that the sphinx node itself rejects all replayed + // packets which share the same shared secret. + nodes, _, fwdMsg, err := newTestRoute(NumMaxHops) + if err != nil { + t.Fatalf("unable to create test route: %v", err) + } + + // Start the DecayedLog and defer shutdown + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) + + tx := nodes[0].BeginTxn([]byte("0"), 1) + + // Allow the node to process the initial packet, this should proceed + // without any failures. + if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil { + t.Fatalf("unable to process sphinx packet: %v", err) + } + + _, _, err = tx.Commit() + if err != nil { + t.Fatalf("unable to commit sphinx batch: %v", err) + } + + tx2 := nodes[0].BeginTxn([]byte("1"), 1) + + // Now, force the node to process the packet a second time, this should + // fail with a detected replay error. 
+ err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil) + if err != nil { + t.Fatalf("sphinx packet replay should not have been rejected, "+ + "instead error is %v", err) + } + + _, replays, err := tx2.Commit() + if err != nil { + t.Fatalf("unable to commit second sphinx batch: %v", err) + } + + if !replays.Contains(0) { + t.Fatalf("expected replay set to contain index: %v", 0) + } +} + +func TestSphinxNodeRelpayBatchIdempotency(t *testing.T) { + // We'd like to ensure that the sphinx node itself rejects all replayed + // packets which share the same shared secret. + nodes, _, fwdMsg, err := newTestRoute(NumMaxHops) + if err != nil { + t.Fatalf("unable to create test route: %v", err) + } + + // Start the DecayedLog and defer shutdown + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) + + tx := nodes[0].BeginTxn([]byte("0"), 1) + + // Allow the node to process the initial packet, this should proceed + // without any failures. + if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil { + t.Fatalf("unable to process sphinx packet: %v", err) + } + + packets, replays, err := tx.Commit() + if err != nil { + t.Fatalf("unable to commit sphinx batch: %v", err) + } + + tx2 := nodes[0].BeginTxn([]byte("0"), 1) + + // Now, force the node to process the packet a second time, this should + // not fail with a detected replay error. + err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil) + if err != nil { + t.Fatalf("sphinx packet replay should not have been rejected, "+ + "instead error is %v", err) + } + + packets2, replays2, err := tx2.Commit() + if err != nil { + t.Fatalf("unable to commit second sphinx batch: %v", err) + } + + if replays.Size() != replays2.Size() { + t.Fatalf("expected replay set to be %v, instead got %v", + replays, replays2) + } + + if !reflect.DeepEqual(packets, packets2) { + t.Fatalf("expected packets to be %v, instead go %v", + packets, packets2) + } +} + func TestSphinxAssocData(t *testing.T) { // We want to make sure that the associated data is considered in the // HMAC creation @@ -314,8 +457,8 @@ func TestSphinxAssocData(t *testing.T) { } // Start the DecayedLog and defer shutdown - nodes[0].d.Start("0") - defer shutdown("0", nodes[0].d) + nodes[0].log.Start() + defer shutdown("0", nodes[0].log) if _, err := nodes[0].ProcessOnionPacket(fwdMsg, []byte("somethingelse")); err == nil { t.Fatalf("we should fail when associated data changes") From 5a2e68711e3ec65349c934a69ff6f7972727b3c2 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Tue, 30 Jan 2018 23:53:53 -0800 Subject: [PATCH 17/23] log: introduces the SPHX subsytem logger --- log.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 log.go diff --git a/log.go b/log.go new file mode 100644 index 0000000..c9804c9 --- /dev/null +++ b/log.go @@ -0,0 +1,42 @@ +package sphinx + +import "github.com/btcsuite/btclog" + +// sphxLog is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var sphxLog btclog.Logger + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + sphxLog = btclog.Disabled +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. 
+func UseLogger(logger btclog.Logger) { + sphxLog = logger +} + +// logClosure is used to provide a closure over expensive logging operations +// so don't have to be performed when the logging level doesn't warrant it. +type logClosure func() string + +// String invokes the underlying function and returns the result. +func (c logClosure) String() string { + return c() +} + +// newLogClosure returns a new closure over a function that returns a string +// which itself provides a Stringer interface so that it can be used with the +// logging system. +func newLogClosure(c func() string) logClosure { + return logClosure(c) +} From e20d68856ad8f999272145885eccd38f207e88e1 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 1 Feb 2018 14:08:45 -0800 Subject: [PATCH 18/23] bench_test: modify bench test to restart decayed log --- bench_test.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bench_test.go b/bench_test.go index a06b39f..5d3a19a 100644 --- a/bench_test.go +++ b/bench_test.go @@ -70,11 +70,18 @@ func BenchmarkProcessPacket(b *testing.B) { for i := 0; i < b.N; i++ { pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil) if err != nil { - b.Fatalf("unable to process packet: %v", err) + b.Fatalf("unable to process packet %d: %v", i, err) } b.StopTimer() - shutdown("0", path[0].log) + router := path[0] + shutdown("0", router.log) + path[0] = &Router{ + nodeID: router.nodeID, + nodeAddr: router.nodeAddr, + onionKey: router.onionKey, + log: NewDecayedLog("0", nil), + } path[0].log.Start() b.StartTimer() } From 8af0072712b76f83a3519c529f63e23e4a8ccd3d Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 1 Feb 2018 14:09:07 -0800 Subject: [PATCH 19/23] sphinx_test: reorder to close log before removing directory --- sphinx_test.go | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/sphinx_test.go b/sphinx_test.go index 6d9d577..b1002dd 100644 --- a/sphinx_test.go +++ b/sphinx_test.go @@ -184,8 +184,8 @@ func TestBolt4Packet(t *testing.T) { // shutdown deletes the temporary directory that the test database uses // and handles closing the database. func shutdown(dir string, d ReplayLog) { - os.RemoveAll(dir) d.Stop() + os.RemoveAll(dir) } func TestSphinxCorrectness(t *testing.T) { @@ -205,7 +205,7 @@ func TestSphinxCorrectness(t *testing.T) { hop := nodes[i] t.Logf("Processing at hop: %v \n", i) - onionPacket, err := hop.ProcessOnionPacket(fwdMsg, nil) + onionPacket, err := hop.ProcessOnionPacket(fwdMsg, nil, uint32(i)+1) if err != nil { t.Fatalf("Node %v was unable to process the "+ "forwarding message: %v", i, err) @@ -268,7 +268,7 @@ func TestSphinxSingleHop(t *testing.T) { // Simulating a direct single-hop payment, send the sphinx packet to // the destination node, making it process the packet fully. - processedPacket, err := nodes[0].ProcessOnionPacket(fwdMsg, nil) + processedPacket, err := nodes[0].ProcessOnionPacket(fwdMsg, nil, 1) if err != nil { t.Fatalf("unable to process sphinx packet: %v", err) } @@ -295,13 +295,13 @@ func TestSphinxNodeRelpay(t *testing.T) { // Allow the node to process the initial packet, this should proceed // without any failures. - if _, err := nodes[0].ProcessOnionPacket(fwdMsg, nil); err != nil { + if _, err := nodes[0].ProcessOnionPacket(fwdMsg, nil, 1); err != nil { t.Fatalf("unable to process sphinx packet: %v", err) } // Now, force the node to process the packet a second time, this should // fail with a detected replay error. 
- if _, err := nodes[0].ProcessOnionPacket(fwdMsg, nil); err != ErrReplayedPacket { + if _, err := nodes[0].ProcessOnionPacket(fwdMsg, nil, 1); err != ErrReplayedPacket { t.Fatalf("sphinx packet replay should be rejected, instead error is %v", err) } } @@ -322,14 +322,14 @@ func TestSphinxNodeRelpaySameBatch(t *testing.T) { // Allow the node to process the initial packet, this should proceed // without any failures. - if err := tx.ProcessOnionPacket(0, fwdMsg, nil); err != nil { + if err := tx.ProcessOnionPacket(0, fwdMsg, nil, 1); err != nil { t.Fatalf("unable to process sphinx packet: %v", err) } // Now, force the node to process the packet a second time, this call // should not fail, even though the batch has internally recorded this // as a duplicate. - err = tx.ProcessOnionPacket(1, fwdMsg, nil) + err = tx.ProcessOnionPacket(1, fwdMsg, nil, 1) if err != nil { t.Fatalf("adding duplicate sphinx packet to batch should not "+ "result in an error, instead got: %v", err) @@ -368,7 +368,7 @@ func TestSphinxNodeRelpayLaterBatch(t *testing.T) { // Allow the node to process the initial packet, this should proceed // without any failures. - if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil { + if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil, 1); err != nil { t.Fatalf("unable to process sphinx packet: %v", err) } @@ -381,7 +381,7 @@ func TestSphinxNodeRelpayLaterBatch(t *testing.T) { // Now, force the node to process the packet a second time, this should // fail with a detected replay error. - err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil) + err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil, 1) if err != nil { t.Fatalf("sphinx packet replay should not have been rejected, "+ "instead error is %v", err) @@ -413,7 +413,7 @@ func TestSphinxNodeRelpayBatchIdempotency(t *testing.T) { // Allow the node to process the initial packet, this should proceed // without any failures. - if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil); err != nil { + if err := tx.ProcessOnionPacket(uint16(0), fwdMsg, nil, 1); err != nil { t.Fatalf("unable to process sphinx packet: %v", err) } @@ -426,7 +426,7 @@ func TestSphinxNodeRelpayBatchIdempotency(t *testing.T) { // Now, force the node to process the packet a second time, this should // not fail with a detected replay error. 
- err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil) + err = tx2.ProcessOnionPacket(uint16(0), fwdMsg, nil, 1) if err != nil { t.Fatalf("sphinx packet replay should not have been rejected, "+ "instead error is %v", err) @@ -460,7 +460,8 @@ func TestSphinxAssocData(t *testing.T) { nodes[0].log.Start() defer shutdown("0", nodes[0].log) - if _, err := nodes[0].ProcessOnionPacket(fwdMsg, []byte("somethingelse")); err == nil { + _, err = nodes[0].ProcessOnionPacket(fwdMsg, []byte("somethingelse"), 1) + if err == nil { t.Fatalf("we should fail when associated data changes") } From fc276f42718e572279fbafc00db7b3b68437dcef Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 1 Feb 2018 14:08:13 -0800 Subject: [PATCH 20/23] sphinx: linear packet construction via cached blinding factors --- sphinx.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sphinx.go b/sphinx.go index 9dba3cb..3eae1c0 100644 --- a/sphinx.go +++ b/sphinx.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "io" "io/ioutil" - "math" "math/big" "github.com/aead/chacha20" From b2bbddee62aafc876c2db9fa5044da93fb525ff1 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Fri, 2 Feb 2018 21:27:54 -0800 Subject: [PATCH 21/23] sphinx: adds ReconstructOnionPacket for deriving packets w/o replay check --- sphinx.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/sphinx.go b/sphinx.go index 3eae1c0..05ec024 100644 --- a/sphinx.go +++ b/sphinx.go @@ -707,6 +707,21 @@ func (r *Router) ProcessOnionPacket(onionPkt *OnionPacket, return packet, nil } +// ReconstructOnionPacket rederives the subsequent onion packet. +// NOTE: This method does not do any sort of replay protection, and should only +// be used to reconstruct packets that were successfully processed previously. +func (r *Router) ReconstructOnionPacket(onionPkt *OnionPacket, + assocData []byte) (*ProcessedPacket, error) { + + // Compute the shared secret for this onion packet. + sharedSecret, err := r.generateSharedSecret(onionPkt.EphemeralKey) + if err != nil { + return nil, err + } + + return processOnionPacket(onionPkt, &sharedSecret, assocData) +} + // processOnionPacket performs the primary key derivation and handling of onion // packets. The processed packets returned from this method should only be used // if the packet was not flagged as a replayed packet. 
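For context, here is a minimal sketch of how a caller might drive the two
entry points above. The router r, packet pkt, associated data, and the
forward handler are assumed placeholders, not part of the patch series:

	func handleOnion(r *sphinx.Router, pkt *sphinx.OnionPacket,
		assocData []byte, incomingCltv uint32) error {

		// First pass: full replay protection. The hash prefix of the
		// shared secret is atomically checked against, and inserted
		// into, the on-disk replay log; a duplicate yields
		// ErrReplayedPacket.
		processed, err := r.ProcessOnionPacket(pkt, assocData, incomingCltv)
		if err != nil {
			return err
		}
		forward(processed) // assumed downstream handler

		// After a restart, an already accepted packet can be rederived
		// without consulting the replay log again.
		reprocessed, err := r.ReconstructOnionPacket(pkt, assocData)
		if err != nil {
			return err
		}
		forward(reprocessed)

		return nil
	}
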
From d74165099514cf080c645163fc486ca7d91151ef Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Tue, 20 Feb 2018 23:21:59 -0800 Subject: [PATCH 22/23] glide: remove channeldb dep --- glide.lock | 21 +++++++-------------- glide.yaml | 1 - 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/glide.lock b/glide.lock index 106189c..dd8c50b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 02ca2623123671074881b4e61e5b0bc0805530e1a68ae23bf7a5d67e04b7b326 -updated: 2017-10-11T17:39:22.162242846-04:00 +hash: 2d47f5b9766af60984cadf5ba0ba9a21e085e4715cafcbdae080a7b620c38b0d +updated: 2018-02-20T23:10:22.589085-08:00 imports: - name: github.com/aead/chacha20 version: d31a916ded42d1640b9d89a26f8abd53cc96790c @@ -16,36 +16,29 @@ imports: - name: github.com/go-errors/errors version: 8fa88b06e5974e97fbf9899a7f86a344bfd1f105 - name: github.com/lightningnetwork/lnd - version: a314e661bd1fd4ed2aeeb14db5f2d68b1b424e2b + version: 1c3dbb25434ef9f4d3dedc226dea41755e1621e7 subpackages: - chainntnfs - - channeldb - - lnwire - - shachain - name: github.com/roasbeef/btcd - version: 4b411f0e78f1faa96ae650d886170d018c1835bf + version: e6807bc4dd5ddbb95b4ab163f6dd61e4ad79463a subpackages: - btcec - chaincfg - chaincfg/chainhash - wire - name: github.com/roasbeef/btcutil - version: 1584022350b4400b511beab6a013f0189adeef40 + version: c3ff179366044979fb9856c2feb79bd4c2184c7a subpackages: - base58 - bech32 - name: golang.org/x/crypto - version: 459e26527287adbc2adcc5d0d49abff9a5f315a7 + version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 subpackages: - ripemd160 - name: golang.org/x/sys - version: b6e1ae21643682ce023deb8d152024597b0e9bb4 + version: ab9e364efd8b52800ff7ee48a9ffba4e0ed78dfb subpackages: - unix -- name: google.golang.org/grpc - version: b3ddf786825de56a4178401b7e174ee332173b66 - subpackages: - - codes testImports: - name: github.com/davecgh/go-spew version: 346938d642f2ec3594ed81d874461961cd0faa76 diff --git a/glide.yaml b/glide.yaml index c058d8e..16da9db 100644 --- a/glide.yaml +++ b/glide.yaml @@ -7,7 +7,6 @@ import: - package: github.com/lightningnetwork/lnd subpackages: - chainntnfs - - channeldb - package: github.com/roasbeef/btcd subpackages: - btcec From 7291f19ac6517199dee681ff1f75a7c7a8e974d1 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Tue, 20 Feb 2018 23:23:12 -0800 Subject: [PATCH 23/23] bench_test: pass incoming cltv to ProcessOnionPacket --- bench_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bench_test.go b/bench_test.go index 5d3a19a..23d7b20 100644 --- a/bench_test.go +++ b/bench_test.go @@ -68,7 +68,7 @@ func BenchmarkProcessPacket(b *testing.B) { pkt *ProcessedPacket ) for i := 0; i < b.N; i++ { - pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil) + pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil, uint32(i)) if err != nil { b.Fatalf("unable to process packet %d: %v", i, err) }
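
Taken together, the batched interface introduced in this series can be driven
roughly as follows. This is a sketch only; the batch ID, the pkts slice, the
incomingCltv value, and the forward handler are assumed placeholders:

	func handleBatch(r *sphinx.Router, pkts []*sphinx.OnionPacket,
		incomingCltv uint32) error {

		// Stage every packet under its sequence number. Staging never
		// reports replays directly; they surface in the replay set.
		tx := r.BeginTxn([]byte("batch-id"), len(pkts))
		for i, pkt := range pkts {
			err := tx.ProcessOnionPacket(uint16(i), pkt, nil, incomingCltv)
			if err != nil {
				return err
			}
		}

		// Commit atomically writes the batch to the replay log.
		// Re-running Commit with the same batch ID returns the stored
		// replay set, making the operation idempotent.
		processed, replays, err := tx.Commit()
		if err != nil {
			return err
		}

		// Only act on packets that were not flagged as replays.
		for i := range pkts {
			if replays.Contains(uint16(i)) {
				continue
			}
			forward(processed[i])
		}

		return nil
	}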