Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
5aef6ab
sphinx: replay protection and garbage collector
Sep 21, 2017
f0fc2e7
formatting: correctly pass DecayedLog, changed Put
Sep 22, 2017
9de21d4
persistlog: fix garbage collector, Get, and tests
Crypt-iQ Oct 12, 2017
56b02b9
small cltv fix
Crypt-iQ Oct 12, 2017
22d2642
small test fix
Crypt-iQ Oct 12, 2017
df442be
persistlog: persistent GC, cleaned tests, batched writes
Crypt-iQ Oct 12, 2017
a287245
Off by 1 & gofmt
Crypt-iQ Oct 12, 2017
3466252
persistlog: reverting incorrect GC, test changes
Crypt-iQ Oct 19, 2017
efd8a78
persistlog: move to main sphinx package
cfromknecht Jan 12, 2018
e0cb955
decayedlog+decayedlog_test: clean up decayedlog and add PutBatch
cfromknecht Jan 12, 2018
542572c
obfuscation: refactor to use Hash256 in fn signatures
cfromknecht Jan 13, 2018
67a4f4a
batch: adds Batch helper object for transaction construction
cfromknecht Jan 13, 2018
814a002
replay_set: adds ReplaySet to store outcome of batched processing
cfromknecht Jan 13, 2018
d1efa2d
bench_test: use minor API modifications to ReplayLog
cfromknecht Jan 13, 2018
e926602
sphinx: adds batched processing of onion pkts via Tx
cfromknecht Jan 13, 2018
cb6fb59
sphinx_test: adds batched processing unit tests
cfromknecht Jan 13, 2018
5a2e687
log: introduces the SPHX subsystem logger
cfromknecht Jan 31, 2018
e20d688
bench_test: modify bench test to restart decayed log
cfromknecht Feb 1, 2018
8af0072
sphinx_test: reorder to close log before removing directory
cfromknecht Feb 1, 2018
fc276f4
sphinx: linear packet construction via cached blinding factors
cfromknecht Feb 1, 2018
b2bbdde
sphinx: adds ReconstructOnionPacket for deriving packets w/o replay c…
cfromknecht Feb 3, 2018
d741650
glide: remove channeldb dep
cfromknecht Feb 21, 2018
7291f19
bench_test: pass incoming cltv to ProcessOnionPacket
cfromknecht Feb 21, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 89 additions & 0 deletions batch.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
package sphinx

import "errors"

// ErrAlreadyCommitted signals that an entry could not be added to the
// batch because it has already been persisted. Returned by Batch.Put once
// the batch's isCommitted flag has been set.
var ErrAlreadyCommitted = errors.New("cannot add to batch after committing")

// Batch is an object used to incrementally construct a set of entries to add to
// the replay log. After construction is completed, it can be added to the log
// using the PutBatch method.
type Batch struct {
	// isCommitted denotes whether or not this batch has been successfully
	// written to disk. Once set, further calls to Put are rejected with
	// ErrAlreadyCommitted.
	isCommitted bool

	// id is a unique, caller chosen identifier for this batch.
	id []byte

	// entries stores the set of all potential entries that might get
	// written to the replay log. Some entries may be skipped after
	// examining the on-disk content at the time of commit.
	entries map[uint16]batchEntry

	// replayCache is an in memory lookup-table, which stores the hash
	// prefix of entries already added to this batch. This allows a quick
	// mechanism for intra-batch duplicate detection.
	replayCache map[HashPrefix]struct{}

	// replaySet contains the sequence numbers of all entries that were
	// detected as replays. The set is finalized upon writing the batch to
	// disk, and merges replays detected by the replay cache and on-disk
	// replay log.
	replaySet *ReplaySet
}

// NewBatch initializes an object for constructing a set of entries to
// atomically add to a replay log. A batch is identified by the caller-chosen
// byte slice id, which makes reprocessing the same batch a safe, idempotent
// operation.
func NewBatch(id []byte) *Batch {
	var b Batch
	b.id = id
	b.entries = make(map[uint16]batchEntry)
	b.replayCache = make(map[HashPrefix]struct{})
	b.replaySet = NewReplaySet()

	return &b
}

// Put inserts a hash-prefix/CLTV pair into the current batch. The only error
// returned is ErrAlreadyCommitted, signaling that the batch was already
// written to disk. Whether a particular sequence number is a replay is
// ultimately reported via the batch's ReplaySet after committing to disk.
func (b *Batch) Put(seqNum uint16, hashPrefix *HashPrefix, cltv uint32) error {
	// Reject any modification once the batch has been persisted.
	if b.isCommitted {
		return ErrAlreadyCommitted
	}

	// A hash prefix seen earlier within this same batch marks this index
	// as an intra-batch replay; record the sequence number and skip the
	// entry.
	if _, seen := b.replayCache[*hashPrefix]; seen {
		b.replaySet.Add(seqNum)
		return nil
	}

	// First occurrence of this hash prefix in the batch: queue the entry
	// for the commit phase, where each entry is checked once more against
	// the on-disk log for prefixes persisted by earlier batches.
	b.entries[seqNum] = batchEntry{
		hashPrefix: *hashPrefix,
		cltv:       cltv,
	}

	// Remember the prefix so subsequent Put calls can detect duplicates
	// within this batch.
	b.replayCache[*hashPrefix] = struct{}{}

	return nil
}

// batchEntry is a tuple of a secret's hash prefix and the corresponding CLTV at
// which the onion blob from which the secret was derived expires.
type batchEntry struct {
	// hashPrefix is the hash prefix of the shared secret for this entry.
	hashPrefix HashPrefix

	// cltv is the expiry height of the onion blob from which the secret
	// was derived.
	cltv uint32
}
16 changes: 13 additions & 3 deletions bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,19 +60,29 @@ func BenchmarkProcessPacket(b *testing.B) {
b.Fatalf("unable to create test route: %v", err)
}
b.ReportAllocs()
path[0].log.Start()
defer shutdown("0", path[0].log)
b.StartTimer()

var (
pkt *ProcessedPacket
)
for i := 0; i < b.N; i++ {
pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil)
pkt, err = path[0].ProcessOnionPacket(sphinxPacket, nil, uint32(i))
if err != nil {
b.Fatalf("unable to process packet: %v", err)
b.Fatalf("unable to process packet %d: %v", i, err)
}

b.StopTimer()
path[0].seenSecrets = make(map[[sharedSecretSize]byte]struct{})
router := path[0]
shutdown("0", router.log)
path[0] = &Router{
nodeID: router.nodeID,
nodeAddr: router.nodeAddr,
onionKey: router.onionKey,
log: NewDecayedLog("0", nil),
}
path[0].log.Start()
b.StartTimer()
}

Expand Down
2 changes: 1 addition & 1 deletion cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ func main() {
}

privkey, _ := btcec.PrivKeyFromBytes(btcec.S256(), binKey)
s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params)
s := sphinx.NewRouter(privkey, &chaincfg.TestNet3Params, nil)

var packet sphinx.OnionPacket
err = packet.Decode(bytes.NewBuffer(binMsg))
Expand Down
Loading