diff --git a/.circleci/config.yml b/.circleci/config.yml index 6d24eca1f2..998748fae6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,16 +25,17 @@ jobs: name: Get goveralls command: | go get github.com/mattn/goveralls - cd $GOPATH/src/github.com/Masterminds/glide - git checkout tags/v0.13.1 - go install - run: name: Get the dependencies command: | glide install - run: name: Build and install the executable - command: go install -v + command: go install -v && cp -r $GOPATH/ /tmp + + - persist_to_workspace: + root: /tmp + paths: go - save_cache: key: v2-factomd-go-build-cache-{{ checksum "glide.lock" }} @@ -42,104 +43,44 @@ jobs: - vendor test: - working_directory: /go/src/github.com/FactomProject/factomd + working_directory: /tmp docker: - image: circleci/golang:1.10 - steps: - - checkout - - - restore_cache: - keys: - - v2-factomd-go-build-cache-{{ checksum "glide.lock" }} + parallelism: 8 - - run: - name: Get glide - command: | - go get -v github.com/Masterminds/glide - cd $GOPATH/src/github.com/Masterminds/glide - git checkout tags/v0.13.1 - go install - - run: - name: Get goveralls - command: | - go get github.com/mattn/goveralls - cd $GOPATH/src/github.com/Masterminds/glide - git checkout tags/v0.13.1 - go install - - run: - name: Get the dependencies - command: | - glide install - - run: - name: Build and install the executable - command: go install -v + steps: + - attach_workspace: + at: /tmp - run: - name: Run tests! + name: Run Tests! 
no_output_timeout: 2400 - command: ./test.sh - - - - save_cache: - key: v2-factomd-go-build-cache-{{ checksum "glide.lock" }} - paths: - - vendor + command: | + export PATH="/tmp/go/bin:$PATH" + export GOPATH=/tmp/go + cd /tmp/go/src/github.com/FactomProject/factomd/ + ./test.sh coveralls: - working_directory: /go/src/github.com/FactomProject/factomd + working_directory: /tmp docker: - image: circleci/golang:1.10 steps: - - checkout - - - restore_cache: - keys: - - v2-factomd-go-build-cache-{{ checksum "glide.lock" }} - - - run: - name: Get glide - command: | - go get -v github.com/Masterminds/glide - cd $GOPATH/src/github.com/Masterminds/glide - git checkout tags/v0.13.1 - go install - - run: - name: Get goveralls - command: | - go get github.com/mattn/goveralls - cd $GOPATH/src/github.com/Masterminds/glide - git checkout tags/v0.13.1 - go install - - run: - name: Get the dependencies - command: | - glide install - - run: - name: Build and install the executable - command: go install -v + - attach_workspace: + at: /tmp - run: name: Go Test with Coverage no_output_timeout: 2400 - command: go test $(glide nv | grep -v Utilities | grep -v LongTests | grep -v simTest) -vet=off -v -cover -coverprofile=coverage.out - - - run: - name: Coveralls! - no_output_timeout: 2400 - command: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken=$COVERALLS_TOKEN - -# - run: -# name: Coveralls! 
-# no_output_timeout: 2400 -# command: goveralls -v -flags "-vet=off" -ignore=$(paste -sd, .coverignore) -service=circle-ci -repotoken=$COVERALLS_TOKEN - - - - save_cache: - key: v2-factomd-go-build-cache-{{ checksum "glide.lock" }} - paths: - - vendor + command: | + export PATH="/tmp/go/bin:$PATH" + export GOPATH=/tmp/go + cd /tmp/go/src/github.com/FactomProject/factomd/ + go test $(glide nv | grep -v Utilities | grep -v longTest | grep -v peerTest | grep -v simTest | grep '...') -vet=off -v -cover -coverprofile=coverage.out + goveralls -coverprofile=coverage.out -service=circle-ci -repotoken=$COVERALLS_TOKEN + bash -c "exit 0" # coverage should never fail # Docker builds diff --git a/.dockerignore b/.dockerignore index 6dab441dfe..896adae39d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,3 @@ # Don't copy the glide vendor directory -vendor \ No newline at end of file +vendor +.sim diff --git a/.gitignore b/.gitignore index 8c9a4e6949..2f69ca1d63 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +# Special build files +engine/overrideversion.go # Executable factomd # Compiled Object files, Static and Dynamic libs (Shared Objects) @@ -53,3 +55,4 @@ _testmain.go # Vendoring vendor/ +.sim diff --git a/CLA b/CLA index 93f28b36e4..3e0695be6d 100644 --- a/CLA +++ b/CLA @@ -43,3 +43,4 @@ Niels Klomp nklomp Clay Douglass factom-clay Factom Inc. Sander Postma sanderPostma AtomicVoid, Sphereon BV, BIF Tor Hogne Paulsen Tor Hogne The 42nd Factoid AS, TFA Factom Protocol Guide +Steven Masley Emyrk Factom Inc. 
diff --git a/README.md b/README.md index 139ea37afe..7052a3c7c2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ # Factom -[![Build Status](https://travis-ci.org/FactomProject/factomd.svg?branch=develop)](https://travis-ci.org/FactomProject/factomd) [![CircleCI](https://circleci.com/gh/FactomProject/factomd/tree/develop.svg?style=shield)](https://circleci.com/gh/FactomProject/factomd/tree/develop) Factom is an Open-Source project that provides a way to build applications on the Bitcoin blockchain. @@ -67,89 +66,186 @@ To get the current list of flags, type the command: Which will get you something like: - //////////////////////// Copyright 2017 Factom Foundation - //////////////////////// Use of this source code is governed by the MIT - //////////////////////// license that can be found in the LICENSE file. - Go compiler version: go1.6.2 - Using build: - len(Args) 2 + Command Line Arguments: + -h Usage of factomd: + -balancehash + If false, then don't pass around balance hashes (default true) -blktime int - Seconds per block. Production is 600. + Seconds per block. Production is 600. + -broadcastnum int + Number of peers to broadcast to in the peer to peer networking (default 16) + -checkheads + Enables checking chain heads on boot (default true) -clonedb string - Override the main node and use this database for the clones in a Network. + Override the main node and use this database for the clones in a Network. + -config string + Override the config file location (factomd.conf) + -controlpanelport int + Port for control panel webserver; Default 8090 + -controlpanelsetting string + Can set to 'disabled', 'readonly', or 'readwrite' to overwrite config file -count int - The number of nodes to generate (default 1) + The number of nodes to generate (default 1) + -customnet string + This string specifies a custom blockchain network ID. 
-db string - Override the Database in the Config file and use this Database implementation + Override the Database in the Config file and use this Database implementation. Options Map, LDB, or Bolt + -deadline int + Timeout Delay in milliseconds used on Reads and Writes to the network comm (default 1000) + -debugconsole string + Enable DebugConsole on port. localhost:8093 open 8093 and spawns a telnet console, remotehost:8093 open 8093 + -debuglog string + regex to pick which logs to save -drop int - Number of messages to drop out of every thousand + Number of messages to drop out of every thousand + -enablenet + Enable or disable networking (default true) -exclusive - If true, we only dial out to special/trusted peers. - -folder string - Directory in .factom to store nodes. (eg: multiple nodes on one filesystem support) + If true, we only dial out to special/trusted peers. + -exclusive_in + If true, we only dial out to special/trusted peers and no incoming connections are accepted. + -exposeprofiler + Setting this exposes the profiling port to outside localhost. + -factomhome string + Set the Factom home directory. The .factom folder will be placed here if set, otherwise it will default to $HOME + -fast + If true, Factomd will fast-boot from a file. (default true) + -fastlocation string + Directory to put the Fast-boot file in. + -fastsaverate int + Save a fastboot file every so many blocks. Should be > 1000 for live systems. (default 1000) + -faulttimeout int + Seconds before considering Federated servers at-fault. Default is 120. (default 120) + -fixheads + If --checkheads is enabled, then this will also correct any errors reported (default true) + -fnet string + Read the given file to build the network connections -follower - If true, force node to be a follower. Only used when replaying a journal. + If true, force node to be a follower. Only used when replaying a journal. 
+ -fullhasheslog + true create a log of all unique hashes seen during processing -journal string - Rerun a Journal of messages + Rerun a Journal of messages + -journaling + Write a journal of all messages received. Default is off. + -keepmismatch + If true, do not discard DBStates even when a majority of DBSignatures have a different hash -leader - If true, force node to be a leader. Only used when replaying a journal. (default true) + If true, force node to be a leader. Only used when replaying a journal. (default true) + -logPort string + Port for pprof logging (default "6060") + -logjson + Use to set logging to use a json formatting + -loglvl string + Set log level to either: none, debug, info, warning, error, fatal or panic (default "none") + -logstash + If true, use Logstash + -logurl string + Endpoint URL for Logstash (default "localhost:8345") + -mpr int + Set the Memory Profile Rate to update profiling per X bytes allocated. Default 512K, set to 1 to profile everything, 0 to disable. (default 524288) -net string - The default algorithm to build the network connections (default "tree") + The default algorithm to build the network connections (default "tree") + -network string + Network to join: MAIN, TEST or LOCAL + -networkport int + Port for p2p network; default 8110 -node int - Node Number the simulator will set as the focus - -p2pPort string - Port to listen for peers on. (default "8108") + Node Number the simulator will set as the focus + -nodename string + Assign a name to the node -peers string - Array of peer addresses. + Array of peer addresses. + -plugin string + Input the path to any plugin binaries -port int - Address to serve WSAPI on + Port where we serve WSAPI; default 8088 -prefix string - Prefix the Factom Node Names with this value; used to create leaderless networks. - -profile string - If true, turn on the go Profiler to profile execution of Factomd + Prefix the Factom Node Names with this value; used to create leaderless networks. 
+ -reparseanchorchains + If true, reparse bitcoin and ethereum anchor chains in the database -rotate - If true, responsiblity is owned by one leader, and rotated over the leaders. + If true, responsibility is owned by one leader, and Rotated over the leaders. + -roundtimeout int + Seconds before audit servers will increment rounds and volunteer. (default 30) + -rpcpass string + Password to protect factomd local API. Ignored if rpcuser is blank + -rpcuser string + Username to protect factomd local API with simple HTTP authentication -runtimeLog - If true, maintain runtime logs of messages passed. - -test.bench string - regular expression to select benchmarks to run + If true, maintain runtime logs of messages passed. + -selfaddr string + comma separated IPAddresses and DNS names of this factomd to use when creating a cert file + -sim_stdin + If true, sim control reads from stdin. (default true) + -startdelay int + Delay to start processing messages, in seconds (default 10) + -stderrlog string + Log stderr to a file, optionally the same file as stdout + -stdoutlog string + Log stdout to a file + -sync2 int + Set the initial blockheight for the second Sync pass. Used to force a total sync, or skip unnecessary syncing of entries. 
(default -1) + -test.bench regexp + run only benchmarks matching regexp -test.benchmem - print memory allocations for benchmarks - -test.benchtime duration - approximate run time for each benchmark (default 1s) - -test.blockprofile string - write a goroutine blocking profile to the named file after execution - -test.blockprofilerate int - if >= 0, calls runtime.SetBlockProfileRate() (default 1) + print memory allocations for benchmarks + -test.benchtime d + run each benchmark for duration d (default 1s) + -test.blockprofile file + write a goroutine blocking profile to file + -test.blockprofilerate rate + set blocking profile rate (see runtime.SetBlockProfileRate) (default 1) -test.count n - run tests and benchmarks n times (default 1) - -test.coverprofile string - write a coverage profile to the named file after execution - -test.cpu string - comma-separated list of number of CPUs to use for each test - -test.cpuprofile string - write a cpu profile to the named file during execution - -test.memprofile string - write a memory profile to the named file after execution - -test.memprofilerate int - if >=0, sets runtime.MemProfileRate - -test.outputdir string - directory in which to write profiles - -test.parallel int - maximum test parallelism (default 1) - -test.run string - regular expression to select tests and examples to run + run tests and benchmarks n times (default 1) + -test.coverprofile file + write a coverage profile to file + -test.cpu list + comma-separated list of cpu counts to run each test with + -test.cpuprofile file + write a cpu profile to file + -test.failfast + do not start new tests after the first test failure + -test.list regexp + list tests, examples, and benchmarks matching regexp then exit + -test.memprofile file + write an allocation profile to file + -test.memprofilerate rate + set memory allocation profiling rate (see runtime.MemProfileRate) + -test.mutexprofile string + write a mutex contention profile to the named file after execution + 
-test.mutexprofilefraction int + if >= 0, calls runtime.SetMutexProfileFraction() (default 1) + -test.outputdir dir + write profiles to dir + -test.parallel n + run at most n tests in parallel (default 8) + -test.run regexp + run only tests and examples matching regexp -test.short - run smaller test suite to save time - -test.timeout duration - if positive, sets an aggregate time limit for all tests - -test.trace string - write an execution trace to the named file after execution + run smaller test suite to save time + -test.testlogfile file + write test action log to file (for use only by cmd/go) + -test.timeout d + panic test binary after duration d (default 0, timeout disabled) + -test.trace file + write an execution trace to file -test.v - verbose: print additional output + verbose: print additional output -timedelta int - Maximum timeDelta in milliseconds to offset each node. Simulates deltas in system clocks over a network. + Maximum timeDelta in milliseconds to offset each node. Simulates deltas in system clocks over a network. + -tls + Set to true to require encrypted connections to factomd API and Control Panel + -tormanage + Use torrent dbstate manager. Must have plugin binary installed and in $PATH + -torupload + Be a torrent uploader + -waitentries + Wait for Entries to be validated prior to execution of messages + -wrproc + Write processed blocks to temporary debug file (default true) + The flags that begin with "test." are supplied by the profiling package installed. The flags that relate to running factomd and the simulator are the following, with a little more explaination. That follows below. 
diff --git a/Utilities/BalanceFinder/main.gox b/Utilities/BalanceFinder/main.go similarity index 92% rename from Utilities/BalanceFinder/main.gox rename to Utilities/BalanceFinder/main.go index 8cca63ad0d..70df8a850c 100644 --- a/Utilities/BalanceFinder/main.gox +++ b/Utilities/BalanceFinder/main.go @@ -59,6 +59,7 @@ func main() { fmt.Println("Usage:") fmt.Println("BalanceFinder level/bolt/api DBFileLocation") fmt.Println("Program will find balances") + fmt.Println("All balance hashes cannot be compared to those presented in factomd. Only compared to others made by the tool.") if len(flag.Args()) < 2 { fmt.Println("\nNot enough arguments passed") @@ -180,8 +181,11 @@ func FindBalance(reader tools.Fetcher) (map[[32]byte]int64, map[[32]byte]int64, // Print the balance hash if heightmap[i] == true { { - h1 := state.GetMapHash(i, fctAddressMap) - h2 := state.GetMapHash(i, ecAddressMap) + // The dbheight was removed from the call, and the balance hash is + // added in another piece of code that we cannot easily access from this tool. + // So this tool's hashes can only be compared to other hashes made by this tool. + h1 := state.GetMapHash(fctAddressMap) + h2 := state.GetMapHash(ecAddressMap) var b []byte b = append(b, h1.Bytes()...) 
diff --git a/Utilities/CorrectChainHeads/main.go b/Utilities/CorrectChainHeads/main.go index ba6df06e27..e2a66ee6a7 100644 --- a/Utilities/CorrectChainHeads/main.go +++ b/Utilities/CorrectChainHeads/main.go @@ -1,7 +1,6 @@ package main import ( - "encoding/hex" "flag" "fmt" "os" @@ -9,12 +8,8 @@ import ( "sync/atomic" "time" - "github.com/FactomProject/factom" - "github.com/FactomProject/factomd/common/directoryBlock" + "github.com/FactomProject/factomd/Utilities/tools" "github.com/FactomProject/factomd/common/interfaces" - "github.com/FactomProject/factomd/common/primitives" - "github.com/FactomProject/factomd/database/databaseOverlay" - "github.com/FactomProject/factomd/database/hybridDB" ) var CheckFloating bool @@ -53,10 +48,10 @@ func main() { UsingAPI = true } - var reader Fetcher + var reader tools.Fetcher if UsingAPI { - reader = NewAPIReader(flag.Args()[1]) + reader = tools.NewAPIReader(flag.Args()[1]) } else { levelBolt := flag.Args()[0] @@ -65,7 +60,7 @@ func main() { os.Exit(1) } path := flag.Args()[1] - reader = NewDBReader(levelBolt, path) + reader = tools.NewDBReader(levelBolt, path) } // dblock, err := reader.FetchDBlockHead() @@ -73,7 +68,7 @@ func main() { FindHeads(reader) } -func FindHeads(f Fetcher) { +func FindHeads(f tools.Fetcher) { chainHeads := make(map[string]interfaces.IHash) var allEblockLock sync.Mutex @@ -219,94 +214,3 @@ func FindHeads(f Fetcher) { fmt.Printf("%d Errors found checking for bad links\n", errCount) } - -type Fetcher interface { - FetchDBlockHead() (interfaces.IDirectoryBlock, error) - FetchDBlockByHeight(dBlockHeight uint32) (interfaces.IDirectoryBlock, error) - //FetchDBlock(hash interfaces.IHash) (interfaces.IDirectoryBlock, error) - FetchHeadIndexByChainID(chainID interfaces.IHash) (interfaces.IHash, error) - FetchEBlock(hash interfaces.IHash) (interfaces.IEntryBlock, error) - SetChainHeads(primaryIndexes, chainIDs []interfaces.IHash) error -} - -func NewDBReader(levelBolt string, path string) 
*databaseOverlay.Overlay { - var dbase *hybridDB.HybridDB - var err error - if levelBolt == bolt { - dbase = hybridDB.NewBoltMapHybridDB(nil, path) - } else { - dbase, err = hybridDB.NewLevelMapHybridDB(path, false) - if err != nil { - panic(err) - } - } - - dbo := databaseOverlay.NewOverlay(dbase) - return dbo -} - -type APIReader struct { - location string -} - -func NewAPIReader(loc string) *APIReader { - a := new(APIReader) - a.location = loc - factom.SetFactomdServer(loc) - - return a -} - -func (a *APIReader) SetChainHeads(primaryIndexes, chainIDs []interfaces.IHash) error { - return nil -} - -func (a *APIReader) FetchEBlock(hash interfaces.IHash) (interfaces.IEntryBlock, error) { - return nil, fmt.Errorf("Not implemented for api") -} - -func (a *APIReader) FetchDBlockHead() (interfaces.IDirectoryBlock, error) { - head, err := factom.GetDBlockHead() - if err != nil { - return nil, err - } - raw, err := factom.GetRaw(head) - if err != nil { - return nil, err - } - return rawBytesToblock(raw) -} - -func (a *APIReader) FetchDBlockByHeight(dBlockHeight uint32) (interfaces.IDirectoryBlock, error) { - raw, err := factom.GetBlockByHeightRaw("d", int64(dBlockHeight)) - if err != nil { - return nil, err - } - - return rawRespToBlock(raw.RawData) -} - -func (a *APIReader) FetchHeadIndexByChainID(chainID interfaces.IHash) (interfaces.IHash, error) { - resp, err := factom.GetChainHead(chainID.String()) - if err != nil { - return nil, err - } - return primitives.HexToHash(resp) -} - -func rawBytesToblock(raw []byte) (interfaces.IDirectoryBlock, error) { - dblock := directoryBlock.NewDirectoryBlock(nil) - err := dblock.UnmarshalBinary(raw) - if err != nil { - return nil, err - } - return dblock, nil -} - -func rawRespToBlock(raw string) (interfaces.IDirectoryBlock, error) { - by, err := hex.DecodeString(raw) - if err != nil { - return nil, err - } - return rawBytesToblock(by) -} diff --git a/Utilities/DatabaseGenerator/blockgen/generator.go 
b/Utilities/DatabaseGenerator/blockgen/generator.go index f2332d3da2..d4c0478e0e 100644 --- a/Utilities/DatabaseGenerator/blockgen/generator.go +++ b/Utilities/DatabaseGenerator/blockgen/generator.go @@ -6,6 +6,7 @@ import ( "time" "github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/identity" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/messages" "github.com/FactomProject/factomd/common/messages/electionMsgs" @@ -71,7 +72,9 @@ func NewDBGenerator(c *DBGeneratorConfig) (*DBGenerator, error) { func NewGeneratorState(conf *DBGeneratorConfig, starttime interfaces.Timestamp) *state.State { s := new(state.State) + s.TimestampAtBoot = starttime s.SetLeaderTimestamp(starttime) + s.Balancehash = primitives.NewZeroHash() var db interfaces.IDatabase var err error switch strings.ToLower(conf.DBType) { @@ -137,12 +140,15 @@ func (g *DBGenerator) loadGenesis() { g.FactomdState.DBStates.SaveDBStateToDB(sds) sds.Saved = true g.last = sds + g.FactomdState.DBStates.Last().Saved = true } // SaveDBState will save a dbstate to disk func (g *DBGenerator) SaveDBState(dbstate *state.DBState) { dbstate.ReadyToSave = true dbstate.Signed = true + dbstate.SaveStruct = new(state.SaveState) + dbstate.SaveStruct.IdentityControl = identity.NewIdentityManager() g.FactomdState.DBStates.ProcessHeight = dbstate.DirectoryBlock.GetDatabaseHeight() put := g.FactomdState.DBStates.Put(dbstate) if !put { @@ -152,6 +158,17 @@ func (g *DBGenerator) SaveDBState(dbstate *state.DBState) { if !progress { log.Warnf("%d Not saved to disk", dbstate.DirectoryBlock.GetDatabaseHeight()) } + +EntryLoop: + for { + select { + case ent := <-g.FactomdState.WriteEntry: + g.FactomdState.GetDB().InsertEntry(ent) + default: + break EntryLoop + } + } + dbstate.Saved = true g.FactomdState.DBStates.Complete = dbstate.DirectoryBlock.GetDatabaseHeight() - g.FactomdState.DBStates.Base g.FactomdState.ProcessLists.DBHeightBase = 
dbstate.DirectoryBlock.GetDatabaseHeight() @@ -236,6 +253,7 @@ func (g *DBGenerator) CreateBlocks(amt int) error { loopEntries += len(dbstate.Entries) totalEntries += len(dbstate.Entries) + g.SaveDBState(dbstate) g.last = dbstate diff --git a/Utilities/DatabaseGenerator/main.go b/Utilities/DatabaseGenerator/main.go index 07e3a9af8f..5a5660ee1b 100644 --- a/Utilities/DatabaseGenerator/main.go +++ b/Utilities/DatabaseGenerator/main.go @@ -22,7 +22,7 @@ func main() { loglvl = flag.String("loglvl", "info", "Sets log level to 'debug', 'info', 'warning', or 'error'") configfile = flag.String("config", "", "Generator config file location.") genconfig = flag.Bool("genconfig", false, "Does not run the program, but instead outputs the default config file") - profiling = flag.Bool("profile", false, "Turn on profiling on :6060") + profiling = flag.Bool("profile", true, "Turn on profiling on :6060") ) flag.IntVar(&blockcount, "b", 1000, "Number of blocks to generate") diff --git a/Utilities/DatabaseGenerator/smallblocks.yaml b/Utilities/DatabaseGenerator/smallblocks.yaml new file mode 100644 index 0000000000..9d659fbbc8 --- /dev/null +++ b/Utilities/DatabaseGenerator/smallblocks.yaml @@ -0,0 +1,21 @@ +dbpath: factoid_level.db +dbtype: level +factomdconfigpath: gen.conf +customnetid: gen +starttime: "" +loopsperprint: 200 +entrygenerator: "incr" +entrygenconfig: + entriespereblock: + min: 5 + max: 10 + entrysize: + min: 100 + max: 250 + eblocksperheight: + min: 1 + max: 5 + multithreaded: true + threadpoolcount: 8 + + diff --git a/Utilities/DatabasePorter/porter.go b/Utilities/DatabasePorter/porter.go index 53e3d9235e..8852e58409 100644 --- a/Utilities/DatabasePorter/porter.go +++ b/Utilities/DatabasePorter/porter.go @@ -191,7 +191,7 @@ func main() { } fmt.Printf("\t\tRebulding DirBlockInfo\n") - err = dbo.RebuildDirBlockInfo() + err = dbo.ReparseAnchorChains() if err != nil { panic(err) } diff --git a/Utilities/tools/fetcher.go b/Utilities/tools/fetcher.go index 
5f320da6ec..ea1d0870f6 100644 --- a/Utilities/tools/fetcher.go +++ b/Utilities/tools/fetcher.go @@ -1,13 +1,12 @@ package tools import ( - "encoding/hex" - - "fmt" - "github.com/FactomProject/factom" + "github.com/FactomProject/factomd/common/adminBlock" "github.com/FactomProject/factomd/common/directoryBlock" "github.com/FactomProject/factomd/common/entryBlock" + "github.com/FactomProject/factomd/common/entryCreditBlock" + "github.com/FactomProject/factomd/common/factoid" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/database/databaseOverlay" @@ -17,16 +16,24 @@ import ( const level string = "level" const bolt string = "bolt" -// Able to be either a datbase or api type Fetcher interface { + SetChainHeads(primaryIndexes, chainIDs []interfaces.IHash) error FetchDBlockHead() (interfaces.IDirectoryBlock, error) - FetchDBlockByHeight(dBlockHeight uint32) (interfaces.IDirectoryBlock, error) //FetchDBlock(hash interfaces.IHash) (interfaces.IDirectoryBlock, error) FetchHeadIndexByChainID(chainID interfaces.IHash) (interfaces.IHash, error) FetchEBlock(hash interfaces.IHash) (interfaces.IEntryBlock, error) - SetChainHeads(primaryIndexes, chainIDs []interfaces.IHash) error + + FetchEntry(hash interfaces.IHash) (interfaces.IEBEntry, error) + FetchDBlockByHeight(dBlockHeight uint32) (interfaces.IDirectoryBlock, error) + FetchABlockByHeight(blockHeight uint32) (interfaces.IAdminBlock, error) + FetchFBlockByHeight(blockHeight uint32) (interfaces.IFBlock, error) + FetchECBlockByHeight(blockHeight uint32) (interfaces.IEntryCreditBlock, error) + FetchECBlockByPrimary(keymr interfaces.IHash) (interfaces.IEntryCreditBlock, error) } +var _ Fetcher = (*APIReader)(nil) +var _ Fetcher = (*databaseOverlay.Overlay)(nil) + func NewDBReader(levelBolt string, path string) *databaseOverlay.Overlay { var dbase *hybridDB.HybridDB var err error @@ -59,13 +66,26 @@ func (a *APIReader) 
SetChainHeads(primaryIndexes, chainIDs []interfaces.IHash) e return nil } +func (a *APIReader) FetchEntry(hash interfaces.IHash) (interfaces.IEBEntry, error) { + raw, err := factom.GetRaw(hash.String()) + if err != nil { + return nil, err + } + + entry := entryBlock.NewEntry() + err = UnmarshalGeneric(entry, raw) + return entry, err +} + func (a *APIReader) FetchEBlock(hash interfaces.IHash) (interfaces.IEntryBlock, error) { - return nil, fmt.Errorf("Not implmented for api") - //raw, err := factom.GetRaw(hash.String()) - //if err != nil { - // return nil, err - //} - //return rawBytesToEblock(raw) + raw, err := factom.GetRaw(hash.String()) + if err != nil { + return nil, err + } + + block := entryBlock.NewEBlock() + err = UnmarshalGeneric(block, raw) + return block, err } func (a *APIReader) FetchDBlockHead() (interfaces.IDirectoryBlock, error) { @@ -77,48 +97,79 @@ func (a *APIReader) FetchDBlockHead() (interfaces.IDirectoryBlock, error) { if err != nil { return nil, err } - return rawBytesToDblock(raw) + + block := directoryBlock.NewDirectoryBlock(nil) + err = UnmarshalGeneric(block, raw) + return block, err } -func (a *APIReader) FetchDBlockByHeight(dBlockHeight uint32) (interfaces.IDirectoryBlock, error) { - raw, err := factom.GetBlockByHeightRaw("d", int64(dBlockHeight)) +func (a *APIReader) FetchDBlockByHeight(height uint32) (interfaces.IDirectoryBlock, error) { + _, data, err := factom.GetDBlockByHeight(int64(height)) if err != nil { return nil, err } - return rawRespToBlock(raw.RawData) + block := directoryBlock.NewDirectoryBlock(nil) + err = UnmarshalGeneric(block, data) + return block, err } -func (a *APIReader) FetchHeadIndexByChainID(chainID interfaces.IHash) (interfaces.IHash, error) { - resp, err := factom.GetChainHead(chainID.String()) +func (a *APIReader) FetchFBlockByHeight(height uint32) (interfaces.IFBlock, error) { + _, data, err := factom.GetFBlockByHeight(int64(height)) if err != nil { return nil, err } - return primitives.HexToHash(resp) + + 
block := factoid.NewFBlock(nil) + err = UnmarshalGeneric(block, data) + return block, err +} + +func (a *APIReader) FetchABlockByHeight(height uint32) (interfaces.IAdminBlock, error) { + _, data, err := factom.GetABlockByHeight(int64(height)) + if err != nil { + return nil, err + } + + ablock := adminBlock.NewAdminBlock(nil) + err = UnmarshalGeneric(ablock, data) + return ablock, err } -func rawBytesToEblock(raw []byte) (interfaces.IEntryBlock, error) { - eblock := entryBlock.NewEBlock() - err := eblock.UnmarshalBinary(raw) +func (a *APIReader) FetchECBlockByPrimary(keymr interfaces.IHash) (interfaces.IEntryCreditBlock, error) { + data, err := factom.GetRaw(keymr.String()) if err != nil { return nil, err } - return eblock, nil + + ecblock := entryCreditBlock.NewECBlock() + err = UnmarshalGeneric(ecblock, data) + return ecblock, err } -func rawBytesToDblock(raw []byte) (interfaces.IDirectoryBlock, error) { - dblock := directoryBlock.NewDirectoryBlock(nil) - err := dblock.UnmarshalBinary(raw) +func (a *APIReader) FetchECBlockByHeight(height uint32) (interfaces.IEntryCreditBlock, error) { + _, data, err := factom.GetECBlockByHeight(int64(height)) if err != nil { return nil, err } - return dblock, nil + + ecblock := entryCreditBlock.NewECBlock() + err = UnmarshalGeneric(ecblock, data) + return ecblock, err } -func rawRespToBlock(raw string) (interfaces.IDirectoryBlock, error) { - by, err := hex.DecodeString(raw) +func (a *APIReader) FetchHeadIndexByChainID(chainID interfaces.IHash) (interfaces.IHash, error) { + resp, _, err := factom.GetChainHead(chainID.String()) if err != nil { return nil, err } - return rawBytesToDblock(by) + return primitives.HexToHash(resp) +} + +func UnmarshalGeneric(i interfaces.BinaryMarshallable, raw []byte) error { + err := i.UnmarshalBinary(raw) + if err != nil { + return err + } + return nil } diff --git a/VERSION b/VERSION index 91e4a9f262..7849b73dc7 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -6.3.2 +6.3.3 diff --git 
a/anchor/anchorRecord.go b/anchor/anchorRecord.go index 71dc1a3526..c5ac788a77 100644 --- a/anchor/anchorRecord.go +++ b/anchor/anchorRecord.go @@ -20,9 +20,14 @@ import ( //AnchorRecord is used to construct anchor chain type AnchorRecord struct { AnchorRecordVer int - DBHeight uint32 - KeyMR string - RecordHeight uint32 //the block height we intended to put the anchorrecod into + DBHeight uint32 `json:",omitempty"` // The only directory block height included in this anchor + KeyMR string `json:",omitempty"` // Merkle root of the only directory block included in this anchor + + DBHeightMax uint32 `json:",omitempty"` // The highest directory block height included in this anchor window + DBHeightMin uint32 `json:",omitempty"` // The lowest directory block height included in this anchor window + WindowMR string `json:",omitempty"` // Merkle root of all directory block KeyMRs from DBHeightMin to DBHeightMax + + RecordHeight uint32 // Directory block height we intended to put the AnchorRecord into Bitcoin *BitcoinStruct `json:",omitempty"` Ethereum *EthereumStruct `json:",omitempty"` @@ -37,11 +42,11 @@ type BitcoinStruct struct { } type EthereumStruct struct { - Address string //0x30aa981f6d2fce81083e584c8ee2f822b548752f - TXID string //0x50ea0effc383542811a58704a6d6842ed6d76439a2d942d941896ad097c06a78 - BlockHeight int64 //293003 - BlockHash string //0x3b504616495fc9cf7be9b5b776692a9abbfb95491fa62abf62dcdf4d53ff5979 - Offset int64 //0 + ContractAddress string // Address of the Ethereum anchor contract + TxID string // Transaction ID of this particular anchor + BlockHeight int64 // Ethereum block height that this anchor was included in + BlockHash string // Hash of the Ethereum block that this anchor was included in + TxIndex int64 // Where the anchor tx is located within that block } var _ interfaces.Printable = (*AnchorRecord)(nil) diff --git a/common/constants/ack.go b/common/constants/ack.go index ba781707f3..de50c7452b 100644 --- a/common/constants/ack.go +++ 
b/common/constants/ack.go @@ -4,6 +4,14 @@ package constants +const ( + // MaxAckHeightMinuteDelta is the maximum number of minutes in the + future we will set our HighestAckHeight to. This means + 2000/10 = max number of blocks to set the max height to on top + of our current block height. + MaxAckHeightMinuteDelta = 2000 +) + // Ack status levels const ( _ int = iota diff --git a/common/constants/constants.go b/common/constants/constants.go index 158b1b6084..dc72eb6289 100644 --- a/common/constants/constants.go +++ b/common/constants/constants.go @@ -66,6 +66,15 @@ func NormallyFullBroadcast(t byte) bool { return false } +// Check if the type needs an ACK to be processed. +func NeedsAck(t byte) bool { + switch t { + case EOM_MSG, COMMIT_CHAIN_MSG, COMMIT_ENTRY_MSG, REVEAL_ENTRY_MSG, DIRECTORY_BLOCK_SIGNATURE_MSG, FACTOID_TRANSACTION_MSG, ADDSERVER_MSG, CHANGESERVER_KEY_MSG, REMOVESERVER_MSG: + return true + } + return false +} + // Election related messages are full broadcast func NormallyPeer2Peer(t byte) bool { switch t { @@ -474,4 +483,4 @@ const ( //Fast boot save state version (savestate) //To be increased whenever the data being saved changes from the last version -const SaveStateVersion = 11 +const SaveStateVersion = 12 diff --git a/common/constants/runstate/runStates.go b/common/constants/runstate/runStates.go new file mode 100644 index 0000000000..1dbae767d9 --- /dev/null +++ b/common/constants/runstate/runStates.go @@ -0,0 +1,30 @@ +package runstate + +import "fmt" + +type RunState int + +const ( + New RunState = 0 + Booting RunState = 1 + Running RunState = 2 + Stopping RunState = 3 + Stopped RunState = 4 +) + +func (runState RunState) String() string { + switch runState { + case New: + return "New" + case Booting: + return "Booting" + case Running: + return "Running" + case Stopping: + return "Stopping" + case Stopped: + return "Stopped" + default: + return fmt.Sprintf("Unknown state %d", int(runState)) + } +} diff --git 
a/common/constants/servertype/serverTypes.go b/common/constants/servertype/serverTypes.go new file mode 100644 index 0000000000..fed7970f2d --- /dev/null +++ b/common/constants/servertype/serverTypes.go @@ -0,0 +1,26 @@ +package servertype + +import ( + "github.com/FactomProject/factomd/state" +) + +type ServerType string + +const ( + Follower ServerType = "follower" + AuditServer ServerType = "audit server" + FederatedServer ServerType = "federated server" +) + +func GetServerType(list *state.ProcessList, state *state.State) ServerType { + if state.Leader { + return FederatedServer + } + + foundAudit, _ := list.GetAuditServerIndexHash(state.GetIdentityChainID()) + if foundAudit { + return AuditServer + } + + return Follower +} diff --git a/common/directoryBlock/dbInfo/dbInfo.go b/common/directoryBlock/dbInfo/dbInfo.go index 9cd7da6b3f..f072a274e4 100644 --- a/common/directoryBlock/dbInfo/dbInfo.go +++ b/common/directoryBlock/dbInfo/dbInfo.go @@ -28,6 +28,9 @@ type DirBlockInfo struct { DBMerkleRoot interfaces.IHash // A flag to to show BTC anchor confirmation BTCConfirmed bool + + EthereumAnchorRecordEntryHash interfaces.IHash + EthereumConfirmed bool } var _ interfaces.Printable = (*DirBlockInfo)(nil) @@ -48,6 +51,9 @@ func (e *DirBlockInfo) Init() { if e.DBMerkleRoot == nil { e.DBMerkleRoot = primitives.NewZeroHash() } + if e.EthereumAnchorRecordEntryHash == nil { + e.EthereumAnchorRecordEntryHash = primitives.NewZeroHash() + } } func NewDirBlockInfo() *DirBlockInfo { @@ -56,6 +62,7 @@ func NewDirBlockInfo() *DirBlockInfo { dbi.BTCTxHash = primitives.NewZeroHash() dbi.BTCBlockHash = primitives.NewZeroHash() dbi.DBMerkleRoot = primitives.NewZeroHash() + dbi.EthereumAnchorRecordEntryHash = primitives.NewZeroHash() return dbi } @@ -213,6 +220,9 @@ type dirBlockInfoCopy struct { DBMerkleRoot interfaces.IHash // A flag to to show BTC anchor confirmation BTCConfirmed bool + + EthereumAnchorRecordEntryHash interfaces.IHash + EthereumConfirmed bool } func 
newDirBlockInfoCopyFromDBI(dbi *DirBlockInfo) *dirBlockInfoCopy { @@ -226,6 +236,8 @@ func newDirBlockInfoCopyFromDBI(dbi *DirBlockInfo) *dirBlockInfoCopy { dbic.BTCBlockHash = dbi.BTCBlockHash dbic.DBMerkleRoot = dbi.DBMerkleRoot dbic.BTCConfirmed = dbi.BTCConfirmed + dbic.EthereumAnchorRecordEntryHash = dbi.EthereumAnchorRecordEntryHash + dbic.EthereumConfirmed = dbi.EthereumConfirmed return dbic } @@ -235,6 +247,7 @@ func newDirBlockInfoCopy() *dirBlockInfoCopy { dbi.BTCTxHash = primitives.NewZeroHash() dbi.BTCBlockHash = primitives.NewZeroHash() dbi.DBMerkleRoot = primitives.NewZeroHash() + dbi.EthereumAnchorRecordEntryHash = primitives.NewZeroHash() return dbi } @@ -248,6 +261,8 @@ func (dbic *DirBlockInfo) parseDirBlockInfoCopy(dbi *dirBlockInfoCopy) { dbic.BTCBlockHash = dbi.BTCBlockHash dbic.DBMerkleRoot = dbi.DBMerkleRoot dbic.BTCConfirmed = dbi.BTCConfirmed + dbic.EthereumAnchorRecordEntryHash = dbi.EthereumAnchorRecordEntryHash + dbic.EthereumConfirmed = dbi.EthereumConfirmed } // NewDirBlockInfoFromDirBlock creates a DirDirBlockInfo from DirectoryBlock @@ -260,5 +275,7 @@ func NewDirBlockInfoFromDirBlock(dirBlock interfaces.IDirectoryBlock) *DirBlockI dbi.BTCTxHash = primitives.NewZeroHash() dbi.BTCBlockHash = primitives.NewZeroHash() dbi.BTCConfirmed = false + dbi.EthereumAnchorRecordEntryHash = primitives.NewZeroHash() + dbi.EthereumConfirmed = false return dbi } diff --git a/common/entryBlock/entry.go b/common/entryBlock/entry.go index f8faf49961..3de66efc91 100644 --- a/common/entryBlock/entry.go +++ b/common/entryBlock/entry.go @@ -45,6 +45,21 @@ func RandomEntry() interfaces.IEBEntry { return e } +func DeterministicEntry(i int) interfaces.IEBEntry { + e := NewEntry() + e.Version = 0 + bs := fmt.Sprintf("%x", i) + if len(bs)%2 == 1 { + bs = "0" + bs + } + + e.ExtIDs = []primitives.ByteSlice{*primitives.StringToByteSlice(bs)} + //e.ExtIDs = append(e.ExtIDs, *primitives.StringToByteSlice(fmt.Sprintf("%d", i))) + e.ChainID = 
ExternalIDsToChainID([][]byte{e.ExtIDs[0].Bytes}) + + return e +} + func (c *Entry) IsSameAs(b interfaces.IEBEntry) bool { if b == nil { if c != nil { diff --git a/common/factoid/fblock.go b/common/factoid/fblock.go index 63282946b4..1641415bba 100644 --- a/common/factoid/fblock.go +++ b/common/factoid/fblock.go @@ -586,11 +586,9 @@ func (b *FBlock) GetExchRate() uint64 { func (b FBlock) ValidateTransaction(index int, trans interfaces.ITransaction) error { // Calculate the fee due. - { - err := trans.Validate(index) - if err != nil { - return err - } + err := trans.Validate(index) + if err != nil { + return err } //Ignore coinbase transaction's signatures diff --git a/common/globals/globals.go b/common/globals/globals.go index 15afa6c912..2724df1675 100644 --- a/common/globals/globals.go +++ b/common/globals/globals.go @@ -39,6 +39,8 @@ type FactomParams struct { RuntimeLog bool Exclusive bool ExclusiveIn bool + P2PIncoming int + P2POutgoing int Prefix string Rotate bool TimeOffset int @@ -78,6 +80,8 @@ type FactomParams struct { NodeName string FactomHome string FullHashesLog bool // Log all unique full hashes + DebugLogLocation string + ReparseAnchorChains bool } /**************************************************************** diff --git a/common/identity/authority.go b/common/identity/authority.go index 97c75df6d3..583810dad8 100644 --- a/common/identity/authority.go +++ b/common/identity/authority.go @@ -34,16 +34,16 @@ func (p AuthoritySort) Less(i, j int) bool { } type Authority struct { - AuthorityChainID interfaces.IHash `json:"identity_chainid"` - ManagementChainID interfaces.IHash `json:"management_chaind"` - MatryoshkaHash interfaces.IHash `json:"matryoshka_hash"` - SigningKey primitives.PublicKey `json:"signing_key"` + AuthorityChainID interfaces.IHash `json:"chainid"` + ManagementChainID interfaces.IHash `json:"manageid"` + MatryoshkaHash interfaces.IHash `json:"matroyshka"` + SigningKey primitives.PublicKey `json:"signingkey"` Status uint8 
`json:"status"` - AnchorKeys []AnchorSigningKey `json:"anchor_keys"` + AnchorKeys []AnchorSigningKey `json:"anchorkeys"` KeyHistory []HistoricKey `json:"-"` Efficiency uint16 `json:"efficiency"` - CoinbaseAddress interfaces.IAddress `json:"coinbase_address"` + CoinbaseAddress interfaces.IAddress `json:"coinbaseaddress"` } func NewAuthority() *Authority { @@ -385,6 +385,8 @@ func (auth *Authority) MarshalJSON() (rval []byte, err error) { SigningKey string `json:"signingkey"` Status string `json:"status"` AnchorKeys []AnchorSigningKey `json:"anchorkeys"` + Efficiency int `json:"efficiency"` + CoinbaseAddress string `json:"coinbaseaddress"` }{ AuthorityChainID: auth.AuthorityChainID, ManagementChainID: auth.ManagementChainID, @@ -392,6 +394,8 @@ func (auth *Authority) MarshalJSON() (rval []byte, err error) { SigningKey: auth.SigningKey.String(), Status: statusToJSONString(auth.Status), AnchorKeys: auth.AnchorKeys, + Efficiency: int(auth.Efficiency), + CoinbaseAddress: primitives.ConvertFctAddressToUserStr(auth.CoinbaseAddress), }) } diff --git a/common/identity/authority_test.go b/common/identity/authority_test.go index 5eb89e8e19..d1c5b5d89a 100644 --- a/common/identity/authority_test.go +++ b/common/identity/authority_test.go @@ -8,8 +8,10 @@ import ( "testing" "bytes" + "encoding/json" "github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/factoid" . 
"github.com/FactomProject/factomd/common/identity" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/messages" @@ -32,7 +34,6 @@ func TestAuthorityType(t *testing.T) { if auth.Type() != 0 { t.Errorf("Invalid type returned - %v", auth.Type()) } - } //func TestAuthoritySignature(t *testing.T) { @@ -255,3 +256,33 @@ func newAck(id interfaces.IHash, ts interfaces.Timestamp) *messages.Ack { return ack } + +func TestAuthorityJsonMarshal(t *testing.T) { + // Testing Human readable json marshal + a := NewAuthority() + a.CoinbaseAddress = factoid.NewAddress(make([]byte, 32)) + a.Efficiency = 100 + + data, err := a.MarshalJSON() + if err != nil { + t.Error(err) + } + + var dst bytes.Buffer + exp := ` + { + "chainid": "0000000000000000000000000000000000000000000000000000000000000000", + "manageid": "0000000000000000000000000000000000000000000000000000000000000000", + "matroyshka": "0000000000000000000000000000000000000000000000000000000000000000", + "signingkey": "0000000000000000000000000000000000000000000000000000000000000000", + "status": "none", + "anchorkeys": null, + "efficiency": 100, + "coinbaseaddress": "FA1y5ZGuHSLmf2TqNf6hVMkPiNGyQpQDTFJvDLRkKQaoPo4bmbgu" + } + ` + json.Compact(&dst, []byte(exp)) + if bytes.Compare(dst.Bytes(), data) != 0 { + t.Errorf("Does not match expected") + } +} diff --git a/common/interfaces/databaseOverlay.go b/common/interfaces/databaseOverlay.go index 516e87e75c..728b773f45 100644 --- a/common/interfaces/databaseOverlay.go +++ b/common/interfaces/databaseOverlay.go @@ -251,7 +251,10 @@ type DBOverlay interface { SaveIncludedInMultiFromBlock(block DatabaseBlockWithEntries, checkForDuplicateEntries bool) error SaveIncludedInMulti(entries []IHash, block IHash, checkForDuplicateEntries bool) error FetchIncludedIn(hash IHash) (IHash, error) - RebuildDirBlockInfo() error + + ReparseAnchorChains() error + SetBitcoinAnchorRecordPublicKeysFromHex([]string) error + 
SetEthereumAnchorRecordPublicKeysFromHex([]string) error FetchPaidFor(hash IHash) (IHash, error) diff --git a/common/interfaces/factoidstate.go b/common/interfaces/factoidstate.go index ad71cf784a..801915f5d7 100644 --- a/common/interfaces/factoidstate.go +++ b/common/interfaces/factoidstate.go @@ -22,7 +22,7 @@ type IFactoidState interface { // Validate transaction // Return zero len string if the balance of an address covers each input - Validate(int, ITransaction) error + Validate(int, ITransaction) (err error, holdAddr [32]byte) // Check the transaction timestamp for to ensure it can be included // in the current Transactions that are too old, or dated to diff --git a/common/interfaces/state.go b/common/interfaces/state.go index 06047674bd..9a167507f5 100644 --- a/common/interfaces/state.go +++ b/common/interfaces/state.go @@ -5,7 +5,10 @@ package interfaces import ( + "regexp" + "github.com/FactomProject/factomd/activations" + "github.com/FactomProject/factomd/common/constants/runstate" ) type DBStateSent struct { @@ -28,7 +31,7 @@ type IQueue interface { // can be logged about the execution of Factom. Also ensures that we do not // accidentally type IState interface { - Running() bool // Returns true as long as this Factomd instance is running. 
+ GetRunState() runstate.RunState // Server GetFactomNodeName() string GetSalt(Timestamp) uint32 // A secret number computed from a TS that tests if a message was issued from this server or not @@ -327,6 +330,7 @@ DidCreateLastBlockFromDBState() bool GetUnsyncedServers(dbheight uint32) []IHash Validate(msg IMsg) (validToSend int, validToExecute int) + GetIgnoreDone() bool // Access to Holding Queue LoadHoldingMap() map[[32]byte]IMsg @@ -345,6 +349,22 @@ CheckFileName(string) bool AddToReplayFilter(mask int, hash [32]byte, timestamp Timestamp, systemtime Timestamp) bool - // Activations + // Activations ------------------------------------------------------- IsActive(id activations.ActivationType) bool + + // Holding of dependent messages ------------------------------------- + // Add a message to a dependent holding list + Add(h [32]byte, msg IMsg) int + // get and remove the list of dependent messages for a hash + Get(h [32]byte) []IMsg + // expire any dependent messages that are in holding but are older than limit + ExecuteFromHolding(h [32]byte) + // create a hash to hold messages that depend on height + HoldForHeight(ht uint32, msg IMsg) int + + // test/debug filters + PassOutputRegEx(*regexp.Regexp, string) + GetOutputRegEx() (*regexp.Regexp, string) + PassInputRegEx(*regexp.Regexp, string) + GetInputRegEx() (*regexp.Regexp, string) } diff --git a/common/messages/ack.go b/common/messages/ack.go index c626ae1aa6..1a0f8cfafc 100644 --- a/common/messages/ack.go +++ b/common/messages/ack.go @@ -120,8 +120,8 @@ func (m *Ack) Validate(s interfaces.IState) int { // Update the highest known ack to start requesting // DBState blocks if necessary if s.GetHighestAck() < m.DBHeight { - if delta > 2000 { // cap at a relative 2000 due to fd-850 - s.SetHighestAck(s.GetLeaderPL().GetDBHeight() + 2000) + if delta > constants.MaxAckHeightMinuteDelta { // cap at a relative 200 blks due to fd-850 + 
s.SetHighestAck(s.GetLeaderPL().GetDBHeight() + constants.MaxAckHeightMinuteDelta/10) } else { s.SetHighestAck(m.DBHeight) } @@ -134,8 +134,8 @@ func (m *Ack) Validate(s interfaces.IState) int { return -1 } - if delta > 30 { - return 0 // put this in the holding and validate it later + if delta > 30 { // REVIEW: should we decrease this limit since msg goes to newHolding ? + return s.HoldForHeight(m.DBHeight, m) } // Only new acks are valid. Of course, the VMIndex has to be valid too. @@ -168,7 +168,7 @@ func (m *Ack) Validate(s interfaces.IState) int { if ackSigned <= 0 { if m.DBHeight > s.GetLLeaderHeight() { s.LogPrintf("executeMsg", "Hold, Not signed by a leader") - return 0 // This is for a future block so the auth set may change so hold on to it. + return s.HoldForHeight(m.DBHeight, m) } else { s.LogPrintf("executeMsg", "Drop, Not signed by a leader") diff --git a/common/messages/commitChain.go b/common/messages/commitChain.go index e1974e7c8e..eb1c1c378c 100644 --- a/common/messages/commitChain.go +++ b/common/messages/commitChain.go @@ -137,7 +137,9 @@ func (m *CommitChainMsg) Validate(state interfaces.IState) int { ebal := state.GetFactoidState().GetECBalance(*m.CommitChain.ECPubKey) v := int(ebal) - int(m.CommitChain.Credits) if v < 0 { - return 0 + // return 0 // old way add to scanned holding queue + // new holding mechanism added it to a list of messages dependent on the EC address + return state.Add(m.CommitChain.ECPubKey.Fixed(), m) } return 1 diff --git a/common/messages/commitEntry.go b/common/messages/commitEntry.go index 4761892701..8917d1cd7d 100644 --- a/common/messages/commitEntry.go +++ b/common/messages/commitEntry.go @@ -253,7 +253,9 @@ func (m *CommitEntryMsg) Validate(state interfaces.IState) int { ebal := state.GetFactoidState().GetECBalance(*m.CommitEntry.ECPubKey) if int(m.CommitEntry.Credits) > int(ebal) { - return 0 + // return 0 // old way add to scanned holding queue + // new holding mechanism added it to a list of messages 
dependent on the EC address + return state.Add(m.CommitEntry.ECPubKey.Fixed(), m) } return 1 } diff --git a/common/messages/directoryBlockSignature.go b/common/messages/directoryBlockSignature.go index 39cad9d1e0..f0b702d4ba 100644 --- a/common/messages/directoryBlockSignature.go +++ b/common/messages/directoryBlockSignature.go @@ -182,7 +182,8 @@ func (m *DirectoryBlockSignature) Validate(state interfaces.IState) int { state.GetLLeaderHeight(), m.ServerIdentityChainID.Bytes()[3:6], m.String())) - return 0 + + return state.HoldForHeight(m.DBHeight, m) } isVer, err := m.VerifySignature() diff --git a/common/messages/electionMsgs/electionAdapter.go b/common/messages/electionMsgs/electionAdapter.go index beab7e5077..6e00aa5161 100644 --- a/common/messages/electionMsgs/electionAdapter.go +++ b/common/messages/electionMsgs/electionAdapter.go @@ -1,6 +1,9 @@ package electionMsgs import ( + "crypto/sha256" + "fmt" + "github.com/FactomProject/factomd/common/interfaces" primitives2 "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/electionsCore/election" @@ -10,11 +13,8 @@ import ( // "github.com/FactomProject/factomd/common/messages/electionMsgs" "github.com/FactomProject/factomd/elections" - //"github.com/FactomProject/factomd/state" - "fmt" - - "crypto/sha256" + //"github.com/FactomProject/factomd/state" "github.com/FactomProject/factomd/electionsCore/messages" ) @@ -312,7 +312,7 @@ func (ea *ElectionAdapter) adaptVolunteerMessage(msg *FedVoteVolunteerMsg) *mess vol := msg.ServerID.Fixed() volid := primitives.Identity(vol) volmsg := messages.NewVolunteerMessageWithoutEOM(volid) - volmsg.TagMessage(msg.MsgHash.Fixed()) + volmsg.TagMessage(msg.GetMsgHash().Fixed()) return &volmsg } @@ -321,7 +321,7 @@ func (ea *ElectionAdapter) adaptVoteMessage(msg *FedVoteProposalMsg) *messages.V volmsg := ea.adaptVolunteerMessage(&msg.Volunteer) vote := messages.NewVoteMessage(*volmsg, primitives.Identity(msg.Signer.Fixed())) - 
vote.TagMessage(msg.MsgHash.Fixed()) + vote.TagMessage(msg.GetMsgHash().Fixed()) return &vote } @@ -332,7 +332,7 @@ func (ea *ElectionAdapter) adaptLevelMessage(msg *FedVoteLevelMsg, single bool) volmsg := ea.adaptVolunteerMessage(&msg.Volunteer) ll := messages.NewLeaderLevelMessage(primitives.Identity(msg.Signer.Fixed()), int(msg.Rank), int(msg.Level), *volmsg) - ll.TagMessage(msg.MsgHash.Fixed()) + ll.TagMessage(msg.GetMsgHash().Fixed()) ll.VolunteerPriority = ea.SimulatedElection.GetVolunteerPriority(volmsg.Signer) ll.Committed = msg.Committed ll.EOMFrom = msg.EOMFrom.Fixed() @@ -361,7 +361,7 @@ func (ea *ElectionAdapter) adaptLevelMessage(msg *FedVoteLevelMsg, single bool) // tagMessage is called on all adapted messages. func (ea *ElectionAdapter) tagMessage(msg interfaces.IMsg) { - ea.tagedMessages[msg.GetHash().Fixed()] = msg + ea.tagedMessages[msg.GetMsgHash().Fixed()] = msg ea.saveVolunteer(msg) } diff --git a/common/messages/electionMsgs/fedVoteLevelMsg.go b/common/messages/electionMsgs/fedVoteLevelMsg.go index 173199041d..061931db7b 100644 --- a/common/messages/electionMsgs/fedVoteLevelMsg.go +++ b/common/messages/electionMsgs/fedVoteLevelMsg.go @@ -19,8 +19,6 @@ import ( "github.com/FactomProject/factomd/state" log "github.com/sirupsen/logrus" - //"github.com/FactomProject/factomd/state" - "github.com/FactomProject/factomd/common/messages/msgbase" "github.com/FactomProject/factomd/elections" ) @@ -227,6 +225,8 @@ func (m *FedVoteLevelMsg) FollowerExecute(is interfaces.IState) { pl.AuditServers[m.Volunteer.ServerIdx], pl.FedServers[m.Volunteer.FedIdx] // Add to the process list and immediately process + + is.LogMessage("executeMsg", "add to pl", m.Volunteer.Ack) pl.AddToProcessList(pl.State, m.Volunteer.Ack.(*messages.Ack), m.Volunteer.Missing) is.UpdateState() } else { diff --git a/common/messages/electionMsgs/fedVoteMsg.go b/common/messages/electionMsgs/fedVoteMsg.go index 58c1367607..9bf61c46dd 100644 --- a/common/messages/electionMsgs/fedVoteMsg.go 
+++ b/common/messages/electionMsgs/fedVoteMsg.go @@ -223,12 +223,8 @@ func (m *FedVoteMsg) Validate(is interfaces.IState) int { // Check to make sure the volunteer message can be put in our process list if validVolunteer := m.ValidateVolunteer(*vol, is); validVolunteer != 1 { - if validVolunteer == -1 { - return -1 - } - - // Volunteer is not valid because the volunteer has a higher process list height - return 0 + // 0 means Volunteer is not valid because the volunteer has a higher process list height + return validVolunteer } signed, err := sm.MarshalForSignature() diff --git a/common/messages/electionMsgs/fedVoteVolunteerMsg.go b/common/messages/electionMsgs/fedVoteVolunteerMsg.go index 123b54b609..d9c13044ce 100644 --- a/common/messages/electionMsgs/fedVoteVolunteerMsg.go +++ b/common/messages/electionMsgs/fedVoteVolunteerMsg.go @@ -208,7 +208,7 @@ func (m *FedVoteVolunteerMsg) GetHash() (rval interfaces.IHash) { primitives.LogNilHashBug("FedVoteVolunteerMsg.GetHash() saw an interface that was nil") } }() - return m.GetMsgHash() + return m.Missing.GetMsgHash() } func (m *FedVoteVolunteerMsg) GetTimestamp() interfaces.Timestamp { diff --git a/common/messages/electionMsgs/startElectionInternal.go b/common/messages/electionMsgs/startElectionInternal.go index 65d37720f1..26d0e3c577 100644 --- a/common/messages/electionMsgs/startElectionInternal.go +++ b/common/messages/electionMsgs/startElectionInternal.go @@ -41,6 +41,7 @@ func (m *StartElectionInternal) ElectionProcess(s interfaces.IState, elect inter return } e.Adapter = NewElectionAdapter(e, m.PreviousDBHash) + s.LogPrintf("election", "Create Election Adapter") // An election that finishes may make us a leader. We need to know that for the next election that // takes place. 
So use the election's list of fed servers to determine if we are a leader for _, id := range e.Federated { @@ -76,47 +77,10 @@ func (m *StartElectionInternal) FollowerExecute(is interfaces.IState) { } // Process all the messages that we can - for s.Process() { + for s.LeaderPL.Process(s) { } m.VMHeight = vm.Height - // TODO: Process all messages that we can. Then trim to the first non-processed message - // TODO: This is incase a leader sends out ack 10, but not 9. We need to trim back to 8 because 9 does not exist - // TODO: Do not trim EOMs or DBsigs, as they may not be processed until certain conditions. - - //end := len(vm.List) - //if end > vm.Height { - // for _, msg := range vm.List[vm.Height:] { - // if msg != nil { - // hash := msg.GetRepeatHash() - // s.Replay.Clear(constants.INTERNAL_REPLAY, hash.Fixed()) - // s.Holding[msg.GetMsgHash().Fixed()] = msg - // } - // } - //} - // - //// Trim the height to the last processed message - //trimto := vm.Height - //pre := len(vm.List) - //if trimto < len(vm.List) { - // // When trimming, we need to check if trimto+1 is an EOM or DBSig. 
In which case, do not trim - // // the EOM or DBSig - // if len(vm.List) > trimto { - // // There exists an item at +1 - // if _, ok := vm.List[vm.Height].(*messages.EOM); ok { - // trimto += 1 - // } else if _, ok := vm.List[vm.Height].(*messages.DirectoryBlockSignature); ok { - // trimto += 1 - // } - // } - // - // vm.List = vm.List[:trimto] - // vm.ListAck = vm.ListAck[:trimto] - //} - //post := len(vm.List) - //if pre != post { - // fmt.Printf("Trimmed!, VM: %d %s from %d to %d\n", m.VMIndex, s.FactomNodeName, pre, post) - //} // Send to elections is.ElectionsQueue().Enqueue(m) diff --git a/common/messages/electionMsgs/syncMsg.go b/common/messages/electionMsgs/syncMsg.go index 555f4955ab..a8949745ba 100644 --- a/common/messages/electionMsgs/syncMsg.go +++ b/common/messages/electionMsgs/syncMsg.go @@ -14,7 +14,6 @@ import ( "github.com/FactomProject/factomd/common/messages/msgbase" "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/state" - "github.com/FactomProject/goleveldb/leveldb/errors" log "github.com/sirupsen/logrus" ) @@ -187,11 +186,13 @@ func (m *SyncMsg) FollowerExecute(is interfaces.IState) { } else { msg, ack = s.CreateDBSig(m.DBHeight, m.VMIndex) } + if msg == nil { // TODO: What does this mean? -- clay //s.Holding[m.GetMsgHash().Fixed()] = m s.AddToHolding(m.GetMsgHash().Fixed(), m) // SyncMsg.FollowerExecute return // Maybe we are not yet prepared to create an SigType... 
} + va := new(FedVoteVolunteerMsg) va.Missing = msg va.Ack = ack @@ -232,53 +233,12 @@ func (e *SyncMsg) JSONString() (string, error) { } func (m *SyncMsg) UnmarshalBinaryData(data []byte) (newData []byte, err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("Error unmarshalling: %v", r) - } - }() - - buf := primitives.NewBuffer(data) - - if t, e := buf.PopByte(); e != nil || t != constants.SYNC_MSG { - return nil, errors.New("Not a Sync Message Audit type") - } - if m.TS, err = buf.PopTimestamp(); err != nil { - return nil, err - } - if m.SigType, err = buf.PopBool(); err != nil { - return nil, err - } - if m.Name, err = buf.PopString(); err != nil { - return nil, err - } - if m.ServerIdx, err = buf.PopUInt32(); err != nil { - return nil, err - } - if m.ServerID, err = buf.PopIHash(); err != nil { - return nil, err - } - if m.Weight, err = buf.PopIHash(); err != nil { - return nil, err - } - if m.DBHeight, err = buf.PopUInt32(); err != nil { - return nil, err - } - if m.VMIndex, err = buf.PopInt(); err != nil { - return nil, err - } - if m.Round, err = buf.PopInt(); err != nil { - return nil, err - } - if m.Minute, err = buf.PopByte(); err != nil { - return nil, err - } - return buf.Bytes(), err + err = fmt.Errorf("SyncMsg is an internal message only") + return } func (m *SyncMsg) UnmarshalBinary(data []byte) error { - _, err := m.UnmarshalBinaryData(data) - return err + return fmt.Errorf("SyncMsg is an internal message only") } func (m *SyncMsg) MarshalBinary() (data []byte, err error) { diff --git a/common/messages/electionMsgs/syncMsg_test.go b/common/messages/electionMsgs/syncMsg_test.go index 3c9459a8ad..0acaf1f3dc 100644 --- a/common/messages/electionMsgs/syncMsg_test.go +++ b/common/messages/electionMsgs/syncMsg_test.go @@ -7,15 +7,12 @@ package electionMsgs_test import ( "testing" - "fmt" - - "github.com/FactomProject/factomd/common/constants" . 
"github.com/FactomProject/factomd/common/messages/electionMsgs" "github.com/FactomProject/factomd/common/messages/msgsupport" "github.com/FactomProject/factomd/common/primitives" ) -func TestUnmarshalfolunteerSyncMsg_test(t *testing.T) { +func TestUnmarshalVolunteerSyncMsg_test(t *testing.T) { defer func() { if r := recover(); r != nil { t.Errorf("Panic caught during the test - %v", r) @@ -45,24 +42,12 @@ func TestMarshalUnmarshalSyncMsg(t *testing.T) { t.Error(err) } - va2, err := msgsupport.UnmarshalMessage(hex) - if err != nil { - t.Error(err) - } - _, err = va2.JSONString() - if err != nil { + _, err = msgsupport.UnmarshalMessage(hex) + // Expect an error, as sync messages are local only, + // and cannot be unmarshalled. + if err == nil { t.Error(err) } - - if va2.Type() != constants.SYNC_MSG { - t.Error(num + " Invalid message type unmarshalled") - } - - if sm.IsSameAs(va2) == false { - t.Error(num + " Acks are not the same") - fmt.Println(sm.String()) - fmt.Println(va2.String()) - } } sm := new(SyncMsg) sm.Minute = 5 diff --git a/common/messages/electionMsgs/timeoutInternal.go b/common/messages/electionMsgs/timeoutInternal.go index 901c3250a9..52b0530758 100644 --- a/common/messages/electionMsgs/timeoutInternal.go +++ b/common/messages/electionMsgs/timeoutInternal.go @@ -81,8 +81,8 @@ func (m *TimeoutInternal) InitiateElectionAdapter(st interfaces.IState) bool { msg.VMIndex = m.VMIndex msg.Minute = m.Minute msg.SigType = m.SigType - e.State.LogMessage("InMsgQueue", "enqueue_InitiateElectionAdapter", msg) - e.State.InMsgQueue().Enqueue(msg) + e.State.LogMessage("MsgQueue", "enqueue_InitiateElectionAdapter", msg) + e.State.MsgQueue() <- msg // When we start a new election, we can process all messages that were being held go e.ProcessWaiting() @@ -317,17 +317,12 @@ func (e *TimeoutInternal) JSONString() (string, error) { } func (m *TimeoutInternal) UnmarshalBinaryData(data []byte) (newData []byte, err error) { - defer func() { - if r := recover(); r != nil { - 
err = fmt.Errorf("Error unmarshalling: %v", r) - } - }() + err = fmt.Errorf("TimeoutInternal is an internal message only") return } func (m *TimeoutInternal) UnmarshalBinary(data []byte) error { - _, err := m.UnmarshalBinaryData(data) - return err + return fmt.Errorf("TimeoutInternal is an internal message only") } func (m *TimeoutInternal) String() string { diff --git a/common/messages/eom.go b/common/messages/eom.go index 22929a0c0b..3b2f7e3414 100644 --- a/common/messages/eom.go +++ b/common/messages/eom.go @@ -157,8 +157,14 @@ func (m *EOM) Validate(state interfaces.IState) int { } found, _ := state.GetVirtualServers(m.DBHeight, int(m.Minute), m.ChainID) - if !found { // Only EOM from federated servers are valid. - return -1 + if !found { + if m.DBHeight > state.GetHighestSavedBlk() { + // msg from future may be a valid server when we get to this block + state.HoldForHeight(m.DBHeight, m) + } else { + // Only EOM from federated servers are valid. + return -1 + } } // Check signature diff --git a/common/messages/factoidTransaction.go b/common/messages/factoidTransaction.go index 1cc3c909fa..142a29d669 100644 --- a/common/messages/factoidTransaction.go +++ b/common/messages/factoidTransaction.go @@ -129,9 +129,14 @@ func (m *FactoidTransaction) Validate(state interfaces.IState) int { } // Is the transaction valid at this point in time? - err = state.GetFactoidState().Validate(1, m.Transaction) + holdAddr := [32]byte{} + err, holdAddr = state.GetFactoidState().Validate(1, m.Transaction) if err != nil { - return 0 // Well, mumble. Might be out of order. + if holdAddr != [32]byte{} { // hold for an address that is short + state.Add(holdAddr, m) + } else { + return -1 // message was invalid for another reason + } } // First check all inputs are good. 
diff --git a/common/messages/heartbeat.go b/common/messages/heartbeat.go index 92108a6d5c..74cdd5e942 100644 --- a/common/messages/heartbeat.go +++ b/common/messages/heartbeat.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "fmt" "reflect" + "strings" "github.com/FactomProject/factomd/common/constants" "github.com/FactomProject/factomd/common/interfaces" @@ -326,7 +327,15 @@ func (m *Heartbeat) FollowerExecute(is interfaces.IState) { if auditServer.GetChainID().IsSameAs(m.IdentityChainID) { if m.IdentityChainID.IsSameAs(is.GetIdentityChainID()) { if m.SecretNumber != is.GetSalt(m.Timestamp) { - panic("We have seen a heartbeat using our Identity that isn't ours") + lLeaderHeight := is.GetLLeaderHeight() + if m.DBHeight == lLeaderHeight { + var b strings.Builder + b.WriteString("We have seen a heartbeat using our Identity that isn't ours.") + b.WriteString(fmt.Sprintf("\n Node: %s", is.GetFactomNodeName())) + b.WriteString(fmt.Sprintf("\n LLeaderHeight: %d", lLeaderHeight)) + b.WriteString(fmt.Sprintf("\n Message dbHeight: %d", m.DBHeight)) + panic(b.String()) + } } } auditServer.SetOnline(true) diff --git a/common/messages/messageTrace.go b/common/messages/messageTrace.go index 5fe8a695d2..cd51cce78c 100644 --- a/common/messages/messageTrace.go +++ b/common/messages/messageTrace.go @@ -39,38 +39,51 @@ func checkFileName(name string) bool { if globals.Params.DebugLogRegEx == "" { return false } + checkForChangesInDebugRegex() + flag, old := enabled[name] + if !old { + flag = TestRegex.Match([]byte(name)) + enabled[name] = flag + } + return flag +} + +func checkForChangesInDebugRegex() { // if the regex string has changed ... 
if globals.Params.DebugLogRegEx != globals.LastDebugLogRegEx { + globals.Params.DebugLogLocation, globals.Params.DebugLogRegEx = SplitUpDebugLogRegEx(globals.Params.DebugLogRegEx) TestRegex = nil // throw away the old regex globals.LastDebugLogRegEx = globals.Params.DebugLogRegEx } //strip quotes if they are included in the string - if globals.Params.DebugLogRegEx[0] == '"' || globals.Params.DebugLogRegEx[0] == '\'' { + if globals.Params.DebugLogRegEx != "" && (globals.Params.DebugLogRegEx[0] == '"' || globals.Params.DebugLogRegEx[0] == '\'') { globals.Params.DebugLogRegEx = globals.Params.DebugLogRegEx[1 : len(globals.Params.DebugLogRegEx)-1] // Trim the "'s } // if we haven't compiled the regex ... - if TestRegex == nil { + if TestRegex == nil && globals.Params.DebugLogRegEx != "" { theRegex, err := regexp.Compile("(?i)" + globals.Params.DebugLogRegEx) // force case insensitive if err != nil { panic(err) } enabled = make(map[string]bool) // create a clean cache of enabled files TestRegex = theRegex - globals.LastDebugLogRegEx = globals.Params.DebugLogRegEx } - flag, old := enabled[name] - if !old { - flag = TestRegex.Match([]byte(name)) - enabled[name] = flag - } - return flag + globals.LastDebugLogRegEx = globals.Params.DebugLogRegEx +} + +func SplitUpDebugLogRegEx(DebugLogRegEx string) (string, string) { + lastSlashIndex := strings.LastIndex(DebugLogRegEx, string(os.PathSeparator)) + regex := DebugLogRegEx[lastSlashIndex+1:] + dirlocation := DebugLogRegEx[0 : lastSlashIndex+1] + return dirlocation, regex } // assumes traceMutex is locked already func getTraceFile(name string) (f *os.File) { + checkForChangesInDebugRegex() //traceMutex.Lock() defer traceMutex.Unlock() - name = strings.ToLower(name) + name = globals.Params.DebugLogLocation + strings.ToLower(name) if !checkFileName(name) { return nil } @@ -87,7 +100,7 @@ func getTraceFile(name string) (f *os.File) { } } if f == nil { - fmt.Println("Creating " + name) + fmt.Println("Creating " + (name)) var err 
error f, err = os.Create(name) if err != nil { @@ -192,10 +205,9 @@ func logMessage(name string, note string, msg interfaces.IMsg) { //to = "broadcast" } switch t { - case constants.ACK_MSG: - ack := msg.(*Ack) - embeddedHash = fmt.Sprintf(" EmbeddedMsg: %x", ack.GetHash().Bytes()[:3]) - fixed := ack.GetHash().Fixed() + case constants.VOLUNTEERAUDIT, constants.ACK_MSG: + embeddedHash = fmt.Sprintf(" EmbeddedMsg: %x", msg.GetHash().Bytes()[:3]) + fixed := msg.GetHash().Fixed() embeddedMsg = getmsg(fixed) if embeddedMsg == nil { embeddedHash += "(unknown)" @@ -221,7 +233,7 @@ func logMessage(name string, note string, msg interfaces.IMsg) { s = fmt.Sprintf("%9d %02d:%02d:%02d.%03d %-50s M-%v|R-%v|H-%v|%p %30s:%v\n", sequence, now.Hour()%24, now.Minute()%60, now.Second()%60, (now.Nanosecond()/1e6)%1000, note, mhash, rhash, hash, msg, "continue:", text) } - s = addNodeNames(s) + //s = addNodeNames(s) myfile.WriteString(s) } @@ -300,7 +312,7 @@ func LogPrintf(name string, format string, more ...interface{}) { default: s = fmt.Sprintf("%9d %02d:%02d:%02d.%03d %s\n", sequence, now.Hour()%24, now.Minute()%60, now.Second()%60, (now.Nanosecond()/1e6)%1000, text) } - s = addNodeNames(s) + //s = addNodeNames(s) myfile.WriteString(s) } } diff --git a/common/messages/missingMsg.go b/common/messages/missingMsg.go index dbbf69c225..d3f177bbd5 100644 --- a/common/messages/missingMsg.go +++ b/common/messages/missingMsg.go @@ -246,7 +246,13 @@ func (m *MissingMsg) Validate(state interfaces.IState) int { if m.Asking == nil { return -1 } - if m.Asking.IsZero() { + // can't answer about the future + if m.DBHeight > state.GetLLeaderHeight() { + return -1 + } + // can't answer about the past before our earliest pl + // use int so at height near 0 we can go negative + if int(m.DBHeight) < int(state.GetLLeaderHeight())-2 { return -1 } return 1 diff --git a/common/messages/missingMsg_test.go b/common/messages/missingMsg_test.go index 0975cae498..78e5e266b8 100644 --- 
a/common/messages/missingMsg_test.go +++ b/common/messages/missingMsg_test.go @@ -88,6 +88,7 @@ func TestValidateMissingMsg(t *testing.T) { } msg.Asking = primitives.RandomHash() + msg.DBHeight = s.GetLLeaderHeight() v = msg.Validate(s) if v != 1 { t.Errorf("Should be 1, found %d", v) diff --git a/common/messages/msgbase/MessageBase.go b/common/messages/msgbase/MessageBase.go index c35a807ac3..4706c934db 100644 --- a/common/messages/msgbase/MessageBase.go +++ b/common/messages/msgbase/MessageBase.go @@ -144,6 +144,9 @@ func checkForDuplicateSend(s interfaces.IState, msg interfaces.IMsg, whereAmI st } func (m *MessageBase) SendOut(s interfaces.IState, msg interfaces.IMsg) { + if msg.GetRepeatHash() == nil { // Do not send pokemon messages + return + } if msg.GetNoResend() { return } diff --git a/common/messages/revealEntry.go b/common/messages/revealEntry.go index 8b75f3ad1c..c3ce0d885f 100644 --- a/common/messages/revealEntry.go +++ b/common/messages/revealEntry.go @@ -136,7 +136,10 @@ func (m *RevealEntryMsg) Validate(state interfaces.IState) int { if commit == nil { state.LogMessage("executeMsg", "Hold, no commit", m) - return 0 + // old holding return 0 + state.LogPrintf("newHolding", "Hold, no commit M-%x is waiting on H-%x", m.GetMsgHash().Bytes()[:3], m.Entry.GetHash().Bytes()[:3]) + return state.Add(m.Entry.GetHash().Fixed(), m) // hold for a commit + } // // Make sure one of the two proper commits got us here. @@ -164,7 +167,9 @@ func (m *RevealEntryMsg) Validate(state interfaces.IState) int { if m.Entry.KSize() > ECs { state.LogMessage("executeMsg", "Hold, underpaid", m) - return 0 // not enough payments on the EC to reveal this entry. Return 0 to wait on another commit + // old holding .... return 0 // not enough payments on the EC to reveal this entry. 
Return 0 to wait on another commit + state.LogPrintf("newHolding", "Hold, underpaid M-%x is waiting on M-%x", m.GetMsgHash().Bytes()[:3], m.Entry.GetHash().Bytes()[:3]) + return state.Add(m.Entry.GetHash().Fixed(), m) // hold for a new commit } // Make sure we have a chain. If we don't, then bad things happen. @@ -186,7 +191,10 @@ func (m *RevealEntryMsg) Validate(state interfaces.IState) int { if eb == nil { state.LogMessage("executeMsg", "Hold, no chain", m) // No chain, we have to leave it be and maybe one will be made. - return 0 + //old holding .., return 0 + state.LogPrintf("newHolding", "Hold, No Chain M-%x is waiting on chain %x", m.GetMsgHash().Bytes()[:3], m.Entry.GetChainID().Bytes()[:3]) + return state.Add(m.Entry.GetChainID().Fixed(), m) // hold for a new commit + } return 1 } else { @@ -194,7 +202,9 @@ func (m *RevealEntryMsg) Validate(state interfaces.IState) int { ECs := int(m.CommitChain.CommitChain.Credits) if m.Entry.KSize()+10 > ECs { // Discard commits that are not funded properly state.LogMessage("executeMsg", "Hold, under paid", m) - return 0 + // old holding .... return 0 // not enough payments on the EC to reveal this chain. 
Return 0 to wait on another commit + state.LogPrintf("newHolding", "Hold, underpaid M-%x is waiting on M-%x", m.GetMsgHash().Bytes()[:3], m.Entry.GetHash().Bytes()[:3]) + return state.Add(m.Entry.GetHash().Fixed(), m) // hold for a new commit } if !CheckChainID(state, m.Entry.ExternalIDs(), m) { diff --git a/common/messages/revealEntry_test.go b/common/messages/revealEntry_test.go index 7c4184ee93..7286e1e194 100644 --- a/common/messages/revealEntry_test.go +++ b/common/messages/revealEntry_test.go @@ -97,8 +97,8 @@ func newRevealEntry() *RevealEntryMsg { func TestValidRevealMsg(t *testing.T) { s := testHelper.CreateAndPopulateTestStateAndStartValidator() - if v := testValid(1, 0, s); v != 0 { - t.Error("Should be 0, found ", v) + if v := testValid(1, 0, s); v != -2 { + t.Error("Should be -2 found ", v) } if v := testValid(15, 12000, s); v != -1 { diff --git a/common/primitives/merkle.go b/common/primitives/merkle.go index 44895cfd34..3455e791ee 100644 --- a/common/primitives/merkle.go +++ b/common/primitives/merkle.go @@ -75,9 +75,9 @@ type MerkleNode struct { Top *Hash `json:"top,omitempty"` } -func BuildMerkleBranchForEntryHash(hashes []interfaces.IHash, entryHash interfaces.IHash, fullDetail bool) []*MerkleNode { +func BuildMerkleBranchForHash(hashes []interfaces.IHash, target interfaces.IHash, fullDetail bool) []*MerkleNode { for i, h := range hashes { - if h.IsSameAs(entryHash) { + if h.IsSameAs(target) { return BuildMerkleBranch(hashes, i, fullDetail) } } diff --git a/controlPanel/controlPanel.go b/controlPanel/controlPanel.go index 11af06a21f..16c5bbf8af 100644 --- a/controlPanel/controlPanel.go +++ b/controlPanel/controlPanel.go @@ -110,9 +110,7 @@ func ServeControlPanel(displayStateChannel chan state.DisplayState, statePointer StatePointer = statePointer StatePointer.ControlPanelDataRequest = true // Request initial State // Wait for initial State - select { - case DisplayState = <-displayStateChannel: - } + DisplayState = <-displayStateChannel 
DisplayStateMutex.RLock() controlPanelSetting := DisplayState.ControlPanelSetting @@ -145,11 +143,12 @@ func ServeControlPanel(displayStateChannel chan state.DisplayState, statePointer go doEvery(10*time.Second, getRecentTransactions) go manageConnections(connections) - http.HandleFunc("/", static(indexHandler)) - http.HandleFunc("/search", searchHandler) - http.HandleFunc("/post", postHandler) - http.HandleFunc("/factomd", factomdHandler) - http.HandleFunc("/factomdBatch", factomdBatchHandler) + controlPanelMux := http.NewServeMux() + controlPanelMux.HandleFunc("/", static(indexHandler)) + controlPanelMux.HandleFunc("/search", searchHandler) + controlPanelMux.HandleFunc("/post", postHandler) + controlPanelMux.HandleFunc("/factomd", factomdHandler) + controlPanelMux.HandleFunc("/factomdBatch", factomdBatchHandler) tlsIsEnabled, tlsPrivate, tlsPublic := StatePointer.GetTlsInfo() if tlsIsEnabled { @@ -165,10 +164,10 @@ func ServeControlPanel(displayStateChannel chan state.DisplayState, statePointer time.Sleep(100 * time.Millisecond) } fmt.Println("Starting encrypted Control Panel on https://localhost" + portStr + "/ Please note the HTTPS in the browser.") - http.ListenAndServeTLS(portStr, tlsPublic, tlsPrivate, nil) + http.ListenAndServeTLS(portStr, tlsPublic, tlsPrivate, controlPanelMux) } else { fmt.Println("Starting Control Panel on http://localhost" + portStr + "/") - http.ListenAndServe(portStr, nil) + http.ListenAndServe(portStr, controlPanelMux) } } @@ -353,6 +352,16 @@ func factomdQuery(item string, value string, batchQueried bool) []byte { RequestData() } switch item { + case "ignoreDone": + DisplayStateMutex.RLock() + flag := DisplayState.IgnoreDone + DisplayStateMutex.RUnlock() + + if flag { + return []byte(`{"IgnoreDone": true}`) + } else { + return []byte(`{"IgnoreDone": false}`) + } case "myHeight": DisplayStateMutex.RLock() h := DisplayState.CurrentNodeHeight diff --git a/database/databaseOverlay/anchorInfo.go b/database/databaseOverlay/anchorInfo.go 
index 9ae2f84857..759023eae6 100644 --- a/database/databaseOverlay/anchorInfo.go +++ b/database/databaseOverlay/anchorInfo.go @@ -5,160 +5,175 @@ package databaseOverlay import ( - //"fmt" - "sort" - + "errors" "github.com/FactomProject/factomd/anchor" "github.com/FactomProject/factomd/common/directoryBlock/dbInfo" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" ) -var AnchorBlockID string = "df3ade9eec4b08d5379cc64270c30ea7315d8a8a1a69efe2b98a60ecdd69e604" -var AnchorSigKeys []string = []string{ - "0426a802617848d4d16d87830fc521f4d136bb2d0c352850919c2679f189613a", //m1 key - "d569419348ed7056ec2ba54f0ecd9eea02648b260b26e0474f8c07fe9ac6bf83", //m2 key +var BitcoinAnchorChainID = "df3ade9eec4b08d5379cc64270c30ea7315d8a8a1a69efe2b98a60ecdd69e604" +var EthereumAnchorChainID = "6e4540d08d5ac6a1a394e982fb6a2ab8b516ee751c37420055141b94fe070bfe" +var ValidAnchorChains = map[string]bool{ + BitcoinAnchorChainID: true, + EthereumAnchorChainID: true, } -var AnchorSigPublicKeys []interfaces.Verifier -func init() { - for _, v := range AnchorSigKeys { - pubKey := new(primitives.PublicKey) - err := pubKey.UnmarshalText([]byte(v)) +func (dbo *Overlay) SetBitcoinAnchorRecordPublicKeysFromHex(publicKeys []string) error { + dbo.BitcoinAnchorRecordPublicKeys = nil + for _, v := range publicKeys { + publicKey := new(primitives.PublicKey) + err := publicKey.UnmarshalText([]byte(v)) if err != nil { - panic(err) + return err } - AnchorSigPublicKeys = append(AnchorSigPublicKeys, pubKey) + dbo.BitcoinAnchorRecordPublicKeys = append(dbo.BitcoinAnchorRecordPublicKeys, publicKey) } + return nil } -func (dbo *Overlay) RebuildDirBlockInfo() error { - ars, err := dbo.FetchAllAnchorInfo() - if err != nil { - return err - } - err = dbo.SaveAnchorInfoAsDirBlockInfo(ars) - if err != nil { - return err +func (dbo *Overlay) SetEthereumAnchorRecordPublicKeysFromHex(publicKeys []string) error { + dbo.EthereumAnchorRecordPublicKeys = nil + for _, v 
:= range publicKeys { + publicKey := new(primitives.PublicKey) + err := publicKey.UnmarshalText([]byte(v)) + if err != nil { + return err + } + dbo.EthereumAnchorRecordPublicKeys = append(dbo.EthereumAnchorRecordPublicKeys, publicKey) } - return nil } -func (dbo *Overlay) SaveAnchorInfoFromEntry(entry interfaces.IEBEntry) error { - if entry.DatabasePrimaryIndex().String() == "24674e6bc3094eb773297de955ee095a05830e431da13a37382dcdc89d73c7d7" { - return nil - } - ar, ok, err := anchor.UnmarshalAndValidateAnchorEntryAnyVersion(entry, AnchorSigPublicKeys) +func (dbo *Overlay) ReparseAnchorChains() error { + // Delete all DirBlockInfo buckets + err := dbo.Clear(DIRBLOCKINFO) if err != nil { return err } - if ok == false { - return nil + err = dbo.Clear(DIRBLOCKINFO_UNCONFIRMED) + if err != nil { + return err } - if ar == nil { - return nil + err = dbo.Clear(DIRBLOCKINFO_NUMBER) + if err != nil { + return err } - dbi, err := AnchorRecordToDirBlockInfo(ar) + err = dbo.Clear(DIRBLOCKINFO_SECONDARYINDEX) if err != nil { return err } - return dbo.ProcessDirBlockInfoBatch(dbi) -} -func (dbo *Overlay) SaveAnchorInfoFromEntryMultiBatch(entry interfaces.IEBEntry) error { - if entry.DatabasePrimaryIndex().String() == "24674e6bc3094eb773297de955ee095a05830e431da13a37382dcdc89d73c7d7" { - return nil - } - ar, ok, err := anchor.UnmarshalAndValidateAnchorEntryAnyVersion(entry, AnchorSigPublicKeys) + // Fetch all potential anchor records + btcChainID, err := primitives.NewShaHashFromStr(BitcoinAnchorChainID) if err != nil { + panic(err) return err } - if ok == false { - return nil - } - if ar == nil { - return nil - } - dbi, err := AnchorRecordToDirBlockInfo(ar) + btcAnchorEntries, err := dbo.FetchAllEntriesByChainID(btcChainID) if err != nil { + panic(err) return err } - return dbo.ProcessDirBlockInfoMultiBatch(dbi) -} -func (dbo *Overlay) FetchAllAnchorInfo() ([]*anchor.AnchorRecord, error) { - chainID, err := primitives.NewShaHashFromStr(AnchorBlockID) + ethChainID, err := 
primitives.NewShaHashFromStr(EthereumAnchorChainID) if err != nil { panic(err) - return nil, err + return err } - entries, err := dbo.FetchAllEntriesByChainID(chainID) + ethAnchorEntries, err := dbo.FetchAllEntriesByChainID(ethChainID) if err != nil { panic(err) - return nil, err - } - answer := []*anchor.AnchorRecord{} - for _, entry := range entries { - if entry.DatabasePrimaryIndex().String() == "24674e6bc3094eb773297de955ee095a05830e431da13a37382dcdc89d73c7d7" { - continue - } - content := entry.GetContent() - ar, err := anchor.UnmarshalAnchorRecord(content) - if err != nil { - panic(err) - return nil, err - } - answer = append(answer, ar) + return err } - sort.Sort(ByAnchorDBHeightAscending(answer)) - return answer, nil -} -func (dbo *Overlay) SaveAnchorInfoAsDirBlockInfo(ars []*anchor.AnchorRecord) error { - sort.Sort(ByAnchorDBHeightAscending(ars)) - - for _, v := range ars { - dbi, err := AnchorRecordToDirBlockInfo(v) - if err != nil { - return err - } - err = dbo.SaveDirBlockInfo(dbi) - if err != nil { - return err - } + // Validate structure, verify signatures, and store in database + entries := append(btcAnchorEntries, ethAnchorEntries...) 
+ for _, entry := range entries { + _ = dbo.SaveAnchorInfoFromEntry(entry, false) } - return nil } -func AnchorRecordToDirBlockInfo(ar *anchor.AnchorRecord) (*dbInfo.DirBlockInfo, error) { - dbi := new(dbInfo.DirBlockInfo) +func (dbo *Overlay) SaveAnchorInfoFromEntry(entry interfaces.IEBEntry, multiBatch bool) error { + var anchorRecord *anchor.AnchorRecord + var ok bool var err error - //TODO: fetch proper data - //dbi.DBHash = - dbi.DBHash, err = primitives.NewShaHashFromStr(ar.KeyMR) + switch entry.GetChainID().String() { + case BitcoinAnchorChainID: + // Bitcoin has mixed v1 and v2 AnchorRecords + anchorRecord, ok, err = anchor.UnmarshalAndValidateAnchorEntryAnyVersion(entry, dbo.BitcoinAnchorRecordPublicKeys) + case EthereumAnchorChainID: + // Ethereum has v2 AnchorRecords only + anchorRecord, ok, err = anchor.UnmarshalAndValidateAnchorRecordV2(entry.GetContent(), entry.ExternalIDs(), dbo.EthereumAnchorRecordPublicKeys) + default: + // Given where this function is called from, we shouldn't hit this. But just in case... 
+ return errors.New("unsupported anchor chain") + } + if err != nil { - return nil, err + return err + } else if ok == false || anchorRecord == nil { + return nil } - dbi.DBHeight = ar.DBHeight - //dbi.Timestamp = - dbi.BTCTxHash, err = primitives.NewShaHashFromStr(ar.Bitcoin.TXID) + + // We have a valid, signed anchor record entry + // Now either create the DirBlockInfo for this block or update the existing DirBlockInfo with new found data + dbi, err := dbo.CreateUpdatedDirBlockInfoFromAnchorRecord(anchorRecord) if err != nil { - return nil, err + return err + } + if dbi.EthereumConfirmed && dbi.EthereumAnchorRecordEntryHash.IsSameAs(primitives.ZeroHash) { + dbi.EthereumAnchorRecordEntryHash = entry.GetHash() + } + + if multiBatch { + return dbo.ProcessDirBlockInfoMultiBatch(dbi) + } + return dbo.ProcessDirBlockInfoBatch(dbi) +} + +func (dbo *Overlay) CreateUpdatedDirBlockInfoFromAnchorRecord(ar *anchor.AnchorRecord) (*dbInfo.DirBlockInfo, error) { + height := ar.DBHeight + if ar.DBHeightMax != 0 && ar.DBHeightMax != ar.DBHeight { + height = ar.DBHeightMax } - dbi.BTCTxOffset = ar.Bitcoin.Offset - dbi.BTCBlockHeight = ar.Bitcoin.BlockHeight - dbi.BTCBlockHash, err = primitives.NewShaHashFromStr(ar.Bitcoin.BlockHash) + dirBlockKeyMR, err := dbo.FetchDBKeyMRByHeight(height) if err != nil { return nil, err } - dbi.DBMerkleRoot, err = primitives.NewShaHashFromStr(ar.KeyMR) + + dirBlockInfo, err := dbo.FetchDirBlockInfoByKeyMR(dirBlockKeyMR) if err != nil { return nil, err } - dbi.BTCConfirmed = true + + var dbi *dbInfo.DirBlockInfo + if dirBlockInfo == nil { + dbi = dbInfo.NewDirBlockInfo() + dbi.DBHash = dirBlockKeyMR + dbi.DBMerkleRoot = dirBlockKeyMR + dbi.DBHeight = height + } else { + dbi = dirBlockInfo.(*dbInfo.DirBlockInfo) + } + + if ar.Bitcoin != nil { + dbi.BTCTxHash, err = primitives.NewShaHashFromStr(ar.Bitcoin.TXID) + if err != nil { + return nil, err + } + dbi.BTCTxOffset = ar.Bitcoin.Offset + dbi.BTCBlockHeight = ar.Bitcoin.BlockHeight + 
dbi.BTCBlockHash, err = primitives.NewShaHashFromStr(ar.Bitcoin.BlockHash) + if err != nil { + return nil, err + } + dbi.BTCConfirmed = true + } else if ar.Ethereum != nil { + dbi.EthereumConfirmed = true + } return dbi, nil } diff --git a/database/databaseOverlay/anchorInfo_test.go b/database/databaseOverlay/anchorInfo_test.go index 2a25a4c455..36e44f8454 100644 --- a/database/databaseOverlay/anchorInfo_test.go +++ b/database/databaseOverlay/anchorInfo_test.go @@ -13,110 +13,77 @@ import ( func TestRebuildDirBlockInfo(t *testing.T) { dbo := testHelper.CreateEmptyTestDatabaseOverlay() - anchors := CreateAnchors() - err := dbo.SaveAnchorInfoAsDirBlockInfo(anchors) + anchors, err := CreateAnchors() if err != nil { - t.Error(err) + t.Fatalf("Failed to create test anchors: %s", err.Error()) + } + + for _, anchor := range anchors { + dbi, err := dbo.CreateUpdatedDirBlockInfoFromAnchorRecord(anchor) + if err != nil { + t.Error(err) + } + + err = dbo.ProcessDirBlockInfoBatch(dbi) + if err != nil { + t.Error(err) + } } } -func CreateAnchors() []*anchor.AnchorRecord { - answer := []*anchor.AnchorRecord{} +func CreateAnchors() ([]*anchor.AnchorRecord, error) { + anchors := []*anchor.AnchorRecord{} - record := ` -{ - "AnchorRecordVer": 1, - "DBHeight": 8, - "KeyMR": "637b6010cb6121f76c65b200a6cf94cb6655881fb4cac48979f8950e7a349da1", - "RecordHeight": 9, + records := []string{ + `{"AnchorRecordVer": 1, "DBHeight": 8, "KeyMR": "637b6010cb6121f76c65b200a6cf94cb6655881fb4cac48979f8950e7a349da1","RecordHeight": 9, "Bitcoin": { "Address": "1K2SXgApmo9uZoyahvsbSanpVWbzZWVVMF", "TXID": "b73b38b8af43f4dbaeb061f158d4bf5004b40216b30acd3beca43fae1ba6d1b7", "BlockHeight": 372579, "BlockHash": "00000000000000000589540fdaacf4f6ba37513aedc1033e68a649ffde0573ad", "Offset": 1185 - }}` - ar, err := anchor.UnmarshalAnchorRecord([]byte(record)) - if err != nil { - panic(err) - } - answer = append(answer, ar) - - record = ` -{ - "AnchorRecordVer": 1, - "DBHeight": 12, - "KeyMR": 
"6b4ef43604d2d5fb14267411fa0d1fa6ea7cb5fce631dfbe619334f082bc504f", - "RecordHeight": 12, + }}`, + `{"AnchorRecordVer": 1, "DBHeight": 12, "KeyMR": "6b4ef43604d2d5fb14267411fa0d1fa6ea7cb5fce631dfbe619334f082bc504f", "RecordHeight": 12, "Bitcoin": { "Address": "1K2SXgApmo9uZoyahvsbSanpVWbzZWVVMF", "TXID": "e0c67e64cfdbf025b41eb235cc07b2e63fb3c62c8b877f319cac8ec3b5483223", "BlockHeight": 372584, "BlockHash": "00000000000000000fdc6526a60522d44731c4d30b36421c10bb21fbe97eb468", "Offset": 536 - }}` - ar, err = anchor.UnmarshalAnchorRecord([]byte(record)) - if err != nil { - panic(err) - } - answer = append(answer, ar) - - record = ` -{ - "AnchorRecordVer": 1, - "DBHeight": 14, - "KeyMR": "26d5be575d93b4fc2e15294266ab423fff8cb442a3c8111be99e6a5e7c3682d2", - "RecordHeight": 16, + }}`, + `{"AnchorRecordVer": 1, "DBHeight": 14, "KeyMR": "26d5be575d93b4fc2e15294266ab423fff8cb442a3c8111be99e6a5e7c3682d2", "RecordHeight": 16, "Bitcoin": { "Address": "1K2SXgApmo9uZoyahvsbSanpVWbzZWVVMF", "TXID": "b8a9104f58ae697df3c6052f01243809222d4f17c1eb3983adf77aea60b7b17b", "BlockHeight": 372587, "BlockHash": "0000000000000000056b2c07b093727ae1d49186ef93857e5bc8ba97cac52790", "Offset": 1368 - }}` - ar, err = anchor.UnmarshalAnchorRecord([]byte(record)) - if err != nil { - panic(err) - } - answer = append(answer, ar) - - record = ` -{ - "AnchorRecordVer": 1, - "DBHeight": 21, - "KeyMR": "b2e3b5dd50a0bbd6837e2a4cd6fa6b83a8ac2640f0caf2109200e53a7497d587", - "RecordHeight": 21, + }}`, + `{"AnchorRecordVer": 1, "DBHeight": 21, "KeyMR": "b2e3b5dd50a0bbd6837e2a4cd6fa6b83a8ac2640f0caf2109200e53a7497d587", "RecordHeight": 21, "Bitcoin": { "Address": "1K2SXgApmo9uZoyahvsbSanpVWbzZWVVMF", "TXID": "43faa9e0b4f8b3fd366bf1a3a4fe6d42276627f96bcc4376754130fc4c3faf63", "BlockHeight": 372594, "BlockHash": "000000000000000008101836aa63e20b5cd2b3e4bd4133cb990306c4fd2c4f60", "Offset": 415 - }}` - ar, err = anchor.UnmarshalAnchorRecord([]byte(record)) - if err != nil { - panic(err) - } - answer = append(answer, 
ar) - - record = ` -{ - "AnchorRecordVer": 1, - "DBHeight": 22, - "KeyMR": "5c9f7cf6667b6d46da730cddd13a7c0094ef39462a7db4e779f950e5a7c763cc", - "RecordHeight": 24, + }}`, + `{"AnchorRecordVer": 1, "DBHeight": 22, "KeyMR": "5c9f7cf6667b6d46da730cddd13a7c0094ef39462a7db4e779f950e5a7c763cc", "RecordHeight": 24, "Bitcoin": { "Address": "1K2SXgApmo9uZoyahvsbSanpVWbzZWVVMF", "TXID": "5e77a98390e45f39bd38e908944858fa3e85ea47a06be5480af68fa256213572", "BlockHeight": 372595, "BlockHash": "0000000000000000007f3d7a17a7565d9b326ad7a692710a71ed070394399a33", "Offset": 1017 - }}` - ar, err = anchor.UnmarshalAnchorRecord([]byte(record)) - if err != nil { - panic(err) + }}`, + } + + for _, record := range records { + ar, err := anchor.UnmarshalAnchorRecord([]byte(record)) + if err != nil { + return nil, err + } + anchors = append(anchors, ar) } - answer = append(answer, ar) - return answer + return anchors, nil } diff --git a/database/databaseOverlay/entry.go b/database/databaseOverlay/entry.go index eb528d2f71..e54f9d97ad 100644 --- a/database/databaseOverlay/entry.go +++ b/database/databaseOverlay/entry.go @@ -24,8 +24,8 @@ func (db *Overlay) InsertEntry(entry interfaces.IEBEntry) error { if err != nil { return err } - if entry.GetChainID().String() == AnchorBlockID { - db.SaveAnchorInfoFromEntry(entry) + if _, exists := ValidAnchorChains[entry.GetChainID().String()]; exists { + db.SaveAnchorInfoFromEntry(entry, false) } return nil } @@ -44,8 +44,8 @@ func (db *Overlay) InsertEntryMultiBatch(entry interfaces.IEBEntry) error { batch = append(batch, interfaces.Record{ENTRY, entry.DatabasePrimaryIndex().Bytes(), entry.GetChainIDHash()}) db.PutInMultiBatch(batch) - if entry.GetChainID().String() == AnchorBlockID { - db.SaveAnchorInfoFromEntryMultiBatch(entry) + if _, exists := ValidAnchorChains[entry.GetChainID().String()]; exists { + db.SaveAnchorInfoFromEntry(entry, true) } return nil } diff --git a/database/databaseOverlay/overlay.go b/database/databaseOverlay/overlay.go index 
49526dbdf9..075c458ea2 100644 --- a/database/databaseOverlay/overlay.go +++ b/database/databaseOverlay/overlay.go @@ -121,6 +121,9 @@ type Overlay struct { BatchSemaphore sync.Mutex MultiBatch []interfaces.Record BlockExtractor blockExtractor.BlockExtractor + + BitcoinAnchorRecordPublicKeys []interfaces.Verifier + EthereumAnchorRecordPublicKeys []interfaces.Verifier } var _ interfaces.IDatabase = (*Overlay)(nil) @@ -247,6 +250,10 @@ func (db *Overlay) FetchBlockBySecondaryIndex(secondaryIndexBucket, blockBucket } func (db *Overlay) FetchBlock(bucket []byte, key interfaces.IHash, dst interfaces.DatabaseBatchable) (interfaces.DatabaseBatchable, error) { + if key == nil { + return nil, nil + } + block, err := db.Get(bucket, key.Bytes(), dst) if err != nil { return nil, err diff --git a/database/test_databases_test.go b/database/test_databases_test.go index 5d3285cd74..7405cee21d 100644 --- a/database/test_databases_test.go +++ b/database/test_databases_test.go @@ -9,6 +9,13 @@ import ( "os" "testing" + "reflect" + + "time" + + "sync" + + "github.com/FactomProject/factomd/common/entryBlock" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/common/primitives/random" @@ -51,9 +58,21 @@ var _ interfaces.BinaryMarshallable = (*TestData)(nil) var dbFilename = "testdb" +func TestOneDatabase(t *testing.T) { + // Testing level + m, err := leveldb.NewLevelDB(dbFilename, true) + if err != nil { + t.Error(err) + } + testNilRetreive(t, m) + CleanupTest(t, m) +} + func TestAllDatabases(t *testing.T) { + totalTests := 4 + // Secure Bolt - for i := 0; i < 5; i++ { + for i := 0; i < totalTests; i++ { m, err := securedb.NewEncryptedDB(dbFilename, "Bolt", random.RandomString()) if err != nil { t.Error(err) @@ -61,9 +80,10 @@ func TestAllDatabases(t *testing.T) { testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished Secure Bolt DB (1/6)") // Secure LDB - for i := 0; i < 5; i++ { + for i := 0; i < 
totalTests; i++ { m, err := securedb.NewEncryptedDB(dbFilename, "LDB", random.RandomString()) if err != nil { t.Error(err) @@ -71,9 +91,10 @@ func TestAllDatabases(t *testing.T) { testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished Secure LDB (2/6)") // Secure Map - for i := 0; i < 5; i++ { + for i := 0; i < totalTests; i++ { m, err := securedb.NewEncryptedDB(dbFilename, "Map", random.RandomString()) if err != nil { t.Error(err) @@ -81,16 +102,18 @@ func TestAllDatabases(t *testing.T) { testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished Secure Map (3/6)") // Bolt - for i := 0; i < 5; i++ { + for i := 0; i < totalTests; i++ { m := boltdb.NewBoltDB(nil, dbFilename) testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished Bolt DB (4/6)") // Level - for i := 0; i < 5; i++ { + for i := 0; i < totalTests; i++ { m, err := leveldb.NewLevelDB(dbFilename, true) if err != nil { t.Error(err) @@ -98,13 +121,15 @@ func TestAllDatabases(t *testing.T) { testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished LDB (5/6)") // Map - for i := 0; i < 5; i++ { + for i := 0; i < totalTests; i++ { m := new(mapdb.MapDB) testDB(t, m, i) CleanupTest(t, m) } + t.Log("Finished Map (3/6)") } func testDB(t *testing.T, m interfaces.IDatabase, i int) { @@ -291,3 +316,81 @@ func testGetAll(t *testing.T, m interfaces.IDatabase) { } } } + +func testNilRetreive(t *testing.T, m interfaces.IDatabase) { + o := databaseOverlay.NewOverlay(m) + //totalEntries := 10000 + + g := sync.WaitGroup{} + + writer := func(s, l int) { // Writes + g.Add(1) + for k, _ := range filledMap(s, l) { + e := entryBlock.DeterministicEntry(k) + err := o.InsertEntry(e) + if err != nil { + t.Errorf("%s", err.Error()) + } + time.Sleep(5 * time.Millisecond) + } + g.Done() + } + + reader := func(s, l int) { // Reads + g.Add(1) + for k, _ := range filledMap(s, l) { + f_e, err := o.FetchEntry(entryBlock.DeterministicEntry(k).GetHash()) + if err != nil { + t.Errorf("%s", err.Error()) + } + if f_e != nil && reflect.ValueOf(f_e).IsNil() { 
+ t.Errorf("Expected a nil, got %v", f_e) + } + time.Sleep(5 * time.Millisecond) + } + g.Done() + } + + for i := 0; i < 3; i++ { + go writer(0, 100) + go writer(0, 200) + go writer(0, 200) + + // Add contention on 0-1k + go reader(0, 100) + go reader(0, 100) + go reader(0, 100) + go reader(0, 200) + go reader(0, 200) + } + // Kinda kulgy, but each goroutine adds itself to wait group. + // Give them a chance to add themselves + time.Sleep(10 * time.Millisecond) + + g.Wait() + + e := entryBlock.RandomEntry() + f_e, err := o.FetchEntry(e.GetHash()) + if f_e != nil && reflect.ValueOf(f_e).IsNil() { + t.Errorf("Expected a nil, got %v", f_e) + } + + err = o.InsertEntry(e) + if err != nil { + t.Errorf("%s", err.Error()) + } + + f_e, err = o.FetchEntry(e.GetHash()) + if f_e != nil && reflect.ValueOf(f_e).IsNil() { + t.Errorf("Expected a nil, got %v", f_e) + } + +} + +func filledMap(start, length int) map[int]struct{} { + avail := make(map[int]struct{}) + for i := start; i < start+length; i++ { + avail[i] = struct{}{} + } + return avail +} diff --git a/elections/elections.go b/elections/elections.go index 6ed5b4c413..29e8aa067d 100644 --- a/elections/elections.go +++ b/elections/elections.go @@ -38,7 +38,6 @@ type Elections struct { VMIndex int // VMIndex of this election Msgs []interfaces.IMsg // Messages we are collecting in this election. Look here for what's missing. 
Input interfaces.IQueue - Output interfaces.IQueue Round []int Electing int // This is the federated Server index that we are looking to replace State interfaces.IState @@ -445,7 +444,6 @@ func Run(s *state.State) { e.State = s e.Name = s.FactomNodeName e.Input = s.ElectionsQueue() - e.Output = s.InMsgQueue() e.Electing = -1 e.Timeout = time.Duration(FaultTimeout) * time.Second diff --git a/electionsCore/interpreter/interpreter/interpreter.go b/electionsCore/interpreter/interpreter/interpreter.go index df5f2070c0..7016a53bc8 100644 --- a/electionsCore/interpreter/interpreter/interpreter.go +++ b/electionsCore/interpreter/interpreter/interpreter.go @@ -237,7 +237,7 @@ func (i *Interpreter) InterpretLine(line string) { } // till EOF or error func (i *Interpreter) Interpret(source io.Reader) { - defer func() { i.Input = i.Input }() // Reset i.Input when we exit + defer func(old *bufio.Reader) { i.Input = old }(i.Input) // Reset i.Input when we exit defer func() { if r := recover(); r != nil { fmt.Println("Error:", r) diff --git a/engine/MsgLogging.go b/engine/MsgLogging.go index 3611ddb069..d0476c3cb8 100644 --- a/engine/MsgLogging.go +++ b/engine/MsgLogging.go @@ -53,6 +53,9 @@ func (m *MsgLog) Init(enable bool, nodecnt int) { } func (m *MsgLog) Add2(fnode *FactomNode, out bool, peer string, where string, valid bool, msg interfaces.IMsg) { + if !m.Enable { + return + } m.sem.Lock() defer m.sem.Unlock() now := fnode.State.GetTimestamp() @@ -98,6 +101,10 @@ func (m *MsgLog) Add2(fnode *FactomNode, out bool, peer string, where string, va } func (m *MsgLog) PrtMsgs(state interfaces.IState) { + if !m.Enable { + fmt.Println("Message log is not enabled. 
Run factomd with runtime log enabled.") + return + } m.sem.Lock() defer m.sem.Unlock() diff --git a/engine/NetStart.go b/engine/NetStart.go index 6fa4174d26..cad723f8cd 100644 --- a/engine/NetStart.go +++ b/engine/NetStart.go @@ -16,6 +16,7 @@ import ( "time" "github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/constants/runstate" . "github.com/FactomProject/factomd/common/globals" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/messages" @@ -23,6 +24,7 @@ import ( "github.com/FactomProject/factomd/common/messages/msgsupport" "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/controlPanel" + "github.com/FactomProject/factomd/database/databaseOverlay" "github.com/FactomProject/factomd/database/leveldb" "github.com/FactomProject/factomd/elections" "github.com/FactomProject/factomd/p2p" @@ -182,6 +184,13 @@ func NetStart(s *state.State, p *FactomParams, listenToStdin bool) { s.CheckChainHeads.CheckChainHeads = p.CheckChainHeads s.CheckChainHeads.Fix = p.FixChainHeads + if p.P2PIncoming > 0 { + p2p.MaxNumberIncomingConnections = p.P2PIncoming + } + if p.P2POutgoing > 0 { + p2p.NumberPeersToConnect = p.P2POutgoing + } + fmt.Println(">>>>>>>>>>>>>>>>") fmt.Println(">>>>>>>>>>>>>>>> Net Sim Start!") fmt.Println(">>>>>>>>>>>>>>>>") @@ -192,8 +201,7 @@ func NetStart(s *state.State, p *FactomParams, listenToStdin bool) { fmt.Print("\n") fmt.Print("Gracefully shutting down the server...\n") for _, fnode := range fnodes { - fmt.Print("Shutting Down: ", fnode.State.FactomNodeName, "\r\n") - fnode.State.ShutdownChan <- 0 + fnode.State.ShutdownNode(0) } if p.EnableNet { p2pNetwork.NetworkStop() @@ -269,8 +277,10 @@ func NetStart(s *state.State, p *FactomParams, listenToStdin bool) { os.Stderr.WriteString(fmt.Sprintf("%20s %s\n", "Build", Build)) os.Stderr.WriteString(fmt.Sprintf("%20s %s\n", "Node name", p.NodeName)) os.Stderr.WriteString(fmt.Sprintf("%20s 
%v\n", "balancehash", messages.AckBalanceHash)) - os.Stderr.WriteString(fmt.Sprintf("%20s %s\n", "FNode 0 Salt", s.Salt.String()[:16])) + os.Stderr.WriteString(fmt.Sprintf("%20s %s\n", fmt.Sprintf("%s Salt", s.GetFactomNodeName()), s.Salt.String()[:16])) os.Stderr.WriteString(fmt.Sprintf("%20s %v\n", "enablenet", p.EnableNet)) + os.Stderr.WriteString(fmt.Sprintf("%20s %v\n", "net incoming", p2p.MaxNumberIncomingConnections)) + os.Stderr.WriteString(fmt.Sprintf("%20s %v\n", "net outgoing", p2p.NumberPeersToConnect)) os.Stderr.WriteString(fmt.Sprintf("%20s %v\n", "waitentries", p.WaitEntries)) os.Stderr.WriteString(fmt.Sprintf("%20s %d\n", "node", p.ListenTo)) os.Stderr.WriteString(fmt.Sprintf("%20s %s\n", "prefix", p.Prefix)) @@ -319,6 +329,9 @@ func NetStart(s *state.State, p *FactomParams, listenToStdin bool) { for i := 0; i < p.Cnt; i++ { makeServer(s) // We clone s to make all of our servers } + + addFnodeName(0) // bootstrap id doesn't change + // Modify Identities of new nodes if len(fnodes) > 1 && len(s.Prefix) == 0 { modifyLoadIdentities() // We clone s to make all of our servers @@ -546,6 +559,28 @@ func NetStart(s *state.State, p *FactomParams, listenToStdin bool) { startServers(true) } + // Anchoring related configurations + config := s.Cfg.(*util.FactomdConfig) + if len(config.App.BitcoinAnchorRecordPublicKeys) > 0 { + err := s.GetDB().(*databaseOverlay.Overlay).SetBitcoinAnchorRecordPublicKeysFromHex(config.App.BitcoinAnchorRecordPublicKeys) + if err != nil { + panic("Encountered an error while trying to set custom Bitcoin anchor record keys from config") + } + } + if len(config.App.EthereumAnchorRecordPublicKeys) > 0 { + err := s.GetDB().(*databaseOverlay.Overlay).SetEthereumAnchorRecordPublicKeysFromHex(config.App.EthereumAnchorRecordPublicKeys) + if err != nil { + panic("Encountered an error while trying to set custom Ethereum anchor record keys from config") + } + } + if p.ReparseAnchorChains { + fmt.Println("Reparsing anchor chains...") + err := 
fnodes[0].State.GetDB().(*databaseOverlay.Overlay).ReparseAnchorChains() + if err != nil { + panic("Encountered an error while trying to re-parse anchor chains: " + err.Error()) + } + } + // Start the webserver wsapi.Start(fnodes[0].State) if fnodes[0].State.DebugExec() && messages.CheckFileName("graphData.txt") { @@ -605,20 +640,25 @@ func makeServer(s *state.State) *FactomNode { func startServers(load bool) { for i, fnode := range fnodes { - if i > 0 { - fnode.State.Init() - } - go NetworkProcessorNet(fnode) - if load { - go state.LoadDatabase(fnode.State) - } - go fnode.State.GoSyncEntries() - go Timer(fnode.State) - go elections.Run(fnode.State) - go fnode.State.ValidatorLoop() + startServer(i, fnode, load) } } +func startServer(i int, fnode *FactomNode, load bool) { + fnode.State.RunState = runstate.Booting + if i > 0 { + fnode.State.Init() + } + NetworkProcessorNet(fnode) + if load { + go state.LoadDatabase(fnode.State) + } + go fnode.State.GoSyncEntries() + go Timer(fnode.State) + go elections.Run(fnode.State) + go fnode.State.ValidatorLoop() +} + func setupFirstAuthority(s *state.State) { if len(s.IdentityControl.Authorities) > 0 { //Don't initialize first authority if we are loading during fast boot @@ -635,3 +675,19 @@ func networkHousekeeping() { p2pProxy.SetWeight(p2pNetwork.GetNumberOfConnections()) } } + +func AddNode() { + + fnodes := GetFnodes() + s := fnodes[0].State + i := len(fnodes) + + makeServer(s) + modifyLoadIdentities() + + fnodes = GetFnodes() + fnodes[i].State.IntiateNetworkSkeletonIdentity() + fnodes[i].State.InitiateNetworkIdentityRegistration() + AddSimPeer(fnodes, i, i-1) // KLUDGE peer w/ only last node + startServer(i, fnodes[i], true) +} diff --git a/engine/NetworkProcessorNet.go b/engine/NetworkProcessorNet.go index 438e5fd319..923b3baa91 100644 --- a/engine/NetworkProcessorNet.go +++ b/engine/NetworkProcessorNet.go @@ -119,11 +119,11 @@ func Peers(fnode *FactomNode) { } // func ignoreMsg(){...} for { - if 
primitives.NewTimestampNow().GetTimeSeconds()-fnode.State.BootTime > int64(constants.CROSSBOOT_SALT_REPLAY_DURATION.Seconds()) { + now := fnode.State.GetTimestamp() + if now.GetTimeSeconds()-fnode.State.BootTime > int64(constants.CROSSBOOT_SALT_REPLAY_DURATION.Seconds()) { saltReplayFilterOn = false } cnt := 0 - now := fnode.State.GetTimestamp() for i := 0; i < 100 && fnode.State.APIQueue().Length() > 0; i++ { msg := fnode.State.APIQueue().Dequeue() @@ -283,6 +283,33 @@ func Peers(fnode *FactomNode) { continue } + regex, _ := fnode.State.GetInputRegEx() + + if regex != nil { + t := "" + if mm, ok := msg.(*messages.MissingMsgResponse); ok { + t = fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, mm.MsgResponse.String()) + } else { + t = fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, msg.String()) + } + + if mm, ok := msg.(*messages.MissingMsgResponse); ok { + if eom, ok := mm.MsgResponse.(*messages.EOM); ok { + t2 := fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, eom.String()) + messageResult := regex.MatchString(t2) + if messageResult { + fnode.State.LogMessage("NetworkInputs", "Drop, matched filter Regex", msg) + continue + } + } + } + messageResult := regex.MatchString(t) + if messageResult { + fnode.State.LogMessage("NetworkInputs", "Drop, matched filter Regex", msg) + continue + } + } + //if state.GetOut() { // fnode.State.Println("In Coming!! ",msg) //} @@ -312,6 +339,12 @@ func Peers(fnode *FactomNode) { fnode.State.LogMessage("NetworkInputs", fromPeer+", enqueue2", msg) fnode.State.LogMessage("InMsgQueue2", fromPeer+", enqueue2", msg) fnode.State.InMsgQueue2().Enqueue(msg) + } else if msg.Type() == constants.DBSTATE_MSG { + // notify the state that a new DBState has been recieved. + // TODO: send the msg to StatesReceived and only send to InMsgQueue when the next received message is ready. 
+ fnode.State.LogMessage("NetworkInputs", fromPeer+", enqueue", msg) + fnode.State.LogMessage("InMsgQueue", fromPeer+", enqueue", msg) + fnode.State.InMsgQueue().Enqueue(msg) } else { fnode.State.LogMessage("NetworkInputs", fromPeer+", enqueue", msg) fnode.State.LogMessage("InMsgQueue", fromPeer+", enqueue", msg) @@ -342,21 +375,41 @@ func NetworkOutputs(fnode *FactomNode) { // by an updated version when the block is ready. if msg.IsLocal() { // todo: Should be a dead case. Add tracking code to see if it ever happens -- clay - fnode.State.LogMessage("NetworkOutputs", "drop, local", msg) - continue - } - // Don't do a rand int if drop rate is 0 - if fnode.State.GetDropRate() > 0 && rand.Int()%1000 < fnode.State.GetDropRate() { - //drop the message, rather than processing it normally - - fnode.State.LogMessage("NetworkOutputs", "drop, simCtrl", msg) + fnode.State.LogMessage("NetworkOutputs", "Drop, local", msg) continue } if msg.GetRepeatHash() == nil { - fnode.State.LogMessage("NetworkOutputs", "drop, no repeat hash", msg) + fnode.State.LogMessage("NetworkOutputs", "Drop, no repeat hash", msg) continue } + regex, _ := fnode.State.GetOutputRegEx() + if regex != nil { + t := "" + if mm, ok := msg.(*messages.MissingMsgResponse); ok { + t = fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, mm.MsgResponse.String()) + } else { + t = fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, msg.String()) + } + + if mm, ok := msg.(*messages.MissingMsgResponse); ok { + if eom, ok := mm.MsgResponse.(*messages.EOM); ok { + t2 := fmt.Sprintf("%7d-:-%d %s", fnode.State.LLeaderHeight, fnode.State.CurrentMinute, eom.String()) + messageResult := regex.MatchString(t2) + if messageResult { + fnode.State.LogMessage("NetworkOutputs", "Drop, matched filter Regex", msg) + continue + } + } + } + messageResult := regex.MatchString(t) + if messageResult { + //fmt.Println("Found it!", t) + fnode.State.LogMessage("NetworkOutputs", "Drop, 
matched filter Regex", msg) + continue + } + } + //_, ok := msg.(*messages.Ack) //if ok { //// We don't care about the result, but we do want to log that we have @@ -382,20 +435,27 @@ func NetworkOutputs(fnode *FactomNode) { peer := fnode.Peers[p] fnode.MLog.Add2(fnode, true, peer.GetNameTo(), "P2P out", true, msg) if !fnode.State.GetNetStateOff() { // don't Send p2p messages if he is OFF - preSendTime := time.Now() - fnode.State.LogMessage("NetworkOutputs", "Send P2P "+peer.GetNameTo(), msg) - peer.Send(msg) - sendTime := time.Since(preSendTime) - TotalSendTime.Add(float64(sendTime.Nanoseconds())) - if fnode.State.MessageTally { - fnode.State.TallySent(int(msg.Type())) + // Don't do a rand int if drop rate is 0 + if fnode.State.GetDropRate() > 0 && rand.Int()%1000 < fnode.State.GetDropRate() { + //drop the message, rather than processing it normally + + fnode.State.LogMessage("NetworkOutputs", "Drop, simCtrl", msg) + } else { + preSendTime := time.Now() + fnode.State.LogMessage("NetworkOutputs", "Send P2P "+peer.GetNameTo(), msg) + peer.Send(msg) + sendTime := time.Since(preSendTime) + TotalSendTime.Add(float64(sendTime.Nanoseconds())) + if fnode.State.MessageTally { + fnode.State.TallySent(int(msg.Type())) + } } } else { - fnode.State.LogMessage("NetworkOutputs", "drop, simCtrl X", msg) + fnode.State.LogMessage("NetworkOutputs", "Drop, simCtrl X", msg) } } else { - fnode.State.LogMessage("NetworkOutputs", "drop, no peers", msg) + fnode.State.LogMessage("NetworkOutputs", "Drop, no peers", msg) } } else { fnode.State.LogMessage("NetworkOutputs", "Send broadcast", msg) @@ -409,12 +469,18 @@ func NetworkOutputs(fnode *FactomNode) { bco := fmt.Sprintf("%s/%d/%d", "BCast", p, i) fnode.MLog.Add2(fnode, true, peer.GetNameTo(), bco, true, msg) if !fnode.State.GetNetStateOff() { // Don't send him broadcast message if he is off - preSendTime := time.Now() - peer.Send(msg) - sendTime := time.Since(preSendTime) - TotalSendTime.Add(float64(sendTime.Nanoseconds())) - if 
fnode.State.MessageTally { - fnode.State.TallySent(int(msg.Type())) + if fnode.State.GetDropRate() > 0 && rand.Int()%1000 < fnode.State.GetDropRate() && !msg.IsFullBroadcast() { + //drop the message, rather than processing it normally + + fnode.State.LogMessage("NetworkOutputs", "Drop, simCtrl", msg) + } else { + preSendTime := time.Now() + peer.Send(msg) + sendTime := time.Since(preSendTime) + TotalSendTime.Add(float64(sendTime.Nanoseconds())) + if fnode.State.MessageTally { + fnode.State.TallySent(int(msg.Type())) + } } } } diff --git a/engine/SimPeer.go b/engine/SimPeer.go index 43da98f8e9..7592524f6d 100644 --- a/engine/SimPeer.go +++ b/engine/SimPeer.go @@ -199,8 +199,8 @@ func AddSimPeer(fnodes []*FactomNode, i1 int, i2 int) { f1.Peers = append(f1.Peers, peer12) f2.Peers = append(f2.Peers, peer21) - // for _, p := range f1.Peers { - // fmt.Printf("%s's peer: %s\n", p.GetNameFrom(), p.GetNameTo()) - // } + for _, p := range f1.Peers { + fmt.Printf("%s's peer: %s\n", p.GetNameFrom(), p.GetNameTo()) + } } diff --git a/engine/ci_whitelist b/engine/ci_whitelist new file mode 100644 index 0000000000..67b1a87460 --- /dev/null +++ b/engine/ci_whitelist @@ -0,0 +1,2 @@ +engine/TestMakeALeader +engine/TestAnElection diff --git a/engine/debug/addNames.sh b/engine/debug/addNames.sh index 18b5b76a5b..24f2ff4d0f 100755 --- a/engine/debug/addNames.sh +++ b/engine/debug/addNames.sh @@ -20,50 +20,25 @@ read -d '' scriptVariable << 'EOF' -/455b7b[^\\(]/ {x+= gsub(/455b7b/,"455b7b(fnode01)");} -/367795[^\\(]/ {x+= gsub(/367795/,"367795(fnode02)");} -/fc37fa[^\\(]/ {x+= gsub(/fc37fa/,"fc37fa(fnode03)");} -/e23849[^\\(]/ {x+= gsub(/e23849/,"e23849(fnode04)");} -/271203[^\\(]/ {x+= gsub(/271203/,"271203(fnode05)");} -/a21d5a[^\\(]/ {x+= gsub(/a21d5a/,"a21d5a(fnode06)");} -/15ac8a[^\\(]/ {x+= gsub(/15ac8a/,"15ac8a(fnode07)");} -/f6e861[^\\(]/ {x+= gsub(/f6e861/,"f6e861(fnode08)");} -/30a663[^\\(]/ {x+= gsub(/30a663/,"30a663(fnode09)");} -/dfa8ac[^\\(]/ {x+= 
gsub(/dfa8ac/,"dfa8ac(fnode10)");} - -/455b / {x+= gsub(/455b/,"455b7b(fnode01)");} -/15ac / {x+= gsub(/15ac/,"367795(fnode02)");} -/2712 / {x+= gsub(/2712/,"fc37fa(fnode03)");} -/3677 / {x+= gsub(/3677/,"e23849(fnode04)");} -/a21d / {x+= gsub(/a21d/,"271203(fnode05)");} -/e238 / {x+= gsub(/e238/,"a21d5a(fnode06)");} -/fc37 / {x+= gsub(/fc37/,"15ac8a(fnode07)");} -/a21d / {x+= gsub(/a21d/,"f6e861(fnode08)");} -/e238 / {x+= gsub(/e238/,"30a663(fnode09)");} -/fc37 / {x+= gsub(/fc37/,"dfa8ac(fnode10)");} +/455b7b[^\\(]/ {x+= gsub(/455b7b[a-f0-9]*/,"455b7b(fnode0)");} +/367795[^\\(]/ {x+= gsub(/367795[a-f0-9]*/,"367795(fnode01)");} +/fc37fa[^\\(]/ {x+= gsub(/fc37fa[a-f0-9]*/,"fc37fa(fnode02)");} +/e23849[^\\(]/ {x+= gsub(/e23849[a-f0-9]*/,"e23849(fnode03)");} +/271203[^\\(]/ {x+= gsub(/271203[a-f0-9]*/,"271203(fnode04)");} +/a21d5a[^\\(]/ {x+= gsub(/a21d5a[a-f0-9]*/,"a21d5a(fnode05)");} +/15ac8a[^\\(]/ {x+= gsub(/15ac8a[a-f0-9]*/,"15ac8a(fnode06)");} +/f6e861[^\\(]/ {x+= gsub(/f6e861[a-f0-9]*/,"f6e861(fnode07)");} +/30a663[^\\(]/ {x+= gsub(/30a663[a-f0-9]*/,"30a663(fnode08)");} +/dfa8ac[^\\(]/ {x+= gsub(/dfa8ac[a-f0-9]*/,"dfa8ac(fnode09)");} + + {print;} -/455b7b$/ {x+= gsub(/455b7b/,"455b7b(fnode01)");} -/15ac8a$/ {x+= gsub(/367795/,"367795(fnode02)");} -/271203$/ {x+= gsub(/fc37fa/,"fc37fa(fnode03)");} -/367795$/ {x+= gsub(/e23849/,"e23849(fnode04)");} -/a21d5a$/ {x+= gsub(/271203/,"271203(fnode05)");} -/e23849$/ {x+= gsub(/a21d5a/,"a21d5a(fnode06)");} -/fc37fa$/ {x+= gsub(/15ac8a/,"15ac8a(fnode07)");} -/a21d5a$/ {x+= gsub(/f6e861/,"f6e861(fnode08)");} -/e23849$/ {x+= gsub(/30a663/,"30a663(fnode09)");} -/fc37fa$/ {x+= gsub(/dfa8ac/,"dfa8ac(fnode10)");} -/34353562/ {x+= gsub(/34353562/,"455b7b(fnode01)");} -/33363737/ {x+= gsub(/33363737/,"367795(fnode02)");} -/66633337/ {x+= gsub(/66633337/,"fc37fa(fnode03)");} -/65323338/ {x+= gsub(/65323338/,"e23849(fnode04)");} -/32373132/ {x+= gsub(/32373132/,"271203(fnode05)");} -/61323164/ {x+= 
gsub(/61323164/,"a21d5a(fnode06)");} -/31356163/ {x+= gsub(/31356163/,"15ac8a(fnode07)");} + # print warm fuzzy's to stderr + {if (FNR%1024 == 1) {printf("%40s:%d \\r", FILENAME, x)>"/dev/stderr";}} - {if (x%1024 == 0) {printf("%40s:%d\\r", FILENAME, x)>"/dev/stderr";}} - {print;} - + END{printf("%40s:%d\\n", FILENAME, x)>"/dev/stderr";} + EOF ################################ # End of AWK Scripts # diff --git a/engine/debug/extract.sh b/engine/debug/extract.sh index b6ac1924a0..62227906f1 100755 --- a/engine/debug/extract.sh +++ b/engine/debug/extract.sh @@ -5,22 +5,23 @@ ################################ read -d '' scriptVariable << 'EOF' { - n = substr($1,1,1); + n = substr($1,6,1)-1; print n; cmd = "tar xzvf "$1; print cmd; system(cmd); - foo = sprintf("%02d", n) - cmd = "ls *fnode0_*.txt | awk ' {f = $1; g = tolower(f); sub(/0_/,\\"" foo "_\\",g); if(f!=g) {cmd=\\\"mv -v \\\" f \\\" \\\" g; print cmd; system(cmd);}}'"; - print cmd, n; - system(cmd); - cmd = "mv out.txt out" foo " .txt" - print cmd, n; - system(cmd); - cmd = "mv err.txt err" foo " .txt" - print cmd, n; - system(cmd); - + if(n != 0) { + foo = sprintf("%02d", n) + cmd = "ls *fnode0_*.txt | awk ' {f = $1; g = tolower(f); sub(/0_/,\\"" foo "_\\",g); if(f!=g) {cmd=\\\"mv -v \\\" f \\\" \\\" g; print cmd; system(cmd);}}'"; + print cmd, n; + system(cmd); + cmd = "mv out.txt out" foo ".txt" + print cmd, n; + system(cmd); + cmd = "mv err.txt err" foo ".txt" + print cmd, n; + system(cmd); + } } EOF @@ -30,4 +31,4 @@ EOF - ls -r *.tgz | awk "$scriptVariable" + ls -r *.tgz | awk "$scriptVariable" diff --git a/engine/factomParams.go b/engine/factomParams.go index 18cd7c3d85..ac82af8364 100644 --- a/engine/factomParams.go +++ b/engine/factomParams.go @@ -30,6 +30,8 @@ func init() { flag.IntVar(&p.FaultTimeout, "faulttimeout", 120, "Seconds before considering Federated servers at-fault. 
Default is 120.") flag.IntVar(&p.RoundTimeout, "roundtimeout", 30, "Seconds before audit servers will increment rounds and volunteer.") flag.IntVar(&p2p.NumberPeersToBroadcast, "broadcastnum", 16, "Number of peers to broadcast to in the peer to peer networking") + flag.IntVar(&p.P2PIncoming, "p2pIncoming", 0, "Override the maximum number of other peers dialing into this node that will be accepted; default 200") + flag.IntVar(&p.P2POutgoing, "p2pOutgoing", 0, "Override the maximum number of peers this node will attempt to dial into; default 32") flag.StringVar(&p.ConfigPath, "config", "", "Override the config file location (factomd.conf)") flag.BoolVar(&p.CheckChainHeads, "checkheads", true, "Enables checking chain heads on boot") flag.BoolVar(&p.FixChainHeads, "fixheads", true, "If --checkheads is enabled, then this will also correct any errors reported") @@ -90,6 +92,7 @@ func init() { flag.StringVar(&p.NodeName, "nodename", "", "Assign a name to the node") flag.StringVar(&p.ControlPanelSetting, "controlpanelsetting", "", "Can set to 'disabled', 'readonly', or 'readwrite' to overwrite config file") flag.BoolVar(&p.FullHashesLog, "fullhasheslog", false, "true create a log of all unique hashes seen during processing") + flag.BoolVar(&p.ReparseAnchorChains, "reparseanchorchains", false, "If true, reparse bitcoin and ethereum anchor chains in the database") } func ParseCmdLine(args []string) *FactomParams { diff --git a/engine/factomd.go b/engine/factomd.go index 963fd0410a..4d678d38cc 100644 --- a/engine/factomd.go +++ b/engine/factomd.go @@ -8,6 +8,7 @@ import ( "fmt" "runtime" + "github.com/FactomProject/factomd/common/constants/runstate" . 
"github.com/FactomProject/factomd/common/globals" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" @@ -37,14 +38,6 @@ var _ = fmt.Print // or create more context loggers off of this var packageLogger = log.WithFields(log.Fields{"package": "engine"}) -// Build sets the factomd build id using git's SHA -// Version sets the semantic version number of the build -// $ go install -ldflags "-X github.com/FactomProject/factomd/engine.Build=`git rev-parse HEAD` -X github.com/FactomProject/factomd/engine.=`cat VERSION`" -// It also seems to need to have the previous binary deleted if recompiling to have this message show up if no code has changed. -// Since we are tracking code changes, then there is no need to delete the binary to use the latest message -var Build string -var FactomdVersion string = "BuiltWithoutVersion" - func Factomd(params *FactomParams, listenToStdin bool) interfaces.IState { fmt.Printf("Go compiler version: %s\n", runtime.Version()) fmt.Printf("Using build: %s\n", Build) @@ -53,9 +46,9 @@ func Factomd(params *FactomParams, listenToStdin bool) interfaces.IState { fmt.Printf("Start time: %s\n", StartTime.String()) state0 := new(state.State) - state0.IsRunning = true - // Setup the name to catch any early logging + state0.RunState = runstate.New + // Setup the name to catch any early logging state0.FactomNodeName = state0.Prefix + "FNode0" state0.TimestampAtBoot = primitives.NewTimestampNow() state0.SetLeaderTimestamp(state0.TimestampAtBoot) diff --git a/engine/factomd_test.go b/engine/factomd_test.go index 681edc31f3..f9b83cb384 100644 --- a/engine/factomd_test.go +++ b/engine/factomd_test.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "net/http" "os" - "os/exec" "runtime" "strings" "sync" @@ -31,117 +30,23 @@ import ( "github.com/FactomProject/factomd/wsapi" ) -func TestSetupANetwork(t *testing.T) { - if RanSimTest { - return - } - - RanSimTest = true - - state0 := SetupSim("LLLLAAAFFF", 
map[string]string{"--debuglog": "", "--blktime": "20"}, 14, 0, 0, t) - - RunCmd("9") // Puts the focus on node 9 - RunCmd("x") // Takes Node 9 Offline - RunCmd("w") // Point the WSAPI to send API calls to the current node. - RunCmd("10") // Puts the focus on node 9 - RunCmd("8") // Puts the focus on node 8 - RunCmd("w") // Point the WSAPI to send API calls to the current node. - RunCmd("7") - WaitBlocks(state0, 1) // Wait for 1 block - - WaitForMinute(state0, 2) // Waits for minute 2 - RunCmd("F100") // Set the Delay on messages from all nodes to 100 milliseconds - // .15 second minutes is too fast for dropping messages until the dropping is fixed (FD-971) is fixed - // could change to 4 second minutes and turn this back on -- Clay - // RunCmd("S10") // Set Drop Rate to 1.0 on everyone - RunCmd("g10") // Adds 10 identities to your identity pool. - - fn1 := GetFocus() - PrintOneStatus(0, 0) - if fn1.State.FactomNodeName != "FNode07" { - t.Fatalf("Expected FNode07, but got %s", fn1.State.FactomNodeName) - } - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 3) // Waits for 3 "Minutes" - RunCmd("g1") // // Adds 1 identities to your identity pool. - WaitForMinute(state0, 4) // Waits for 4 "Minutes" - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 5) // Waits for 5 "Minutes" - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 6) // Waits for 6 "Minutes" - WaitBlocks(state0, 1) // Waits for 1 block - WaitForMinute(state0, 1) // Waits for 1 "Minutes" - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 2) // Waits for 2 "Minutes" - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 3) // Waits for 3 "Minutes" - RunCmd("g20") // Adds 20 identities to your identity pool. 
- WaitBlocks(state0, 1) - RunCmd("9") // Focuses on Node 9 - RunCmd("x") // Brings Node 9 back Online - RunCmd("8") // Focuses on Node 8 - - time.Sleep(100 * time.Millisecond) - - fn2 := GetFocus() - PrintOneStatus(0, 0) - if fn2.State.FactomNodeName != "FNode08" { - t.Fatalf("Expected FNode08, but got %s", fn1.State.FactomNodeName) - } - - RunCmd("i") // Shows the identities being monitored for change. - //Test block recording lengths and error checking for pprof - RunCmd("b100") // Recording delays due to blocked go routines longer than 100 ns (0 ms) - - RunCmd("b") // specifically how long a block will be recorded (in nanoseconds). 1 records all blocks. - - RunCmd("babc") // Not sure that this does anything besides return a message to use "bnnn" - - RunCmd("b1000000") // Recording delays due to blocked go routines longer than 1000000 ns (1 ms) - - RunCmd("/") // Sort Status by Chain IDs - - RunCmd("/") // Sort Status by Node Name - - RunCmd("a1") // Shows Admin block for Node 1 - RunCmd("e1") // Shows Entry credit block for Node 1 - RunCmd("d1") // Shows Directory block - RunCmd("f1") // Shows Factoid block for Node 1 - RunCmd("a100") // Shows Admin block for Node 100 - RunCmd("e100") // Shows Entry credit block for Node 100 - RunCmd("d100") // Shows Directory block - RunCmd("f100") // Shows Factoid block for Node 1 - RunCmd("yh") // Nothing - RunCmd("yc") // Nothing - RunCmd("r") // Rotate the WSAPI around the nodes - WaitForMinute(state0, 1) // Waits 1 "Minute" - - RunCmd("g1") // Adds 1 identities to your identity pool. - WaitForMinute(state0, 3) // Waits 3 "Minutes" - WaitBlocks(fn1.State, 3) // Waits for 3 blocks - - ShutDownEverything(t) - -} - func TestLoad(t *testing.T) { if RanSimTest { return } RanSimTest = true - state0 := SetupSim("LFF", map[string]string{"--debuglog": "." 
/*"--db": "LDB"*/}, 15, 0, 0, t) - RunCmd("2") // select 2 - RunCmd("w") // feed load into follower - RunCmd("F200") // delay messages - RunCmd("R40") // Feed load - WaitBlocks(state0, 5) + // use a tree so the messages get reordered + state0 := SetupSim("LLF", map[string]string{"--debuglog": ""}, 15, 0, 0, t) + + RunCmd("2") // select 2 + RunCmd("R30") // Feed load + WaitBlocks(state0, 10) RunCmd("R0") // Stop load - WaitBlocks(state0, 5) - // should check holding and queues cleared out + WaitBlocks(state0, 1) ShutDownEverything(t) -} //TestLoad(){...} +} // testLoad(){...} func TestCatchup(t *testing.T) { if RanSimTest { @@ -195,18 +100,18 @@ func TestTXTimestampsAndBlocks(t *testing.T) { RunCmd("x") RunCmd("R0") // turn off the load } - func TestLoad2(t *testing.T) { if RanSimTest { return } RanSimTest = true + // use tree node setup so messages get reordered go RunCmd("Re") // Turn on tight allocation of EC as soon as the simulator is up and running state0 := SetupSim("LLLAF", map[string]string{"--blktime": "20", "--debuglog": ".", "--net": "tree"}, 24, 0, 0, t) StatusEveryMinute(state0) - RunCmd("4") // select node 5 + RunCmd("4") // select node 4 RunCmd("x") // take out 7 from the network WaitBlocks(state0, 1) WaitForMinute(state0, 1) @@ -231,7 +136,6 @@ func TestLoad2(t *testing.T) { } ShutDownEverything(t) } //TestLoad2(){...} - // The intention of this test is to detect the EC overspend/duplicate commits (FD-566) bug. // the bug happened when the FCT transaction and the commits arrived in different orders on followers vs the leader. 
// Using a message delay, drop and tree network makes this likely @@ -389,7 +293,6 @@ func TestActivationHeightElection(t *testing.T) { ShutDownEverything(t) } - func TestAnElection(t *testing.T) { if RanSimTest { return @@ -416,6 +319,22 @@ func TestAnElection(t *testing.T) { // wait for him to update via dbstate and become an audit WaitBlocks(state0, 2) WaitMinutes(state0, 1) + + { // debug holding queue + + for _, fnode := range GetFnodes() { + s := fnode.State + for _, h := range s.Hold.Messages() { + for _, m := range h { + s.LogMessage("newholding", "stuck", m) + } + } + } + } + + state2 := GetFnodes()[2].State + WaitForBlock(state2, 7) // wait for sync w/ network + WaitForAllNodes(state0) // PrintOneStatus(0, 0) @@ -1422,7 +1341,7 @@ func TestElection9(t *testing.T) { } RanSimTest = true - state0 := SetupSim("LLAL", map[string]string{"--debuglog": "", "--faulttimeout": "10"}, 8, 1, 1, t) + state0 := SetupSim("LLAL", map[string]string{"--debuglog": "", "--faulttimeout": "10"}, 88888, 1, 1, t) StatusEveryMinute(state0) CheckAuthoritySet(t) @@ -1433,7 +1352,7 @@ func TestElection9(t *testing.T) { RunCmd("3") WaitForMinute(state3, 9) // wait till the victim is at minute 9 RunCmd("x") - WaitMinutes(state0, 1) // Wait till fault completes + WaitMinutes(state0, 2) // Wait till fault completes RunCmd("x") WaitBlocks(state0, 2) // wait till the victim is back as the audit server @@ -1591,14 +1510,73 @@ func TestDBState(t *testing.T) { ShutDownEverything(t) } -func SystemCall(cmd string) { - fmt.Println("SystemCall(\"", cmd, "\")") - out, err := exec.Command("sh", "-c", cmd).Output() +func TestDebugLocation(t *testing.T) { + if RanSimTest { + return + } + RanSimTest = true + + tempdir := os.TempDir() + string(os.PathSeparator) + "logs" + string(os.PathSeparator) // get os agnostic path to the temp directory + + // toss any files that might preexist this run so we don't see old files + err := os.RemoveAll(tempdir) + if err != nil { + panic(err) + } + + // make sure the 
directory exists + err = os.MkdirAll(tempdir, os.ModePerm) if err != nil { - foo := err.Error() - fmt.Println(foo) - os.Exit(1) panic(err) } - fmt.Print(string(out)) + + // start a sim with a select set of logs + state0 := SetupSim("LF", map[string]string{"--debuglog": tempdir + "holding|networkinputs|ackqueue"}, 6, 0, 0, t) + WaitBlocks(state0, 1) + ShutDownEverything(t) + + // check the logs exist where we wanted them + DoesFileExists(tempdir+"fnode0_holding.txt", t) + DoesFileExists(tempdir+"fnode01_holding.txt", t) + DoesFileExists(tempdir+"fnode0_networkinputs.txt", t) + DoesFileExists(tempdir+"fnode01_networkinputs.txt", t) + DoesFileExists(tempdir+"fnode01_ackqueue.txt", t) + + // toss the files we created since they are no longer needed + err = os.RemoveAll(tempdir) + if err != nil { + panic(err) + } + +} + +func TestDebugLocationParse(t *testing.T) { + tempdir := os.TempDir() + string(os.PathSeparator) + "logs" + string(os.PathSeparator) // get os agnostic path to the temp directory + stringsToCheck := []string{tempdir + "holding", tempdir + "networkinputs", tempdir + ".", tempdir + "ackqueue"} + + for i := 0; i < len(stringsToCheck); i++ { + // Checks that the SplitUpDebugLogRegEx function works as expected + dirlocation, regex := messages.SplitUpDebugLogRegEx(stringsToCheck[i]) + if dirlocation != tempdir { + t.Fatalf("Error SplitUpDebugLogRegEx() did not return the correct directory location.") + } + if strings.Contains(regex, string(os.PathSeparator)) { + t.Fatalf("Error SplitUpDebugLogRegEx() did not return the correct directory regex.") + } + } +} + +func DoesFileExists(path string, t *testing.T) { + _, err := os.Stat(path) + if err != nil { + t.Fatalf("Error checking for File: %s", err) + } else { + t.Logf("Found file %s", path) + } + if os.IsNotExist(err) { + t.Fatalf("File %s doesn't exist", path) + } else { + t.Logf("Found file %s", path) + } + } diff --git a/engine/loadcreate.go b/engine/loadcreate.go index a98144ccf2..ae2c41a15c 100644 --- 
a/engine/loadcreate.go +++ b/engine/loadcreate.go @@ -141,6 +141,7 @@ func (lg *LoadGenerator) KeepUsFunded() { ts = "true" } + //EC3Eh7yQKShgjkUSFrPbnQpboykCzf4kw9QHxi47GGz5P2k3dbab is EC address if lg.PerSecond == 0 && limitBuys { if i%100 == 0 { // Log our occasional realization that we have nothing to do. diff --git a/engine/macRunSimTest.sh b/engine/macRunSimTest.sh new file mode 100755 index 0000000000..108c820589 --- /dev/null +++ b/engine/macRunSimTest.sh @@ -0,0 +1,42 @@ +#/bin/sh +# set -x + +if [ -z "$1" ] + then + echo excluding long tests + pattern='.*' + else + pattern="$1" +fi +if [ -z "$2" ] + then + echo excluding debug tests and long tests + npattern="TestPass|TestFail|TestRandom|long" + else + npattern="$2|TestPass|TestFail|TestRandom|long" +fi + + + +echo preparing to run: -$pattern- -$npattern- +grep -Eo " Test[^( ]+" factomd_test.go | grep -Ev "$npattern" | sort +sleep 3 + +mkdir -p test +#remove old logs +grep -Eo " Test[^( ]+" factomd_test.go | grep -Ev "$npattern" | sort | xargs -n 1 -I testname rm -rf test/testname +#compile the tests +go test -c github.com/FactomProject/factomd/engine -o test/factomd_test +#run the tests + +grep -Eo " Test[^( ]+" factomd_test.go | grep -Ev "$npattern" | sort | xargs -I TestMakeALeader -n1 bash -c 'echo "Run TestMakeALeader"; mkdir -p test/TestMakeALeader; cd test/TestMakeALeader; ../factomd_test --test.v --test.timeout 30m --test.run "^TestMakeALeader$" &> testlog.txt; pwd; grep -EH "PASS:|FAIL:|panic|bind| Timeout " testlog.txt' + +echo "Results:" +find . -name testlog.txt | sort | xargs grep -EHm1 "PASS:" +echo "" +find . -name testlog.txt | sort | xargs grep -EHm1 "FAIL:|panic|bind| Timeout " + + + +#(echo git checkout git rev-parse HEAD; find . 
-name testlog.txt | xargs grep -EH "PASS:|FAIL:|panic") | mail -s "Test results `date`" `whoami`@factom.com + diff --git a/engine/printSummary.go b/engine/printSummary.go index 1ab015975c..7155ecdf26 100644 --- a/engine/printSummary.go +++ b/engine/printSummary.go @@ -133,6 +133,12 @@ func GetSystemStatus(listenTo int, wsapiNode int) string { } prt = prt + fmt.Sprintf(fmtstr, "Holding", list) + list = "" + for _, f := range pnodes { + list = list + fmt.Sprintf(" %3d", f.State.Hold.GetSize()) + } + prt = prt + fmt.Sprintf(fmtstr, "NewHolding", list) + list = "" for _, f := range pnodes { list = list + fmt.Sprintf(" %3d", f.State.Commits.Len()) diff --git a/engine/profiler.go b/engine/profiler.go index 6993b3edb3..7ae285f366 100644 --- a/engine/profiler.go +++ b/engine/profiler.go @@ -8,7 +8,7 @@ import ( "fmt" "log" "net/http" - _ "net/http/pprof" + "net/http/pprof" "runtime" "github.com/prometheus/client_golang/prometheus" @@ -24,11 +24,20 @@ func StartProfiler(mpr int, expose bool) { if expose { pre = "" } - log.Println(http.ListenAndServe(fmt.Sprintf("%s:%s", pre, logPort), nil)) + + mux := http.NewServeMux() + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + log.Println(http.ListenAndServe(fmt.Sprintf("%s:%s", pre, logPort), mux)) //runtime.SetBlockProfileRate(100000) } func launchPrometheus(port int) { - http.Handle("/metrics", prometheus.Handler()) - go http.ListenAndServe(fmt.Sprintf(":%d", port), nil) + mux := http.NewServeMux() + mux.Handle("/metrics", prometheus.Handler()) + go http.ListenAndServe(fmt.Sprintf(":%d", port), mux) } diff --git a/engine/simAuthorities.go b/engine/simAuthorities.go index 683f9ed113..6e9c777808 100644 --- a/engine/simAuthorities.go +++ b/engine/simAuthorities.go @@ -850,8 +850,6 @@ func modifyLoadIdentities() { if 
len(list) == 0 { fmt.Println("Error when loading up identities for fnodes") } - // 0 is not modified here - addFnodeName(0) for i := 1; i < len(fnodes); i++ { if i-1 >= len(list) { @@ -894,12 +892,13 @@ func modifyLoadIdentities() { func addFnodeName(i int) { // full name - globals.FnodeNames[fnodes[i].State.IdentityChainID.String()] = fnodes[i].State.FactomNodeName + name := fnodes[i].State.FactomNodeName + globals.FnodeNames[fnodes[i].State.IdentityChainID.String()] = name // common short set - globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[3:6])] = fnodes[i].State.FactomNodeName - globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:5])] = fnodes[i].State.FactomNodeName - globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:])] = fnodes[i].State.FactomNodeName - globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:8])] = fnodes[i].State.FactomNodeName + globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[3:6])] = name + globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:5])] = name + globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:])] = name + globals.FnodeNames[fmt.Sprintf("%x", fnodes[i].State.IdentityChainID.Bytes()[:8])] = name } func shad(data []byte) []byte { diff --git a/engine/simControl.go b/engine/simControl.go index 5a9787880f..8a6f3d6ebf 100644 --- a/engine/simControl.go +++ b/engine/simControl.go @@ -100,6 +100,7 @@ func SimControl(listenTo int, listenStdin bool) { var faulting bool var cancelheight int = -1 var cancelindex int = -1 + var initchainCost = 11 ListenTo = listenTo @@ -203,6 +204,13 @@ func SimControl(listenTo int, listenStdin bool) { // os.Stderr.WriteString(fmt.Sprintf("Error in funding the wallet, %s\n", err.Error())) // break //} + + // Perfectly fund the g command + idcost := 13 + 15 + 1 // Cost for 1 ID : Root + Management + Register + need := 
(idcost * count) + initchainCost + FundWalletTOFF(fnodes[wsapiNode].State, 0, uint64(need)*fnodes[wsapiNode].State.GetFactoshisPerEC()) + + initchainCost = 0 // Init only happens once. We set to 0 to not count it again auths, skipped, err := authorityToBlockchain(count, fnodes[wsapiNode].State) if err != nil { os.Stderr.WriteString(fmt.Sprintf("Error making authorities, %s\n", err.Error())) diff --git a/engine/simWallet.go b/engine/simWallet.go index d8e54ee112..0fd0fc2e36 100644 --- a/engine/simWallet.go +++ b/engine/simWallet.go @@ -4,6 +4,8 @@ import ( "encoding/hex" "fmt" + "github.com/FactomProject/factomd/common/interfaces" + ed "github.com/FactomProject/ed25519" "github.com/FactomProject/factomd/common/factoid" "github.com/FactomProject/factomd/common/messages" @@ -18,69 +20,56 @@ func FundWallet(st *state.State, amt uint64) (error, string) { return FundWalletTOFF(st, 0, amt) } +// REVIEW: consider relocating many of these functions to testHelpers/simWallet.go + // FundWalletTOFF() // Entry Point where test code allows the transaction to have a time offset from the current time. 
func FundWalletTOFF(st *state.State, timeOffsetInMilliseconds int64, amt uint64) (error, string) { inSec, _ := primitives.HexToHash("FB3B471B1DCDADFEB856BD0B02D8BF49ACE0EDD372A3D9F2A95B78EC12A324D6") // private key or FCT Source outEC, _ := primitives.HexToHash("c23ae8eec2beb181a0da926bd2344e988149fbe839fbc7489f2096e7d6110243") // EC address - var sec [64]byte - copy(sec[:32], inSec.Bytes()) // pass 32 byte key in a 64 byte field for the crypto library - - pub := ed.GetPublicKey(&sec) // get the public key for our FCT source address - - rcd := factoid.NewRCD_1(pub[:]) // build the an RCD "redeem condition data structure" - - inAdd, err := rcd.GetAddress() - if err != nil { - panic(err) - } - - outAdd := factoid.NewAddress(outEC.Bytes()) + // So what we are going to do is get the current time in ms, add to it the offset provided (usually zero, except + // for tests) + ts := primitives.NewTimestampFromMilliseconds(uint64(primitives.NewTimestampNow().GetTimeMilli() + timeOffsetInMilliseconds)) - trans := new(factoid.Transaction) - trans.AddInput(inAdd, amt) - trans.AddECOutput(outAdd, amt) + return fundECWallet(st, inSec, outEC, ts, amt) +} - trans.AddRCD(rcd) - trans.AddAuthorization(rcd) +// FundECWallet get the current time in ms, add to it the offset provided (usually zero, except for tests) +func FundECWallet(st *state.State, inSec interfaces.IHash, outEC interfaces.IHash, amt uint64) (error, string) { + ts := primitives.NewTimestampFromMilliseconds(uint64(primitives.NewTimestampNow().GetTimeMilli())) + return fundECWallet(st, inSec, outEC, ts, amt) +} - // So what we are going to do is get the current time in ms, add to it the offset provided (usually zero, except - // for tests) - trans.SetTimestamp(primitives.NewTimestampFromMilliseconds( - uint64(primitives.NewTimestampNow().GetTimeMilli() + timeOffsetInMilliseconds))) +// fundEDWallet() buys EC credits adds fee on top of amt +func fundECWallet(st *state.State, inSec interfaces.IHash, outEC interfaces.IHash, 
timeInMilliseconds *primitives.Timestamp, amt uint64) (error, string) { - fee, err := trans.CalculateFee(st.GetFactoshisPerEC()) - if err != nil { - return err, "" - } - input, err := trans.GetInput(0) + trans, err := ComposeEcTransaction(inSec, outEC, timeInMilliseconds, amt, st.GetFactoshisPerEC()) if err != nil { - return err, "" + return err, "Failed to build transaction" } - input.SetAmount(amt + fee) - dataSig, err := trans.MarshalBinarySig() - if err != nil { - return err, "" - } - sig := factoid.NewSingleSignatureBlock(inSec.Bytes(), dataSig) - trans.SetSignatureBlock(0, sig) + // FIXME: consider building msg and pushing onto API Queue instead + return PostTransaction(st, trans) +} + +// create wsapi Post and invoke v2Request handler +func PostTransaction(st *state.State, trans *factoid.Transaction) (error, string) { t := new(wsapi.TransactionRequest) data, _ := trans.MarshalBinary() t.Transaction = hex.EncodeToString(data) j := primitives.NewJSON2Request("factoid-submit", 0, t) - _, err = v2Request(j, st.GetPort()) - //_, err = wsapi.HandleV2Request(st, j) + _, err := v2Request(j, st.GetPort()) + if err != nil { return err, "" } - _ = err return nil, fmt.Sprintf("%v", trans.GetTxID()) } +// SendTxn() adds transaction to APIQueue bypassing the wsapi / json encoding func SendTxn(s *state.State, amt uint64, userSecretIn string, userPubOut string, ecPrice uint64) (*factoid.Transaction, error) { txn, _ := NewTransaction(amt, userSecretIn, userPubOut, ecPrice) msg := new(messages.FactoidTransaction) @@ -93,6 +82,11 @@ func GetBalance(s *state.State, userStr string) int64 { return s.FactoidState.GetFactoidBalance(factoid.NewAddress(primitives.ConvertUserStrToAddress(userStr)).Fixed()) } +func GetBalanceEC(s *state.State, userStr string) int64 { + a := factoid.NewAddress(primitives.ConvertUserStrToAddress(userStr)) + return s.GetFactoidState().GetECBalance(a.Fixed()) +} + // generate a pair of user-strings Fs.., FA.. 
func RandomFctAddressPair() (string, string) { pkey := primitives.RandomPrivateKey() @@ -104,15 +98,18 @@ func RandomFctAddressPair() (string, string) { // construct a new factoid transaction func NewTransaction(amt uint64, userSecretIn string, userPublicOut string, ecPrice uint64) (*factoid.Transaction, error) { - inSec := factoid.NewAddress(primitives.ConvertUserStrToAddress(userSecretIn)) outPub := factoid.NewAddress(primitives.ConvertUserStrToAddress(userPublicOut)) + return ComposeFctTransaction(amt, inSec, outPub, ecPrice) +} - var sec [64]byte - copy(sec[:32], inSec.Bytes()) // pass 32 byte key in a 64 byte field for the crypto library - - pub := ed.GetPublicKey(&sec) // get the public key for our FCT source address +// create a transaction to transfer FCT between addresses +// adds EC fee on top of input amount +func ComposeFctTransaction(amt uint64, inSec interfaces.IHash, outPub interfaces.IHash, ecPrice uint64) (*factoid.Transaction, error) { + var sec [64]byte + copy(sec[:32], inSec.Bytes()) // pass 32 byte key in a 64 byte field for the crypto library + pub := ed.GetPublicKey(&sec) // get the public key for our FCT source address rcd := factoid.NewRCD_1(pub[:]) // build the an RCD "redeem condition data structure" inAdd, err := rcd.GetAddress() @@ -124,13 +121,8 @@ func NewTransaction(amt uint64, userSecretIn string, userPublicOut string, ecPri trans.AddInput(inAdd, amt) trans.AddOutput(outPub, amt) - /* - userIn := primitives.ConvertFctAddressToUserStr(inAdd) - userOut := primitives.ConvertFctAddressToUserStr(outPub) - fmt.Printf("Txn %v %v -> %v\n", amt, userIn, userOut) - */ - - // REVIEW: why is this different from engine.FundWallet() ? + // REVIEW: why is this not needed? + // seems to be different from FundWallet() ? 
//trans.AddRCD(rcd) trans.AddAuthorization(rcd) trans.SetTimestamp(primitives.NewTimestampNow()) @@ -156,3 +148,47 @@ func NewTransaction(amt uint64, userSecretIn string, userPublicOut string, ecPri return trans, nil } + +// create a transaction to buy Entry Credits +// this adds the EC fee on top of the input amount +func ComposeEcTransaction(inSec interfaces.IHash, outEC interfaces.IHash, timeInMilliseconds *primitives.Timestamp, amt uint64, ecPrice uint64) (*factoid.Transaction, error) { + var sec [64]byte + copy(sec[:32], inSec.Bytes()) // pass 32 byte key in a 64 byte field for the crypto library + pub := ed.GetPublicKey(&sec) // get the public key for our FCT source address + rcd := factoid.NewRCD_1(pub[:]) // build the an RCD "redeem condition data structure" + + inAdd, err := rcd.GetAddress() + if err != nil { + panic(err) + } + + outAdd := factoid.NewAddress(outEC.Bytes()) + + trans := new(factoid.Transaction) + trans.AddInput(inAdd, amt) + trans.AddECOutput(outAdd, amt) + trans.AddRCD(rcd) + trans.AddAuthorization(rcd) + trans.SetTimestamp(timeInMilliseconds) + + fee, err := trans.CalculateFee(ecPrice) + + if err != nil { + return nil, err + } + + input, err := trans.GetInput(0) + if err != nil { + return nil, err + } + input.SetAmount(amt + fee) + + dataSig, err := trans.MarshalBinarySig() + if err != nil { + return nil, err + } + sig := factoid.NewSingleSignatureBlock(inSec.Bytes(), dataSig) + trans.SetSignatureBlock(0, sig) + + return trans, nil +} diff --git a/engine/version.go b/engine/version.go new file mode 100644 index 0000000000..c8362e38e7 --- /dev/null +++ b/engine/version.go @@ -0,0 +1,9 @@ +package engine + +// Build sets the factomd build id using git's SHA +// Version sets the semantic version number of the build +// $ go install -ldflags "-X github.com/FactomProject/factomd/engine.Build=`git rev-parse HEAD` -X github.com/FactomProject/factomd/engine.=`cat VERSION`" +// It also seems to need to have the previous binary deleted if 
recompiling to have this message show up if no code has changed. +// Since we are tracking code changes, then there is no need to delete the binary to use the latest message +var Build string +var FactomdVersion string = "BuiltWithoutVersion" diff --git a/factomd.conf b/factomd.conf index 493d481985..91c8492b1b 100644 --- a/factomd.conf +++ b/factomd.conf @@ -32,7 +32,10 @@ ;CustomNetworkPort = 8110 ;CustomSeedURL = "" ;CustomSpecialPeers = "" - +; The maximum number of other peers dialing into this node that will be accepted +;P2PIncoming = 200 +; The maximum number of peers this node will attempt to dial into +;P2POutgoing = 32 ; --------------- NodeMode: FULL | SERVER ---------------- ;NodeMode = FULL ;LocalServerPrivKey = 4c38c72fc5cdad68f13b74674d3ffb1f3d63a112710868c9b08946553448d26d @@ -43,6 +46,11 @@ ; Private key all zeroes: ;ExchangeRateAuthorityPublicKeyLocalNet = 3b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29 +; The public keys used to validate anchor records in either the Bitcoin or Ethereuem anchor chains +;BitcoinAnchorRecordPublicKeys = "0426a802617848d4d16d87830fc521f4d136bb2d0c352850919c2679f189613a" ; m1 key +;BitcoinAnchorRecordPublicKeys = "d569419348ed7056ec2ba54f0ecd9eea02648b260b26e0474f8c07fe9ac6bf83" ; m2 key, currently in use +;EthereumAnchorRecordPublicKeys = "a4a7905ab2226f267c6b44e1d5db2c97638b7bbba72fd1823d053ccff2892455" + ; These define if the RPC and Control Panel connection to factomd should be encrypted, and if it is, what files ; are the secret key and the public certificate. factom-cli and factom-walletd uses the certificate specified here if TLS is enabled. ; To use default files and paths leave /full/path/to/... in place. @@ -55,6 +63,15 @@ ;FactomdRpcUser = "" ;FactomdRpcPass = "" +; RequestTimeout is the amount of time in seconds before a pending request for a +; missing DBState is considered too old and the state is put back into the +; missing states list. 
+;RequestTimeout = 120 +; RequestLimit is the maximum number of pending requests for missing states. +; factomd will stop making DBStateMissing requests until current requests are +; moved out of the waiting list +;RequestLimit = 200 + ; This paramater allows Cross-Origin Resource Sharing (CORS) so web browsers will use data returned from the API when called from the listed URLs ; Example paramaters are "http://www.example.com, http://anotherexample.com, *" ;CorsDomains = "" diff --git a/factomd.go b/factomd.go index c3f1e2ee4d..af284e525a 100644 --- a/factomd.go +++ b/factomd.go @@ -12,6 +12,7 @@ import ( "runtime" "time" + "github.com/FactomProject/factomd/common/constants/runstate" . "github.com/FactomProject/factomd/engine" ) @@ -44,9 +45,9 @@ func main() { sim_Stdin := params.Sim_Stdin state := Factomd(params, sim_Stdin) - for state.Running() { + for state.GetRunState() != runstate.Stopped { time.Sleep(time.Second) } - fmt.Println("Waiting to Shut Down") + fmt.Println("Waiting to Shut Down") // This may not be necessary anymore with the new run state method time.Sleep(time.Second * 5) } diff --git a/glide.lock b/glide.lock index bb3fb7c5ee..4e622fb340 100644 --- a/glide.lock +++ b/glide.lock @@ -27,13 +27,13 @@ imports: subpackages: - edwards25519 - name: github.com/FactomProject/factom - version: 92b9a9bb5aa25574d8b950bf4e9daec78c8980b2 + version: 8be07008a81f6a2aef8c8049f84bbb451f43aa1b - name: github.com/FactomProject/go-bip32 version: 3b593af1c415abc1017648e4eb24a88c32fee0f3 - name: github.com/FactomProject/go-bip39 version: d1007fb78d9a7ec65314dad412973e63caf4c527 - name: github.com/FactomProject/go-bip44 - version: fd672f46ddc35bb381f497f7b64c9cf34f9e0703 + version: b541a96d8da98567af7610ef96105a834e6ed46c - name: github.com/FactomProject/go-simplejson version: aabad6e819789e569bd6aabf444c935aa9ba1e44 - name: github.com/FactomProject/goleveldb diff --git a/longTest/BlockTiming_test.go b/longTest/BlockTiming_test.go new file mode 100644 index 
0000000000..3bf6b23408 --- /dev/null +++ b/longTest/BlockTiming_test.go @@ -0,0 +1,52 @@ +package longtest + +import ( + "fmt" + "testing" + "time" + + . "github.com/FactomProject/factomd/testHelper" +) + +/* +send consistent load to simulator ramping up over 5 iterations. + +NOTE: must run this test with a large timeout such as -timeout=9999h +*/ +func TestBlockTiming(t *testing.T) { + ResetSimHome(t) // ditch the old data + + params := map[string]string{ + "--blktime": "30", + "--faulttimeout": "12", + "--startdelay": "0", + //"--db": "LDB", // XXX using the db incurs heavy IO + //"--debuglog": ".", // enable logs cause max ~ 50 TPS + } + state0 := SetupSim("LLLFF", params, 60, 0, 0, t) // start 6L 8F + + // adjust simulation parameters + RunCmd("s") // show node state summary + RunCmd("Re") // keep reloading EC wallet on 'tight' schedule (only small amounts) + + incrementLoad := 10 // tx + setLoad := 10 // tx/sec + + for x := 0; x < 5; x++ { + RunCmd(fmt.Sprintf("R%v", setLoad)) // Load tx/sec + startHt := state0.GetDBHeightComplete() + time.Sleep(time.Second * 300) // test 300s (5min) increments + + endHt := state0.GetDBHeightComplete() + delta := endHt - startHt + + // ramp up load + setLoad = setLoad + incrementLoad + + // show progress made during this run + t.Logf("LLHT: %v<=>%v moved %v", startHt, endHt, delta) + if delta < 9 { // 30 sec blocks - height should move at least 9 blocks each 5min period + t.Fatalf("only moved %v blocks", delta) + } + } +} diff --git a/LongTests/ChainTransaction_test.go b/longTest/ChainedTransactions_test.go similarity index 95% rename from LongTests/ChainTransaction_test.go rename to longTest/ChainedTransactions_test.go index cb3bd21076..54a4111565 100644 --- a/LongTests/ChainTransaction_test.go +++ b/longTest/ChainedTransactions_test.go @@ -1,4 +1,4 @@ -package longtests +package longtest import ( "fmt" @@ -11,12 +11,8 @@ import ( ) // FIXME: test runs > 40 min try to tune down to 10 min +// TODO: refactor to use testAccount 
helpers func TestChainedTransactions(t *testing.T) { - if RanSimTest { - return - } - RanSimTest = true - // a genesis block address w/ funding bankSecret := "Fs3E9gV6DXsYzf7Fqx1fVBQPQXV695eP3k5XbmHEZVRLkMdD9qCK" bankAddress := "FA2jK2HcLnRdS94dEcU27rF3meoJfpUcZPSinpb7AwQvPRY6RL1Q" @@ -69,7 +65,6 @@ func TestChainedTransactions(t *testing.T) { send := bal txn := func() { - //fmt.Printf("TXN %v %v => %v \n", send, depositAddresses[in], depositAddresses[out]) SendTxn(state0, send, depositSecrets[in], depositAddresses[out], ecPrice) } transactions = append(transactions, txn) diff --git a/longTest/LoadWith1pctDrop_test.go b/longTest/LoadWith1pctDrop_test.go new file mode 100644 index 0000000000..e7c2696b37 --- /dev/null +++ b/longTest/LoadWith1pctDrop_test.go @@ -0,0 +1,66 @@ +package longtest + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + . "github.com/FactomProject/factomd/testHelper" +) + +// authority node configuration +var nodesLoadWith1pctDrop string = "LLLLLLLLFFFFFF" + +/* +1st Part - Deletes old test data and re-initializes a new network +*/ +func TestSetupLoadWith1pctDrop(t *testing.T) { + homeDir := GetLongTestHome(t) + ResetTestHome(homeDir, t) + + params := map[string]string{ + "--db": "LDB", + "--net": "alot+", + "--factomhome": homeDir, + } + state0 := SetupSim(nodesLoadWith1pctDrop, params, 10, 0, 0, t) + WaitBlocks(state0, 1) +} + +/* +2nd Part Subsequent runs after network is setup + +can be re-run to check behavior when booting w/ existing DB's + +Replicates behavior of +factomd --network=LOCAL --fastsaverate=100 --checkheads=false --count=15 --net=alot+ --blktime=600 --faulttimeout=12 --enablenet=false --startdelay=2 $@ > out.txt 2> err.txt +*/ +func TestLoadWith1pctDrop(t *testing.T) { + params := map[string]string{ + "--db": "LDB", + "--fastsaverate": "100", + "--net": "alot+", + "--blktime": "30", + "--faulttimeout": "12", + "--startdelay": "2", + "--factomhome": GetLongTestHome(t), + } + state0 := 
StartSim(nodesLoadWith1pctDrop, params) + + // adjust simulation parameters + RunCmd("s") // show node state summary + RunCmd("Re") // keep reloading EC wallet on 'tight' schedule (only small amounts) + RunCmd("r") // reset all nodes in the simulation (maybe not needed) + RunCmd("S10") // message drop rate 1% + RunCmd("F500") // add 500 ms delay to all messages + RunCmd("R5") // Load 5 tx/sec + + time.Sleep(time.Second * 300) // wait 5 min + startHt := state0.GetDBHeightAtBoot() + endHt := state0.GetDBHeightComplete() + t.Logf("LLHT: %v<=>%v", startHt, endHt) + + // normally without load we expect to create 10 blocks over the span of 5 min + assert.True(t, endHt-startHt >= 5) // check that we created at least 1 block per min +} diff --git a/longTest/README.md b/longTest/README.md new file mode 100644 index 0000000000..40a4982f85 --- /dev/null +++ b/longTest/README.md @@ -0,0 +1,20 @@ +# factomd/longTest + +This folder contains simulation tests that take a very long time to run. +These tests are *not run* on circle.ci and are meant for manual testing. + +### LoadWith1pctDrop_test.go + +Basic loadtest meant to stress a simulated network while under load. + +1st part sets up the initial network. + +``` +go test -v ./longTest/... -run TestSetupLoadWith1pctDrop +``` + +2nd part can be run repeatedly and tests booting up while under load. + +``` +go test -v ./longTest/... 
-run TestLoadWith1pctDrop +``` diff --git a/LongTests/longTests.gox b/longTest/longTests.gox similarity index 100% rename from LongTests/longTests.gox rename to longTest/longTests.gox diff --git a/p2p/protocol.go b/p2p/protocol.go index 5bc042da6f..66203eb80f 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -56,10 +56,10 @@ var ( OnlySpecialPeers = false // dial out to special peers only AllowUnknownIncomingPeers = true // allow incoming connections from peers that are not in the special peer list NetworkDeadline = time.Duration(30) * time.Second - NumberPeersToConnect = 32 - NumberPeersToBroadcast = 8 // This gets overwritten by command line flag! - MaxNumberIncomingConnections = 150 - MaxNumberOfRedialAttempts = 5 // How many missing pings (and other) before we give up and close. + NumberPeersToConnect = 32 // default value; changeable in cfg and cmd line + NumberPeersToBroadcast = 16 // This gets overwritten by command line flag! + MaxNumberIncomingConnections = 200 // default value; changeable in cfg and cmd line + MaxNumberOfRedialAttempts = 5 // How many missing pings (and other) before we give up and close. StandardChannelSize = 5000 NetworkStatusInterval = time.Second * 9 ConnectionStatusInterval = time.Second * 122 diff --git a/peerTest/BrainSwapA_test.go b/peerTest/BrainSwapA_test.go new file mode 100644 index 0000000000..0f8f01d13b --- /dev/null +++ b/peerTest/BrainSwapA_test.go @@ -0,0 +1,56 @@ +package simtest + +import ( + "testing" + + . 
"github.com/FactomProject/factomd/testHelper" +) + +/* +This test is part of a Network/Follower pair of tests used to test +brainswapping between 2 different versions of factomd + +If you boot this simulator by itself - the tests will fail +*/ +func TestBrainSwapA(t *testing.T) { + + maxBlocks := 30 + peers := "127.0.0.1:37003" + // nodes usage 0123456 nodes 8 and 9 are in a separate sim of TestBrainSwapB + givenNodes := "LLLLAAA" + outputNodes := "LFLLFAA" + + ResetSimHome(t) + + // build config files for the test + for i := 0; i < len(givenNodes); i++ { + WriteConfigFile(i, i, "", t) + } + + params := map[string]string{ + "--db": "LDB", + "--network": "LOCAL", + "--net": "alot+", + "--enablenet": "true", + "--blktime": "30", + "--logPort": "38000", + "--port": "38001", + "--controlpanelport": "38002", + "--networkport": "38003", + "--peers": peers, + } + + state0 := SetupSim(givenNodes, params, int(maxBlocks), 0, 0, t) + + WaitForAllNodes(state0) + WriteConfigFile(9, 1, "ChangeAcksHeight = 10\n", t) + WriteConfigFile(8, 4, "ChangeAcksHeight = 10\n", t) + WaitForBlock(state0, 10) + AdjustAuthoritySet(outputNodes) + + WaitBlocks(state0, 3) + AssertAuthoritySet(t, outputNodes) + WaitBlocks(state0, 1) + Halt(t) + +} diff --git a/peerTest/BrainSwapB_test.go b/peerTest/BrainSwapB_test.go new file mode 100644 index 0000000000..ff15c968b1 --- /dev/null +++ b/peerTest/BrainSwapB_test.go @@ -0,0 +1,56 @@ +package simtest + +import ( + "testing" + + . 
"github.com/FactomProject/factomd/testHelper" +) + +/* +This test is part of a Network/Follower pair of tests used to test +brainswapping between 2 different versions of factomd + +If you boot this simulator by itself - the simulation will not progress and will eventually time out +*/ +func TestBrainSwapB(t *testing.T) { + + maxBlocks := 30 + peers := "127.0.0.1:38003" + // this sim starts with identities 8 & 9 + givenNodes := "FF" + outputNodes := "LA" + + ResetSimHome(t) + WriteConfigFile(9, 0, "", t) + WriteConfigFile(8, 1, "", t) + + params := map[string]string{ + "--db": "LDB", + "--network": "LOCAL", + "--net": "alot+", + "--enablenet": "true", + "--blktime": "30", + "--logPort": "37000", + "--port": "37001", + "--controlpanelport": "37002", + "--networkport": "37003", + "--peers": peers, + } + + state0 := SetupSim(givenNodes, params, int(maxBlocks), 0, 0, t) + + WaitForAllNodes(state0) + WriteConfigFile(1, 0, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between L2 and F4 + WriteConfigFile(4, 1, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between L2 and F4 + WaitForBlock(state0, 9) + RunCmd("1") // make sure the follower is lagging the audit so he doesn't beat the auditor to the ID change and produce a heartbeat that will kill him + RunCmd("x") + WaitForBlock(state0, 10) // wait till should have brainswapped + RunCmd("x") + AdjustAuthoritySet(outputNodes) + + WaitBlocks(state0, 3) + AssertAuthoritySet(t, outputNodes) + WaitBlocks(state0, 1) + Halt(t) +} diff --git a/simTest/BrainSwapFollower_test.go b/peerTest/BrainSwapFollower_test.go similarity index 92% rename from simTest/BrainSwapFollower_test.go rename to peerTest/BrainSwapFollower_test.go index dc6882b7be..701b712498 100644 --- a/simTest/BrainSwapFollower_test.go +++ b/peerTest/BrainSwapFollower_test.go @@ -18,8 +18,8 @@ func TestBrainSwapFollower(t *testing.T) { t.Run("Followers Sim", func(t *testing.T) { maxBlocks := 30 peers := "127.0.0.1:38003" - // thsi sim is 8 9 - given_Nodes := 
"FF" + // this sim starts with identities 8 & 9 + givenNodes := "FF" outputNodes := "LA" t.Run("Setup Config Files", func(t *testing.T) { @@ -49,7 +49,7 @@ func TestBrainSwapFollower(t *testing.T) { "--factomhome": globals.Params.FactomHome, } - state0 := SetupSim(given_Nodes, params, int(maxBlocks), 0, 0, t) + state0 := SetupSim(givenNodes, params, int(maxBlocks), 0, 0, t) t.Run("Wait For Identity Swap", func(t *testing.T) { WaitForAllNodes(state0) @@ -65,7 +65,7 @@ func TestBrainSwapFollower(t *testing.T) { t.Run("Verify Network", func(t *testing.T) { WaitBlocks(state0, 3) - CheckAuthoritySet(t) + AssertAuthoritySet(t, outputNodes) WaitBlocks(state0, 1) Halt(t) }) diff --git a/simTest/BrainSwapNetwork_test.go b/peerTest/BrainSwapNetwork_test.go similarity index 90% rename from simTest/BrainSwapNetwork_test.go rename to peerTest/BrainSwapNetwork_test.go index cb95a95289..14038cbfca 100644 --- a/simTest/BrainSwapNetwork_test.go +++ b/peerTest/BrainSwapNetwork_test.go @@ -19,14 +19,14 @@ func TestBrainSwapNetwork(t *testing.T) { maxBlocks := 30 peers := "127.0.0.1:37003" // nodes usage 0123456 nodes 8 and 9 are in a separate sim of TestBrainSwapFollower - given_Nodes := "LLLLAAA" - outputNodes := "LLLAAFF" + givenNodes := "LLLLAAA" + outputNodes := "LFLLFAA" t.Run("Setup Config Files", func(t *testing.T) { ResetFactomHome(t, "network") // build config files for the test - for i := 0; i < len(given_Nodes); i++ { + for i := 0; i < len(givenNodes); i++ { WriteConfigFile(i, i, "", t) } @@ -53,7 +53,7 @@ func TestBrainSwapNetwork(t *testing.T) { "--factomhome": globals.Params.FactomHome, } - state0 := SetupSim(given_Nodes, params, int(maxBlocks), 0, 0, t) + state0 := SetupSim(givenNodes, params, int(maxBlocks), 0, 0, t) t.Run("Wait For Identity Swap", func(t *testing.T) { WaitForAllNodes(state0) @@ -65,7 +65,7 @@ func TestBrainSwapNetwork(t *testing.T) { t.Run("Verify Network", func(t *testing.T) { WaitBlocks(state0, 3) - CheckAuthoritySet(t) + AssertAuthoritySet(t, 
outputNodes) WaitBlocks(state0, 1) Halt(t) }) diff --git a/peerTest/README.md b/peerTest/README.md new file mode 100644 index 0000000000..7f70eb229c --- /dev/null +++ b/peerTest/README.md @@ -0,0 +1,45 @@ +# factomd/peerTest + +This folder contains tests that must be run in parallel (2 tests at a time). + +These tests are useful for testing features between builds +by running 1 of each pair from previous/current builds. + + +## Add a test to circle.ci + +Tests in this folder *will* be run on circle.ci if you +add the filename to the `ci_whitelist` file in this directory + +## Naming Convention + + Peer tests are expected to be named in A/B pairs + + ``` + *A_test.go + *B_test.go + ``` + +## Follower/Network Pairs + + Peer tests are expected to be named in Follower/Network pairs + + ``` + * Follower_test.go + * Network_test.go + ``` + + The network test will run in the background while the follower test executes in the foreground. + see ./test.sh in the root of this repo for more details + +## BrainSwap + +Run these two tests simultaneously to observe +an identity swap between go processes. + +These two tests are configured to be peers. 
+ +``` +nohup go test -v BrainSwapA_test.go & +go test -v BrainSwapB_test.go +``` diff --git a/receipts/receiptSaver.go b/receipts/receiptSaver.go index 2db17ac175..7f209071ff 100644 --- a/receipts/receiptSaver.go +++ b/receipts/receiptSaver.go @@ -60,7 +60,7 @@ func ExportEntryReceipt(entryID string, dbo interfaces.DBOverlaySimple) error { if err != nil { return err } - receipt, err := CreateFullReceipt(dbo, h) + receipt, err := CreateFullReceipt(dbo, h, false) if err != nil { return err } diff --git a/receipts/receipts.go b/receipts/receipts.go index 1ce6693bcc..2e522b5c8b 100644 --- a/receipts/receipts.go +++ b/receipts/receipts.go @@ -5,21 +5,20 @@ package receipts import ( + "encoding/hex" "encoding/json" "fmt" - "github.com/FactomProject/factomd/common/directoryBlock/dbInfo" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" ) type Receipt struct { - Entry *JSON `json:"entry,omitempty"` - MerkleBranch []*primitives.MerkleNode `json:"merklebranch,omitempty"` - EntryBlockKeyMR *primitives.Hash `json:"entryblockkeymr,omitempty"` - DirectoryBlockKeyMR *primitives.Hash `json:"directoryblockkeymr,omitempty"` - BitcoinTransactionHash *primitives.Hash `json:"bitcointransactionhash,omitempty"` - BitcoinBlockHash *primitives.Hash `json:"bitcoinblockhash,omitempty"` + Entry *EntryJSON `json:"entry,omitempty"` + MerkleBranch []*primitives.MerkleNode `json:"merklebranch,omitempty"` + EntryBlockKeyMR *primitives.Hash `json:"entryblockkeymr,omitempty"` + DirectoryBlockKeyMR *primitives.Hash `json:"directoryblockkeymr,omitempty"` + DirectoryBlockHeight uint32 `json:"directoryblockheight,omitempty"` } func (e *Receipt) TrimReceipt() { @@ -182,26 +181,6 @@ func (e *Receipt) IsSameAs(r *Receipt) bool { } } - if e.BitcoinTransactionHash == nil { - if r.BitcoinTransactionHash != nil { - return false - } - } else { - if e.BitcoinTransactionHash.IsSameAs(r.BitcoinTransactionHash) == false { - return false - } - } - - if 
e.BitcoinBlockHash == nil { - if r.BitcoinBlockHash != nil { - return false - } - } else { - if e.BitcoinBlockHash.IsSameAs(r.BitcoinBlockHash) == false { - return false - } - } - return true } @@ -236,26 +215,26 @@ func DecodeReceiptString(str string) (*Receipt, error) { return receipt, nil } -type JSON struct { +type EntryJSON struct { Raw string `json:"raw,omitempty"` EntryHash string `json:"entryhash,omitempty"` - Json string `json:"json,omitempty"` + Timestamp int64 `json:"timestamp,omitempty"` } -func (e *JSON) JSONByte() ([]byte, error) { +func (e *EntryJSON) JSONByte() ([]byte, error) { return primitives.EncodeJSON(e) } -func (e *JSON) JSONString() (string, error) { +func (e *EntryJSON) JSONString() (string, error) { return primitives.EncodeJSONString(e) } -func (e *JSON) String() string { +func (e *EntryJSON) String() string { str, _ := e.JSONString() return str } -func (e *JSON) IsSameAs(r *JSON) bool { +func (e *EntryJSON) IsSameAs(r *EntryJSON) bool { if r == nil { return false } @@ -265,18 +244,18 @@ func (e *JSON) IsSameAs(r *JSON) bool { if e.EntryHash != r.EntryHash { return false } - if e.Json != r.Json { + if e.Timestamp != r.Timestamp { return false } return true } -func CreateFullReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHash) (*Receipt, error) { - return CreateReceipt(dbo, entryID) +func CreateFullReceipt(dbo interfaces.DBOverlaySimple, entryHash interfaces.IHash, includeRawEntry bool) (*Receipt, error) { + return CreateReceipt(dbo, entryHash, includeRawEntry) } func CreateMinimalReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHash) (*Receipt, error) { - receipt, err := CreateReceipt(dbo, entryID) + receipt, err := CreateReceipt(dbo, entryID, false) if err != nil { return nil, err } @@ -286,37 +265,45 @@ func CreateMinimalReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHa return receipt, nil } -func CreateReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHash) (*Receipt, error) { +func 
CreateReceipt(dbo interfaces.DBOverlaySimple, entryHash interfaces.IHash, includeRawEntry bool) (*Receipt, error) { receipt := new(Receipt) - receipt.Entry = new(JSON) - receipt.Entry.EntryHash = entryID.String() - - //EBlock + receipt.Entry = new(EntryJSON) + receipt.Entry.EntryHash = entryHash.String() + + // Optionally include the full marshalled entry in the receipt + if includeRawEntry { + entry, err := dbo.FetchEntry(entryHash) + if err != nil { + return nil, err + } else if entry == nil { + return nil, fmt.Errorf("entry not found") + } + raw, err := entry.MarshalBinary() + if err != nil { + return nil, err + } + receipt.Entry.Raw = hex.EncodeToString(raw) + } - hash, err := dbo.FetchIncludedIn(entryID) + // Entry Block + hash, err := dbo.FetchIncludedIn(entryHash) if err != nil { return nil, err - } - - if hash == nil { + } else if hash == nil { return nil, fmt.Errorf("Block containing entry not found") } eBlock, err := dbo.FetchEBlock(hash) if err != nil { return nil, err - } - - if eBlock == nil { + } else if eBlock == nil { return nil, fmt.Errorf("EBlock not found") } - hash = eBlock.DatabasePrimaryIndex() receipt.EntryBlockKeyMR = hash.(*primitives.Hash) - entries := eBlock.GetEntryHashes() - //fmt.Printf("eBlock entries - %v\n\n", entries) - branch := primitives.BuildMerkleBranchForEntryHash(entries, entryID, true) + eBlockEntries := eBlock.GetEntryHashes() + branch := primitives.BuildMerkleBranchForHash(eBlockEntries, entryHash, true) blockNode := new(primitives.MerkleNode) left, err := eBlock.HeaderHash() if err != nil { @@ -325,43 +312,26 @@ func CreateReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHash) (*R blockNode.Left = left.(*primitives.Hash) blockNode.Right = eBlock.BodyKeyMR().(*primitives.Hash) blockNode.Top = hash.(*primitives.Hash) - //fmt.Printf("eBlock blockNode - %v\n\n", blockNode) branch = append(branch, blockNode) receipt.MerkleBranch = append(receipt.MerkleBranch, branch...) 
- //str, _ := eBlock.JSONString() - //fmt.Printf("eBlock - %v\n\n", str) - - //DBlock - + // Directory Block hash, err = dbo.FetchIncludedIn(hash) if err != nil { return nil, err - } - - if hash == nil { + } else if hash == nil { return nil, fmt.Errorf("Block containing EBlock not found") } dBlock, err := dbo.FetchDBlock(hash) if err != nil { return nil, err - } - - if dBlock == nil { + } else if dBlock == nil { return nil, fmt.Errorf("DBlock not found") } - //str, _ = dBlock.JSONString() - //fmt.Printf("dBlock - %v\n\n", str) - - entries = dBlock.GetEntryHashesForBranch() - //fmt.Printf("dBlock entries - %v\n\n", entries) - - //merkleTree := primitives.BuildMerkleTreeStore(entries) - //fmt.Printf("dBlock merkleTree - %v\n\n", merkleTree) - - branch = primitives.BuildMerkleBranchForEntryHash(entries, receipt.EntryBlockKeyMR, true) + dBlockEntries := dBlock.GetEntryHashesForBranch() + branch = primitives.BuildMerkleBranchForHash(dBlockEntries, receipt.EntryBlockKeyMR, true) blockNode = new(primitives.MerkleNode) left, err = dBlock.GetHeaderHash() if err != nil { @@ -370,25 +340,31 @@ func CreateReceipt(dbo interfaces.DBOverlaySimple, entryID interfaces.IHash) (*R blockNode.Left = left.(*primitives.Hash) blockNode.Right = dBlock.BodyKeyMR().(*primitives.Hash) blockNode.Top = hash.(*primitives.Hash) - //fmt.Printf("dBlock blockNode - %v\n\n", blockNode) branch = append(branch, blockNode) receipt.MerkleBranch = append(receipt.MerkleBranch, branch...) 
- //DirBlockInfo - + // Directory Block Info hash = dBlock.DatabasePrimaryIndex() receipt.DirectoryBlockKeyMR = hash.(*primitives.Hash) - - dirBlockInfo, err := dbo.FetchDirBlockInfoByKeyMR(hash) - if err != nil { - return nil, err - } - - if dirBlockInfo != nil { - dbi := dirBlockInfo.(*dbInfo.DirBlockInfo) - - receipt.BitcoinTransactionHash = dbi.BTCTxHash.(*primitives.Hash) - receipt.BitcoinBlockHash = dbi.BTCBlockHash.(*primitives.Hash) + receipt.DirectoryBlockHeight = dBlock.GetDatabaseHeight() + + // Now that we have enough info available, find entry timestamp + mins := make(map[string]uint8) // create a map of possible minute markers + for i := byte(1); i <= 10; i++ { + h := make([]byte, 32) + h[len(h)-1] = i + mins[hex.EncodeToString(h)] = i + } + entryFound := false + for _, v := range eBlockEntries { + if v.IsSameAs(entryHash) { + entryFound = true + } + if n, exist := mins[v.String()]; exist && entryFound { + // Found a minute marker and found the entry already, set the timestamp and break + receipt.Entry.Timestamp = int64(dBlock.GetHeader().GetTimestamp().GetTimeSeconds() + 60*int64(n)) + break + } } return receipt, nil diff --git a/receipts/receipts_test.go b/receipts/receipts_test.go index 67913b735f..496101f4d1 100644 --- a/receipts/receipts_test.go +++ b/receipts/receipts_test.go @@ -12,35 +12,13 @@ import ( . 
"github.com/FactomProject/factomd/testHelper" ) -func TestAnchoringIntoBitcoin(t *testing.T) { - dbo := CreateAndPopulateTestDatabaseOverlay() - hash, err := primitives.NewShaHashFromStr("be5fb8c3ba92c0436269fab394ff7277c67e9b2de4431b723ce5d89799c0b93a") - if err != nil { - t.Errorf("%v", err) - } - receipt, err := CreateFullReceipt(dbo, hash) - if err != nil { - t.Errorf("%v", err) - } - if receipt == nil { - t.Errorf("Receipt is nil!") - } - - if receipt.BitcoinBlockHash.String() == "" { - t.Errorf("No Bitcoin Block Hash in receipt!") - } - if receipt.BitcoinTransactionHash.String() == "" { - t.Errorf("No Bitcoin Transaction Hash in receipt!") - } -} - func TestCreateFullReceipt(t *testing.T) { dbo := CreateAndPopulateTestDatabaseOverlay() hash, err := primitives.NewShaHashFromStr("be5fb8c3ba92c0436269fab394ff7277c67e9b2de4431b723ce5d89799c0b93a") if err != nil { t.Errorf("%v", err) } - receipt, err := CreateFullReceipt(dbo, hash) + receipt, err := CreateFullReceipt(dbo, hash, true) if err != nil { t.Errorf("%v", err) } @@ -56,7 +34,7 @@ func TestReceipts(t *testing.T) { blocks := CreateFullTestBlockSet() for _, block := range blocks[:len(blocks)-2] { for _, entry := range block.Entries { - receipt, err := CreateFullReceipt(dbo, entry.DatabasePrimaryIndex()) + receipt, err := CreateFullReceipt(dbo, entry.DatabasePrimaryIndex(), true) if err != nil { t.Error(err) } @@ -86,7 +64,7 @@ func TestReceipts(t *testing.T) { } func TestDecodeReceiptString(t *testing.T) { - receiptStr := 
`{"bitcoinblockhash":"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","bitcointransactionhash":"0000000000000000000000000000000000000000000000000000000000000000","directoryblockkeymr":"bdadd16c5335c369a1b784212f80764e1f47805c89d39141bd40d05153edcdf5","entry":{"key":"cf9503fad6a6cf3cf6d7a5a491e23d84f9dee6dacb8c12f428633995655bd0d0"},"entryblockkeymr":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc","merklebranch":[{"left":"0a2f96c96ea89ee82908be9f5aef2be4b533a32ffb3855aeb3b8327f9e989f3a","right":"cf9503fad6a6cf3cf6d7a5a491e23d84f9dee6dacb8c12f428633995655bd0d0","top":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc"},{"left":"6e7e64ac45ff57edbf8537a0c99fba2e9ee351ef3d3f4abd93af9f01107e592c","right":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc","top":"4f477201a150694ed0f85fee17c41282542f976fae479a4de553a37747b09f41"},{"left":"4f477201a150694ed0f85fee17c41282542f976fae479a4de553a37747b09f41","right":"18ab692a40f370e9529c180f2476684ccde4937b9a4b4605805e3f51e592f632","top":"890003f0db6cceca94031a70745fd83845726987cffa6fc95ddb0e2f6c64b499"},{"left":"1857570da9a1c93dac4993d3048faa80d1d1d939f4fc44a38e61781fdc123165","right":"890003f0db6cceca94031a70745fd83845726987cffa6fc95ddb0e2f6c64b499","top":"4d8ed632f7852a07055a0592c341b957815bdd46e82d2da7bdf58be54fc60bf9"},{"left":"4d8ed632f7852a07055a0592c341b957815bdd46e82d2da7bdf58be54fc60bf9","right":"f955a2709628086d656257885bf27b7c054a6acd0b3ebf5b769b3cf036ab04ee","top":"d6bd24e979e81feddb319483878c678865a80175d1954e5429f2d799eadd1bc9"},{"left":"49a5c28516f3c4d5e44f5cf0b2e5f5f00ca1187714dd9ee914e7df1eb7702972","right":"d6bd24e979e81feddb319483878c678865a80175d1954e5429f2d799eadd1bc9","top":"bdadd16c5335c369a1b784212f80764e1f47805c89d39141bd40d05153edcdf5"}]}` + receiptStr := 
`{"directoryblockkeymr":"bdadd16c5335c369a1b784212f80764e1f47805c89d39141bd40d05153edcdf5","entry":{"key":"cf9503fad6a6cf3cf6d7a5a491e23d84f9dee6dacb8c12f428633995655bd0d0"},"entryblockkeymr":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc","merklebranch":[{"left":"0a2f96c96ea89ee82908be9f5aef2be4b533a32ffb3855aeb3b8327f9e989f3a","right":"cf9503fad6a6cf3cf6d7a5a491e23d84f9dee6dacb8c12f428633995655bd0d0","top":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc"},{"left":"6e7e64ac45ff57edbf8537a0c99fba2e9ee351ef3d3f4abd93af9f01107e592c","right":"905740850540f1d17fcb1fc7fd0c61a33150b2cdc0f88334f6a891ec34bd1cfc","top":"4f477201a150694ed0f85fee17c41282542f976fae479a4de553a37747b09f41"},{"left":"4f477201a150694ed0f85fee17c41282542f976fae479a4de553a37747b09f41","right":"18ab692a40f370e9529c180f2476684ccde4937b9a4b4605805e3f51e592f632","top":"890003f0db6cceca94031a70745fd83845726987cffa6fc95ddb0e2f6c64b499"},{"left":"1857570da9a1c93dac4993d3048faa80d1d1d939f4fc44a38e61781fdc123165","right":"890003f0db6cceca94031a70745fd83845726987cffa6fc95ddb0e2f6c64b499","top":"4d8ed632f7852a07055a0592c341b957815bdd46e82d2da7bdf58be54fc60bf9"},{"left":"4d8ed632f7852a07055a0592c341b957815bdd46e82d2da7bdf58be54fc60bf9","right":"f955a2709628086d656257885bf27b7c054a6acd0b3ebf5b769b3cf036ab04ee","top":"d6bd24e979e81feddb319483878c678865a80175d1954e5429f2d799eadd1bc9"},{"left":"49a5c28516f3c4d5e44f5cf0b2e5f5f00ca1187714dd9ee914e7df1eb7702972","right":"d6bd24e979e81feddb319483878c678865a80175d1954e5429f2d799eadd1bc9","top":"bdadd16c5335c369a1b784212f80764e1f47805c89d39141bd40d05153edcdf5"}]}` receipt, err := DecodeReceiptString(receiptStr) if err != nil { t.Error(err) diff --git a/scripts/prebuild.sh b/scripts/prebuild.sh new file mode 100755 index 0000000000..c44f51216b --- /dev/null +++ b/scripts/prebuild.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -x +echo package engine > engine/overrideversion.go +echo import \"fmt\" >> engine/overrideversion.go +echo "// This is 
an autogenerated for GoLand builds, it can be safely removed" >> engine/overrideversion.go +echo "// Regular build.sh will override this" >> engine/overrideversion.go +echo func init\(\) \{ >> engine/overrideversion.go +echo if Build !\= \"\" \{ return \} >> engine/overrideversion.go +echo fmt.Println\(\"Build and version come from overrideversion.go\"\) >> engine/overrideversion.go +echo Build = \"GoLand-`git rev-parse HEAD``git status | grep -Eo modified |sort -u`\" >> engine/overrideversion.go +echo FactomdVersion = \"GoLand-`cat VERSION``git status | grep -Eo modified |sort -u`\" >> engine/overrideversion.go +echo \} >> engine/overrideversion.go diff --git a/simTest/.gitignore b/simTest/.gitignore new file mode 100644 index 0000000000..1fc3ed4c17 --- /dev/null +++ b/simTest/.gitignore @@ -0,0 +1 @@ +Test* diff --git a/simTest/AddFNode_test.go b/simTest/AddFNode_test.go new file mode 100644 index 0000000000..ce1625759b --- /dev/null +++ b/simTest/AddFNode_test.go @@ -0,0 +1,28 @@ +package simtest + +import ( + "testing" + + "github.com/FactomProject/factomd/engine" + + . 
"github.com/FactomProject/factomd/testHelper" +) + +/* +This test is useful to exercise reboot behavior +here we copy a db and boot up an additional follower +*/ +func TestAddFNode(t *testing.T) { + ResetSimHome(t) // clear out old test home + for i := 0; i < 6; i++ { // build config files for the test + WriteConfigFile(i, i, "", t) // just write the minimal config + } + state0 := SetupSim("LLLLLAA", map[string]string{"--db": "LDB"}, 25, 1, 1, t) + WaitForBlock(state0, 7) + CloneFnodeData(2, 7, t) + AddFNode() + state7 := engine.GetFnodes()[7].State // Get new node + WaitForBlock(state7, 7) + AssertAuthoritySet(t, "LLLLLAAF") + ShutDownEverything(t) +} diff --git a/simTest/AuditBrainSwap_test.go b/simTest/AuditBrainSwap_test.go index 4ddcc344d9..38d28fb0d6 100644 --- a/simTest/AuditBrainSwap_test.go +++ b/simTest/AuditBrainSwap_test.go @@ -1,7 +1,6 @@ package simtest import ( - "os" "testing" "github.com/FactomProject/factomd/common/globals" @@ -9,87 +8,41 @@ import ( . "github.com/FactomProject/factomd/testHelper" ) -// Test brainswapping a follower and an audit when the audit is lagging behind -func TestAuditBrainSwap(t *testing.T) { - - t.Run("Run Sim", func(t *testing.T) { - - t.Run("Setup Config Files", func(t *testing.T) { - dir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - globals.Params.FactomHome = dir + "/TestBrainSwap" - os.Setenv("FACTOM_HOME", globals.Params.FactomHome) - - t.Logf("Removing old run in %s", globals.Params.FactomHome) - if err := os.RemoveAll(globals.Params.FactomHome); err != nil { - t.Fatal(err) - } - - // build config files for the test - for i := 0; i < 6; i++ { - WriteConfigFile(i, i, "", t) // just write the minimal config - } - }) - - params := map[string]string{ - "--db": "LDB", // NOTE: using MAP causes an occasional error see FD-825 - "--network": "LOCAL", - "--net": "alot+", - "--enablenet": "true", - "--blktime": "10", - "--startdelay": "1", - "--stdoutlog": "out.txt", - "--stderrlog": "out.txt", - 
"--checkheads": "false", - "--controlpanelsetting": "readwrite", - "--debuglog": ".", - "--logPort": "38000", - "--port": "38001", - "--controlpanelport": "38002", - "--networkport": "38003", - "--peers": "127.0.0.1:37003", - "--factomhome": globals.Params.FactomHome, - } +/* +Test brainswapping a F <-> A - // start the 6 nodes running 012345 - state0 := SetupSim("LLLAFF", params, 15, 0, 0, t) - state4 := engine.GetFnodes()[4].State // Get node 4 - state5 := engine.GetFnodes()[5].State // Get node 5 +follower and an audit when the audit is lagging behind - t.Run("Wait For Identity Swap", func(t *testing.T) { - WaitForBlock(state0, 6) - WaitForAllNodes(state0) - // rewrite the config to have brainswaps - - WriteConfigFile(3, 5, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between A3 and F5 - WriteConfigFile(5, 3, "ChangeAcksHeight = 10\n", t) - WaitForBlock(state0, 9) - RunCmd("3") // make sure the Audit is lagging the audit if the heartbeats conflit one will panic - RunCmd("x") - WaitForBlock(state5, 10) // wait till 5 should have have brainswapped - RunCmd("x") - WaitBlocks(state0, 1) - WaitForAllNodes(state0) - CheckAuthoritySet(t) - }) - - t.Run("Verify Network", func(t *testing.T) { - - if !state4.Leader { - t.Error("Node 4 did not become a leader") - } - - list := state0.ProcessLists.Get(state0.LLeaderHeight) - foundAudit, _ := list.GetAuditServerIndexHash(state5.GetIdentityChainID()) - if !foundAudit { - t.Error("Node 5 did not become an audit server") - } - - Halt(t) - }) - - }) +This test is useful for verifying that Leaders can swap without rebooting +And that Audits can reboot with lag (to prevent a panic if 2 nodes see the same audit heartbeat) +*/ +func TestAuditBrainSwap(t *testing.T) { + ResetSimHome(t) // clear out old test home + for i := 0; i < 6; i++ { // build config files for the test + WriteConfigFile(i, i, "", t) // just write the minimal config + } + + params := map[string]string{"--factomhome": globals.Params.FactomHome} + state0 := 
SetupSim("LLLAFF", params, 15, 0, 0, t) + state5 := engine.GetFnodes()[5].State // Get node 5 + _ = state5 + + WaitForBlock(state0, 6) + WaitForAllNodes(state0) + + // rewrite the config to have brainswaps + WriteConfigFile(3, 5, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between A3 and F5 + WriteConfigFile(5, 3, "ChangeAcksHeight = 10\n", t) + WaitForBlock(state0, 9) + RunCmd("3") // make sure the Audit is lagging the audit if the heartbeats conflict one will panic + RunCmd("x") + WaitForBlock(state5, 10) // wait till 5 should have have brainswapped + RunCmd("x") + WaitBlocks(state0, 1) + WaitForAllNodes(state0) + CheckAuthoritySet(t) + + WaitForAllNodes(state0) + AssertAuthoritySet(t, "LLLFFA") + ShutDownEverything(t) } diff --git a/simTest/BrainSwap1_test.go b/simTest/BrainSwap1_test.go deleted file mode 100644 index b7664ecd8c..0000000000 --- a/simTest/BrainSwap1_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package simtest - -import ( - "os" - "testing" - - "github.com/FactomProject/factomd/common/globals" - "github.com/FactomProject/factomd/engine" - . 
"github.com/FactomProject/factomd/testHelper" -) - -// Test brainswapping a follower and a leader and swap a follower and an audit at the same height in the same build -func TestBrainSwap1(t *testing.T) { - - t.Run("Run Sim", func(t *testing.T) { - - t.Run("Setup Config Files", func(t *testing.T) { - dir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - globals.Params.FactomHome = dir + "/TestBrainSwap" - os.Setenv("FACTOM_HOME", globals.Params.FactomHome) - - t.Logf("Removing old run in %s", globals.Params.FactomHome) - if err := os.RemoveAll(globals.Params.FactomHome); err != nil { - t.Fatal(err) - } - - // build config files for the test - for i := 0; i < 6; i++ { - WriteConfigFile(i, i, "", t) // just write the minimal config - } - }) - - params := map[string]string{ - "--db": "LDB", // NOTE: using MAP causes an occasional error see FD-825 - "--network": "LOCAL", - "--net": "alot+", - "--enablenet": "true", - "--blktime": "10", - "--startdelay": "1", - "--stdoutlog": "out.txt", - "--stderrlog": "out.txt", - "--checkheads": "false", - "--controlpanelsetting": "readwrite", - "--debuglog": ".", - "--logPort": "38000", - "--port": "38001", - "--controlpanelport": "38002", - "--networkport": "38003", - "--peers": "127.0.0.1:37003", - "--factomhome": globals.Params.FactomHome, - } - - // start the 6 nodes running 012345 - state0 := SetupSim("LLLAFF", params, 15, 0, 0, t) - state2 := engine.GetFnodes()[2].State // Get node 2 - state3 := engine.GetFnodes()[3].State // Get node 3 - state4 := engine.GetFnodes()[4].State // Get node 4 - state5 := engine.GetFnodes()[5].State // Get node 5 - - t.Run("Wait For Identity Swap", func(t *testing.T) { - WaitForBlock(state0, 6) - WaitForAllNodes(state0) - // rewrite the config to have brainswaps - - WriteConfigFile(2, 4, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between L2 and F4 - WriteConfigFile(4, 2, "ChangeAcksHeight = 10\n", t) - WriteConfigFile(3, 5, "ChangeAcksHeight = 10\n", t) // Setup A brain swap 
between A3 and F5 - WriteConfigFile(5, 3, "ChangeAcksHeight = 10\n", t) - WaitForBlock(state0, 9) - RunCmd("5") // make sure the follower is lagging the audit so he doesn't beat the auditor to the ID change and produce a heartbeat that will kill him - RunCmd("x") - WaitForBlock(state3, 10) // wait till should have 3 has brainswapped - RunCmd("x") - WaitBlocks(state0, 1) - WaitForAllNodes(state0) - CheckAuthoritySet(t) - }) - - t.Run("Verify Network", func(t *testing.T) { - - if state2.Leader { - t.Error("Node 2 did not become a follower") - } - if state3.Leader { - t.Error("Node 3 did not become a follower") - } - if !state4.Leader { - t.Error("Node 4 did not become a leader") - } - - list := state0.ProcessLists.Get(state0.LLeaderHeight) - foundAudit, _ := list.GetAuditServerIndexHash(state5.GetIdentityChainID()) - if !foundAudit { - t.Error("Node 5 did not become an audit server") - } - - Halt(t) - }) - - }) -} diff --git a/simTest/BrainSwap_test.go b/simTest/BrainSwap_test.go new file mode 100644 index 0000000000..d1416a39bd --- /dev/null +++ b/simTest/BrainSwap_test.go @@ -0,0 +1,45 @@ +package simtest + +import ( + "testing" + + "github.com/FactomProject/factomd/engine" + . 
"github.com/FactomProject/factomd/testHelper" +) + +/* +Test brainswapping F <-> L and F <-> A + +follower and a leader + follower and an audit +at the same height in the same build +*/ +func TestBrainSwap(t *testing.T) { + ResetSimHome(t) // clear out old test home + for i := 0; i < 6; i++ { // build config files for the test + WriteConfigFile(i, i, "", t) // just write the minimal config + } + + params := map[string]string{"--blktime": "15"} + state0 := SetupSim("LLLAFF", params, 15, 0, 0, t) + state3 := engine.GetFnodes()[3].State // Get node 3 + + WaitForBlock(state0, 6) + WaitForAllNodes(state0) + + // rewrite the config to orchestrate brainSwaps + WriteConfigFile(2, 4, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between L2 and F4 + WriteConfigFile(4, 2, "ChangeAcksHeight = 10\n", t) + WriteConfigFile(3, 5, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between A3 and F5 + WriteConfigFile(5, 3, "ChangeAcksHeight = 10\n", t) + + WaitForBlock(state0, 9) + RunCmd("5") // make sure the follower is lagging the audit so he doesn't beat the auditor to the ID change and produce a heartbeat that will kill him + RunCmd("x") + WaitForBlock(state3, 10) // wait till should have 3 has brainswapped + RunCmd("x") + WaitBlocks(state0, 1) + + WaitForAllNodes(state0) + AssertAuthoritySet(t, "LLFFLA") + ShutDownEverything(t) +} diff --git a/simTest/EntriesBeforeChain_test.go b/simTest/EntriesBeforeChain_test.go new file mode 100644 index 0000000000..500c33bbae --- /dev/null +++ b/simTest/EntriesBeforeChain_test.go @@ -0,0 +1,119 @@ +package simtest + +import ( + "bytes" + "fmt" + "testing" + + "github.com/FactomProject/factomd/common/interfaces" + + "github.com/FactomProject/factom" + . 
"github.com/FactomProject/factomd/testHelper" + "github.com/stretchr/testify/assert" +) + +func TestEntriesBeforeChain(t *testing.T) { + + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + extids := [][]byte{encode("foo"), encode("bar")} + var lastentry interfaces.IHash + + a := AccountFromFctSecret("Fs2zQ3egq2j99j37aYzaCddPq9AF3mgh64uG9gRaDAnrkjRx3eHs") + b := AccountFromFctSecret("Fs2BNvoDgSoGJpWg4PvRUxqvLE28CQexp5FZM9X5qU6QvzFBUn6D") + + numEntries := 9 // set the total number of entries to add + + params := map[string]string{"--debuglog": ""} + state0 := SetupSim("LLAAFF", params, 10, 0, 0, t) + + var entries []interfaces.IMsg + var oneFct uint64 = factom.FactoidToFactoshi("1") + var ecMargin = 100 + + { // publish entries + publish := func(i int) { + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode(fmt.Sprintf("hello@%v", i)), // ensure no duplicate msg hashes + } + commit, _ := ComposeCommitEntryMsg(a.Priv, e) + reveal, _ := ComposeRevealEntryMsg(a.Priv, &e) + + state0.LogMessage("simtest", "commit", commit) + state0.LogMessage("simtest", "reveal", reveal) + + entries = append(entries, commit) + entries = append(entries, reveal) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + } + + for x := 0; x < numEntries; x++ { + publish(x) + } + + } + + { // create chain + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode("Hello World!"), + } + + c := factom.NewChain(&e) + + commit, _ := ComposeChainCommit(a.Priv, c) + reveal, _ := ComposeRevealEntryMsg(a.Priv, c.FirstEntry) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + lastentry = reveal.Entry.GetHash() + + } + + // REVIEW is this a good enough test for holding + WaitMinutes(state0, 2) // ensure messages are reviewed in holding at least once + + { // fund FCT address & chain & entries + + 
WaitForZeroEC(state0, a.EcPub()) + // initially unfunded EC conversion + a.ConvertEC(uint64(numEntries + 11 + ecMargin)) // Chain costs 10 + 1 per k so our chain head costs 11 + + b.FundFCT(oneFct * 20) // transfer coinbase funds to b + b.SendFCT(a, oneFct*10) // use account b to fund a.ConvertEC() from above + + WaitForEcBalanceOver(state0, a.EcPub(), int64(ecMargin-1)) + } + + WaitBlocks(state0, 1) // give time for holding to clear + WaitForEcBalanceUnder(state0, a.EcPub(), int64(ecMargin+1)) + WaitForEntry(state0, lastentry) + + ShutDownEverything(t) + WaitForAllNodes(state0) + + assert.Equal(t, int64(ecMargin), a.GetECBalance()) // should have 100 extra EC's + + /* + for _, fnode := range engine.GetFnodes() { + s := fnode.State + for _, h := range s.Hold.Messages() { + for _, m := range h { + s.LogMessage("newholding", "stuck", m) + } + } + assert.Equal(t, 0, len(s.Holding), "messages stuck in holding") + assert.Equal(t, 0, s.Hold.GetSize(), "messages stuck in New Holding") + } + */ +} diff --git a/simTest/EntryBatch_test.go b/simTest/EntryBatch_test.go new file mode 100644 index 0000000000..94db6d7832 --- /dev/null +++ b/simTest/EntryBatch_test.go @@ -0,0 +1,117 @@ +package simtest + +import ( + "bytes" + "fmt" + "testing" + + "github.com/FactomProject/factomd/common/interfaces" + "github.com/stretchr/testify/assert" + + "github.com/FactomProject/factom" + . 
"github.com/FactomProject/factomd/testHelper" +) + +// this applies chain & entry creation in 'proper' chronological order +func TestEntryBatch(t *testing.T) { + + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + extids := [][]byte{encode("foo"), encode("bar")} + + a := AccountFromFctSecret("Fs2zQ3egq2j99j37aYzaCddPq9AF3mgh64uG9gRaDAnrkjRx3eHs") + b := AccountFromFctSecret("Fs2BNvoDgSoGJpWg4PvRUxqvLE28CQexp5FZM9X5qU6QvzFBUn6D") + + numEntries := 9 // set the total number of entries to add + + println(a.String()) + + params := map[string]string{"--debuglog": ""} + state0 := SetupSim("LLAAFF", params, 10, 0, 0, t) + + var entries []interfaces.IMsg + var oneFct uint64 = factom.FactoidToFactoshi("1") + var ecMargin = 100 // amount of ec to have left + + { // fund entries & chain create + WaitForZeroEC(state0, a.EcPub()) // assert we are starting from zero + + b.FundFCT(oneFct * 20) // transfer coinbase funds to b + b.SendFCT(a, oneFct*10) // use account b to fund a.ConvertEC() from above + a.ConvertEC(uint64(numEntries + 11 + ecMargin)) // Chain costs 10 + 1 per k so our chain head costs 11 + + WaitForEcBalanceOver(state0, a.EcPub(), int64(ecMargin-1)) // wait for all entries to process + } + + { // create the chain + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode("Hello World!"), + } + + c := factom.NewChain(&e) + + commit, _ := ComposeChainCommit(a.Priv, c) + reveal, _ := ComposeRevealEntryMsg(a.Priv, c.FirstEntry) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + } + + WaitMinutes(state0, 1) + + { // write entries + + for i := 0; i < numEntries; i++ { + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode(fmt.Sprintf("hello@%v", i)), // ensure no duplicate msg hashes + } + commit, _ := ComposeCommitEntryMsg(a.Priv, e) + reveal, _ := ComposeRevealEntryMsg(a.Priv, &e) + + 
state0.LogMessage("simtest", "commit", commit) + state0.LogMessage("simtest", "reveal", reveal) + + entries = append(entries, commit) + entries = append(entries, reveal) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + } + + } + + WaitForEcBalanceUnder(state0, a.EcPub(), int64(ecMargin+1)) // wait for all entries to process + WaitBlocks(state0, 1) // give time for holding to clear + + ShutDownEverything(t) + WaitForAllNodes(state0) + + assert.Equal(t, int64(ecMargin), a.GetECBalance()) // should have 100 extra EC's + + /* + { // check outputs + assert.Equal(t, int64(0), a.GetECBalance()) + + for _, fnode := range engine.GetFnodes() { + s := fnode.State + for _, h := range state0.Hold.Messages() { + for _, m := range h { + s.LogMessage("newholding", "stuck", m) + } + } + assert.Equal(t, 0, len(s.Holding), "messages stuck in holding") + assert.Equal(t, 0, s.Hold.GetSize(), "messages stuck in New Holding") + } + + } + */ +} diff --git a/simTest/HoldingRebound_test.go b/simTest/HoldingRebound_test.go new file mode 100644 index 0000000000..9969798054 --- /dev/null +++ b/simTest/HoldingRebound_test.go @@ -0,0 +1,125 @@ +package simtest + +import ( + "bytes" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/FactomProject/factom" + "github.com/FactomProject/factomd/state" + . 
"github.com/FactomProject/factomd/testHelper" +) + +func TestHoldingRebound(t *testing.T) { + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + extids := [][]byte{encode("foo"), encode("bar")} + a := AccountFromFctSecret("Fs2zQ3egq2j99j37aYzaCddPq9AF3mgh64uG9gRaDAnrkjRx3eHs") + + println(a.String()) + + params := map[string]string{"--debuglog": "."} + + state0 := SetupSim("L", params, 12, 0, 0, t) + + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode("Hello World!"), + } + + c := factom.NewChain(&e) + + commit, _ := ComposeChainCommit(a.Priv, c) + reveal, _ := ComposeRevealEntryMsg(a.Priv, c.FirstEntry) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + + a.FundEC(11) + GenerateCommitsAndRevealsInBatches(t, state0) + + ht := state0.GetDBHeightComplete() + WaitBlocks(state0, 2) + newHt := state0.GetDBHeightComplete() + assert.True(t, ht < newHt, "block height should progress") + + ShutDownEverything(t) + WaitForAllNodes(state0) + + for _, ml := range state0.Hold.Messages() { + for _, m := range ml { + state0.LogMessage("simTest", "stuck", m) + } + } +} + +func GenerateCommitsAndRevealsInBatches(t *testing.T, state0 *state.State) { + + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + // KLUDGE vars duplicated from original test - should refactor + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + a := AccountFromFctSecret("Fs2zQ3egq2j99j37aYzaCddPq9AF3mgh64uG9gRaDAnrkjRx3eHs") + + batchCount := 1 + setDelay := 0 // blocks to wait between sets of entries + numEntries := 250 // set the total number of entries to add + + logName := "simTest" + state0.LogPrintf(logName, "BATCHES:%v", batchCount) + state0.LogPrintf(logName, "ENTRIES:%v", numEntries) + state0.LogPrintf(logName, "DELAY_BLOCKS:%v", setDelay) + + var batchTimes = 
make(map[int]time.Duration) + + for BatchID := 0; BatchID < int(batchCount); BatchID++ { + + publish := func(i int) { + + extids := [][]byte{encode(fmt.Sprintf("batch%v", BatchID))} + + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode(fmt.Sprintf("batch %v, seq: %v", BatchID, i)), // ensure no duplicate msg hashes + } + i++ + + commit, _ := ComposeCommitEntryMsg(a.Priv, e) + reveal, _ := ComposeRevealEntryMsg(a.Priv, &e) + + state0.APIQueue().Enqueue(commit) + state0.APIQueue().Enqueue(reveal) + } + + for x := 0; x < numEntries; x++ { + publish(x) + } + + { // measure time it takes to process all messages by observing entry credit spend + tstart := time.Now() + a.FundEC(uint64(numEntries + 1)) + WaitForEcBalanceUnder(state0, a.EcPub(), int64(BatchID+2)) + tend := time.Now() + batchTimes[BatchID] = tend.Sub(tstart) + state0.LogPrintf(logName, "BATCH %v RUNTIME %v", BatchID, batchTimes[BatchID]) + } + + if setDelay > 0 { + WaitBlocks(state0, int(setDelay)) // wait between batches + } + } +} diff --git a/simTest/LeaderBrainSwap_test.go b/simTest/LeaderBrainSwap_test.go new file mode 100644 index 0000000000..da634b2764 --- /dev/null +++ b/simTest/LeaderBrainSwap_test.go @@ -0,0 +1,62 @@ +package simtest + +import ( + "fmt" + "testing" + + "github.com/FactomProject/factomd/engine" + . 
"github.com/FactomProject/factomd/testHelper" +) + +/* +Test brainswapping F <-> L with no auditors + +This test is useful for catching a failure scenario where the timing between +identity swap is off leading to a stall +*/ +func TestLeaderBrainSwap(t *testing.T) { + ResetSimHome(t) // clear out old test home + for i := 0; i < 6; i++ { // build config files for the test + WriteConfigFile(i, i, "", t) // just write the minimal config + } + + params := map[string]string{"--blktime": "10"} + state0 := SetupSim("LLLFFF", params, 30, 0, 0, t) + state3 := engine.GetFnodes()[3].State // Get node 2 + + WaitForAllNodes(state0) + WaitForBlock(state0, 6) + + // FIXME https://factom.atlassian.net/browse/FD-950 - setting batch > 1 can occasionally cause failure + batches := 1 // use odd number to fulfill LFFFLL as end condition + + for batch := 0; batch < batches; batch++ { + + target := batch + 7 + + change := fmt.Sprintf("ChangeAcksHeight = %v\n", target) + + if batch%2 == 0 { + WriteConfigFile(1, 5, change, t) // Setup A brain swap between L1 and F5 + WriteConfigFile(5, 1, change, t) + + WriteConfigFile(2, 4, change, t) // Setup A brain swap between L2 and F4 + WriteConfigFile(4, 2, change, t) + + } else { + WriteConfigFile(5, 5, change, t) // Un-Swap + WriteConfigFile(1, 1, change, t) + + WriteConfigFile(4, 4, change, t) + WriteConfigFile(2, 2, change, t) + } + + WaitForBlock(state3, target) + WaitMinutes(state3, 1) + } + + WaitBlocks(state0, 1) + AssertAuthoritySet(t, "LFFFLL") + WaitForAllNodes(state0) + ShutDownEverything(t) +} diff --git a/simTest/MessageFilteringInput_test.go b/simTest/MessageFilteringInput_test.go new file mode 100644 index 0000000000..c33cce3f6f --- /dev/null +++ b/simTest/MessageFilteringInput_test.go @@ -0,0 +1,46 @@ +package simtest + +import ( + "strings" + "testing" + + . "github.com/FactomProject/factomd/engine" + . 
"github.com/FactomProject/factomd/testHelper" +) + +func TestFilterAPIInput(t *testing.T) { + + state0 := SetupSim("LLLLLAAF", map[string]string{"--debuglog": "."}, 25, 1, 1, t) + + RunCmd("1") + RunCmd("w") + RunCmd("s") + + apiRegex := "EOM.*5/.*minute 1" + SetInputFilter(apiRegex) + + WaitBlocks(state0, 5) + + // The message-filter call we did above should have caused an election and SO, Node01 should not be a leader anymore. + if GetFnodes()[1].State.Leader { + t.Fatalf("Node01 should not be leader!") + } + + CheckAuthoritySet(t) + + // Check Node01 Network Input logs to make sure there are no enqueued including our Regex + out := SystemCall(`grep "enqueue" fnode01_networkinputs.txt | grep "` + apiRegex + `" | grep -v "EmbeddedMsg" | wc -l`) + + if strings.TrimSuffix(strings.Trim(string(out), " "), "\n") != string("0") { + t.Fatalf("Filter missed let a message pass 1.") + } + + // Check Node01 Network Input logs to make sure there are no Dropped messaged besides the ones for our Regex + out2 := SystemCall(`grep "Drop, matched filter Regex" fnode01_networkinputs.txt | grep -v "` + apiRegex + `" | wc -l`) + + if strings.TrimSuffix(strings.Trim(string(out2), " "), "\n") != string("0") { + t.Fatalf("Filter missed let a message pass 2.") + } + + ShutDownEverything(t) +} diff --git a/simTest/MessageFilteringOutput_test.go b/simTest/MessageFilteringOutput_test.go new file mode 100644 index 0000000000..1029ef566b --- /dev/null +++ b/simTest/MessageFilteringOutput_test.go @@ -0,0 +1,45 @@ +package simtest + +import ( + "strings" + "testing" + + . "github.com/FactomProject/factomd/engine" + . 
"github.com/FactomProject/factomd/testHelper" +) + +func TestFilterAPIOutput(t *testing.T) { + + state0 := SetupSim("LLLLLAAF", map[string]string{"--debuglog": "."}, 25, 1, 1, t) + + RunCmd("1") + RunCmd("w") + RunCmd("s") + + apiRegex := "EOM.*5/.*minute 1" + SetOutputFilter(apiRegex) + + WaitBlocks(state0, 5) + + // The message-filter call we did above should have caused an election and SO, Node01 should not be a leader anymore. + if GetFnodes()[1].State.Leader { + t.Fatalf("Node01 should not be leader!") + } + CheckAuthoritySet(t) + + // Check Node01 Network Output logs to make sure there are no Dropped messaged besides the ones for our Regex + out := SystemCall(`grep "Drop, matched filter Regex" fnode01_networkoutputs.txt | grep -v "` + apiRegex + `" | wc -l`) + + if strings.TrimSuffix(strings.Trim(string(out), " "), "\n") != string("0") { + t.Fatalf("Filter missed let a message pass 1.") + } + + // Checks Node01 Network Outputs to make sure there are no Sent broadcast including our Regex + out2 := SystemCall(`grep "Send broadcast" fnode01_networkoutputs.txt | grep "` + apiRegex + `" | grep -v "EmbeddedMsg" | wc -l`) + + if strings.TrimSuffix(strings.Trim(string(out2), " "), "\n") != string("0") { + t.Fatalf("Filter missed let a message pass 2.") + } + + ShutDownEverything(t) +} diff --git a/simTest/README.md b/simTest/README.md new file mode 100644 index 0000000000..e8078458db --- /dev/null +++ b/simTest/README.md @@ -0,0 +1,22 @@ +# factomd/simTest + +This folder contains simulation tests that can run alone in isolation. + +## Add a test to circle.ci + +add the filename to `ci_whitelist` file in this directory + + +### Run a test + +NOTE: each `_test.go` file in this folder should be able to be run by itself + +EX: +``` + go test -v ./simTest/BrainSwap_test.go +``` + +This is in contrast to testing by module (as we do with other types of unit tests) +``` +go test -v ./engine/... 
+`` diff --git a/simTest/SetupANetwork_test.go b/simTest/SetupANetwork_test.go new file mode 100644 index 0000000000..5ea4a83dca --- /dev/null +++ b/simTest/SetupANetwork_test.go @@ -0,0 +1,98 @@ +package simtest + +import "time" + +import ( + "testing" + + . "github.com/FactomProject/factomd/engine" + . "github.com/FactomProject/factomd/testHelper" +) + +func TestSetupANetwork(t *testing.T) { + + state0 := SetupSim("LLLLAAAFFF", map[string]string{"--debuglog": ""}, 20, 0, 0, t) + + RunCmd("9") // Puts the focus on node 9 + RunCmd("x") // Takes Node 9 Offline + RunCmd("w") // Point the WSAPI to send API calls to the current node. + RunCmd("10") // Puts the focus on node 9 + RunCmd("8") // Puts the focus on node 8 + RunCmd("w") // Point the WSAPI to send API calls to the current node. + RunCmd("7") + WaitBlocks(state0, 1) // Wait for 1 block + + WaitForMinute(state0, 2) // Waits for minute 2 + RunCmd("F100") // Set the Delay on messages from all nodes to 100 milliseconds + // .15 second minutes is too fast for dropping messages until the dropping is fixed (FD-971) is fixed + // could change to 4 second minutes and turn this back on -- Clay + // RunCmd("S10") // Set Drop Rate to 1.0 on everyone + RunCmd("g10") // Adds 10 identities to your identity pool. + + fn1 := GetFocus() + PrintOneStatus(0, 0) + if fn1.State.FactomNodeName != "FNode07" { + t.Fatalf("Expected FNode07, but got %s", fn1.State.FactomNodeName) + } + RunCmd("g1") // Adds 1 identities to your identity pool. + WaitForMinute(state0, 3) // Waits for 3 "Minutes" + RunCmd("g1") // // Adds 1 identities to your identity pool. + WaitForMinute(state0, 4) // Waits for 4 "Minutes" + RunCmd("g1") // Adds 1 identities to your identity pool. + WaitForMinute(state0, 5) // Waits for 5 "Minutes" + RunCmd("g1") // Adds 1 identities to your identity pool. 
+ WaitForMinute(state0, 6) // Waits for 6 "Minutes" + WaitBlocks(state0, 1) // Waits for 1 block + WaitForMinute(state0, 1) // Waits for 1 "Minutes" + RunCmd("g1") // Adds 1 identities to your identity pool. + WaitForMinute(state0, 2) // Waits for 2 "Minutes" + RunCmd("g1") // Adds 1 identities to your identity pool. + WaitForMinute(state0, 3) // Waits for 3 "Minutes" + RunCmd("g20") // Adds 20 identities to your identity pool. + WaitBlocks(state0, 1) + RunCmd("9") // Focuses on Node 9 + RunCmd("x") // Brings Node 9 back Online + RunCmd("8") // Focuses on Node 8 + + time.Sleep(100 * time.Millisecond) + + fn2 := GetFocus() + PrintOneStatus(0, 0) + if fn2.State.FactomNodeName != "FNode08" { + t.Fatalf("Expected FNode08, but got %s", fn1.State.FactomNodeName) + } + + RunCmd("i") // Shows the identities being monitored for change. + // Test block recording lengths and error checking for pprof + RunCmd("b100") // Recording delays due to blocked go routines longer than 100 ns (0 ms) + + RunCmd("b") // specifically how long a block will be recorded (in nanoseconds). 1 records all blocks. + + RunCmd("babc") // Not sure that this does anything besides return a message to use "bnnn" + + RunCmd("b1000000") // Recording delays due to blocked go routines longer than 1000000 ns (1 ms) + + RunCmd("/") // Sort Status by Chain IDs + + RunCmd("/") // Sort Status by Node Name + + RunCmd("a1") // Shows Admin block for Node 1 + RunCmd("e1") // Shows Entry credit block for Node 1 + RunCmd("d1") // Shows Directory block + RunCmd("f1") // Shows Factoid block for Node 1 + RunCmd("a100") // Shows Admin block for Node 100 + RunCmd("e100") // Shows Entry credit block for Node 100 + RunCmd("d100") // Shows Directory block + RunCmd("f100") // Shows Factoid block for Node 1 + RunCmd("yh") // Nothing + RunCmd("yc") // Nothing + RunCmd("r") // Rotate the WSAPI around the nodes + WaitForMinute(state0, 1) // Waits 1 "Minute" + + RunCmd("g1") // Adds 1 identities to your identity pool. 
+ WaitForMinute(state0, 3) // Waits 3 "Minutes" + WaitBlocks(fn1.State, 3) // Waits for 3 blocks + + ShutDownEverything(t) + +} diff --git a/simTest/SimWallet_test.go b/simTest/SimWallet_test.go new file mode 100644 index 0000000000..7b2c204d36 --- /dev/null +++ b/simTest/SimWallet_test.go @@ -0,0 +1,38 @@ +package simtest + +import ( + "fmt" + "testing" + + "github.com/FactomProject/factom" + . "github.com/FactomProject/factomd/testHelper" +) + +func TestSimWallet(t *testing.T) { + + a := AccountFromFctSecret("Fs31kMMKBSCDwa7tSEzjQ4EfGeXARdUS22oBEJKNSJWbLAMTsEHr") + b := AccountFromFctSecret("Fs2BNvoDgSoGJpWg4PvRUxqvLE28CQexp5FZM9X5qU6QvzFBUn6D") + + fmt.Printf("A: %s", a) + fmt.Printf("B: %s", b) + + state0 := SetupSim("L", map[string]string{"--debuglog": ""}, 80, 0, 0, t) + + var oneFct uint64 = factom.FactoidToFactoshi("1") + a.FundFCT(10 * oneFct) // fund fct from coinbase 'bank' + a.SendFCT(b, oneFct) // fund Account b from Acct a + WaitForAnyFctBalance(state0, a.FctPub()) // wait for non-zero + WaitForFctBalanceUnder(state0, a.FctPub(), int64(9*oneFct)) // wait for at least 1 fct + WaitForFctBalanceOver(state0, b.FctPub(), int64(oneFct-1)) // wait for at least 1 fct + + a.FundEC(10) // fund EC from coinbase 'bank' + WaitForEcBalanceOver(state0, a.EcPub(), 1) // wait for at least 1 ec + + b.ConvertEC(10) // fund EC from account b + WaitForAnyEcBalance(state0, b.EcPub()) + WaitForEcBalanceUnder(state0, b.EcPub(), 11) + + WaitBlocks(state0, 1) + ShutDownEverything(t) + +} diff --git a/simTest/auditBrainTests/AuditBrainShared_test.go b/simTest/auditBrainTests/AuditBrainShared_test.go new file mode 100644 index 0000000000..6c8e1ead18 --- /dev/null +++ b/simTest/auditBrainTests/AuditBrainShared_test.go @@ -0,0 +1,63 @@ +package auditBrainTests_test + +import ( + "os" + "testing" + + "github.com/FactomProject/factomd/common/globals" + "github.com/FactomProject/factomd/engine" + "github.com/FactomProject/factomd/state" + . 
"github.com/FactomProject/factomd/testHelper" +) + +func SetupConfigFiles(t *testing.T) { + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + globals.Params.FactomHome = dir + "/.sim" + os.Setenv("FACTOM_HOME", globals.Params.FactomHome) + + t.Logf("Removing old run in %s", globals.Params.FactomHome) + if err := os.RemoveAll(globals.Params.FactomHome); err != nil { + t.Fatal(err) + } + + // build config files for the test + for i := 0; i < 6; i++ { + WriteConfigFile(i, i, "", t) // just write the minimal config + } +} + +func SetupNodes(t *testing.T, givenNodes string) map[int]*state.State { + states := map[int]*state.State{} + states[0] = SetupSim(givenNodes, buildParmList(), 15, 0, 0, t) + for i := 1; i <= len(givenNodes)-1; i++ { + states[i] = engine.GetFnodes()[i].State + } + return states +} + +func buildParmList() map[string]string { + params := map[string]string{ + "--db": "LDB", // NOTE: using MAP causes an occasional error see FD-825 + "--network": "LOCAL", + "--net": "alot+", + "--enablenet": "true", + "--blktime": "10", + "--startdelay": "1", + "--stdoutlog": "out.txt", + "--stderrlog": "out.txt", + "--checkheads": "false", + "--controlpanelsetting": "readwrite", + "--debuglog": ".", + "--logPort": "38000", + "--port": "38001", + "--controlpanelport": "38002", + "--networkport": "38003", + "--peers": "127.0.0.1:37003", + "--factomhome": globals.Params.FactomHome, + } + return params +} diff --git a/simTest/auditBrainTests/AuditBrainSwap_test.go b/simTest/auditBrainTests/AuditBrainSwap_test.go new file mode 100644 index 0000000000..a27e4c154c --- /dev/null +++ b/simTest/auditBrainTests/AuditBrainSwap_test.go @@ -0,0 +1,56 @@ +package auditBrainTests_test + +import ( + "testing" + + "github.com/FactomProject/factomd/common/constants/servertype" + "github.com/FactomProject/factomd/state" + . 
"github.com/FactomProject/factomd/testHelper" +) + +// Test brainswapping a follower and an audit when the audit is lagging behind +func TestAuditBrainSwap(t *testing.T) { + t.Run("Run Brain Swap Sim", func(t *testing.T) { + t.Run("Setup Config Files", SetupConfigFiles) + states := SetupNodes(t, "LLLAFF") + swapIdentities(t, states) + verifyNetworkAfterSwap(t, states) + }) +} + +func swapIdentities(t *testing.T, states map[int]*state.State) bool { + return t.Run("Wait For Identity Swap", func(t *testing.T) { + WaitForBlock(states[0], 6) + WaitForAllNodes(states[0]) + + // rewrite the config to have brainswaps + WriteConfigFile(3, 5, "ChangeAcksHeight = 10\n", t) // Setup A brain swap between A3 and F5 + WriteConfigFile(5, 3, "ChangeAcksHeight = 10\n", t) + WaitForBlock(states[0], 9) + RunCmd("3") // make sure the Audit is lagging the audit if the heartbeats conflicts one will panic + RunCmd("x") + WaitForBlock(states[5], 10) // wait till 5 should have have brainswapped + RunCmd("x") + WaitBlocks(states[0], 1) + WaitForAllNodes(states[0]) + CheckAuthoritySet(t) + }) +} + +func verifyNetworkAfterSwap(t *testing.T, states map[int]*state.State) { + t.Run("Verify Network", func(t *testing.T) { + list := states[0].ProcessLists.Get(states[0].LLeaderHeight) + + serverType := servertype.GetServerType(list, states[3]) + if serverType != servertype.Follower { + t.Error("Node 3 did not become a follower but a " + serverType) + } + + serverType = servertype.GetServerType(list, states[5]) + if servertype.GetServerType(list, states[5]) != servertype.AuditServer { + t.Error("Node 5 did not become an audit server but a " + serverType) + } + + Halt(t) + }) +} diff --git a/simTest/ci_whitelist b/simTest/ci_whitelist new file mode 100644 index 0000000000..79f8b735a9 --- /dev/null +++ b/simTest/ci_whitelist @@ -0,0 +1,4 @@ +simTest/AddFNode_test.go +simTest/EntriesBeforeChain_test.go +simTest/HoldingRebound_test.go +simTest/SetupANetwork_test.go diff --git a/state/HoldingList.go 
b/state/HoldingList.go new file mode 100644 index 0000000000..fcfef470d6 --- /dev/null +++ b/state/HoldingList.go @@ -0,0 +1,214 @@ +package state + +import ( + "fmt" + + "github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/messages" + + "github.com/FactomProject/factomd/common/interfaces" +) + +// toggle to disable old/new for testing +const useNewHolding = true + +// This hold a slice of messages dependent on a hash +type HoldingList struct { + holding map[[32]byte][]interfaces.IMsg + s *State // for debug logging + dependents map[[32]byte]bool // used to avoid duplicate entries in holding +} + +func (l *HoldingList) Init(s *State) { + l.holding = make(map[[32]byte][]interfaces.IMsg) + l.s = s + l.dependents = make(map[[32]byte]bool) + + if !useNewHolding { + l.s.LogPrintf("newHolding", "DISABLED") + } +} + +func (l *HoldingList) Messages() map[[32]byte][]interfaces.IMsg { + return l.holding +} + +func (l *HoldingList) GetSize() int { + return len(l.dependents) +} + +func (l *HoldingList) Exists(h [32]byte) bool { + return l.dependents[h] +} + +// Add a message to a dependent holding list +func (l *HoldingList) Add(h [32]byte, msg interfaces.IMsg) bool { + + if l.dependents[msg.GetMsgHash().Fixed()] { + return false + } + + if l.holding[h] == nil { + l.holding[h] = []interfaces.IMsg{msg} + } else { + l.holding[h] = append(l.holding[h], msg) + } + + l.dependents[msg.GetMsgHash().Fixed()] = true + //l.s.LogMessage("newHolding", "add", msg) + return true +} + +// get and remove the list of dependent message for a hash +func (l *HoldingList) Get(h [32]byte) []interfaces.IMsg { + rval := l.holding[h] + delete(l.holding, h) + + for _, msg := range rval { + // l.s.LogMessage("newHolding", "delete", msg) + delete(l.dependents, msg.GetMsgHash().Fixed()) + } + return rval +} + +func (l *HoldingList) ExecuteForNewHeight(ht uint32) { + l.s.ExecuteFromHolding(HeightToHash(ht)) +} + +// clean stale messages from holding +func (l 
*HoldingList) Review() { + + if !useNewHolding && l.GetSize() > 0 { + panic("found messages in new-holding while disabled") + } + + for h := range l.holding { + dh := l.holding[h] + if nil == dh { + continue + } + for _, msg := range dh { + if l.isMsgStale(msg) { + l.Get(h) // remove all from holding + //l.s.LogMessage("newHolding", "RemoveFromDependantHolding()", msg) + continue + } + } + } +} + +func (l *HoldingList) isMsgStale(msg interfaces.IMsg) (res bool) { + + /* + REVIEW: + Maybe we should treat the message stream as a votes on the "highest known block" where known servers trump unknown servers who disagree? + + Consider setting HKB and HAB when we complete minute 1 of a block to the current leader height. + That at least would make us recover from a spoofed ack attack. + */ + + switch msg.Type() { + case constants.EOM_MSG: + if msg.(*messages.EOM).DBHeight < l.s.GetHighestKnownBlock()-1 { + res = true + } + case constants.ACK_MSG: + if msg.(*messages.Ack).DBHeight < l.s.GetHighestKnownBlock()-1 { + res = true + } + case constants.DIRECTORY_BLOCK_SIGNATURE_MSG: + if msg.(*messages.DirectoryBlockSignature).DBHeight < l.s.GetHighestKnownBlock()-1 { + res = true + } + default: + // l.s.LogMessage("newHolding", "SKIP_DBHT_REVIEW", msg) + } + + if msg.GetTimestamp().GetTime().UnixNano() < l.s.GetFilterTimeNano() { + res = true + } + + if res { + l.s.LogMessage("newHolding", "EXPIRE", msg) + } else { + // l.s.LogMessage("newHolding", "NOT_EXPIRED", msg) + } + + return res +} + +func (s *State) HoldForHeight(ht uint32, msg interfaces.IMsg) int { + // todo: test if this is necessary + if s.GetLLeaderHeight()+1 == ht && s.GetCurrentMinute() >= 9 { + s.LogMessage("newHolding", fmt.Sprintf("SKIP_HoldForHeight %x", ht), msg) + return 0 // send to old holding + } + s.LogMessage("newHolding", fmt.Sprintf("HoldForHeight %x", ht), msg) + return s.Add(HeightToHash(ht), msg) // add to new holding +} + +// Add a message to a dependent holding list +func (s *State) Add(h 
[32]byte, msg interfaces.IMsg) int { + + if !useNewHolding { + return 0 + } + + if msg == nil { + panic("Empty Message Added to Holding") + } + + if h == [32]byte{} { + panic("Empty Hash Passed to New Holding") + } + + if s.Hold.Add(h, msg) { + s.LogMessage("newHolding", fmt.Sprintf("add[%x]", h[:6]), msg) + } + + // mark as invalid for validator loop + return -2 // ensures message is not sent to hold holding +} + +// get and remove the list of dependent message for a hash +func (s *State) Get(h [32]byte) []interfaces.IMsg { + return s.Hold.Get(h) +} + +// Execute a list of messages from holding that are dependent on a hash +// the hash may be a EC address or a CainID or a height (ok heights are not really hashes but we cheat on that) +func (s *State) ExecuteFromHolding(h [32]byte) { + + if !useNewHolding && s.Hold.GetSize() > 0 { + panic("found messages in new-holding while disabled") + } + // get the list of messages waiting on this hash + l := s.Get(h) + if l == nil { + // s.LogPrintf("newHolding", "ExecuteFromDependantHolding(%x) nothing waiting", h[:6]) + return + } + s.LogPrintf("newHolding", "ExecuteFromDependantHolding(%d)[%x]", len(l), h[:6]) + + for _, m := range l { + s.LogPrintf("newHolding", "delete R-%x", m.GetMsgHash().Bytes()[:3]) + } + + go func() { + // add the messages to the msgQueue so they get executed as space is available + for _, m := range l { + s.LogMessage("msgQueue", "enqueue_from_dependent_holding", m) + s.msgQueue <- m + } + }() +} + +// put a height in the first 4 bytes of a hash so we can use it to look up dependent message in holding +func HeightToHash(height uint32) [32]byte { + var h [32]byte + h[0] = byte((height >> 24) & 0xFF) + h[1] = byte((height >> 16) & 0xFF) + h[2] = byte((height >> 8) & 0xFF) + h[3] = byte((height >> 0) & 0xFF) + return h +} diff --git a/state/MMR.go b/state/MMR.go index 3f7bd76992..8fe55dfe27 100644 --- a/state/MMR.go +++ b/state/MMR.go @@ -28,7 +28,7 @@ type MMRInfo struct { // starts the MMR processing 
for this state func (s *State) startMMR() { - go s.makeMMRs(s.asks, s.adds, s.dbheights) + go s.makeMMRs() } // Ask VM for an MMR for this height with delay ms before asking the network @@ -80,7 +80,7 @@ var MMR_enable bool = true // Receive all asks and all process list adds and create missing message requests any ask that has expired // and still pending. Add 10 seconds to the ask. // Doesn't really use (can't use) the process list but I have it for debug -func (s *State) makeMMRs(asks <-chan askRef, adds <-chan plRef, dbheights <-chan int) { +func (s *State) makeMMRs() { type dbhvm struct { dbh int vm int @@ -113,7 +113,7 @@ func (s *State) makeMMRs(asks <-chan askRef, adds <-chan plRef, dbheights <-chan readasks: for { select { - case ask := <-asks: + case ask := <-s.asks: addAsk(ask) default: break readasks @@ -125,7 +125,7 @@ func (s *State) makeMMRs(asks <-chan askRef, adds <-chan plRef, dbheights <-chan readadds: for { select { - case add := <-adds: + case add := <-s.adds: addAdd(add) default: break readadds @@ -182,7 +182,7 @@ func (s *State) makeMMRs(asks <-chan askRef, adds <-chan plRef, dbheights <-chan } select { - case dbheight = <-dbheights: + case dbheight = <-s.dbheights: // toss any old pending requests when the height moves up // todo: Keep asks in a list so cleanup is more efficient for ask, _ := range pending { @@ -191,11 +191,11 @@ func (s *State) makeMMRs(asks <-chan askRef, adds <-chan plRef, dbheights <-chan delete(pending, ask) } } - case ask := <-asks: + case ask := <-s.asks: addAsk(ask) addAllAsks() - case add := <-adds: + case add := <-s.adds: addAllAsks() // process all pending asks before any adds addAdd(add) diff --git a/state/crossBootReplay.go b/state/crossBootReplay.go index 3f0a715b33..8d3cf356f7 100644 --- a/state/crossBootReplay.go +++ b/state/crossBootReplay.go @@ -54,7 +54,7 @@ type CrossReplayFilter struct { // Indicates we have been running for awhile // and should already have the salts stopAddingSalts bool - bootTime 
time.Time + endTime time.Time currentSaltCache map[[8]byte]bool oldSaltCache map[[8]byte]bool db interfaces.IDatabase @@ -74,7 +74,7 @@ func NewCrossReplayFilter(path string) *CrossReplayFilter { c.oldSaltCache = make(map[[8]byte]bool) // Load the old salts into the map c.loadOldSalts() - c.bootTime = time.Now() + c.endTime = time.Now().Add(constants.CROSSBOOT_SALT_REPLAY_DURATION) var m MarshalableUint32 c.db.Get(heightBucket, lowest, &m) @@ -134,7 +134,7 @@ func (c *CrossReplayFilter) ExistSalt(salt [8]byte) (bool, error) { func (c *CrossReplayFilter) Run() { for { time.Sleep(time.Second * 5) - if time.Now().Before(c.bootTime.Add(constants.CROSSBOOT_SALT_REPLAY_DURATION * -1)) { + if c.endTime.Before(time.Now()) { // We no longer need to add salts c.stopAddingSalts = true return diff --git a/state/dbStateCatchup.go b/state/dbStateCatchup.go new file mode 100644 index 0000000000..acbea3f5db --- /dev/null +++ b/state/dbStateCatchup.go @@ -0,0 +1,774 @@ +// Copyright 2017 Factom Foundation +// Use of this source code is governed by the MIT +// license that can be found in the LICENSE file. + +package state + +import ( + "container/list" + "reflect" + "sync" + "time" + + "github.com/FactomProject/factomd/common/messages" +) + +type GenericListItem interface { + Height() uint32 +} + +func getHeightSafe(i GenericListItem) int { + if i == nil || reflect.ValueOf(i).IsNil() { + return -1 + } + return int(i.Height()) +} + +func waitForLoaded(s *State) { + // Don't start until the db is finished loading. + for !s.DBFinished { + time.Sleep(1 * time.Second) + } +} + +// TODO: Redesign Catchup. Some assumptions were made that made this more +// TODO: complex than it neeeded to be. 
+func (list *DBStateList) Catchup() { + missing := list.State.StatesMissing + waiting := list.State.StatesWaiting + received := list.State.StatesReceived + + factomSecond := time.Duration(list.State.GetDirectoryBlockInSeconds()) * time.Second / 600 + + requestTimeout := list.State.RequestTimeout + if requestTimeout < 1*time.Second { // If the timeout is 0 (default), base off blktime + // 10min block == 30s timeout for a request. + // 5min block == 15s timeout for a request. + // 1min block == 3s timeout for a request. + requestTimeout = factomSecond * 30 + list.State.RequestTimeout = requestTimeout + } + requestLimit := list.State.RequestLimit + + // Wait for db to be loaded + waitForLoaded(list.State) + + // keep the lists up to date with the saved states. + go func() { + // Notify missing will add the height to the missing + // if it is not received and not already requested. + notifyMissing := func(n uint32) bool { + if !waiting.Has(n) { + list.State.LogPrintf("dbstatecatchup", "{actual} notify missing %d", n) + missing.Add(n) + return true + } + return false + } + + for { + start := time.Now() + // get the height of the saved blocks + hs := func() uint32 { + // Sets the floor for what we will be requesting + // AKA : What we have. In reality the receivedlist should + // indicate that we have it, however, because a dbstate + // is not fully validated before we get it, we cannot + // assume that. + floor := uint32(0) + // Once it is in the db, we can assume it's all good. 
+ if d, err := list.State.DB.FetchDBlockHead(); err == nil && d != nil { + floor = d.GetDatabaseHeight() // If it is in our db, let's make sure to stop asking + } + + list.State.LogPrintf("dbstatecatchup", "Floor diff %d / %d", list.State.GetHighestSavedBlk(), floor) + + // get the hightest block in the database at boot + b := list.State.GetDBHeightAtBoot() + + // don't request states that are in the database at boot time + if b > floor { + return b + } + return floor + }() + + // get the hight of the known blocks + hk := func() uint32 { + a := list.State.GetHighestAck() + k := list.State.GetHighestKnownBlock() + // check that known is more than 2 ahead of acknowledged to make + // sure not to ask for blocks that haven't finished + if k > a+2 { + return k + } + if a == 0 { + return a + } + return a - 1 // Acks are for height + 1 (sometimes +2 in min 0) + }() + + // The base means anything below we can toss + base := received.Base() + if base < hs { + list.State.LogPrintf("dbstatecatchup", "Received base set to %d", hs) + received.SetBase(hs) + base = hs + } + + receivedSlice := received.ListAsSlice() + + // When we pull the slice, we might be able to trim the receivedSlice for the next loop + sliceKeep := 0 + // TODO: Rewrite to stop redudundent looping over missing/waiting list + // TODO: for each delete. It shouldn't be too bad atm, as most things are in order. + for i, h := range receivedSlice { + list.State.LogPrintf("dbstatecatchup", "missing & waiting delete %d", h) + // remove any states from the missing list that have been saved. + missing.LockAndDelete(h) + // remove any states from the waiting list that have been saved. + waiting.LockAndDelete(h) + // Clean our our received list as well. 
+ if h <= base { + sliceKeep = i + received.LockAndDelete(h) + } + } + + // find gaps in the received list + // we can start at `sliceKeep` because everything below it was removed + for i := sliceKeep; i < len(receivedSlice)-1; i++ { + h := receivedSlice[i] + // if the height of the next received state is not equal to the + // height of the current received state plus one then there is a + // gap in the received state list. + for n := h; n+1 < receivedSlice[i+1]; n++ { + // missing.Notify <- NewMissingState(n + 1) + r := notifyMissing(n + 1) + list.State.LogPrintf("dbstatecatchup", "{gf} notify missing %d [%t]", n, r) + } + } + + // TODO: Better limit the number of asks based on what we already asked for. + // TODO: If we implement that, ensure that we don't drop anything, as this covers any holes + // TODO: that might be made + max := 3000 // Limit the number of new asks we will add for each iteration + // add all known states after the last received to the missing list + for n := received.Heighestreceived() + 1; n <= hk && max > 0; n++ { + max-- + // missing.Notify <- NewMissingState(n) + r := notifyMissing(n) + list.State.LogPrintf("dbstatecatchup", "{hf (%d, %d)} notify missing %d [%t]", hk, max, n, r) + } + + list.State.LogPrintf("dbstatecatchup", "height update took %s. Base:%d/%d/%d, Miss[v%d, ^_, T%d], Wait [v_, ^%d, T%d], Rec[v%d, ^%d, T%d]", + time.Since(start), + received.Base(), hs, list.State.GetDBHeightAtBoot(), + getHeightSafe(missing.GetFront()), missing.Len(), + getHeightSafe(waiting.GetEnd()), waiting.Len(), + received.Base(), received.Heighestreceived(), received.List.Len()) + time.Sleep(5 * factomSecond) + } + }() + + // watch the waiting list and move any requests that have timed out back + // into the missing list. 
+ go func() { + for { + base := received.Base() + waitingSlice := waiting.ListAsSlice() + //for e := waiting.List.Front(); e != nil; e = e.Next() { + for _, s := range waitingSlice { + // Instead of choosing if to ask for it, just remove it + if s.Height() <= base { + waiting.LockAndDelete(s.Height()) + continue + } + if s.RequestAge() > requestTimeout { + waiting.LockAndDelete(s.Height()) + if received.Get(s.Height()) == nil { + list.State.LogPrintf("dbstatecatchup", "request timeout : waiting -> missing %d", s.Height()) + missing.Add(s.Height()) + } + } + } + + time.Sleep(requestTimeout / 4) + } + }() + + // manage received dbstates + go func() { + for { + select { + case m := <-received.Notify: + s := NewReceivedState(m) + if s != nil { + list.State.LogPrintf("dbstatecatchup", "dbstate received : missing & waiting delete, received add %d", s.Height()) + missing.LockAndDelete(s.Height()) + waiting.LockAndDelete(s.Height()) + received.Add(s.Height(), s.Message()) + } + } + } + }() + + // request missing states from the network + go func() { + for { + if waiting.Len() < requestLimit { + // TODO: the batch limit should probably be set by a configuration variable + b, e := missing.NextConsecutiveMissing(10) + list.State.LogPrintf("dbstatecatchup", "dbstate requesting from %d to %d", b, e) + + if b == 0 && e == 0 { + time.Sleep(1 * time.Second) + continue + } + + // make sure the end doesn't come before the beginning + if e < b { + e = b + } + + msg := messages.NewDBStateMissing(list.State, b, e) + msg.SendOut(list.State, msg) + list.State.DBStateAskCnt += 1 // Total number of dbstates requests + for i := b; i <= e; i++ { + list.State.LogPrintf("dbstatecatchup", "\tdbstate requested : missing -> waiting %d", i) + missing.LockAndDelete(i) + waiting.Add(i) + } + } else { + // if the next missing state is a lower height than the last waiting + // state prune the waiting list + m := missing.GetFront() + w := waiting.GetEnd() + if m != nil && w != nil { + if m.Height() < 
w.Height() { + list.State.LogPrintf("dbstatecatchup", "waiting delete, cleanup %d", w.Height()) + waiting.LockAndDelete(w.Height()) + } + } + + time.Sleep(50 * time.Millisecond) + } + } + }() +} + +// MissingState is information about a DBState that is known to exist but is not +// available on the current node. +type MissingState struct { + height uint32 +} + +// NewMissingState creates a new MissingState for the DBState at a specific +// height. +func NewMissingState(height uint32) *MissingState { + s := new(MissingState) + s.height = height + return s +} + +func (s *MissingState) Height() uint32 { + return s.height +} + +type StatesMissing struct { + List *list.List + // Notify chan *MissingState + lock *sync.Mutex +} + +// NewStatesMissing creates a new list of missing DBStates. +func NewStatesMissing() *StatesMissing { + l := new(StatesMissing) + l.List = list.New() + // l.Notify = make(chan *MissingState) + l.lock = new(sync.Mutex) + return l +} + +// Add adds a new MissingState to the list. +func (l *StatesMissing) Add(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Back(); e != nil; e = e.Prev() { + s := e.Value.(*MissingState) + if height > s.Height() { + l.List.InsertAfter(NewMissingState(height), e) + return + } else if height == s.Height() { + return + } + } + l.List.PushFront(NewMissingState(height)) +} + +// LockAndDelete removes a MissingState from the list. 
+func (l *StatesMissing) LockAndDelete(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + l.DeleteLockless(height) +} + +func (l *StatesMissing) DeleteLockless(height uint32) { + // DeleteLockless does not lock the mutex, if called from another top level func + if l == nil { + return + } + + for e := l.List.Front(); e != nil; e = e.Next() { + if e.Value.(*MissingState).Height() == height { + l.List.Remove(e) + break + } + } +} + +func (l *StatesMissing) Get(height uint32) *MissingState { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Front(); e != nil; e = e.Next() { + s := e.Value.(*MissingState) + if s.Height() == height { + return s + } + } + return nil +} + +func (l *StatesMissing) GetFront() *MissingState { + // We want to lock here, as we first check the length, then grab the root. + // the root could be deleted after we checked the len. + l.lock.Lock() + defer l.lock.Unlock() + + e := l.List.Front() + if e != nil { + s := e.Value.(*MissingState) + if s != nil { + return s + } + } + return nil +} + +func (l *StatesMissing) Len() int { + return l.List.Len() +} + +// NextConsecutiveMissing returns the heights of the the next n or fewer +// consecutive missing states +func (l *StatesMissing) NextConsecutiveMissing(n int) (uint32, uint32) { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + f := l.List.Front() + if f == nil { + return 0, 0 + } + beg := f.Value.(*MissingState).Height() + end := beg + c := 0 + for e := f.Next(); e != nil; e = e.Next() { + h := e.Value.(*MissingState).Height() + // We are looking to see if the consecutive height + // sequence is broken. Easy to check if h != the next one + // we are expecting. 
+ if h != end+1 { + break + } + end++ + c++ + // TODO: the batch limit should probably be set as a configuration variable + if c == n { + break + } + } + return beg, end +} + +// GetNext pops the next MissingState from the list. +func (l *StatesMissing) GetNext() *MissingState { + // We want to lock here, as we first check the length, then grab the root. + // the root could be deleted after we checked the len. + l.lock.Lock() + defer l.lock.Unlock() + + e := l.List.Front() + if e != nil { + s := e.Value.(*MissingState) + l.DeleteLockless(s.Height()) + return s + } + return nil +} + +type WaitingState struct { + height uint32 + requestedTime time.Time +} + +func NewWaitingState(height uint32) *WaitingState { + s := new(WaitingState) + s.height = height + s.requestedTime = time.Now() + return s +} + +func (s *WaitingState) Height() uint32 { + return s.height +} + +func (s *WaitingState) RequestAge() time.Duration { + return time.Since(s.requestedTime) +} + +func (s *WaitingState) ResetRequestAge() { + s.requestedTime = time.Now() +} + +type StatesWaiting struct { + List *list.List + // Notify chan *WaitingState + lock *sync.Mutex +} + +func NewStatesWaiting() *StatesWaiting { + l := new(StatesWaiting) + l.List = list.New() + // l.Notify = make(chan *WaitingState) + l.lock = new(sync.Mutex) + return l +} + +func (l *StatesWaiting) ListAsSlice() []*WaitingState { + // Lock as we are iterating + l.lock.Lock() + defer l.lock.Unlock() + + slice := make([]*WaitingState, l.List.Len()) + i := 0 + for e := l.List.Front(); e != nil; e = e.Next() { + slice[i] = e.Value.(*WaitingState) + i++ + } + return slice + +} + +func (l *StatesWaiting) Add(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Back(); e != nil; e = e.Prev() { + s := e.Value.(*WaitingState) + if s == nil { + n := NewWaitingState(height) + l.List.InsertAfter(n, e) + return + } else if height > s.Height() { + n := NewWaitingState(height) + l.List.InsertAfter(n, e) + return + } else if 
height == s.Height() { + return + } + } + l.List.PushFront(NewWaitingState(height)) +} + +func (l *StatesWaiting) LockAndDelete(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Front(); e != nil; e = e.Next() { + s := e.Value.(*WaitingState) + if s.Height() == height { + l.List.Remove(e) + break + } + } +} + +func (l *StatesWaiting) Get(height uint32) *WaitingState { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Front(); e != nil; e = e.Next() { + s := e.Value.(*WaitingState) + if s.Height() == height { + return s + } + } + return nil +} + +func (l *StatesWaiting) GetEnd() *WaitingState { + // We want to lock here, as check the length then grab the root. + // The root could be deleted after we checked for the length + l.lock.Lock() + defer l.lock.Unlock() + + e := l.List.Back() + if e != nil { + s := e.Value.(*WaitingState) + if s != nil { + return s + } + } + return nil +} + +func (l *StatesWaiting) Has(height uint32) bool { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Front(); e != nil; e = e.Next() { + s := e.Value.(*WaitingState) + if s.Height() == height { + return true + } + } + return false +} + +func (l *StatesWaiting) Len() int { + return l.List.Len() +} + +// ReceivedState represents a DBStateMsg received from the network +type ReceivedState struct { + height uint32 + msg *messages.DBStateMsg +} + +// NewReceivedState creates a new member for the StatesReceived list +func NewReceivedState(msg *messages.DBStateMsg) *ReceivedState { + if msg == nil { + return nil + } + s := new(ReceivedState) + s.height = msg.DirectoryBlock.GetHeader().GetDBHeight() + s.msg = msg + return s +} + +// Height returns the block height of the received state +func (s *ReceivedState) Height() uint32 { + 
return s.height +} + +// Message returns the DBStateMsg received from the network. +func (s *ReceivedState) Message() *messages.DBStateMsg { + return s.msg +} + +// StatesReceived is the list of DBStates received from the network. "base" +// represents the height of known saved states. +type StatesReceived struct { + List *list.List + Notify chan *messages.DBStateMsg + base uint32 + lock *sync.Mutex +} + +func NewStatesReceived() *StatesReceived { + l := new(StatesReceived) + l.List = list.New() + l.Notify = make(chan *messages.DBStateMsg) + l.lock = new(sync.Mutex) + return l +} + +// Base returns the base height of the StatesReceived list +func (l *StatesReceived) Base() uint32 { + return l.base +} + +func (l *StatesReceived) SetBase(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + l.SetBaseLockless(height) +} + +func (l *StatesReceived) SetBaseLockless(height uint32) { + l.base = height + + for e := l.List.Front(); e != nil; e = e.Next() { + switch v := e.Value.(*ReceivedState).Height(); { + case v < l.base: + l.List.Remove(e) + case v == l.base: + l.List.Remove(e) + break + case v > l.base: + break + } + } +} + +// Heighestreceived returns the height of the last member in StatesReceived +func (l *StatesReceived) Heighestreceived() uint32 { + // We want to lock here, as we first check the length, then grab the root. + // the root could be deleted after we checked the len. + l.lock.Lock() + defer l.lock.Unlock() + + height := uint32(0) + s := l.List.Back() + if s != nil { + height = s.Value.(*ReceivedState).Height() + } + if l.Base() > height { + return l.Base() + } + return height +} + +// ListAsSlice will return the list as a slice +// to be iterated over in a threadsafe manner. 
+func (l *StatesReceived) ListAsSlice() []uint32 { + // Lock as we are iterating + l.lock.Lock() + defer l.lock.Unlock() + + slice := make([]uint32, l.List.Len()) + i := 0 + for e := l.List.Front(); e != nil; e = e.Next() { + slice[i] = e.Value.(*ReceivedState).Height() + i++ + } + return slice +} + +// Add adds a new received state to the list. +func (l *StatesReceived) Add(height uint32, msg *messages.DBStateMsg) { + if msg == nil { + return + } + + l.lock.Lock() + defer l.lock.Unlock() + + if height < l.base { + // We already know we had this height + // This should really never happen + return + } + + for e := l.List.Back(); e != nil; e = e.Prev() { + s := e.Value.(*ReceivedState) + if s == nil { + n := NewReceivedState(msg) + l.List.InsertAfter(n, e) + return + } else if height > s.Height() { + n := NewReceivedState(msg) + l.List.InsertAfter(n, e) + return + } else if height == s.Height() { + return + } + } + l.List.PushFront(NewReceivedState(msg)) +} + +// LockAndDelete removes a state from the StatesReceived list +func (l *StatesReceived) LockAndDelete(height uint32) { + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Back(); e != nil; e = e.Prev() { + s := e.Value.(*ReceivedState) + if s == nil { + break + } else if s.Height() == height { + l.List.Remove(e) + break + } + } +} + +// Get returns a member from the StatesReceived list +func (l *StatesReceived) Get(height uint32) *ReceivedState { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + for e := l.List.Back(); e != nil; e = e.Prev() { + s := e.Value.(*ReceivedState) + if height > s.Height() { + + } + if s.Height() == height { + return s + } + } + + return nil +} + +func (l *StatesReceived) Has(height uint32) bool { + // We want to lock here, as something can be deleted/added as we are iterating + // and mess up our for loop + l.lock.Lock() + defer l.lock.Unlock() + + if height <= l.Base() { 
+ return true + } + + for e := l.List.Front(); e != nil; e = e.Next() { + s := e.Value.(*ReceivedState) + if s == nil { + return false + } + if s.Height() == height { + return true + } + } + return false +} + +func (l *StatesReceived) GetNext() *ReceivedState { + l.lock.Lock() + defer l.lock.Unlock() + + if l.List.Len() == 0 { + return nil + } + e := l.List.Front() + if e != nil { + s := e.Value.(*ReceivedState) + + if s == nil { + l.List.Remove(e) + return nil + } + + if s.Height() == l.Base()+1 { + l.SetBaseLockless(s.Height()) + l.List.Remove(e) + return s + } + + if s.Height() <= l.Base() { + l.List.Remove(e) + } + } + return nil +} diff --git a/state/dbStateCatchup_test.go b/state/dbStateCatchup_test.go new file mode 100644 index 0000000000..301b9169e1 --- /dev/null +++ b/state/dbStateCatchup_test.go @@ -0,0 +1,363 @@ +package state_test + +import ( + "testing" + + "time" + + "math/rand" + + "math" + + "container/list" + + "github.com/FactomProject/factomd/common/directoryBlock" + "github.com/FactomProject/factomd/common/messages" + "github.com/FactomProject/factomd/state" +) + +// Made the lists generic so a test can be run on all of them +type GenericList interface { + Len() int + Add(uint32) + LockAndDelete(uint32) + Get(h uint32) state.GenericListItem + Has(uint32) bool + + // For testing + GetUnderyingList() *list.List +} + +var _ GenericList = (*MissingOverrideList)(nil) +var _ GenericList = (*RecievedOverrideList)(nil) +var _ GenericList = (*WaitingOverrideList)(nil) + +type RecievedOverrideList struct { + *state.StatesReceived +} + +func (l *RecievedOverrideList) Add(height uint32) { + msg := new(messages.DBStateMsg) + newdb := new(directoryBlock.DirectoryBlock) + header := new(directoryBlock.DBlockHeader) + header.DBHeight = height + newdb.Header = header + msg.DirectoryBlock = newdb + + l.StatesReceived.Add(height, msg) +} + +func (l *RecievedOverrideList) GetUnderyingList() *list.List { + return l.List +} + +func (l *RecievedOverrideList) Get(h 
uint32) state.GenericListItem { + if v := l.StatesReceived.Get(h); v != nil { + return v + } + return nil +} + +func (l *RecievedOverrideList) Len() int { + return l.StatesReceived.List.Len() +} + +type MissingOverrideList struct { + *state.StatesMissing +} + +func (l *MissingOverrideList) GetUnderyingList() *list.List { + return l.List +} + +func (l *MissingOverrideList) Get(h uint32) state.GenericListItem { + if v := l.StatesMissing.Get(h); v != nil { + return v + } + return nil +} + +func (l *MissingOverrideList) Has(h uint32) bool { + v := l.StatesMissing.Get(h) + return v != nil +} + +type WaitingOverrideList struct { + *state.StatesWaiting +} + +func (l *WaitingOverrideList) GetUnderyingList() *list.List { + return l.List +} + +func (l *WaitingOverrideList) Get(h uint32) state.GenericListItem { + if v := l.StatesWaiting.Get(h); v != nil { + return v + } + return nil +} + +// Testing concurrent read/write/deletes + +func TestWaitingListThreadSafety(t *testing.T) { + t.Parallel() + + list := state.NewStatesWaiting() + override := new(WaitingOverrideList) + override.StatesWaiting = list + testListThreadSafety(override, t, "TestWaitingListThreadSafety") +} + +func TestRecievedListThreadSafety(t *testing.T) { + t.Parallel() + + list := state.NewStatesReceived() + override := new(RecievedOverrideList) + override.StatesReceived = list + testListThreadSafety(override, t, "TestRecievedListThreadSafety") +} + +func TestMissingListThreadSafety(t *testing.T) { + t.Parallel() + + list := state.NewStatesMissing() + override := new(MissingOverrideList) + override.StatesMissing = list + testListThreadSafety(override, t, "TestMissingListThreadSafety") +} + +// This unit test verifies the dbstate lists are thread safe. 
+// It launches multiple instances of 4 threads: +// (1) [adds()] A write thread that radomly adds elements to the list +// (2) [dels()] A delete thread to delete those elements, in the order they are added +// but the list should be sorted, so they will be deleted from random points +// (3) [sucessful_reads()] A read thread that checks if the read is successful, as it hold the height from +// being deleted. This fails when we delete something as we iterate from (2) +// (4) [rand_reads()] This checks if the reads panic or not when contending with (2) +func testListThreadSafety(list GenericList, t *testing.T, testname string) { + done := false + + toAdd := make(chan int, 500) + // Run to add, ensure that we don't add repeats + go func() { + defer func() { + if r := recover(); r != nil { + // This thread will panic on r_close. Just a support thread + } + }() + + alreadyAdded := make(map[int]struct{}) + for { + if done { + return + } + + v := rand.Intn(100000000) + if _, ok := alreadyAdded[v]; ok { + continue + } + alreadyAdded[v] = struct{}{} + toAdd <- v + } + }() + + added := make(chan int, 10000) + + // Many random adds + adds := func() { + // make sure not to add the same number twice. That can mess up our DeleteLockless/reads + for { + if done { + return + } + v, open := <-toAdd + if !open { + return + } + + // t.Logf("Added %d", v) + list.Add(uint32(v)) + added <- v + // Should add at a slightly faster rate + time.Sleep(time.Duration(rand.Intn(95)) * time.Microsecond) + } + } + + // Random removes + dels := func() { + for { + if done { + return + } + + n, open := <-added + if !open || n == -1 { + return // Catch closed channel + } + // t.Logf("Deleted %d", n) + list.LockAndDelete(uint32(n)) + time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) + } + } + + // Random reads that are guaranteed to succeed as they hold + // the height. 
+ sucessful_reads := func() { + for { + if done { + return + } + + n, open := <-added + if !open || n == -1 { + return // Catch closed channel + } + + v := list.Get(uint32(n)) + if v == nil { + t.Errorf("Expected %d, but did not find it", n) + } + added <- n // Add it back to be deleted + time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) + } + } + + // These reads don't hold the add, so they can fail to retrieve if they are deleted first. + // This could get us a panic though, so we are trying to induce that. + rand_reads := func() { + for { + if done { + return + } + + // Make it iterate all the way through + list.Has(uint32(math.MaxUint32)) + time.Sleep(time.Duration(rand.Intn(30)) * time.Microsecond) + } + } + var _ = sucessful_reads + + for i := 0; i < 7; i++ { + go adds() + go dels() + go sucessful_reads() + go rand_reads() + } + + timer := make(chan bool) + go func() { + time.Sleep(4 * time.Second) + timer <- true + }() + + <-timer + close(toAdd) + done = true + // Drain the channel so we don't block on an add + go func() { + for { + select { + case <-added: + default: + return + } + } + }() + // Let the add get the message + time.Sleep(150 * time.Millisecond) + + close(added) + time.Sleep(150 * time.Millisecond) + // Unit test will panic if there is race conditions +} + +func TestWaitingListListAdditions(t *testing.T) { + t.Parallel() + + list := state.NewStatesWaiting() + override := new(WaitingOverrideList) + override.StatesWaiting = list + testDBStateListAdditionsMissing(override, t, "TestWaitingListThreadSafety") +} + +func TestRecievedListListAdditions(t *testing.T) { + t.Parallel() + + list := state.NewStatesReceived() + override := new(RecievedOverrideList) + override.StatesReceived = list + testDBStateListAdditionsMissing(override, t, "TestRecievedListThreadSafety") +} + +func TestMissingListListAdditions(t *testing.T) { + t.Parallel() + + list := state.NewStatesMissing() + override := new(MissingOverrideList) + override.StatesMissing = 
list + testDBStateListAdditionsMissing(override, t, "TestMissingListThreadSafety") +} + +// Testing list behavior +func testDBStateListAdditionsMissing(list GenericList, t *testing.T, testname string) { + // Check overlapping adds and out of order + for i := 50; i < 100; i++ { + list.Add(uint32(i)) + } + for i := 70; i >= 0; i-- { + list.Add(uint32(i)) + } + + if list.Len() != 100 { + t.Errorf("Exp len of 100, found %d", list.Len()) + } + + // Check out of order retrievals + for i := 0; i < 100; i++ { + r := uint32(rand.Intn(100)) // Random spot check + h := list.Get(r) + if h.Height() != r { + t.Errorf("Random retrival failed. Exp %d, found %d", r, h.Height()) + } + } + + uList := list.GetUnderyingList() + + // Check sorted list + h := uList.Front() + for i := 0; i < 100; i++ { + hI := h.Value.(state.GenericListItem) + if hI.Height() != uint32(i) { + t.Errorf("Exp %d, found %d", i, hI.Height()) + } + h = h.Next() + } + + //if list.Len() != 0 { + // t.Errorf("Exp len of 0, found %d", list.Len()) + //} +} + +func TestMissingConsecutive(t *testing.T) { + testMissingConsecutive(t, []int{10, 11, 12, 13, 20}, 10, 10, 13) + testMissingConsecutive(t, []int{10, 11, 12, 13}, 10, 10, 13) + testMissingConsecutive(t, []int{10, 11, 12, 13, 15, 20}, 10, 10, 13) + testMissingConsecutive(t, []int{10, 11, 12, 13, 15, 20}, 2, 10, 12) + testMissingConsecutive(t, []int{1, 10, 11, 12, 13, 15, 20}, 10, 1, 1) + testMissingConsecutive(t, []int{10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}, 10, 10, 20) + testMissingConsecutive(t, []int{}, 10, 0, 0) + testMissingConsecutive(t, []int{1, 2}, 10, 1, 2) + +} + +func testMissingConsecutive(t *testing.T, add []int, n, bExp, eExp int) { + m := state.NewStatesMissing() + for _, a := range add { + m.Add(uint32(a)) + } + + b, e := m.NextConsecutiveMissing(n) + if b != uint32(bExp) || e != uint32(eExp) { + t.Errorf("Expected %d-%d, found %d-%d", bExp, eExp, b, e) + } +} diff --git a/state/dbStateManager.go b/state/dbStateManager.go index 
500edc8a6b..b6a7158d4c 100644 --- a/state/dbStateManager.go +++ b/state/dbStateManager.go @@ -216,7 +216,6 @@ func (dbs *DBState) MarshalBinary() (rval []byte, err error) { defer func(pe *error) { if *pe != nil { fmt.Fprintf(os.Stderr, "DBState.MarshalBinary err:%v", *pe) - } }(&err) @@ -352,6 +351,7 @@ func (dbs *DBState) UnmarshalBinary(p []byte) error { } type DBStateList struct { + // TODO: mjb: get rid of LastBegin LastEnd and TimeToAsk LastEnd int LastBegin int TimeToAsk interfaces.Timestamp @@ -522,12 +522,12 @@ func (dbsl *DBStateList) UnmarshalBinaryData(p []byte) (newData []byte, err erro return } - listLen, err := buf.PopVarInt() + l, err := buf.PopVarInt() if err != nil { dbsl.State.LogPrintf("dbstateprocess", "DBStateList.UnmarshalBinaryData listLen err: %v", err) return } - for i := 0; i < int(listLen); i++ { + for i := 0; i < int(l); i++ { dbs := new(DBState) err = buf.PopBinaryMarshallable(dbs) if dbs.SaveStruct.IdentityControl == nil { @@ -562,6 +562,7 @@ func (d *DBState) ValidNext(state *State, next *messages.DBStateMsg) int { _ = s dirblk := next.DirectoryBlock dbheight := dirblk.GetHeader().GetDBHeight() + // If we don't have the previous blocks processed yet, then let's wait on this one. highestSavedBlk := state.GetHighestSavedBlk() @@ -955,7 +956,6 @@ func (list *DBStateList) ProcessBlocks(d *DBState) (progress bool) { // its links patched, so we can't process it. But if this is a repeat block (we have already processed // at this height) then we simply return. 
if d.Locked || d.IsNew || d.Repeat { - s.LogPrintf("dbstateprocess", "ProcessBlocks(%d) Skipping d.Locked(%v) || d.IsNew(%v) || d.Repeat(%v) : ", dbht, d.Locked, d.IsNew, d.Repeat) return false } @@ -985,6 +985,7 @@ func (list *DBStateList) ProcessBlocks(d *DBState) (progress bool) { if pd == nil { s.LogPrintf("dbstateprocess", "ProcessBlocks(%d) Skipping Prev Block Missing", dbht) + s.LogPrintf("dbstateprocess", "list: %v", list.State.DBStates.String()) return false // Can't process out of order } if !pd.Saved { @@ -1212,9 +1213,9 @@ func (list *DBStateList) ProcessBlocks(d *DBState) (progress bool) { // if we are following by blocks then this move us forward but if we are following by minutes the // code in ProcessEOM for minute 10 will have moved us forward s.SetLeaderTimestamp(d.DirectoryBlock.GetTimestamp()) - s.MoveStateToHeight(dbht+1, 0) // todo: is there a reason not to do this in MoveStateToHeight? fs.(*FactoidState).DBHeight = dbht + 1 + s.MoveStateToHeight(dbht+1, 0) } // Note about dbsigs.... If we processed the previous minute, then we generate the DBSig for the next block. 
@@ -1427,9 +1428,12 @@ func (list *DBStateList) SaveDBStateToDB(d *DBState) (progress bool) { switch en.ECID() { case constants.ECIDChainCommit: list.State.NumNewChains++ + list.State.ExecuteFromHolding(en.GetEntryHash().Fixed()) case constants.ECIDEntryCommit: list.State.NumNewEntries++ + list.State.ExecuteFromHolding(en.GetEntryHash().Fixed()) } + } pl := list.State.ProcessLists.Get(uint32(dbheight)) @@ -1472,6 +1476,10 @@ func (list *DBStateList) SaveDBStateToDB(d *DBState) (progress bool) { } else { list.State.LogPrintf("dbstateprocess", "Error saving eblock from dbstate, eblock not allowed") } + // if this is chain head + if eb.GetHeader().GetEBSequence() == 0 { + list.State.ExecuteFromHolding(eb.GetHeader().GetChainID().Fixed()) + } } for _, e := range d.Entries { // If it's in the DBlock @@ -1501,7 +1509,6 @@ func (list *DBStateList) SaveDBStateToDB(d *DBState) (progress bool) { if err := list.State.DB.ProcessDBlockMultiBatch(d.DirectoryBlock); err != nil { panic(err.Error()) } - if err := list.State.DB.ExecuteMultiBatch(); err != nil { panic(err.Error()) } @@ -1513,11 +1520,7 @@ func (list *DBStateList) SaveDBStateToDB(d *DBState) (progress bool) { if err != nil { panic(err) } - if _, ok := allowedEBlocks[keymr.Fixed()]; ok { - for _, e := range eb.GetBody().GetEBEntries() { - pl.State.WriteEntry <- pl.GetNewEntry(e.Fixed()) - } - } else { + if _, ok := allowedEBlocks[keymr.Fixed()]; !ok { list.State.LogPrintf("dbstateprocess", "Error saving eblock from process list, eblock not allowed") } } @@ -1598,7 +1601,6 @@ func (list *DBStateList) SaveDBStateToDB(d *DBState) (progress bool) { } func (list *DBStateList) UpdateState() (progress bool) { - s := list.State _ = s if len(list.DBStates) != 0 { diff --git a/state/dbstateCatchup.go b/state/dbstateCatchup.go deleted file mode 100644 index 18a4607467..0000000000 --- a/state/dbstateCatchup.go +++ /dev/null @@ -1,130 +0,0 @@ -package state - -import ( - "github.com/FactomProject/factomd/common/messages" -) - -// 
Once a second at most, we check to see if we need to pull down some blocks to catch up. -func (list *DBStateList) Catchup(justDoIt bool) { - // We only check if we need updates once every so often. - - if !list.State.DBFinished { - return // don't ask for dbstates while we are loading from the database - } - - now := list.State.GetTimestamp() - - top := int(list.State.DBHeightAtBoot) - hs := int(list.State.GetHighestSavedBlk()) - heightAtBoot := int(list.State.GetDBHeightAtBoot()) - if hs < top { - hs = top // never ask for states we already have - } - ha := int(list.State.GetHighestAck()) - hk := int(list.State.GetHighestKnownBlock()) - - if hk > ha+2 { - ha = hk + 2 - } - - if hs < heightAtBoot { - hs = heightAtBoot // don't ask for blocks we have - } - if hk < heightAtBoot { - hk = heightAtBoot // don't ask for block we have - } - - begin := hs + 1 - end := ha - - ask := func() { - - tolerance := 1 - if list.State.Leader { - tolerance = 2 - } - - if list.TimeToAsk != nil && ha-hs > tolerance && now.GetTime().After(list.TimeToAsk.GetTime()) { - - // Find the first dbstate we don't have. - for i, v := range list.State.DBStatesReceived { - ix := i + list.State.DBStatesReceivedBase - if ix <= hs { - continue - } - if ix >= ha { - return - } - if v == nil { - begin = ix - break - } - } - - for len(list.State.DBStatesReceived)+list.State.DBStatesReceivedBase <= ha { - list.State.DBStatesReceived = append(list.State.DBStatesReceived, nil) - } - - // Find the end of the dbstates that we don't have. 
- for i, v := range list.State.DBStatesReceived { - ix := i + list.State.DBStatesReceivedBase - - if ix <= begin { - continue - } - if ix >= end { - break - } - if v != nil { - end = ix - 1 - break - } - } - - if list.State.RunLeader && !list.State.IgnoreMissing { - msg := messages.NewDBStateMissing(list.State, uint32(begin), uint32(end+5)) - - if msg != nil { - // list.State.RunLeader = false - // list.State.StartDelay = list.State.GetTimestamp().GetTimeMilli() - msg.SendOut(list.State, msg) - list.State.DBStateAskCnt++ - list.TimeToAsk.SetTimeSeconds(now.GetTimeSeconds() + 6) - list.LastBegin = begin - list.LastEnd = end - } - } - } - } - - if end-begin > 200 { - end = begin + 200 - } - - if end+3 > begin && justDoIt { - ask() - return - } - - // return if we are caught up, and clear our timer - if end-begin < 1 { - list.TimeToAsk = nil - return - } - - // First Ask. Because the timer is nil! - if list.TimeToAsk == nil { - // Okay, have nothing in play, so wait a bit just in case. - list.TimeToAsk = list.State.GetTimestamp() - list.TimeToAsk.SetTimeSeconds(now.GetTimeSeconds() + 6) - list.LastBegin = begin - list.LastEnd = end - return - } - - if list.TimeToAsk.GetTime().Before(now.GetTime()) { - ask() - return - } - -} diff --git a/state/factoidstate.go b/state/factoidstate.go index 5c478c9a9c..4d82334227 100644 --- a/state/factoidstate.go +++ b/state/factoidstate.go @@ -227,7 +227,7 @@ func (fs *FactoidState) ValidateTransactionAge(trans interfaces.ITransaction) er // Only add valid transactions to the current func (fs *FactoidState) AddTransaction(index int, trans interfaces.ITransaction) error { - if err := fs.Validate(index, trans); err != nil { + if err, _ := fs.Validate(index, trans); err != nil { return err } if err := fs.ValidateTransactionAge(trans); err != nil { @@ -268,7 +268,7 @@ func (fs *FactoidState) UpdateECTransaction(rt bool, trans interfaces.IECBlockEn fs.State.GetE(rt, t.ECPubKey.Fixed()), t.Credits) } - fs.State.PutE(rt, t.ECPubKey.Fixed(), 
v) + fs.State.PutE(rt, t.ECPubKey.Fixed(), v) // deduct Chain Commit fs.State.NumTransactions++ fs.State.Replay.IsTSValid(constants.INTERNAL_REPLAY, t.GetSigHash(), t.GetTimestamp()) fs.State.Replay.IsTSValid(constants.NETWORK_REPLAY, t.GetSigHash(), t.GetTimestamp()) @@ -282,10 +282,11 @@ func (fs *FactoidState) UpdateECTransaction(rt bool, trans interfaces.IECBlockEn fs.State.GetE(rt, t.ECPubKey.Fixed()), t.Credits) } - fs.State.PutE(rt, t.ECPubKey.Fixed(), v) + fs.State.PutE(rt, t.ECPubKey.Fixed(), v) // deduct EntryCommit fs.State.NumTransactions++ fs.State.Replay.IsTSValid(constants.INTERNAL_REPLAY, t.GetSigHash(), t.GetTimestamp()) fs.State.Replay.IsTSValid(constants.NETWORK_REPLAY, t.GetSigHash(), t.GetTimestamp()) + default: return fmt.Errorf("Unknown EC Transaction") } @@ -324,14 +325,23 @@ func (fs *FactoidState) UpdateTransaction(rt bool, trans interfaces.ITransaction for _, output := range trans.GetOutputs() { adr := output.GetAddress().Fixed() oldv := fs.State.GetF(rt, adr) + + // fs.State.LogPrintf("newHolding", "process FCT Deposit %x %s", adr, trans.String()) + fs.State.ExecuteFromHolding(adr) // Process deposit of FCT + fs.State.PutF(rt, adr, oldv+int64(output.GetAmount())) } if len(trans.GetECOutputs()) > 0 { - fs.State.LogPrintf("entrycredits", "At %d process %s", fs.DBHeight, trans.String()) + // fs.State.LogPrintf("entrycredits", "At %d process %s", fs.DBHeight, trans.String()) } for _, ecOut := range trans.GetECOutputs() { ecbal := int64(ecOut.GetAmount()) / int64(fs.State.FactoshisPerEC) - fs.State.PutE(rt, ecOut.GetAddress().Fixed(), fs.State.GetE(rt, ecOut.GetAddress().Fixed())+ecbal) + adr := ecOut.GetAddress().Fixed() + fs.State.PutE(rt, adr, fs.State.GetE(rt, adr)+ecbal) // Add EC's from FCT + + // execute any messages that were waiting on this EC address + // fs.State.LogPrintf("newHolding", "process EC Deposit %x %s", adr, trans.String()) + fs.State.ExecuteFromHolding(adr) // Process deposit of EC } fs.State.NumTransactions++ return 
nil @@ -376,24 +386,26 @@ func (fs *FactoidState) ProcessEndOfBlock(state interfaces.IState) { // Returns an error message about what is wrong with the transaction if it is // invalid, otherwise you are good to go. -func (fs *FactoidState) Validate(index int, trans interfaces.ITransaction) error { +func (fs *FactoidState) Validate(index int, trans interfaces.ITransaction) (err error, holdAddr [32]byte) { var sums = make(map[[32]byte]uint64, 10) // Look at the sum of an address's inputs for _, input := range trans.GetInputs() { // to a transaction. bal, err := factoid.ValidateAmounts(sums[input.GetAddress().Fixed()], input.GetAmount()) if err != nil { - return err + return err, holdAddr } curbal := fs.State.GetF(true, input.GetAddress().Fixed()) if int64(bal) > curbal { - return fmt.Errorf("%20s DBHT %d %s %d %s %d %s", + err = fmt.Errorf("%20s DBHT %d %s %d %s %d %s", fs.State.GetFactomNodeName(), fs.DBHeight, "Not enough funds in input addresses (", bal, ") to cover the transaction (", curbal, ")") + + return err, input.GetAddress().Fixed() } sums[input.GetAddress().Fixed()] = bal } - return nil + return nil, holdAddr } func (fs *FactoidState) GetCoinbaseTransaction(dbheight uint32, ftime interfaces.Timestamp) interfaces.ITransaction { diff --git a/state/grants.go b/state/grants.go index 6b4fe079f9..9ca86e4a22 100644 --- a/state/grants.go +++ b/state/grants.go @@ -485,7 +485,7 @@ func GetHardCodedGrants() []HardGrant { // The 42nd Factoid total: 1800 FCT HardGrant{194126, 1800e8, validateAddress("FA3AEL2H9XZy3n199USs2poCEJBkK1Egy6JXhLehfLJjUYMKh1zS")}, - + // Centis BV total: 1800 FCT HardGrant{194126, 1800e8, validateAddress("FA2hvRaci9Kks9cLNkEUFcxzUJuUFaaAE1eWYLqa2qk1k9pVFVBp")}, @@ -495,18 +495,18 @@ func GetHardCodedGrants() []HardGrant { // DBGrow Inc total: 1800 FCT HardGrant{194126, 1800e8, validateAddress("FA3HSuFo9Soa5ZnG82JHqyKiRi4Pw17LxPTo9AsCaFNLCGkXkgsu")}, - // Canonical Ledgers: 600 FCT + // Canonical Ledgers: 600 FCT // (2019-03-07 - 2019-04-07) 
HardGrant{194126, 600e8, validateAddress("FA2PEXgRiPd14NzUP47XfVTgEnvjtLSebBZvnM8gM7cJAMuqWs89")}, - // TRGG3R LLC: 1200 FCT + // TRGG3R LLC: 1200 FCT // (2019-04-07 - 2019-06-07) HardGrant{194126, 1200e8, validateAddress("FA2oecgJW3XWnXzHhQQoULmMeKC97uAgHcPd4kEowTb3csVkbDc9")}, // -------------------------------------------------------- - + // Anchor and Oracle master grants - // Factom-Inc-013 -- 900 FCT + // Factom-Inc-013 -- 900 FCT // Oracle Master -- (2019-06-09 - 2019-09-09) HardGrant{194126, 900e8, validateAddress("FA3fpiZ91MCRRFjVGfNXK4pg7vx3BT3aSRyoVqgptZCX7N5BNR8P")}, @@ -517,8 +517,8 @@ func GetHardCodedGrants() []HardGrant { // Committee Grants - // The Core Committee has via grant Core-Committee-002 been awarded an additional 500 FCT grant - // to be paid out in installments upon request from the committee. The current total amount set + // The Core Committee has via grant Core-Committee-002 been awarded an additional 500 FCT grant + // to be paid out in installments upon request from the committee. The current total amount set // aside for the Core Committee is 1000 FCT. 
// -------------------------------------------------------- @@ -594,11 +594,11 @@ func GetHardCodedGrants() []HardGrant { HardGrant{194126, 1876e8, validateAddress("FA2T1tgVwrHDVpMqHRRz5676x4CHkZqXGGp1CmBarYg5ZWcU85g4")}, // TFA-001 -- 4860 - // FAT Firmware upgrade for Ledger Nano S and X + // FAT Firmware upgrade for Ledger Nano S and X HardGrant{194126, 4860e8, validateAddress("FA3sUHyYThjwJSSnun5jx91obexMJibaUCeGFoZ9S1SBzcY1xPCP")}, // -------------------------------------------------------- - // Miscellaneous Grants + // Miscellaneous Grants // Go-Immutable-001 -- 20000 FCT // Comprehensive Market Strategy & Execution diff --git a/state/identity.go b/state/identity.go index 60a6b0dfb8..a9b00e4a0b 100644 --- a/state/identity.go +++ b/state/identity.go @@ -184,6 +184,7 @@ func ProcessIdentityToAdminBlock(st *State, chainID interfaces.IHash, servertype // Add to admin block if servertype == 0 { id.Status = constants.IDENTITY_PENDING_FEDERATED_SERVER + st.LogPrintf("executeMsg", "Add server 2 %x", chainID.Bytes()[3:6]) st.LeaderPL.AdminBlock.AddFedServer(chainID) } else if servertype == 1 { id.Status = constants.IDENTITY_PENDING_AUDIT_SERVER @@ -201,6 +202,7 @@ func ProcessIdentityToAdminBlock(st *State, chainID interfaces.IHash, servertype } st.LeaderPL.AdminBlock.AddEfficiency(chainID, id.Efficiency) + st.LogPrintf("executeMsg", "Added server %x", chainID.Bytes()[3:6]) return true } diff --git a/state/loadDatabase.go b/state/loadDatabase.go index a1090a3edb..9eca59c4de 100644 --- a/state/loadDatabase.go +++ b/state/loadDatabase.go @@ -43,6 +43,7 @@ func LoadDatabase(s *State) { } // prevent MMR processing from happening for blocks being loaded from the database s.DBHeightAtBoot = blkCnt + fmt.Fprintf(os.Stderr, "%20s Loading blocks from disk. 
Database load going from %d (savestate) to %d (disk)\n", s.GetFactomNodeName(), s.GetDBHeightComplete(), s.DBHeightAtBoot) first := time.Now() last := first @@ -50,6 +51,8 @@ func LoadDatabase(s *State) { //msg, err := s.LoadDBState(blkCnt) start := s.GetDBHeightComplete() + s.LogPrintf("dbstatecatchup", "LoadDatabase1 DBHeightAtBoot: %d, DBHeightComplete: %d", s.DBHeightAtBoot, start) + numberOfBlocksLoaded := 0 // The number of blocks we load off disk if start > 0 { start++ @@ -74,6 +77,11 @@ func LoadDatabase(s *State) { } msg, err := s.LoadDBState(uint32(i)) + es := "loaded" + if err != nil { + es = err.Error() + } + s.LogMessage("dbstatecatchup", fmt.Sprintf("LoadDatabase1 %d : %s", i, es), msg) if err != nil { s.Println(err.Error()) os.Stderr.WriteString(fmt.Sprintf("%20s Error reading database at block %d: %s\n", s.FactomNodeName, i, err.Error())) @@ -88,14 +96,10 @@ func LoadDatabase(s *State) { // this will cause s.DBFinished to go true } + numberOfBlocksLoaded++ s.LogMessage("InMsgQueue", "enqueue_LoadDatabase1", msg) msg.SetLocal(true) - s.InMsgQueue().Enqueue(msg) - if s.InMsgQueue().Length() > 200 || len(s.DBStatesReceived) > 50 { - for s.InMsgQueue().Length() > 50 || len(s.DBStatesReceived) > 50 { - time.Sleep(100 * time.Millisecond) - } - } + s.MsgQueue() <- msg } else { // os.Stderr.WriteString(fmt.Sprintf("%20s Last Block in database: %d\n", s.FactomNodeName, i)) break @@ -104,7 +108,7 @@ func LoadDatabase(s *State) { s.Print("\r", "\\|/-"[i%4:i%4+1]) } - if blkCnt == 0 { + if numberOfBlocksLoaded == 0 { // No blocks loaded from disk, therefore generate the genesis s.Println("\n***********************************") s.Println("******* New Database **************") s.Println("***********************************\n") diff --git a/state/plugins.go b/state/plugins.go index 3d0646e36b..a21e443559 100644 --- a/state/plugins.go +++ b/state/plugins.go @@ -67,13 +67,12 @@ func (u *UploadController) Close() { // sortRequests sorts through the initial requests 
to toss out repeats func (u *UploadController) sortRequests() { for { - backToTopSortRequests: select { // Avoid defering the lock, more overhead case s := <-u.requestUploadQueue: if _, ok := u.uploaded[s]; ok { // Already uploaded, toss out - goto backToTopSortRequests + continue } u.uploaded[s] = struct{}{} diff --git a/state/processList.go b/state/processList.go index 38d4b0f6ba..1f22c8133a 100644 --- a/state/processList.go +++ b/state/processList.go @@ -911,6 +911,7 @@ func (p *ProcessList) Process(s *State) (progress bool) { delete(s.Acks, msgHashFixed) //delete(s.Holding, msgHashFixed) + // REVIEW: does this leave msg in dependent holding? s.DeleteFromHolding(msgHashFixed, msg, "msg.Process done") } else { s.LogMessage("process", fmt.Sprintf("retry %v/%v/%v", p.DBHeight, i, j), msg) @@ -1062,17 +1063,9 @@ func (p *ProcessList) AddToProcessList(s *State, ack *messages.Ack, m interfaces s.LogPrintf("executeMsg", "m/ack mismatch m-%x a-%x", m.GetMsgHash().Fixed(), ack.GetHash().Fixed()) } - // Both the ack and the message hash to the same GetHash() - m.SetLocal(false) - ack.SetLocal(false) - ack.SetPeer2Peer(false) - m.SetPeer2Peer(false) - if ack.GetHash().Fixed() != m.GetMsgHash().Fixed() { s.LogPrintf("executeMsg", "m/ack mismatch m-%x a-%x", m.GetMsgHash().Fixed(), ack.GetHash().Fixed()) } - m.SendOut(s, m) - ack.SendOut(s, ack) for len(vm.List) <= int(ack.Height) { vm.List = append(vm.List, nil) @@ -1094,11 +1087,22 @@ func (p *ProcessList) AddToProcessList(s *State, ack *messages.Ack, m interfaces } s.LogMessage("processList", fmt.Sprintf("Added at %d/%d/%d by %s", ack.DBHeight, ack.VMIndex, ack.Height, atomic.WhereAmIString(1)), m) + + // If we add the message to the process list, ensure we actually process that + // message, so the next msg will be able to added without going into holding. 
if ack.IsLocal() { for p.Process(s) { } } + // Both the ack and the message hash to the same GetHash() + ack.SetLocal(false) + ack.SetPeer2Peer(false) + m.SetPeer2Peer(false) + m.SetLocal(false) + + m.SendOut(s, m) + ack.SendOut(s, ack) } func (p *ProcessList) ContainsDBSig(serverID interfaces.IHash) bool { diff --git a/state/rateCalculator_test.go b/state/rateCalculator_test.go index 912e0762dc..8ce07c2455 100644 --- a/state/rateCalculator_test.go +++ b/state/rateCalculator_test.go @@ -58,7 +58,7 @@ func (k *Exposer) String() string { return fmt.Sprintf("%f, %f, %f, %f, %f", k.AWA, k.ATA, k.ABU, k.CWA, k.CTA) } -func close(a, b, tolerance float64) bool { +func r_close(a, b, tolerance float64) bool { diff := a - b if diff < 0 { diff = diff * -1 @@ -70,25 +70,25 @@ func close(a, b, tolerance float64) bool { } func shouldbe(awa, abu, cwa float64, e *Exposer) error { - if !close(awa, e.AWA, 0.2) { + if !r_close(awa, e.AWA, 0.2) { return fmt.Errorf("AWA is %f, should be %f", awa, e.AWA) } // By Speeding up the tick time, these numbers are usually off - //if !close(ata, e.ATA, 15) { + //if !r_close(ata, e.ATA, 15) { //return fmt.Errorf("ATA is %f, should be %f", ata, e.ATA) //} - if !close(abu, e.ABU, 0.1) { + if !r_close(abu, e.ABU, 0.1) { return fmt.Errorf("ABU is %f, should be %f", abu, e.ABU) } - if !close(cwa, e.CWA, 0.2) { + if !r_close(cwa, e.CWA, 0.2) { return fmt.Errorf("CWA is %f, should be %f", cwa, e.CWA) } // By Speeding up the tick time, these numbers are usually off - //if !close(cta, e.CTA, 15) { + //if !r_close(cta, e.CTA, 15) { //return fmt.Errorf("CTA is %f, should be %f", cta, e.CTA) //} diff --git a/state/saveAndRestore.go b/state/saveAndRestore.go index 676b093fe7..9c6c2bf1d6 100644 --- a/state/saveAndRestore.go +++ b/state/saveAndRestore.go @@ -732,7 +732,7 @@ func (ss *SaveState) MarshalBinary() (rval []byte, err error) { } defer func(pe *error) { if *pe != nil { - fmt.Fprintf(os.Stderr, "SaveState.MarshalBinary err:%v", *pe) + 
fmt.Fprintf(os.Stderr, "SaveState.MarshalBinary err:%v\n", *pe) } }(&err) buf := primitives.NewBuffer(nil) diff --git a/state/state.go b/state/state.go index 4888c87c01..0e39f966c3 100644 --- a/state/state.go +++ b/state/state.go @@ -19,6 +19,8 @@ import ( "sync" "time" + "github.com/FactomProject/factomd/common/constants/runstate" + "github.com/FactomProject/factomd/activations" "github.com/FactomProject/factomd/common/adminBlock" "github.com/FactomProject/factomd/common/constants" @@ -37,6 +39,8 @@ import ( "github.com/FactomProject/factomd/wsapi" "github.com/FactomProject/logrustash" + "regexp" + "github.com/FactomProject/factomd/Utilities/CorrectChainHeads/correctChainHeads" log "github.com/sirupsen/logrus" ) @@ -49,7 +53,7 @@ var _ = fmt.Print type State struct { Logger *log.Entry - IsRunning bool + RunState runstate.RunState NetworkController *p2p.Controller Salt interfaces.IHash Cfg interfaces.IFactomConfig @@ -209,6 +213,10 @@ type State struct { IgnoreDone bool IgnoreMissing bool + // Timout and Limit for outstanding missing DBState requests + RequestTimeout time.Duration + RequestLimit int + LLeaderHeight uint32 Leader bool LeaderVMIndex int @@ -293,6 +301,10 @@ type State struct { // Directory Block State DBStates *DBStateList // Holds all DBStates not yet processed. + StatesMissing *StatesMissing + StatesWaiting *StatesWaiting + StatesReceived *StatesReceived + // Having all the state for a particular directory block stored in one structure // makes creating the next state, updating the various states, and setting up the next // state much more simple. @@ -412,10 +424,14 @@ type State struct { processCnt int64 // count of attempts to process .. 
so we can see if the thread is running MMRInfo // fields for MMR processing - reportedActivations [activations.ACTIVATION_TYPE_COUNT + 1]bool // flags about which activations we have reported (+1 because we don't use 0) - validatorLoopThreadID string - + reportedActivations [activations.ACTIVATION_TYPE_COUNT + 1]bool // flags about which activations we have reported (+1 because we don't use 0) + validatorLoopThreadID string + OutputRegEx *regexp.Regexp + OutputRegExString string + InputRegEx *regexp.Regexp + InputRegExString string executeRecursionDetection map[[32]byte]interfaces.IMsg + Hold HoldingList } var _ interfaces.IState = (*State)(nil) @@ -429,8 +445,8 @@ func (s *State) GetConfigPath() string { return s.ConfigFilePath } -func (s *State) Running() bool { - return s.IsRunning +func (s *State) GetRunState() runstate.RunState { + return s.RunState } func (s *State) Clone(cloneNumber int) interfaces.IState { @@ -464,6 +480,7 @@ func (s *State) Clone(cloneNumber int) interfaces.IState { newState.FactomNodeName = s.Prefix + "FNode" + number newState.FactomdVersion = s.FactomdVersion + newState.RunState = runstate.New // reset runstate since this clone will be started by sim node newState.DropRate = s.DropRate newState.LdbPath = s.LdbPath + "/Sim" + number newState.JournalFile = s.LogPath + "/journal" + number + ".log" @@ -538,6 +555,8 @@ func (s *State) Clone(cloneNumber int) interfaces.IState { newState.RpcPass = s.RpcPass newState.RpcAuthHash = s.RpcAuthHash + newState.RequestTimeout = s.RequestTimeout + newState.RequestLimit = s.RequestLimit newState.FactomdTLSEnable = s.FactomdTLSEnable newState.factomdTLSKeyFile = s.factomdTLSKeyFile newState.factomdTLSCertFile = s.factomdTLSCertFile @@ -777,6 +796,12 @@ func (s *State) LoadConfig(filename string, networkFlag string) { s.ControlPanelPort = cfg.App.ControlPanelPort s.RpcUser = cfg.App.FactomdRpcUser s.RpcPass = cfg.App.FactomdRpcPass + // if RequestTimeout is not set by the configuration it will default to 
0. + // If it is 0, the loop that uses it will set it to the blocktime/20 + // We set it there, as blktime might change after this function (from mainnet selection) + s.RequestTimeout = time.Duration(cfg.App.RequestTimeout) * time.Second + s.RequestLimit = cfg.App.RequestLimit + s.StateSaverStruct.FastBoot = cfg.App.FastBoot s.StateSaverStruct.FastBootLocation = cfg.App.FastBootLocation s.FastBoot = cfg.App.FastBoot @@ -830,6 +855,13 @@ func (s *State) LoadConfig(filename string, networkFlag string) { s.IdentityChainID = identity s.LogPrintf("AckChange", "Load IdentityChainID \"%v\"", s.IdentityChainID.String()) } + + if cfg.App.P2PIncoming > 0 { + p2p.MaxNumberIncomingConnections = cfg.App.P2PIncoming + } + if cfg.App.P2POutgoing > 0 { + p2p.NumberPeersToConnect = cfg.App.P2POutgoing + } } else { s.LogPath = "database/" s.LdbPath = "database/ldb" @@ -922,6 +954,7 @@ func (s *State) Init() { //s.Logger = log.NewLogFromConfig(s.LogPath, s.LogLevel, "State") } + s.Hold.Init(s) // setup the dependant holding map s.TimeOffset = new(primitives.Timestamp) //interfaces.Timestamp(int64(rand.Int63() % int64(time.Microsecond*10))) s.InvalidMessages = make(map[[32]byte]interfaces.IMsg, 0) @@ -930,17 +963,17 @@ func (s *State) Init() { s.tickerQueue = make(chan int, 100) //ticks from a clock s.timerMsgQueue = make(chan interfaces.IMsg, 100) //incoming eom notifications, used by leaders s.ControlPanelChannel = make(chan DisplayState, 20) - s.networkInvalidMsgQueue = make(chan interfaces.IMsg, 100) //incoming message queue from the network messages - s.networkOutMsgQueue = NewNetOutMsgQueue(constants.INMSGQUEUE_MED) //Messages to be broadcast to the network - s.inMsgQueue = NewInMsgQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for Factom application messages - s.inMsgQueue2 = NewInMsgQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for Factom application messages - s.electionsQueue = NewElectionQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for 
Factom application messages - s.apiQueue = NewAPIQueue(constants.INMSGQUEUE_HIGH) //incoming message queue from the API - s.ackQueue = make(chan interfaces.IMsg, 50) //queue of Leadership messages - s.msgQueue = make(chan interfaces.IMsg, 50) //queue of Follower messages - s.MissingEntries = make(chan *MissingEntry, constants.INMSGQUEUE_HIGH) //Entries I discover are missing from the database - s.UpdateEntryHash = make(chan *EntryUpdate, constants.INMSGQUEUE_HIGH) //Handles entry hashes and updating Commit maps. - s.WriteEntry = make(chan interfaces.IEBEntry, constants.INMSGQUEUE_HIGH) //Entries to be written to the database + s.networkInvalidMsgQueue = make(chan interfaces.IMsg, 100) //incoming message queue from the network messages + s.networkOutMsgQueue = NewNetOutMsgQueue(constants.INMSGQUEUE_MED) //Messages to be broadcast to the network + s.inMsgQueue = NewInMsgQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for Factom application messages + s.inMsgQueue2 = NewInMsgQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for Factom application messages + s.electionsQueue = NewElectionQueue(constants.INMSGQUEUE_HIGH) //incoming message queue for Factom application messages + s.apiQueue = NewAPIQueue(constants.INMSGQUEUE_HIGH) //incoming message queue from the API + s.ackQueue = make(chan interfaces.IMsg, 50) //queue of Leadership messages + s.msgQueue = make(chan interfaces.IMsg, 50) //queue of Follower messages + s.MissingEntries = make(chan *MissingEntry, constants.INMSGQUEUE_HIGH) //Entries I discover are missing from the database + s.UpdateEntryHash = make(chan *EntryUpdate, constants.INMSGQUEUE_HIGH) //Handles entry hashes and updating Commit maps. 
+ s.WriteEntry = make(chan interfaces.IEBEntry, constants.INMSGQUEUE_LOW) //Entries to be written to the database if s.Journaling { f, err := os.Create(s.JournalFile) @@ -982,6 +1015,10 @@ func (s *State) Init() { s.DBStates.State = s s.DBStates.DBStates = make([]*DBState, 0) + s.StatesMissing = NewStatesMissing() + s.StatesWaiting = NewStatesWaiting() + s.StatesReceived = NewStatesReceived() + switch s.NodeMode { case "FULL": s.Leader = false @@ -1071,6 +1108,9 @@ func (s *State) Init() { s.Println("\nExchange rate chain id set to ", s.FERChainId) s.Println("\nExchange rate Authority Public Key set to ", s.ExchangeRateAuthorityPublicKey) + // We want this run after the network settings are configured + go s.DBStates.Catchup() // Launch in go routine as it blocks until we are synced from disk + s.AuditHeartBeats = make([]interfaces.IMsg, 0) // If we cloned the Identity control of another node, don't reset! @@ -1924,8 +1964,6 @@ func (s *State) UpdateState() (progress bool) { } } - s.DBStates.Catchup(false) - s.SetString() if s.ControlPanelDataRequest { s.CopyStateToControlPanel() @@ -2948,3 +2986,31 @@ func (s *State) IsActive(id activations.ActivationType) bool { return rval } + +func (s *State) PassOutputRegEx(RegEx *regexp.Regexp, RegExString string) { + s.OutputRegEx = RegEx + s.OutputRegExString = RegExString +} + +func (s *State) GetOutputRegEx() (*regexp.Regexp, string) { + return s.OutputRegEx, s.OutputRegExString +} + +func (s *State) PassInputRegEx(RegEx *regexp.Regexp, RegExString string) { + s.InputRegEx = RegEx + s.InputRegExString = RegExString +} + +func (s *State) GetInputRegEx() (*regexp.Regexp, string) { + return s.InputRegEx, s.InputRegExString +} + +func (s *State) GetIgnoreDone() bool { + return s.IgnoreDone +} + +func (s *State) ShutdownNode(exitCode int) { + fmt.Println(fmt.Sprintf("Initiating a graceful shutdown of node %s. 
The exit code is %v.", s.FactomNodeName, exitCode)) + s.RunState = runstate.Stopping + s.ShutdownChan <- exitCode +} diff --git a/state/stateConsensus.go b/state/stateConsensus.go index 9b7b1acef6..49de608aae 100644 --- a/state/stateConsensus.go +++ b/state/stateConsensus.go @@ -67,6 +67,9 @@ func (s *State) LogPrintf(logName string, format string, more ...interface{}) { } } func (s *State) AddToHolding(hash [32]byte, msg interfaces.IMsg) { + if msg.Type() == constants.VOLUNTEERAUDIT { + s.LogMessage("holding election?", "add", msg) + } _, ok := s.Holding[hash] if !ok { s.Holding[hash] = msg @@ -84,6 +87,16 @@ func (s *State) DeleteFromHolding(hash [32]byte, msg interfaces.IMsg, reason str } } +var FilterTimeLimit = int64(Range * 60 * 2 * 1000000000) // Filter hold two hours of messages, one in the past one in the future + +func (s *State) GetFilterTimeNano() int64 { + t := s.GetMessageFilterTimestamp().GetTime().UnixNano() // this is the start of the filter + if t == 0 { + panic("got 0 time") + } + return t +} + // this is the common validation to all messages. they must not be a reply, they must not be out size the time window // for the replay filter. 
func (s *State) Validate(msg interfaces.IMsg) (validToSend int, validToExec int) { @@ -108,8 +121,7 @@ func (s *State) Validate(msg interfaces.IMsg) (validToSend int, validToExec int) // Allow these thru as they do not have Ack's (they don't change processlists) default: // Make sure we don't put in an old ack'd message (outside our repeat filter range) - tlim := int64(Range * 60 * 2 * 1000000000) // Filter hold two hours of messages, one in the past one in the future - filterTime := s.GetMessageFilterTimestamp().GetTime().UnixNano() // this is the start of the filter + filterTime := s.GetFilterTimeNano() if filterTime == 0 { panic("got 0 time") @@ -119,8 +131,9 @@ func (s *State) Validate(msg interfaces.IMsg) (validToSend int, validToExec int) // Make sure we don't put in an old msg (outside our repeat range) { // debug - if msgtime < filterTime || msgtime > (filterTime+tlim) { + if msgtime < filterTime || msgtime > (filterTime+FilterTimeLimit) { s.LogPrintf("executeMsg", "MsgFilter %s", s.GetMessageFilterTimestamp().GetTime().String()) + s.LogPrintf("executeMsg", "Leader %s", s.GetLeaderTimestamp().GetTime().String()) s.LogPrintf("executeMsg", "Message %s", msg.GetTimestamp().GetTime().String()) } @@ -129,12 +142,11 @@ func (s *State) Validate(msg interfaces.IMsg) (validToSend int, validToExec int) if msgtime < filterTime { s.LogMessage("executeMsg", "drop message, more than an hour in the past", msg) return -1, -1 // Old messages are bad. - } else if msgtime > (filterTime + tlim) { + } else if msgtime > (filterTime + FilterTimeLimit) { s.LogMessage("executeMsg", "hold message from the future", msg) return 0, 0 // Far Future (>1H) stuff I can hold for now. It might be good later? 
} } - switch msg.Type() { case constants.REVEAL_ENTRY_MSG, constants.COMMIT_ENTRY_MSG, constants.COMMIT_CHAIN_MSG: if !s.NoEntryYet(msg.GetHash(), nil) { @@ -151,6 +163,14 @@ func (s *State) Validate(msg interfaces.IMsg) (validToSend int, validToExec int) if validToSend == -1 { // if the msg says drop then we drop... return -1, -1 } + if validToSend == -2 { // if the msg says New hold then we don't execute... + return 0, -2 + } + + if validToSend != 1 { // if the msg says anything other than valid + s.LogMessage("badmsgs", fmt.Sprintf("Invalid validity code %d", validToSend), msg) + panic("unexpected validity code") + } // if it is valid to send then we check other stuff ... @@ -236,7 +256,6 @@ func (s *State) executeMsg(msg interfaces.IMsg) (ret bool) { switch msg.Type() { case constants.REVEAL_ENTRY_MSG, constants.COMMIT_ENTRY_MSG, constants.COMMIT_CHAIN_MSG: if !s.NoEntryYet(msg.GetHash(), nil) { - //delete(s.Holding, msg.GetHash().Fixed()) s.DeleteFromHolding(msg.GetMsgHash().Fixed(), msg, "AlreadyCommitted") // delete commit s.DeleteFromHolding(msg.GetHash().Fixed(), msg, "AlreadyCommitted") // delete reveal s.Commits.Delete(msg.GetHash().Fixed()) @@ -244,7 +263,6 @@ func (s *State) executeMsg(msg interfaces.IMsg) (ret bool) { return true } s.AddToHolding(msg.GetMsgHash().Fixed(), msg) // add valid commit/reveal to holding in case it fails to get added - //s.Holding[msg.GetMsgHash().Fixed()] = msg } var vm *VM = nil @@ -263,7 +281,6 @@ func (s *State) executeMsg(msg interfaces.IMsg) (ret bool) { vml = len(vm.List) } } - local := msg.IsLocal() vmi := msg.GetVMIndex() hkb := s.GetHighestKnownBlock() @@ -302,6 +319,10 @@ func (s *State) executeMsg(msg interfaces.IMsg) (ret bool) { s.AddToHolding(msg.GetMsgHash().Fixed(), msg) // Add message where validToExecute==0 return false + case -2: + s.LogMessage("executeMsg", "back to new holding from executeMsg", msg) + return false + default: s.DeleteFromHolding(msg.GetMsgHash().Fixed(), msg, "InvalidMsg") // delete 
commit if !msg.SentInvalid() { @@ -310,6 +331,7 @@ func (s *State) executeMsg(msg interfaces.IMsg) (ret bool) { } return true } + } func (s *State) Process() (progress bool) { @@ -402,8 +424,8 @@ func (s *State) Process() (progress bool) { break // We have nothing for the system, given its current height. } if msg := s.DBStatesReceived[ix]; msg != nil { - s.LogPrintf("dbstateprocess", "Trying to process DBStatesReceived %d", s.DBStatesReceivedBase+ix) - s.executeMsg(msg) + ret := s.executeMsg(msg) + s.LogPrintf("dbstateprocess", "Trying to process DBStatesReceived %d, %t", s.DBStatesReceivedBase+ix, ret) } // if we can not process a DBStatesReceived then go process some messages @@ -412,6 +434,7 @@ func (s *State) Process() (progress bool) { } } } + // Process inbound messages preEmptyLoopTime := time.Now() emptyLoop: @@ -442,10 +465,10 @@ ackLoop: s.LogMessage("ackQueue", "Hold", ack) // toss the ack into holding and we will try again in a bit... TotalHoldingQueueInputs.Inc() - //s.Holding[ack.GetMsgHash().Fixed()] = ack s.AddToHolding(ack.GetMsgHash().Fixed(), ack) // Add ack where valid==0 continue } + s.LogMessage("ackQueue", "Execute2", ack) progress = s.executeMsg(ack) || progress @@ -463,20 +486,18 @@ ackLoop: if s.RunLeader { s.ReviewHolding() - for { - for _, msg := range s.XReview { - if msg == nil { - continue - } - // copy the messages we are responsible for and all ACKs to process to be executed - if msg.GetVMIndex() == s.LeaderVMIndex || msg.Type() == constants.ACK_MSG { - process = append(process, msg) - } + for _, msg := range s.XReview { + if msg == nil { + continue } - // toss everything else - s.XReview = s.XReview[:0] - break - } // skip review + // copy the messages we are responsible for and all msg that don't need ack + // messages that need ack will get processed when thier ack arrives + if msg.GetVMIndex() == s.LeaderVMIndex || !constants.NeedsAck(msg.Type()) { + process = append(process, msg) + } + } + // toss everything else + s.XReview = 
s.XReview[:0] } if ValidationDebug { s.LogPrintf("executeMsg", "end reviewHolding %d", len(s.XReview)) @@ -499,6 +520,7 @@ ackLoop: s.LogMessage("executeMsg", "From process", msg) s.UpdateState() } // processLoop for{...} + if ValidationDebug { s.LogPrintf("executeMsg", "end processloop") } @@ -542,7 +564,7 @@ func (s *State) ReviewHolding() { if s.ResendHolding == nil { s.ResendHolding = now } - if now.GetTimeMilli()-s.ResendHolding.GetTimeMilli() < 300 { + if now.GetTimeMilli()-s.ResendHolding.GetTimeMilli() < 100 { return } @@ -554,7 +576,6 @@ func (s *State) ReviewHolding() { s.LogPrintf("executeMsg", "Start reviewHolding") defer s.LogPrintf("executeMsg", "end reviewHolding holding=%d, xreview=%d", len(s.Holding), len(s.XReview)) } - s.Commits.Cleanup(s) s.DB.Trim() @@ -584,7 +605,6 @@ func (s *State) ReviewHolding() { s.LeaderNewMin++ // Either way, don't do it again until the ProcessEOM resets LeaderNewMin for k, v := range s.Holding { - if int(highest)-int(saved) > 1000 { TotalHoldingQueueOutputs.Inc() //delete(s.Holding, k) @@ -635,6 +655,7 @@ func (s *State) ReviewHolding() { } // If it is an entryCommit/ChainCommit/RevealEntry and it has a duplicate hash to an existing entry throw it away here + ce, ok := v.(*messages.CommitEntryMsg) if ok { x := s.NoEntryYet(ce.CommitEntry.EntryHash, ce.CommitEntry.GetTimestamp()) @@ -686,7 +707,6 @@ func (s *State) ReviewHolding() { } default: } - // If a Reveal Entry has a commit available, then process the Reveal Entry and send it out. if re, ok := v.(*messages.RevealEntryMsg); ok { if !s.NoEntryYet(re.GetHash(), s.GetLeaderTimestamp()) { @@ -694,7 +714,6 @@ func (s *State) ReviewHolding() { s.Commits.Delete(re.GetHash().Fixed()) continue } - // Needs to be our VMIndex as well, or ignore. if re.GetVMIndex() != s.LeaderVMIndex || !s.Leader { continue // If we are a leader, but it isn't ours, and it isn't a new minute, ignore. 
@@ -738,7 +757,6 @@ func (s *State) MoveStateToHeight(dbheight uint32, newMinute int) { vm.Synced = false // movestatetoheight } } - } // normally when loading by DBStates we jump from minute 0 to minute 0 // when following by minute we jump from minute 10 to minute 0 @@ -750,11 +768,6 @@ func (s *State) MoveStateToHeight(dbheight uint32, newMinute int) { //s.SetLLeaderHeight(int(dbheight)) // Update leader height in MoveStateToHeight if s.LLeaderHeight != dbheight { - //if s.DBSigDone == true { - // s.LogPrintf("executeMsg", "reset s.DBSigDone in MoveStateToHeight for block change") - //} - // - //s.DBSigDone = false // movestatetoheight (new block) if newMinute != 0 { panic(fmt.Sprintf("Can't jump to the middle of a block minute: %d", newMinute)) } @@ -810,7 +823,6 @@ func (s *State) MoveStateToHeight(dbheight uint32, newMinute int) { } s.CurrentMinute = newMinute // Update just the minute s.Leader, s.LeaderVMIndex = s.LeaderPL.GetVirtualServers(newMinute, s.IdentityChainID) // MoveStateToHeight minute - s.LogPrintf("executeMsg", "MoveStateToHeight new minute set leader=%v, vmIndex = %v", s.Leader, s.LeaderVMIndex) // We are between blocks make sure we are setup to sync // should already be true but if a DBSTATE got processed mid block @@ -821,7 +833,6 @@ func (s *State) MoveStateToHeight(dbheight uint32, newMinute int) { // If an election took place, our lists will be unsorted. 
Fix that s.LeaderPL.SortAuditServers() s.LeaderPL.SortFedServers() - } { // debug @@ -840,25 +851,13 @@ func (s *State) MoveStateToHeight(dbheight uint32, newMinute int) { } } - // force sync state to a ration state for between minutes - //s.Syncing = false // movestatetoheight - //s.EOM = false // movestatetoheight - //s.EOMDone = false // movestatetoheight - //s.DBSig = false // movestatetoheight - //s.EOMProcessed = 0 // movestatetoheight - //s.DBSigProcessed = 0 // movestatetoheight - // leave s.DBSigDone alone unless it's a block change - - //for _, vm := range s.LeaderPL.VMs { - // vm.Synced = false // movestatetoheight - //} - // set the limits because we might have added servers s.EOMLimit = len(s.LeaderPL.FedServers) // We add or remove server only on block boundaries s.DBSigLimit = s.EOMLimit // We add or remove server only on block boundaries - s.LogPrintf("dbstateprocess", "MoveStateToHeight(%d-:-%d) leader=%v leaderPL=%p, leaderVMIndex=%d", dbheight, newMinute, s.Leader, s.LeaderPL, s.LeaderVMIndex) + s.Hold.ExecuteForNewHeight(dbheight) // execute held messages + s.Hold.Review() // cleanup old messages } // Adds blocks that are either pulled locally from a database, or acquired from peers. @@ -898,48 +897,6 @@ func (s *State) AddDBState(isNew bool, fmt.Fprintf(os.Stderr, "AddDBState() out of order! 
at %d added %d\n", s.LLeaderHeight, ht) //panic("AddDBState out of order!") } - if ht > s.LLeaderHeight { - s.LogPrintf("dbstateprocess", "unexpected: ht > s.LLeaderHeight at %d added %d", s.LLeaderHeight, ht) - - //fmt.Println(fmt.Sprintf("SigType PROCESS: %10s Add DBState: s.SigType(%v)", s.FactomNodeName, s.SigType)) - s.MoveStateToHeight(ht, 0) // AddDBState() - s.StartDelay = s.GetTimestamp().GetTimeMilli() - s.RunLeader = false - LeaderPL := s.ProcessLists.Get(s.LLeaderHeight) - - if s.LLeaderHeight != 0 && s.LeaderPL != LeaderPL { - s.LogPrintf("ExecuteMsg", "AddDBState: Unexpected change in LeaderPL") - s.LeaderPL = LeaderPL - } - s.SetLeaderTimestamp(dbState.DirectoryBlock.GetTimestamp()) // move the leader timestamp to the start of the block - { - // Okay, we have just loaded a new DBState. The temp balances are no longer valid, if they exist. Nuke them. - s.LeaderPL.FactoidBalancesTMutex.Lock() - s.LeaderPL.FactoidBalancesT = map[[32]byte]int64{} - s.LeaderPL.FactoidBalancesTMutex.Unlock() - - s.LeaderPL.ECBalancesTMutex.Lock() - s.LeaderPL.ECBalancesT = map[[32]byte]int64{} - s.LeaderPL.ECBalancesTMutex.Unlock() - } - - Leader, LeaderVMIndex := s.LeaderPL.GetVirtualServers(s.CurrentMinute, s.IdentityChainID) // AddDBState() - { // debug - if s.Leader != Leader { - s.LogPrintf("executeMsg", "State.AddDBState() unexpectedly setting s.Leader to %v", Leader) - s.Leader = Leader - } - if s.LeaderVMIndex != LeaderVMIndex { - s.LogPrintf("executeMsg", "State.AddDBState() unexpectedly setting s.LeaderVMIndex to %v", LeaderVMIndex) - s.LeaderVMIndex = LeaderVMIndex - } - } - for s.ProcessLists.UpdateState(s.LLeaderHeight) { - } - } - if ht == 0 && s.LLeaderHeight == 0 { - s.MoveStateToHeight(1, 0) - } return dbState } @@ -969,12 +926,12 @@ func (s *State) FollowerExecuteMsg(m interfaces.IMsg) { } } -// exeute a msg with an optional delay (in factom seconds) +// execute a msg with an optional delay (in factom seconds) func (s *State) repost(m interfaces.IMsg, 
delay int) { //whereAmI := atomic.WhereAmIString(1) go func() { // This is a trigger to issue the EOM, but we are still syncing. Wait to retry. if delay > 0 { - time.Sleep((time.Duration(s.DirectoryBlockInSeconds*delay/600) * time.Second)) // delay in Factom seconds + time.Sleep(time.Duration(delay) * s.FactomSecond()) // delay in Factom seconds } //s.LogMessage("MsgQueue", fmt.Sprintf("enqueue_%s(%d)", whereAmI, len(s.msgQueue)), m) s.LogMessage("MsgQueue", fmt.Sprintf("enqueue (%d)", len(s.msgQueue)), m) @@ -982,6 +939,18 @@ func (s *State) repost(m interfaces.IMsg, delay int) { }() } +// FactomSecond finds the time duration of 1 second relative to 10min blocks. +// Blktime EOMs Second +// 600s 60s 1s +// 300s 30s 0.5s +// 120s 12s 0.2s +// 60s 6s 0.1s +// 30s 3s 0.05s +func (s *State) FactomSecond() time.Duration { + // Convert to time.second, then divide by 600 + return time.Duration(s.DirectoryBlockInSeconds) * time.Second / 600 +} + // Messages that will go into the Process List must match an Acknowledgement. // The code for this is the same for all such messages, so we put it here. // @@ -1049,6 +1018,7 @@ func (s *State) FollowerExecuteAck(msg interfaces.IMsg) { s.Acks[ack.GetHash().Fixed()] = ack // check if we have a message m, _ := s.Holding[ack.GetHash().Fixed()] + if m != nil { // We have an ack and a matching message go execute the message! 
if m.Validate(s) == 1 { @@ -1063,42 +1033,6 @@ func (s *State) FollowerExecuteAck(msg interfaces.IMsg) { } } -func (s *State) ExecuteEntriesInDBState(dbmsg *messages.DBStateMsg) { - height := dbmsg.DirectoryBlock.GetDatabaseHeight() - - if s.EntryDBHeightComplete > height { - return - } - s.LogPrintf("dbstateprocess", "Process entries in %d", height) - // If no Eblocks, leave - if len(dbmsg.EBlocks) == 0 { - return - } - - // All DBStates that got here are valid, so just checking the DBlock hash works - dblock, err := s.DB.FetchDBlockByHeight(height) - if err != nil || dblock == nil { - consenLogger.WithFields(log.Fields{"func": "ExecuteEntriesInDBState", "height": height}).Warnf("Dblock fetched is nil") - return // This is a weird case - } - - if !dbmsg.DirectoryBlock.GetHash().IsSameAs(dblock.GetHash()) { - consenLogger.WithFields(log.Fields{"func": "ExecuteEntriesInDBState", "height": height}).Errorf("Bad DBState. DBlock does not match found") - return // Bad DBlock - } - //todo: consider using func (s *State) WriteEntries() - s.DB.StartMultiBatch() - for _, e := range dbmsg.Entries { - s.WriteEntry <- e - } - err = s.DB.ExecuteMultiBatch() - if err != nil { - consenLogger.WithFields(log.Fields{"func": "ExecuteEntriesInDBState", "height": height}).Errorf("Was unable to execute multibatch") - return - } - // todo: Should we move the EntryDBHeightComplete here? 
-} - func (s *State) FollowerExecuteDBState(msg interfaces.IMsg) { dbstatemsg, _ := msg.(*messages.DBStateMsg) @@ -1125,6 +1059,7 @@ func (s *State) FollowerExecuteDBState(msg interfaces.IMsg) { switch valid { case 0: s.LogPrintf("dbstateprocess", "FollowerExecuteDBState hold for later %d", dbheight) + ix := int(dbheight) - s.DBStatesReceivedBase for len(s.DBStatesReceived) <= ix { s.DBStatesReceived = append(s.DBStatesReceived, nil) @@ -1224,6 +1159,7 @@ func (s *State) FollowerExecuteDBState(msg interfaces.IMsg) { dbstate.Locked = false dbstate.Signed = true s.DBStateAppliedCnt++ + s.DBStates.UpdateState() } else { //s.AddStatus(fmt.Sprintf("FollowerExecuteDBState(): dbstate added from local db at ht %d", dbheight)) dbstate.Saved = true @@ -1232,49 +1168,36 @@ func (s *State) FollowerExecuteDBState(msg interfaces.IMsg) { } //fmt.Println(fmt.Sprintf("SigType PROCESS: %10s Clear SigType follower execute DBState: !s.SigType(%v)", s.FactomNodeName, s.SigType)) - // clear all the syncing flags because we are done with this block s.EOM = false s.EOMDone = false s.DBSig = false s.DBSigDone = false s.Syncing = false // FollowerExecuteDBState - s.Saving = true - // Hurry up our next ask. When we get to where we have the data we asked for, then go ahead and ask for the next set. 
- if s.DBStates.LastEnd < int(dbheight) { - s.DBStates.Catchup(true) - } - if s.DBStates.LastBegin < int(dbheight)+1 { - s.DBStates.LastBegin = int(dbheight) + // At this point the block is good, make sure not to ask for it anymore + if !dbstatemsg.IsInDB { + s.StatesReceived.Notify <- msg.(*messages.DBStateMsg) } - s.DBStates.TimeToAsk = nil - // Ok, I just added a valid state to the list so go process it now so it doesn't have to wait on the other messages s.DBStates.UpdateState() - //d := dbstate - //if dbstatemsg.IsLocal() { - // if s.StateSaverStruct.FastBoot && d.DirectoryBlock.GetHeader().GetDBHeight() != 0 { - // dbstate.SaveStruct = SaveFactomdState(s, dbstate) - // - // if dbstate.SaveStruct != nil { - // err := s.StateSaverStruct.SaveDBStateList(s, s.DBStates, s.Network) - // if err != nil { - // s.LogPrintf("dbstateprocess", "Error trying to save a DBStateList %v", err) - // } - // } - // } - //} } func (s *State) FollowerExecuteMMR(m interfaces.IMsg) { // Just ignore missing messages for a period after going off line or starting up. 
+ if s.IgnoreMissing { s.LogMessage("executeMsg", "drop IgnoreMissing", m) return } + // Drop the missing message response if it's already in the process list + _, valid := s.Replay.Valid(constants.INTERNAL_REPLAY, m.GetRepeatHash().Fixed(), m.GetTimestamp(), s.GetTimestamp()) + if !valid { + s.LogMessage("executeMsg", "drop, INTERNAL_REPLAY", m) + return + } mmr, _ := m.(*messages.MissingMsgResponse) @@ -1372,15 +1295,8 @@ func (s *State) FollowerExecuteDataResponse(m interfaces.IMsg) { if !ok { return } - if len(s.WriteEntry) < cap(s.WriteEntry) { + s.WriteEntry <- entry // DataResponse - if has(s, entry.GetHash()) { - s.LogPrintf("ehashes", "Duplicate DataResponse %x", entry.GetHash().Bytes()[:4]) - return - } - s.WriteEntry <- entry // DataResponse - s.LogMessage("executeMsg", "writeEntry", msg) - } } } @@ -1462,6 +1378,7 @@ func (s *State) FollowerExecuteRevealEntry(m interfaces.IMsg) { // still need this because of the call from FollowerExecuteCommitEntry and FollowerExecuteCommitChain ack, _ := s.Acks[m.GetMsgHash().Fixed()].(*messages.Ack) + if ack == nil { // todo: prevent this log from double logging s.LogMessage("executeMsg", "hold, no ack yet1", m) @@ -1502,7 +1419,6 @@ func (s *State) LeaderExecute(m interfaces.IMsg) { s.repost(m, 1) // Goes in the "do this really fast" queue so we are prompt about EOM's while syncing return } - LeaderExecutions.Inc() _, ok := s.Replay.Valid(constants.INTERNAL_REPLAY, m.GetRepeatHash().Fixed(), m.GetTimestamp(), s.GetTimestamp()) if !ok { @@ -1544,7 +1460,6 @@ func (s *State) LeaderExecuteEOM(m interfaces.IMsg) { //s.repost(m) return } - // The zero based minute for the message is equal to // the one based "LastMinute". This way we know we are // generating minutes in order. 
@@ -1553,7 +1468,6 @@ func (s *State) LeaderExecuteEOM(m interfaces.IMsg) { s.repost(m, 1) return } - eom := m.(*messages.EOM) // Put the System Height and Serial Hash into the EOM @@ -1568,6 +1482,7 @@ func (s *State) LeaderExecuteEOM(m interfaces.IMsg) { vm.EomMinuteIssued = s.CurrentMinute + 1 fix := false + if eom.DBHeight != s.LLeaderHeight || eom.VMIndex != s.LeaderVMIndex || eom.Minute != byte(s.CurrentMinute) { s.LogPrintf("executeMsg", "EOM has wrong data expected DBH/VM/M %d/%d/%d", s.LLeaderHeight, s.LeaderVMIndex, s.CurrentMinute) fix = true @@ -1575,7 +1490,7 @@ func (s *State) LeaderExecuteEOM(m interfaces.IMsg) { // make sure EOM has the right data eom.DBHeight = s.LLeaderHeight eom.VMIndex = s.LeaderVMIndex - // eom.Minute is zero based, while LeaderMinute is 1 based. So + // eom.Minute is zerobased, while LeaderMinute is 1 based. So // a simple assignment works. eom.Minute = byte(s.CurrentMinute) eom.Sign(s) @@ -1615,6 +1530,18 @@ func (s *State) LeaderExecuteDBSig(m interfaces.IMsg) { return } + if len(pl.VMs[dbs.VMIndex].List) > 0 && pl.VMs[dbs.VMIndex].List[0] != nil { + s.LogPrintf("executeMsg", "DBSig issue height = %d, length = %d", pl.VMs[dbs.VMIndex].Height, len(pl.VMs[dbs.VMIndex].List)) + s.LogPrintf("executeMsg", "msg=%p pl[0]=%p", m, pl.VMs[dbs.VMIndex].List[0]) + if pl.VMs[dbs.VMIndex].List[0] != m { + s.LogMessage("executeMsg", "drop, slot 0 taken by", pl.VMs[dbs.VMIndex].List[0]) + } else { + s.LogMessage("executeMsg", "duplicate execute", pl.VMs[dbs.VMIndex].List[0]) + } + + return + } + // Put the System Height and Serial Hash into the EOM dbs.SysHeight = uint32(pl.System.Height) @@ -1637,13 +1564,11 @@ func (s *State) LeaderExecuteDBSig(m interfaces.IMsg) { } func (s *State) LeaderExecuteCommitChain(m interfaces.IMsg) { - vm := s.LeaderPL.VMs[s.LeaderVMIndex] if len(vm.List) != vm.Height { s.repost(m, 1) return } - cc := m.(*messages.CommitChainMsg) // Check if this commit has more entry credits than any previous that we have. 
if !s.IsHighestCommit(cc.GetHash(), m) { @@ -1664,7 +1589,6 @@ func (s *State) LeaderExecuteCommitEntry(m interfaces.IMsg) { s.repost(m, 1) return } - ce := m.(*messages.CommitEntryMsg) // Check if this commit has more entry credits than any previous that we have. @@ -1682,7 +1606,6 @@ func (s *State) LeaderExecuteCommitEntry(m interfaces.IMsg) { func (s *State) LeaderExecuteRevealEntry(m interfaces.IMsg) { LeaderExecutions.Inc() - vm := s.LeaderPL.VMs[s.LeaderVMIndex] if len(vm.List) != vm.Height { s.repost(m, 1) @@ -1774,11 +1697,12 @@ func (s *State) ProcessCommitChain(dbheight uint32, commitChain interfaces.IMsg) h := c.GetHash() s.PutCommit(h, c) pl.EntryCreditBlock.GetBody().AddEntry(c.CommitChain) - entry := s.Holding[h.Fixed()] if entry != nil { s.repost(entry, 0) // Try and execute the reveal for this commit } + //s.LogMessage("newHolding", "process", commitChain) + s.ExecuteFromHolding(commitChain.GetHash().Fixed()) // process CommitChain return true } @@ -1796,11 +1720,12 @@ func (s *State) ProcessCommitEntry(dbheight uint32, commitEntry interfaces.IMsg) h := c.GetHash() s.PutCommit(h, c) pl.EntryCreditBlock.GetBody().AddEntry(c.CommitEntry) - entry := s.Holding[h.Fixed()] if entry != nil { s.repost(entry, 0) // Try and execute the reveal for this commit } + // s.LogMessage("newHolding", "process", commitEntry) + s.ExecuteFromHolding(commitEntry.GetHash().Fixed()) // process CommitEntry return true } //s.AddStatus("Cannot Process Commit Entry") @@ -1854,9 +1779,12 @@ func (s *State) ProcessRevealEntry(dbheight uint32, m interfaces.IMsg) (worked b // Put it in our list of new Entry Blocks for this Directory Block s.PutNewEBlocks(dbheight, chainID, eb) s.PutNewEntries(dbheight, myhash, msg.Entry) - + s.WriteEntry <- msg.Entry s.IncEntryChains() s.IncEntries() + // s.LogMessage("newHolding", "process", m) + s.ExecuteFromHolding(chainID.Fixed()) // Process Reveal for Chain + return true } @@ -1883,6 +1811,7 @@ func (s *State) ProcessRevealEntry(dbheight 
uint32, m interfaces.IMsg) (worked b // Put it in our list of new Entry Blocks for this Directory Block s.PutNewEBlocks(dbheight, chainID, eb) s.PutNewEntries(dbheight, myhash, msg.Entry) + s.WriteEntry <- msg.Entry s.IncEntries() return true @@ -2032,7 +1961,6 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { if s.EOMProcessed <= 0 { // why less than or equal? s.SendHeartBeat() // Only do this once per minute s.LogPrintf("dbsig-eom", "ProcessEOM complete for %d", e.Minute) - // setup to sync next minute ... s.Syncing = false // ProcessEOM (EOM complete) s.EOM = false // ProcessEOM (EOM complete) @@ -2041,7 +1969,6 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { for _, vm := range pl.VMs { vm.Synced = false // ProcessEOM (EOM complete) } - if !s.Leader { if s.CurrentMinute != int(e.Minute) { s.LogPrintf("dbsig-eom", "Follower jump to minute %d from %d", s.CurrentMinute, int(e.Minute)) @@ -2087,9 +2014,6 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { for _, v := range pl.NewEBlocks { eBlocks = append(eBlocks, v) } - for _, v := range pl.NewEntries { - entries = append(entries, v) - } dbstate := s.AddDBState(true, s.LeaderPL.DirectoryBlock, s.LeaderPL.AdminBlock, s.GetFactoidState().GetCurrentBlock(), s.LeaderPL.EntryCreditBlock, eBlocks, entries) if dbstate == nil { @@ -2122,7 +2046,7 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { return true } - // What I do once for all VMs at the beginning of processing a particular EOM + // What I do once for all VMs at the beginning of processing a particular EOM if !s.EOM { s.LogPrintf("dbsig-eom", "ProcessEOM start EOM processing for %d", e.Minute) @@ -2135,7 +2059,6 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { for _, vm := range pl.VMs { vm.Synced = false // ProcessEOM start } - if s.CurrentMinute != int(e.Minute) { s.LogPrintf("dbsig-eom", "Follower jump to minute %d from %d", 
s.CurrentMinute, int(e.Minute)) } @@ -2183,7 +2106,6 @@ func (s *State) ProcessEOM(dbheight uint32, msg interfaces.IMsg) bool { s.EOMDone = true // ProcessEOM s.EOMSyncTime = time.Now().UnixNano() - s.LeaderNewMin = 0 for _, eb := range pl.NewEBlocks { eb.AddEndOfMinuteMarker(byte(e.Minute + 1)) @@ -2306,7 +2228,6 @@ func (s *State) ProcessDBSig(dbheight uint32, msg interfaces.IMsg) bool { s.LogPrintf("dbsig-eom", "ProcessDBSig start DBSig processing for %d", dbs.Minute) //fmt.Printf("ProcessDBSig(): %s Start DBSig %s\n", s.FactomNodeName, dbs.String()) - s.Syncing = true // ProcessDBsig Start s.DBSig = true // ProcessDBsig Start s.DBSigDone = false // ProcessDBsig Start @@ -2315,7 +2236,6 @@ func (s *State) ProcessDBSig(dbheight uint32, msg interfaces.IMsg) bool { for _, vm := range pl.VMs { vm.Synced = false // ProcessDBsig Start } - pl.ResetDiffSigTally() } @@ -2462,7 +2382,6 @@ func (s *State) ProcessDBSig(dbheight uint32, msg interfaces.IMsg) bool { s.LogPrintf("dbsig-eom", "ProcessDBSig stop DBSig processing minute %d", s.CurrentMinute) //fmt.Println(fmt.Sprintf("All DBSigs are processed: allfaults(%v), && !s.DBSigDone(%v) && s.DBSigProcessed(%v)>= s.DBSigLimit(%v)", // allfaults, s.DBSigDone, s.DBSigProcessed, s.DBSigLimit)) - fails := 0 for i := range pl.FedServers { vm := pl.VMs[i] if len(vm.List) > 0 { @@ -2473,10 +2392,6 @@ func (s *State) ProcessDBSig(dbheight uint32, msg interfaces.IMsg) bool { } } } - if fails > 0 { - //fmt.Println("DBSig Fails Detected") - return false - } // TODO: check signatures here. Count what match and what don't. Then if a majority // disagree with us, null our entry out. Otherwise toss our DBState and ask for one from @@ -2797,7 +2712,6 @@ func (s *State) NewAck(msg interfaces.IMsg, balanceHash interfaces.IHash) interf // these don't affect the msg hash, just for local use... 
msg.SetLeaderChainID(s.IdentityChainID) - ack := new(messages.Ack) ack.DBHeight = s.LLeaderHeight ack.VMIndex = vmIndex @@ -2819,7 +2733,6 @@ func (s *State) NewAck(msg interfaces.IMsg, balanceHash interfaces.IHash) interf } ack.Sign(s) - ack.SetLocal(true) return ack diff --git a/state/stateConsensus_test.go b/state/stateConsensus_test.go index d29148160c..771073e949 100644 --- a/state/stateConsensus_test.go +++ b/state/stateConsensus_test.go @@ -7,6 +7,8 @@ package state_test import ( "testing" + "time" + "github.com/FactomProject/factomd/common/entryCreditBlock" "github.com/FactomProject/factomd/common/messages" . "github.com/FactomProject/factomd/state" @@ -48,3 +50,47 @@ func newCom() *messages.CommitEntryMsg { return commit } + +func TestFactomSecond(t *testing.T) { + s := testHelper.CreateEmptyTestState() + // Test the 10min + testFactomSecond(t, s, 600, time.Second) + + // Test every half + blktime := 600 + d := time.Second + for i := 0; i < 9; i++ { + testFactomSecond(t, s, blktime/2, d/2) + blktime = blktime / 2 + d = d / 2 + } + + // Test different common vectors + // 2min blocks == 1/5s seconds + testFactomSecond(t, s, 120, time.Second/5) + // 1min blocks == 1/10s seconds + testFactomSecond(t, s, 60, time.Second/10) + // 30s blocks == 1/20s seconds + testFactomSecond(t, s, 30, time.Second/20) + // 6s blocks == 1/100s seconds + testFactomSecond(t, s, 6, time.Second/100) + +} + +func testFactomSecond(t *testing.T, s *State, blktime int, second time.Duration) { + s.DirectoryBlockInSeconds = blktime + fs := s.FactomSecond() + if fs != second { + //avg := (fs + second) / 2 + diff := fs - second + if diff < 0 { + diff = diff * -1 + } + if diff < 2*time.Millisecond { + // This is close enough to be correct + } else { + t.Errorf("Blktime=%ds, Expect second of %s, found %s. 
Difference %s", blktime, second, fs, diff) + + } + } +} diff --git a/state/stateDisplay.go b/state/stateDisplay.go index ae3a2ea72c..4febadf8b6 100644 --- a/state/stateDisplay.go +++ b/state/stateDisplay.go @@ -55,6 +55,7 @@ type DisplayState struct { SimElection string SyncingState [256]string SyncingStateCurrent int + IgnoreDone bool } type FactoidTransaction struct { @@ -257,6 +258,7 @@ func DeepStateDisplayCopyDifference(s *State, prev *DisplayState) (*DisplayState ds.SyncingState = s.SyncingState ds.SyncingStateCurrent = s.SyncingStateCurrent + ds.IgnoreDone = s.GetIgnoreDone() return ds, nil } diff --git a/state/stateSaver.go b/state/stateSaver.go index fb80128edf..199e3671ca 100644 --- a/state/stateSaver.go +++ b/state/stateSaver.go @@ -37,10 +37,8 @@ func (sss *StateSaverStruct) SaveDBStateList(s *State, ss *DBStateList, networkN return nil // if we have closed the database then don't save } - hsb := int(ss.GetHighestSavedBlk()) //Save only every FastSaveRate states - - if hsb%ss.State.FastSaveRate != 0 || hsb < ss.State.FastSaveRate { + if int(s.LLeaderHeight)%ss.State.FastSaveRate != 0 || int(s.LLeaderHeight) < ss.State.FastSaveRate { return nil } @@ -48,25 +46,29 @@ func (sss *StateSaverStruct) SaveDBStateList(s *State, ss *DBStateList, networkN defer sss.Mutex.Unlock() //Actually save data from previous cached state to prevent dealing with rollbacks // Save the N block old state and then make a new savestate for the next save - if len(sss.TmpState) > 0 { - err := SaveToFile(s, sss.TmpDBHt, sss.TmpState, NetworkIDToFilename(networkName, sss.FastBootLocation)) + if sss.TmpDBHt != ss.State.LLeaderHeight && len(sss.TmpState) > 0 { + filename := NetworkIDToFilename(networkName, sss.FastBootLocation) + s.LogPrintf("executeMsg", "%d-:-%d %20s Saving %s for dbht %d", s.LLeaderHeight, s.CurrentMinute, s.FactomNodeName, filename, sss.TmpDBHt) + err := SaveToFile(s, sss.TmpDBHt, sss.TmpState, filename) if err != nil { fmt.Fprintln(os.Stderr, "SaveState SaveToFile 
Failed", err) return err } } - //Marshal state for future saving - b, err := ss.MarshalBinary() - if err != nil { - fmt.Fprintln(os.Stderr, "SaveState MarshalBinary Failed", err) - return err + if sss.TmpDBHt != ss.State.LLeaderHeight { + //Marshal state for future saving + b, err := ss.MarshalBinary() + if err != nil { + fmt.Fprintln(os.Stderr, "SaveState MarshalBinary Failed", err) + return err + } + //adding an integrity check + h := primitives.Sha(b) + b = append(h.Bytes(), b...) + sss.TmpState = b + sss.TmpDBHt = ss.State.LLeaderHeight } - //adding an integrity check - h := primitives.Sha(b) - b = append(h.Bytes(), b...) - sss.TmpState = b - sss.TmpDBHt = ss.State.LLeaderHeight return nil } diff --git a/state/state_test.go b/state/state_test.go index 1556f68eae..6935ec89d9 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -110,6 +110,20 @@ func TestLoadHoldingMap(t *testing.T) { } } +func TestDependentHoldingReview(t *testing.T) { + state := testHelper.CreateAndPopulateStaleHolding() + + if state.Hold.GetSize() == 0 { + t.Errorf("Error with Holding Map Length") + } + + state.Hold.Review() + + if state.Hold.GetSize() != 0 { + t.Errorf("Stale message should be dropped") + } +} + func TestLoadAcksMap(t *testing.T) { state := testHelper.CreateAndPopulateTestStateAndStartValidator() diff --git a/state/validation.go b/state/validation.go index 24c05fb783..913af1b3c7 100644 --- a/state/validation.go +++ b/state/validation.go @@ -9,6 +9,7 @@ import ( "time" "github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/constants/runstate" "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/messages" "github.com/FactomProject/factomd/util/atomic" @@ -19,12 +20,12 @@ var ValidationDebug bool = false // This is the tread with access to state. 
It does process and update state func (s *State) DoProcessing() { s.validatorLoopThreadID = atomic.Goid() - s.IsRunning = true + s.RunState = runstate.Running slp := false i3 := 0 - for s.IsRunning { + for s.GetRunState() == runstate.Running { p1 := true p2 := true @@ -68,6 +69,13 @@ func (s *State) DoProcessing() { } func (s *State) ValidatorLoop() { + defer func() { + if r := recover(); r != nil { + fmt.Println("A panic state occurred in ValidatorLoop.", r) + shutdown(s) + } + }() + CheckGrants() go s.DoProcessing() @@ -78,7 +86,7 @@ func (s *State) ValidatorLoop() { select { case <-s.ShutdownChan: // Check if we should shut down. - s.IsRunning = false + shutdown(s) time.Sleep(10 * time.Second) // wait till database close is complete return case <-s.tickerQueue: // Look for pending messages, and get one if there is one. @@ -113,3 +121,22 @@ func (s *State) ValidatorLoop() { } } } + +func shouldShutdown(state *State) bool { + select { + case <-state.ShutdownChan: + shutdown(state) + return true + default: + return false + } +} + +func shutdown(state *State) { + state.RunState = runstate.Stopping + fmt.Println("Closing the Database on", state.GetFactomNodeName()) + state.StateSaverStruct.StopSaving() + state.DB.Close() + fmt.Println("Database on", state.GetFactomNodeName(), "closed") + state.RunState = runstate.Stopped +} diff --git a/support/dev/simulator/brainSwap/test0.sh b/support/dev/simulator/brainSwap/test0.sh index 63514e9d6c..fe860d5f43 100755 --- a/support/dev/simulator/brainSwap/test0.sh +++ b/support/dev/simulator/brainSwap/test0.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -SIM_TEST=${DIR}/../../../../simTest/BrainSwapNetwork_test.go -cd ${DIR}/v0 -go test -v $SIM_TEST #> out1.txt +SIM_TEST=${DIR}/../../../../peerTest/BrainSwapNetwork_test.go +cd ${DIR}/v0 && go test -v $SIM_TEST #> out1.txt diff --git a/support/dev/simulator/brainSwap/test1.sh b/support/dev/simulator/brainSwap/test1.sh 
index c072168ed7..678dc50da5 100755 --- a/support/dev/simulator/brainSwap/test1.sh +++ b/support/dev/simulator/brainSwap/test1.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -SIM_TEST=${DIR}/../../../../simTest/BrainSwapFollower_test.go -cd ${DIR}/v1 -go test -v $SIM_TEST #> out1.txt +SIM_TEST=${DIR}/../../../../peerTest/BrainSwapFollower_test.go +cd ${DIR}/v1 && go test -v $SIM_TEST #> out1.txt diff --git a/test.sh b/test.sh index d4da6a8d94..11a3b27bfa 100755 --- a/test.sh +++ b/test.sh @@ -1,20 +1,94 @@ #!/usr/bin/env bash -# run same tests as specified in .circleci/config.yml -PACKAGES=$(glide nv | grep -v Utilities | grep -v LongTests | grep -v simTest) -FAIL="" - -for PKG in ${PACKAGES[*]} ; do - go test -v -vet=off $PKG - if [[ $? != 0 ]] ; then - FAIL=1 +# this script is specified in .circleci/config.yml +# to run as the 'tests' task + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # get dir containing this script +cd $DIR # always run from script dir + +function runTests() { + if [[ "${CI}x" == "x" ]] ; then + # run locally + # 1. all unit tests except filtered packages + # 2. engine sim tests that are whitelisted + # 3. all files in simTest package + # 4. all sets of A/B tests in peerTest package + TESTS=$({ \ + glide nv | grep -v Utilities | grep -v longTest | grep -v peerTest | grep -v simTest | grep -v elections | grep -v activations | grep -v netTest | grep "\.\.\." ; \ + cat engine/ci_whitelist; \ + ls simTest/*_test.go; \ + ls peerTest/*A_test.go; \ + }) + else + # run on circle + # 1. run all unit tests + # 2. run only whitelisted tests in engine, peerTest, and simTest + TESTS=$({ \ + glide nv | grep -v Utilities | grep -v longTest | grep -v peerTest | grep -v simTest | grep -v elections | grep -v activations | grep -v netTest | grep "\.\.\." 
; \ + cat */ci_whitelist; \ + } | circleci tests split --split-by=timings) + fi + + if [[ "${TESTS}x" == "x" ]] ; then + echo "No Tests" + exit 0 + else + echo '---------------' + echo "${TESTS}" + echo '---------------' fi -done - -if [[ "${FAIL}x" != "x" ]] ; then - echo "TESTS FAIL" - exit 1 -else - echo "ALL TESTS PASS" - exit 0 -fi + + # NOTE: peer tests are expected to be named + # in Follower/Network pairs + # Example: + # BrainSwapA_test.go # 'A' runs first in background + # BrainSwapB_test.go # 'B' test runs in foreground + BTEST="B_" + ATEST="A_" + FAILURES=() + FAIL="" + + # exit code should fail if any part of the command fails + set -o pipefail + + for TST in ${TESTS[*]} ; do + # start 'A' part of A/B test in background + if [[ `dirname ${TST}` == "peerTest" ]] ; then + ATEST_FILE=${TST/$BTEST/$ATEST} + TST=${TST/$ATEST/$BTEST} + echo "Concurrent Peer TEST: $ATEST_FILE" + nohup go test -v -timeout=10m -vet=off $ATEST_FILE &> testout.txt & + fi + + # run individual sim tests that have been whitelisted + if [[ `dirname ${TST}` == "engine" && ${TST/engine\//} != '...' ]] ; then + TST="./engine/... -run ${TST/engine\//}" + echo "Testing: $TST" + fi + + echo "START: ${TST}" + echo '---------------' + go test -v -timeout=10m -vet=off $TST | tee -a testout.txt | egrep 'PASS|FAIL|RUN' + if [[ $? 
!= 0 ]] ; then + FAIL=1 + FAILURES+=($TST) + fi + echo "END: ${TST}" + echo '---------------' + + done + + if [[ "${FAIL}x" != "x" ]] ; then + echo "TESTS FAIL" + echo '---------------' + for F in ${FAILURES[*]} ; do + echo $F + done + exit 1 + else + echo "ALL TESTS PASS" + exit 0 + fi +} + +runTests diff --git a/testHelper/config.go b/testHelper/config.go index 7956e43225..bb354d8026 100644 --- a/testHelper/config.go +++ b/testHelper/config.go @@ -3,8 +3,11 @@ package testHelper import ( "fmt" "os" + "os/exec" "testing" + "path/filepath" + "github.com/FactomProject/factomd/util" ) @@ -70,18 +73,45 @@ LocalServerPrivKey = 3838383838386330626339393136366331343139 LocalServerPublicKey = 7eef4c8fac8907ad4f34a27c612a417344eb3c2fc1ec9b840693a2b4f90f0204 `} +// REVIEW: is there a build-in way better than this? +func CopyDir(src, dst string) error { + cmd := exec.Command("cp", "-r", src, dst) + return cmd.Run() +} + +func CloneFnodeData(fnode int, copyToNode int, t *testing.T) { + + simConfigPath := util.GetHomeDir() + "/.factom/m2/local-database/ldb/Sim0%v" + from := fmt.Sprintf(simConfigPath, fnode) + to := fmt.Sprintf(simConfigPath, copyToNode) + + if err := os.RemoveAll(to); err != nil { + t.Fatal(err) + } + + if err := CopyDir(from, to); err != nil { + t.Fatal(err) + } +} + // Write an identity to a config file for an Fnode, optionally appending extra config data +// NOTE: works only for simulation func WriteConfigFile(identityNumber int, fnode int, extra string, t *testing.T) { var simConfigPath string var configfile string if fnode == 0 { - simConfigPath = util.GetHomeDir() + "/.factom/m2" + simConfigPath = filepath.Join(GetSimTestHome(t), "/.factom/m2") configfile = fmt.Sprintf("%s/factomd.conf", simConfigPath) } else { - simConfigPath = util.GetHomeDir() + "/.factom/m2/simConfig" + simConfigPath = filepath.Join(GetSimTestHome(t), "/.factom/m2/simConfig") configfile = fmt.Sprintf("%s/factomd%03d.conf", simConfigPath, fnode) } + err := os.MkdirAll(simConfigPath, 
0755) + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(simConfigPath); os.IsNotExist(err) { fmt.Fprintf(os.Stderr, "Creating directory"+simConfigPath+"\n") os.MkdirAll(simConfigPath, 0775) diff --git a/testHelper/constants.go b/testHelper/constants.go index 1890eb6a4f..29070e5800 100644 --- a/testHelper/constants.go +++ b/testHelper/constants.go @@ -1,14 +1,14 @@ package testHelper const ( - DBlockHeadPrimaryIndex = "30060a6e8247e0ee085f29714a95d91f659c78ceea7ced49fd312c115e0440c3" - DBlockHeadSecondaryIndex = "376c6f28ffeaf08a61df390b4a9731f241d340d913564c3ca6eab32e113b5cd2" + DBlockHeadPrimaryIndex = "e8e28ca4dfd65075cc113c522bf9476ffdf78235d553e8a192f477ee8c659cd0" + DBlockHeadSecondaryIndex = "5a0654af6690ff20f2a50970e2859daec94390a623e39598ffe79c76cbe45a4b" ABlockHeadPrimaryIndex = "073bd46185217fddbd6d422cc23f28fe8ea58bf63c2e7521b7de3623b52e3b41" ABlockHeadSecondaryIndex = "4d4d40eff3c48e054226d7308ac6a8e2230dc489be7ca39631abac258f33902b" - ECBlockHeadPrimaryIndex = "bdf70fe3bd42c74bb86668e10bff2c8c1c52c6c89f625d3d9b6e763081916a56" - ECBlockHeadSecondaryIndex = "c70b7f03068418741f6815f26560cbb2a11447c9b8717c4fe6ac2a0852750393" + ECBlockHeadPrimaryIndex = "6b1aa04211ffd258c19f3ae4d0299f2b71ccabb8105489ed35dd881fc40a1e68" + ECBlockHeadSecondaryIndex = "25a030e3a0eb491f7b5c0be7ee31575e8c274effc80854eafb1bf6c4ec4ea1e2" FBlockHeadPrimaryIndex = "84c8ac94c639117ef1b80d00c48f03c4a14bd74f17d86d56a8d8a4a73e6f91a8" FBlockHeadSecondaryIndex = "5f4d14e8b4bf5d8545d7ccaccccc99d2757b6ef5eaf5ffac03705457bb9d65ac" @@ -17,5 +17,5 @@ const ( EBlockHeadSecondaryIndex = "e79fb46ad81f0b4fac7f1e66728b40b390f8fcc3806e93f94550eec041eecff2" AnchorBlockHeadPrimaryIndex = "df3ade9eec4b08d5379cc64270c30ea7315d8a8a1a69efe2b98a60ecdd69e604" - AnchorBlockHeadSecondaryIndex = "7ab57fdb07ad75f35cb8f85bf435f60d5eab381c9da253780978cdb9d7ea5fae" + AnchorBlockHeadSecondaryIndex = "c4e4c26a0f280425cc4acafa66205cf2643e8d9c2c13cf6905edcfa1e5f90983" ) diff --git a/testHelper/simWallet.go 
b/testHelper/simWallet.go new file mode 100644 index 0000000000..49c0c34827 --- /dev/null +++ b/testHelper/simWallet.go @@ -0,0 +1,379 @@ +package testHelper + +// test helpers for Transaction & entry creations + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "text/template" + "time" + + "github.com/FactomProject/factom" + "github.com/FactomProject/factomd/common/entryBlock" + "github.com/FactomProject/factomd/common/entryCreditBlock" + "github.com/FactomProject/factomd/common/factoid" + "github.com/FactomProject/factomd/common/interfaces" + "github.com/FactomProject/factomd/common/messages" + "github.com/FactomProject/factomd/common/primitives" + "github.com/FactomProject/factomd/engine" + "github.com/FactomProject/factomd/state" +) + +// struct to generate FCT or EC addresses +// from the same private key +type testAccount struct { + Priv *primitives.PrivateKey +} + +var logName string = "simTest" + +func (d *testAccount) FctPriv() string { + x, _ := primitives.PrivateKeyStringToHumanReadableFactoidPrivateKey(d.Priv.PrivateKeyString()) + return x +} + +func (d *testAccount) FctPub() string { + s, _ := factoid.PublicKeyStringToFactoidAddressString(d.Priv.PublicKeyString()) + return s +} + +func (d *testAccount) EcPub() string { + s, _ := factoid.PublicKeyStringToECAddressString(d.Priv.PublicKeyString()) + return s +} + +func (d *testAccount) EcPriv() string { + s, _ := primitives.PrivateKeyStringToHumanReadableECPrivateKey(d.Priv.PrivateKeyString()) + return s +} + +func (d *testAccount) FctPrivHash() interfaces.IHash { + a := primitives.ConvertUserStrToAddress(d.FctPriv()) + x, _ := primitives.HexToHash(hex.EncodeToString(a)) + return x +} + +func (d *testAccount) FctAddr() interfaces.IHash { + a := primitives.ConvertUserStrToAddress(d.FctPub()) + x, _ := primitives.HexToHash(hex.EncodeToString(a)) + return x +} + +func (d *testAccount) EcPrivHash() interfaces.IHash { + a := primitives.ConvertUserStrToAddress(d.EcPriv()) + x, _ := 
primitives.HexToHash(hex.EncodeToString(a)) + return x +} + +func (d *testAccount) EcAddr() interfaces.IHash { + a := primitives.ConvertUserStrToAddress(d.EcPub()) + x, _ := primitives.HexToHash(hex.EncodeToString(a)) + return x +} + +// buy EC from coinbase 'bank' +func (d *testAccount) FundEC(amt uint64) { + state0 := engine.GetFnodes()[0].State + engine.FundECWallet(state0, GetBankAccount().FctPrivHash(), d.EcAddr(), uint64(amt)*state0.GetFactoshisPerEC()) +} + +// buy EC from account +func (d *testAccount) ConvertEC(amt uint64) { + state0 := engine.GetFnodes()[0].State + engine.FundECWallet(state0, d.FctPrivHash(), d.EcAddr(), uint64(amt)*state0.GetFactoshisPerEC()) +} + +// get FCT from coinbase 'bank' +func (d *testAccount) FundFCT(amt uint64) { + state0 := engine.GetFnodes()[0].State + _, err := engine.SendTxn(state0, uint64(amt), GetBankAccount().FctPriv(), d.FctPub(), state0.GetFactoshisPerEC()) + if err != nil { + panic(err) + } +} + +// transfer FCT from account +func (d *testAccount) SendFCT(a *testAccount, amt uint64) { + state0 := engine.GetFnodes()[0].State + engine.SendTxn(state0, uint64(amt), d.FctPriv(), a.FctPub(), state0.GetFactoshisPerEC()) +} + +// check EC balance +func (d *testAccount) GetECBalance() int64 { + state0 := engine.GetFnodes()[0].State + return engine.GetBalanceEC(state0, d.EcPub()) +} + +var testFormat string = ` +FCT + FctPriv: {{ .FctPriv }} + FctPub: {{ .FctPub }} + FctPrivHash: {{ .FctPrivHash }} + FctAddr: {{ .FctAddr }} +EC + EcPriv: {{ .EcPriv }} + EcPub: {{ .EcPub }} + EcPrivHash: {{ .EcPrivHash }} + EcAddr: {{ .EcAddr }} +` +var testTemplate *template.Template = template.Must( + template.New("").Parse(testFormat), +) + +func (d *testAccount) String() string { + b := &bytes.Buffer{} + testTemplate.Execute(b, d) + return b.String() +} + +func AccountFromFctSecret(s string) *testAccount { + d := new(testAccount) + h, _ := primitives.HumanReadableFactoidPrivateKeyToPrivateKey(s) + d.Priv = 
primitives.NewPrivateKeyFromHexBytes(h) + return d +} + +// This account has a balance from initial coinbase +func GetBankAccount() *testAccount { + return AccountFromFctSecret("Fs3E9gV6DXsYzf7Fqx1fVBQPQXV695eP3k5XbmHEZVRLkMdD9qCK") +} + +// build addresses from random key +func GetRandomAccount() *testAccount { + d := new(testAccount) + d.Priv = primitives.RandomPrivateKey() + return d +} + +// KLUDGE duplicates code from: factom lib +// TODO: refactor factom package to export these functions +func milliTime() (r []byte) { + buf := new(bytes.Buffer) + t := time.Now().UnixNano() + m := t / 1e6 + binary.Write(buf, binary.BigEndian, m) + return buf.Bytes()[2:] +} + +// KLUDGE duplicates code from: factom.ComposeEntryCommit() +// TODO: refactor factom package to export these functions +func commitEntryMsg(addr *factom.ECAddress, e *factom.Entry) (*bytes.Buffer, error) { + buf := new(bytes.Buffer) + + // 1 byte version + buf.Write([]byte{0}) + + // 6 byte milliTimestamp (truncated unix time) + buf.Write(milliTime()) + + // 32 byte Entry Hash + buf.Write(e.Hash()) + + // 1 byte number of entry credits to pay + if c, err := factom.EntryCost(e); err != nil { + return nil, err + } else { + buf.WriteByte(byte(c)) + } + + // 32 byte Entry Credit Address Public Key + 64 byte Signature + sig := addr.Sign(buf.Bytes()) + buf.Write(addr.PubBytes()) + buf.Write(sig[:]) + + return buf, nil +} + +// KLUDGE: copy from factom lib +// shad Double Sha256 Hash; sha256(sha256(data)) +func shad(data []byte) []byte { + h1 := sha256.Sum256(data) + h2 := sha256.Sum256(h1[:]) + return h2[:] +} + +// KLUDGE copy from factom +func composeChainCommitMsg(c *factom.Chain, ec *factom.ECAddress) (*bytes.Buffer, error) { + buf := new(bytes.Buffer) + + // 1 byte version + buf.Write([]byte{0}) + + // 6 byte milliTimestamp + buf.Write(milliTime()) + + e := c.FirstEntry + // 32 byte ChainID Hash + if p, err := hex.DecodeString(c.ChainID); err != nil { + return nil, err + } else { + // double sha256 hash 
of ChainID + buf.Write(shad(p)) + } + + // 32 byte Weld; sha256(sha256(EntryHash + ChainID)) + if cid, err := hex.DecodeString(c.ChainID); err != nil { + return nil, err + } else { + s := append(e.Hash(), cid...) + buf.Write(shad(s)) + } + + // 32 byte Entry Hash of the First Entry + buf.Write(e.Hash()) + + // 1 byte number of Entry Credits to pay + if d, err := factom.EntryCost(e); err != nil { + return nil, err + } else { + buf.WriteByte(byte(d + 10)) + } + + // 32 byte Entry Credit Address Public Key + 64 byte Signature + sig := ec.Sign(buf.Bytes()) + buf.Write(ec.PubBytes()) + buf.Write(sig[:]) + + return buf, nil +} + +func PrivateKeyToECAddress(key *primitives.PrivateKey) *factom.ECAddress { + // KLUDGE is there a better way to do this? + ecPub, _ := factoid.PublicKeyStringToECAddress(key.PublicKeyString()) + addr := factom.ECAddress{&[32]byte{}, &[64]byte{}} + copy(addr.Pub[:], ecPub.Bytes()) + copy(addr.Sec[:], key.Key[:]) + return &addr +} + +func ComposeCommitEntryMsg(pkey *primitives.PrivateKey, e factom.Entry) (*messages.CommitEntryMsg, error) { + msg, err := commitEntryMsg(PrivateKeyToECAddress(pkey), &e) + + commit := entryCreditBlock.NewCommitEntry() + commit.UnmarshalBinaryData(msg.Bytes()) + + m := new(messages.CommitEntryMsg) + m.CommitEntry = commit + m.SetValid() + return m, err +} + +func ComposeRevealEntryMsg(pkey *primitives.PrivateKey, e *factom.Entry) (*messages.RevealEntryMsg, error) { + entry := entryBlock.NewEntry() + entry.Content = primitives.ByteSlice{Bytes: e.Content} + + id, _ := primitives.HexToHash(e.ChainID) + entry.ChainID = id + + for _, extID := range e.ExtIDs { + entry.ExtIDs = append(entry.ExtIDs, primitives.ByteSlice{Bytes: extID}) + } + + m := new(messages.RevealEntryMsg) + m.Entry = entry + m.Timestamp = primitives.NewTimestampNow() + m.SetValid() + + return m, nil +} + +func ComposeChainCommit(pkey *primitives.PrivateKey, c *factom.Chain) (*messages.CommitChainMsg, error) { + msg, _ := composeChainCommitMsg(c, 
PrivateKeyToECAddress(pkey)) + e := entryCreditBlock.NewCommitChain() + _, err := e.UnmarshalBinaryData(msg.Bytes()) + if err != nil { + return nil, err + } + + m := new(messages.CommitChainMsg) + m.CommitChain = e + m.SetValid() + return m, nil +} + +// wait for non-zero EC balance +func WaitForAnyEcBalance(s *state.State, ecPub string) int64 { + s.LogPrintf(logName, "WaitForAnyEcBalance %v", ecPub) + return WaitForEcBalanceOver(s, ecPub, 0) +} + +// wait for non-zero FCT balance +func WaitForAnyFctBalance(s *state.State, fctPub string) int64 { + s.LogPrintf(logName, "WaitForAnyFctBalance %v", fctPub) + return WaitForFctBalanceOver(s, fctPub, 0) +} + +// wait for exactly Zero EC balance +// REVIEW: should we ditch this? +func WaitForZeroEC(s *state.State, ecPub string) int64 { + s.LogPrintf(logName, "WaitingForZeroEcBalance") + return WaitForEcBalanceUnder(s, ecPub, 1) +} + +const balanceWaitInterval = time.Millisecond * 20 + +// loop until balance is < target +func WaitForEcBalanceUnder(s *state.State, ecPub string, target int64) int64 { + + s.LogPrintf(logName, "WaitForEcBalanceUnder%v: %v", target, ecPub) + + for { + bal := engine.GetBalanceEC(s, ecPub) + time.Sleep(balanceWaitInterval) + + if bal < target { + s.LogPrintf(logName, "FoundEcBalanceUnder%v: %v", target, bal) + return bal + } + } +} + +// loop until balance is >= target +func WaitForEcBalanceOver(s *state.State, ecPub string, target int64) int64 { + + s.LogPrintf(logName, "WaitForEcBalanceOver%v: %v", target, ecPub) + + for { + bal := engine.GetBalanceEC(s, ecPub) + time.Sleep(balanceWaitInterval) + + if bal > target { + s.LogPrintf(logName, "FoundEcBalancerOver%v: %v", target, bal) + return bal + } + } +} + +// loop until balance is >= target +func WaitForFctBalanceUnder(s *state.State, fctPub string, target int64) int64 { + + s.LogPrintf(logName, "WaitForFctBalanceUnder%v: %v", target, fctPub) + + for { + bal := engine.GetBalance(s, fctPub) + time.Sleep(balanceWaitInterval) + + if bal < target { 
+ s.LogPrintf(logName, "FoundFctBalanceUnder%v: %v", target, bal) + return bal + } + } +} + +// loop until balance is <= target +func WaitForFctBalanceOver(s *state.State, fctPub string, target int64) int64 { + + s.LogPrintf(logName, "WaitForFctBalanceOver%v: %v", target, fctPub) + + for { + bal := engine.GetBalance(s, fctPub) + time.Sleep(balanceWaitInterval) + + if bal > target { + s.LogPrintf(logName, "FoundMaxFctBalanceOver%v: %v", target, bal) + return bal + } + } +} diff --git a/testHelper/simulation.go b/testHelper/simulation.go index 3c100cc10e..9a8197ae7a 100644 --- a/testHelper/simulation.go +++ b/testHelper/simulation.go @@ -8,12 +8,16 @@ import ( "io/ioutil" "net/http" "os" + "path/filepath" "reflect" "strconv" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/FactomProject/factomd/common/globals" + "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/elections" "github.com/FactomProject/factomd/engine" @@ -34,24 +38,24 @@ var ExpectedHeight, Leaders, Audits, Followers int var startTime, endTime time.Time var RanSimTest = false // only run 1 sim test at a time -//EX. 
state0 := SetupSim("LLLLLLLLLLLLLLLAAAAAAAAAA", map[string]string {"--controlpanelsetting" : "readwrite"}, t) -func SetupSim(GivenNodes string, UserAddedOptions map[string]string, height int, electionsCnt int, RoundsCnt int, t *testing.T) *state.State { - fmt.Println("SetupSim(", GivenNodes, ",", UserAddedOptions, ",", height, ",", electionsCnt, ",", RoundsCnt, ")") - ExpectedHeight = height - l := len(GivenNodes) +// start simulation without promoting nodes to the authority set +// this is useful for creating scripts that will start/stop a simulation outside of the context of a unit test +// this allows for consistent tweaking of a simulation to induce load add message loss or adjust timing +func StartSim(GivenNodes string, UserAddedOptions map[string]string) *state.State { + CmdLineOptions := map[string]string{ "--db": "Map", "--network": "LOCAL", "--net": "alot+", "--enablenet": "false", - "--blktime": "10", - "--count": fmt.Sprintf("%v", l), + "--blktime": "15", + "--count": fmt.Sprintf("%v", len(GivenNodes)), "--startdelay": "1", "--stdoutlog": "out.txt", "--stderrlog": "out.txt", "--checkheads": "false", "--controlpanelsetting": "readwrite", - "--debuglog": ".|faulting|bad", + "--debuglog": "faulting|bad", "--logPort": "37000", "--port": "37001", "--controlpanelport": "37002", @@ -64,7 +68,7 @@ func SetupSim(GivenNodes string, UserAddedOptions map[string]string, height int, if key != "--debuglog" && value != "" { CmdLineOptions[key] = value } else { - CmdLineOptions[key] = CmdLineOptions[key] + "|" + value // add debug log flags to the default + CmdLineOptions[key] = value + "|" + CmdLineOptions[key] // add debug log flags to the default } // remove options not supported by the current flags set so we can merge this update into older code bases } @@ -121,12 +125,30 @@ func SetupSim(GivenNodes string, UserAddedOptions map[string]string, height int, typeOfT.Field(i).Name, f.Type(), f.Interface()) } fmt.Println() + return engine.Factomd(params, 
false).(*state.State) + +} + +//EX. state0 := SetupSim("LLLLLLLLLLLLLLLAAAAAAAAAA", map[string]string {"--controlpanelsetting" : "readwrite"}, t) +func SetupSim(GivenNodes string, UserAddedOptions map[string]string, height int, electionsCnt int, RoundsCnt int, t *testing.T) *state.State { + fmt.Println("SetupSim(", GivenNodes, ",", UserAddedOptions, ",", height, ",", electionsCnt, ",", RoundsCnt, ")") + + if UserAddedOptions["--factomhome"] == "" { + // default to create a new home dir for each sim test if not specificed + homeDir := GetSimTestHome(t) + err := os.MkdirAll(filepath.Join(homeDir, "/.factom/m2"), 0755) + if err != nil { + t.Fatal(err) + } + UserAddedOptions["--factomhome"] = homeDir + } + state0 := StartSim(GivenNodes, UserAddedOptions) + ExpectedHeight = height blkt := globals.Params.BlkTime roundt := elections.RoundTimeout et := elections.FaultTimeout startTime = time.Now() - state0 := engine.Factomd(params, false).(*state.State) // statusState = state0 calctime := time.Duration(float64(((height+3)*blkt)+(electionsCnt*et)+(RoundsCnt*roundt))*1.1) * time.Second endTime = time.Now().Add(calctime) @@ -157,6 +179,7 @@ func SetupSim(GivenNodes string, UserAddedOptions map[string]string, height int, WaitMinutes(state0, 1) // wait till initial DBState message for the genesis block is processed creatingNodes(GivenNodes, state0, t) + l := len(GivenNodes) t.Logf("Allocated %d nodes", l) if len(engine.GetFnodes()) != l { t.Fatalf("Should have allocated %d nodes", l) @@ -243,6 +266,7 @@ func StatusEveryMinute(s *state.State) { statusState = s go func() { for { + // If the state is no longer running, we can stop printing s := statusState if s != nil { newMinute := (s.CurrentMinute + 1) % 10 @@ -261,6 +285,8 @@ func StatusEveryMinute(s *state.State) { } engine.PrintOneStatus(0, 0) + } else { + return } } }() @@ -385,6 +411,31 @@ func AdjustAuthoritySet(adjustingNodes string) { Followers = Followers - follow } +func isAuditor(fnode int) bool { + nodes := 
engine.GetFnodes() + list := nodes[0].State.ProcessLists.Get(nodes[0].State.LLeaderHeight) + foundAudit, _ := list.GetAuditServerIndexHash(nodes[fnode].State.GetIdentityChainID()) + return foundAudit +} + +func isFollower(fnode int) bool { + return !(isAuditor(fnode) || engine.GetFnodes()[fnode].State.Leader) +} + +func AssertAuthoritySet(t *testing.T, givenNodes string) { + nodes := engine.GetFnodes() + for i, c := range []byte(givenNodes) { + switch c { + case 'L': + assert.True(t, nodes[i].State.Leader, "Expected node %v to be a leader", i) + case 'A': + assert.True(t, isAuditor(i), "Expected node %v to be an auditor", i) + default: + assert.True(t, isFollower(i), "Expected node %v to be a follower", i) + } + } +} + func CheckAuthoritySet(t *testing.T) { leadercnt, auditcnt, followercnt := CountAuthoritySet() @@ -416,8 +467,9 @@ func Halt(t *testing.T) { close(quit) t.Log("Shutting down the network") for _, fn := range engine.GetFnodes() { - fn.State.ShutdownChan <- 1 + fn.State.ShutdownNode(1) } + // sleep long enough for everyone to see the shutdown. 
time.Sleep(time.Duration(globals.Params.BlkTime) * time.Second) } @@ -466,17 +518,58 @@ func v2Request(req *primitives.JSON2Request, port int) (*primitives.JSON2Respons return nil, nil } -func ResetFactomHome(t *testing.T, subDir string) { +// use a test specific dir for simTest +func GetSimTestHome(t *testing.T) string { dir, err := os.Getwd() if err != nil { t.Fatal(err) } - globals.Params.FactomHome = dir + "/.sim/" + subDir - os.Setenv("FACTOM_HOME", globals.Params.FactomHome) + return dir + "/.sim/" + GetTestName() +} - t.Logf("Removing old run in %s", globals.Params.FactomHome) - if err := os.RemoveAll(globals.Params.FactomHome); err != nil { +// re-use a common dir for longTest +func GetLongTestHome(t *testing.T) string { + dir, err := os.Getwd() + if err != nil { t.Fatal(err) } + + return dir + "/.sim" +} + +// remove files from a home dir and remake .factom config dir +func ResetTestHome(homeDir string, t *testing.T) { + t.Logf("Removing old test run in %s", homeDir) + os.RemoveAll(homeDir) + os.MkdirAll(homeDir+"/.factom/m2", 0755) +} + +func ResetSimHome(t *testing.T) string { + h := GetSimTestHome(t) + ResetTestHome(h, t) + return h +} + +func AddFNode() { + engine.AddNode() + Followers++ +} + +func WaitForEntry(s *state.State, hash interfaces.IHash) bool { + s.LogPrintf(logName, "WaitForEntry: %s", hash.String()) + //hash, _ := primitives.NewShaHashFromStr(entryhash) + + for { + entry, err := s.FetchEntryByHash(hash) + if err != nil { + panic(err) + } + if entry != nil { + return true + } + + time.Sleep(time.Millisecond * 200) + } + return false } diff --git a/testHelper/stateFERHelper.go b/testHelper/stateFERHelper.go index ef1c511085..d5145b4e01 100644 --- a/testHelper/stateFERHelper.go +++ b/testHelper/stateFERHelper.go @@ -161,7 +161,7 @@ func CreateAndPopulateTestDatabaseOverlayForFER(testEntries []FEREntryWithHeight } } - err = dbo.RebuildDirBlockInfo() + err = dbo.ReparseAnchorChains() if err != nil { panic(err) } diff --git 
a/testHelper/testHelper.go b/testHelper/testHelper.go index 7d8fe97d12..5c760d0628 100644 --- a/testHelper/testHelper.go +++ b/testHelper/testHelper.go @@ -3,6 +3,13 @@ package testHelper //A package for functions used multiple times in tests that aren't useful in production code. import ( + "bytes" + "encoding/binary" + "os/exec" + "regexp" + "runtime" + + "github.com/FactomProject/factom" "github.com/FactomProject/factomd/common/adminBlock" "github.com/FactomProject/factomd/common/constants" "github.com/FactomProject/factomd/common/directoryBlock" @@ -12,15 +19,14 @@ import ( "github.com/FactomProject/factomd/common/primitives" "github.com/FactomProject/factomd/database/databaseOverlay" "github.com/FactomProject/factomd/database/mapdb" - //"github.com/FactomProject/factomd/engine" - //"github.com/FactomProject/factomd/log" + "time" - "github.com/FactomProject/factomd/state" - //"fmt" "fmt" "os" + "github.com/FactomProject/factomd/state" + "github.com/FactomProject/factomd/common/messages/electionMsgs" ) @@ -61,6 +67,46 @@ func CreatePopulateAndExecuteTestState() *state.State { return s } +func CreateAndPopulateStaleHolding() *state.State { + s := CreateAndPopulateTestState() + + // TODO: refactor into test helpers + a := AccountFromFctSecret("Fs2zQ3egq2j99j37aYzaCddPq9AF3mgh64uG9gRaDAnrkjRx3eHs") + + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + extids := [][]byte{encode(fmt.Sprintf("makeStaleMessages"))} + + e := factom.Entry{ + ChainID: id, + ExtIDs: extids, + Content: encode(fmt.Sprintf("this is a stale message")), + } + + // create stale MilliTime + mockTime := func() (r []byte) { + buf := new(bytes.Buffer) + t := time.Now().UnixNano() + m := t/1e6 - state.FilterTimeLimit // make msg too old + binary.Write(buf, binary.BigEndian, m) + return buf.Bytes()[2:] + } + + // adding a commit w/ no REVEAL + m, _ := 
ComposeCommitEntryMsg(a.Priv, e) + copy(m.CommitEntry.MilliTime[:], mockTime()) + + // add commit to holding + s.Hold.Add(m.GetMsgHash().Fixed(), m) + + return s +} + func CreateAndPopulateTestState() *state.State { s := new(state.State) s.TimestampAtBoot = new(primitives.Timestamp) @@ -148,6 +194,7 @@ func ExecuteAllBlocksFromDatabases(s *state.State) { msgs := GetAllDBStateMsgsFromDatabase(s) for _, dbs := range msgs { dbs.(*messages.DBStateMsg).IgnoreSigs = true + dbs.(*messages.DBStateMsg).IsInDB = true s.FollowerExecuteDBState(dbs) } @@ -168,19 +215,7 @@ func CreateTestDBStateList() []interfaces.IMsg { return answer } -func MakeSureAnchorValidationKeyIsPresent() { - priv := NewPrimitivesPrivateKey(0) - pub := priv.Pub - for _, v := range databaseOverlay.AnchorSigPublicKeys { - if v.String() == pub.String() { - return - } - } - databaseOverlay.AnchorSigPublicKeys = append(databaseOverlay.AnchorSigPublicKeys, pub) -} - func PopulateTestDatabaseOverlay(dbo *databaseOverlay.Overlay) { - MakeSureAnchorValidationKeyIsPresent() var prev *BlockSet = nil var err error @@ -230,7 +265,7 @@ func PopulateTestDatabaseOverlay(dbo *databaseOverlay.Overlay) { } } /* - err = dbo.RebuildDirBlockInfo() + err = dbo.ReparseAnchorChains() if err != nil { panic(err) } @@ -378,3 +413,39 @@ func PrintList(title string, list map[string]uint64) { fmt.Printf("%v - %v:%v\n", title, addr, amt) } } + +func SystemCall(cmd string) []byte { + fmt.Println("SystemCall(\"", cmd, "\")") + out, err := exec.Command("sh", "-c", cmd).Output() + if err != nil { + foo := err.Error() + fmt.Println(foo) + os.Exit(1) + panic(err) + } + fmt.Print(string(out)) + return out +} + +var testNameRe = regexp.MustCompile(`\.Test\w+$`) + +// find Test Function name in stack +func GetTestName() (name string) { + targetFrameIndex := 4 // limit caller frame depth to check for a test name + + programCounters := make([]uintptr, targetFrameIndex+2) + n := runtime.Callers(0, programCounters) + + if n > 0 { + frames := 
runtime.CallersFrames(programCounters[:n]) + var frameCandidate runtime.Frame + for more, frameIndex := true, 0; more && frameIndex <= targetFrameIndex; frameIndex++ { + frameCandidate, more = frames.Next() + if testNameRe.MatchString(frameCandidate.Function) { + return testNameRe.FindString(frameCandidate.Function)[1:] + } + } + } + + return name +} diff --git a/testHelper/testHelper_test.go b/testHelper/testHelper_test.go index f2b4fc1133..198e725c20 100644 --- a/testHelper/testHelper_test.go +++ b/testHelper/testHelper_test.go @@ -1,8 +1,12 @@ package testHelper_test import ( + "bytes" "crypto/rand" + "github.com/FactomProject/factom" + "github.com/FactomProject/factomd/util" + "github.com/FactomProject/factomd/engine" "github.com/FactomProject/ed25519" @@ -175,3 +179,94 @@ func TestTxnCreate(t *testing.T) { assert.Equal(t, outAddress, txn.Outputs[0].GetUserAddress()) } + +func TestCommitEntry(t *testing.T) { + + pkey := primitives.RandomPrivateKey() + //ecPriv, _:= primitives.PrivateKeyStringToHumanReadableECPrivateKey(pkey.PrivateKeyString()) + //ecAdd, _ := factoid.PublicKeyStringToECAddressString(pkey.PublicKeyString()) + //fmt.Printf("%v\n%v\n%v\n", ecPriv, ecPub, ecAdd) + + encode := func(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() + } + + e := factom.Entry{ + ChainID: hex.EncodeToString(encode("chainfoo")), + ExtIDs: [][]byte{encode("foo"), encode("bar")}, + Content: encode("Hello World!"), + } + + commit, _ := ComposeCommitEntryMsg(pkey, e) + + assert.True(t, commit.CommitEntry.IsValid()) + assert.True(t, commit.IsValid()) +} + +// KLUDGE this is likely duplicated code +func encode(s string) []byte { + b := bytes.Buffer{} + b.WriteString(s) + return b.Bytes() +} + +func TestRevealEntry(t *testing.T) { + pkey := primitives.RandomPrivateKey() + + e := factom.Entry{ + ChainID: hex.EncodeToString(encode("chainfoo")), + ExtIDs: [][]byte{encode("foo"), encode("bar")}, + Content: encode("Hello World!"), + } + + reveal, err := 
ComposeRevealEntryMsg(pkey, &e) + assert.Nil(t, err) + assert.True(t, reveal.IsValid()) + //println(reveal.String()) + //println(reveal.Entry.String()) +} + +func TestAccountHelper(t *testing.T) { + fctS := "Fs1d5u3kambHECzarPsXWQTtYyf7womvg9u6kmFDm8F9cv5bSysh" + a := AccountFromFctSecret(fctS) + assert.Equal(t, a.FctPriv(), fctS) +} + +func TestChainCommit(t *testing.T) { + b := GetBankAccount() + id := "92475004e70f41b94750f4a77bf7b430551113b25d3d57169eadca5692bb043d" + extids := [][]byte{encode("foo"), encode("bar")} + + e := factom.Entry{ChainID: id, ExtIDs: extids, Content: encode("Hello World!")} + c := factom.NewChain(&e) + assert.Equal(t, c.ChainID, id) + + m, err := ComposeChainCommit(b.Priv, c) + + assert.Nil(t, err) + assert.True(t, m.CommitChain.IsValid()) + assert.True(t, m.IsValid()) +} + +// test that we can get the name of our test +func TestGetName(t *testing.T) { + TestGetFoo := func() string { + // add extra frame depth + return GetTestName() + } + assert.Equal(t, "TestGetName", TestGetFoo()) +} + +func TestResetFactomHome(t *testing.T) { + s := GetSimTestHome(t) + t.Logf("simhome: %v", s) + + h := ResetSimHome(t) + + t.Logf("reset home: %v", h) + t.Logf("util home: %v", util.GetHomeDir()) + + assert.Equal(t, s, h) +} diff --git a/testHelper/wsapi.go b/testHelper/wsapi.go index 15712a59ad..d5a678f585 100644 --- a/testHelper/wsapi.go +++ b/testHelper/wsapi.go @@ -1,10 +1,13 @@ package testHelper import ( + "bytes" "encoding/json" "fmt" "net/http" + "github.com/FactomProject/factomd/engine" + "github.com/FactomProject/web" ) @@ -108,3 +111,27 @@ func (t *TestResponseWriter) Write(b []byte) (int, error) { func GetBody(context *web.Context) string { return context.ResponseWriter.(*TestResponseWriter).Body } + +// REVIEW consider renaming since this is the debug url +func getAPIUrl() string { + return "http://localhost:" + fmt.Sprint(engine.GetFnodes()[0].State.GetPort()) + "/debug" +} + +func postRequest(jsonStr string) (*http.Response, error) { + 
req, err := http.NewRequest("POST", getAPIUrl(), bytes.NewBuffer([]byte(jsonStr))) + if err != nil { + return nil, err + } + req.Header.Set("content-type", "text/plain;") + + client := &http.Client{} + return client.Do(req) +} + +func SetInputFilter(apiRegex string) (*http.Response, error) { + return postRequest(`{"jsonrpc": "2.0", "id": 0, "method": "message-filter", "params":{"output-regex":"", "input-regex":"` + apiRegex + `"}}`) +} + +func SetOutputFilter(apiRegex string) (*http.Response, error) { + return postRequest(`{"jsonrpc": "2.0", "id": 0, "method": "message-filter", "params":{"output-regex":"` + apiRegex + `", "input-regex":""}}`) +} diff --git a/util/config.go b/util/config.go index 812022dc2f..f9bf98759c 100644 --- a/util/config.go +++ b/util/config.go @@ -40,6 +40,8 @@ type FactomdConfig struct { ExchangeRateAuthorityPublicKeyMainNet string ExchangeRateAuthorityPublicKeyTestNet string ExchangeRateAuthorityPublicKeyLocalNet string + BitcoinAnchorRecordPublicKeys []string + EthereumAnchorRecordPublicKeys []string // Network Configuration Network string @@ -58,11 +60,15 @@ type FactomdConfig struct { CustomSpecialPeers string CustomBootstrapIdentity string CustomBootstrapKey string + P2PIncoming int + P2POutgoing int FactomdTlsEnabled bool FactomdTlsPrivateKey string FactomdTlsPublicCert string FactomdRpcUser string FactomdRpcPass string + RequestTimeout int + RequestLimit int CorsDomains string ChangeAcksHeight uint32 @@ -140,6 +146,10 @@ CustomSeedURL = "" CustomSpecialPeers = "" CustomBootstrapIdentity = 38bab1455b7bd7e5efd15c53c777c79d0c988e9210f1da49a99d95b3a6417be9 CustomBootstrapKey = cc1985cdfae4e32b5a454dfda8ce5e1361558482684f3367649c3ad852c8e31a +; The maximum number of other peers dialing into this node that will be accepted +P2PIncoming = 200 +; The maximum number of peers this node will attempt to dial into +P2POutgoing = 32 ; --------------- NodeMode: FULL | SERVER ---------------- NodeMode = FULL LocalServerPrivKey = 
4c38c72fc5cdad68f13b74674d3ffb1f3d63a112710868c9b08946553448d26d @@ -162,6 +172,16 @@ FactomdTlsPublicCert = "/full/path/to/factomdAPIpub.cert" FactomdRpcUser = "" FactomdRpcPass = "" +; RequestTimeout is the amount of time in seconds before a pending request for a +; missing DBState is considered too old and the state is put back into the +; missing states list. If RequestTimout is not set or is set to 0 it will become +; 1/10th of DirectoryBlockInSeconds +;RequestTimeout = 30 +; RequestLimit is the maximum number of pending requests for missing states. +; factomd will stop making DBStateMissing requests until current requests are +; moved out of the waiting list +RequestLimit = 200 + ; This paramater allows Cross-Origin Resource Sharing (CORS) so web browsers will use data returned from the API when called from the listed URLs ; Example paramaters are "http://www.example.com, http://anotherexample.com, *" CorsDomains = "" @@ -240,6 +260,8 @@ func (s *FactomdConfig) String() string { out.WriteString(fmt.Sprintf("\n CustomSpecialPeers %v", s.App.CustomSpecialPeers)) out.WriteString(fmt.Sprintf("\n CustomBootstrapIdentity %v", s.App.CustomBootstrapIdentity)) out.WriteString(fmt.Sprintf("\n CustomBootstrapKey %v", s.App.CustomBootstrapKey)) + out.WriteString(fmt.Sprintf("\n P2PIncoming %v", s.App.P2PIncoming)) + out.WriteString(fmt.Sprintf("\n P2POutgoing %v", s.App.P2POutgoing)) out.WriteString(fmt.Sprintf("\n NodeMode %v", s.App.NodeMode)) out.WriteString(fmt.Sprintf("\n IdentityChainID %v", s.App.IdentityChainID)) out.WriteString(fmt.Sprintf("\n LocalServerPrivKey %v", s.App.LocalServerPrivKey)) @@ -253,6 +275,8 @@ func (s *FactomdConfig) String() string { out.WriteString(fmt.Sprintf("\n FactomdRpcUser %v", s.App.FactomdRpcUser)) out.WriteString(fmt.Sprintf("\n FactomdRpcPass %v", s.App.FactomdRpcPass)) out.WriteString(fmt.Sprintf("\n ChangeAcksHeight %v", s.App.ChangeAcksHeight)) + out.WriteString(fmt.Sprintf("\n BitcoinAnchorRecordPublicKeys %v", 
s.App.BitcoinAnchorRecordPublicKeys)) + out.WriteString(fmt.Sprintf("\n EthereumAnchorRecordPublicKeys %v", s.App.EthereumAnchorRecordPublicKeys)) out.WriteString(fmt.Sprintf("\n Log")) out.WriteString(fmt.Sprintf("\n LogPath %v", s.Log.LogPath)) @@ -370,6 +394,18 @@ func ReadConfig(filename string) *FactomdConfig { break } + if len(cfg.App.BitcoinAnchorRecordPublicKeys) == 0 { + cfg.App.BitcoinAnchorRecordPublicKeys = []string{ + "0426a802617848d4d16d87830fc521f4d136bb2d0c352850919c2679f189613a", // m1 key + "d569419348ed7056ec2ba54f0ecd9eea02648b260b26e0474f8c07fe9ac6bf83", // m2 key + } + } + if len(cfg.App.EthereumAnchorRecordPublicKeys) == 0 { + cfg.App.EthereumAnchorRecordPublicKeys = []string{ + "a4a7905ab2226f267c6b44e1d5db2c97638b7bbba72fd1823d053ccff2892455", + } + } + return cfg } diff --git a/util/config_test.go b/util/config_test.go index 11dee1ba00..05ecade68e 100644 --- a/util/config_test.go +++ b/util/config_test.go @@ -88,6 +88,10 @@ CustomSeedURL = "" CustomSpecialPeers = "" CustomBootstrapIdentity = 38bab1455b7bd7e5efd15c53c777c79d0c988e9210f1da49a99d95b3a6417be9 CustomBootstrapKey = cc1985cdfae4e32b5a454dfda8ce5e1361558482684f3367649c3ad852c8e31a +; The maximum number of other peers dialing into this node that will be accepted +P2PIncoming = 200 +; The maximum number of peers this node will attempt to dial into +P2POutgoing = 32 ; --------------- NodeMode: FULL | SERVER | LIGHT ---------------- NodeMode = FULL LocalServerPrivKey = 4c38c72fc5cdad68f13b74674d3ffb1f3d63a112710868c9b08946553448d26d diff --git a/wsapi/debugapi.go b/wsapi/debugapi.go index 34a2c4155a..b6340c0b1e 100644 --- a/wsapi/debugapi.go +++ b/wsapi/debugapi.go @@ -17,6 +17,8 @@ import ( "github.com/FactomProject/factomd/common/interfaces" "github.com/FactomProject/factomd/common/primitives" + "regexp" + "github.com/FactomProject/web" ) @@ -131,6 +133,8 @@ func HandleDebugRequest( break case "sim-ctrl": resp, jsonError = HandleSimControl(state, params) + case "message-filter": + 
resp, jsonError = HandleMessageFilter(state, params) default: jsonError = NewMethodNotFoundError() break @@ -454,3 +458,37 @@ type SetDropRateRequest struct { type GetCommands struct { Commands []string `json:"commands"` } + +func HandleMessageFilter(state interfaces.IState, params interface{}) (interface{}, *primitives.JSONError) { + fmt.Println("Factom Node Name: ", state.GetFactomNodeName()) + x, ok := params.(map[string]interface{}) + if !ok { + return nil, NewCustomInvalidParamsError("ERROR! Invalid params passed in") + } + + fmt.Println(`x["output-regex"]`, x["output-regex"]) + fmt.Println(`x["input-regex"]`, x["input-regex"]) + + OutputString := fmt.Sprintf("%s", x["output-regex"]) + if OutputString != "" { + OutputRegEx := regexp.MustCompile(OutputString) + state.PassOutputRegEx(OutputRegEx, OutputString) + + } else if OutputString == "off" { + state.PassOutputRegEx(nil, "") + } + + InputString := fmt.Sprintf("%s", x["input-regex"]) + if InputString != "" { + InputRegEx := regexp.MustCompile(InputString) + state.PassInputRegEx(InputRegEx, InputString) + + } else if InputString == "off" { + state.PassInputRegEx(nil, "") + } + + h := new(MessageFilter) + h.Params = "Success" + + return h, nil +} diff --git a/wsapi/instrumentation.go b/wsapi/instrumentation.go index d5a16c1a2e..3755be9e68 100644 --- a/wsapi/instrumentation.go +++ b/wsapi/instrumentation.go @@ -95,6 +95,11 @@ var ( Help: "Time it takes to compelete a rawdata", }) + HandleV2APICallAnchors = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "factomd_wsapi_v2_api_call_anchors_ns", + Help: "Time it takes to compelete a ", + }) + HandleV2APICallReceipt = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "factomd_wsapi_v2_api_call_receipt_ns", Help: "Time it takes to compelete a ", diff --git a/wsapi/wsapiStructs.go b/wsapi/wsapiStructs.go index 88cc4edd0b..66d5926d9f 100644 --- a/wsapi/wsapiStructs.go +++ b/wsapi/wsapiStructs.go @@ -83,6 +83,32 @@ type RawDataResponse struct { //TODO: add } 
+// For each chain: false or chain specific anchor response +type AnchorsResponse struct { + Height uint32 `json:"directoryblockheight"` + KeyMR string `json:"directoryblockkeymr"` + Bitcoin interface{} `json:"bitcoin"` + Ethereum interface{} `json:"ethereum"` +} + +type BitcoinAnchorResponse struct { + TransactionHash string `json:"transactionhash"` + BlockHash string `json:"blockhash"` +} + +type EthereumAnchorResponse struct { + RecordHeight int64 `json:"recordheight"` + DBHeightMax int64 `json:"dbheightmax"` + DBHeightMin int64 `json:"dbheightmin"` + WindowMR string `json:"windowmr"` + MerkleBranch []*primitives.MerkleNode `json:"merklebranch"` + + ContractAddress string `json:"contractaddress"` + TxID string `json:"txid"` + BlockHash string `json:"blockhash"` + TxIndex int64 `json:"txindex"` +} + type ReceiptResponse struct { Receipt *receipts.Receipt `json:"receipt"` } @@ -243,6 +269,11 @@ type HeightRequest struct { Height int64 `json:"height"` } +type HeightOrHashRequest struct { + Height *int64 `json:"height,omitempty"` + Hash string `json:"hash,omitempty"` +} + type ChainIDRequest struct { ChainID string `json:"chainid"` } @@ -286,6 +317,13 @@ type SendRawMessageRequest struct { Message string `json:"message"` } +// TODO: kept as "hash" for backwards compatibility (receipt call used to use the HashRequest), +// but in API v3 this should specify that its an entry hash +type ReceiptRequest struct { + EntryHash string `json:"hash"` + IncludeRawEntry bool `json:"includerawentry"` +} + type FactiodAccounts struct { NumbOfAccounts string `json:numberofacc` Height uint32 `json:"height"` @@ -355,3 +393,7 @@ type AuditStatus struct { ID string `json:"id"` Online bool `json:"online"` } + +type MessageFilter struct { + Params string `json:"params"` +} diff --git a/wsapi/wsapiV2.go b/wsapi/wsapiV2.go index 3424620415..873e0e67fc 100644 --- a/wsapi/wsapiV2.go +++ b/wsapi/wsapiV2.go @@ -15,7 +15,9 @@ import ( "strings" "time" + "github.com/FactomProject/factomd/anchor" 
"github.com/FactomProject/factomd/common/constants" + "github.com/FactomProject/factomd/common/directoryBlock/dbInfo" "github.com/FactomProject/factomd/common/entryBlock" "github.com/FactomProject/factomd/common/entryCreditBlock" "github.com/FactomProject/factomd/common/factoid" @@ -74,99 +76,70 @@ func HandleV2Request(state interfaces.IState, j *primitives.JSON2Request) (*prim params := j.Params state.LogPrintf("apilog", "request %v", j.String()) switch j.Method { + case "anchors": + resp, jsonError = HandleV2Anchors(state, params) case "chain-head": resp, jsonError = HandleV2ChainHead(state, params) - break case "commit-chain": resp, jsonError = HandleV2CommitChain(state, params) - break case "commit-entry": resp, jsonError = HandleV2CommitEntry(state, params) - break case "current-minute": resp, jsonError = HandleV2CurrentMinute(state, params) - break case "directory-block": resp, jsonError = HandleV2DirectoryBlock(state, params) - break case "directory-block-head": resp, jsonError = HandleV2DirectoryBlockHead(state, params) - break case "entry-block": resp, jsonError = HandleV2EntryBlock(state, params) - break case "admin-block": resp, jsonError = HandleV2AdminBlock(state, params) - break case "factoid-block": resp, jsonError = HandleV2FactoidBlock(state, params) - break case "entrycredit-block": resp, jsonError = HandleV2EntryCreditBlock(state, params) - break case "entry": resp, jsonError = HandleV2Entry(state, params) - break case "entry-credit-balance": resp, jsonError = HandleV2EntryCreditBalance(state, params) - break case "entry-credit-rate": resp, jsonError = HandleV2EntryCreditRate(state, params) - break case "factoid-balance": resp, jsonError = HandleV2FactoidBalance(state, params) - break case "factoid-submit": resp, jsonError = HandleV2FactoidSubmit(state, params) - break case "heights": resp, jsonError = HandleV2Heights(state, params) - break case "properties": resp, jsonError = HandleV2Properties(state, params) - break case "raw-data": resp, 
jsonError = HandleV2RawData(state, params) - break case "receipt": resp, jsonError = HandleV2Receipt(state, params) - break case "reveal-chain": resp, jsonError = HandleV2RevealChain(state, params) - break case "reveal-entry": resp, jsonError = HandleV2RevealEntry(state, params) - break case "factoid-ack": resp, jsonError = HandleV2FactoidACK(state, params) - break case "entry-ack": resp, jsonError = HandleV2EntryACK(state, params) - break case "pending-entries": resp, jsonError = HandleV2GetPendingEntries(state, params) - break case "pending-transactions": resp, jsonError = HandleV2GetPendingTransactions(state, params) - break case "send-raw-message": resp, jsonError = HandleV2SendRawMessage(state, params) - break case "transaction": resp, jsonError = HandleV2GetTranasction(state, params) - break case "dblock-by-height": resp, jsonError = HandleV2DBlockByHeight(state, params) - break case "ecblock-by-height": resp, jsonError = HandleV2ECBlockByHeight(state, params) - break case "fblock-by-height": resp, jsonError = HandleV2FBlockByHeight(state, params) - break case "ablock-by-height": resp, jsonError = HandleV2ABlockByHeight(state, params) - break case "authorities": resp, jsonError = HandleAuthorities(state, params) case "tps-rate": @@ -179,11 +152,10 @@ func HandleV2Request(state interfaces.IState, j *primitives.JSON2Request) (*prim resp, jsonError = HandleV2MultipleECBalances(state, params) case "diagnostics": resp, jsonError = HandleV2Diagnostics(state, params) - //case "factoid-accounts": - // resp, jsonError = HandleV2Accounts(state, params) + //case "factoid-accounts": + // resp, jsonError = HandleV2Accounts(state, params) default: jsonError = NewMethodNotFoundError() - break } if jsonError != nil { state.LogPrintf("apilog", "error %v", jsonError) @@ -736,24 +708,155 @@ func HandleV2RawData(state interfaces.IState, params interface{}) (interface{}, return d, nil } +func HandleV2Anchors(state interfaces.IState, params interface{}) (interface{}, 
*primitives.JSONError) { + n := time.Now() + defer HandleV2APICallAnchors.Observe(float64(time.Since(n).Nanoseconds())) + + request := new(HeightOrHashRequest) + err := MapToObject(params, request) + if err != nil { + return nil, NewInvalidParamsError() + } + + var hash interfaces.IHash + var directoryBlockHeight uint32 + dbo := state.GetDB() + if request.Height != nil { + directoryBlockHeight = uint32(*request.Height) + } else if request.Hash != "" { + hash, err = primitives.HexToHash(request.Hash) + if err != nil { + return nil, NewInvalidHashError() + } + // Find the object at hash and get its directory block height + db := state.GetDB() + if dBlock, _ := db.FetchDBlock(hash); dBlock != nil { + directoryBlockHeight = dBlock.GetDatabaseHeight() + } else if entry, _ := state.FetchEntryByHash(hash); entry != nil { + dBlockHash, err := db.FetchIncludedIn(hash) + if err != nil { + return nil, NewInternalDatabaseError() + } + eBlock, err := db.FetchEBlock(dBlockHash) + if err != nil { + return nil, NewInternalDatabaseError() + } + directoryBlockHeight = eBlock.GetDatabaseHeight() + } else if aBlock, _ := db.FetchABlock(hash); aBlock != nil { + directoryBlockHeight = aBlock.GetDatabaseHeight() + } else if eBlock, _ := db.FetchEBlock(hash); eBlock != nil { + directoryBlockHeight = eBlock.GetDatabaseHeight() + } else if ecBlock, _ := db.FetchECBlock(hash); ecBlock != nil { + directoryBlockHeight = ecBlock.GetDatabaseHeight() + } else if fBlock, _ := db.FetchFBlock(hash); fBlock != nil { + directoryBlockHeight = fBlock.GetDatabaseHeight() + } else if tx, _ := state.FetchECTransactionByHash(hash); tx != nil { + entryHash := tx.GetEntryHash() + dBlockHash, err := db.FetchIncludedIn(entryHash) + if err != nil { + return nil, NewInternalDatabaseError() + } + eBlock, err := db.FetchEBlock(dBlockHash) + if err != nil { + return nil, NewInternalDatabaseError() + } + directoryBlockHeight = eBlock.GetDatabaseHeight() + } else { + return nil, NewObjectNotFoundError() + } + } else { 
+ return nil, NewInvalidParamsError() + } + + directoryBlockKeyMR, err := dbo.FetchDBKeyMRByHeight(directoryBlockHeight) + if err != nil || directoryBlockKeyMR == nil { + return nil, NewBlockNotFoundError() + } + + response := new(AnchorsResponse) + response.Height = directoryBlockHeight + response.KeyMR = directoryBlockKeyMR.String() + response.Bitcoin = false + response.Ethereum = false + + // Search for AnchorRecords for the requested DBlock + for i := directoryBlockHeight; i < directoryBlockHeight+1000; i++ { + tempKeyMR, err := dbo.FetchDBKeyMRByHeight(uint32(i)) + if err != nil { + return nil, NewBlockNotFoundError() + } else if tempKeyMR == nil { + break + } + + dirBlockInfo, err := dbo.FetchDirBlockInfoByKeyMR(tempKeyMR) + if err != nil { + return nil, NewBlockNotFoundError() + } else if dirBlockInfo == nil { + continue + } + + dbi := dirBlockInfo.(*dbInfo.DirBlockInfo) + // Only add the bitcoin anchor info if at the requested height. Remove the restriction once bitcoin anchors are windowed as well. 
+ if i == directoryBlockHeight && dbi.BTCConfirmed { + bitcoin := new(BitcoinAnchorResponse) + bitcoin.TransactionHash = dbi.BTCTxHash.String() + bitcoin.BlockHash = dbi.BTCBlockHash.String() + response.Bitcoin = bitcoin + } + if dbi.EthereumConfirmed && !dbi.EthereumAnchorRecordEntryHash.IsSameAs(primitives.ZeroHash) { + anchorRecordEntry, err := dbo.FetchEntry(dbi.EthereumAnchorRecordEntryHash) + if err != nil { + return nil, NewCustomInternalError(err) + } + anchorRecordJSON := anchorRecordEntry.GetContent() + anchorRecord, err := anchor.UnmarshalAnchorRecord(anchorRecordJSON) + if err != nil { + return nil, NewCustomInternalError(err) + } + eth := new(EthereumAnchorResponse) + eth.DBHeightMax = int64(anchorRecord.DBHeightMax) + eth.DBHeightMin = int64(anchorRecord.DBHeightMin) + eth.WindowMR = anchorRecord.WindowMR + eth.RecordHeight = int64(anchorRecord.RecordHeight) + eth.ContractAddress = anchorRecord.Ethereum.ContractAddress + eth.TxID = anchorRecord.Ethereum.TxID + eth.BlockHash = anchorRecord.Ethereum.BlockHash + eth.TxIndex = anchorRecord.Ethereum.TxIndex + + var allWindowKeyMRs []interfaces.IHash + for i := eth.DBHeightMin; i <= eth.DBHeightMax; i++ { + keyMR, err := dbo.FetchDBKeyMRByHeight(uint32(i)) + if err != nil { + return nil, NewCustomInternalError(err) + } + allWindowKeyMRs = append(allWindowKeyMRs, keyMR) + } + eth.MerkleBranch = primitives.BuildMerkleBranchForHash(allWindowKeyMRs, directoryBlockKeyMR, true) + response.Ethereum = eth + break + } + } + + return response, nil +} + func HandleV2Receipt(state interfaces.IState, params interface{}) (interface{}, *primitives.JSONError) { n := time.Now() defer HandleV2APICallReceipt.Observe(float64(time.Since(n).Nanoseconds())) - hashkey := new(HashRequest) - err := MapToObject(params, hashkey) + request := new(ReceiptRequest) + err := MapToObject(params, request) if err != nil { return nil, NewInvalidParamsError() } - h, err := primitives.HexToHash(hashkey.Hash) + h, err := 
primitives.HexToHash(request.EntryHash) if err != nil { return nil, NewInvalidHashError() } - dbase := state.GetDB() - - receipt, err := receipts.CreateFullReceipt(dbase, h) + dbo := state.GetDB() + receipt, err := receipts.CreateFullReceipt(dbo, h, request.IncludeRawEntry) if err != nil { return nil, NewReceiptError() } @@ -903,6 +1006,19 @@ func HandleV2Entry(state interfaces.IState, params interface{}) (interface{}, *p if entry == nil { return nil, NewEntryNotFoundError() } + + // When fetching from the database, optimistic entry writing + // might have added entries not in the blockchain. + // To ensure the entry actually exists, we need to try + // fetching the eblock hash. If it exists, then the entry exists + // in the blockchain. + included, err := state.GetDB().FetchIncludedIn(h) + if err != nil { + return nil, NewInternalError() + } + if included == nil { + return nil, NewEntryNotFoundError() + } } e.ChainID = entry.GetChainIDHash().String()