diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
deleted file mode 100644
index 58c1a4a62ea..00000000000
--- a/.github/CODEOWNERS
+++ /dev/null
@@ -1,22 +0,0 @@
-# Lines starting with '#' are comments.
-# Each line is a file pattern followed by one or more owners.
-
-accounts/usbwallet @karalabe
-accounts/scwallet @gballet
-accounts/abi @gballet @MariusVanDerWijden
-cmd/clef @holiman
-cmd/puppeth @karalabe
-consensus @karalabe
-core/ @karalabe @holiman @rjl493456442
-eth/ @karalabe @holiman @rjl493456442
-graphql/ @gballet
-les/ @zsfelfoldi @rjl493456442
-light/ @zsfelfoldi @rjl493456442
-mobile/ @karalabe @ligi
-node/ @fjl @renaynay
-p2p/ @fjl @zsfelfoldi
-rpc/ @fjl @holiman
-p2p/simulations @fjl
-p2p/protocols @fjl
-p2p/testing @fjl
-signer/ @holiman
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index f03b1f98c7f..2f961977edd 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -110,7 +110,7 @@ func NewSimulatedBackendWithDatabase(database *ethdb.ObjectDatabase, alloc core.
config: genesis.Config,
txCacher: txCacher,
}
- backend.events = filters.NewEventSystem(&filterBackend{database, backend}, false)
+ backend.events = filters.NewEventSystem(&filterBackend{database, backend})
backend.emptyPendingBlock()
return backend
}
@@ -137,7 +137,7 @@ func NewSimulatedBackendWithConfig(alloc core.GenesisAlloc, config *params.Chain
config: genesis.Config,
txCacher: txCacher,
}
- backend.events = filters.NewEventSystem(&filterBackend{database, backend}, false)
+ backend.events = filters.NewEventSystem(&filterBackend{database, backend})
backend.emptyPendingBlock()
return backend
}
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index b3fa4ab1cc4..433c8cbc543 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -18,7 +18,6 @@ package keystore
import (
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -294,113 +293,3 @@ func TestCacheFind(t *testing.T) {
}
}
}
-
-func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
- var list []accounts.Account
- for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
- list = ks.Accounts()
- if reflect.DeepEqual(list, wantAccounts) {
- // ks should have also received change notifications
- select {
- case <-ks.changes:
- default:
- return fmt.Errorf("wasn't notified of new accounts")
- }
- return nil
- }
- time.Sleep(d)
- }
- return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
-}
-
-// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
-// is noticed by the watcher, and the account cache is updated accordingly
-func TestUpdatedKeyfileContents(t *testing.T) {
- t.Skip("This test is unstable")
-
- t.Parallel()
-
- // Create a temporary kesytore to test with
- rand.Seed(time.Now().UnixNano())
- dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
- ks := NewKeyStore(dir, LightScryptN, LightScryptP)
-
- list := ks.Accounts()
- if len(list) > 0 {
- t.Error("initial account list not empty:", list)
- }
- time.Sleep(100 * time.Millisecond)
-
- // Create the directory and copy a key file into it.
- os.MkdirAll(dir, 0700)
- defer os.RemoveAll(dir)
- file := filepath.Join(dir, "aaa")
-
- // Place one of our testfiles in there
- if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
- t.Fatal(err)
- }
-
- // ks should see the account.
- wantAccounts := []accounts.Account{cachetestAccounts[0]}
- wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
- if err := waitForAccounts(wantAccounts, ks); err != nil {
- t.Error(err)
- return
- }
-
- // needed so that modTime of `file` is different to its current value after forceCopyFile
- time.Sleep(1000 * time.Millisecond)
-
- // Now replace file contents
- if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
- t.Fatal(err)
- return
- }
- wantAccounts = []accounts.Account{cachetestAccounts[1]}
- wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
- if err := waitForAccounts(wantAccounts, ks); err != nil {
- t.Errorf("First replacement failed")
- t.Error(err)
- return
- }
-
- // needed so that modTime of `file` is different to its current value after forceCopyFile
- time.Sleep(1000 * time.Millisecond)
-
- // Now replace file contents again
- if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
- t.Fatal(err)
- return
- }
- wantAccounts = []accounts.Account{cachetestAccounts[2]}
- wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
- if err := waitForAccounts(wantAccounts, ks); err != nil {
- t.Errorf("Second replacement failed")
- t.Error(err)
- return
- }
-
- // needed so that modTime of `file` is different to its current value after ioutil.WriteFile
- time.Sleep(1000 * time.Millisecond)
-
- // Now replace file contents with crap
- if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
- t.Fatal(err)
- return
- }
- if err := waitForAccounts([]accounts.Account{}, ks); err != nil {
- t.Errorf("Emptying account file failed")
- t.Error(err)
- return
- }
-}
-
-// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
-func forceCopyFile(dst, src string) error {
- data, err := ioutil.ReadFile(src)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(dst, data, 0644)
-}
diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go
index 032bf4307f4..33204ea150e 100644
--- a/cmd/devp2p/nodesetcmd.go
+++ b/cmd/devp2p/nodesetcmd.go
@@ -95,8 +95,6 @@ var filterFlags = map[string]nodeFilterC{
"-ip": {1, ipFilter},
"-min-age": {1, minAgeFilter},
"-eth-network": {1, ethFilter},
- "-les-server": {0, lesFilter},
- "-snap": {0, snapFilter},
}
func parseFilters(args []string) ([]nodeFilter, error) {
@@ -183,23 +181,3 @@ func ethFilter(args []string) (nodeFilter, error) {
}
return f, nil
}
-
-func lesFilter(args []string) (nodeFilter, error) {
- f := func(n nodeJSON) bool {
- var les struct {
- _ []rlp.RawValue `rlp:"tail"`
- }
- return n.N.Load(enr.WithEntry("les", &les)) == nil
- }
- return f, nil
-}
-
-func snapFilter(args []string) (nodeFilter, error) {
- f := func(n nodeJSON) bool {
- var snap struct {
- _ []rlp.RawValue `rlp:"tail"`
- }
- return n.N.Load(enr.WithEntry("snap", &snap)) == nil
- }
- return f, nil
-}
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index f1a0772be6f..e60f79aabde 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -129,11 +129,6 @@ var (
Name: "noreturndata",
Usage: "disable return data output",
}
- EVMInterpreterFlag = cli.StringFlag{
- Name: "vm.evm",
- Usage: "External EVM configuration (default = built-in interpreter)",
- Value: "",
- }
)
var stateTransitionCommand = cli.Command{
@@ -185,7 +180,6 @@ func init() {
DisableStackFlag,
DisableStorageFlag,
DisableReturnDataFlag,
- EVMInterpreterFlag,
}
app.Commands = []cli.Command{
compileCommand,
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index d257891a9b2..3c5bb656842 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -215,9 +215,8 @@ func runCmd(ctx *cli.Context) error {
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
EVMConfig: vm.Config{
- Tracer: tracer,
- Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
- EVMInterpreter: ctx.GlobalString(EVMInterpreterFlag.Name),
+ Tracer: tracer,
+ Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
},
}
diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index 4b831995254..a5c662b8fd9 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -1892,10 +1892,6 @@ func main() {
}
defer pprof.StopCPUProfile()
}
- //db := ethdb.MustOpen("/home/akhounov/.ethereum/geth/chaindata")
- //db := ethdb.MustOpen(node.DefaultDataDir() + "/geth/chaindata")
- //check(err)
- //defer db.Close()
if *action == "cfg" {
flow.TestGenCfg()
}
@@ -1907,39 +1903,18 @@ func main() {
if *action == "syncChart" {
mychart()
}
- //testRebuild()
if *action == "testRewind" {
testRewind(*chaindata, *block, *rewind)
}
- //hashFile()
- //buildHashFromFile()
if *action == "testResolve" {
testResolve(*chaindata)
}
- //rlpIndices()
- //printFullNodeRLPs()
- //testStartup()
- //testDifficulty()
- //testRewindTests()
- //if *reset != -1 {
- // testReset(uint64(*reset))
- //}
if *action == "testBlockHashes" {
testBlockHashes(*chaindata, *block, common.HexToHash(*hash))
}
- //printBuckets(db)
- //printTxHashes()
- //relayoutKeys()
- //upgradeBlocks()
- //compareTries()
if *action == "invTree" {
invTree("root", "right", "diff", *name)
}
- //invTree("iw", "ir", "id", *block, true)
- //loadAccount()
- //printBranches(uint64(*block))
- //extractTrie(*block)
- //repair()
if *action == "readAccount" {
readAccount(*chaindata, common.HexToAddress(*account), uint64(*block), uint64(*rewind))
}
@@ -1952,8 +1927,6 @@ func main() {
if *action == "nextIncarnation" {
nextIncarnation(*chaindata, common.HexToHash(*account))
}
- //repairCurrent()
- //fmt.Printf("\u00b3\n")
if *action == "dumpStorage" {
dumpStorage()
}
diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go
index 18c714db2bd..434b52acc9b 100644
--- a/cmd/integration/commands/refetence_db.go
+++ b/cmd/integration/commands/refetence_db.go
@@ -368,14 +368,6 @@ MainLoop:
return ctx.Err()
case <-commitEvery.C:
log.Info("Progress", "bucket", bucket, "key", fmt.Sprintf("%x", k))
- //if err2 := dstTx.Commit(ctx); err2 != nil {
- // return err2
- //}
- //dstTx, err = dst.Begin(ctx, nil, ethdb.RW)
- //if err != nil {
- // return err
- //}
- //c = dstTx.Cursor(bucket)
}
}
prevK = nil
diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go
index d92a420be56..2239f723f6b 100644
--- a/cmd/state/commands/check_change_sets.go
+++ b/cmd/state/commands/check_change_sets.go
@@ -174,22 +174,6 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h
}
if !match {
- //fmt.Printf("\n\n")
- //fmt.Printf("All in DB: ==========================\n")
- //j := 0
- //err = changeset.Walk(historyDb, dbutils.PlainAccountChangeSetBucket, dbutils.EncodeBlockNumber(blockNum), 8*8, func(blockN uint64, k, v []byte) (bool, error) {
- // fmt.Printf("%d: 0x%x: %x\n", j, k, v)
- // j++
- // return true, nil
- //})
- //if err != nil {
- // return err
- //}
- //fmt.Printf("All Expected: ==========================\n")
- //for ii, c := range accountChanges.Changes {
- // fmt.Printf("%d: 0x%x: %x\n", ii, c.Key, c.Value)
- //}
-
return fmt.Errorf("check change set failed")
}
diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go
index 20310fa21f9..55556944523 100644
--- a/cmd/state/commands/opcode_tracer.go
+++ b/cmd/state/commands/opcode_tracer.go
@@ -575,28 +575,10 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB
for j := range t.Opcodes {
o := &t.Opcodes[j]
//only print to the summary the opcodes that are interesting
- //isRStackUsed := o.MaxRStack != 0
isOpFault := o.Fault != ""
- if isOpFault { // && !isRStackUsed {
+ if isOpFault {
fmt.Fprintf(ot.fsumWriter, "Opcode FAULT\tb=%d taddr=%s TxF=%s opF=%s tx=%s\n", blockNum, t.TxAddr, t.Fault, t.OpcodeFault, t.TxHash.String())
fmt.Fprint(ot.fsumWriter, "\n")
-
- //print the stack
- //if l := o.StackTop.Len(); l>0 {
- // fmt.Fprintf(ot.summary, "\t%d:", o.MaxStack)
- // for i := 0; i < l; i++ {
- // fmt.Fprintf(ot.summary, "%x ", o.StackTop.Back(i))
- // }
- //}
-
- //print the Rstack
- //if o.MaxRStack > 0 {
- // fmt.Fprintf(ot.fsumWriter, "\trs:%d:", o.MaxRStack)
- // //fmt.Printf("return stack used in block %d, tx %s", BlockNum)
- // for i := 0; i < o.MaxRStack; i++ {
- // fmt.Fprintf(ot.fsumWriter, "%x ", o.RetStackTop[i])
- // }
- //}
}
}
isTxFault := t.Fault != ""
diff --git a/cmd/utils/diskusage.go b/cmd/utils/diskusage.go
deleted file mode 100644
index 5e8931cc645..00000000000
--- a/cmd/utils/diskusage.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// +build !windows
-
-package utils
-
-import (
- "fmt"
-
- "golang.org/x/sys/unix"
-)
-
-func getFreeDiskSpace(path string) (uint64, error) { //nolint:deadcode, unused
- var stat unix.Statfs_t
- if err := unix.Statfs(path, &stat); err != nil {
- return 0, fmt.Errorf("failed to call Statfs: %v", err)
- }
-
- // Available blocks * size per block = available space in bytes
- var bavail = stat.Bavail
- //nolint:unconvert
- return uint64(bavail) * uint64(stat.Bsize), nil
-}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index bac427b0dac..68a40d1809f 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -50,7 +50,6 @@ import (
"github.com/ledgerwatch/turbo-geth/core/vm"
"github.com/ledgerwatch/turbo-geth/crypto"
"github.com/ledgerwatch/turbo-geth/eth"
- "github.com/ledgerwatch/turbo-geth/eth/downloader"
"github.com/ledgerwatch/turbo-geth/eth/ethconfig"
"github.com/ledgerwatch/turbo-geth/eth/gasprice"
"github.com/ledgerwatch/turbo-geth/ethdb"
@@ -66,8 +65,6 @@ import (
"github.com/urfave/cli"
)
-const localhost = "127.0.0.1"
-
func init() {
cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
@@ -243,41 +240,6 @@ var (
Name: "override.berlin",
Usage: "Manually specify Berlin fork-block, overriding the bundled setting",
}
- // Light server and client settings
- LightServeFlag = cli.IntFlag{
- Name: "light.serve",
- Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
- Value: ethconfig.Defaults.LightServ,
- }
- LightIngressFlag = cli.IntFlag{
- Name: "light.ingress",
- Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
- Value: ethconfig.Defaults.LightIngress,
- }
- LightEgressFlag = cli.IntFlag{
- Name: "light.egress",
- Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
- Value: ethconfig.Defaults.LightEgress,
- }
- LightMaxPeersFlag = cli.IntFlag{
- Name: "light.maxpeers",
- Usage: "Maximum number of light clients to serve, or light servers to attach to",
- Value: ethconfig.Defaults.LightPeers,
- }
- UltraLightServersFlag = cli.StringFlag{
- Name: "ulc.servers",
- Usage: "List of trusted ultra-light servers",
- Value: strings.Join(ethconfig.Defaults.UltraLightServers, ","),
- }
- UltraLightFractionFlag = cli.IntFlag{
- Name: "ulc.fraction",
- Usage: "Minimum % of trusted ultra-light servers required to announce a new head",
- Value: ethconfig.Defaults.UltraLightFraction,
- }
- UltraLightOnlyAnnounceFlag = cli.BoolFlag{
- Name: "ulc.onlyannounce",
- Usage: "Ultra light server sends announcements only",
- }
DownloadOnlyFlag = cli.BoolFlag{
Name: "download-only",
Usage: "Run in download only mode - only fetch blocks but not process them",
@@ -362,7 +324,7 @@ var (
// Performance tuning settings
CacheFlag = cli.IntFlag{
Name: "cache",
- Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node, 128 light mode)",
+ Usage: "Megabytes of memory allocated to internal caching (default = 4096 mainnet full node)",
Value: 1024,
}
CacheDatabaseFlag = cli.IntFlag{
@@ -559,20 +521,6 @@ var (
Usage: "Specify certificate authority",
Value: "",
}
- GraphQLEnabledFlag = cli.BoolFlag{
- Name: "graphql",
- Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.",
- }
- GraphQLCORSDomainFlag = cli.StringFlag{
- Name: "graphql.corsdomain",
- Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
- Value: "",
- }
- GraphQLVirtualHostsFlag = cli.StringFlag{
- Name: "graphql.vhosts",
- Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
- Value: strings.Join(node.DefaultConfig.GraphQLVirtualHosts, ","),
- }
WSEnabledFlag = cli.BoolFlag{
Name: "ws",
Usage: "Enable the WS-RPC server",
@@ -602,16 +550,6 @@ var (
Usage: "HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.",
Value: "",
}
- GraphQLListenAddrFlag = cli.StringFlag{
- Name: "graphql.addr",
- Usage: "GraphQL server listening interface",
- Value: node.DefaultGraphQLHost,
- }
- GraphQLPortFlag = cli.IntFlag{
- Name: "graphql.port",
- Usage: "GraphQL server listening port",
- Value: node.DefaultGraphQLPort,
- }
ExecFlag = cli.StringFlag{
Name: "exec",
Usage: "Execute JavaScript statement",
@@ -757,16 +695,6 @@ var (
Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements",
Value: metrics.DefaultConfig.InfluxDBTags,
}
- EWASMInterpreterFlag = cli.StringFlag{
- Name: "vm.ewasm",
- Usage: "External ewasm configuration (default = built-in interpreter)",
- Value: "",
- }
- EVMInterpreterFlag = cli.StringFlag{
- Name: "vm.evm",
- Usage: "External EVM configuration (default = built-in interpreter)",
- Value: "",
- }
)
var MetricFlags = []cli.Flag{MetricsEnabledFlag, MetricsEnabledExpensiveFlag, MetricsHTTPFlag, MetricsPortFlag}
@@ -843,12 +771,8 @@ func setNodeUserIdentCobra(f *pflag.FlagSet, cfg *node.Config) {
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls := params.MainnetBootnodes
switch {
- case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name):
- if ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name) {
- urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV4Flag.Name))
- } else {
- urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
- }
+ case ctx.GlobalIsSet(BootnodesFlag.Name):
+ urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
case ctx.GlobalBool(RopstenFlag.Name):
urls = params.RopstenBootnodes
case ctx.GlobalBool(RinkebyFlag.Name):
@@ -879,12 +803,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
urls := params.MainnetBootnodes
switch {
- case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name):
- if ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name) {
- urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV5Flag.Name))
- } else {
- urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
- }
+ case ctx.GlobalIsSet(BootnodesFlag.Name):
+ urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
case ctx.GlobalBool(RopstenFlag.Name):
urls = params.RopstenBootnodes
case ctx.GlobalBool(RinkebyFlag.Name):
@@ -941,71 +861,6 @@ func SplitAndTrim(input string) (ret []string) {
return ret
}
-// setGraphQL creates the GraphQL listener interface string from the set
-// command line flags, returning empty if the GraphQL endpoint is disabled.
-//func setGraphQL(ctx *cli.Context, cfg *node.Config) {
-// if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) {
-// cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
-// }
-// if ctx.GlobalIsSet(GraphQLVirtualHostsFlag.Name) {
-// cfg.GraphQLVirtualHosts = splitAndTrim(ctx.GlobalString(GraphQLVirtualHostsFlag.Name))
-// }
-//}
-
-// setWS creates the WebSocket RPC listener interface string from the set
-// command line flags, returning empty if the HTTP endpoint is disabled.
-//func setWS(ctx *cli.Context, cfg *node.Config) {
-// if ctx.GlobalBool(WSEnabledFlag.Name) && cfg.WSHost == "" {
-// cfg.WSHost = localhost
-// if ctx.GlobalIsSet(LegacyWSListenAddrFlag.Name) {
-// cfg.WSHost = ctx.GlobalString(LegacyWSListenAddrFlag.Name)
-// log.Warn("The flag --wsaddr is deprecated and will be removed in the future, please use --ws.addr")
-// }
-// if ctx.GlobalIsSet(WSListenAddrFlag.Name) {
-// cfg.WSHost = ctx.GlobalString(WSListenAddrFlag.Name)
-// }
-// }
-// if ctx.GlobalIsSet(LegacyWSPortFlag.Name) {
-// cfg.WSPort = ctx.GlobalInt(LegacyWSPortFlag.Name)
-// log.Warn("The flag --wsport is deprecated and will be removed in the future, please use --ws.port")
-// }
-// if ctx.GlobalIsSet(WSPortFlag.Name) {
-// cfg.WSPort = ctx.GlobalInt(WSPortFlag.Name)
-// }
-//
-// if ctx.GlobalIsSet(LegacyWSAllowedOriginsFlag.Name) {
-// cfg.WSOrigins = splitAndTrim(ctx.GlobalString(LegacyWSAllowedOriginsFlag.Name))
-// log.Warn("The flag --wsorigins is deprecated and will be removed in the future, please use --ws.origins")
-// }
-// if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) {
-// cfg.WSOrigins = splitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name))
-// }
-//
-// if ctx.GlobalIsSet(LegacyWSApiFlag.Name) {
-// cfg.WSModules = splitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name))
-// log.Warn("The flag --wsapi is deprecated and will be removed in the future, please use --ws.api")
-// }
-// if ctx.GlobalIsSet(WSApiFlag.Name) {
-// cfg.WSModules = splitAndTrim(ctx.GlobalString(WSApiFlag.Name))
-// }
-//
-// if ctx.GlobalIsSet(WSPathPrefixFlag.Name) {
-// cfg.WSPathPrefix = ctx.GlobalString(WSPathPrefixFlag.Name)
-// }
-//}
-
-// setIPC creates an IPC path configuration from the set command line flags,
-// returning an empty string if IPC was explicitly disabled, or the set path.
-//func setIPC(ctx *cli.Context, cfg *node.Config) {
-// CheckExclusive(ctx, IPCDisabledFlag, IPCPathFlag)
-// switch {
-// case ctx.GlobalBool(IPCDisabledFlag.Name):
-// cfg.IPCPath = ""
-// case ctx.GlobalIsSet(IPCPathFlag.Name):
-// cfg.IPCPath = ctx.GlobalString(IPCPathFlag.Name)
-// }
-//}
-
// makeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
@@ -1092,28 +947,18 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setBootstrapNodes(ctx, cfg)
setBootstrapNodesV5(ctx, cfg)
- lightClient := false
ethPeers := cfg.MaxPeers
- if lightClient {
- ethPeers = 0
- }
log.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
if ctx.GlobalIsSet(MaxPendingPeersFlag.Name) {
cfg.MaxPendingPeers = ctx.GlobalInt(MaxPendingPeersFlag.Name)
}
- if ctx.GlobalIsSet(NoDiscoverFlag.Name) || lightClient {
+ if ctx.GlobalIsSet(NoDiscoverFlag.Name) {
cfg.NoDiscovery = true
}
- // if we're running a light client or server, force enable the v5 peer discovery
- // unless it is explicitly disabled with --nodiscover note that explicitly specifying
- // --v5disc overrides --nodiscover, in which case the later only disables v4 discovery
- forceV5Discovery := (lightClient) && !ctx.GlobalBool(NoDiscoverFlag.Name)
if ctx.GlobalIsSet(DiscoveryV5Flag.Name) {
cfg.DiscoveryV5 = ctx.GlobalBool(DiscoveryV5Flag.Name)
- } else if forceV5Discovery {
- cfg.DiscoveryV5 = true
}
if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" {
@@ -1136,27 +981,10 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
// SetNodeConfig applies node-related command line flags to the config.
func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
SetP2PConfig(ctx, &cfg.P2P)
- //setIPC(ctx, cfg)
- //setGraphQL(ctx, cfg)
- //setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
setDataDir(ctx, cfg)
setSmartCard(ctx, cfg)
- //if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) ||
- // ctx.GlobalBool(HTTPEnabledFlag.Name) ||
- // ctx.GlobalIsSet(LegacyRPCPortFlag.Name) ||
- // ctx.GlobalIsSet(HTTPPortFlag.Name) ||
- // ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) ||
- // ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) ||
- // ctx.GlobalIsSet(LegacyRPCApiFlag.Name) ||
- // ctx.GlobalIsSet(HTTPApiFlag.Name) ||
- // ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) ||
- // ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) &&
- // cfg.HTTPHost == "" {
- // Fatalf("Turbo-Geth does not support native rpc. Use instead rpcdaemon.")
- //}
-
if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
}
@@ -1281,18 +1109,10 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *node.Config) {
}
}
-func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
- if ctx.GlobalIsSet(LegacyGpoBlocksFlag.Name) {
- cfg.Blocks = ctx.GlobalInt(LegacyGpoBlocksFlag.Name)
- log.Warn("The flag --gpoblocks is deprecated and will be removed in the future, please use --gpo.blocks")
- }
+func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
}
- if ctx.GlobalIsSet(LegacyGpoPercentileFlag.Name) {
- cfg.Percentile = ctx.GlobalInt(LegacyGpoPercentileFlag.Name)
- log.Warn("The flag --gpopercentile is deprecated and will be removed in the future, please use --gpo.percentile")
- }
if ctx.GlobalIsSet(GpoPercentileFlag.Name) {
cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name)
}
@@ -1302,7 +1122,7 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
}
//nolint
-func setGPOCobra(f *pflag.FlagSet, cfg *gasprice.Config, light bool) {
+func setGPOCobra(f *pflag.FlagSet, cfg *gasprice.Config) {
if v := f.Int(GpoBlocksFlag.Name, GpoBlocksFlag.Value, GpoBlocksFlag.Usage); v != nil {
cfg.Blocks = *v
}
@@ -1541,7 +1361,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
ks = keystores[0].(*keystore.KeyStore)
}
setEtherbase(ctx, ks, cfg)
- setGPO(ctx, &cfg.GPO, false)
+ setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
setMiner(ctx, &cfg.Miner)
@@ -1593,13 +1413,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
}
- if ctx.GlobalIsSet(EWASMInterpreterFlag.Name) {
- cfg.EWASMInterpreter = ctx.GlobalString(EWASMInterpreterFlag.Name)
- }
-
- if ctx.GlobalIsSet(EVMInterpreterFlag.Name) {
- cfg.EVMInterpreter = ctx.GlobalString(EVMInterpreterFlag.Name)
- }
if ctx.GlobalIsSet(RPCGlobalGasCapFlag.Name) {
cfg.RPCGasCap = ctx.GlobalUint64(RPCGlobalGasCapFlag.Name)
}
@@ -1707,11 +1520,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
}
}
-
- // TODO(fjl): move trie cache generations into config
- //if gen := ctx.GlobalInt(TrieCacheGenFlag.Name); gen > 0 {
- // state.MaxTrieCacheSize = uint64(gen)
- //}
}
// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
@@ -1721,9 +1529,6 @@ func SetDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) {
return // already set through flags/config
}
protocol := "all"
- if cfg.SyncMode == downloader.LightSync {
- protocol = "les"
- }
if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
cfg.EthDiscoveryURLs = []string{url}
}
@@ -1738,49 +1543,6 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) *eth.Ethereum {
return backend
}
-// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to
-// the given node.
-//func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) {
-// if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil {
-// Fatalf("Failed to register the Ethereum Stats service: %v", err)
-// }
-//}
-
-// RegisterGraphQLService is a utility function to construct a new service and register it against a node.
-//func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) {
-// if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil {
-// Fatalf("Failed to register the GraphQL service: %v", err)
-// }
-//}
-
-func SetupMetrics(ctx *cli.Context) {
- //if metrics.Enabled {
- // log.Info("Enabling metrics collection")
- //
- // var (
- // enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
- // endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
- // database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
- // username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
- // password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
- // )
- //
- // if enableExport {
- // tagsMap := SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))
- //
- // log.Info("Enabling metrics export to InfluxDB")
- //
- // go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap)
- // }
- //
- // if ctx.GlobalIsSet(MetricsHTTPFlag.Name) {
- // address := fmt.Sprintf("%s:%d", ctx.GlobalString(MetricsHTTPFlag.Name), ctx.GlobalInt(MetricsPortFlag.Name))
- // log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address)
- // exp.Setup(address)
- // }
- //}
-}
-
func SplitTagsFlag(tagsFlag string) map[string]string {
tags := strings.Split(tagsFlag, ",")
tagsMap := map[string]string{}
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
deleted file mode 100644
index c5fe77cfbe4..00000000000
--- a/cmd/utils/flags_legacy.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see .
-// nolint:golint
-package utils
-
-import (
- "strings"
-
- "github.com/ledgerwatch/turbo-geth/eth/ethconfig"
- "github.com/ledgerwatch/turbo-geth/node"
-
- "github.com/urfave/cli"
-)
-
-var (
- // (Deprecated May 2020, shown in aliased flags section)
- LegacyRPCEnabledFlag = cli.BoolFlag{
- Name: "rpc",
- Usage: "Enable the HTTP-RPC server (deprecated, use --http)",
- }
- LegacyRPCListenAddrFlag = cli.StringFlag{
- Name: "rpcaddr",
- Usage: "HTTP-RPC server listening interface (deprecated, use --http.addr)",
- Value: node.DefaultHTTPHost,
- }
- LegacyRPCPortFlag = cli.IntFlag{
- Name: "rpcport",
- Usage: "HTTP-RPC server listening port (deprecated, use --http.port)",
- Value: node.DefaultHTTPPort,
- }
- LegacyRPCCORSDomainFlag = cli.StringFlag{
- Name: "rpccorsdomain",
- Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated, use --http.corsdomain)",
- Value: "",
- }
- LegacyRPCVirtualHostsFlag = cli.StringFlag{
- Name: "rpcvhosts",
- Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated, use --http.vhosts)",
- Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
- }
- LegacyRPCApiFlag = cli.StringFlag{
- Name: "rpcapi",
- Usage: "API's offered over the HTTP-RPC interface (deprecated, use --http.api)",
- Value: "",
- }
- LegacyWSListenAddrFlag = cli.StringFlag{
- Name: "wsaddr",
- Usage: "WS-RPC server listening interface (deprecated, use --ws.addr)",
- Value: node.DefaultWSHost,
- }
- LegacyWSPortFlag = cli.IntFlag{
- Name: "wsport",
- Usage: "WS-RPC server listening port (deprecated, use --ws.port)",
- Value: node.DefaultWSPort,
- }
- LegacyWSApiFlag = cli.StringFlag{
- Name: "wsapi",
- Usage: "API's offered over the WS-RPC interface (deprecated, use --ws.api)",
- Value: "",
- }
- LegacyWSAllowedOriginsFlag = cli.StringFlag{
- Name: "wsorigins",
- Usage: "Origins from which to accept websockets requests (deprecated, use --ws.origins)",
- Value: "",
- }
- LegacyGpoBlocksFlag = cli.IntFlag{
- Name: "gpoblocks",
- Usage: "Number of recent blocks to check for gas prices (deprecated, use --gpo.blocks)",
- Value: ethconfig.Defaults.GPO.Blocks,
- }
- LegacyGpoPercentileFlag = cli.IntFlag{
- Name: "gpopercentile",
- Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices (deprecated, use --gpo.percentile)",
- Value: ethconfig.Defaults.GPO.Percentile,
- }
- LegacyBootnodesV4Flag = cli.StringFlag{
- Name: "bootnodesv4",
- Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes) (deprecated, use --bootnodes)",
- Value: "",
- }
- LegacyBootnodesV5Flag = cli.StringFlag{
- Name: "bootnodesv5",
- Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes) (deprecated, use --bootnodes)",
- Value: "",
- }
-
- // (Deprecated July 2020, shown in aliased flags section)
- LegacyGraphQLListenAddrFlag = cli.StringFlag{
- Name: "graphql.addr",
- Usage: "GraphQL server listening interface (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
- }
- LegacyGraphQLPortFlag = cli.IntFlag{
- Name: "graphql.port",
- Usage: "GraphQL server listening port (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)",
- Value: node.DefaultHTTPPort,
- }
-)
diff --git a/common/hexutil/json.go b/common/hexutil/json.go
index 50db208118e..fbc21241c8a 100644
--- a/common/hexutil/json.go
+++ b/common/hexutil/json.go
@@ -72,25 +72,6 @@ func (b Bytes) String() string {
return Encode(b)
}
-// ImplementsGraphQLType returns true if Bytes implements the specified GraphQL type.
-func (b Bytes) ImplementsGraphQLType(name string) bool { return name == "Bytes" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- data, err := Decode(input)
- if err != nil {
- return err
- }
- *b = data
- default:
- err = fmt.Errorf("unexpected type %T for Bytes", input)
- }
- return err
-}
-
// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalJSON method for fixed-size types.
@@ -206,25 +187,6 @@ func (b *Big) String() string {
return EncodeBig(b.ToInt())
}
-// ImplementsGraphQLType returns true if Big implements the provided GraphQL type.
-func (b Big) ImplementsGraphQLType(name string) bool { return name == "BigInt" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (b *Big) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- return b.UnmarshalText([]byte(input))
- case int32:
- var num big.Int
- num.SetInt64(int64(input))
- *b = Big(num)
- default:
- err = fmt.Errorf("unexpected type %T for BigInt", input)
- }
- return err
-}
-
// Uint64 marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint64 uint64
@@ -272,23 +234,6 @@ func (b Uint64) String() string {
return EncodeUint64(uint64(b))
}
-// ImplementsGraphQLType returns true if Uint64 implements the provided GraphQL type.
-func (b Uint64) ImplementsGraphQLType(name string) bool { return name == "Long" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- return b.UnmarshalText([]byte(input))
- case int32:
- *b = Uint64(input)
- default:
- err = fmt.Errorf("unexpected type %T for Long", input)
- }
- return err
-}
-
// Uint marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint uint
diff --git a/common/types.go b/common/types.go
index 8e378d4730c..ccf29bfc9e9 100644
--- a/common/types.go
+++ b/common/types.go
@@ -171,21 +171,6 @@ func (h Hash) Value() (driver.Value, error) {
return h[:], nil
}
-// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
-func (Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (h *Hash) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- err = h.UnmarshalText([]byte(input))
- default:
- err = fmt.Errorf("unexpected type %T for Hash", input)
- }
- return err
-}
-
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
@@ -344,21 +329,6 @@ func (a Address) Value() (driver.Value, error) {
return a[:], nil
}
-// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
-func (a Address) ImplementsGraphQLType(name string) bool { return name == "Address" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (a *Address) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- err = a.UnmarshalText([]byte(input))
- default:
- err = fmt.Errorf("unexpected type %T for Address", input)
- }
- return err
-}
-
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go
index 6343d75903b..037c632a4a3 100644
--- a/consensus/ethash/algorithm_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -692,53 +692,6 @@ func TestHashimoto(t *testing.T) {
}
}
-// Tests that caches generated on disk may be done concurrently.
-func TestConcurrentDiskCacheGeneration(t *testing.T) {
- t.Skip("turbo-geth doesn't use disk cache")
- /*
- // Create a temp folder to generate the caches into
- cachedir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("Failed to create temporary cache dir: %v", err)
- }
- defer os.RemoveAll(cachedir)
-
- // Define a heavy enough block, one from mainnet should do
- block := types.NewBlockWithHeader(&types.Header{
- Number: big.NewInt(3311058),
- ParentHash: common.HexToHash("0xd783efa4d392943503f28438ad5830b2d5964696ffc285f338585e9fe0a37a05"),
- UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
- Coinbase: common.HexToAddress("0xc0ea08a2d404d3172d2add29a45be56da40e2949"),
- Root: common.HexToHash("0x77d14e10470b5850332524f8cd6f69ad21f070ce92dca33ab2858300242ef2f1"),
- TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
- ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
- Difficulty: big.NewInt(167925187834220),
- GasLimit: 4015682,
- GasUsed: 0,
- Time: 1488928920,
- Extra: []byte("www.bw.com"),
- MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
- Nonce: types.EncodeNonce(0xf400cd0006070c49),
- })
- // Simulate multiple processes sharing the same datadir
- var pend sync.WaitGroup
-
- for i := 0; i < 3; i++ {
- pend.Add(1)
-
- go func(idx int) {
- defer pend.Done()
- ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil}, nil, false)
- defer ethash.Close()
- if err := ethash.verifySeal(nil, block.Header(), false); err != nil {
- t.Errorf("proc %d: block verification failed: %v", idx, err)
- }
- }(i)
- }
- pend.Wait()
- */
-}
-
// Benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
for i := 0; i < b.N; i++ {
diff --git a/contracts/checkpointoracle/contract/oracle.go b/contracts/checkpointoracle/contract/oracle.go
deleted file mode 100644
index 6e09e930f19..00000000000
--- a/contracts/checkpointoracle/contract/oracle.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// Code generated - DO NOT EDIT.
-// This file is a generated binding and any manual changes will be lost.
-
-package contract
-
-import (
- "math/big"
- "strings"
-
- ethereum "github.com/ledgerwatch/turbo-geth"
- "github.com/ledgerwatch/turbo-geth/accounts/abi"
- "github.com/ledgerwatch/turbo-geth/accounts/abi/bind"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/core/types"
- "github.com/ledgerwatch/turbo-geth/event"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var (
- _ = big.NewInt
- _ = strings.NewReader
- _ = ethereum.NotFound
- _ = bind.Bind
- _ = common.Big1
- _ = types.BloomLookup
- _ = event.NewSubscription
-)
-
-// CheckpointOracleABI is the input ABI used to generate the binding from.
-const CheckpointOracleABI = "[{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_adminlist\",\"type\":\"address[]\"},{\"internalType\":\"uint256\",\"name\":\"_sectionSize\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_processConfirms\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_threshold\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"checkpointHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"}],\"name\":\"NewCheckpointVote\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"GetAllAdmin\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"GetLatestCheckpoint\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_recentNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"_recentHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_hash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"_sectionIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint8[]\",\"name\":\"v\",\"type\":\"uint8[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"r\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"s\",\"type\":\"bytes32[]\"}],\"name\":\"SetCheckpoint\",\"outputs\":[{\"in
ternalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
-
-// CheckpointOracleFuncSigs maps the 4-byte function signature to its string representation.
-var CheckpointOracleFuncSigs = map[string]string{
- "45848dfc": "GetAllAdmin()",
- "4d6a304c": "GetLatestCheckpoint()",
- "d459fc46": "SetCheckpoint(uint256,bytes32,bytes32,uint64,uint8[],bytes32[],bytes32[])",
-}
-
-// CheckpointOracleBin is the compiled bytecode used for deploying new contracts.
-var CheckpointOracleBin = "0x608060405234801561001057600080fd5b506040516108703803806108708339818101604052608081101561003357600080fd5b810190808051604051939291908464010000000082111561005357600080fd5b90830190602082018581111561006857600080fd5b825186602082028301116401000000008211171561008557600080fd5b82525081516020918201928201910280838360005b838110156100b257818101518382015260200161009a565b50505050919091016040908152602083015190830151606090930151909450919250600090505b84518110156101855760016000808784815181106100f357fe5b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060006101000a81548160ff021916908315150217905550600185828151811061014057fe5b60209081029190910181015182546001808201855560009485529290932090920180546001600160a01b0319166001600160a01b0390931692909217909155016100d9565b50600592909255600655600755506106ce806101a26000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806345848dfc146100465780634d6a304c1461009e578063d459fc46146100cf575b600080fd5b61004e6102b0565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561008a578181015183820152602001610072565b505050509050019250505060405180910390f35b6100a6610365565b6040805167ffffffffffffffff9094168452602084019290925282820152519081900360600190f35b61029c600480360360e08110156100e557600080fd5b81359160208101359160408201359167ffffffffffffffff6060820135169181019060a08101608082013564010000000081111561012257600080fd5b82018360208201111561013457600080fd5b8035906020019184602083028401116401000000008311171561015657600080fd5b91908080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525092959493602081019350359150506401000000008111156101a657600080fd5b8201836020820111156101b857600080fd5b803590602001918460208302840111640100000000831117156101da57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929594936020810193503591505064010000000081111561022a57600080fd5b82018360208201111561023c
57600080fd5b8035906020019184602083028401116401000000008311171561025e57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550610380945050505050565b604080519115158252519081900360200190f35b600154606090819067ffffffffffffffff811180156102ce57600080fd5b506040519080825280602002602001820160405280156102f8578160200160208202803683370190505b50905060005b60015481101561035f576001818154811061031557fe5b9060005260206000200160009054906101000a90046001600160a01b031682828151811061033f57fe5b6001600160a01b03909216602092830291909101909101526001016102fe565b50905090565b60025460045460035467ffffffffffffffff90921691909192565b3360009081526020819052604081205460ff1661039c57600080fd5b868840146103a957600080fd5b82518451146103b757600080fd5b81518451146103c557600080fd5b6006546005548660010167ffffffffffffffff1602014310156103ea5750600061068d565b60025467ffffffffffffffff908116908616101561040a5750600061068d565b60025467ffffffffffffffff868116911614801561043c575067ffffffffffffffff851615158061043c575060035415155b156104495750600061068d565b856104565750600061068d565b60408051601960f81b6020808301919091526000602183018190523060601b60228401526001600160c01b031960c08a901b166036840152603e8084018b905284518085039091018152605e909301909352815191012090805b86518110156106875760006001848984815181106104ca57fe5b60200260200101518985815181106104de57fe5b60200260200101518986815181106104f257fe5b602002602001015160405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015610551573d6000803e3d6000fd5b505060408051601f1901516001600160a01b03811660009081526020819052919091205490925060ff16905061058657600080fd5b826001600160a01b0316816001600160a01b0316116105a457600080fd5b8092508867ffffffffffffffff167fce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a418b8a85815181106105e057fe5b60200260200101518a86815181106105f457fe5b60200260200101518a878151811061060857fe5b6020026020010151604051808581526020018460ff16
60ff16815260200183815260200182815260200194505050505060405180910390a2600754826001011061067e5750505060048790555050436003556002805467ffffffffffffffff191667ffffffffffffffff8616179055600161068d565b506001016104b0565b50600080fd5b97965050505050505056fea26469706673582212202ddf9eda76bf59c0fc65584c0b22d84ecef2c703765de60439596d6ac34c2b7264736f6c634300060b0033"
-
-// DeployCheckpointOracle deploys a new Ethereum contract, binding an instance of CheckpointOracle to it.
-func DeployCheckpointOracle(auth *bind.TransactOpts, backend bind.ContractBackend, _adminlist []common.Address, _sectionSize *big.Int, _processConfirms *big.Int, _threshold *big.Int) (common.Address, *types.Transaction, *CheckpointOracle, error) {
- parsed, err := abi.JSON(strings.NewReader(CheckpointOracleABI))
- if err != nil {
- return common.Address{}, nil, nil, err
- }
-
- address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(CheckpointOracleBin), backend, _adminlist, _sectionSize, _processConfirms, _threshold)
- if err != nil {
- return common.Address{}, nil, nil, err
- }
- return address, tx, &CheckpointOracle{CheckpointOracleCaller: CheckpointOracleCaller{contract: contract}, CheckpointOracleTransactor: CheckpointOracleTransactor{contract: contract}, CheckpointOracleFilterer: CheckpointOracleFilterer{contract: contract}}, nil
-}
-
-// CheckpointOracle is an auto generated Go binding around an Ethereum contract.
-type CheckpointOracle struct {
- CheckpointOracleCaller // Read-only binding to the contract
- CheckpointOracleTransactor // Write-only binding to the contract
- CheckpointOracleFilterer // Log filterer for contract events
-}
-
-// CheckpointOracleCaller is an auto generated read-only Go binding around an Ethereum contract.
-type CheckpointOracleCaller struct {
- contract *bind.BoundContract // Generic contract wrapper for the low level calls
-}
-
-// CheckpointOracleTransactor is an auto generated write-only Go binding around an Ethereum contract.
-type CheckpointOracleTransactor struct {
- contract *bind.BoundContract // Generic contract wrapper for the low level calls
-}
-
-// CheckpointOracleFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
-type CheckpointOracleFilterer struct {
- contract *bind.BoundContract // Generic contract wrapper for the low level calls
-}
-
-// CheckpointOracleSession is an auto generated Go binding around an Ethereum contract,
-// with pre-set call and transact options.
-type CheckpointOracleSession struct {
- Contract *CheckpointOracle // Generic contract binding to set the session for
- CallOpts bind.CallOpts // Call options to use throughout this session
- TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
-}
-
-// CheckpointOracleCallerSession is an auto generated read-only Go binding around an Ethereum contract,
-// with pre-set call options.
-type CheckpointOracleCallerSession struct {
- Contract *CheckpointOracleCaller // Generic contract caller binding to set the session for
- CallOpts bind.CallOpts // Call options to use throughout this session
-}
-
-// CheckpointOracleTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
-// with pre-set transact options.
-type CheckpointOracleTransactorSession struct {
- Contract *CheckpointOracleTransactor // Generic contract transactor binding to set the session for
- TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
-}
-
-// CheckpointOracleRaw is an auto generated low-level Go binding around an Ethereum contract.
-type CheckpointOracleRaw struct {
- Contract *CheckpointOracle // Generic contract binding to access the raw methods on
-}
-
-// CheckpointOracleCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
-type CheckpointOracleCallerRaw struct {
- Contract *CheckpointOracleCaller // Generic read-only contract binding to access the raw methods on
-}
-
-// CheckpointOracleTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
-type CheckpointOracleTransactorRaw struct {
- Contract *CheckpointOracleTransactor // Generic write-only contract binding to access the raw methods on
-}
-
-// NewCheckpointOracle creates a new instance of CheckpointOracle, bound to a specific deployed contract.
-func NewCheckpointOracle(address common.Address, backend bind.ContractBackend) (*CheckpointOracle, error) {
- contract, err := bindCheckpointOracle(address, backend, backend, backend)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracle{CheckpointOracleCaller: CheckpointOracleCaller{contract: contract}, CheckpointOracleTransactor: CheckpointOracleTransactor{contract: contract}, CheckpointOracleFilterer: CheckpointOracleFilterer{contract: contract}}, nil
-}
-
-// NewCheckpointOracleCaller creates a new read-only instance of CheckpointOracle, bound to a specific deployed contract.
-func NewCheckpointOracleCaller(address common.Address, caller bind.ContractCaller) (*CheckpointOracleCaller, error) {
- contract, err := bindCheckpointOracle(address, caller, nil, nil)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracleCaller{contract: contract}, nil
-}
-
-// NewCheckpointOracleTransactor creates a new write-only instance of CheckpointOracle, bound to a specific deployed contract.
-func NewCheckpointOracleTransactor(address common.Address, transactor bind.ContractTransactor) (*CheckpointOracleTransactor, error) {
- contract, err := bindCheckpointOracle(address, nil, transactor, nil)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracleTransactor{contract: contract}, nil
-}
-
-// NewCheckpointOracleFilterer creates a new log filterer instance of CheckpointOracle, bound to a specific deployed contract.
-func NewCheckpointOracleFilterer(address common.Address, filterer bind.ContractFilterer) (*CheckpointOracleFilterer, error) {
- contract, err := bindCheckpointOracle(address, nil, nil, filterer)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracleFilterer{contract: contract}, nil
-}
-
-// bindCheckpointOracle binds a generic wrapper to an already deployed contract.
-func bindCheckpointOracle(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
- parsed, err := abi.JSON(strings.NewReader(CheckpointOracleABI))
- if err != nil {
- return nil, err
- }
- return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
-}
-
-// Call invokes the (constant) contract method with params as input values and
-// sets the output to result. The result type might be a single field for simple
-// returns, a slice of interfaces for anonymous returns and a struct for named
-// returns.
-func (_CheckpointOracle *CheckpointOracleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
- return _CheckpointOracle.Contract.CheckpointOracleCaller.contract.Call(opts, result, method, params...)
-}
-
-// Transfer initiates a plain transaction to move funds to the contract, calling
-// its default method if one is available.
-func (_CheckpointOracle *CheckpointOracleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.CheckpointOracleTransactor.contract.Transfer(opts)
-}
-
-// Transact invokes the (paid) contract method with params as input values.
-func (_CheckpointOracle *CheckpointOracleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.CheckpointOracleTransactor.contract.Transact(opts, method, params...)
-}
-
-// Call invokes the (constant) contract method with params as input values and
-// sets the output to result. The result type might be a single field for simple
-// returns, a slice of interfaces for anonymous returns and a struct for named
-// returns.
-func (_CheckpointOracle *CheckpointOracleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
- return _CheckpointOracle.Contract.contract.Call(opts, result, method, params...)
-}
-
-// Transfer initiates a plain transaction to move funds to the contract, calling
-// its default method if one is available.
-func (_CheckpointOracle *CheckpointOracleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.contract.Transfer(opts)
-}
-
-// Transact invokes the (paid) contract method with params as input values.
-func (_CheckpointOracle *CheckpointOracleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.contract.Transact(opts, method, params...)
-}
-
-// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
-//
-// Solidity: function GetAllAdmin() view returns(address[])
-func (_CheckpointOracle *CheckpointOracleCaller) GetAllAdmin(opts *bind.CallOpts) ([]common.Address, error) {
- var out []interface{}
- err := _CheckpointOracle.contract.Call(opts, &out, "GetAllAdmin")
-
- if err != nil {
- return *new([]common.Address), err
- }
-
- out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address)
-
- return out0, err
-
-}
-
-// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
-//
-// Solidity: function GetAllAdmin() view returns(address[])
-func (_CheckpointOracle *CheckpointOracleSession) GetAllAdmin() ([]common.Address, error) {
- return _CheckpointOracle.Contract.GetAllAdmin(&_CheckpointOracle.CallOpts)
-}
-
-// GetAllAdmin is a free data retrieval call binding the contract method 0x45848dfc.
-//
-// Solidity: function GetAllAdmin() view returns(address[])
-func (_CheckpointOracle *CheckpointOracleCallerSession) GetAllAdmin() ([]common.Address, error) {
- return _CheckpointOracle.Contract.GetAllAdmin(&_CheckpointOracle.CallOpts)
-}
-
-// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
-//
-// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
-func (_CheckpointOracle *CheckpointOracleCaller) GetLatestCheckpoint(opts *bind.CallOpts) (uint64, [32]byte, *big.Int, error) {
- var out []interface{}
- err := _CheckpointOracle.contract.Call(opts, &out, "GetLatestCheckpoint")
-
- if err != nil {
- return *new(uint64), *new([32]byte), *new(*big.Int), err
- }
-
- out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64)
- out1 := *abi.ConvertType(out[1], new([32]byte)).(*[32]byte)
- out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)
-
- return out0, out1, out2, err
-
-}
-
-// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
-//
-// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
-func (_CheckpointOracle *CheckpointOracleSession) GetLatestCheckpoint() (uint64, [32]byte, *big.Int, error) {
- return _CheckpointOracle.Contract.GetLatestCheckpoint(&_CheckpointOracle.CallOpts)
-}
-
-// GetLatestCheckpoint is a free data retrieval call binding the contract method 0x4d6a304c.
-//
-// Solidity: function GetLatestCheckpoint() view returns(uint64, bytes32, uint256)
-func (_CheckpointOracle *CheckpointOracleCallerSession) GetLatestCheckpoint() (uint64, [32]byte, *big.Int, error) {
- return _CheckpointOracle.Contract.GetLatestCheckpoint(&_CheckpointOracle.CallOpts)
-}
-
-// SetCheckpoint is a paid mutator transaction binding the contract method 0xd459fc46.
-//
-// Solidity: function SetCheckpoint(uint256 _recentNumber, bytes32 _recentHash, bytes32 _hash, uint64 _sectionIndex, uint8[] v, bytes32[] r, bytes32[] s) returns(bool)
-func (_CheckpointOracle *CheckpointOracleTransactor) SetCheckpoint(opts *bind.TransactOpts, _recentNumber *big.Int, _recentHash [32]byte, _hash [32]byte, _sectionIndex uint64, v []uint8, r [][32]byte, s [][32]byte) (*types.Transaction, error) {
- return _CheckpointOracle.contract.Transact(opts, "SetCheckpoint", _recentNumber, _recentHash, _hash, _sectionIndex, v, r, s)
-}
-
-// SetCheckpoint is a paid mutator transaction binding the contract method 0xd459fc46.
-//
-// Solidity: function SetCheckpoint(uint256 _recentNumber, bytes32 _recentHash, bytes32 _hash, uint64 _sectionIndex, uint8[] v, bytes32[] r, bytes32[] s) returns(bool)
-func (_CheckpointOracle *CheckpointOracleSession) SetCheckpoint(_recentNumber *big.Int, _recentHash [32]byte, _hash [32]byte, _sectionIndex uint64, v []uint8, r [][32]byte, s [][32]byte) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.SetCheckpoint(&_CheckpointOracle.TransactOpts, _recentNumber, _recentHash, _hash, _sectionIndex, v, r, s)
-}
-
-// SetCheckpoint is a paid mutator transaction binding the contract method 0xd459fc46.
-//
-// Solidity: function SetCheckpoint(uint256 _recentNumber, bytes32 _recentHash, bytes32 _hash, uint64 _sectionIndex, uint8[] v, bytes32[] r, bytes32[] s) returns(bool)
-func (_CheckpointOracle *CheckpointOracleTransactorSession) SetCheckpoint(_recentNumber *big.Int, _recentHash [32]byte, _hash [32]byte, _sectionIndex uint64, v []uint8, r [][32]byte, s [][32]byte) (*types.Transaction, error) {
- return _CheckpointOracle.Contract.SetCheckpoint(&_CheckpointOracle.TransactOpts, _recentNumber, _recentHash, _hash, _sectionIndex, v, r, s)
-}
-
-// CheckpointOracleNewCheckpointVoteIterator is returned from FilterNewCheckpointVote and is used to iterate over the raw logs and unpacked data for NewCheckpointVote events raised by the CheckpointOracle contract.
-type CheckpointOracleNewCheckpointVoteIterator struct {
- Event *CheckpointOracleNewCheckpointVote // Event containing the contract specifics and raw log
-
- contract *bind.BoundContract // Generic contract to use for unpacking event data
- event string // Event name to use for unpacking event data
-
- logs chan types.Log // Log channel receiving the found contract events
- sub ethereum.Subscription // Subscription for errors, completion and termination
- done bool // Whether the subscription completed delivering logs
- fail error // Occurred error to stop iteration
-}
-
-// Next advances the iterator to the subsequent event, returning whether there
-// are any more events found. In case of a retrieval or parsing error, false is
-// returned and Error() can be queried for the exact failure.
-func (it *CheckpointOracleNewCheckpointVoteIterator) Next() bool {
- // If the iterator failed, stop iterating
- if it.fail != nil {
- return false
- }
- // If the iterator completed, deliver directly whatever's available
- if it.done {
- select {
- case log := <-it.logs:
- it.Event = new(CheckpointOracleNewCheckpointVote)
- if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
- it.fail = err
- return false
- }
- it.Event.Raw = log
- return true
-
- default:
- return false
- }
- }
- // Iterator still in progress, wait for either a data or an error event
- select {
- case log := <-it.logs:
- it.Event = new(CheckpointOracleNewCheckpointVote)
- if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
- it.fail = err
- return false
- }
- it.Event.Raw = log
- return true
-
- case err := <-it.sub.Err():
- it.done = true
- it.fail = err
- return it.Next()
- }
-}
-
-// Error returns any retrieval or parsing error occurred during filtering.
-func (it *CheckpointOracleNewCheckpointVoteIterator) Error() error {
- return it.fail
-}
-
-// Close terminates the iteration process, releasing any pending underlying
-// resources.
-func (it *CheckpointOracleNewCheckpointVoteIterator) Close() error {
- it.sub.Unsubscribe()
- return nil
-}
-
-// CheckpointOracleNewCheckpointVote represents a NewCheckpointVote event raised by the CheckpointOracle contract.
-type CheckpointOracleNewCheckpointVote struct {
- Index uint64
- CheckpointHash [32]byte
- V uint8
- R [32]byte
- S [32]byte
- Raw types.Log // Blockchain specific contextual infos
-}
-
-// FilterNewCheckpointVote is a free log retrieval operation binding the contract event 0xce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a41.
-//
-// Solidity: event NewCheckpointVote(uint64 indexed index, bytes32 checkpointHash, uint8 v, bytes32 r, bytes32 s)
-func (_CheckpointOracle *CheckpointOracleFilterer) FilterNewCheckpointVote(opts *bind.FilterOpts, index []uint64) (*CheckpointOracleNewCheckpointVoteIterator, error) {
-
- var indexRule []interface{}
- for _, indexItem := range index {
- indexRule = append(indexRule, indexItem)
- }
-
- logs, sub, err := _CheckpointOracle.contract.FilterLogs(opts, "NewCheckpointVote", indexRule)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracleNewCheckpointVoteIterator{contract: _CheckpointOracle.contract, event: "NewCheckpointVote", logs: logs, sub: sub}, nil
-}
-
-// WatchNewCheckpointVote is a free log subscription operation binding the contract event 0xce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a41.
-//
-// Solidity: event NewCheckpointVote(uint64 indexed index, bytes32 checkpointHash, uint8 v, bytes32 r, bytes32 s)
-func (_CheckpointOracle *CheckpointOracleFilterer) WatchNewCheckpointVote(opts *bind.WatchOpts, sink chan<- *CheckpointOracleNewCheckpointVote, index []uint64) (event.Subscription, error) {
-
- var indexRule []interface{}
- for _, indexItem := range index {
- indexRule = append(indexRule, indexItem)
- }
-
- logs, sub, err := _CheckpointOracle.contract.WatchLogs(opts, "NewCheckpointVote", indexRule)
- if err != nil {
- return nil, err
- }
- return event.NewSubscription(func(quit <-chan struct{}) error {
- defer sub.Unsubscribe()
- for {
- select {
- case log := <-logs:
- // New log arrived, parse the event and forward to the user
- event := new(CheckpointOracleNewCheckpointVote)
- if err := _CheckpointOracle.contract.UnpackLog(event, "NewCheckpointVote", log); err != nil {
- return err
- }
- event.Raw = log
-
- select {
- case sink <- event:
- case err := <-sub.Err():
- return err
- case <-quit:
- return nil
- }
- case err := <-sub.Err():
- return err
- case <-quit:
- return nil
- }
- }
- }), nil
-}
-
-// ParseNewCheckpointVote is a log parse operation binding the contract event 0xce51ffa16246bcaf0899f6504f473cd0114f430f566cef71ab7e03d3dde42a41.
-//
-// Solidity: event NewCheckpointVote(uint64 indexed index, bytes32 checkpointHash, uint8 v, bytes32 r, bytes32 s)
-func (_CheckpointOracle *CheckpointOracleFilterer) ParseNewCheckpointVote(log types.Log) (*CheckpointOracleNewCheckpointVote, error) {
- event := new(CheckpointOracleNewCheckpointVote)
- if err := _CheckpointOracle.contract.UnpackLog(event, "NewCheckpointVote", log); err != nil {
- return nil, err
- }
- return event, nil
-}
diff --git a/contracts/checkpointoracle/contract/oracle.sol b/contracts/checkpointoracle/contract/oracle.sol
deleted file mode 100644
index 65bac09d28b..00000000000
--- a/contracts/checkpointoracle/contract/oracle.sol
+++ /dev/null
@@ -1,174 +0,0 @@
-pragma solidity ^0.6.0;
-
-/**
- * @title CheckpointOracle
- * @author Gary Rong, Martin Swende
- * @dev Implementation of the blockchain checkpoint registrar.
- */
-contract CheckpointOracle {
- /*
- Events
- */
-
- // NewCheckpointVote is emitted when a new checkpoint proposal receives a vote.
- event NewCheckpointVote(uint64 indexed index, bytes32 checkpointHash, uint8 v, bytes32 r, bytes32 s);
-
- /*
- Public Functions
- */
- constructor(address[] memory _adminlist, uint _sectionSize, uint _processConfirms, uint _threshold) public {
- for (uint i = 0; i < _adminlist.length; i++) {
- admins[_adminlist[i]] = true;
- adminList.push(_adminlist[i]);
- }
- sectionSize = _sectionSize;
- processConfirms = _processConfirms;
- threshold = _threshold;
- }
-
- /**
- * @dev Get latest stable checkpoint information.
- * @return section index
- * @return checkpoint hash
- * @return block height associated with checkpoint
- */
- function GetLatestCheckpoint()
- view
- public
- returns(uint64, bytes32, uint) {
- return (sectionIndex, hash, height);
- }
-
- // SetCheckpoint sets a new checkpoint. It accepts a list of signatures
- // @_recentNumber: a recent blocknumber, for replay protection
- // @_recentHash : the hash of `_recentNumber`
- // @_hash : the hash to set at _sectionIndex
- // @_sectionIndex : the section index to set
- // @v : the list of v-values
- // @r : the list or r-values
- // @s : the list of s-values
- function SetCheckpoint(
- uint _recentNumber,
- bytes32 _recentHash,
- bytes32 _hash,
- uint64 _sectionIndex,
- uint8[] memory v,
- bytes32[] memory r,
- bytes32[] memory s)
- public
- returns (bool)
- {
- // Ensure the sender is authorized.
- require(admins[msg.sender]);
-
- // These checks replay protection, so it cannot be replayed on forks,
- // accidentally or intentionally
- require(blockhash(_recentNumber) == _recentHash);
-
- // Ensure the batch of signatures are valid.
- require(v.length == r.length);
- require(v.length == s.length);
-
- // Filter out "future" checkpoint.
- if (block.number < (_sectionIndex+1)*sectionSize+processConfirms) {
- return false;
- }
- // Filter out "old" announcement
- if (_sectionIndex < sectionIndex) {
- return false;
- }
- // Filter out "stale" announcement
- if (_sectionIndex == sectionIndex && (_sectionIndex != 0 || height != 0)) {
- return false;
- }
- // Filter out "invalid" announcement
- if (_hash == ""){
- return false;
- }
-
- // EIP 191 style signatures
- //
- // Arguments when calculating hash to validate
- // 1: byte(0x19) - the initial 0x19 byte
- // 2: byte(0) - the version byte (data with intended validator)
- // 3: this - the validator address
- // -- Application specific data
- // 4 : checkpoint section_index(uint64)
- // 5 : checkpoint hash (bytes32)
- // hash = keccak256(checkpoint_index, section_head, cht_root, bloom_root)
- bytes32 signedHash = keccak256(abi.encodePacked(byte(0x19), byte(0), this, _sectionIndex, _hash));
-
- address lastVoter = address(0);
-
- // In order for us not to have to maintain a mapping of who has already
- // voted, and we don't want to count a vote twice, the signatures must
- // be submitted in strict ordering.
- for (uint idx = 0; idx < v.length; idx++){
- address signer = ecrecover(signedHash, v[idx], r[idx], s[idx]);
- require(admins[signer]);
- require(uint256(signer) > uint256(lastVoter));
- lastVoter = signer;
- emit NewCheckpointVote(_sectionIndex, _hash, v[idx], r[idx], s[idx]);
-
- // Sufficient signatures present, update latest checkpoint.
- if (idx+1 >= threshold){
- hash = _hash;
- height = block.number;
- sectionIndex = _sectionIndex;
- return true;
- }
- }
- // We shouldn't wind up here, reverting un-emits the events
- revert();
- }
-
- /**
- * @dev Get all admin addresses
- * @return address list
- */
- function GetAllAdmin()
- public
- view
- returns(address[] memory)
- {
- address[] memory ret = new address[](adminList.length);
- for (uint i = 0; i < adminList.length; i++) {
- ret[i] = adminList[i];
- }
- return ret;
- }
-
- /*
- Fields
- */
- // A map of admin users who have the permission to update CHT and bloom Trie root
- mapping(address => bool) admins;
-
- // A list of admin users so that we can obtain all admin users.
- address[] adminList;
-
- // Latest stored section id
- uint64 sectionIndex;
-
- // The block height associated with latest registered checkpoint.
- uint height;
-
- // The hash of latest registered checkpoint.
- bytes32 hash;
-
- // The frequency for creating a checkpoint
- //
- // The default value should be the same as the checkpoint size(32768) in the ethereum.
- uint sectionSize;
-
- // The number of confirmations needed before a checkpoint can be registered.
- // We have to make sure the checkpoint registered will not be invalid due to
- // chain reorg.
- //
- // The default value should be the same as the checkpoint process confirmations(256)
- // in the ethereum.
- uint processConfirms;
-
- // The required signatures to finalize a stable checkpoint.
- uint threshold;
-}
diff --git a/contracts/checkpointoracle/oracle.go b/contracts/checkpointoracle/oracle.go
deleted file mode 100644
index 0f3298a7478..00000000000
--- a/contracts/checkpointoracle/oracle.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package checkpointoracle is a an on-chain light client checkpoint oracle.
-package checkpointoracle
-
-//go:generate abigen --sol contract/oracle.sol --pkg contract --out contract/oracle.go
-
-import (
- "errors"
- "math/big"
-
- "github.com/ledgerwatch/turbo-geth/accounts/abi/bind"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/contracts/checkpointoracle/contract"
- "github.com/ledgerwatch/turbo-geth/core/types"
-)
-
-// CheckpointOracle is a Go wrapper around an on-chain checkpoint oracle contract.
-type CheckpointOracle struct {
- address common.Address
- contract *contract.CheckpointOracle
-}
-
-// NewCheckpointOracle binds checkpoint contract and returns a registrar instance.
-func NewCheckpointOracle(contractAddr common.Address, backend bind.ContractBackend) (*CheckpointOracle, error) {
- c, err := contract.NewCheckpointOracle(contractAddr, backend)
- if err != nil {
- return nil, err
- }
- return &CheckpointOracle{address: contractAddr, contract: c}, nil
-}
-
-// ContractAddr returns the address of contract.
-func (oracle *CheckpointOracle) ContractAddr() common.Address {
- return oracle.address
-}
-
-// Contract returns the underlying contract instance.
-func (oracle *CheckpointOracle) Contract() *contract.CheckpointOracle {
- return oracle.contract
-}
-
-// LookupCheckpointEvents searches checkpoint event for specific section in the
-// given log batches.
-func (oracle *CheckpointOracle) LookupCheckpointEvents(blockLogs [][]*types.Log, section uint64, hash common.Hash) []*contract.CheckpointOracleNewCheckpointVote {
- var votes []*contract.CheckpointOracleNewCheckpointVote
-
- for _, logs := range blockLogs {
- for _, log := range logs {
- event, err := oracle.contract.ParseNewCheckpointVote(*log)
- if err != nil {
- continue
- }
- if event.Index == section && event.CheckpointHash == hash {
- votes = append(votes, event)
- }
- }
- }
- return votes
-}
-
-// RegisterCheckpoint registers the checkpoint with a batch of associated signatures
-// that are collected off-chain and sorted by lexicographical order.
-//
-// Notably all signatures given should be transformed to "ethereum style" which transforms
-// v from 0/1 to 27/28 according to the yellow paper.
-func (oracle *CheckpointOracle) RegisterCheckpoint(opts *bind.TransactOpts, index uint64, hash []byte, rnum *big.Int, rhash [32]byte, sigs [][]byte) (*types.Transaction, error) {
- var (
- r [][32]byte
- s [][32]byte
- v []uint8
- )
- for i := 0; i < len(sigs); i++ {
- if len(sigs[i]) != 65 {
- return nil, errors.New("invalid signature")
- }
- r = append(r, common.BytesToHash(sigs[i][:32]))
- s = append(s, common.BytesToHash(sigs[i][32:64]))
- v = append(v, sigs[i][64])
- }
- return oracle.contract.SetCheckpoint(opts, rnum, rhash, common.BytesToHash(hash), index, v, r, s)
-}
diff --git a/contracts/checkpointoracle/oracle_test.go b/contracts/checkpointoracle/oracle_test.go
deleted file mode 100644
index 33d1734b2a6..00000000000
--- a/contracts/checkpointoracle/oracle_test.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package checkpointoracle
-
-import (
- "bytes"
- "context"
- "crypto/ecdsa"
- "encoding/binary"
- "errors"
- "math/big"
- "reflect"
- "sort"
- "testing"
- "time"
-
- "github.com/ledgerwatch/turbo-geth/accounts/abi/bind"
- "github.com/ledgerwatch/turbo-geth/accounts/abi/bind/backends"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/contracts/checkpointoracle/contract"
- "github.com/ledgerwatch/turbo-geth/core"
- "github.com/ledgerwatch/turbo-geth/crypto"
- "github.com/ledgerwatch/turbo-geth/params"
-)
-
-var (
- emptyHash = [32]byte{}
-
- checkpoint0 = params.TrustedCheckpoint{
- SectionIndex: 0,
- SectionHead: common.HexToHash("0x7fa3c32f996c2bfb41a1a65b3d8ea3e0a33a1674cde43678ad6f4235e764d17d"),
- CHTRoot: common.HexToHash("0x98fc5d3de23a0fecebad236f6655533c157d26a1aedcd0852a514dc1169e6350"),
- BloomRoot: common.HexToHash("0x99b5adb52b337fe25e74c1c6d3835b896bd638611b3aebddb2317cce27a3f9fa"),
- }
- checkpoint1 = params.TrustedCheckpoint{
- SectionIndex: 1,
- SectionHead: common.HexToHash("0x2d4dee68102125e59b0cc61b176bd89f0d12b3b91cfaf52ef8c2c82fb920c2d2"),
- CHTRoot: common.HexToHash("0x7d428008ece3b4c4ef5439f071930aad0bb75108d381308df73beadcd01ded95"),
- BloomRoot: common.HexToHash("0x652571f7736de17e7bbb427ac881474da684c6988a88bf51b10cca9a2ee148f4"),
- }
- checkpoint2 = params.TrustedCheckpoint{
- SectionIndex: 2,
- SectionHead: common.HexToHash("0x61c0de578c0115b1dff8ef39aa600588c7c6ecb8a2f102003d7cf4c4146e9291"),
- CHTRoot: common.HexToHash("0x407a08a407a2bc3838b74ca3eb206903c9c8a186ccf5ef14af07794efff1970b"),
- BloomRoot: common.HexToHash("0x058b4161f558ce295a92925efc57f34f9210d5a30088d7475c183e0d3e58f5ac"),
- }
-)
-
-var (
- // The block frequency for creating checkpoint(only used in test)
- sectionSize = big.NewInt(512)
-
- // The number of confirmations needed to generate a checkpoint(only used in test).
- processConfirms = big.NewInt(4)
-)
-
-// validateOperation executes the operation, watches and delivers all events fired by the backend and ensures the
-// correctness by assert function.
-func validateOperation(t *testing.T, c *contract.CheckpointOracle, backend *backends.SimulatedBackend, operation func(),
- assert func(<-chan *contract.CheckpointOracleNewCheckpointVote) error, opName string) {
- // Watch all events and deliver them to assert function
- var (
- sink = make(chan *contract.CheckpointOracleNewCheckpointVote)
- sub, _ = c.WatchNewCheckpointVote(nil, sink, nil)
- )
- defer func() {
- // Close all subscribers
- sub.Unsubscribe()
- }()
- operation()
-
- // flush pending block
- backend.Commit()
- if err := assert(sink); err != nil {
- t.Errorf("operation {%s} failed, err %s", opName, err)
- }
-}
-
-// validateEvents checks that the correct number of contract events
-// fired by contract backend.
-func validateEvents(target int, sink interface{}) (bool, []reflect.Value) {
- chanval := reflect.ValueOf(sink)
- chantyp := chanval.Type()
- if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
- return false, nil
- }
- count := 0
- var recv []reflect.Value
- timeout := time.After(1 * time.Second)
- cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
- for {
- chose, v, _ := reflect.Select(cases)
- if chose == 1 {
- // Not enough event received
- return false, nil
- }
- count += 1
- recv = append(recv, v)
- if count == target {
- break
- }
- }
- done := time.After(50 * time.Millisecond)
- cases = cases[:1]
- cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
- chose, _, _ := reflect.Select(cases)
- // If chose equal 0, it means receiving redundant events.
- return chose == 1, recv
-}
-
-func signCheckpoint(addr common.Address, privateKey *ecdsa.PrivateKey, index uint64, hash common.Hash) []byte {
- // EIP 191 style signatures
- //
- // Arguments when calculating hash to validate
- // 1: byte(0x19) - the initial 0x19 byte
- // 2: byte(0) - the version byte (data with intended validator)
- // 3: this - the validator address
- // -- Application specific data
- // 4 : checkpoint section_index(uint64)
- // 5 : checkpoint hash (bytes32)
- // hash = keccak256(checkpoint_index, section_head, cht_root, bloom_root)
- buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, index)
- data := append([]byte{0x19, 0x00}, append(addr.Bytes(), append(buf, hash.Bytes()...)...)...)
- sig, _ := crypto.Sign(crypto.Keccak256(data), privateKey)
- sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
- return sig
-}
-
-// assertSignature verifies whether the recovered signers are equal with expected.
-func assertSignature(addr common.Address, index uint64, hash [32]byte, r, s [32]byte, v uint8, expect common.Address) bool {
- buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, index)
- data := append([]byte{0x19, 0x00}, append(addr.Bytes(), append(buf, hash[:]...)...)...)
- pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), append(r[:], append(s[:], v-27)...))
- if err != nil {
- return false
- }
- var signer common.Address
- copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
- return bytes.Equal(signer.Bytes(), expect.Bytes())
-}
-
-type Account struct {
- key *ecdsa.PrivateKey
- addr common.Address
-}
-type Accounts []Account
-
-func (a Accounts) Len() int { return len(a) }
-func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }
-
-func TestCheckpointRegister(t *testing.T) {
- t.Skip("deadlock")
- // Initialize test accounts
- var accounts Accounts
- for i := 0; i < 3; i++ {
- key, _ := crypto.GenerateKey()
- addr := crypto.PubkeyToAddress(key.PublicKey)
- accounts = append(accounts, Account{key: key, addr: addr})
- }
- sort.Sort(accounts)
-
- // Deploy registrar contract
- contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{accounts[0].addr: {Balance: big.NewInt(1000000000)}, accounts[1].addr: {Balance: big.NewInt(1000000000)}, accounts[2].addr: {Balance: big.NewInt(1000000000)}}, 10000000)
- defer contractBackend.Close()
-
- transactOpts, _ := bind.NewKeyedTransactorWithChainID(accounts[0].key, big.NewInt(1337))
-
- // 3 trusted signers, threshold 2
- contractAddr, _, c, err := contract.DeployCheckpointOracle(transactOpts, contractBackend, []common.Address{accounts[0].addr, accounts[1].addr, accounts[2].addr}, sectionSize, processConfirms, big.NewInt(2))
- if err != nil {
- t.Error("Failed to deploy registrar contract", err)
- }
- contractBackend.Commit()
-
- // getRecent returns block height and hash of the head parent.
- getRecent := func() (*big.Int, common.Hash) {
- h, _ := contractBackend.HeaderByNumber(context.Background(), nil)
- parentNumber := new(big.Int).Sub(h.Number, big.NewInt(1))
- parentHash := h.ParentHash
- return parentNumber, parentHash
- }
- // collectSig generates specified number signatures.
- collectSig := func(index uint64, hash common.Hash, n int, unauthorized *ecdsa.PrivateKey) (v []uint8, r [][32]byte, s [][32]byte) {
- for i := 0; i < n; i++ {
- sig := signCheckpoint(contractAddr, accounts[i].key, index, hash)
- if unauthorized != nil {
- sig = signCheckpoint(contractAddr, unauthorized, index, hash)
- }
- r = append(r, common.BytesToHash(sig[:32]))
- s = append(s, common.BytesToHash(sig[32:64]))
- v = append(v, sig[64])
- }
- return v, r, s
- }
- // insertEmptyBlocks inserts a batch of empty blocks to blockchain.
- insertEmptyBlocks := func(number int) {
- for i := 0; i < number; i++ {
- contractBackend.Commit()
- }
- }
- // assert checks whether the current contract status is same with
- // the expected.
- assert := func(index uint64, hash [32]byte, height *big.Int) error {
- lindex, lhash, lheight, err := c.GetLatestCheckpoint(nil)
- if err != nil {
- return err
- }
- if lindex != index {
- return errors.New("latest checkpoint index mismatch")
- }
- if !bytes.Equal(lhash[:], hash[:]) {
- return errors.New("latest checkpoint hash mismatch")
- }
- if lheight.Cmp(height) != 0 {
- return errors.New("latest checkpoint height mismatch")
- }
- return nil
- }
-
- // Test future checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(0, checkpoint0.Hash(), 2, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint0.Hash(), 0, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- return assert(0, emptyHash, big.NewInt(0))
- }, "test future checkpoint registration")
-
- insertEmptyBlocks(int(sectionSize.Uint64() + processConfirms.Uint64()))
-
- // Test transaction replay protection
- validateOperation(t, c, contractBackend, func() {
- number, _ := getRecent()
- v, r, s := collectSig(0, checkpoint0.Hash(), 2, nil)
- hash := common.HexToHash("deadbeef")
- c.SetCheckpoint(transactOpts, number, hash, checkpoint0.Hash(), 0, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- return assert(0, emptyHash, big.NewInt(0))
- }, "test transaction replay protection")
-
- // Test unauthorized signature checking
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- u, _ := crypto.GenerateKey()
- v, r, s := collectSig(0, checkpoint0.Hash(), 2, u)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint0.Hash(), 0, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- return assert(0, emptyHash, big.NewInt(0))
- }, "test unauthorized signature checking")
-
- // Test un-multi-signature checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(0, checkpoint0.Hash(), 1, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint0.Hash(), 0, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- return assert(0, emptyHash, big.NewInt(0))
- }, "test un-multi-signature checkpoint registration")
-
- // Test valid checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(0, checkpoint0.Hash(), 2, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint0.Hash(), 0, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- if valid, recv := validateEvents(2, events); !valid {
- return errors.New("receive incorrect number of events")
- } else {
- for i := 0; i < len(recv); i++ {
- event := recv[i].Interface().(*contract.CheckpointOracleNewCheckpointVote)
- if !assertSignature(contractAddr, event.Index, event.CheckpointHash, event.R, event.S, event.V, accounts[i].addr) {
- return errors.New("recover signer failed")
- }
- }
- }
- number, _ := getRecent()
- return assert(0, checkpoint0.Hash(), number.Add(number, big.NewInt(1)))
- }, "test valid checkpoint registration")
-
- distance := 3*sectionSize.Uint64() + processConfirms.Uint64() - contractBackend.Blockchain().CurrentHeader().Number.Uint64()
- insertEmptyBlocks(int(distance))
-
- // Test uncontinuous checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(2, checkpoint2.Hash(), 2, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint2.Hash(), 2, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- if valid, recv := validateEvents(2, events); !valid {
- return errors.New("receive incorrect number of events")
- } else {
- for i := 0; i < len(recv); i++ {
- event := recv[i].Interface().(*contract.CheckpointOracleNewCheckpointVote)
- if !assertSignature(contractAddr, event.Index, event.CheckpointHash, event.R, event.S, event.V, accounts[i].addr) {
- return errors.New("recover signer failed")
- }
- }
- }
- number, _ := getRecent()
- return assert(2, checkpoint2.Hash(), number.Add(number, big.NewInt(1)))
- }, "test uncontinuous checkpoint registration")
-
- // Test old checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(1, checkpoint1.Hash(), 2, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint1.Hash(), 1, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- number, _ := getRecent()
- return assert(2, checkpoint2.Hash(), number)
- }, "test uncontinuous checkpoint registration")
-
- // Test stale checkpoint registration
- validateOperation(t, c, contractBackend, func() {
- number, hash := getRecent()
- v, r, s := collectSig(2, checkpoint2.Hash(), 2, nil)
- c.SetCheckpoint(transactOpts, number, hash, checkpoint2.Hash(), 2, v, r, s)
- }, func(events <-chan *contract.CheckpointOracleNewCheckpointVote) error {
- number, _ := getRecent()
- return assert(2, checkpoint2.Hash(), number.Sub(number, big.NewInt(1)))
- }, "test stale checkpoint registration")
-}
diff --git a/core/block_validator.go b/core/block_validator.go
index 517348f6e7e..2328a2c5786 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -53,10 +53,6 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
func (v *BlockValidator) ValidateBody(ctx context.Context, block *types.Block) error {
- // Check whether the block's known, and if not, that it's linkable
- //if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
- // return ErrKnownBlock
- //}
// Check whether the block is linkable
_, noHistory := params.GetNoHistoryByBlock(ctx, block.Number())
if !noHistory && v.bc.GetBlockByHash(block.ParentHash()) == nil {
diff --git a/core/blockchain.go b/core/blockchain.go
index fc7ebb52c63..fdbf0864e5b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -655,17 +655,6 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
return rawdb.HasBody(bc.db, hash, number)
}
-// HasFastBlock checks if a fast block is fully present in the database or not.
-func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
- if !bc.HasBlock(hash, number) {
- return false
- }
- if bc.receiptsCache.Contains(hash) {
- return true
- }
- return rawdb.HasReceipts(bc.db, hash, number)
-}
-
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
@@ -866,100 +855,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
bc.Chainmu.Unlock()
return false
}
- // writeAncient writes blockchain and corresponding receipt chain into ancient store.
- //
- // this function only accepts canonical chain data. All side chain will be reverted
- // eventually.
- /*
- writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
- var (
- previous = bc.CurrentFastBlock()
- )
- // If any error occurs before updating the head or we are inserting a side chain,
- // all the data written this time wll be rolled back.
- defer func() {
- if previous != nil {
- if err := bc.truncateAncient(previous.NumberU64()); err != nil {
- log.Crit("Truncate ancient store failed", "err", err)
- }
- }
- }()
- var deleted []*numberHash
- for i, block := range blockChain {
- // Short circuit insertion if shutting down or processing failed
- if bc.getProcInterrupt() {
- return 0, errInsertionInterrupted
- }
- // Short circuit insertion if it is required(used in testing only)
- if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
- return i, errors.New("insertion is terminated for testing purpose")
- }
- // Short circuit if the owner header is unknown
- if !bc.HasHeader(block.Hash(), block.NumberU64()) {
- return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
- }
-
- // Turbo-Geth doesn't have fast sync support
- // Flush data into ancient database.
- size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
- if bc.enableTxLookupIndex {
- rawdb.WriteTxLookupEntries(bc.db, block)
- }
-
- // Write tx indices if any condition is satisfied:
- // * If user requires to reserve all tx indices(txlookuplimit=0)
- // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
- // * If block number is large enough to be regarded as a recent block
- // It means blocks below the ancientLimit-txlookupLimit won't be indexed.
- //
- // But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
- // an external ancient database, during the setup, blockchain will start
- // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
- // range. In this case, all tx indices of newly imported blocks should be
- // generated.
- if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
- rawdb.WriteTxLookupEntries(batch, block)
- } else if rawdb.ReadTxIndexTail(bc.db) != nil {
- rawdb.WriteTxLookupEntries(batch, block)
- }
- stats.processed++
- }
-
- if !updateHead(blockChain[len(blockChain)-1]) {
- return 0, errors.New("side blocks can't be accepted as the ancient chain data")
- }
- previous = nil // disable rollback explicitly
-
- // Wipe out canonical block data.
- for _, nh := range deleted {
- rawdb.DeleteBlockWithoutNumber(bc.db, nh.hash, nh.number)
- rawdb.DeleteCanonicalHash(bc.db, nh.number)
- }
- for _, block := range blockChain {
- // Always keep genesis block in active database.
- if block.NumberU64() != 0 {
- rawdb.DeleteBlockWithoutNumber(bc.db, block.Hash(), block.NumberU64())
- rawdb.DeleteCanonicalHash(bc.db, block.NumberU64())
- }
- }
-
- // Wipe out side chain too.
- for _, nh := range deleted {
- for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
- rawdb.DeleteBlock(bc.db, hash, nh.number)
- }
- }
- for _, block := range blockChain {
- // Always keep genesis block in active database.
- if block.NumberU64() != 0 {
- for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
- rawdb.DeleteBlock(bc.db, hash, block.NumberU64())
- }
- }
- }
- return 0, nil
- }
- */
// writeLive writes blockchain and corresponding receipt chain into active store.
writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
skipPresenceCheck := false
@@ -1018,30 +913,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, nil
}
- /*
- // Write downloaded chain data and corresponding receipt chain data
- if len(ancientBlocks) > 0 {
- if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
- if err == errInsertionInterrupted {
- return 0, nil
- }
- return n, err
-
- }
- // Write the tx index tail (block number from where we index) before write any live blocks
- if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
- // The tx index tail can only be one of the following two options:
- // * 0: all ancient blocks have been indexed
- // * ancient-limit: the indices of blocks before ancient-limit are ignored
- if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
- if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
- rawdb.WriteTxIndexTail(bc.db, 0)
- } else {
- rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
- }
- }
- }
- */
if len(liveBlocks) > 0 {
if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
if err == errInsertionInterrupted {
@@ -1154,27 +1025,6 @@ func (bc *BlockChain) writeBlockWithState(ctx context.Context, block *types.Bloc
}
}
- // If the total difficulty is higher than our known, add it to the canonical chain
- // Second clause in the if statement reduces the vulnerability to selfish mining.
- // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
- //reorg := externTd.Cmp(localTd) > 0
- //currentBlock = bc.CurrentBlock()
- //if !reorg && externTd.Cmp(localTd) == 0 {
- // // Split same-difficulty blocks by number, then preferentially select
- // // the block generated by the local miner as the canonical block.
- // if block.NumberU64() < currentBlock.NumberU64() {
- // reorg = true
- // } else if block.NumberU64() == currentBlock.NumberU64() {
- // var currentPreserve, blockPreserve bool
- // if bc.shouldPreserve != nil {
- // currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
- // }
- // reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
- // }
- //}
- //if reorg {
- // Reorganise the chain if the parent is not the head block
-
if block.ParentHash() != currentBlock.Hash() {
if err := bc.reorg(currentBlock, block); err != nil {
return NonStatTy, err
@@ -1203,18 +1053,6 @@ func (bc *BlockChain) writeBlockWithState(ctx context.Context, block *types.Bloc
return status, nil
}
-// addFutureBlock checks if the block is within the max allowed window to get
-// accepted for future processing, and returns an error if the block is too far
-// ahead and was not added.
-func (bc *BlockChain) addFutureBlock(block *types.Block) error {
- max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time() > max {
- return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
- }
- bc.futureBlocks.Add(block.Hash(), block)
- return nil
-}
-
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 0b30e13b79d..70faea95500 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -190,24 +190,6 @@ func (b *BlockGen) GetReceipts() []*types.Receipt {
return b.receipts
}
-// makeBlockChain creates a deterministic chain of blocks rooted at parent.
-func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db *ethdb.ObjectDatabase, seed int) []*types.Block { //nolint:unused
- blocks, _, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) {
- b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
- }, false /*intermediate hashes*/)
- return blocks
-}
-
-// makeHeaderChain creates a deterministic chain of headers rooted at parent.
-func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db *ethdb.ObjectDatabase, seed int) []*types.Header { //nolint:unused
- blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed)
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- return headers
-}
-
var GenerateTrace bool
// GenerateChain creates a chain of n blocks. The first block's
@@ -263,9 +245,6 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
ctx := config.WithEIPsFlags(context.Background(), b.header.Number)
// Write state changes to db
- //if err := ibs.CommitBlock(ctx, stateWriter); err != nil {
- // return nil, nil, fmt.Errorf("call to CommitBlock to stateWriter: %w", err)
- //}
if err := ibs.CommitBlock(ctx, plainStateWriter); err != nil {
return nil, nil, fmt.Errorf("call to CommitBlock to plainStateWriter: %w", err)
}
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
deleted file mode 100644
index 61372f4451a..00000000000
--- a/core/headerchain_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "fmt"
- "testing"
- "time"
-
- "github.com/ledgerwatch/turbo-geth/consensus"
- "github.com/ledgerwatch/turbo-geth/consensus/ethash"
- "github.com/ledgerwatch/turbo-geth/core/rawdb"
- "github.com/ledgerwatch/turbo-geth/core/types"
- "github.com/ledgerwatch/turbo-geth/ethdb"
- "github.com/ledgerwatch/turbo-geth/log"
- "github.com/ledgerwatch/turbo-geth/params"
-)
-
-func verifyUnbrokenCanonchain(hc *HeaderChain) error {
- h := hc.CurrentHeader()
- for {
- canonHash, err := rawdb.ReadCanonicalHash(hc.chainDb, h.Number.Uint64())
- if err != nil {
- return err
- }
- if exp := h.Hash(); canonHash != exp {
- return fmt.Errorf("canon hash chain broken, block %d got %x, expected %x",
- h.Number, canonHash[:8], exp[:8])
- }
- // Verify that we have the TD
- if td, errTd := rawdb.ReadTd(hc.chainDb, canonHash, h.Number.Uint64()); td == nil || errTd != nil {
- if errTd != nil {
- return errTd
- }
- return fmt.Errorf("canon TD missing at block %d", h.Number)
- }
- if h.Number.Uint64() == 0 {
- break
- }
- h = hc.GetHeader(h.ParentHash, h.Number.Uint64()-1)
- }
- return nil
-}
-
-func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) {
- t.Helper()
-
- status, err := hc.InsertHeaderChain(chain, time.Now()) //nolint:staticcheck
- if status != wantStatus {
- t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v, err=%v", status, wantStatus, err)
- }
- // Always verify that the header chain is unbroken
- if err = verifyUnbrokenCanonchain(hc); err != nil {
- t.Fatal(err)
- }
- if !errors.Is(err, wantErr) {
- t.Fatalf("unexpected error from InsertHeaderChain: %v", err)
- }
-}
-
-// This test checks status reporting of InsertHeaderChain.
-func TestHeaderInsertion(t *testing.T) {
- t.Skip("needs to be recovered for TG")
- var (
- db = ethdb.NewMemDatabase()
- genesis = new(Genesis).MustCommit(db)
- )
-
- hc, err := NewHeaderChain(db, params.AllEthashProtocolChanges, ethash.NewFaker(), func() bool { return false })
- if err != nil {
- t.Fatal(err)
- }
- // chain A: G->A1->A2...A128
- chainA := makeHeaderChain(genesis.Header(), 128, ethash.NewFaker(), db, 10)
- // chain B: G->A1->B2...B128
- chainB := makeHeaderChain(chainA[0], 128, ethash.NewFaker(), db, 10)
- log.Root().SetHandler(log.StdoutHandler)
-
- // Inserting 64 headers on an empty chain, expecting
- // 1 callbacks, 1 canon-status, 0 sidestatus,
- testInsert(t, hc, chainA[:64], CanonStatTy, nil)
-
- // Inserting 64 identical headers, expecting
- // 0 callbacks, 0 canon-status, 0 sidestatus,
- testInsert(t, hc, chainA[:64], NonStatTy, nil)
-
- // Inserting the same some old, some new headers
- // 1 callbacks, 1 canon, 0 side
- testInsert(t, hc, chainA[32:96], CanonStatTy, nil)
-
- // Inserting side blocks, but not overtaking the canon chain
- testInsert(t, hc, chainB[0:32], SideStatTy, nil)
-
- // Inserting more side blocks, but we don't have the parent
- testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor)
-
- // Inserting more sideblocks, overtaking the canon chain
- testInsert(t, hc, chainB[32:97], CanonStatTy, nil)
-
- // Inserting more A-headers, taking back the canonicality
- testInsert(t, hc, chainA[90:100], CanonStatTy, nil)
-
- // And B becomes canon again
- testInsert(t, hc, chainB[97:107], CanonStatTy, nil)
-
- // And B becomes even longer
- testInsert(t, hc, chainB[107:128], CanonStatTy, nil)
-}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 584ac46e35d..0b6a0f31ff5 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -273,14 +273,6 @@ func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
}
}
-// deleteHeaderWithoutNumber removes only the block header but does not remove
-// the hash to number mapping.
-func deleteHeaderWithoutNumber(db DatabaseDeleter, hash common.Hash, number uint64) {
- if err := db.Delete(dbutils.HeadersBucket, dbutils.HeaderKey(number, hash), nil); err != nil {
- log.Crit("Failed to delete header", "err", err)
- }
-}
-
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue {
body := ReadBody(db, hash, number)
@@ -779,20 +771,6 @@ func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) error {
return nil
}
-// DeleteBlockWithoutNumber removes all block data associated with a hash, except
-// the hash to number mapping.
-func DeleteBlockWithoutNumber(db ethdb.Database, hash common.Hash, number uint64) error {
- if err := DeleteReceipts(db, number); err != nil {
- return err
- }
- deleteHeaderWithoutNumber(db, hash, number)
- DeleteBody(db, hash, number)
- if err := DeleteTd(db, hash, number); err != nil {
- return err
- }
- return nil
-}
-
const badBlockToKeep = 10
type badBlock struct {
diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go
index b063756bea2..dac7d2b497c 100644
--- a/core/state/intra_block_state.go
+++ b/core/state/intra_block_state.go
@@ -451,30 +451,6 @@ func (sdb *IntraBlockState) GetState(addr common.Address, key *common.Hash, valu
}
}
-// GetProof returns the Merkle proof for a given account
-func (sdb *IntraBlockState) GetProof(a common.Address) ([][]byte, error) {
- //sdb.Lock()
- //defer sdb.Unlock()
- //var proof proofList
- //err := sdb.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
- //return [][]byte(proof), err
- return nil, nil
-}
-
-// GetStorageProof returns the storage proof for a given key
-func (sdb *IntraBlockState) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
- //sdb.Lock()
- //defer sdb.Unlock()
- //var proof proofList
- //trie := sdb.StorageTrie(a)
- //if trie == nil {
- // return proof, errors.New("storage trie for requested address does not exist")
- //}
- //err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
- //return [][]byte(proof), err
- return nil, nil
-}
-
// GetCommittedState retrieves a value from the given account's committed storage trie.
// DESCRIBED: docs/programmers_guide/guide.md#address---identifier-of-an-account
func (sdb *IntraBlockState) GetCommittedState(addr common.Address, key *common.Hash, value *uint256.Int) {
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 864d6d5b9c2..c7e8e00ee4a 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -92,10 +92,6 @@ func TestLegacyReceiptDecoding(t *testing.T) {
if dec.CumulativeGasUsed != receipt.CumulativeGasUsed {
t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed)
}
- // rlp.Decode doesn't restore .Bloom field anymore because TG switched to bitmap indices, see dbutils.LogIndex
- //if dec.Bloom != receipt.Bloom {
- // t.Fatalf("Bloom data mismatch, want %x, have %x", receipt.Bloom, dec.Bloom)
- //}
if len(dec.Logs) != len(receipt.Logs) {
t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs))
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index d484b53e921..549da5180c1 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -172,22 +172,6 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, state IntraBlockState, chain
interpreters: make([]Interpreter, 0, 1),
}
- if chainConfig.IsEWASM(blockCtx.BlockNumber) {
- // to be implemented by EVM-C and Wagon PRs.
- // if vmConfig.EWASMInterpreter != "" {
- // extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":")
- // path := extIntOpts[0]
- // options := []string{}
- // if len(extIntOpts) > 1 {
- // options = extIntOpts[1..]
- // }
- // evm.interpreters = append(evm.interpreters, NewEVMVCInterpreter(evm, vmConfig, options))
- // } else {
- // evm.interpreters = append(evm.interpreters, NewEWASMInterpreter(evm, vmConfig))
- // }
- panic("No supported ewasm interpreter yet.")
- }
-
evm.interpreters = append(evm.interpreters, NewEVMInterpreter(evm, vmConfig))
evm.interpreter = evm.interpreters[0]
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index aaac77ad490..c627c7d911e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -37,9 +37,6 @@ type Config struct {
NoReceipts bool // Do not calculate receipts
ReadOnly bool // Do no perform any block finalisation
- EWASMInterpreter string // External EWASM interpreter options
- EVMInterpreter string // External EVM interpreter options
-
ExtraEips []int // Additional EIPS that are to be enabled
}
diff --git a/eth/backend.go b/eth/backend.go
index c5742c58e25..bd60672bf78 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -108,8 +108,7 @@ type Ethereum struct {
networkID uint64
netRPCService *ethapi.PublicNetAPI
- p2pServer *p2p.Server
- txPoolStarted bool
+ p2pServer *p2p.Server
torrentClient *bittorrent.Client
@@ -123,9 +122,6 @@ type Ethereum struct {
// initialisation of the common Ethereum object)
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Ensure configuration values are compatible and sane
- if config.SyncMode == downloader.LightSync {
- return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
- }
if !config.SyncMode.IsValid() {
return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
}
@@ -434,9 +430,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
checkpoint := config.Checkpoint
- if checkpoint == nil {
- //checkpoint = params.TrustedCheckpoints[genesisHash]
- }
if eth.handler, err = newHandler(&handlerConfig{
Database: chainDb,
Chain: eth.blockchain,
@@ -506,8 +499,6 @@ func BlockchainRuntimeConfig(config *ethconfig.Config) (vm.Config, *core.CacheCo
var (
vmConfig = vm.Config{
EnablePreimageRecording: config.EnablePreimageRecording,
- EWASMInterpreter: config.EWASMInterpreter,
- EVMInterpreter: config.EVMInterpreter,
NoReceipts: !config.StorageMode.Receipts,
}
cacheConfig = &core.CacheConfig{
@@ -583,7 +574,7 @@ func (s *Ethereum) APIs() []rpc.API {
{
Namespace: "eth",
Version: "1.0",
- Service: filters.NewPublicFilterAPI(s.APIBackend, false, 5*time.Minute),
+ Service: filters.NewPublicFilterAPI(s.APIBackend, 5*time.Minute),
Public: true,
},
//{
diff --git a/eth/bloombits.go b/eth/bloombits.go
index 4b04d72bdb2..e4800103a42 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -21,10 +21,6 @@ import (
)
const (
- // bloomServiceThreads is the number of goroutines used globally by an Ethereum
- // instance to service bloombits lookups for all running filters.
- bloomServiceThreads = 16
-
// bloomFilterThreads is the number of goroutines used locally per filter to
// multiplex requests onto the global servicing goroutines.
bloomFilterThreads = 3
@@ -37,8 +33,3 @@ const (
// to accumulate request an entire batch (avoiding hysteresis).
bloomRetrievalWait = time.Duration(0)
)
-
-// startBloomHandlers starts a batch of goroutines to accept bloom bit database
-// retrievals from possibly a range of filters and serving the data to satisfy.
-func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
-}
diff --git a/eth/debug.go b/eth/debug.go
deleted file mode 100644
index 99a73cb72a1..00000000000
--- a/eth/debug.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package eth
-
-import (
- "github.com/ledgerwatch/turbo-geth/p2p"
-)
-
-const (
- dbg1 = 1
-)
-
-const DebugName = "dbg" // Parity only supports 3 letter capabilities
-var DebugVersions = []uint{dbg1}
-var DebugLengths = map[uint]uint64{dbg1: 2}
-
-const DebugMaxMsgSize = 10 * 1024 * 1024
-
-// Debug customization for simulator, move it to sub-protocol
-const (
- DebugSetGenesisMsg = 0x00
-)
-
-type debugPeer struct {
- *p2p.Peer
- rw p2p.MsgReadWriter
-}
-
-// SendByteCode sends a BytecodeCode message.
-func (p *debugPeer) SendByteCode(id uint64, data [][]byte) error {
- msg := bytecodeMsg{ID: id, Code: data}
- return p2p.Send(p.rw, BytecodeCode, msg)
-}
diff --git a/eth/discovery.go b/eth/discovery.go
index be4a331793d..7a65dc33d50 100644
--- a/eth/discovery.go
+++ b/eth/discovery.go
@@ -17,52 +17,10 @@
package eth
import (
- "github.com/ledgerwatch/turbo-geth/core"
- "github.com/ledgerwatch/turbo-geth/core/forkid"
"github.com/ledgerwatch/turbo-geth/p2p/dnsdisc"
"github.com/ledgerwatch/turbo-geth/p2p/enode"
- "github.com/ledgerwatch/turbo-geth/rlp"
)
-// ethEntry is the "eth" ENR entry which advertises eth protocol
-// on the discovery network.
-type ethEntry struct {
- ForkID forkid.ID // Fork identifier per EIP-2124
-
- // Ignore additional fields (for forward compatibility).
- Rest []rlp.RawValue `rlp:"tail"`
-}
-
-// ENRKey implements enr.Entry.
-func (e ethEntry) ENRKey() string {
- return "eth"
-}
-
-// startEthEntryUpdate starts the ENR updater loop.
-func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
- var newHead = make(chan core.ChainHeadEvent, 10)
- sub := eth.blockchain.SubscribeChainHeadEvent(newHead)
-
- go func() {
- defer sub.Unsubscribe()
- for {
- select {
- case <-newHead:
- ln.Set(eth.currentEthEntry())
- case <-sub.Err():
- // Would be nice to sync with eth.Stop, but there is no
- // good way to do that.
- return
- }
- }
- }()
-}
-
-func (eth *Ethereum) currentEthEntry() *ethEntry {
- return &ethEntry{ForkID: forkid.NewID(eth.blockchain.Config(), eth.blockchain.Genesis().Hash(),
- eth.blockchain.CurrentHeader().Number.Uint64())}
-}
-
// setupDiscovery creates the node discovery source for the `eth` and `snap`
// protocols.
func setupDiscovery(urls []string) (enode.Iterator, error) {
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 2ffa89f5e4f..85d4ba5b690 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -63,11 +63,10 @@ var (
qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
- maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxHeadersProcess = 16536 // Number of header download results to import at once into the chain
- maxResultsProcess = 16536 // Number of content download results to import at once into the chain
- fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
- lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
+ maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
+ maxHeadersProcess = 16536 // Number of header download results to import at once into the chain
+ maxResultsProcess = 16536 // Number of content download results to import at once into the chain
+ fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
@@ -93,7 +92,6 @@ var (
errInvalidChain = errors.New("retrieved hash chain is invalid")
errInvalidBody = errors.New("retrieved block body is invalid")
errInvalidReceipt = errors.New("retrieved receipt is invalid")
- errCancelStateFetch = errors.New("state data download canceled (requested)")
errCancelContentProcessing = errors.New("content processing canceled (requested)")
errCanceled = errors.New("syncing canceled (requested)")
errNoSyncActive = errors.New("no sync active")
@@ -112,10 +110,8 @@ type Downloader struct {
mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
mux *event.TypeMux // Event multiplexer to announce sync operation events
- checkpoint uint64 // Checkpoint block number to enforce head against (e.g. fast sync)
- genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
- queue *queue // Scheduler for selecting the hashes to download
- peers *peerSet // Set of active peers from which download can proceed
+ queue *queue // Scheduler for selecting the hashes to download
+ peers *peerSet // Set of active peers from which download can proceed
stateDB ethdb.Database // Database to state sync into (and deduplicate via)
//stateBloom *trie.SyncBloom // Bloom filter for fast trie node existence checks
@@ -127,7 +123,6 @@ type Downloader struct {
chainConfig *params.ChainConfig
miningConfig *params.MiningConfig
- lightchain LightChain
blockchain BlockChain
// Callbacks
@@ -184,8 +179,8 @@ type Downloader struct {
mining *stagedsync.StagedSync
}
-// LightChain encapsulates functions required to synchronise a light chain.
-type LightChain interface {
+// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
+type BlockChain interface {
// HasHeader verifies a header's presence in the local chain.
HasHeader(common.Hash, uint64) bool
@@ -203,18 +198,10 @@ type LightChain interface {
// SetHead rewinds the local chain to a new head.
SetHead(uint64) error
-}
-
-// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
-type BlockChain interface {
- LightChain
// HasBlock verifies a block's presence in the local chain.
HasBlock(common.Hash, uint64) bool
- // HasFastBlock verifies a fast block's presence in the local chain.
- HasFastBlock(common.Hash, uint64) bool
-
// GetBlockByHash retrieves a block from the local chain.
GetBlockByHash(common.Hash) *types.Block
@@ -252,10 +239,7 @@ type BlockChain interface {
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(checkpoint uint64, stateDB ethdb.Database, mux *event.TypeMux, chainConfig *params.ChainConfig, miningConfig *params.MiningConfig, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, sm ethdb.StorageMode) *Downloader {
- if lightchain == nil {
- lightchain = chain
- }
+func New(checkpoint uint64, stateDB ethdb.Database, mux *event.TypeMux, chainConfig *params.ChainConfig, miningConfig *params.MiningConfig, chain BlockChain, dropPeer peerDropFn, sm ethdb.StorageMode) *Downloader {
dl := &Downloader{
mode: uint32(StagedSync),
stateDB: stateDB,
@@ -267,7 +251,6 @@ func New(checkpoint uint64, stateDB ethdb.Database, mux *event.TypeMux, chainCon
chainConfig: chainConfig,
miningConfig: miningConfig,
blockchain: chain,
- lightchain: lightchain,
dropPeer: dropPeer,
headerCh: make(chan dataPack, 1),
bodyCh: make(chan dataPack, 1),
@@ -323,12 +306,8 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
switch {
case d.blockchain != nil && mode == FullSync:
current = d.blockchain.CurrentBlock().NumberU64()
- case d.blockchain != nil && mode == FastSync:
- current = d.blockchain.CurrentFastBlock().NumberU64()
- case d.lightchain != nil:
- current = d.lightchain.CurrentHeader().Number.Uint64()
default:
- log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
+ log.Error("Unknown downloader chain/mode combo", "full", d.blockchain != nil, "mode", mode)
}
return ethereum.SyncProgress{
StartingBlock: d.syncStatsChainOrigin,
@@ -362,11 +341,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
return nil
}
-// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
-func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
- return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
-}
-
// UnregisterPeer remove a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
@@ -434,14 +408,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, blockNumber uint64
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
log.Info("Block synchronisation started")
}
- // If we are already full syncing, but have a fast-sync bloom filter laying
- // around, make sure it doesn't use memory any more. This is a special case
- // when the user attempts to fast sync a new empty network.
- //if mode == FullSync && d.stateBloom != nil {
- // d.stateBloom.Close()
- // If snap sync was requested, create the snap scheduler and switch to fast
- //}
- // but until snap becomes prevalent, we should support both. TODO(karalabe).
+
// Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
d.peers.Reset()
@@ -500,7 +467,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, blockNumb
if err != nil {
d.mux.Post(FailedEvent{err})
} else {
- latest := d.lightchain.CurrentHeader()
+ latest := d.blockchain.CurrentHeader()
d.mux.Post(DoneEvent{latest})
}
}()
@@ -709,10 +676,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, blockNumb
fetchers = append(fetchers, func() error { return d.fetchBodies(origin + 1) }) // Bodies are retrieved during normal and fast sync
fetchers = append(fetchers, func() error { return d.fetchReceipts(origin + 1) }) // Receipts are retrieved during fast sync
- if mode == FastSync {
- // fast sync is not supported by turbo-geth
- panic("fast sync should never be called")
- } else if mode == FullSync {
+ if mode == FullSync {
fetchers = append(fetchers, d.processFullSyncContent)
}
return d.spawnSync(fetchers)
@@ -777,10 +741,6 @@ func (d *Downloader) Terminate() {
d.quitLock.Lock()
common.SafeClose(d.quitCh)
- //if d.stateBloom != nil {
- // d.stateBloom.Close()
- //}
-
d.quitLock.Unlock()
// Cancel any pending download requests
@@ -791,7 +751,6 @@ func (d *Downloader) Terminate() {
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (uint64, error) {
p.log.Debug("Retrieving remote chain height")
- mode := d.getMode()
fetch := 1
if d.getMode() == StagedSync {
@@ -825,9 +784,6 @@ func (d *Downloader) fetchHeight(p *peerConnection) (uint64, error) {
// and request. If only 1 header was returned, make sure there's no pivot
// or there was not one requested.
head := headers[0]
- if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {
- return 0, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint)
- }
return head.Number.Uint64(), nil
case <-timeout:
@@ -908,45 +864,21 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeight uint64) (uint6
switch mode {
case FullSync:
localHeight = d.blockchain.CurrentBlock().NumberU64()
- case FastSync:
- localHeight = d.blockchain.CurrentFastBlock().NumberU64()
case StagedSync:
headHash := rawdb.ReadHeadHeaderHash(d.stateDB)
headNumber := rawdb.ReadHeaderNumber(d.stateDB, headHash)
localHeight = *headNumber
default:
- localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+ localHeight = d.blockchain.CurrentHeader().Number.Uint64()
}
p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
maxForkAncestry := fullMaxForkAncestry
- if d.getMode() == LightSync {
- maxForkAncestry = lightMaxForkAncestry
- }
// Recap floor value for binary search
if localHeight >= maxForkAncestry && mode != StagedSync {
// We're above the max reorg threshold, find the earliest fork point
floor = int64(localHeight - maxForkAncestry)
}
- // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
- // all headers before that point will be missing.
- if mode == LightSync {
- // If we don't know the current CHT position, find it
- if d.genesis == 0 {
- header := d.lightchain.CurrentHeader()
- for header != nil {
- d.genesis = header.Number.Uint64()
- if floor >= int64(d.genesis)-1 {
- break
- }
- header = d.lightchain.GetHeaderByHash(header.ParentHash)
- }
- }
- // We already know the "genesis" block number, cap floor to that
- if floor < int64(d.genesis)-1 {
- floor = int64(d.genesis) - 1
- }
- }
ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
if err == nil {
@@ -1019,12 +951,8 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re
switch mode {
case FullSync:
known = d.blockchain.HasBlock(h, n)
- case FastSync:
- known = d.blockchain.HasFastBlock(h, n)
- case StagedSync:
- known = d.blockchain.HasHeader(h, n)
default:
- known = d.lightchain.HasHeader(h, n)
+ known = d.blockchain.HasHeader(h, n)
}
if known {
number, hash = n, h
@@ -1100,12 +1028,8 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
switch mode {
case FullSync:
known = d.blockchain.HasBlock(h, n)
- case FastSync:
- known = d.blockchain.HasFastBlock(h, n)
- case StagedSync:
- known = d.blockchain.HasHeader(h, n)
default:
- known = d.lightchain.HasHeader(h, n)
+ known = d.blockchain.HasHeader(h, n)
}
if !known {
end = check
@@ -1116,7 +1040,7 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
if mode == StagedSync {
header = rawdb.ReadHeader(d.stateDB, h, n)
} else {
- header = d.lightchain.GetHeaderByHash(h)
+ header = d.blockchain.GetHeaderByHash(h)
}
if header.Number.Uint64() != check {
p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
@@ -1299,9 +1223,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
if n := len(headers); n > 0 {
// Retrieve the current head we're at
var head uint64
- if mode == LightSync {
- head = d.lightchain.CurrentHeader().Number.Uint64()
- } else if mode == StagedSync {
+ if mode == StagedSync {
headHash := rawdb.ReadHeadHeaderHash(d.stateDB)
headNumber := rawdb.ReadHeaderNumber(d.stateDB, headHash)
head = *headNumber
@@ -1685,22 +1607,22 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
if mode != StagedSync {
defer func() {
if rollback > 0 {
- lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
- if mode != LightSync && mode != StagedSync {
+ lastHeader, lastFastBlock, lastBlock := d.blockchain.CurrentHeader().Number, common.Big0, common.Big0
+ if mode != StagedSync {
lastFastBlock = d.blockchain.CurrentFastBlock().Number()
lastBlock = d.blockchain.CurrentBlock().Number()
}
- if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+ if err := d.blockchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
// We're already unwinding the stack, only print the error to make it more visible
log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
}
curFastBlock, curBlock := common.Big0, common.Big0
- if mode != LightSync && mode != StagedSync {
+ if mode != StagedSync {
curFastBlock = d.blockchain.CurrentFastBlock().Number()
curBlock = d.blockchain.CurrentBlock().Number()
}
log.Warn("Rolled back chain segment",
- "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
+ "header", fmt.Sprintf("%d->%d", lastHeader, d.blockchain.CurrentHeader().Number),
"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
}
@@ -1739,29 +1661,14 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
// L: Sync begins, and finds common ancestor at 11
// L: Request new headers up from 11 (R's TD was higher, it must have something)
// R: Nothing to give
- if mode != LightSync && mode != StagedSync {
+ if mode != StagedSync {
head := d.blockchain.CurrentBlock()
if !gotHeaders && blockNumber > head.NumberU64() {
return errStallingPeer
}
- }
- if mode != StagedSync {
// Disable any rollback and return
rollback = 0
}
- // If fast or light syncing, ensure promised headers are indeed delivered. This is
- // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
- // of delivering the post-pivot blocks that would flag the invalid content.
- //
- // This check cannot be executed "as is" for full imports, since blocks may still be
- // queued for processing when the header download completes. However, as long as the
- // peer gave us something useful, we're already happy/progressed (above check).
- if mode == FastSync || mode == LightSync {
- head := d.lightchain.CurrentHeader()
- if blockNumber > head.Number.Uint64() {
- return errStallingPeer
- }
- }
return nil
}
// Otherwise split the chunk of headers into batches and process them
@@ -1779,7 +1686,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
chunk := headers[:limit]
// In case of header only syncing, validate the chunk immediately
- if mode == FastSync || mode == LightSync || mode == StagedSync {
+ if mode == StagedSync {
// If we're importing pure headers, verify based on their recentness
var pivot uint64
@@ -1812,7 +1719,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
}
}
} else {
- n, err = d.lightchain.InsertHeaderChain(chunk, frequency)
+ n, err = d.blockchain.InsertHeaderChain(chunk, frequency)
}
if err == nil && mode == StagedSync && newCanonical && d.headersState != nil {
if err1 := d.headersState.Update(d.stateDB, chunk[len(chunk)-1].Number.Uint64()); err1 != nil {
@@ -1821,7 +1728,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
}
if mode != StagedSync || err != nil {
// If some headers were inserted, add them too to the rollback list
- if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {
+ if (frequency > 1) && n > 0 && rollback == 0 {
rollback = chunk[0].Number.Uint64()
}
log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
@@ -1838,8 +1745,8 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, blockNumber uin
}
}
}
- // Unless we're doing light chains, schedule the headers for associated content retrieval
- if mode == FullSync || mode == FastSync {
+ // Schedule the headers for associated content retrieval
+ if mode == FullSync {
// If we've reached the allowed number of pending headers, stall a bit
for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select {
@@ -1955,30 +1862,6 @@ func (d *Downloader) importBlockResults(logPrefix string, results []*fetchResult
return 0, nil
}
-func (d *Downloader) commitPivotBlock(result *fetchResult) error {
- block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
- log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
-
- // Commit the pivot block as the new head, will require full sync from here on
- if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, atomic.LoadUint64(&d.ancientLimit)); err != nil {
- return err
- }
- if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
- return err
- }
- atomic.StoreInt32(&d.committed, 1)
-
- // If we had a bloom filter for the state sync, deallocate it now. Note, we only
- // deallocate internally, but keep the empty wrapper. This ensures that if we do
- // a rollback after committing the pivot and restarting fast sync, we don't end
- // up using a nil bloom. Empty bloom is fine, it just returns that it does not
- // have the info we need, so reach down to the database instead.
- //if d.stateBloom != nil {
- // d.stateBloom.Close()
- //}
- return nil
-}
-
// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
diff --git a/eth/downloader/downloader_stagedsync_test.go b/eth/downloader/downloader_stagedsync_test.go
index ac0744ff55b..2f0669dfd02 100644
--- a/eth/downloader/downloader_stagedsync_test.go
+++ b/eth/downloader/downloader_stagedsync_test.go
@@ -45,7 +45,7 @@ func newStagedSyncTester() (*stagedSyncTester, func()) {
if err := rawdb.WriteBlock(context.Background(), tester.db, testGenesis); err != nil {
panic(err)
}
- tester.downloader = New(uint64(StagedSync), tester.db, new(event.TypeMux), params.TestChainConfig, nil, tester, nil, tester.dropPeer, ethdb.DefaultStorageMode)
+ tester.downloader = New(uint64(StagedSync), tester.db, new(event.TypeMux), params.TestChainConfig, nil, tester, tester.dropPeer, ethdb.DefaultStorageMode)
//tester.downloader.SetBatchSize(32*1024 /* cacheSize */, 16*1024 /* batchSize */)
tester.downloader.SetBatchSize(0 /* cacheSize */, 16*1024 /* batchSize */)
tester.downloader.SetStagedSync(
@@ -149,11 +149,6 @@ func (st *stagedSyncTester) HasBlock(hash common.Hash, number uint64) bool {
panic("")
}
-// HasFastBlock is part of the implementation of BlockChain interface defined in downloader.go
-func (st *stagedSyncTester) HasFastBlock(hash common.Hash, number uint64) bool {
- panic("")
-}
-
// HasHeader is part of the implementation of BlockChain interface defined in downloader.go
func (st *stagedSyncTester) HasHeader(hash common.Hash, number uint64) bool {
return rawdb.HasHeader(st.db, hash, number)
@@ -213,10 +208,6 @@ func (st *stagedSyncTester) sync(id string, td *big.Int) error {
st.lock.RLock()
hash := st.peers[id].chain.headBlock().Hash()
number := st.peers[id].chain.headBlock().NumberU64()
- // If no particular TD was requested, load from the peer's blockchain
- if td == nil {
- td = st.peers[id].chain.td(hash)
- }
st.lock.RUnlock()
// Synchronise with the chosen peer and ensure proper cleanup afterwards
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 38c5c173b45..d1648300d22 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -17,27 +17,11 @@
package downloader
import (
- "context"
- "errors"
"fmt"
- "math/big"
"strings"
"sync"
- "sync/atomic"
"testing"
"time"
-
- ethereum "github.com/ledgerwatch/turbo-geth"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/common/dbutils"
- "github.com/ledgerwatch/turbo-geth/consensus"
- "github.com/ledgerwatch/turbo-geth/consensus/ethash"
- "github.com/ledgerwatch/turbo-geth/core/types"
- "github.com/ledgerwatch/turbo-geth/core/vm"
- "github.com/ledgerwatch/turbo-geth/ethdb"
- "github.com/ledgerwatch/turbo-geth/event"
- "github.com/ledgerwatch/turbo-geth/params"
- "github.com/stretchr/testify/assert"
)
const OverwriteBlockCacheItems = 1024
@@ -52,1501 +36,6 @@ func init() {
fsHeaderContCheck = 500 * time.Millisecond
}
-// downloadTester is a test simulator for mocking out local block chain.
-type downloadTester struct {
- downloader *Downloader
-
- genesis *types.Block // Genesis blocks used by the tester and peers
- stateDb *ethdb.ObjectDatabase // Database used by the tester for syncing from peers
- peerDb *ethdb.ObjectDatabase // Database of the peers containing all data
- peers map[string]*downloadTesterPeer
- engine consensus.Engine
-
- ownHashes []common.Hash // Hash chain belonging to the tester
- ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester
- ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
- ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
- ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain
-
- ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester
- ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester
- ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
- ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain
-
- lock sync.RWMutex
-}
-
-// newTester creates a new downloader test mocker.
-func newTester() *downloadTester {
- tester := &downloadTester{
- genesis: testGenesis,
- peerDb: ethdb.NewMemDatabase(),
- peers: make(map[string]*downloadTesterPeer),
- ownHashes: []common.Hash{testGenesis.Hash()},
- ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
- ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
- ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
- ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
-
- // Initialize ancient store with test genesis block
- ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
- ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
- ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
- ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
- engine: ethash.NewFaker(),
- }
- tester.stateDb = ethdb.NewMemDatabase()
- err := tester.stateDb.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(testGenesis.NumberU64(), testGenesis.Root()), []byte{0x00})
- if err != nil {
- panic(err)
- }
- tester.downloader = New(uint64(FullSync), tester.stateDb, new(event.TypeMux), params.TestChainConfig, nil, tester, nil, tester.dropPeer, ethdb.DefaultStorageMode)
- return tester
-}
-
-// terminate aborts any operations on the embedded downloader and releases all
-// held resources.
-func (dl *downloadTester) terminate() {
- dl.downloader.Terminate()
- dl.stateDb.Close()
-}
-
-// sync starts synchronizing with a remote peer, blocking until it completes.
-func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
- dl.lock.RLock()
- hash := dl.peers[id].chain.headBlock().Hash()
- number := dl.peers[id].chain.headBlock().NumberU64()
- // If no particular TD was requested, load from the peer's blockchain
- if td == nil {
- td = dl.peers[id].chain.td(hash)
- }
- dl.lock.RUnlock()
-
- // Synchronise with the chosen peer and ensure proper cleanup afterwards
- err := dl.downloader.synchronise(id, hash, number, mode, nil, func() error { return nil })
- select {
- case <-dl.downloader.cancelCh:
- // Ok, downloader fully cancelled after sync cycle
- default:
- // Downloader is still accepting packets, can block a peer up
- panic("downloader active post sync cycle") // panic will be caught by tester
- }
- return err
-}
-
-// HasHeader checks if a header is present in the testers canonical chain.
-func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
- return dl.GetHeaderByHash(hash) != nil
-}
-
-// HasBlock checks if a block is present in the testers canonical chain.
-func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
- return dl.GetBlockByHash(hash) != nil
-}
-
-// HasFastBlock checks if a block is present in the testers canonical chain.
-func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- if _, ok := dl.ancientReceipts[hash]; ok {
- return true
- }
- _, ok := dl.ownReceipts[hash]
- return ok
-}
-
-// GetHeader retrieves a header from the testers canonical chain.
-func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
- return dl.getHeaderByHash(hash)
-}
-
-// getHeaderByHash returns the header if found either within ancients or own blocks)
-// This method assumes that the caller holds at least the read-lock (dl.lock)
-func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
- header := dl.ancientHeaders[hash]
- if header != nil {
- return header
- }
- return dl.ownHeaders[hash]
-}
-
-// GetBlock retrieves a block from the testers canonical chain.
-func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- block := dl.ancientBlocks[hash]
- if block != nil {
- return block
- }
- return dl.ownBlocks[hash]
-}
-
-// CurrentHeader retrieves the current head header from the canonical chain.
-func (dl *downloadTester) CurrentHeader() *types.Header {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- for i := len(dl.ownHashes) - 1; i >= 0; i-- {
- if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
- return header
- }
- if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
- return header
- }
- }
- return dl.genesis.Header()
-}
-
-// CurrentBlock retrieves the current head block from the canonical chain.
-func (dl *downloadTester) CurrentBlock() *types.Block {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- for i := len(dl.ownHashes) - 1; i >= 0; i-- {
- if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
- // FIXME: Support ancients
- if _, err := dl.stateDb.Get(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(block.NumberU64(), block.Root())); err == nil {
- return block
- }
- return block
- }
- if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
- return block
- }
- }
- return dl.genesis
-}
-
-// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
-func (dl *downloadTester) CurrentFastBlock() *types.Block {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- for i := len(dl.ownHashes) - 1; i >= 0; i-- {
- if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
- return block
- }
- if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
- return block
- }
- }
- return dl.genesis
-}
-
-// FastSyncCommitHead manually sets the head block to a given hash.
-func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
- // For now only check that the state trie is correct
- if block := dl.GetBlockByHash(hash); block != nil {
- return nil
- }
- return fmt.Errorf("non existent block: %x", hash[:4])
-}
-
-// GetTd retrieves the block's total difficulty from the canonical chain.
-func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
- dl.lock.RLock()
- defer dl.lock.RUnlock()
-
- return dl.getTd(hash)
-}
-
-// getTd retrieves the block's total difficulty if found either within
-// ancients or own blocks).
-// This method assumes that the caller holds at least the read-lock (dl.lock)
-func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
- if td := dl.ancientChainTd[hash]; td != nil {
- return td
- }
- return dl.ownChainTd[hash]
-}
-
-// InsertHeaderChain injects a new batch of headers into the simulated chain.
-func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
- if dl.getHeaderByHash(headers[0].ParentHash) == nil {
- return 0, fmt.Errorf("error in InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
- }
- var hashes []common.Hash
- for i := 1; i < len(headers); i++ {
- hash := headers[i-1].Hash()
- if headers[i].ParentHash != headers[i-1].Hash() {
- return i, fmt.Errorf("non-contiguous import at position %d", i)
- }
- hashes = append(hashes, hash)
- }
- hashes = append(hashes, headers[len(headers)-1].Hash())
- // Do a full insert if pre-checks passed
- for i, header := range headers {
- hash := hashes[i]
- if dl.getHeaderByHash(hash) != nil {
- continue
- }
- if dl.getHeaderByHash(header.ParentHash) == nil {
- // This _should_ be impossible, due to precheck and induction
- return i, fmt.Errorf("error in InsertHeaderChain: unknown parent at position %d", i)
- }
- dl.ownHashes = append(dl.ownHashes, hash)
- dl.ownHeaders[hash] = header
-
- td := dl.getTd(header.ParentHash)
- dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
- }
- return len(headers), nil
-}
-
-func (dl *downloadTester) InsertBodyChain(_ string, _ context.Context, _ ethdb.Database, blocks types.Blocks) (bool, error) {
- return false, nil
-}
-
-func (dl *downloadTester) GetVMConfig() *vm.Config {
- return nil
-}
-
-// InsertChain injects a new batch of blocks into the simulated chain.
-func (dl *downloadTester) InsertChain(_ context.Context, blocks types.Blocks) (i int, err error) {
- dl.lock.Lock()
- defer dl.lock.Unlock()
- for i, block := range blocks {
- if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
- return i, fmt.Errorf("error in InsertChain: unknown parent at position %d / %d", i, len(blocks))
- }
- if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
- dl.ownHashes = append(dl.ownHashes, block.Hash())
- dl.ownHeaders[block.Hash()] = block.Header()
- }
- dl.ownBlocks[block.Hash()] = block
- dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
- err := dl.stateDb.Put(dbutils.BlockBodyPrefix, dbutils.BlockBodyKey(block.NumberU64(), block.Root()), []byte{0x00})
- if err != nil {
- panic(err)
- }
- td := dl.getTd(block.ParentHash())
- dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
- }
- return len(blocks), nil
-}
-
-// InsertReceiptChain injects a new batch of receipts into the simulated chain.
-func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- for i := 0; i < len(blocks) && i < len(receipts); i++ {
- if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
- return i, errors.New("unknown owner")
- }
- if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
- if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
- return i, errors.New("error in InsertReceiptChain: unknown parent")
- }
- }
- if blocks[i].NumberU64() <= ancientLimit {
- dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
- dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
-
- // Migrate from active db to ancient db
- dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
- dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
- delete(dl.ownHeaders, blocks[i].Hash())
- delete(dl.ownChainTd, blocks[i].Hash())
- } else {
- dl.ownBlocks[blocks[i].Hash()] = blocks[i]
- dl.ownReceipts[blocks[i].Hash()] = receipts[i]
- }
- }
- return len(blocks), nil
-}
-
-// SetHead rewinds the local chain to a new head.
-func (dl *downloadTester) SetHead(head uint64) error {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- // Find the hash of the head to reset to
- var hash common.Hash
- for h, header := range dl.ownHeaders {
- if header.Number.Uint64() == head {
- hash = h
- }
- }
- for h, header := range dl.ancientHeaders {
- if header.Number.Uint64() == head {
- hash = h
- }
- }
- if hash == (common.Hash{}) {
- return fmt.Errorf("unknown head to set: %d", head)
- }
- // Find the offset in the header chain
- var offset int
- for o, h := range dl.ownHashes {
- if h == hash {
- offset = o
- break
- }
- }
- // Remove all the hashes and associated data afterwards
- for i := offset + 1; i < len(dl.ownHashes); i++ {
- delete(dl.ownChainTd, dl.ownHashes[i])
- delete(dl.ownHeaders, dl.ownHashes[i])
- delete(dl.ownReceipts, dl.ownHashes[i])
- delete(dl.ownBlocks, dl.ownHashes[i])
-
- delete(dl.ancientChainTd, dl.ownHashes[i])
- delete(dl.ancientHeaders, dl.ownHashes[i])
- delete(dl.ancientReceipts, dl.ownHashes[i])
- delete(dl.ancientBlocks, dl.ownHashes[i])
- }
- dl.ownHashes = dl.ownHashes[:offset+1]
- return nil
-}
-
-// Rollback removes some recently added elements from the chain.
-func (dl *downloadTester) Rollback(hashes []common.Hash) {
-}
-
-func (dl *downloadTester) NotifyHeightKnownBlock(_ uint64) {}
-
-// newPeer registers a new block download source into the downloader.
-func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
- dl.peers[id] = peer
- return dl.downloader.RegisterPeer(id, version, peer)
-}
-
-// dropPeer simulates a hard peer removal from the connection pool.
-func (dl *downloadTester) dropPeer(id string) {
- dl.lock.Lock()
- defer dl.lock.Unlock()
-
- delete(dl.peers, id)
- dl.downloader.UnregisterPeer(id)
-}
-
-func (dl *downloadTester) GetBlockByNumber(number uint64) *types.Block {
- panic("not implemented and should not be called")
-}
-
-func (dl *downloadTester) Engine() consensus.Engine {
- return dl.engine
-}
-
-func (dl *downloadTester) GetHeader(common.Hash, uint64) *types.Header {
- panic("not implemented and should not be called")
-}
-
-func (dl *downloadTester) Stop() {
-}
-
-type downloadTesterPeer struct {
- dl *downloadTester
- id string
- chain *testChain
- missingStates map[common.Hash]bool // State entries that fast sync should not return
-}
-
-// Head constructs a function to retrieve a peer's current head hash
-// and total difficulty.
-func (dlp *downloadTesterPeer) Head() (common.Hash, uint64) {
- b := dlp.chain.headBlock()
- return b.Hash(), b.NumberU64()
-}
-
-// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed
-// origin; associated with a particular peer in the download tester. The returned
-// function can be used to retrieve batches of headers from the particular peer.
-func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
- result := dlp.chain.headersByHash(origin, amount, skip, reverse)
- go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
- return nil
-}
-
-// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
-// origin; associated with a particular peer in the download tester. The returned
-// function can be used to retrieve batches of headers from the particular peer.
-func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
- result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
- go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
- return nil
-}
-
-// RequestBodies constructs a getBlockBodies method associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of block bodies from the particularly requested peer.
-func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
- txs, uncles := dlp.chain.bodies(hashes)
- go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
- return nil
-}
-
-// RequestReceipts constructs a getReceipts method associated with a particular
-// peer in the download tester. The returned function can be used to retrieve
-// batches of block receipts from the particularly requested peer.
-func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
- receipts := dlp.chain.receipts(hashes)
- go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
- return nil
-}
-
-// assertOwnChain checks if the local chain contains the correct number of items
-// of the various chain components.
-func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
- // Mark this method as a helper to report errors at callsite, not in here
- t.Helper()
-
- assertOwnForkedChain(t, tester, 1, []int{length})
-}
-
-// assertOwnForkedChain checks if the local forked chain contains the correct
-// number of items of the various chain components.
-func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
- // Mark this method as a helper to report errors at callsite, not in here
- t.Helper()
-
- // Initialize the counters for the first fork
- headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
-
- // Update the counters for each subsequent fork
- for _, length := range lengths[1:] {
- headers += length - common
- blocks += length - common
- receipts += length - common
- }
- if tester.downloader.getMode() == LightSync {
- blocks, receipts = 1, 1
- }
- if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
- t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
- }
- if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
- t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
- }
- if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
- t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
- }
-}
-
-// Tests that simple synchronization against a canonical chain works correctly.
-// In this test common ancestor lookup should be short circuited and not require
-// binary searching.
-func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonSync(t, 64, FullSync) }
-func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonSync(t, 65, FullSync) }
-func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, 66, FullSync) }
-
-func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- // Create a small enough block chain to download
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
- tester.newPeer("peer", protocol, chain)
-
- // Synchronise with the peer and make sure all relevant data was retrieved
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-}
-
-// Tests that if a large batch of blocks are being downloaded, it is throttled
-// until the cached blocks are retrieved.
-func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
-func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }
-func TestThrottling66Full(t *testing.T) { testThrottling(t, 66, FullSync) }
-
-func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
- tester := newTester()
-
- // Create a long block chain to download and the tester
- targetBlocks := getTestChainBase().len() - 1
- testChain := getTestChainBase().copy(getTestChainBase().len())
- err := tester.newPeer("peer", protocol, testChain)
- if err != nil {
- t.Fatal(err)
- }
-
- // Wrap the importer to allow stepping
- blocked, proceed := uint32(0), make(chan struct{})
- tester.downloader.chainInsertHook = func(results []*fetchResult) {
- atomic.StoreUint32(&blocked, uint32(len(results)))
- <-proceed
- }
-
- // Start a synchronisation concurrently
- errc := make(chan error, 1)
- go func() {
- errc <- tester.sync("peer", nil, mode)
- }()
-
- // Iteratively take some blocks, always checking the retrieval count
- for {
- // Check the retrieval count synchronously (! reason for this ugly block)
- tester.lock.RLock()
- retrieved := len(tester.ownBlocks)
- tester.lock.RUnlock()
- if retrieved >= targetBlocks+1 {
- break
- }
- // Wait a bit for sync to throttle itself
- var cached, frozen int
- for start := time.Now(); time.Since(start) < 3*time.Second; {
- time.Sleep(25 * time.Millisecond)
-
- tester.lock.Lock()
- tester.downloader.queue.lock.Lock()
- tester.downloader.queue.resultCache.lock.Lock()
- {
- cached = tester.downloader.queue.resultCache.countCompleted()
- frozen = int(atomic.LoadUint32(&blocked))
- retrieved = len(tester.ownBlocks)
- }
- tester.downloader.queue.resultCache.lock.Unlock()
- tester.downloader.queue.lock.Unlock()
- tester.lock.Unlock()
-
- if cached == blockCacheMaxItems ||
- cached == blockCacheMaxItems-reorgProtHeaderDelay ||
- retrieved+cached+frozen == targetBlocks+1 ||
- retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
- break
- }
- }
- // Make sure we filled up the cache, then exhaust it
- time.Sleep(25 * time.Millisecond) // give it a chance to screw up
- tester.lock.RLock()
- retrieved = len(tester.ownBlocks)
- tester.lock.RUnlock()
- if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
- t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
- }
-
- // Permit the blocked blocks to import
- if atomic.LoadUint32(&blocked) > 0 {
- atomic.StoreUint32(&blocked, uint32(0))
- proceed <- struct{}{}
- }
- }
- // Check that we haven't pulled more blocks than available
- assertOwnChain(t, tester, targetBlocks+1)
- if err := <-errc; err != nil {
- t.Fatalf("block synchronization failed: %v", err)
- }
- tester.terminate()
-
-}
-
-// Tests that simple synchronization against a forked chain works correctly. In
-// this test common ancestor lookup should *not* be short circuited, and a full
-// binary search should be executed.
-func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
-func TestForkedSync65Full(t *testing.T) { testForkedSync(t, 65, FullSync) }
-func TestForkedSync66Full(t *testing.T) { testForkedSync(t, 66, FullSync) }
-
-func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chainA := getTestChainForkLightA().shorten(getTestChainBase().len() + 80)
- chainB := getTestChainForkLightB().shorten(getTestChainBase().len() + 80)
- tester.newPeer("fork A", protocol, chainA)
- tester.newPeer("fork B", protocol, chainB)
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("fork A", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- // Synchronise with the second peer and make sure that fork is pulled too
- if err := tester.sync("fork B", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnForkedChain(t, tester, getTestChainBase().len(), []int{chainA.len(), chainB.len()})
-}
-
-// Tests that synchronising against a much shorter but much heavyer fork works
-// corrently and is not dropped.
-func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
-func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, 65, FullSync) }
-func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, 66, FullSync) }
-
-func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chainA := getTestChainForkLightA().shorten(getTestChainBase().len() + 80)
- chainB := getTestChainForkHeavy().shorten(getTestChainBase().len() + 80)
- tester.newPeer("light", protocol, chainA)
- tester.newPeer("heavy", protocol, chainB)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("light", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- // Synchronise with the second peer and make sure that fork is pulled too
- if err := tester.sync("heavy", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnForkedChain(t, tester, getTestChainBase().len(), []int{chainA.len(), chainB.len()})
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head, ensuring that malicious peers cannot waste resources by feeding
-// long dead chains.
-func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
-func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, 65, FullSync) }
-func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, 66, FullSync) }
-
-func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chainA := getTestChainForkLightA()
- chainB := getTestChainForkLightB()
- tester.newPeer("original", protocol, chainA)
- tester.newPeer("rewriter", protocol, chainB)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- // Synchronise with the second peer and ensure that the fork is rejected to being too old
- if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
-}
-
-// Tests that chain forks are contained within a certain interval of the current
-// chain head for short but heavy forks too. These are a bit special because they
-// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
-func TestBoundedHeavyForkedSync65Full(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FullSync) }
-func TestBoundedHeavyForkedSync66Full(t *testing.T) { testBoundedHeavyForkedSync(t, 66, FullSync) }
-
-func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
-
- // Create a long enough forked chain
- chainA := getTestChainForkLightA()
- chainB := getTestChainForkHeavy()
- tester.newPeer("original", protocol, chainA)
-
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("original", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chainA.len())
-
- tester.newPeer("heavy-rewriter", protocol, chainB) //nolint:errcheck
- // Synchronise with the second peer and ensure that the fork is rejected to being too old
- if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
- t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
- }
- tester.terminate()
-}
-
-// Tests that an inactive downloader will not accept incoming block headers,
-// bodies and receipts.
-func TestInactiveDownloader63(t *testing.T) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- // Check that neither block headers nor bodies are accepted
- if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
- if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
- if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
- t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
- }
-}
-
-// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
-func TestCancel65Full(t *testing.T) { testCancel(t, 65, FullSync) }
-func TestCancel66Full(t *testing.T) { testCancel(t, 66, FullSync) }
-
-func testCancel(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chain := getTestChainBase().shorten(MaxHeaderFetch)
- tester.newPeer("peer", protocol, chain)
-
- // Make sure canceling works with a pristine downloader
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
- // Synchronise with the peer, but cancel afterwards
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- tester.downloader.Cancel()
- if !tester.downloader.queue.Idle() {
- t.Errorf("download queue not idle")
- }
-}
-
-// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
-func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, 65, FullSync) }
-func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, 66, FullSync) }
-
-func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
-
- // Create various peers with various parts of the chain
- targetPeers := 8
- chain := getTestChainBase().shorten(targetPeers * 100)
-
- for i := 0; i < targetPeers; i++ {
- id := fmt.Sprintf("peer #%d", i)
- tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
- }
- if err := tester.sync("peer #0", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-}
-
-// Tests that synchronisations behave well in multi-version protocol environments
-// and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
-func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, 65, FullSync) }
-func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, 66, FullSync) }
-
-func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- // Create a small enough block chain to download
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
-
- // Create peers of every type
- assert.NoError(t, tester.newPeer("peer 64", 64, chain))
- assert.NoError(t, tester.newPeer("peer 65", 65, chain))
- assert.NoError(t, tester.newPeer("peer 66", 66, chain))
-
- // Synchronise with the requested peer and make sure all blocks were retrieved
- if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-
- // Check that no peers have been dropped off
- for _, version := range []int{64, 65, 66} {
- peer := fmt.Sprintf("peer %d", version)
- if _, ok := tester.peers[peer]; !ok {
- t.Errorf("%s dropped", peer)
- }
- }
-}
-
-// Tests that if a block is empty (e.g. header only), no body request should be
-// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
-func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, 65, FullSync) }
-func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, 66, FullSync) }
-
-func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
-
- // Create a block chain to download
- chain := getTestChainBase().copy(getTestChainBase().len())
- tester.newPeer("peer", protocol, chain)
-
- // Instrument the downloader to signal body requests
- bodiesHave, receiptsHave := int32(0), int32(0)
- tester.downloader.bodyFetchHook = func(headers []*types.Header) {
- atomic.AddInt32(&bodiesHave, int32(len(headers)))
- }
- tester.downloader.receiptFetchHook = func(headers []*types.Header) {
- atomic.AddInt32(&receiptsHave, int32(len(headers)))
- }
- // Synchronise with the peer and make sure all blocks were retrieved
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-
- // Validate the number of block bodies that should have been requested
- bodiesNeeded, receiptsNeeded := 0, 0
- for _, block := range chain.blockm {
- if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
- bodiesNeeded++
- }
- }
- for _, receipt := range chain.receiptm {
- if mode == FastSync && len(receipt) > 0 {
- receiptsNeeded++
- }
- }
- if int(bodiesHave) != bodiesNeeded {
- t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
- }
- if int(receiptsHave) != receiptsNeeded {
- t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
- }
-}
-
-// Tests that headers are enqueued continuously, preventing malicious nodes from
-// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
-func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, 65, FullSync) }
-func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, 66, FullSync) }
-
-func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
- brokenChain := chain.shorten(chain.len())
- delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
- tester.newPeer("attack", protocol, brokenChain)
-
- if err := tester.sync("attack", nil, mode); err == nil {
- t.Fatalf("succeeded attacker synchronisation")
- }
- // Synchronise with the valid peer and make sure sync succeeds
- tester.newPeer("valid", protocol, chain)
- if err := tester.sync("valid", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-}
-
-// Tests that if requested headers are shifted (i.e. first is missing), the queue
-// detects the invalid numbering.
-func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
-func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, 65, FullSync) }
-func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, 66, FullSync) }
-
-func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
-
- // Attempt a full sync with an attacker feeding shifted headers
- brokenChain := chain.shorten(chain.len())
- delete(brokenChain.headerm, brokenChain.chain[1])
- delete(brokenChain.blockm, brokenChain.chain[1])
- delete(brokenChain.receiptm, brokenChain.chain[1])
- tester.newPeer("attack", protocol, brokenChain)
- if err := tester.sync("attack", nil, mode); err == nil {
- t.Fatalf("succeeded attacker synchronisation")
- }
-
- // Synchronise with the valid peer and make sure sync succeeds
- tester.newPeer("valid", protocol, chain)
- if err := tester.sync("valid", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- assertOwnChain(t, tester, chain.len())
-}
-
-// Tests that upon detecting an invalid header, the recent ones are rolled back
-// for various failure scenarios. Afterwards a full sync is attempted to make
-// sure no state was corrupted.
-// no fast sync for TurboGeth
-/*
-func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
-func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
-func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }
-
-func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
-
- // Create a small enough block chain to download
- targetBlocks := 2*fsHeaderSafetyNet + 256 + fsMinFullBlocks
- chain := getTestChainBase().shorten(targetBlocks)
-
- // Attempt to sync with an attacker that feeds junk during the fast sync phase.
- // This should result in the last fsHeaderSafetyNet headers being rolled back.
- missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
- fastAttackChain := chain.shorten(chain.len())
- delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
- tester.newPeer("fast-attack", protocol, fastAttackChain)
-
- if err := tester.sync("fast-attack", nil, mode); err == nil {
- t.Fatalf("succeeded fast attacker synchronisation")
- }
- if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
- t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
- }
-
- // Attempt to sync with an attacker that feeds junk during the block import phase.
- // This should result in both the last fsHeaderSafetyNet number of headers being
- // rolled back, and also the pivot point being reverted to a non-block status.
- missing = 2*fsHeaderSafetyNet + MaxHeaderFetch + 1
- blockAttackChain := chain.shorten(chain.len())
- delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
- delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
- tester.newPeer("block-attack", protocol, blockAttackChain)
-
- if err := tester.sync("block-attack", nil, mode); err == nil {
- t.Fatalf("succeeded block attacker synchronisation")
- }
- if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
- t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
- }
- if mode == FastSync {
- if head := tester.CurrentBlock().NumberU64(); head != 0 {
- t.Errorf("fast sync pivot block #%d not rolled back", head)
- }
- }
-
- // Attempt to sync with an attacker that withholds promised blocks after the
- // fast sync pivot point. This could be a trial to leave the node with a bad
- // but already imported pivot block.
- withholdAttackChain := chain.shorten(chain.len())
- tester.newPeer("withhold-attack", protocol, withholdAttackChain)
- tester.downloader.syncInitHook = func(uint64, uint64) {
- for i := missing; i < withholdAttackChain.len(); i++ {
- delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
- }
- tester.downloader.syncInitHook = nil
- }
- if err := tester.sync("withhold-attack", nil, mode); err == nil {
- t.Fatalf("succeeded withholding attacker synchronisation")
- }
- if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
- t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
- }
- if mode == FastSync {
- if head := tester.CurrentBlock().NumberU64(); head != 0 {
- t.Errorf("fast sync pivot block #%d not rolled back", head)
- }
- }
-
- // synchronise with the valid peer and make sure sync succeeds. Since the last rollback
- // should also disable fast syncing for this process, verify that we did a fresh full
- // sync. Note, we can't assert anything about the receipts since we won't purge the
- // database of them, hence we can't use assertOwnChain.
- tester.newPeer("valid", protocol, chain)
- if err := tester.sync("valid", nil, mode); err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
- }
- if hs := len(tester.ownHeaders); hs != chain.len() {
- t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
- }
- if mode != LightSync {
- if bs := len(tester.ownBlocks); bs != chain.len() {
- t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
- }
- }
- tester.terminate()
-}
-*/
-
-// Tests that a peer advertising a high TD doesn't get to stall the downloader
-// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
-func TestHighTDStarvationAttack65Full(t *testing.T) { testHighTDStarvationAttack(t, 65, FullSync) }
-func TestHighTDStarvationAttack66Full(t *testing.T) { testHighTDStarvationAttack(t, 66, FullSync) }
-
-func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
-
- t.Skip("we ignore handshake TD")
- tester := newTester()
-
- chain := getTestChainBase().shorten(1)
- tester.newPeer("attack", protocol, chain)
- if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
- t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
- }
- tester.terminate()
-}
-
-// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
-func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }
-func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, 66) }
-
-func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
- t.Skip("deadlock")
-
- // Define the disconnection requirement for individual hash fetch errors
- tests := []struct {
- result error
- drop bool
- }{
- {nil, false}, // Sync succeeded, all is well
- {errBusy, false}, // Sync is already in progress, no problem
- {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
- {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
- {errStallingPeer, true}, // Peer was detected to be stalling, drop it
- {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it
- {errNoPeers, false}, // No peers to download from, soft race, no issue
- {errTimeout, true}, // No hashes received in due time, drop the peer
- {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
- {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
- {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
- {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
- {errInvalidBody, false}, // A bad peer was detected, but not the sync origin
- {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
- {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
- }
- // Run the tests and check disconnection status
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
- chain := getTestChainBase().shorten(1)
-
- for i, tt := range tests {
- // Register a new peer and ensure its presence
- id := fmt.Sprintf("test %d", i)
- if err := tester.newPeer(id, protocol, chain); err != nil {
- t.Fatalf("test %d: failed to register new peer: %v", i, err)
- }
- if _, ok := tester.peers[id]; !ok {
- t.Fatalf("test %d: registered peer not found", i)
- }
- // Simulate a synchronisation and check the required result
- tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
-
- _ = tester.downloader.Synchronise(id, tester.genesis.Hash(), tester.genesis.NumberU64(), FullSync, nil, func() error { return nil })
- if _, ok := tester.peers[id]; !ok != tt.drop {
- t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
- }
- }
-}
-
-// Tests that synchronisation progress (origin block number, current block number
-// and highest block number) is tracked and updated correctly.
-func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
-func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, 65, FullSync) }
-func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, 66, FullSync) }
-
-func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Synchronise half the blocks and check initial progress
- tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
- pending := new(sync.WaitGroup)
- pending.Add(1)
-
- go func() {
- defer pending.Done()
- if err := tester.sync("peer-half", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(chain.len()/2 - 1),
- })
- progress <- struct{}{}
- pending.Wait()
-
- // Synchronise all the blocks and check continuation progress
- tester.newPeer("peer-full", protocol, chain)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("peer-full", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
- StartingBlock: uint64(chain.len()/2 - 1),
- CurrentBlock: uint64(chain.len()/2 - 1),
- HighestBlock: uint64(chain.len() - 1),
- })
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- StartingBlock: uint64(chain.len()/2 - 1),
- CurrentBlock: uint64(chain.len() - 1),
- HighestBlock: uint64(chain.len() - 1),
- })
-}
-
-func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
- // Mark this method as a helper to report errors at callsite, not in here
- t.Helper()
-
- p := d.Progress()
- p.KnownStates, p.PulledStates = 0, 0
- want.KnownStates, want.PulledStates = 0, 0
- if p != want {
- t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
- }
-}
-
-// Tests that synchronisation progress (origin block number and highest block
-// number) is tracked and updated correctly in case of a fork (or manual head
-// revertal).
-func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
-func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, 65, FullSync) }
-func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, 66, FullSync) }
-
-func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
- chainA := getTestChainForkLightA().shorten(getTestChainBase().len() + MaxHashFetch)
- chainB := getTestChainForkLightB().shorten(getTestChainBase().len() + MaxHashFetch)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Synchronise with one of the forks and check progress
- tester.newPeer("fork A", protocol, chainA)
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("fork A", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
-
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(chainA.len() - 1),
- })
- progress <- struct{}{}
- pending.Wait()
-
- // Simulate a successful sync above the fork
- tester.downloader.syncStatsChainOrigin = tester.downloader.GetSyncStatsChainHeight()
-
- // Synchronise with the second fork and check progress resets
- tester.newPeer("fork B", protocol, chainB)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("fork B", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
- StartingBlock: uint64(getTestChainBase().len()) - 1,
- CurrentBlock: uint64(chainA.len() - 1),
- HighestBlock: uint64(chainB.len() - 1),
- })
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- StartingBlock: uint64(getTestChainBase().len()) - 1,
- CurrentBlock: uint64(chainB.len() - 1),
- HighestBlock: uint64(chainB.len() - 1),
- })
-}
-
-// Tests that if synchronisation is aborted due to some failure, then the progress
-// origin is not updated in the next sync cycle, as it should be considered the
-// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
-func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, 65, FullSync) }
-func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, 66, FullSync) }
-
-func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
-
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Attempt a full sync with a faulty peer
- brokenChain := chain.shorten(chain.len())
- missing := brokenChain.len() / 2
- delete(brokenChain.headerm, brokenChain.chain[missing])
- delete(brokenChain.blockm, brokenChain.chain[missing])
- delete(brokenChain.receiptm, brokenChain.chain[missing])
- tester.newPeer("faulty", protocol, brokenChain)
-
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("faulty", nil, mode); err == nil {
- panic("succeeded faulty synchronisation")
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(brokenChain.len() - 1),
- })
- progress <- struct{}{}
- pending.Wait()
- afterFailedSync := tester.downloader.Progress()
-
- // Synchronise with a good peer and check that the progress origin remind the same
- // after a failure
- tester.newPeer("valid", protocol, chain)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("valid", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", afterFailedSync)
-
- // Check final progress after successful sync
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- CurrentBlock: uint64(chain.len() - 1),
- HighestBlock: uint64(chain.len() - 1),
- })
-}
-
-// Tests that if an attacker fakes a chain height, after the attack is detected,
-// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
-func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, 65, FullSync) }
-func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, 66, FullSync) }
-
-func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
- chain := getTestChainBase().shorten(blockCacheMaxItems - 15)
-
- // Set a sync init hook to catch progress changes
- starting := make(chan struct{})
- progress := make(chan struct{})
- tester.downloader.syncInitHook = func(origin, latest uint64) {
- starting <- struct{}{}
- <-progress
- }
- checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
-
- // Create and sync with an attacker that promises a higher chain than available.
- brokenChain := chain.shorten(chain.len())
- numMissing := 5
- for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
- delete(brokenChain.headerm, brokenChain.chain[i])
- }
- tester.newPeer("attack", protocol, brokenChain)
-
- pending := new(sync.WaitGroup)
- pending.Add(1)
- go func() {
- defer pending.Done()
- if err := tester.sync("attack", nil, mode); err == nil {
- panic("succeeded attacker synchronisation")
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
- HighestBlock: uint64(brokenChain.len() - 1),
- })
- progress <- struct{}{}
- pending.Wait()
- afterFailedSync := tester.downloader.Progress()
-
- // Synchronise with a good peer and check that the progress height has been reduced to
- // the true value.
- validChain := chain.shorten(chain.len() - numMissing)
- tester.newPeer("valid", protocol, validChain)
- pending.Add(1)
-
- go func() {
- defer pending.Done()
- if err := tester.sync("valid", nil, mode); err != nil {
- panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
- }
- }()
- <-starting
- checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
- CurrentBlock: afterFailedSync.CurrentBlock,
- HighestBlock: uint64(validChain.len() - 1),
- })
-
- // Check final progress after successful sync.
- progress <- struct{}{}
- pending.Wait()
- checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
- CurrentBlock: uint64(validChain.len() - 1),
- HighestBlock: uint64(validChain.len() - 1),
- })
-}
-
-// This test reproduces an issue where unexpected deliveries would
-// block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
-func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
-func TestDeliverHeadersHang65Full(t *testing.T) { testDeliverHeadersHang(t, 65, FullSync) }
-func TestDeliverHeadersHang65Fast(t *testing.T) { testDeliverHeadersHang(t, 65, FastSync) }
-func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, 66, FastSync) }
-
-func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- t.Parallel()
-
- master := newTester()
- defer master.terminate()
- defer master.peerDb.Close()
- chain := getTestChainBase().shorten(15)
-
- for i := 0; i < 200; i++ {
- tester := newTester()
- tester.peerDb.Close()
- tester.peerDb = master.peerDb
- tester.newPeer("peer", protocol, chain)
-
- // Whenever the downloader requests headers, flood it with
- // a lot of unrequested header deliveries.
- tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
- peer: tester.downloader.peers.peers["peer"].peer,
- tester: tester,
- }
- if err := tester.sync("peer", nil, mode); err != nil {
- t.Errorf("test %d: sync failed: %v", i, err)
- }
- tester.terminate()
- }
-}
-
-type floodingTestPeer struct {
- peer Peer
- tester *downloadTester
-}
-
-func (ftp *floodingTestPeer) Head() (common.Hash, uint64) { return ftp.peer.Head() }
-func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
- return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
-}
-func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
- return ftp.peer.RequestBodies(hashes)
-}
-func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
- return ftp.peer.RequestReceipts(hashes)
-}
-
-func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
- deliveriesDone := make(chan struct{}, 500)
- for i := 0; i < cap(deliveriesDone)-1; i++ {
- peer := fmt.Sprintf("fake-peer%d", i)
- go func() {
- ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
- deliveriesDone <- struct{}{}
- }()
- }
-
- // None of the extra deliveries should block.
- timeout := time.After(60 * time.Second)
- launched := false
- for i := 0; i < cap(deliveriesDone); i++ {
- select {
- case <-deliveriesDone:
- if !launched {
- // Start delivering the requested headers
- // after one of the flooding responses has arrived.
- go func() {
- ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
- deliveriesDone <- struct{}{}
- }()
- launched = true
- }
- case <-timeout:
- panic("blocked")
- }
- }
- return nil
-}
-
func TestRemoteHeaderRequestSpan(t *testing.T) {
testCases := []struct {
remoteHeight uint64
@@ -1622,40 +111,6 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
}
}
-// Tests that peers below a pre-configured checkpoint block are prevented from
-// being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }
-func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, 65, FullSync) }
-func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, 66, FullSync) }
-
-func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
- t.Skip("deadlock")
-
- // Create a new tester with a particular hard coded checkpoint block
- tester := newTester()
- defer tester.terminate()
- defer tester.peerDb.Close()
-
- tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
- chain := getTestChainBase().shorten(int(tester.downloader.checkpoint) - 1)
-
- // Attempt to sync with the peer and validate the result
- tester.newPeer("peer", protocol, chain)
-
- var expect error
- if mode == FastSync || mode == LightSync {
- expect = errUnsyncedPeer
- }
- if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
- t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
- }
- if mode == FastSync || mode == LightSync {
- assertOwnChain(t, tester, 1)
- } else {
- assertOwnChain(t, tester, chain.len())
- }
-}
-
func TestDataRace(t *testing.T) {
wg := &sync.WaitGroup{}
diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go
index d832f586110..ba9907a6de6 100644
--- a/eth/downloader/metrics.go
+++ b/eth/downloader/metrics.go
@@ -38,8 +38,5 @@ var (
receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)
- stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
- stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)
-
throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
)
diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go
index 0cca01e0fa6..a76847f9c21 100644
--- a/eth/downloader/modes.go
+++ b/eth/downloader/modes.go
@@ -25,17 +25,11 @@ type SyncMode uint32
const (
FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks
StagedSync // Full sync but done in stages
- MgrSync // MarryGoRound sync
-
- // FIXME: these are kept for simplicity of rebasing
- FastSync // (not supported by turbo-geth)
- LightSync // (not supported by turbo-geth)
)
const (
FullSyncName = "full"
StagedSyncName = "staged"
- MgrSyncName = "mgr"
)
const MiningEnabled = true
@@ -51,8 +45,6 @@ func (mode SyncMode) String() string {
return FullSyncName
case StagedSync:
return StagedSyncName
- case MgrSync:
- return MgrSyncName
default:
return "unknown"
}
@@ -64,8 +56,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) {
return []byte(FullSyncName), nil
case StagedSync:
return []byte(StagedSyncName), nil
- case MgrSync:
- return []byte(MgrSyncName), nil
default:
return nil, fmt.Errorf("unknown sync mode %d", mode)
}
@@ -77,10 +67,8 @@ func (mode *SyncMode) UnmarshalText(text []byte) error {
*mode = FullSync
case StagedSyncName:
*mode = StagedSync
- case MgrSyncName:
- *mode = MgrSync
default:
- return fmt.Errorf(`unknown sync mode %q, want one of %s`, text, []string{FullSyncName, StagedSyncName, MgrSyncName})
+ return fmt.Errorf(`unknown sync mode %q, want one of %s`, text, []string{FullSyncName, StagedSyncName})
}
return nil
}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 0d202541d23..67cd7f9f2e8 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -74,42 +74,15 @@ type peerConnection struct {
lock sync.RWMutex
}
-// LightPeer encapsulates the methods required to synchronise with a remote light peer.
-type LightPeer interface {
+// Peer encapsulates the methods required to synchronise with a remote full peer.
+type Peer interface {
Head() (common.Hash, uint64)
RequestHeadersByHash(common.Hash, int, int, bool) error
RequestHeadersByNumber(uint64, int, int, bool) error
-}
-
-// Peer encapsulates the methods required to synchronise with a remote full peer.
-type Peer interface {
- LightPeer
RequestBodies([]common.Hash) error
RequestReceipts([]common.Hash) error
}
-// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
-type lightPeerWrapper struct {
- peer LightPeer
-}
-
-func (w *lightPeerWrapper) Head() (common.Hash, uint64) { return w.peer.Head() }
-func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
- return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
-}
-func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
- return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
-}
-func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
- panic("RequestBodies not supported in light client mode sync")
-}
-func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
- panic("RequestReceipts not supported in light client mode sync")
-}
-func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
- panic("RequestNodeData not supported in light client mode sync")
-}
-
// newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
return &peerConnection{
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index a3310ed86c9..1d7fc5fb458 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -69,16 +69,13 @@ type fetchResult struct {
Receipts types.Receipts
}
-func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
+func newFetchResult(header *types.Header) *fetchResult {
item := &fetchResult{
Header: header,
}
if !header.EmptyBody() {
item.pending |= (1 << bodyType)
}
- if fastSync && !header.EmptyReceipts() {
- item.pending |= (1 << receiptType)
- }
return item
}
@@ -339,15 +336,6 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
q.blockTaskPool[hash] = header
q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
}
- // Queue for receipt retrieval
- if q.mode == FastSync && !header.EmptyReceipts() {
- if _, ok := q.receiptTaskPool[hash]; ok {
- log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
- } else {
- q.receiptTaskPool[hash] = header
- q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
- }
- }
inserts = append(inserts, header)
q.headerHead = hash
from++
@@ -525,7 +513,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
// we can ask the resultcache if this header is within the
// "prioritized" segment of blocks. If it is not, we need to throttle
- stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
+ stale, throttle, item, err := q.resultCache.AddFetch(header)
if stale {
// Don't put back in the task queue, this item has already been
// delivered upstream
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
deleted file mode 100644
index 9db1f8bc757..00000000000
--- a/eth/downloader/queue_test.go
+++ /dev/null
@@ -1,471 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package downloader
-
-import (
- "fmt"
- "math/big"
- "math/rand"
- "sync"
- "testing"
- "time"
-
- "github.com/holiman/uint256"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/consensus/ethash"
- "github.com/ledgerwatch/turbo-geth/core"
- "github.com/ledgerwatch/turbo-geth/core/types"
- "github.com/ledgerwatch/turbo-geth/ethdb"
- "github.com/ledgerwatch/turbo-geth/log"
- "github.com/ledgerwatch/turbo-geth/params"
-)
-
-var (
- testdb = ethdb.NewMemDatabase()
- genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
-)
-
-// makeChain creates a chain of n blocks starting at and including parent.
-// the returned hash chain is ordered head->parent. In addition, every 3rd block
-// contains a transaction and every 5th an uncle to allow testing correct block
-// reassembly.
-func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) { //nolint:unparam
- blocks, receipts, err := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
- block.SetCoinbase(common.Address{seed})
- // Add one tx to every secondblock
- if !empty && i%2 == 0 {
- signer := types.MakeSigner(params.TestChainConfig, block.Number())
- tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, uint256.NewInt().SetUint64(1000), params.TxGas, nil, nil), signer, testKey)
- if err != nil {
- panic(err)
- }
- block.AddTx(tx)
- }
- }, false)
-
- if err != nil {
- panic(err)
- }
-
- return blocks, receipts
-}
-
-type chainData struct {
- blocks []*types.Block
- offset int
-}
-
-var chain *chainData
-var emptyChain *chainData
-var chainsMu sync.Mutex
-
-const targetTestBlocks = 128
-
-func getEmptyChain() *chainData {
- chainsMu.Lock()
- defer chainsMu.Unlock()
- if emptyChain == nil {
- blocks, _ := makeChain(targetTestBlocks, 0, genesis, true)
- emptyChain = &chainData{blocks, 0}
- }
- return emptyChain
-}
-func getChain() *chainData {
- chainsMu.Lock()
- defer chainsMu.Unlock()
- if chain == nil {
- blocks, _ := makeChain(targetTestBlocks, 0, genesis, false)
- chain = &chainData{blocks, 0}
- }
- return chain
-}
-
-func (chain *chainData) headers() []*types.Header {
- hdrs := make([]*types.Header, len(chain.blocks))
- for i, b := range chain.blocks {
- hdrs[i] = b.Header()
- }
- return hdrs
-}
-
-func (chain *chainData) Len() int {
- return len(chain.blocks)
-}
-
-func dummyPeer(id string) *peerConnection {
- p := &peerConnection{
- id: id,
- lacking: make(map[common.Hash]struct{}),
- }
- return p
-}
-
-func TestBasics(t *testing.T) {
- emptyCh := getEmptyChain()
- numOfBlocks := len(emptyCh.blocks)
- numOfReceipts := len(emptyCh.blocks) / 2
-
- q := newQueue(10, 10)
- if !q.Idle() {
- t.Errorf("new queue should be idle")
- }
- q.Prepare(1, FastSync)
- if res := q.Results("logPrefix", false); len(res) != 0 {
- t.Fatal("new queue should have 0 results")
- }
-
- // Schedule a batch of headers
- q.Schedule(getChain().headers(), 1)
- if q.Idle() {
- t.Errorf("queue should not be idle")
- }
- if got, exp := q.PendingBlocks(), getChain().Len(); got != exp {
- t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
- }
- // Only non-empty receipts get added to task-queue
- if got, exp := q.PendingReceipts(), 64; got != exp {
- t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
- }
- // Items are now queued for downloading, next step is that we tell the
- // queue that a certain peer will deliver them for us
- {
- peer := dummyPeer("peer-1")
- fetchReq, _, throttle := q.ReserveBodies(peer, 50)
- if !throttle {
- // queue size is only 10, so throttling should occur
- t.Fatal("should throttle")
- }
- // But we should still get the first things to fetch
- if got, exp := len(fetchReq.Headers), 5; got != exp {
- t.Fatalf("expected %d requests, got %d", exp, got)
- }
- if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
- t.Fatalf("expected header %d, got %d", exp, got)
- }
- }
- if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
- t.Errorf("expected block task queue to be %d, got %d", exp, got)
- }
- if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {
- t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
- }
- {
- peer := dummyPeer("peer-2")
- fetchReq, _, throttle := q.ReserveBodies(peer, 50)
-
- // The second peer should hit throttling
- if !throttle {
- t.Fatalf("should not throttle")
- }
- // And not get any fetches at all, since it was throttled to begin with
- if fetchReq != nil {
- t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
- }
- }
- if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
- t.Errorf("expected block task queue to be %d, got %d", exp, got)
- }
- if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {
- t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
- }
- {
- // The receipt delivering peer should not be affected
- // by the throttling of body deliveries
- peer := dummyPeer("peer-3")
- fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
- if !throttle {
- // queue size is only 10, so throttling should occur
- t.Fatal("should throttle")
- }
- // But we should still get the first things to fetch
- if got, exp := len(fetchReq.Headers), 5; got != exp {
- t.Fatalf("expected %d requests, got %d", exp, got)
- }
- if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
- t.Fatalf("expected header %d, got %d", exp, got)
- }
-
- }
- if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
- t.Errorf("expected block task queue to be %d, got %d", exp, got)
- }
- if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got {
- t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
- }
- if got, exp := q.resultCache.countCompleted(), 0; got != exp {
- t.Errorf("wrong processable count, got %d, exp %d", got, exp)
- }
-}
-
-func TestEmptyBlocks(t *testing.T) {
- numOfBlocks := len(emptyChain.blocks)
-
- q := newQueue(10, 10)
-
- q.Prepare(1, FastSync)
- // Schedule a batch of headers
- q.Schedule(getEmptyChain().headers(), 1)
- if q.Idle() {
- t.Errorf("queue should not be idle")
- }
- if got, exp := q.PendingBlocks(), len(getEmptyChain().blocks); got != exp {
- t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
- }
- if got, exp := q.PendingReceipts(), 0; got != exp {
- t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
- }
- // They won't be processable, because the fetchresults haven't been
- // created yet
- if got, exp := q.resultCache.countCompleted(), 0; got != exp {
- t.Errorf("wrong processable count, got %d, exp %d", got, exp)
- }
-
- // Items are now queued for downloading, next step is that we tell the
- // queue that a certain peer will deliver them for us
- // That should trigger all of them to suddenly become 'done'
- {
- // Reserve blocks
- peer := dummyPeer("peer-1")
- fetchReq, _, _ := q.ReserveBodies(peer, 50)
-
- // there should be nothing to fetch, blocks are empty
- if fetchReq != nil {
- t.Fatal("there should be no body fetch tasks remaining")
- }
-
- }
- if q.blockTaskQueue.Size() != numOfBlocks-10 {
- t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
- }
- if q.receiptTaskQueue.Size() != 0 {
- t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
- }
- {
- peer := dummyPeer("peer-3")
- fetchReq, _, _ := q.ReserveReceipts(peer, 50)
-
- // there should be nothing to fetch, blocks are empty
- if fetchReq != nil {
- t.Fatal("there should be no body fetch tasks remaining")
- }
- }
- if q.blockTaskQueue.Size() != numOfBlocks-10 {
- t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
- }
- if q.receiptTaskQueue.Size() != 0 {
- t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
- }
- if got, exp := q.resultCache.countCompleted(), 10; got != exp {
- t.Errorf("wrong processable count, got %d, exp %d", got, exp)
- }
-}
-
-// XTestDelivery does some more extensive testing of events that happen,
-// blocks that become known and peers that make reservations and deliveries.
-// disabled since it's not really a unit-test, but can be executed to test
-// some more advanced scenarios
-func XTestDelivery(t *testing.T) {
- // the outside network, holding blocks
- blo, rec := makeChain(128, 0, genesis, false)
- world := newNetwork()
- world.receipts = rec
- world.chain = blo
- world.progress(10)
- if false {
- log.Root().SetHandler(log.StdoutHandler)
-
- }
- q := newQueue(10, 10)
- var wg sync.WaitGroup
- q.Prepare(1, FastSync)
- wg.Add(1)
- go func() {
- // deliver headers
- defer wg.Done()
- c := 1
- for {
- //fmt.Printf("getting headers from %d\n", c)
- hdrs := world.headers(c)
- l := len(hdrs)
- //fmt.Printf("scheduling %d headers, first %d last %d\n",
- // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
- q.Schedule(hdrs, uint64(c))
- c += l
- }
- }()
- wg.Add(1)
- go func() {
- // collect results
- defer wg.Done()
- tot := 0
- for {
- res := q.Results("logPrefix", true)
- tot += len(res)
- fmt.Printf("got %d results, %d tot\n", len(res), tot)
- // Now we can forget about these
- world.forget(res[len(res)-1].Header.Number.Uint64())
-
- }
- }()
- wg.Add(1)
- go func() {
- defer wg.Done()
- // reserve body fetch
- i := 4
- for {
- peer := dummyPeer(fmt.Sprintf("peer-%d", i))
- f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
- if f != nil {
- var emptyList []*types.Header
- var txs [][]*types.Transaction
- var uncles [][]*types.Header
- numToSkip := rand.Intn(len(f.Headers))
- for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
- txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
- uncles = append(uncles, emptyList)
- }
- time.Sleep(100 * time.Millisecond)
- _, err := q.DeliverBodies(peer.id, txs, uncles)
- if err != nil {
- fmt.Printf("delivered %d bodies %v\n", len(txs), err)
- }
- } else {
- i++
- time.Sleep(200 * time.Millisecond)
- }
- }
- }()
- go func() {
- defer wg.Done()
- // reserve receiptfetch
- peer := dummyPeer("peer-3")
- for {
- f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
- if f != nil {
- var rcs [][]*types.Receipt
- for _, hdr := range f.Headers {
- rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
- }
- _, err := q.DeliverReceipts(peer.id, rcs)
- if err != nil {
- fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
- }
- time.Sleep(100 * time.Millisecond)
- } else {
- time.Sleep(200 * time.Millisecond)
- }
- }
- }()
- wg.Add(1)
- go func() {
- defer wg.Done()
- for i := 0; i < 50; i++ {
- time.Sleep(300 * time.Millisecond)
- //world.tick()
- //fmt.Printf("trying to progress\n")
- world.progress(rand.Intn(100))
- }
- for i := 0; i < 50; i++ {
- time.Sleep(2990 * time.Millisecond)
-
- }
- }()
- wg.Add(1)
- go func() {
- defer wg.Done()
- for {
- time.Sleep(990 * time.Millisecond)
- fmt.Printf("world block tip is %d\n",
- world.chain[len(world.chain)-1].Header().Number.Uint64())
- fmt.Println(q.Stats())
- }
- }()
- wg.Wait()
-}
-
-func newNetwork() *network {
- var l sync.RWMutex
- return &network{
- cond: sync.NewCond(&l),
- offset: 1, // block 1 is at blocks[0]
- }
-}
-
-// represents the network
-type network struct {
- offset int
- chain []*types.Block
- receipts []types.Receipts
- lock sync.RWMutex
- cond *sync.Cond
-}
-
-func (n *network) getTransactions(blocknum uint64) types.Transactions {
- index := blocknum - uint64(n.offset)
- return n.chain[index].Transactions()
-}
-func (n *network) getReceipts(blocknum uint64) types.Receipts {
- index := blocknum - uint64(n.offset)
- if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
- fmt.Printf("Err, got %d exp %d\n", got, blocknum)
- panic("sd")
- }
- return n.receipts[index]
-}
-
-func (n *network) forget(blocknum uint64) {
- index := blocknum - uint64(n.offset)
- n.chain = n.chain[index:]
- n.receipts = n.receipts[index:]
- n.offset = int(blocknum)
-
-}
-func (n *network) progress(numBlocks int) {
-
- n.lock.Lock()
- defer n.lock.Unlock()
- //fmt.Printf("progressing...\n")
- newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
- n.chain = append(n.chain, newBlocks...)
- n.receipts = append(n.receipts, newR...)
- n.cond.Broadcast()
-
-}
-
-func (n *network) headers(from int) []*types.Header {
- numHeaders := 128
- var hdrs []*types.Header //nolint:prealloc
- index := from - n.offset
-
- for index >= len(n.chain) {
- // wait for progress
- n.cond.L.Lock()
- //fmt.Printf("header going into wait\n")
- n.cond.Wait()
- index = from - n.offset
- n.cond.L.Unlock()
- }
- n.lock.RLock()
- defer n.lock.RUnlock()
- for i, b := range n.chain[index:] {
- hdrs = append(hdrs, b.Header())
- if i >= numHeaders {
- break
- }
- }
- return hdrs
-}
diff --git a/eth/downloader/resultstore.go b/eth/downloader/resultstore.go
index d57ec62c024..5d7a494d87b 100644
--- a/eth/downloader/resultstore.go
+++ b/eth/downloader/resultstore.go
@@ -75,7 +75,7 @@ func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
// throttled - if true, the store is at capacity, this particular header is not prio now
// item - the result to store data into
// err - any error that occurred
-func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
+func (r *resultStore) AddFetch(header *types.Header) (stale, throttled bool, item *fetchResult, err error) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -85,7 +85,7 @@ func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, thro
return stale, throttled, item, err
}
if item == nil {
- item = newFetchResult(header, fastSync)
+ item = newFetchResult(header)
r.items[index] = item
}
return stale, throttled, item, err
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
index b8a10591907..4b5b5918e7b 100644
--- a/eth/downloader/testchain_test.go
+++ b/eth/downloader/testchain_test.go
@@ -17,7 +17,6 @@
package downloader
import (
- "fmt"
"math/big"
"os"
"sync"
@@ -54,9 +53,6 @@ var (
testChainForkLightA *testChain
testChainForkLightAMu sync.Mutex
- testChainForkLightB *testChain
- testChainForkLightBMu sync.Mutex
-
testChainForkHeavy *testChain
testChainForkHeavyMu sync.Mutex
)
@@ -69,14 +65,6 @@ func getTestChainForkLightA() *testChain {
}
return testChainForkLightA
}
-func getTestChainForkLightB() *testChain {
- testChainForkLightBMu.Lock()
- defer testChainForkLightBMu.Unlock()
- if testChainForkLightB == nil {
- testChainForkLightB = getTestChainBase().makeFork(getForkLen(), false, 2)
- }
- return testChainForkLightB
-}
func getTestChainForkHeavy() *testChain {
testChainForkHeavyMu.Lock()
defer testChainForkHeavyMu.Unlock()
@@ -138,15 +126,6 @@ func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain {
return fork
}
-// shorten creates a copy of the chain with the given length. It panics if the
-// length is longer than the number of available blocks.
-func (tc *testChain) shorten(length int) *testChain {
- if length > tc.len() {
- panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len()))
- }
- return tc.copy(length)
-}
-
func (tc *testChain) copy(newlen int) *testChain {
tc.cpyLock.Lock()
defer tc.cpyLock.Unlock()
@@ -295,17 +274,6 @@ func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, revers
return result
}
-// receipts returns the receipts of the given block hashes.
-func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt {
- results := make([][]*types.Receipt, 0, len(hashes))
- for _, hash := range hashes {
- if receipt, ok := tc.receiptm[hash]; ok {
- results = append(results, receipt)
- }
- }
- return results
-}
-
// bodies returns the block bodies of the given block hashes.
func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) {
transactions := make([][]*types.Transaction, 0, len(hashes))
diff --git a/eth/downloader/types.go b/eth/downloader/types.go
index 0c0a323ee10..579285fafa0 100644
--- a/eth/downloader/types.go
+++ b/eth/downloader/types.go
@@ -67,13 +67,3 @@ type receiptPack struct {
func (p *receiptPack) PeerId() string { return p.peerID }
func (p *receiptPack) Items() int { return len(p.receipts) }
func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) }
-
-// statePack is a batch of states returned by a peer.
-type statePack struct {
- peerID string
- states [][]byte
-}
-
-func (p *statePack) PeerId() string { return p.peerID }
-func (p *statePack) Items() int { return len(p.states) }
-func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) }
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 095a9cdd2bb..ebe47c7185a 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -67,8 +67,6 @@ var Defaults = Config{
},
NetworkID: 1,
TxLookupLimit: 2350000,
- LightPeers: 100,
- UltraLightFraction: 75,
DatabaseCache: 512,
TrieCleanCache: 256,
TrieCleanCacheJournal: "triecache",
@@ -154,20 +152,6 @@ type Config struct {
// Whitelist of required block number -> hash values to accept
Whitelist map[uint64]common.Hash `toml:"-"`
- // Light client options
- LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
- LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers
- LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers
- LightPeers int `toml:",omitempty"` // Maximum number of LES client peers
- LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning
- LightNoSyncServe bool `toml:",omitempty"` // Whether to serve light clients before syncing
- SyncFromCheckpoint bool `toml:",omitempty"` // Whether to sync the header chain from the configured checkpoint
-
- // Ultra Light client options
- UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers
- UltraLightFraction int `toml:",omitempty"` // Percentage of trusted servers to accept an announcement
- UltraLightOnlyAnnounce bool `toml:",omitempty"` // Whether to only announce headers, or also serve them
-
// Database options
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
@@ -203,12 +187,6 @@ type Config struct {
// Miscellaneous options
DocRoot string `toml:"-"`
- // Type of the EWASM interpreter ("" for default)
- EWASMInterpreter string
-
- // Type of the EVM interpreter ("" for default)
- EVMInterpreter string
-
// RPCGasCap is the global gas cap for eth-call variants.
RPCGasCap uint64 `toml:",omitempty"`
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 90ee1b7396b..f0f86449610 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -25,14 +25,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
NoPrefetch bool
TxLookupLimit uint64 `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
- LightIngress int `toml:",omitempty"`
- LightEgress int `toml:",omitempty"`
StorageMode string
- LightNoPrune bool `toml:",omitempty"`
- LightNoSyncServe bool `toml:",omitempty"`
ArchiveSyncInterval int
- LightServ int `toml:",omitempty"`
- LightPeers int `toml:",omitempty"`
OnlyAnnounce bool
SkipBcVersionCheck bool `toml:"-"`
DatabaseHandles int `toml:"-"`
@@ -50,9 +44,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
TxPool core.TxPoolConfig
GPO gasprice.Config
EnablePreimageRecording bool
- DocRoot string `toml:"-"`
- EWASMInterpreter string
- EVMInterpreter string
+ DocRoot string `toml:"-"`
RPCGasCap uint64 `toml:",omitempty"`
RPCTxFeeCap float64 `toml:",omitempty"`
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
@@ -69,10 +61,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.Whitelist = c.Whitelist
enc.StorageMode = c.StorageMode.ToString()
enc.ArchiveSyncInterval = c.ArchiveSyncInterval
- enc.LightServ = c.LightServ
- enc.LightIngress = c.LightIngress
- enc.LightEgress = c.LightEgress
- enc.LightPeers = c.LightPeers
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache
@@ -90,8 +78,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
- enc.EWASMInterpreter = c.EWASMInterpreter
- enc.EVMInterpreter = c.EVMInterpreter
enc.RPCGasCap = c.RPCGasCap
enc.RPCTxFeeCap = c.RPCTxFeeCap
enc.Checkpoint = c.Checkpoint
@@ -110,14 +96,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
NoPrefetch *bool
TxLookupLimit *uint64 `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
- LightIngress *int `toml:",omitempty"`
- LightEgress *int `toml:",omitempty"`
Mode *string
- LightNoPrune *bool `toml:",omitempty"`
- LightNoSyncServe *bool `toml:",omitempty"`
ArchiveSyncInterval *int
- LightServ *int `toml:",omitempty"`
- LightPeers *int `toml:",omitempty"`
OnlyAnnounce *bool
SkipBcVersionCheck *bool `toml:"-"`
DatabaseHandles *int `toml:"-"`
@@ -135,9 +115,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TxPool *core.TxPoolConfig
GPO *gasprice.Config
EnablePreimageRecording *bool
- DocRoot *string `toml:"-"`
- EWASMInterpreter *string
- EVMInterpreter *string
+ DocRoot *string `toml:"-"`
RPCGasCap *uint64 `toml:",omitempty"`
RPCTxFeeCap *float64 `toml:",omitempty"`
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
@@ -181,18 +159,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.ArchiveSyncInterval != nil {
c.ArchiveSyncInterval = *dec.ArchiveSyncInterval
}
- if dec.LightServ != nil {
- c.LightServ = *dec.LightServ
- }
- if dec.LightIngress != nil {
- c.LightIngress = *dec.LightIngress
- }
- if dec.LightEgress != nil {
- c.LightEgress = *dec.LightEgress
- }
- if dec.LightPeers != nil {
- c.LightPeers = *dec.LightPeers
- }
if dec.SkipBcVersionCheck != nil {
c.SkipBcVersionCheck = *dec.SkipBcVersionCheck
}
@@ -244,12 +210,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.DocRoot != nil {
c.DocRoot = *dec.DocRoot
}
- if dec.EWASMInterpreter != nil {
- c.EWASMInterpreter = *dec.EWASMInterpreter
- }
- if dec.EVMInterpreter != nil {
- c.EVMInterpreter = *dec.EVMInterpreter
- }
if dec.RPCGasCap != nil {
c.RPCGasCap = *dec.RPCGasCap
}
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index 9ec25739ab6..5f9e5269ae4 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -153,8 +153,6 @@ func (inject *blockOrHeaderInject) hash() common.Hash {
// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type BlockFetcher struct {
- light bool // The indicator whether it's a light fetcher or normal one.
-
// Various event channels
notify chan *blockAnnounce
inject chan *blockOrHeaderInject
@@ -197,9 +195,8 @@ type BlockFetcher struct {
}
// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
-func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
+func NewBlockFetcher(getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
return &BlockFetcher{
- light: light,
notify: make(chan *blockAnnounce),
inject: make(chan *blockOrHeaderInject),
headerFilter: make(chan chan *headerFilterTask),
@@ -366,7 +363,7 @@ func (f *BlockFetcher) loop() {
break
}
// Otherwise if fresh and still unknown, try and import
- if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
+ if (number+maxUncleDist < height) || f.getBlock(hash) != nil {
f.forgetBlock(hash)
continue
}
@@ -418,12 +415,6 @@ func (f *BlockFetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
blockBroadcastInMeter.Mark(1)
-
- // Now only direct block injection is allowed, drop the header injection
- // here silently if we receive.
- if f.light {
- continue
- }
f.enqueue(op.origin, nil, op.block)
case <-fetchTimer.C:
@@ -514,7 +505,7 @@ func (f *BlockFetcher) loop() {
// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
- unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
+ unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
for _, header := range task.headers {
hash := header.Hash()
@@ -527,16 +518,6 @@ func (f *BlockFetcher) loop() {
f.forgetHash(hash)
continue
}
- // Collect all headers only if we are running in light
- // mode and the headers are not imported by other means.
- if f.light {
- if f.getHeader(hash) == nil {
- announce.header = header
- lightHeaders = append(lightHeaders, announce)
- }
- f.forgetHash(hash)
- continue
- }
// Only keep if not imported by other means
if f.getBlock(hash) == nil {
announce.header = header
@@ -581,10 +562,6 @@ func (f *BlockFetcher) loop() {
f.rescheduleComplete(completeTimer)
}
}
- // Schedule the header for light fetcher import
- for _, announce := range lightHeaders {
- f.enqueue(announce.origin, announce.header, nil)
- }
// Schedule the header-only blocks for import
for _, block := range complete {
if announce := f.completing[block.Hash()]; announce != nil {
@@ -670,12 +647,6 @@ func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
if len(f.announcedS) == 0 {
return
}
- // Schedule announcement retrieval quickly for light mode
- // since server won't send any headers to client.
- if f.light {
- fetch.Reset(lightTimeout)
- return
- }
// Otherwise find the earliest expiring announcement
earliest := time.Now()
for _, announce := range f.announced {
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index ee25573740e..f98f247a326 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -95,14 +95,14 @@ type fetcherTester struct {
}
// newTester creates a new fetcher test mocker.
-func newTester(light bool) *fetcherTester {
+func newTester() *fetcherTester {
tester := &fetcherTester{
hashes: []common.Hash{genesis.Hash()},
headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
- tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
+ tester.fetcher = NewBlockFetcher(tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
return tester
@@ -138,9 +138,6 @@ func (f *fetcherTester) chainHeight() uint64 {
f.lock.RLock()
defer f.lock.RUnlock()
- if f.fetcher.light {
- return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()
- }
return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
}
@@ -322,14 +319,14 @@ func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
-func TestSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, false) }
+func TestSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t) }
-func testSequentialAnnouncements(t *testing.T, light bool) {
+func testSequentialAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -337,17 +334,10 @@ func testSequentialAnnouncements(t *testing.T, light bool) {
// Iteratively announce blocks until all are imported
imported := make(chan interface{})
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
- if light {
- if header == nil {
- t.Fatalf("Fetcher try to import empty header")
- }
- imported <- header
- } else {
- if block == nil {
- t.Fatalf("Fetcher try to import empty block")
- }
- imported <- block
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
}
+ imported <- block
}
for i := len(hashes) - 2; i >= 0; i-- {
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
@@ -359,15 +349,15 @@ func testSequentialAnnouncements(t *testing.T, light bool) {
// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
-func TestConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, false) }
+func TestConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t) }
-func testConcurrentAnnouncements(t *testing.T, light bool) {
+func testConcurrentAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
// Assemble a tester with a built in counter for the requests
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack)
firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0)
@@ -386,17 +376,10 @@ func testConcurrentAnnouncements(t *testing.T, light bool) {
// Iteratively announce blocks until all are imported
imported := make(chan interface{}, 1)
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
- if light {
- if header == nil {
- t.Fatalf("Fetcher try to import empty header")
- }
- imported <- header
- } else {
- if block == nil {
- t.Fatalf("Fetcher try to import empty block")
- }
- imported <- block
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
}
+ imported <- block
}
for i := len(hashes) - 2; i >= 0; i-- {
@@ -416,14 +399,14 @@ func testConcurrentAnnouncements(t *testing.T, light bool) {
// Tests that announcements arriving while a previous is being fetched still
// results in a valid import.
-func TestOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, false) }
+func TestOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t) }
-func testOverlappingAnnouncements(t *testing.T, light bool) {
+func testOverlappingAnnouncements(t *testing.T) {
// Create a chain of blocks to import
targetBlocks := 4 * hashLimit
hashes, blocks := makeChain(targetBlocks, 0, genesis)
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -435,17 +418,10 @@ func testOverlappingAnnouncements(t *testing.T, light bool) {
imported <- nil
}
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
- if light {
- if header == nil {
- t.Fatalf("Fetcher try to import empty header")
- }
- imported <- header
- } else {
- if block == nil {
- t.Fatalf("Fetcher try to import empty block")
- }
- imported <- block
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
}
+ imported <- block
}
for i := len(hashes) - 2; i >= 0; i-- {
@@ -462,14 +438,14 @@ func testOverlappingAnnouncements(t *testing.T, light bool) {
}
// Tests that announces already being retrieved will not be duplicated.
-func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, false) }
+func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t) }
-func testPendingDeduplication(t *testing.T, light bool) {
+func testPendingDeduplication(t *testing.T) {
// Create a hash and corresponding block
hashes, blocks := makeChain(1, 0, genesis)
// Assemble a tester with a built in counter and delayed fetcher
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0)
@@ -489,11 +465,6 @@ func testPendingDeduplication(t *testing.T, light bool) {
checkNonExist := func() bool {
return tester.getBlock(hashes[0]) == nil
}
- if light {
- checkNonExist = func() bool {
- return tester.getHeader(hashes[0]) == nil
- }
- }
// Announce the same block many times until it's fetched (wait for any pending ops)
for checkNonExist() {
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
@@ -510,15 +481,15 @@ func testPendingDeduplication(t *testing.T, light bool) {
// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
-func TestRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, false) }
+func TestRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t) }
-func testRandomArrivalImport(t *testing.T, light bool) {
+func testRandomArrivalImport(t *testing.T) {
// Create a chain of blocks to import, and choose one to delay
targetBlocks := maxQueueDist
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -526,17 +497,10 @@ func testRandomArrivalImport(t *testing.T, light bool) {
// Iteratively announce blocks, skipping one entry
imported := make(chan interface{}, len(hashes)-1)
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
- if light {
- if header == nil {
- t.Fatalf("Fetcher try to import empty header")
- }
- imported <- header
- } else {
- if block == nil {
- t.Fatalf("Fetcher try to import empty block")
- }
- imported <- block
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
}
+ imported <- block
}
for i := len(hashes) - 1; i >= 0; i-- {
if i != skip {
@@ -558,7 +522,7 @@ func TestQueueGapFill(t *testing.T) {
hashes, blocks := makeChain(targetBlocks, 0, genesis)
skip := targetBlocks / 2
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -586,7 +550,7 @@ func TestImportDeduplication(t *testing.T) {
hashes, blocks := makeChain(2, 0, genesis)
// Create the tester and wrap the importer with a counter
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -629,7 +593,7 @@ func TestDistantPropagationDiscarding(t *testing.T) {
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
tester.lock.Lock()
@@ -654,9 +618,9 @@ func TestDistantPropagationDiscarding(t *testing.T) {
// Tests that announcements with numbers much lower or higher than our current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
-func TestDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, false) }
+func TestDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t) }
-func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
+func testDistantAnnouncementDiscarding(t *testing.T) {
// Create a long chain to import and define the discard boundaries
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
head := hashes[len(hashes)/2]
@@ -664,7 +628,7 @@ func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
// Create a tester and simulate a head block being the middle of the above chain
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
tester.lock.Lock()
@@ -697,30 +661,23 @@ func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
-func TestInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, false) }
+func TestInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t) }
-func testInvalidNumberAnnouncement(t *testing.T, light bool) {
+func testInvalidNumberAnnouncement(t *testing.T) {
// Create a single block to import and check numbers against
hashes, blocks := makeChain(1, 0, genesis)
- tester := newTester(light)
+ tester := newTester()
defer tester.fetcher.Stop()
badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack)
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
imported := make(chan interface{})
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
- if light {
- if header == nil {
- t.Fatalf("Fetcher try to import empty header")
- }
- imported <- header
- } else {
- if block == nil {
- t.Fatalf("Fetcher try to import empty block")
- }
- imported <- block
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
}
+ imported <- block
}
// Announce a block with a bad number, check for immediate drop
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)
@@ -756,7 +713,7 @@ func TestEmptyBlockShortCircuit(t *testing.T) {
// Create a chain of blocks to import
hashes, blocks := makeChain(32, 0, genesis)
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -796,7 +753,7 @@ func TestEmptyBlockShortCircuit(t *testing.T) {
// the fetcher remains operational.
func TestHashMemoryExhaustionAttack(t *testing.T) {
// Create a tester with instrumented import hooks
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
imported, announces := make(chan interface{}), int32(0)
@@ -844,7 +801,7 @@ func TestHashMemoryExhaustionAttack(t *testing.T) {
// system memory.
func TestBlockMemoryExhaustionAttack(t *testing.T) {
// Create a tester with instrumented import hooks
- tester := newTester(false)
+ tester := newTester()
defer tester.fetcher.Stop()
imported, enqueued := make(chan interface{}, 1), int32(0)
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 02e4ff86b24..842569d69e1 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -30,7 +30,6 @@ import (
"github.com/ledgerwatch/turbo-geth/common/hexutil"
"github.com/ledgerwatch/turbo-geth/core/types"
"github.com/ledgerwatch/turbo-geth/ethdb"
- "github.com/ledgerwatch/turbo-geth/event"
"github.com/ledgerwatch/turbo-geth/rpc"
)
@@ -49,7 +48,6 @@ type filter struct {
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
backend Backend
- mux *event.TypeMux
quit chan struct{}
chainDb ethdb.Database
events *EventSystem
@@ -59,12 +57,12 @@ type PublicFilterAPI struct {
}
// NewPublicFilterAPI returns a new PublicFilterAPI instance.
-func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI {
+func NewPublicFilterAPI(backend Backend, timeout time.Duration) *PublicFilterAPI {
api := &PublicFilterAPI{
backend: backend,
quit: make(chan struct{}, 1),
chainDb: backend.ChainDb(),
- events: NewEventSystem(backend, lightMode),
+ events: NewEventSystem(backend),
filters: make(map[rpc.ID]*filter),
timeout: timeout,
}
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index 2e7c068e29b..3103a01b92b 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -112,9 +112,6 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
compSize += uint64(len(comp))
rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
}
- //if sectionIdx%50 == 0 {
- // b.Log(" section", sectionIdx, "/", cnt)
- //}
}
d := time.Since(start)
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index ddaa2b88c91..993c1f4dfb4 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -19,7 +19,6 @@
package filters
import (
- "context"
"fmt"
"sync"
"time"
@@ -27,7 +26,6 @@ import (
ethereum "github.com/ledgerwatch/turbo-geth"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/core"
- "github.com/ledgerwatch/turbo-geth/core/rawdb"
"github.com/ledgerwatch/turbo-geth/core/types"
"github.com/ledgerwatch/turbo-geth/event"
"github.com/ledgerwatch/turbo-geth/log"
@@ -83,16 +81,13 @@ type subscription struct {
// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscription which match the subscription criteria.
type EventSystem struct {
- backend Backend
- lightMode bool
- lastHead *types.Header
+ backend Backend
// Subscriptions
- txsSub event.Subscription // Subscription for new transaction event
- logsSub event.Subscription // Subscription for new log event
- rmLogsSub event.Subscription // Subscription for removed log event
- pendingLogsSub event.Subscription // Subscription for pending log event
- chainSub event.Subscription // Subscription for new chain event
+ txsSub event.Subscription // Subscription for new transaction event
+ logsSub event.Subscription // Subscription for new log event
+ rmLogsSub event.Subscription // Subscription for removed log event
+ chainSub event.Subscription // Subscription for new chain event
// Channels
install chan *subscription // install filter for event notification
@@ -110,10 +105,9 @@ type EventSystem struct {
//
// The returned manager has a loop that needs to be stopped with the Stop function
// or by stopping the given mux.
-func NewEventSystem(backend Backend, lightMode bool) *EventSystem {
+func NewEventSystem(backend Backend) *EventSystem {
m := &EventSystem{
backend: backend,
- lightMode: lightMode,
install: make(chan *subscription),
uninstall: make(chan *subscription),
txsCh: make(chan core.NewTxsEvent, txChanSize),
@@ -356,88 +350,6 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent)
for _, f := range filters[BlocksSubscription] {
f.headers <- ev.Block.Header()
}
- if es.lightMode && len(filters[LogsSubscription]) > 0 {
- es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
- })
- }
-}
-
-func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {
- oldh := es.lastHead
- es.lastHead = newHeader
- if oldh == nil {
- return
- }
- newh := newHeader
- // find common ancestor, create list of rolled back and new block hashes
- var oldHeaders, newHeaders []*types.Header
- for oldh.Hash() != newh.Hash() {
- if oldh.Number.Uint64() >= newh.Number.Uint64() {
- oldHeaders = append(oldHeaders, oldh)
- oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)
- }
- if oldh.Number.Uint64() < newh.Number.Uint64() {
- newHeaders = append(newHeaders, newh)
- newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)
- if newh == nil {
- // happens when CHT syncing, nothing to do
- newh = oldh
- }
- }
- }
- // roll back old blocks
- for _, h := range oldHeaders {
- callBack(h, true)
- }
- // check new blocks (array is in reverse order)
- for i := len(newHeaders) - 1; i >= 0; i-- {
- callBack(newHeaders[i], false)
- }
-}
-
-// filter logs of a single header in light client mode
-func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
- if bloomFilter(header.Bloom, addresses, topics) {
- // Get the logs of the block
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
- defer cancel()
- logsList, err := es.backend.GetLogs(ctx, header.Hash())
- if err != nil {
- return nil
- }
- var unfiltered []*types.Log
- for _, logs := range logsList {
- for _, log := range logs {
- logcopy := *log
- logcopy.Removed = remove
- unfiltered = append(unfiltered, &logcopy)
- }
- }
- logs := filterLogs(unfiltered, nil, nil, addresses, topics)
- if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
- // We have matching but non-derived logs
- receipts, err := es.backend.GetReceipts(ctx, header.Hash())
- if err != nil {
- return nil
- }
- unfiltered = unfiltered[:0]
- for _, receipt := range receipts {
- for _, log := range receipt.Logs {
- logcopy := *log
- logcopy.Removed = remove
- unfiltered = append(unfiltered, &logcopy)
- }
- }
- logs = filterLogs(unfiltered, nil, nil, addresses, topics)
- }
- return logs
- }
- return nil
}
// eventLoop (un)installs filters and processes mux events.
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 60afa49f9fa..a92663392d5 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -177,7 +177,7 @@ func TestBlockSubscription(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
genesis = (&core.Genesis{Config: params.TestChainConfig}).MustCommit(db)
chain, _, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}, false /* intermediateHashes */)
chainEvents = []core.ChainEvent{}
@@ -231,7 +231,7 @@ func TestPendingTxFilter(t *testing.T) {
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
transactions = []*types.Transaction{
types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(uint256.Int), 0, new(uint256.Int), nil),
@@ -287,7 +287,7 @@ func TestLogFilterCreation(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
testCases = []struct {
crit FilterCriteria
@@ -331,7 +331,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
)
// different situations where log filter creation should fail.
@@ -354,7 +354,7 @@ func TestInvalidGetLogsRequest(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
)
@@ -381,7 +381,7 @@ func TestLogFilter(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -496,7 +496,7 @@ func TestPendingLogsSubscription(t *testing.T) {
defer db.Close()
var (
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, deadline)
+ api = NewPublicFilterAPI(backend, deadline)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -632,7 +632,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) {
var (
db = ethdb.NewMemoryDatabase()
backend = &testBackend{db: db}
- api = NewPublicFilterAPI(backend, false, timeout)
+ api = NewPublicFilterAPI(backend, timeout)
done = make(chan struct{})
)
diff --git a/eth/firehose.go b/eth/firehose.go
deleted file mode 100644
index b7b3de818d6..00000000000
--- a/eth/firehose.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package eth
-
-import (
- "math/big"
-
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/core/types/accounts"
- "github.com/ledgerwatch/turbo-geth/p2p"
- "github.com/ledgerwatch/turbo-geth/turbo/trie"
-)
-
-// FirehoseName is the official short name of the protocol used during capability negotiation.
-var FirehoseName = "frh" // Parity only supports 3 letter capabilities
-
-// FirehoseVersions are the supported versions of the Firehose protocol.
-var FirehoseVersions = []uint{1}
-
-// FirehoseLengths are the number of implemented message corresponding to different protocol versions.
-var FirehoseLengths = []uint64{12}
-
-// FirehoseMaxMsgSize is the maximum cap on the size of a message.
-const FirehoseMaxMsgSize = 10 * 1024 * 1024
-
-// MaxLeavesPerPrefix is the maximum number of leaves allowed per prefix.
-const MaxLeavesPerPrefix = 4096
-
-// Firehose protocol message codes
-const (
- GetStateRangesCode = 0x00
- StateRangesCode = 0x01
- GetStorageRangesCode = 0x02
- StorageRangesCode = 0x03
- GetStateNodesCode = 0x04
- StateNodesCode = 0x05
- GetStorageNodesCode = 0x06
- StorageNodesCode = 0x07
- GetBytecodeCode = 0x08
- BytecodeCode = 0x09
- GetStorageSizesCode = 0x0a
- StorageSizesCode = 0x0b
-)
-
-// Status of Firehose results.
-type Status uint
-
-const (
- // OK means success.
- OK Status = 0
- // NoData for the requested root; available blocks should be returned.
- NoData Status = 1
- // TooManyLeaves means that there're more than 4096 leaves matching the prefix.
- TooManyLeaves Status = 2
-)
-
-type firehosePeer struct {
- *p2p.Peer
- rw p2p.MsgReadWriter
-}
-
-type accountLeaf struct {
- Key common.Hash
- Val *accounts.Account
-}
-
-type firehoseAccountRange struct {
- Status Status
- Leaves []accountLeaf
-}
-
-type getStateRangesOrNodes struct {
- ID uint64
- Block common.Hash
- Prefixes []trie.Keybytes
-}
-
-type stateRangesMsg struct {
- ID uint64
- Entries []firehoseAccountRange
- AvailableBlocks []common.Hash
-}
-
-type storageReqForOneAccount struct {
- Account []byte // account address or hash thereof
- Prefixes []trie.Keybytes
-}
-
-type getStorageRangesOrNodes struct {
- ID uint64
- Block common.Hash
- Requests []storageReqForOneAccount
-}
-
-type storageLeaf struct {
- Key common.Hash
- Val big.Int
-}
-
-type storageRange struct {
- Status Status
- Leaves []storageLeaf
-}
-
-type storageRangesMsg struct {
- ID uint64
- Entries [][]storageRange
- AvailableBlocks []common.Hash
-}
-
-type stateNodesMsg struct {
- ID uint64
- Nodes [][]byte
- AvailableBlocks []common.Hash
-}
-
-type storageNodesMsg struct {
- ID uint64
- Nodes [][][]byte // indexing matches getStorageRangesOrNodes request: [#account/contract][#prefix][RLP]
- AvailableBlocks []common.Hash
-}
-
-type bytecodeRef struct {
- Account []byte // account address or hash thereof
- CodeHash common.Hash
-}
-
-type getBytecodeMsg struct {
- ID uint64
- Ref []bytecodeRef
-}
-
-type bytecodeMsg struct {
- ID uint64
- Code [][]byte
-}
-
-// SendByteCode sends a BytecodeCode message.
-func (p *firehosePeer) SendByteCode(id uint64, data [][]byte) error {
- msg := bytecodeMsg{ID: id, Code: data}
- return p2p.Send(p.rw, BytecodeCode, msg)
-}
diff --git a/eth/handler.go b/eth/handler.go
index 563820e50b3..b6457834659 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -95,8 +95,6 @@ type handler struct {
networkID uint64
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
- fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
- snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
checkpointNumber uint64 // Block number for the sync progress validator to cross reference
@@ -168,7 +166,7 @@ func newHandler(config *handlerConfig) (*handler, error) { //nolint:unparam
if err != nil {
log.Error("Get storage mode", "err", err)
}
- h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, config.Chain.Config(), config.Mining, config.Chain, nil, h.removePeer, sm)
+ h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, config.Chain.Config(), config.Mining, config.Chain, h.removePeer, sm)
h.downloader.SetTmpDir(h.tmpdir)
h.downloader.SetBatchSize(h.cacheSize, h.batchSize)
@@ -182,7 +180,7 @@ func newHandler(config *handlerConfig) (*handler, error) { //nolint:unparam
}
return 0, err
}
- h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer)
+ h.blockFetcher = fetcher.NewBlockFetcher(nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer)
fetchTx := func(peer string, hashes []common.Hash) error {
p := h.peers.peer(peer)
diff --git a/eth/handler_eth.go b/eth/handler_eth.go
index 7456f0b235e..164ab4d2da4 100644
--- a/eth/handler_eth.go
+++ b/eth/handler_eth.go
@@ -111,14 +111,6 @@ func (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) erro
// Stop the timer either way, decide later to drop or not
p.syncDrop.Stop()
p.syncDrop = nil
-
- // If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid
- // eclipse attacks. Unsynced nodes are welcome to connect after we're done
- // joining the network
- if atomic.LoadUint32(&h.fastSync) == 1 {
- peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name())
- return errors.New("unsynced node cannot serve sync")
- }
}
// Filter out any explicitly requested headers, deliver the rest to the downloader
filter := len(headers) == 1
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index 44565ad0e77..37eac68b8d9 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -19,7 +19,6 @@ package eth
import (
"fmt"
"math/big"
- "math/rand"
"sync/atomic"
"testing"
"time"
@@ -407,206 +406,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
}
}
-// Tests that transactions get propagated to all attached peers, either via direct
-// broadcasts or via announcements/retrievals.
-func TestTransactionPropagation64(t *testing.T) { testTransactionPropagation(t, 64) }
-func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, 65) }
-
-func testTransactionPropagation(t *testing.T, protocol uint) {
- t.Skip("deadlock")
- // Create a source handler to send transactions from and a number of sinks
- // to receive them. We need multiple sinks since a one-to-one peering would
- // broadcast all transactions without announcement.
- source := newTestHandler()
- defer source.close()
-
- sinks := make([]*testHandler, 10)
- for i := 0; i < len(sinks); i++ {
- sinks[i] = newTestHandler()
- defer sinks[i].close()
-
- sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
- }
- // Interconnect all the sink handlers with the source handler
- for i, sink := range sinks {
- sink := sink // Closure for gorotuine below
-
- sourcePipe, sinkPipe := p2p.MsgPipe()
- defer sourcePipe.Close()
- defer sinkPipe.Close()
-
- sourcePeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, source.txpool)
- sinkPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool)
- defer sourcePeer.Close()
- defer sinkPeer.Close()
-
- //nolint:errcheck
- go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {
- return eth.Handle((*ethHandler)(source.handler), peer)
- })
- //nolint:errcheck
- go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {
- return eth.Handle((*ethHandler)(sink.handler), peer)
- })
- }
- // Subscribe to all the transaction pools
- txChs := make([]chan core.NewTxsEvent, len(sinks))
- for i := 0; i < len(sinks); i++ {
- txChs[i] = make(chan core.NewTxsEvent, 1024)
-
- sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])
- defer sub.Unsubscribe()
- }
- // Fill the source pool with transactions and wait for them at the sinks
- txs := make([]*types.Transaction, 1024)
- for nonce := range txs {
- tx := types.NewTransaction(uint64(nonce), common.Address{}, uint256.NewInt(), 100000, uint256.NewInt(), nil)
- tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
-
- txs[nonce] = tx
- }
- source.txpool.AddRemotes(txs)
-
- // Iterate through all the sinks and ensure they all got the transactions
- for i := range sinks {
- for arrived := 0; arrived < len(txs); {
- select {
- case event := <-txChs[i]:
- arrived += len(event.Txs)
- case <-time.NewTimer(time.Second).C:
- t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
- }
- }
- }
-}
-
-// Tests that post eth protocol handshake, clients perform a mutual checkpoint
-// challenge to validate each other's chains. Hash mismatches, or missing ones
-// during a fast sync should lead to the peer getting dropped.
-func TestCheckpointChallenge(t *testing.T) {
- t.Skip("Not relevant for Turbo-Geth")
- tests := []struct {
- syncmode downloader.SyncMode
- checkpoint bool
- timeout bool
- empty bool
- match bool
- drop bool
- }{
- // If checkpointing is not enabled locally, don't challenge and don't drop
- {downloader.FullSync, false, false, false, false, false},
- {downloader.FastSync, false, false, false, false, false},
-
- // If checkpointing is enabled locally and remote response is empty, only drop during fast sync
- {downloader.FullSync, true, false, true, false, false},
- {downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer
-
- // If checkpointing is enabled locally and remote response mismatches, always drop
- {downloader.FullSync, true, false, false, false, true},
- {downloader.FastSync, true, false, false, false, true},
-
- // If checkpointing is enabled locally and remote response matches, never drop
- {downloader.FullSync, true, false, false, true, false},
- {downloader.FastSync, true, false, false, true, false},
-
- // If checkpointing is enabled locally and remote times out, always drop
- {downloader.FullSync, true, true, false, true, true},
- {downloader.FastSync, true, true, false, true, true},
- }
- for _, tt := range tests {
- t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
- testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop) //nolint:scopelint
- })
- }
-}
-
-func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {
- // Reduce the checkpoint handshake challenge timeout
- defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)
- syncChallengeTimeout = 250 * time.Millisecond
-
- // Create a test handler and inject a CHT into it. The injection is a bit
- // ugly, but it beats creating everything manually just to avoid reaching
- // into the internals a bit.
- handler := newTestHandler()
- defer handler.close()
-
- if syncmode == downloader.FastSync {
- atomic.StoreUint32(&handler.handler.fastSync, 1)
- } else {
- atomic.StoreUint32(&handler.handler.fastSync, 0)
- }
- var response *types.Header
- if checkpoint {
- number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1
- response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}
-
- handler.handler.checkpointNumber = number
- handler.handler.checkpointHash = response.Hash()
- }
- // Create a challenger peer and a challenged one
- p2pLocal, p2pRemote := p2p.MsgPipe()
- defer p2pLocal.Close()
- defer p2pRemote.Close()
-
- local := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{1}, "", nil), p2pLocal, handler.txpool)
- remote := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{2}, "", nil), p2pRemote, handler.txpool)
- defer local.Close()
- defer remote.Close()
-
- //nolint:errcheck
- go handler.handler.runEthPeer(local, func(peer *eth.Peer) error {
- return eth.Handle((*ethHandler)(handler.handler), peer)
- })
- // Run the handshake locally to avoid spinning up a remote handler
- var (
- genesis = handler.chain.Genesis()
- head = handler.headBlock
- )
- td, err := rawdb.ReadTd(handler.db, head.Hash(), head.NumberU64())
- if err != nil {
- t.Fatal(err)
- }
- if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewID(handler.chain.Config(), genesis.Hash(), head.NumberU64()), forkid.NewFilter(handler.chain.Config(), genesis.Hash(), func() uint64 { return head.NumberU64() })); err != nil {
- t.Fatalf("failed to run protocol handshake: %v", err)
- }
- // Connect a new peer and check that we receive the checkpoint challenge
- if checkpoint {
- if err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil {
- t.Fatalf("challenge mismatch: %v", err)
- }
- // Create a block to reply to the challenge if no timeout is simulated
- if !timeout {
- if empty {
- if err := remote.SendBlockHeaders([]*types.Header{}); err != nil {
- t.Fatalf("failed to answer challenge: %v", err)
- }
- } else if match {
- if err := remote.SendBlockHeaders([]*types.Header{response}); err != nil {
- t.Fatalf("failed to answer challenge: %v", err)
- }
- } else {
- if err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil {
- t.Fatalf("failed to answer challenge: %v", err)
- }
- }
- }
- }
- // Wait until the test timeout passes to ensure proper cleanup
- time.Sleep(syncChallengeTimeout + 300*time.Millisecond)
-
- // Verify that the remote peer is maintained or dropped
- if drop {
- if peers := handler.handler.peers.len(); peers != 0 {
- t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
- }
- } else {
- if peers := handler.handler.peers.len(); peers != 1 {
- t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
- }
- }
-}
-
// Tests that blocks are broadcast to a sqrt number of peers only.
func TestBroadcastBlock1Peer(t *testing.T) { testBroadcastBlock(t, 1, 1) }
func TestBroadcastBlock2Peers(t *testing.T) { testBroadcastBlock(t, 2, 1) }
@@ -698,13 +497,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
}
}
-// Tests that a propagated malformed block (uncles or transactions don't match
-// with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock64(t *testing.T) {
- // FIXME: restore after the Berlin relese
- t.Skip("fails")
- testBroadcastMalformedBlock(t, 64)
-}
func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, 65) }
func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
diff --git a/eth/peerset.go b/eth/peerset.go
index f0bbc58b6e3..95c755d7c77 100644
--- a/eth/peerset.go
+++ b/eth/peerset.go
@@ -155,11 +155,6 @@ func (ps *peerSet) peerWithHighestNumber() *eth.Peer {
return bestPeer
}
-// peerWithHighestTD is an alias to the highest number for testing and rebase simplicity
-func (ps *peerSet) peerWithHighestTD() *eth.Peer { //nolint:unused
- return ps.peerWithHighestNumber()
-}
-
// close disconnects all peers.
func (ps *peerSet) close() {
ps.lock.Lock()
diff --git a/eth/stagedsync/all_stages.go b/eth/stagedsync/all_stages.go
index 13dc66fd6b7..3785b380bfe 100644
--- a/eth/stagedsync/all_stages.go
+++ b/eth/stagedsync/all_stages.go
@@ -134,42 +134,7 @@ func createStageBuilders(blocks []*types.Block, blockNum uint64, checkRoot bool)
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
ExecFunc: func(s *StageState, u Unwinder) error {
- /*
- var a accounts.Account
- c := world.TX.(ethdb.HasTx).Tx().Cursor(dbutils.PlainStateBucket)
- for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
- if err != nil {
- return err
- }
- if len(k) != 20 {
- fmt.Printf("%x => %x\n", k, v)
- } else {
- if err1 := a.DecodeForStorage(v); err1 != nil {
- return err1
- }
- fmt.Printf("%x => bal: %d nonce: %d codehash: %x, inc: %d\n", k, a.Balance.ToBig(), a.Nonce, a.CodeHash, a.Incarnation)
- }
- }
- c.Close()
- */
- /*
- c = world.TX.(ethdb.HasTx).Tx().Cursor(dbutils.CurrentStateBucket)
- for k, v, err := c.First(); k != nil; k, v, err = c.Next() {
- if err != nil {
- return err
- }
- if len(k) != 32 {
- fmt.Printf("%x => %x\n", k, v)
- } else {
- if err1 := a.DecodeForStorage(v); err1 != nil {
- return err1
- }
- fmt.Printf("%x => bal: %d nonce: %d codehash: %x, inc: %d\n", k, a.Balance.ToBig(), a.Nonce, a.CodeHash, a.Incarnation)
- }
- }
- c.Close()
- */
- _, err := SpawnIntermediateHashesStage(s, world.TX, checkRoot /* checkRoot */, world.cache, world.TmpDir, world.QuitCh)
+ _, err := SpawnIntermediateHashesStage(s, world.TX, checkRoot, world.cache, world.TmpDir, world.QuitCh)
return err
},
UnwindFunc: func(u *UnwindState, s *StageState) error {
diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go
index 76eb6536ae2..a78ffc376cd 100644
--- a/eth/stagedsync/stage_execute.go
+++ b/eth/stagedsync/stage_execute.go
@@ -479,16 +479,6 @@ func writeAccountPlain(logPrefix string, db ethdb.Database, key string, acc acco
return rawdb.PlainWriteAccount(db, address, acc)
}
-func recoverCodeHashHashed(acc *accounts.Account, db ethdb.Getter, key string) {
- var addrHash common.Hash
- copy(addrHash[:], []byte(key))
- if acc.Incarnation > 0 && acc.IsEmptyCodeHash() {
- if codeHash, err2 := db.Get(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash[:], acc.Incarnation)); err2 == nil {
- copy(acc.CodeHash[:], codeHash)
- }
- }
-}
-
func cleanupContractCodeBucket(
logPrefix string,
db ethdb.Database,
@@ -524,29 +514,12 @@ func recoverCodeHashPlain(acc *accounts.Account, db ethdb.Getter, key string) {
}
}
-func deleteAccountHashed(db rawdb.DatabaseDeleter, key string) error {
- var addrHash common.Hash
- copy(addrHash[:], []byte(key))
- return rawdb.DeleteAccount(db, addrHash)
-}
-
func deleteAccountPlain(db ethdb.Deleter, key string) error {
var address common.Address
copy(address[:], key)
return rawdb.PlainDeleteAccount(db, address)
}
-func deleteChangeSets(batch ethdb.Deleter, timestamp uint64, accountBucket, storageBucket string) error {
- changeSetKey := dbutils.EncodeBlockNumber(timestamp)
- if err := batch.Delete(accountBucket, changeSetKey, nil); err != nil {
- return err
- }
- if err := batch.Delete(storageBucket, changeSetKey, nil); err != nil {
- return err
- }
- return nil
-}
-
func min(a, b uint64) uint64 {
if a <= b {
return a
diff --git a/eth/stagedsync/stagedsync.go b/eth/stagedsync/stagedsync.go
index 506b3a286f3..058a63fe8d8 100644
--- a/eth/stagedsync/stagedsync.go
+++ b/eth/stagedsync/stagedsync.go
@@ -12,8 +12,6 @@ import (
"github.com/ledgerwatch/turbo-geth/turbo/stages/bodydownload"
)
-const prof = false // whether to profile
-
type StagedSync struct {
PrefetchedBlocks *bodydownload.PrefetchedBlocks
stageBuilders StageBuilders
diff --git a/eth/sync.go b/eth/sync.go
index c542974d99f..abf5d3370a4 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -27,7 +27,6 @@ import (
"github.com/ledgerwatch/turbo-geth/core/types"
"github.com/ledgerwatch/turbo-geth/eth/downloader"
"github.com/ledgerwatch/turbo-geth/eth/protocols/eth"
- "github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/p2p/enode"
)
@@ -294,14 +293,6 @@ func (h *handler) doSync(op *chainSyncOp) error {
if err != nil {
return err
}
- if atomic.LoadUint32(&h.fastSync) == 1 {
- log.Info("Fast sync complete, auto disabling")
- atomic.StoreUint32(&h.fastSync, 0)
- }
- if atomic.LoadUint32(&h.snapSync) == 1 {
- log.Info("Snap sync complete, auto disabling")
- atomic.StoreUint32(&h.snapSync, 0)
- }
// If we've successfully finished a sync cycle and passed any required checkpoint,
// enable accepting transactions from the network.
headHash := rawdb.ReadHeadHeaderHash(h.database)
diff --git a/eth/sync_test.go b/eth/sync_test.go
deleted file mode 100644
index 659faf77a90..00000000000
--- a/eth/sync_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package eth
-
-import (
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/ledgerwatch/turbo-geth/eth/downloader"
- "github.com/ledgerwatch/turbo-geth/eth/protocols/eth"
- "github.com/ledgerwatch/turbo-geth/p2p"
- "github.com/ledgerwatch/turbo-geth/p2p/enode"
-)
-
-func TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) }
-func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) }
-
-// Tests that fast sync gets disabled as soon as a real block is successfully
-// imported into the blockchain.
-func testFastSyncDisabling(t *testing.T, protocol uint) {
- t.Skip("should be restored. skipped for turbo-geth")
-
- // Create an empty handler and ensure it's in fast sync mode
- empty := newTestHandler()
- if atomic.LoadUint32(&empty.handler.fastSync) == 0 {
- t.Fatalf("fast sync disabled on pristine blockchain")
- }
- defer empty.close()
-
- // Create a full handler and ensure fast sync ends up disabled
- full := newTestHandlerWithBlocks(1024)
- if atomic.LoadUint32(&full.handler.fastSync) == 1 {
- t.Fatalf("fast sync not disabled on non-empty blockchain")
- }
- defer full.close()
-
- // Sync up the two handlers
- emptyPipe, fullPipe := p2p.MsgPipe()
- defer emptyPipe.Close()
- defer fullPipe.Close()
-
- emptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), emptyPipe, empty.txpool)
- fullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), fullPipe, full.txpool)
- defer emptyPeer.Close()
- defer fullPeer.Close()
-
- //nolint:errcheck
- go empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error {
- return eth.Handle((*ethHandler)(empty.handler), peer)
- })
- //nolint:errcheck
- go full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error {
- return eth.Handle((*ethHandler)(full.handler), peer)
- })
- // Wait a bit for the above handlers to start
- time.Sleep(250 * time.Millisecond)
-
- // Check that fast sync was disabled
- op := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD())
- if err := empty.handler.doSync(op); err != nil {
- t.Fatal("sync failed:", err)
- }
- if atomic.LoadUint32(&empty.handler.fastSync) == 1 {
- t.Fatalf("fast sync not disabled after successful synchronisation")
- }
-}
diff --git a/ethdb/kv_lmdb.go b/ethdb/kv_lmdb.go
index fd5c412702c..5227f815568 100644
--- a/ethdb/kv_lmdb.go
+++ b/ethdb/kv_lmdb.go
@@ -636,16 +636,6 @@ func (tx *lmdbTx) Commit(ctx context.Context) error {
log.Info("Batch", "commit", commitTook)
}
- //if tx.db.opts.flags&lmdb.Readonly == 0 && !tx.db.opts.inMem { // call fsync only after main transaction commit
- // fsyncTimer := time.Now()
- // if err := tx.db.env.Sync(tx.flags&NoSync == 0); err != nil {
- // log.Warn("fsync after commit failed", "err", err)
- // }
- // fsyncTook := time.Since(fsyncTimer)
- // if fsyncTook > 20*time.Second {
- // log.Info("Batch", "fsync", fsyncTook)
- // }
- //}
return nil
}
diff --git a/go.mod b/go.mod
index bc3459c2d76..67aa485a74d 100644
--- a/go.mod
+++ b/go.mod
@@ -33,7 +33,6 @@ require (
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.1.5
github.com/gorilla/websocket v1.4.2
- github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
diff --git a/go.sum b/go.sum
index b8d6536d9f1..d62ad04f94e 100644
--- a/go.sum
+++ b/go.sum
@@ -409,8 +409,6 @@ github.com/gosuri/uilive v0.0.3/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi
github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/gosuri/uiprogress v0.0.1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0=
github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0=
@@ -620,7 +618,6 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
diff --git a/graphql/graphiql.go b/graphql/graphiql.go
deleted file mode 100644
index 864ebf57df2..00000000000
--- a/graphql/graphiql.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// The MIT License (MIT)
-//
-// Copyright (c) 2016 Muhammed Thanish
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package graphql
-
-import (
- "bytes"
- "fmt"
- "net/http"
-)
-
-// GraphiQL is an in-browser IDE for exploring GraphiQL APIs.
-// This handler returns GraphiQL when requested.
-//
-// For more information, see https://github.com/graphql/graphiql.
-type GraphiQL struct{}
-
-func respond(w http.ResponseWriter, body []byte, code int) {
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- w.Header().Set("X-Content-Type-Options", "nosniff")
- w.WriteHeader(code)
- _, _ = w.Write(body)
-}
-
-func errorJSON(msg string) []byte {
- buf := bytes.Buffer{}
- fmt.Fprintf(&buf, `{"error": "%s"}`, msg)
- return buf.Bytes()
-}
-
-func (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- respond(w, errorJSON("only GET requests are supported"), http.StatusMethodNotAllowed)
- return
- }
- w.Header().Set("Content-Type", "text/html")
- w.Write(graphiql)
-}
-
-var graphiql = []byte(`
-
-
-
-
-
-
-
-
-
-
-
-
Loading...
-
-
-
-`)
diff --git a/graphql/graphql.go b/graphql/graphql.go
deleted file mode 100644
index b3955c72492..00000000000
--- a/graphql/graphql.go
+++ /dev/null
@@ -1,1126 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Package graphql provides a GraphQL interface to Ethereum node data.
-package graphql
-
-import (
- "context"
- "errors"
- "fmt"
- "strconv"
- "time"
-
- "github.com/holiman/uint256"
-
- ethereum "github.com/ledgerwatch/turbo-geth"
- "github.com/ledgerwatch/turbo-geth/common"
- "github.com/ledgerwatch/turbo-geth/common/hexutil"
- "github.com/ledgerwatch/turbo-geth/core/rawdb"
- "github.com/ledgerwatch/turbo-geth/core/state"
- "github.com/ledgerwatch/turbo-geth/core/types"
- "github.com/ledgerwatch/turbo-geth/core/vm"
- "github.com/ledgerwatch/turbo-geth/eth/filters"
- "github.com/ledgerwatch/turbo-geth/internal/ethapi"
- "github.com/ledgerwatch/turbo-geth/rpc"
-)
-
-var (
- errBlockInvariant = errors.New("block objects must be instantiated with at least one of num or hash")
-)
-
-type Long int64
-
-// ImplementsGraphQLType returns true if Long implements the provided GraphQL type.
-func (b Long) ImplementsGraphQLType(name string) bool { return name == "Long" }
-
-// UnmarshalGraphQL unmarshals the provided GraphQL query data.
-func (b *Long) UnmarshalGraphQL(input interface{}) error {
- var err error
- switch input := input.(type) {
- case string:
- // uncomment to support hex values
- //if strings.HasPrefix(input, "0x") {
- // // apply leniency and support hex representations of longs.
- // value, err := hexutil.DecodeUint64(input)
- // *b = Long(value)
- // return err
- //} else {
- value, err1 := strconv.ParseInt(input, 10, 64)
- *b = Long(value)
- return err1
- //}
- case int32:
- *b = Long(input)
- case int64:
- *b = Long(input)
- default:
- err = fmt.Errorf("unexpected type %T for Long", input)
- }
- return err
-}
-
-// Account represents an Ethereum account at a particular block.
-type Account struct {
- backend ethapi.Backend
- address common.Address
- blockNrOrHash rpc.BlockNumberOrHash
-}
-
-// getState fetches the IntraBlockState object for an account.
-func (a *Account) getState(ctx context.Context) (*state.IntraBlockState, error) {
- state, _, err := a.backend.StateAndHeaderByNumber(ctx, *a.blockNrOrHash.BlockNumber)
- return state, err
-}
-
-func (a *Account) Address(ctx context.Context) (common.Address, error) {
- return a.address, nil
-}
-
-func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
- state, err := a.getState(ctx)
- if err != nil {
- return hexutil.Big{}, err
- }
- return hexutil.Big(*state.GetBalance(a.address).ToBig()), nil
-}
-
-func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
- state, err := a.getState(ctx)
- if err != nil {
- return 0, err
- }
- return hexutil.Uint64(state.GetNonce(a.address)), nil
-}
-
-func (a *Account) Code(ctx context.Context) (hexutil.Bytes, error) {
- state, err := a.getState(ctx)
- if err != nil {
- return hexutil.Bytes{}, err
- }
- return state.GetCode(a.address), nil
-}
-
-func (a *Account) Storage(ctx context.Context, args struct{ Slot common.Hash }) (common.Hash, error) {
- state, err := a.getState(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- var val uint256.Int
- state.GetState(a.address, &args.Slot, &val)
- return val.Bytes32(), nil
-}
-
-// Log represents an individual log message. All arguments are mandatory.
-type Log struct {
- backend ethapi.Backend
- transaction *Transaction
- log *types.Log
-}
-
-func (l *Log) Transaction(ctx context.Context) *Transaction {
- return l.transaction
-}
-
-func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
- return &Account{
- backend: l.backend,
- address: l.log.Address,
- blockNrOrHash: args.NumberOrLatest(),
- }
-}
-
-func (l *Log) Index(ctx context.Context) int32 {
- return int32(l.log.Index)
-}
-
-func (l *Log) Topics(ctx context.Context) []common.Hash {
- return l.log.Topics
-}
-
-func (l *Log) Data(ctx context.Context) hexutil.Bytes {
- return l.log.Data
-}
-
-// Transaction represents an Ethereum transaction.
-// backend and hash are mandatory; all others will be fetched when required.
-type Transaction struct {
- backend ethapi.Backend
- hash common.Hash
- tx *types.Transaction
- block *Block
- index uint64
-}
-
-// resolve returns the internal transaction object, fetching it if needed.
-func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, error) {
- if t.tx == nil {
- tx, blockHash, _, index := rawdb.ReadTransaction(t.backend.ChainDb(), t.hash)
- if tx != nil {
- t.tx = tx
- blockNrOrHash := rpc.BlockNumberOrHashWithHash(blockHash, false)
- t.block = &Block{
- backend: t.backend,
- numberOrHash: &blockNrOrHash,
- }
- t.index = index
- } else {
- t.tx = t.backend.GetPoolTransaction(t.hash)
- }
- }
- return t.tx, nil
-}
-
-func (t *Transaction) Hash(ctx context.Context) common.Hash {
- return t.hash
-}
-
-func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Bytes{}, err
- }
- return tx.Data(), nil
-}
-
-func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return 0, err
- }
- return hexutil.Uint64(tx.Gas()), nil
-}
-
-func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Big{}, err
- }
- return hexutil.Big(*tx.GasPrice().ToBig()), nil
-}
-
-func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Big{}, err
- }
- return hexutil.Big(*tx.Value().ToBig()), nil
-}
-
-func (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return 0, err
- }
- return hexutil.Uint64(tx.Nonce()), nil
-}
-
-func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return nil, err
- }
- to := tx.To()
- if to == nil {
- return nil, nil
- }
- return &Account{
- backend: t.backend,
- address: *to,
- blockNrOrHash: args.NumberOrLatest(),
- }, nil
-}
-
-func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return nil, err
- }
- signer := types.LatestSigner(t.backend.ChainConfig())
- from, _ := types.Sender(signer, tx)
- return &Account{
- backend: t.backend,
- address: from,
- blockNrOrHash: args.NumberOrLatest(),
- }, nil
-}
-
-func (t *Transaction) Block(ctx context.Context) (*Block, error) {
- if _, err := t.resolve(ctx); err != nil {
- return nil, err
- }
- return t.block, nil
-}
-
-func (t *Transaction) Index(ctx context.Context) (*int32, error) {
- if _, err := t.resolve(ctx); err != nil {
- return nil, err
- }
- if t.block == nil {
- return nil, nil
- }
- index := int32(t.index)
- return &index, nil
-}
-
-// getReceipt returns the receipt associated with this transaction, if any.
-func (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) {
- if _, err := t.resolve(ctx); err != nil {
- return nil, err
- }
- if t.block == nil {
- return nil, nil
- }
- receipts, err := t.block.resolveReceipts(ctx)
- if err != nil {
- return nil, err
- }
- return receipts[t.index], nil
-}
-
-func (t *Transaction) Status(ctx context.Context) (*Long, error) {
- receipt, err := t.getReceipt(ctx)
- if err != nil || receipt == nil {
- return nil, err
- }
- ret := Long(receipt.Status)
- return &ret, nil
-}
-
-func (t *Transaction) GasUsed(ctx context.Context) (*Long, error) {
- receipt, err := t.getReceipt(ctx)
- if err != nil || receipt == nil {
- return nil, err
- }
- ret := Long(receipt.GasUsed)
- return &ret, nil
-}
-
-func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*Long, error) {
- receipt, err := t.getReceipt(ctx)
- if err != nil || receipt == nil {
- return nil, err
- }
- ret := Long(receipt.CumulativeGasUsed)
- return &ret, nil
-}
-
-func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- receipt, err := t.getReceipt(ctx)
- if err != nil || receipt == nil || receipt.ContractAddress == (common.Address{}) {
- return nil, err
- }
- return &Account{
- backend: t.backend,
- address: receipt.ContractAddress,
- blockNrOrHash: args.NumberOrLatest(),
- }, nil
-}
-
-func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) {
- receipt, err := t.getReceipt(ctx)
- if err != nil || receipt == nil {
- return nil, err
- }
- ret := make([]*Log, 0, len(receipt.Logs))
- for _, log := range receipt.Logs {
- ret = append(ret, &Log{
- backend: t.backend,
- transaction: t,
- log: log,
- })
- }
- return &ret, nil
-}
-
-func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Big{}, err
- }
- _, r, _ := tx.RawSignatureValues()
- return hexutil.Big(*r.ToBig()), nil
-}
-
-func (t *Transaction) S(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Big{}, err
- }
- _, _, s := tx.RawSignatureValues()
- return hexutil.Big(*s.ToBig()), nil
-}
-
-func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {
- tx, err := t.resolve(ctx)
- if err != nil || tx == nil {
- return hexutil.Big{}, err
- }
- v, _, _ := tx.RawSignatureValues()
- return hexutil.Big(*v.ToBig()), nil
-}
-
-type BlockType int
-
-// Block represents an Ethereum block.
-// backend, and numberOrHash are mandatory. All other fields are lazily fetched
-// when required.
-type Block struct {
- backend ethapi.Backend
- numberOrHash *rpc.BlockNumberOrHash
- hash common.Hash
- header *types.Header
- block *types.Block
- receipts []*types.Receipt
-}
-
-// resolve returns the internal Block object representing this block, fetching
-// it if necessary.
-func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
- if b.block != nil {
- return b.block, nil
- }
- if b.numberOrHash == nil {
- latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
- b.numberOrHash = &latest
- }
- var err error
- b.block, err = b.backend.BlockByNumberOrHash(ctx, *b.numberOrHash)
- if b.block != nil && b.header == nil {
- b.header = b.block.Header()
- if hash, ok := b.numberOrHash.Hash(); ok {
- b.hash = hash
- }
- }
- return b.block, err
-}
-
-// resolveHeader returns the internal Header object for this block, fetching it
-// if necessary. Call this function instead of `resolve` unless you need the
-// additional data (transactions and uncles).
-func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
- if b.numberOrHash == nil && b.hash == (common.Hash{}) {
- return nil, errBlockInvariant
- }
- var err error
- if b.header == nil {
- if b.hash != (common.Hash{}) {
- b.header, err = b.backend.HeaderByHash(ctx, b.hash)
- } else {
- b.header, err = b.backend.HeaderByNumberOrHash(ctx, *b.numberOrHash)
- }
- }
- return b.header, err
-}
-
-// resolveReceipts returns the list of receipts for this block, fetching them
-// if necessary.
-func (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) {
- if b.receipts == nil {
- hash := b.hash
- if hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- hash = header.Hash()
- }
- receipts, err := b.backend.GetReceipts(ctx, hash)
- if err != nil {
- return nil, err
- }
- b.receipts = receipts
- }
- return b.receipts, nil
-}
-
-func (b *Block) Number(ctx context.Context) (Long, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
-
- return Long(header.Number.Uint64()), nil
-}
-
-func (b *Block) Hash(ctx context.Context) (common.Hash, error) {
- if b.hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- b.hash = header.Hash()
- }
- return b.hash, nil
-}
-
-func (b *Block) GasLimit(ctx context.Context) (Long, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
- return Long(header.GasLimit), nil
-}
-
-func (b *Block) GasUsed(ctx context.Context) (Long, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
- return Long(header.GasUsed), nil
-}
-
-func (b *Block) Parent(ctx context.Context) (*Block, error) {
- // If the block header hasn't been fetched, and we'll need it, fetch it.
- if b.numberOrHash == nil && b.header == nil {
- if _, err := b.resolveHeader(ctx); err != nil {
- return nil, err
- }
- }
- if b.header != nil && b.header.Number.Uint64() > 0 {
- num := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(b.header.Number.Uint64() - 1))
- return &Block{
- backend: b.backend,
- numberOrHash: &num,
- hash: b.header.ParentHash,
- }, nil
- }
- return nil, nil
-}
-
-func (b *Block) Difficulty(ctx context.Context) (hexutil.Big, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Big{}, err
- }
- return hexutil.Big(*header.Difficulty), nil
-}
-
-func (b *Block) Timestamp(ctx context.Context) (hexutil.Uint64, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
- return hexutil.Uint64(header.Time), nil
-}
-
-func (b *Block) Nonce(ctx context.Context) (hexutil.Bytes, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Bytes{}, err
- }
- return header.Nonce[:], nil
-}
-
-func (b *Block) MixHash(ctx context.Context) (common.Hash, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- return header.MixDigest, nil
-}
-
-func (b *Block) TransactionsRoot(ctx context.Context) (common.Hash, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- return header.TxHash, nil
-}
-
-func (b *Block) StateRoot(ctx context.Context) (common.Hash, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- return header.Root, nil
-}
-
-func (b *Block) ReceiptsRoot(ctx context.Context) (common.Hash, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- return header.ReceiptHash, nil
-}
-
-func (b *Block) OmmerHash(ctx context.Context) (common.Hash, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return common.Hash{}, err
- }
- return header.UncleHash, nil
-}
-
-func (b *Block) OmmerCount(ctx context.Context) (*int32, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- count := int32(len(block.Uncles()))
- return &count, err
-}
-
-func (b *Block) Ommers(ctx context.Context) (*[]*Block, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- ret := make([]*Block, 0, len(block.Uncles()))
- for _, uncle := range block.Uncles() {
- blockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)
- ret = append(ret, &Block{
- backend: b.backend,
- numberOrHash: &blockNumberOrHash,
- header: uncle,
- })
- }
- return &ret, nil
-}
-
-func (b *Block) ExtraData(ctx context.Context) (hexutil.Bytes, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Bytes{}, err
- }
- return header.Extra, nil
-}
-
-func (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Bytes{}, err
- }
- return header.Bloom.Bytes(), nil
-}
-
-func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {
- h := b.hash
- if h == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return hexutil.Big{}, err
- }
- h = header.Hash()
- }
- return hexutil.Big(*b.backend.GetTd(ctx, h)), nil
-}
-
-// BlockNumberArgs encapsulates arguments to accessors that specify a block number.
-type BlockNumberArgs struct {
- // TODO: Ideally we could use input unions to allow the query to specify the
- // block parameter by hash, block number, or tag but input unions aren't part of the
- // standard GraphQL schema SDL yet, see: https://github.com/graphql/graphql-spec/issues/488
- Block *hexutil.Uint64
-}
-
-// NumberOr returns the provided block number argument, or the "current" block number or hash if none
-// was provided.
-func (a BlockNumberArgs) NumberOr(current rpc.BlockNumberOrHash) rpc.BlockNumberOrHash {
- if a.Block != nil {
- blockNr := rpc.BlockNumber(*a.Block)
- return rpc.BlockNumberOrHashWithNumber(blockNr)
- }
- return current
-}
-
-// NumberOrLatest returns the provided block number argument, or the "latest" block number if none
-// was provided.
-func (a BlockNumberArgs) NumberOrLatest() rpc.BlockNumberOrHash {
- return a.NumberOr(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
-}
-
-func (b *Block) Miner(ctx context.Context, args BlockNumberArgs) (*Account, error) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- return &Account{
- backend: b.backend,
- address: header.Coinbase,
- blockNrOrHash: args.NumberOrLatest(),
- }, nil
-}
-
-func (b *Block) TransactionCount(ctx context.Context) (*int32, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- count := int32(len(block.Transactions()))
- return &count, err
-}
-
-func (b *Block) Transactions(ctx context.Context) (*[]*Transaction, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- ret := make([]*Transaction, 0, len(block.Transactions()))
- for i, tx := range block.Transactions() {
- ret = append(ret, &Transaction{
- backend: b.backend,
- hash: tx.Hash(),
- tx: tx,
- block: b,
- index: uint64(i),
- })
- }
- return &ret, nil
-}
-
-func (b *Block) TransactionAt(ctx context.Context, args struct{ Index int32 }) (*Transaction, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- txs := block.Transactions()
- if args.Index < 0 || int(args.Index) >= len(txs) {
- return nil, nil
- }
- tx := txs[args.Index]
- return &Transaction{
- backend: b.backend,
- hash: tx.Hash(),
- tx: tx,
- block: b,
- index: uint64(args.Index),
- }, nil
-}
-
-func (b *Block) OmmerAt(ctx context.Context, args struct{ Index int32 }) (*Block, error) {
- block, err := b.resolve(ctx)
- if err != nil || block == nil {
- return nil, err
- }
- uncles := block.Uncles()
- if args.Index < 0 || int(args.Index) >= len(uncles) {
- return nil, nil
- }
- uncle := uncles[args.Index]
- blockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)
- return &Block{
- backend: b.backend,
- numberOrHash: &blockNumberOrHash,
- header: uncle,
- }, nil
-}
-
-// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside
-// a block.
-type BlockFilterCriteria struct {
- Addresses *[]common.Address // restricts matches to events created by specific contracts
-
- // The Topic list restricts matches to particular event topics. Each event has a list
- // of topics. Topics matches a prefix of that list. An empty element slice matches any
- // topic. Non-empty elements represent an alternative that matches any of the
- // contained topics.
- //
- // Examples:
- // {} or nil matches any topic list
- // {{A}} matches topic A in first position
- // {{}, {B}} matches any topic in first position, B in second position
- // {{A}, {B}} matches topic A in first position, B in second position
- // {{A, B}}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
- Topics *[][]common.Hash
-}
-
-// runFilter accepts a filter and executes it, returning all its results as
-// `Log` objects.
-func runFilter(ctx context.Context, be ethapi.Backend, filter *filters.Filter) ([]*Log, error) {
- logs, err := filter.Logs(ctx)
- if err != nil || logs == nil {
- return nil, err
- }
- ret := make([]*Log, 0, len(logs))
- for _, log := range logs {
- ret = append(ret, &Log{
- backend: be,
- transaction: &Transaction{backend: be, hash: log.TxHash},
- log: log,
- })
- }
- return ret, nil
-}
-
-func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteria }) ([]*Log, error) {
- var addresses []common.Address
- if args.Filter.Addresses != nil {
- addresses = *args.Filter.Addresses
- }
- var topics [][]common.Hash
- if args.Filter.Topics != nil {
- topics = *args.Filter.Topics
- }
- hash := b.hash
- if hash == (common.Hash{}) {
- header, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- hash = header.Hash()
- }
- // Construct the range filter
- filter := filters.NewBlockFilter(b.backend, hash, addresses, topics)
-
- // Run the filter and return all the logs
- return runFilter(ctx, b.backend, filter)
-}
-
-func (b *Block) Account(ctx context.Context, args struct {
- Address common.Address
-}) (*Account, error) {
- if b.numberOrHash == nil {
- _, err := b.resolveHeader(ctx)
- if err != nil {
- return nil, err
- }
- }
- return &Account{
- backend: b.backend,
- address: args.Address,
- blockNrOrHash: *b.numberOrHash,
- }, nil
-}
-
-// CallData encapsulates arguments to `call` or `estimateGas`.
-// All arguments are optional.
-type CallData struct {
- From *common.Address // The Ethereum address the call is from.
- To *common.Address // The Ethereum address the call is to.
- Gas *hexutil.Uint64 // The amount of gas provided for the call.
- GasPrice *hexutil.Big // The price of each unit of gas, in wei.
- Value *hexutil.Big // The value sent along with the call.
- Data *hexutil.Bytes // Any data sent with the call.
-}
-
-// CallResult encapsulates the result of an invocation of the `call` accessor.
-type CallResult struct {
- data hexutil.Bytes // The return data from the call
- gasUsed Long // The amount of gas used
- status Long // The return status of the call - 0 for failure or 1 for success.
-}
-
-func (c *CallResult) Data() hexutil.Bytes {
- return c.data
-}
-
-func (c *CallResult) GasUsed() Long {
- return c.gasUsed
-}
-
-func (c *CallResult) Status() Long {
- return c.status
-}
-
-func (b *Block) Call(ctx context.Context, args struct {
- Data ethapi.CallArgs
-}) (*CallResult, error) {
- if b.numberOrHash == nil {
- _, err := b.resolve(ctx)
- if err != nil {
- return nil, err
- }
- }
- result, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCGasCap())
- if err != nil {
- return nil, err
- }
- status := Long(1)
- if result.Failed() {
- status = 0
- }
-
- return &CallResult{
- data: result.ReturnData,
- gasUsed: Long(result.UsedGas),
- status: status,
- }, nil
-}
-
-func (b *Block) EstimateGas(ctx context.Context, args struct {
- Data ethapi.CallArgs
-}) (Long, error) {
- if b.numberOrHash == nil {
- _, err := b.resolveHeader(ctx)
- if err != nil {
- return 0, err
- }
- }
- gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.numberOrHash, b.backend.RPCGasCap())
- return Long(gas), err
-}
-
-type Pending struct {
- backend ethapi.Backend
-}
-
-func (p *Pending) TransactionCount(ctx context.Context) (int32, error) {
- txs, err := p.backend.GetPoolTransactions()
- return int32(len(txs)), err
-}
-
-func (p *Pending) Transactions(ctx context.Context) (*[]*Transaction, error) {
- txs, err := p.backend.GetPoolTransactions()
- if err != nil {
- return nil, err
- }
- ret := make([]*Transaction, 0, len(txs))
- for i, tx := range txs {
- ret = append(ret, &Transaction{
- backend: p.backend,
- hash: tx.Hash(),
- tx: tx,
- index: uint64(i),
- })
- }
- return &ret, nil
-}
-
-func (p *Pending) Account(ctx context.Context, args struct {
- Address common.Address
-}) *Account {
- pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- return &Account{
- backend: p.backend,
- address: args.Address,
- blockNrOrHash: pendingBlockNr,
- }
-}
-
-func (p *Pending) Call(ctx context.Context, args struct {
- Data ethapi.CallArgs
-}) (*CallResult, error) {
- pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- result, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap())
- if err != nil {
- return nil, err
- }
- status := Long(1)
- if result.Failed() {
- status = 0
- }
-
- return &CallResult{
- data: result.ReturnData,
- gasUsed: Long(result.UsedGas),
- status: status,
- }, nil
-}
-
-func (p *Pending) EstimateGas(ctx context.Context, args struct {
- Data ethapi.CallArgs
-}) (Long, error) {
- pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- gas, err := ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap())
- return Long(gas), err
-}
-
-// Resolver is the top-level object in the GraphQL hierarchy.
-type Resolver struct {
- backend ethapi.Backend
-}
-
-func (r *Resolver) Block(ctx context.Context, args struct {
- Number *Long
- Hash *common.Hash
-}) (*Block, error) {
- var block *Block
- if args.Number != nil {
- if *args.Number < 0 {
- return nil, nil
- }
- number := rpc.BlockNumber(*args.Number)
- numberOrHash := rpc.BlockNumberOrHashWithNumber(number)
- block = &Block{
- backend: r.backend,
- numberOrHash: &numberOrHash,
- }
- } else if args.Hash != nil {
- numberOrHash := rpc.BlockNumberOrHashWithHash(*args.Hash, false)
- block = &Block{
- backend: r.backend,
- numberOrHash: &numberOrHash,
- }
- } else {
- numberOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
- block = &Block{
- backend: r.backend,
- numberOrHash: &numberOrHash,
- }
- }
- // Resolve the header, return nil if it doesn't exist.
- // Note we don't resolve block directly here since it will require an
- // additional network request for light client.
- h, err := block.resolveHeader(ctx)
- if err != nil {
- return nil, err
- } else if h == nil {
- return nil, nil
- }
- return block, nil
-}
-
-func (r *Resolver) Blocks(ctx context.Context, args struct {
- From *Long
- To *Long
-}) ([]*Block, error) {
- from := rpc.BlockNumber(*args.From)
-
- var to rpc.BlockNumber
- if args.To != nil {
- to = rpc.BlockNumber(*args.To)
- } else {
- to = rpc.BlockNumber(r.backend.CurrentBlock().Number().Int64())
- }
- if to < from {
- return []*Block{}, nil
- }
- ret := make([]*Block, 0, to-from+1)
- for i := from; i <= to; i++ {
- numberOrHash := rpc.BlockNumberOrHashWithNumber(i)
- ret = append(ret, &Block{
- backend: r.backend,
- numberOrHash: &numberOrHash,
- })
- }
- return ret, nil
-}
-
-func (r *Resolver) Pending(ctx context.Context) *Pending {
- return &Pending{r.backend}
-}
-
-func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Hash }) (*Transaction, error) {
- tx := &Transaction{
- backend: r.backend,
- hash: args.Hash,
- }
- // Resolve the transaction; if it doesn't exist, return nil.
- t, err := tx.resolve(ctx)
- if err != nil {
- return nil, err
- } else if t == nil {
- return nil, nil
- }
- return tx, nil
-}
-
-func (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hexutil.Bytes }) (common.Hash, error) {
- tx := new(types.Transaction)
- if err := tx.UnmarshalBinary(args.Data); err != nil {
- return common.Hash{}, err
- }
- hash, err := ethapi.SubmitTransaction(ctx, r.backend, tx)
- return hash, err
-}
-
-// FilterCriteria encapsulates the arguments to `logs` on the root resolver object.
-type FilterCriteria struct {
- FromBlock *hexutil.Uint64 // beginning of the queried range, nil means genesis block
- ToBlock *hexutil.Uint64 // end of the range, nil means latest block
- Addresses *[]common.Address // restricts matches to events created by specific contracts
-
- // The Topic list restricts matches to particular event topics. Each event has a list
- // of topics. Topics matches a prefix of that list. An empty element slice matches any
- // topic. Non-empty elements represent an alternative that matches any of the
- // contained topics.
- //
- // Examples:
- // {} or nil matches any topic list
- // {{A}} matches topic A in first position
- // {{}, {B}} matches any topic in first position, B in second position
- // {{A}, {B}} matches topic A in first position, B in second position
- // {{A, B}}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position
- Topics *[][]common.Hash
-}
-
-func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria }) ([]*Log, error) {
- // Convert the RPC block numbers into internal representations
- begin := rpc.LatestBlockNumber.Int64()
- if args.Filter.FromBlock != nil {
- begin = int64(*args.Filter.FromBlock)
- }
- end := rpc.LatestBlockNumber.Int64()
- if args.Filter.ToBlock != nil {
- end = int64(*args.Filter.ToBlock)
- }
- var addresses []common.Address
- if args.Filter.Addresses != nil {
- addresses = *args.Filter.Addresses
- }
- var topics [][]common.Hash
- if args.Filter.Topics != nil {
- topics = *args.Filter.Topics
- }
- // Construct the range filter
- filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
- return runFilter(ctx, r.backend, filter)
-}
-
-func (r *Resolver) GasPrice(ctx context.Context) (hexutil.Big, error) {
- price, err := r.backend.SuggestPrice(ctx)
- return hexutil.Big(*price), err
-}
-
-func (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) {
- return hexutil.Big(*r.backend.ChainConfig().ChainID), nil
-}
-
-// SyncState represents the synchronisation status returned from the `syncing` accessor.
-type SyncState struct {
- progress ethereum.SyncProgress
-}
-
-func (s *SyncState) StartingBlock() hexutil.Uint64 {
- return hexutil.Uint64(s.progress.StartingBlock)
-}
-
-func (s *SyncState) CurrentBlock() hexutil.Uint64 {
- return hexutil.Uint64(s.progress.CurrentBlock)
-}
-
-func (s *SyncState) HighestBlock() hexutil.Uint64 {
- return hexutil.Uint64(s.progress.HighestBlock)
-}
-
-func (s *SyncState) PulledStates() *hexutil.Uint64 {
- ret := hexutil.Uint64(s.progress.PulledStates)
- return &ret
-}
-
-func (s *SyncState) KnownStates() *hexutil.Uint64 {
- ret := hexutil.Uint64(s.progress.KnownStates)
- return &ret
-}
-
-// Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
-// yet received the latest block headers from its pears. In case it is synchronizing:
-// - startingBlock: block number this node started to synchronise from
-// - currentBlock: block number this node is currently importing
-// - highestBlock: block number of the highest block header this node has received from peers
-// - pulledStates: number of state entries processed until now
-// - knownStates: number of known state entries that still need to be pulled
-func (r *Resolver) Syncing() (*SyncState, error) {
- progress := r.backend.Downloader().Progress()
-
- // Return not syncing if the synchronisation already completed
- if progress.CurrentBlock >= progress.HighestBlock {
- return nil, nil
- }
- // Otherwise gather the block sync stats
- return &SyncState{progress}, nil
-}
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
deleted file mode 100644
index 498336d8f15..00000000000
--- a/graphql/graphql_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package graphql
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "math/big"
- "net/http"
- "strings"
- "testing"
- "time"
-
- "github.com/ledgerwatch/turbo-geth/consensus/ethash"
- "github.com/ledgerwatch/turbo-geth/core"
- "github.com/ledgerwatch/turbo-geth/eth"
- "github.com/ledgerwatch/turbo-geth/eth/ethconfig"
- "github.com/ledgerwatch/turbo-geth/eth/stagedsync"
- "github.com/ledgerwatch/turbo-geth/ethdb"
- "github.com/ledgerwatch/turbo-geth/node"
- "github.com/ledgerwatch/turbo-geth/params"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestBuildSchema(t *testing.T) {
- ddir, err := ioutil.TempDir("", "graphql-buildschema")
- if err != nil {
- t.Fatalf("failed to create temporary datadir: %v", err)
- }
- // Copy config
- conf := node.DefaultConfig
- conf.DataDir = ddir
- stack, err := node.New(&conf)
- if err != nil {
- t.Fatalf("could not create new node: %v", err)
- }
- // Make sure the schema can be parsed and matched up to the object model.
- if err := newHandler(stack, nil, []string{}, []string{}); err != nil {
- t.Errorf("Could not construct GraphQL handler: %v", err)
- }
-}
-
-// Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint
-func TestGraphQLBlockSerialization(t *testing.T) {
- stack := createNode(t, true)
- defer stack.Close()
- // start node
- if err := stack.Start(); err != nil {
- t.Fatalf("could not start node: %v", err)
- }
-
- for i, tt := range []struct {
- body string
- want string
- code int
- }{
- { // Should return latest block
- body: `{"query": "{block{number}}","variables": null}`,
- want: `{"data":{"block":{"number":10}}}`,
- code: 200,
- },
- { // Should return info about latest block
- body: `{"query": "{block{number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":{"number":10,"gasUsed":0,"gasLimit":11500000}}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:0){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":{"number":0,"gasUsed":0,"gasLimit":11500000}}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:-1){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":null}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:-500){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":null}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:\"0\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":{"number":0,"gasUsed":0,"gasLimit":11500000}}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:\"-33\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":null}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:\"1337\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"data":{"block":null}}`,
- code: 200,
- },
- {
- body: `{"query": "{block(number:\"0xbad\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"errors":[{"message":"strconv.ParseInt: parsing \"0xbad\": invalid syntax"}],"data":{}}`,
- code: 400,
- },
- { // hex strings are currently not supported. If that's added to the spec, this test will need to change
- body: `{"query": "{block(number:\"0x0\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"errors":[{"message":"strconv.ParseInt: parsing \"0x0\": invalid syntax"}],"data":{}}`,
- code: 400,
- },
- {
- body: `{"query": "{block(number:\"a\"){number,gasUsed,gasLimit}}","variables": null}`,
- want: `{"errors":[{"message":"strconv.ParseInt: parsing \"a\": invalid syntax"}],"data":{}}`,
- code: 400,
- },
- {
- body: `{"query": "{bleh{number}}","variables": null}"`,
- want: `{"errors":[{"message":"Cannot query field \"bleh\" on type \"Query\".","locations":[{"line":1,"column":2}]}]}`,
- code: 400,
- },
- // should return `estimateGas` as decimal
- {
- body: `{"query": "{block{ estimateGas(data:{}) }}"}`,
- want: `{"data":{"block":{"estimateGas":53000}}}`,
- code: 200,
- },
- // should return `status` as decimal
- {
- body: `{"query": "{block {number call (data : {from : \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\", to: \"0x6295ee1b4f6dd65047762f924ecd367c17eabf8f\", data :\"0x12a7b914\"}){data status}}}"}`,
- want: `{"data":{"block":{"number":10,"call":{"data":"0x","status":1}}}}`,
- code: 200,
- },
- } {
- resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body))
- if err != nil {
- t.Fatalf("could not post: %v", err)
- }
- bodyBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("could not read from response body: %v", err)
- }
- if have := string(bodyBytes); have != tt.want {
- t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want)
- }
- if tt.code != resp.StatusCode {
- t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code)
- }
- }
-}
-
-// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint
-func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {
- stack := createNode(t, false)
- defer stack.Close()
- if err := stack.Start(); err != nil {
- t.Fatalf("could not start node: %v", err)
- }
- body := strings.NewReader(`{"query": "{block{number}}","variables": null}`)
- resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", body)
- if err != nil {
- t.Fatalf("could not post: %v", err)
- }
- // make sure the request is not handled successfully
- assert.Equal(t, http.StatusNotFound, resp.StatusCode)
-}
-
-func createNode(t *testing.T, gqlEnabled bool) *node.Node {
- stack, err := node.New(&node.Config{
- HTTPHost: "127.0.0.1",
- HTTPPort: 0,
- WSHost: "127.0.0.1",
- WSPort: 0,
- })
- if err != nil {
- t.Fatalf("could not create node: %v", err)
- }
- if !gqlEnabled {
- return stack
- }
- createGQLService(t, stack)
- return stack
-}
-
-func createGQLService(t *testing.T, stack *node.Node) { //nolint:unparam
- // create backend
- ethConf := ðconfig.Config{
- Genesis: &core.Genesis{
- Config: params.AllEthashProtocolChanges,
- GasLimit: 11500000,
- Difficulty: big.NewInt(1048576),
- },
- Ethash: ethash.Config{
- PowMode: ethash.ModeFake,
- },
- NetworkID: 1337,
- TrieCleanCache: 5,
- TrieCleanCacheJournal: "triecache",
- TrieCleanCacheRejournal: 60 * time.Minute,
- TrieDirtyCache: 5,
- TrieTimeout: 60 * time.Minute,
- SnapshotCache: 5,
- }
- ethBackend, err := eth.New(stack, ethConf)
- if err != nil {
- t.Fatalf("could not create eth backend: %v", err)
- }
- // Create some blocks and import them
- chain, _, _ := core.GenerateChain(
- params.AllEthashProtocolChanges,
- ethBackend.BlockChain().Genesis(),
- ethash.NewFaker(), ethBackend.ChainDb().(*ethdb.ObjectDatabase), 10, func(i int, gen *core.BlockGen) {}, false)
- if _, err = stagedsync.InsertBlocksInStages(ethBackend.ChainDb(), ethdb.DefaultStorageMode, params.TestChainConfig, ethBackend.BlockChain().GetVMConfig(), ethBackend.BlockChain().Engine(), chain, true /* checkRoot */); err != nil {
- t.Fatalf("could not create import blocks: %v", err)
- }
- _, err = ethBackend.BlockChain().InsertChain(context.TODO(), chain)
- if err != nil {
- t.Fatalf("could not create import blocks: %v", err)
- }
-
- // create gql service
- err = New(stack, ethBackend.APIBackend, []string{}, []string{})
- if err != nil {
- t.Fatalf("could not create graphql service: %v", err)
- }
-}
diff --git a/graphql/schema.go b/graphql/schema.go
deleted file mode 100644
index 6ea63db6367..00000000000
--- a/graphql/schema.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package graphql
-
-const schema string = `
- # Bytes32 is a 32 byte binary string, represented as 0x-prefixed hexadecimal.
- scalar Bytes32
- # Address is a 20 byte Ethereum address, represented as 0x-prefixed hexadecimal.
- scalar Address
- # Bytes is an arbitrary length binary string, represented as 0x-prefixed hexadecimal.
- # An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles.
- scalar Bytes
- # BigInt is a large integer. Input is accepted as either a JSON number or as a string.
- # Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all
- # 0x-prefixed hexadecimal.
- scalar BigInt
- # Long is a 64 bit unsigned integer.
- scalar Long
-
- schema {
- query: Query
- mutation: Mutation
- }
-
- # Account is an Ethereum account at a particular block.
- type Account {
- # Address is the address owning the account.
- address: Address!
- # Balance is the balance of the account, in wei.
- balance: BigInt!
- # TransactionCount is the number of transactions sent from this account,
- # or in the case of a contract, the number of contracts created. Otherwise
- # known as the nonce.
- transactionCount: Long!
- # Code contains the smart contract code for this account, if the account
- # is a (non-self-destructed) contract.
- code: Bytes!
- # Storage provides access to the storage of a contract account, indexed
- # by its 32 byte slot identifier.
- storage(slot: Bytes32!): Bytes32!
- }
-
- # Log is an Ethereum event log.
- type Log {
- # Index is the index of this log in the block.
- index: Int!
- # Account is the account which generated this log - this will always
- # be a contract account.
- account(block: Long): Account!
- # Topics is a list of 0-4 indexed topics for the log.
- topics: [Bytes32!]!
- # Data is unindexed data for this log.
- data: Bytes!
- # Transaction is the transaction that generated this log entry.
- transaction: Transaction!
- }
-
- # Transaction is an Ethereum transaction.
- type Transaction {
- # Hash is the hash of this transaction.
- hash: Bytes32!
- # Nonce is the nonce of the account this transaction was generated with.
- nonce: Long!
- # Index is the index of this transaction in the parent block. This will
- # be null if the transaction has not yet been mined.
- index: Int
- # From is the account that sent this transaction - this will always be
- # an externally owned account.
- from(block: Long): Account!
- # To is the account the transaction was sent to. This is null for
- # contract-creating transactions.
- to(block: Long): Account
- # Value is the value, in wei, sent along with this transaction.
- value: BigInt!
- # GasPrice is the price offered to miners for gas, in wei per unit.
- gasPrice: BigInt!
- # Gas is the maximum amount of gas this transaction can consume.
- gas: Long!
- # InputData is the data supplied to the target of the transaction.
- inputData: Bytes!
- # Block is the block this transaction was mined in. This will be null if
- # the transaction has not yet been mined.
- block: Block
-
- # Status is the return status of the transaction. This will be 1 if the
- # transaction succeeded, or 0 if it failed (due to a revert, or due to
- # running out of gas). If the transaction has not yet been mined, this
- # field will be null.
- status: Long
- # GasUsed is the amount of gas that was used processing this transaction.
- # If the transaction has not yet been mined, this field will be null.
- gasUsed: Long
- # CumulativeGasUsed is the total gas used in the block up to and including
- # this transaction. If the transaction has not yet been mined, this field
- # will be null.
- cumulativeGasUsed: Long
- # CreatedContract is the account that was created by a contract creation
- # transaction. If the transaction was not a contract creation transaction,
- # or it has not yet been mined, this field will be null.
- createdContract(block: Long): Account
- # Logs is a list of log entries emitted by this transaction. If the
- # transaction has not yet been mined, this field will be null.
- logs: [Log!]
- r: BigInt!
- s: BigInt!
- v: BigInt!
- }
-
- # BlockFilterCriteria encapsulates log filter criteria for a filter applied
- # to a single block.
- input BlockFilterCriteria {
- # Addresses is list of addresses that are of interest. If this list is
- # empty, results will not be filtered by address.
- addresses: [Address!]
- # Topics list restricts matches to particular event topics. Each event has a list
- # of topics. Topics matches a prefix of that list. An empty element array matches any
- # topic. Non-empty elements represent an alternative that matches any of the
- # contained topics.
- #
- # Examples:
- # - [] or nil matches any topic list
- # - [[A]] matches topic A in first position
- # - [[], [B]] matches any topic in first position, B in second position
- # - [[A], [B]] matches topic A in first position, B in second position
- # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
- topics: [[Bytes32!]!]
- }
-
- # Block is an Ethereum block.
- type Block {
- # Number is the number of this block, starting at 0 for the genesis block.
- number: Long!
- # Hash is the block hash of this block.
- hash: Bytes32!
- # Parent is the parent block of this block.
- parent: Block
- # Nonce is the block nonce, an 8 byte sequence determined by the miner.
- nonce: Bytes!
- # TransactionsRoot is the keccak256 hash of the root of the trie of transactions in this block.
- transactionsRoot: Bytes32!
- # TransactionCount is the number of transactions in this block. if
- # transactions are not available for this block, this field will be null.
- transactionCount: Int
- # StateRoot is the keccak256 hash of the state trie after this block was processed.
- stateRoot: Bytes32!
- # ReceiptsRoot is the keccak256 hash of the trie of transaction receipts in this block.
- receiptsRoot: Bytes32!
- # Miner is the account that mined this block.
- miner(block: Long): Account!
- # ExtraData is an arbitrary data field supplied by the miner.
- extraData: Bytes!
- # GasLimit is the maximum amount of gas that was available to transactions in this block.
- gasLimit: Long!
- # GasUsed is the amount of gas that was used executing transactions in this block.
- gasUsed: Long!
- # Timestamp is the unix timestamp at which this block was mined.
- timestamp: Long!
- # LogsBloom is a bloom filter that can be used to check if a block may
- # contain log entries matching a filter.
- logsBloom: Bytes!
- # MixHash is the hash that was used as an input to the PoW process.
- mixHash: Bytes32!
- # Difficulty is a measure of the difficulty of mining this block.
- difficulty: BigInt!
- # TotalDifficulty is the sum of all difficulty values up to and including
- # this block.
- totalDifficulty: BigInt!
- # OmmerCount is the number of ommers (AKA uncles) associated with this
- # block. If ommers are unavailable, this field will be null.
- ommerCount: Int
- # Ommers is a list of ommer (AKA uncle) blocks associated with this block.
- # If ommers are unavailable, this field will be null. Depending on your
- # node, the transactions, transactionAt, transactionCount, ommers,
- # ommerCount and ommerAt fields may not be available on any ommer blocks.
- ommers: [Block]
- # OmmerAt returns the ommer (AKA uncle) at the specified index. If ommers
- # are unavailable, or the index is out of bounds, this field will be null.
- ommerAt(index: Int!): Block
- # OmmerHash is the keccak256 hash of all the ommers (AKA uncles)
- # associated with this block.
- ommerHash: Bytes32!
- # Transactions is a list of transactions associated with this block. If
- # transactions are unavailable for this block, this field will be null.
- transactions: [Transaction!]
- # TransactionAt returns the transaction at the specified index. If
- # transactions are unavailable for this block, or if the index is out of
- # bounds, this field will be null.
- transactionAt(index: Int!): Transaction
- # Logs returns a filtered set of logs from this block.
- logs(filter: BlockFilterCriteria!): [Log!]!
- # Account fetches an Ethereum account at the current block's state.
- account(address: Address!): Account!
- # Call executes a local call operation at the current block's state.
- call(data: CallData!): CallResult
- # EstimateGas estimates the amount of gas that will be required for
- # successful execution of a transaction at the current block's state.
- estimateGas(data: CallData!): Long!
- }
-
- # CallData represents the data associated with a local contract call.
- # All fields are optional.
- input CallData {
- # From is the address making the call.
- from: Address
- # To is the address the call is sent to.
- to: Address
- # Gas is the amount of gas sent with the call.
- gas: Long
- # GasPrice is the price, in wei, offered for each unit of gas.
- gasPrice: BigInt
- # Value is the value, in wei, sent along with the call.
- value: BigInt
- # Data is the data sent to the callee.
- data: Bytes
- }
-
- # CallResult is the result of a local call operation.
- type CallResult {
- # Data is the return data of the called contract.
- data: Bytes!
- # GasUsed is the amount of gas used by the call, after any refunds.
- gasUsed: Long!
- # Status is the result of the call - 1 for success or 0 for failure.
- status: Long!
- }
-
- # FilterCriteria encapsulates log filter criteria for searching log entries.
- input FilterCriteria {
- # FromBlock is the block at which to start searching, inclusive. Defaults
- # to the latest block if not supplied.
- fromBlock: Long
- # ToBlock is the block at which to stop searching, inclusive. Defaults
- # to the latest block if not supplied.
- toBlock: Long
- # Addresses is a list of addresses that are of interest. If this list is
- # empty, results will not be filtered by address.
- addresses: [Address!]
- # Topics list restricts matches to particular event topics. Each event has a list
- # of topics. Topics matches a prefix of that list. An empty element array matches any
- # topic. Non-empty elements represent an alternative that matches any of the
- # contained topics.
- #
- # Examples:
- # - [] or nil matches any topic list
- # - [[A]] matches topic A in first position
- # - [[], [B]] matches any topic in first position, B in second position
- # - [[A], [B]] matches topic A in first position, B in second position
- # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
- topics: [[Bytes32!]!]
- }
-
- # SyncState contains the current synchronisation state of the client.
- type SyncState{
- # StartingBlock is the block number at which synchronisation started.
- startingBlock: Long!
- # CurrentBlock is the point at which synchronisation has presently reached.
- currentBlock: Long!
- # HighestBlock is the latest known block number.
- highestBlock: Long!
- # PulledStates is the number of state entries fetched so far, or null
- # if this is not known or not relevant.
- pulledStates: Long
- # KnownStates is the number of states the node knows of so far, or null
- # if this is not known or not relevant.
- knownStates: Long
- }
-
- # Pending represents the current pending state.
- type Pending {
- # TransactionCount is the number of transactions in the pending state.
- transactionCount: Int!
- # Transactions is a list of transactions in the current pending state.
- transactions: [Transaction!]
- # Account fetches an Ethereum account for the pending state.
- account(address: Address!): Account!
- # Call executes a local call operation for the pending state.
- call(data: CallData!): CallResult
- # EstimateGas estimates the amount of gas that will be required for
- # successful execution of a transaction for the pending state.
- estimateGas(data: CallData!): Long!
- }
-
- type Query {
- # Block fetches an Ethereum block by number or by hash. If neither is
- # supplied, the most recent known block is returned.
- block(number: Long, hash: Bytes32): Block
- # Blocks returns all the blocks between two numbers, inclusive. If
- # to is not supplied, it defaults to the most recent known block.
- blocks(from: Long, to: Long): [Block!]!
- # Pending returns the current pending state.
- pending: Pending!
- # Transaction returns a transaction specified by its hash.
- transaction(hash: Bytes32!): Transaction
- # Logs returns log entries matching the provided filter.
- logs(filter: FilterCriteria!): [Log!]!
- # GasPrice returns the node's estimate of a gas price sufficient to
- # ensure a transaction is mined in a timely fashion.
- gasPrice: BigInt!
- # Syncing returns information on the current synchronisation state.
- syncing: SyncState
- # ChainID returns the current chain ID for transaction replay protection.
- chainID: BigInt!
- }
-
- type Mutation {
- # SendRawTransaction sends an RLP-encoded transaction to the network.
- sendRawTransaction(data: Bytes!): Bytes32!
- }
-`
diff --git a/graphql/service.go b/graphql/service.go
deleted file mode 100644
index d11380f7620..00000000000
--- a/graphql/service.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package graphql
-
-import (
- "encoding/json"
- "net/http"
-
- "github.com/graph-gophers/graphql-go"
- "github.com/ledgerwatch/turbo-geth/internal/ethapi"
- "github.com/ledgerwatch/turbo-geth/node"
-)
-
-type handler struct {
- Schema *graphql.Schema
-}
-
-func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- var params struct {
- Query string `json:"query"`
- OperationName string `json:"operationName"`
- Variables map[string]interface{} `json:"variables"`
- }
-	if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- response := h.Schema.Exec(r.Context(), params.Query, params.OperationName, params.Variables)
- responseJSON, err := json.Marshal(response)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- if len(response.Errors) > 0 {
- w.WriteHeader(http.StatusBadRequest)
- }
-
- w.Header().Set("Content-Type", "application/json")
- //nolint:errcheck
- w.Write(responseJSON)
-
-}
-
-// New constructs a new GraphQL service instance.
-func New(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {
- if backend == nil {
- panic("missing backend")
- }
- // check if http server with given endpoint exists and enable graphQL on it
- return newHandler(stack, backend, cors, vhosts)
-}
-
-// newHandler returns a new `http.Handler` that will answer GraphQL queries.
-// It additionally exports an interactive query browser on the / endpoint.
-func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {
- q := Resolver{backend}
-
- s, err := graphql.ParseSchema(schema, &q)
- if err != nil {
- return err
- }
- h := handler{Schema: s}
- handler := node.NewHTTPHandlerStack(h, cors, vhosts)
-
- stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
- stack.RegisterHandler("GraphQL", "/graphql", handler)
- stack.RegisterHandler("GraphQL", "/graphql/", handler)
-
- return nil
-}
diff --git a/node/config.go b/node/config.go
index 69543d49f67..cf055ac9987 100644
--- a/node/config.go
+++ b/node/config.go
@@ -173,20 +173,6 @@ type Config struct {
// private APIs to untrusted users is a major security risk.
WSExposeAll bool `toml:",omitempty"`
- // GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting
- // clients. Please be aware that CORS is a browser enforced security, it's fully
- // useless for custom HTTP clients.
- GraphQLCors []string `toml:",omitempty"`
-
- // GraphQLVirtualHosts is the list of virtual hostnames which are allowed on incoming requests.
- // This is by default {'localhost'}. Using this prevents attacks like
- // DNS rebinding, which bypasses SOP by simply masquerading as being within the same
- // origin. These attacks do not utilize CORS, since they are not cross-domain.
- // By explicitly checking the Host-header, the server will not allow requests
- // made against the server with a malicious host domain.
- // Requests using ip address directly are not affected
- GraphQLVirtualHosts []string `toml:",omitempty"`
-
// Logger is a custom logger to use with the p2p.Server.
Logger log.Logger `toml:",omitempty"`
@@ -286,7 +272,7 @@ func DefaultWSEndpoint() string {
}
// ExtRPCEnabled returns the indicator whether node enables the external
-// RPC(http, ws or graphql).
+// RPC(http or ws).
func (c *Config) ExtRPCEnabled() bool {
return c.HTTPHost != "" || c.WSHost != ""
}
diff --git a/node/defaults.go b/node/defaults.go
index cf2f99226cf..42a338d2758 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -29,24 +29,21 @@ import (
)
const (
- DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server
- DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server
- DefaultWSHost = "localhost" // Default host interface for the websocket RPC server
- DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
- DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server
- DefaultGraphQLPort = 8547 // Default TCP port for the GraphQL server
+ DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server
+ DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server
+ DefaultWSHost = "localhost" // Default host interface for the websocket RPC server
+ DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
)
// DefaultConfig contains reasonable default settings.
var DefaultConfig = Config{
- DataDir: DefaultDataDir(),
- HTTPPort: DefaultHTTPPort,
- HTTPModules: []string{"net", "web3"},
- HTTPVirtualHosts: []string{"localhost"},
- HTTPTimeouts: rpc.DefaultHTTPTimeouts,
- WSPort: DefaultWSPort,
- WSModules: []string{"net", "web3"},
- GraphQLVirtualHosts: []string{"localhost"},
+ DataDir: DefaultDataDir(),
+ HTTPPort: DefaultHTTPPort,
+ HTTPModules: []string{"net", "web3"},
+ HTTPVirtualHosts: []string{"localhost"},
+ HTTPTimeouts: rpc.DefaultHTTPTimeouts,
+ WSPort: DefaultWSPort,
+ WSModules: []string{"net", "web3"},
P2P: p2p.Config{
ListenAddr: ":30303",
MaxPeers: 50,
diff --git a/params/config.go b/params/config.go
index 8d66bb374b8..02bd3d8a467 100644
--- a/params/config.go
+++ b/params/config.go
@@ -244,16 +244,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
@@ -327,7 +327,6 @@ type ChainConfig struct {
BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
YoloV3Block *big.Int `json:"yoloV3Block,omitempty"` // YOLO v3: Gas repricings TODO @holiman add EIP references
- EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
@@ -440,11 +439,6 @@ func (c *ChainConfig) IsBerlin(num *big.Int) bool {
return isForked(c.BerlinBlock, num) || isForked(c.YoloV3Block, num)
}
-// IsEWASM returns whether num represents a block number after the EWASM fork
-func (c *ChainConfig) IsEWASM(num *big.Int) bool {
- return isForked(c.EWASMBlock, num)
-}
-
// CheckCompatible checks whether scheduled fork transitions have been imported
// with a mismatching chain configuration.
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError {
@@ -553,9 +547,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.YoloV3Block, newcfg.YoloV3Block, head) {
return newCompatError("YOLOv3 fork block", c.YoloV3Block, newcfg.YoloV3Block)
}
- if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
- return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
- }
return nil
}
diff --git a/params/eip_ctx.go b/params/eip_ctx.go
index 6c389f042f4..3a349061987 100644
--- a/params/eip_ctx.go
+++ b/params/eip_ctx.go
@@ -15,7 +15,6 @@ const (
IsByzantiumEnabled
IsConstantinopleEnabled
IsPetersburgEnabled
- IsEWASM
BlockNumber
NoHistory
WithHistoryHighest
@@ -29,7 +28,6 @@ func (c *ChainConfig) WithEIPsFlags(ctx context.Context, blockNum *big.Int) cont
ctx = context.WithValue(ctx, IsByzantiumEnabled, c.IsByzantium(blockNum))
ctx = context.WithValue(ctx, IsConstantinopleEnabled, c.IsConstantinople(blockNum))
ctx = context.WithValue(ctx, IsPetersburgEnabled, c.IsPetersburg(blockNum))
- ctx = context.WithValue(ctx, IsEWASM, c.IsEWASM(blockNum))
ctx = context.WithValue(ctx, BlockNumber, blockNum)
return ctx
}
diff --git a/tests/init_test.go b/tests/init_test.go
index 738b7cd6a52..e33fcb4e549 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -18,7 +18,6 @@ package tests
import (
"encoding/json"
- "flag"
"fmt"
"io"
"io/ioutil"
@@ -34,12 +33,6 @@ import (
"github.com/ledgerwatch/turbo-geth/params"
)
-// Command line flags to configure the interpreters.
-var (
- testEVM = flag.String("vm.evm", "", "EVM configuration")
- testEWASM = flag.String("vm.ewasm", "", "EWASM configuration")
-)
-
var (
baseDir = filepath.Join(".", "testdata")
blockTestDir = filepath.Join(baseDir, "BlockchainTests")
diff --git a/tests/state_test.go b/tests/state_test.go
index f06781f3ae1..7bc18611297 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -90,7 +90,7 @@ const traceErrorLimit = 400000
func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
// Use config from command line arguments.
- config := vm.Config{EVMInterpreter: *testEVM, EWASMInterpreter: *testEWASM}
+ config := vm.Config{}
err := test(config)
if err == nil {
return
diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go
index cf1789215cd..27348a9d011 100644
--- a/turbo/cli/default_flags.go
+++ b/turbo/cli/default_flags.go
@@ -55,8 +55,6 @@ var DefaultFlags = []cli.Flag{
utils.FakePoWFlag,
utils.GpoBlocksFlag,
utils.GpoPercentileFlag,
- utils.EWASMInterpreterFlag,
- utils.EVMInterpreterFlag,
utils.InsecureUnlockAllowedFlag,
utils.MetricsEnabledFlag,
utils.MetricsEnabledExpensiveFlag,
diff --git a/turbo/node/node.go b/turbo/node/node.go
index 3f00e975b7b..6ae3b586ea6 100644
--- a/turbo/node/node.go
+++ b/turbo/node/node.go
@@ -178,9 +178,6 @@ func prepare(ctx *cli.Context) {
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
debug.SetGCPercent(int(gogc))
- // Start metrics export if enabled
- utils.SetupMetrics(ctx)
-
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(10 * time.Second)
}
diff --git a/turbo/stages/blockchain_test.go b/turbo/stages/blockchain_test.go
index abb4e8e025f..36bd1caf11b 100644
--- a/turbo/stages/blockchain_test.go
+++ b/turbo/stages/blockchain_test.go
@@ -2160,9 +2160,6 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
if ancient.CurrentFastBlock().NumberU64() != previousFastBlock.NumberU64() {
t.Fatalf("failed to rollback ancient data, want %d, have %d", previousFastBlock.NumberU64(), ancient.CurrentFastBlock().NumberU64())
}
- //if frozen, err := ancient.ChainDb().Ancients(); err != nil || frozen != 1 {
- // t.Fatalf("failed to truncate ancient data")
- //}
ancient.TerminateInsert = nil
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)