diff --git a/install/install.sh b/install/install.sh index 4e6fd6a55..d146c1812 100755 --- a/install/install.sh +++ b/install/install.sh @@ -361,7 +361,7 @@ install() { { cp -r "$PACKAGE_FILES_PATH/override" "$RP_SHARE_PATH" || fail "Could not copy override folder to the Rocket Pool system directory."; } >&2 { cp -r "$PACKAGE_FILES_PATH/scripts" "$RP_SHARE_PATH" || fail "Could not copy scripts folder to the Rocket Pool system directory."; } >&2 { cp -r "$PACKAGE_FILES_PATH/templates" "$RP_SHARE_PATH" || fail "Could not copy templates folder to the Rocket Pool system directory."; } >&2 - { find "$HD_SHARE_PATH/scripts" -name "*.sh" -exec chmod +x {} \; 2>/dev/null || fail "Could not set executable permissions on package files."; } >&2 + { find "$RP_SHARE_PATH/scripts" -name "*.sh" -exec chmod +x {} \; 2>/dev/null || fail "Could not set executable permissions on package files."; } >&2 # Clean up unnecessary files from old installations diff --git a/src/go.mod b/src/go.mod index 92e46f701..13062c0a2 100644 --- a/src/go.mod +++ b/src/go.mod @@ -28,7 +28,7 @@ require ( github.com/prometheus/client_golang v1.18.0 github.com/prysmaticlabs/prysm/v4 v4.2.1 github.com/rivo/tview v0.0.0-20230208211350-7dfff1ce7854 - github.com/rocket-pool/node-manager-core v0.1.1-0.20240324201808-55b9188c549b + github.com/rocket-pool/node-manager-core v0.1.1-0.20240325035720-824357e2dad7 github.com/rocket-pool/rocketpool-go v1.8.3-0.20240324192746-e467edda77d1 github.com/shirou/gopsutil/v3 v3.24.2 github.com/tyler-smith/go-bip39 v1.1.0 diff --git a/src/go.sum b/src/go.sum index 61c8ef125..3062ec985 100644 --- a/src/go.sum +++ b/src/go.sum @@ -577,8 +577,8 @@ github.com/rocket-pool/batch-query v1.0.0 h1:5HejmT1n1fIdLIqUhTNwbkG2PGOPl3IVjCp github.com/rocket-pool/batch-query v1.0.0/go.mod h1:d1CmxShzk0fioJ4yX0eFGhz2an1odnW/LZ2cp3eDGIQ= github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd h1:p9KuetSKB9nte9I/MkkiM3pwKFVQgqxxPTQ0y56Ff6s= github.com/rocket-pool/go-merkletree v1.0.1-0.20220406020931-c262d9b976dd/go.mod h1:UE9fof8P7iESVtLn1K9CTSkNRYVFHZHlf96RKbU33kA= -github.com/rocket-pool/node-manager-core v0.1.1-0.20240324201808-55b9188c549b h1:YGc9XdzpxvmGPAF38wPdOZcZFhKHxGoP4Uq6M5+vh1A= -github.com/rocket-pool/node-manager-core v0.1.1-0.20240324201808-55b9188c549b/go.mod h1:mUIsrmHwbpBwYo1Gp8nPmQ6SeNGxxgXMssT83eicDso= +github.com/rocket-pool/node-manager-core v0.1.1-0.20240325035720-824357e2dad7 h1:u/SGtrPcGDKZmH+wS9XEP5+T/UrwtrwIwqTV7CXS/aE= +github.com/rocket-pool/node-manager-core v0.1.1-0.20240325035720-824357e2dad7/go.mod h1:mUIsrmHwbpBwYo1Gp8nPmQ6SeNGxxgXMssT83eicDso= github.com/rocket-pool/rocketpool-go v1.8.3-0.20240324192746-e467edda77d1 h1:tl639aAfFraUVH33mCEth/oLyHiLGyW/sVm9NOb1FBs= github.com/rocket-pool/rocketpool-go v1.8.3-0.20240324192746-e467edda77d1/go.mod h1:NOI4fBev5mMwZejmAWVOl/ysYLzfHne3b/sAXb2Dy1w= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/src/rocketpool-cli/rocketpool-cli.go b/src/rocketpool-cli/rocketpool-cli.go index 950436fcf..85dca1892 100644 --- a/src/rocketpool-cli/rocketpool-cli.go +++ b/src/rocketpool-cli/rocketpool-cli.go @@ -200,6 +200,8 @@ func validateFlags(c *cli.Context) error { return fmt.Errorf("Invalid nonce: %s\n", customNonce) } snCtx.Nonce = nonce + } else { + snCtx.Nonce = big.NewInt(0) } // Make sure the config directory exists diff --git a/src/rocketpool-daemon/node/defend-pdao-props.go b/src/rocketpool-daemon/node/defend-pdao-props.go index 9b061ad2e..579ee79a0 100644 --- 
a/src/rocketpool-daemon/node/defend-pdao-props.go +++ b/src/rocketpool-daemon/node/defend-pdao-props.go @@ -51,26 +51,29 @@ type DefendPdaoProps struct { } func NewDefendPdaoProps(ctx context.Context, sp *services.ServiceProvider, logger log.ColorLogger) *DefendPdaoProps { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) return &DefendPdaoProps{ ctx: ctx, sp: sp, - log: &logger, + log: log, + cfg: cfg, + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), + rs: cfg.GetRocketPoolResources(), + gasThreshold: cfg.AutoTxGasThreshold.Value, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, lastScannedBlock: nil, + intervalSize: big.NewInt(int64(config.EventLogInterval)), } } // Defend pDAO proposals func (t *DefendPdaoProps) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.w = t.sp.GetWallet() t.nodeAddress, _ = t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - t.intervalSize = big.NewInt(int64(config.EventLogInterval)) - t.rs = t.cfg.GetRocketPoolResources() // Bindings propMgr, err := proposals.NewProposalManager(t.ctx, t.log, t.cfg, t.rp, t.bc) diff --git a/src/rocketpool-daemon/node/distribute-minipools.go b/src/rocketpool-daemon/node/distribute-minipools.go index 9f9f24f64..7a1ee066e 100644 --- a/src/rocketpool-daemon/node/distribute-minipools.go +++ b/src/rocketpool-daemon/node/distribute-minipools.go @@ -26,7 +26,7 @@ import ( // Distribute minipools task type DistributeMinipools struct { sp *services.ServiceProvider - log log.ColorLogger + log *log.ColorLogger cfg *config.SmartNodeConfig w *wallet.Wallet rp *rocketpool.RocketPool @@ -42,46 +42,54 @@ type DistributeMinipools struct { // Create distribute minipools task func NewDistributeMinipools(sp *services.ServiceProvider, logger log.ColorLogger) *DistributeMinipools { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) + gasThreshold := cfg.AutoTxGasThreshold.Value + + if gasThreshold == 0 { + log.Println("Automatic tx gas threshold is 0, disabling auto-distribute.") + } + + distributeThresholdFloat := cfg.DistributeThreshold.Value + // Safety clamp + if distributeThresholdFloat >= 8 { + log.Printlnf("WARNING: Auto-distribute threshold is more than 8 ETH (%.6f ETH), reducing to 7.5 ETH for safety", distributeThresholdFloat) + distributeThresholdFloat = 7.5 + } else if distributeThresholdFloat == 0 { + log.Println("Auto-distribute threshold is 0, disabling auto-distribute.") + return nil + } + distributeThreshold := eth.EthToWei(distributeThresholdFloat) + return &DistributeMinipools{ - sp: sp, - log: logger, - eight: eth.EthToWei(8), + sp: sp, + log: log, + cfg: cfg, + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), + d: sp.GetDocker(), + gasThreshold: gasThreshold, + distributeThreshold: distributeThreshold, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, + eight: eth.EthToWei(8), } } // Distribute minipools func (t *DistributeMinipools) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.bc = t.sp.GetBeaconClient() - t.d = t.sp.GetDocker() - t.w = t.sp.GetWallet() - nodeAddress, _ := t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, &t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - // Check if auto-distributing 
is disabled if t.gasThreshold == 0 { - t.log.Println("Automatic tx gas threshold is 0, disabling auto-distribute.") - return nil - } - distributeThreshold := t.cfg.DistributeThreshold.Value - // Safety clamp - if distributeThreshold >= 8 { - t.log.Printlnf("WARNING: Auto-distribute threshold is more than 8 ETH (%.6f ETH), reducing to 7.5 ETH for safety", distributeThreshold) - distributeThreshold = 7.5 - } else if distributeThreshold == 0 { - t.log.Println("Auto-distribute threshold is 0, disabling auto-distribute.") return nil } - t.distributeThreshold = eth.EthToWei(distributeThreshold) // Log t.log.Println("Checking for minipools to distribute...") // Get prelaunch minipools + nodeAddress, _ := t.w.GetAddress() minipools, err := t.getDistributableMinipools(nodeAddress, state) if err != nil { return err @@ -191,7 +199,7 @@ func (t *DistributeMinipools) distributeMinipools(submissions []*eth.Transaction // Get the max fee maxFee := t.maxFee if maxFee == nil || maxFee.Uint64() == 0 { - maxFee, err = gas.GetMaxFeeWeiForDaemon(&t.log) + maxFee, err = gas.GetMaxFeeWeiForDaemon(t.log) if err != nil { return err } @@ -200,7 +208,7 @@ func (t *DistributeMinipools) distributeMinipools(submissions []*eth.Transaction opts.GasTipCap = t.maxPriorityFee // Print the gas info - if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, &t.log, maxFee) { + if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, t.log, maxFee) { return nil } @@ -213,7 +221,7 @@ func (t *DistributeMinipools) distributeMinipools(submissions []*eth.Transaction } // Print TX info and wait for them to be included in a block - err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, &t.log, submissions, callbacks, opts) + err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, t.log, submissions, callbacks, opts) if err != nil { return err } diff --git a/src/rocketpool-daemon/node/download-reward-trees.go b/src/rocketpool-daemon/node/download-reward-trees.go index 1377098ca..e357d8ac7 100644 --- a/src/rocketpool-daemon/node/download-reward-trees.go +++ b/src/rocketpool-daemon/node/download-reward-trees.go @@ -5,6 +5,7 @@ import ( "os" "github.com/rocket-pool/node-manager-core/utils/log" + "github.com/rocket-pool/rocketpool-go/rocketpool" rprewards "github.com/rocket-pool/smartnode/rocketpool-daemon/common/rewards" "github.com/rocket-pool/smartnode/rocketpool-daemon/common/services" "github.com/rocket-pool/smartnode/rocketpool-daemon/common/state" @@ -14,26 +15,25 @@ import ( // Manage download rewards trees task type DownloadRewardsTrees struct { sp *services.ServiceProvider - log log.ColorLogger + log *log.ColorLogger + cfg *config.SmartNodeConfig + rp *rocketpool.RocketPool } // Create manage fee recipient task func NewDownloadRewardsTrees(sp *services.ServiceProvider, logger log.ColorLogger) *DownloadRewardsTrees { return &DownloadRewardsTrees{ sp: sp, - log: logger, + log: &logger, + cfg: sp.GetConfig(), + rp: sp.GetRocketPool(), } } // Manage fee recipient func (t *DownloadRewardsTrees) Run(state *state.NetworkState) error { - // Get services - cfg := t.sp.GetConfig() - rp := t.sp.GetRocketPool() - nodeAddress, _ := t.sp.GetWallet().GetAddress() - // Check if the user opted into downloading rewards files - if cfg.RewardsTreeMode.Value != config.RewardsMode_Download { + if t.cfg.RewardsTreeMode.Value != config.RewardsMode_Download { return nil } @@ -42,12 +42,13 @@ func (t *DownloadRewardsTrees) Run(state *state.NetworkState) error { // Get the current interval currentIndex := 
state.NetworkDetails.RewardIndex + nodeAddress, _ := t.sp.GetWallet().GetAddress() // Check for missing intervals missingIntervals := []uint64{} for i := uint64(0); i < currentIndex; i++ { // Check if the tree file exists - treeFilePath := cfg.GetRewardsTreePath(i) + treeFilePath := t.cfg.GetRewardsTreePath(i) _, err := os.Stat(treeFilePath) if os.IsNotExist(err) { t.log.Printlnf("You are missing the rewards tree file for interval %d.", i) @@ -64,11 +65,11 @@ func (t *DownloadRewardsTrees) Run(state *state.NetworkState) error { // Download missing intervals for _, missingInterval := range missingIntervals { fmt.Printf("Downloading interval %d file... ", missingInterval) - intervalInfo, err := rprewards.GetIntervalInfo(rp, cfg, nodeAddress, missingInterval, nil) + intervalInfo, err := rprewards.GetIntervalInfo(t.rp, t.cfg, nodeAddress, missingInterval, nil) if err != nil { return fmt.Errorf("error getting interval %d info: %w", missingInterval, err) } - err = rprewards.DownloadRewardsFile(cfg, &intervalInfo) + err = rprewards.DownloadRewardsFile(t.cfg, &intervalInfo) if err != nil { fmt.Println() return err diff --git a/src/rocketpool-daemon/node/manage-fee-recipient.go b/src/rocketpool-daemon/node/manage-fee-recipient.go index 62f432868..c5ef6df36 100644 --- a/src/rocketpool-daemon/node/manage-fee-recipient.go +++ b/src/rocketpool-daemon/node/manage-fee-recipient.go @@ -7,6 +7,7 @@ import ( "os" "time" + "github.com/docker/docker/client" "github.com/ethereum/go-ethereum/common" "github.com/rocket-pool/node-manager-core/beacon" @@ -29,8 +30,9 @@ type ManageFeeRecipient struct { ctx context.Context sp *services.ServiceProvider cfg *config.SmartNodeConfig - log log.ColorLogger + log *log.ColorLogger bc beacon.IBeaconClient + d *client.Client } // Create manage fee recipient task @@ -38,22 +40,20 @@ func NewManageFeeRecipient(ctx context.Context, sp *services.ServiceProvider, lo return &ManageFeeRecipient{ ctx: ctx, sp: sp, - log: logger, + log: &logger, + cfg: sp.GetConfig(), + bc: sp.GetBeaconClient(), + d: sp.GetDocker(), } } // Manage fee recipient func (t *ManageFeeRecipient) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.bc = t.sp.GetBeaconClient() - d := t.sp.GetDocker() - nodeAddress, _ := t.sp.GetWallet().GetAddress() - // Log t.log.Println("Checking for correct fee recipient...") // Get the fee recipient info for the node + nodeAddress, _ := t.sp.GetWallet().GetAddress() feeRecipientInfo, err := t.getFeeRecipientInfo(nodeAddress, state) if err != nil { return fmt.Errorf("error getting fee recipient info: %w", err) @@ -90,7 +90,7 @@ func (t *ManageFeeRecipient) Run(state *state.NetworkState) error { t.log.Printlnf("Error updating fee recipient files: %s", err.Error()) t.log.Println("Shutting down the validator client for safety to prevent you from being penalized...") - err = validator.StopValidator(t.cfg, t.bc, &t.log, d, false) + err = validator.StopValidator(t.cfg, t.bc, t.log, t.d, false) if err != nil { return fmt.Errorf("error stopping validator client: %w", err) } @@ -99,7 +99,7 @@ func (t *ManageFeeRecipient) Run(state *state.NetworkState) error { // Restart the VC t.log.Println("Fee recipient files updated successfully! 
Restarting validator client...") - err = validator.StopValidator(t.cfg, t.bc, &t.log, d, true) + err = validator.StopValidator(t.cfg, t.bc, t.log, t.d, true) if err != nil { return fmt.Errorf("error restarting validator client: %w", err) } @@ -132,7 +132,7 @@ func (t *ManageFeeRecipient) getFeeRecipientInfo(nodeAddress common.Address, sta beaconConfig := state.BeaconConfig beaconHead, err := t.bc.GetBeaconHead(t.ctx) if err != nil { - return nil, fmt.Errorf("Error getting Beacon head: %w", err) + return nil, fmt.Errorf("error getting Beacon head: %w", err) } // Check if the user just opted out diff --git a/src/rocketpool-daemon/node/promote-minipools.go b/src/rocketpool-daemon/node/promote-minipools.go index 4eaf754e4..b285d353b 100644 --- a/src/rocketpool-daemon/node/promote-minipools.go +++ b/src/rocketpool-daemon/node/promote-minipools.go @@ -26,7 +26,7 @@ import ( // Promote minipools task type PromoteMinipools struct { sp *services.ServiceProvider - log log.ColorLogger + log *log.ColorLogger cfg *config.SmartNodeConfig w *wallet.Wallet rp *rocketpool.RocketPool @@ -38,27 +38,28 @@ type PromoteMinipools struct { // Create promote minipools task func NewPromoteMinipools(sp *services.ServiceProvider, logger log.ColorLogger) *PromoteMinipools { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) return &PromoteMinipools{ - sp: sp, - log: logger, + sp: sp, + log: log, + cfg: cfg, + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + gasThreshold: cfg.AutoTxGasThreshold.Value, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, } } // Stake prelaunch minipools func (t *PromoteMinipools) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.w = t.sp.GetWallet() - nodeAddress, _ := t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, &t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - // Log t.log.Println("Checking for minipools to promote...") // Get prelaunch minipools + nodeAddress, _ := t.w.GetAddress() minipools, err := t.getVacantMinipools(nodeAddress, state) if err != nil { return err @@ -176,7 +177,7 @@ func (t *PromoteMinipools) promoteMinipools(submissions []*eth.TransactionSubmis // Get the max fee maxFee := t.maxFee if maxFee == nil || maxFee.Uint64() == 0 { - maxFee, err = gas.GetMaxFeeWeiForDaemon(&t.log) + maxFee, err = gas.GetMaxFeeWeiForDaemon(t.log) if err != nil { return err } @@ -186,7 +187,7 @@ func (t *PromoteMinipools) promoteMinipools(submissions []*eth.TransactionSubmis // Print the gas info forceSubmissions := []*eth.TransactionSubmission{} - if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, &t.log, maxFee) { + if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, t.log, maxFee) { // Check for the timeout buffers for i, mpd := range minipools { creationTime := time.Unix(mpd.StatusTime.Int64(), 0) @@ -214,7 +215,7 @@ func (t *PromoteMinipools) promoteMinipools(submissions []*eth.TransactionSubmis } // Print TX info and wait for them to be included in a block - err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, &t.log, submissions, callbacks, opts) + err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, t.log, submissions, callbacks, opts) if err != nil { return err } diff --git a/src/rocketpool-daemon/node/reduce-bonds.go b/src/rocketpool-daemon/node/reduce-bonds.go index 1e48a3fb9..21a7abd46 100644 --- a/src/rocketpool-daemon/node/reduce-bonds.go +++ 
b/src/rocketpool-daemon/node/reduce-bonds.go @@ -34,7 +34,7 @@ const ( // Reduce bonds task type ReduceBonds struct { sp *services.ServiceProvider - log log.ColorLogger + log *log.ColorLogger cfg *config.SmartNodeConfig w *wallet.Wallet rp *rocketpool.RocketPool @@ -44,37 +44,33 @@ type ReduceBonds struct { maxPriorityFee *big.Int } -// Details required to check for bond reduction eligibility -type minipoolBondReductionDetails struct { - Address common.Address - DepositBalance *big.Int - ReduceBondTime time.Time - ReduceBondCancelled bool - Status types.MinipoolStatus -} - // Create reduce bonds task func NewReduceBonds(sp *services.ServiceProvider, logger log.ColorLogger) *ReduceBonds { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) + gasThreshold := cfg.AutoTxGasThreshold.Value + + if gasThreshold == 0 { + log.Println("Automatic tx gas threshold is 0, disabling auto-reduce.") + } + return &ReduceBonds{ - sp: sp, - log: logger, + sp: sp, + log: log, + cfg: cfg, + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + gasThreshold: gasThreshold, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, } } // Reduce bonds func (t *ReduceBonds) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.w = t.sp.GetWallet() - nodeAddress, _ := t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, &t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - // Check if auto-bond-reduction is disabled if t.gasThreshold == 0 { - t.log.Println("Automatic tx gas threshold is 0, disabling auto-reduce.") return nil } @@ -104,6 +100,7 @@ func (t *ReduceBonds) Run(state *state.NetworkState) error { } // Get reduceable minipools + nodeAddress, _ := t.w.GetAddress() minipools, mpBindings, err := t.getReduceableMinipools(nodeAddress, windowStart, windowLength, latestBlockTime, state, opts) if err != nil { return err @@ -213,14 +210,14 @@ func (t *ReduceBonds) forceFeeDistribution(state *state.NetworkState) (bool, err // Get the max fee maxFee := t.maxFee if maxFee == nil || maxFee.Uint64() == 0 { - maxFee, err = gas.GetMaxFeeWeiForDaemon(&t.log) + maxFee, err = gas.GetMaxFeeWeiForDaemon(t.log) if err != nil { return false, err } } // Print the gas info - if !gas.PrintAndCheckGasInfo(txInfo.SimulationResult, true, t.gasThreshold, &t.log, maxFee, txInfo.SimulationResult.SafeGasLimit) { + if !gas.PrintAndCheckGasInfo(txInfo.SimulationResult, true, t.gasThreshold, t.log, maxFee, txInfo.SimulationResult.SafeGasLimit) { return false, nil } @@ -229,7 +226,7 @@ func (t *ReduceBonds) forceFeeDistribution(state *state.NetworkState) (bool, err opts.GasLimit = txInfo.SimulationResult.SafeGasLimit // Print TX info and wait for it to be included in a block - err = tx.PrintAndWaitForTransaction(t.cfg, t.rp, &t.log, txInfo, opts) + err = tx.PrintAndWaitForTransaction(t.cfg, t.rp, t.log, txInfo, opts) if err != nil { return false, err } @@ -343,7 +340,7 @@ func (t *ReduceBonds) reduceBonds(submissions []*eth.TransactionSubmission, mini // Get the max fee maxFee := t.maxFee if maxFee == nil || maxFee.Uint64() == 0 { - maxFee, err = gas.GetMaxFeeWeiForDaemon(&t.log) + maxFee, err = gas.GetMaxFeeWeiForDaemon(t.log) if err != nil { return err } @@ -352,7 +349,7 @@ func (t *ReduceBonds) reduceBonds(submissions []*eth.TransactionSubmission, mini opts.GasTipCap = t.maxPriorityFee // Print the gas info - if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, &t.log, maxFee) { + if 
!gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, t.log, maxFee) { for _, mp := range minipools { timeSinceReductionStart := latestBlockTime.Sub(mp.ReduceBondTime.Formatted()) remainingTime := windowDuration - timeSinceReductionStart @@ -370,7 +367,7 @@ func (t *ReduceBonds) reduceBonds(submissions []*eth.TransactionSubmission, mini } // Print TX info and wait for them to be included in a block - err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, &t.log, submissions, callbacks, opts) + err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, t.log, submissions, callbacks, opts) if err != nil { return err } diff --git a/src/rocketpool-daemon/node/stake-prelaunch-minipools.go b/src/rocketpool-daemon/node/stake-prelaunch-minipools.go index 86df32876..57a8d899c 100644 --- a/src/rocketpool-daemon/node/stake-prelaunch-minipools.go +++ b/src/rocketpool-daemon/node/stake-prelaunch-minipools.go @@ -31,7 +31,7 @@ import ( // Stake prelaunch minipools task type StakePrelaunchMinipools struct { sp *services.ServiceProvider - log log.ColorLogger + log *log.ColorLogger cfg *config.SmartNodeConfig w *wallet.Wallet vMgr *validator.ValidatorManager @@ -46,30 +46,31 @@ type StakePrelaunchMinipools struct { // Create stake prelaunch minipools task func NewStakePrelaunchMinipools(sp *services.ServiceProvider, logger log.ColorLogger) *StakePrelaunchMinipools { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) return &StakePrelaunchMinipools{ - sp: sp, - log: logger, + sp: sp, + log: log, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + vMgr: sp.GetValidatorManager(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), + d: sp.GetDocker(), + gasThreshold: cfg.AutoTxGasThreshold.Value, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, } } // Stake prelaunch minipools func (t *StakePrelaunchMinipools) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.bc = t.sp.GetBeaconClient() - t.w = t.sp.GetWallet() - t.d = t.sp.GetDocker() - t.vMgr = t.sp.GetValidatorManager() - nodeAddress, _ := t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, &t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - // Log t.log.Println("Checking for minipools to launch...") // Get prelaunch minipools + nodeAddress, _ := t.w.GetAddress() minipools, err := t.getPrelaunchMinipools(nodeAddress, state) if err != nil { return err @@ -107,7 +108,7 @@ func (t *StakePrelaunchMinipools) Run(state *state.NetworkState) error { NOTE: This is prompted by the CLI now, so automatic restarting may be obviated // Restart validator process if any minipools were staked successfully if stakedMinipools { - if err := validator.RestartValidator(t.cfg, t.bc, &t.log, t.d); err != nil { + if err := validator.RestartValidator(t.cfg, t.bc, t.log, t.d); err != nil { return err } } @@ -228,7 +229,7 @@ func (t *StakePrelaunchMinipools) stakeMinipools(submissions []*eth.TransactionS // Get the max fee maxFee := t.maxFee if maxFee == nil || maxFee.Uint64() == 0 { - maxFee, err = gas.GetMaxFeeWeiForDaemon(&t.log) + maxFee, err = gas.GetMaxFeeWeiForDaemon(t.log) if err != nil { return false, err } @@ -239,7 +240,7 @@ func (t *StakePrelaunchMinipools) stakeMinipools(submissions []*eth.TransactionS // Print the gas info forceSubmissions := []*eth.TransactionSubmission{} forceMinipools := []*rpstate.NativeMinipoolDetails{} - if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, &t.log, 
maxFee) { + if !gas.PrintAndCheckGasInfoForBatch(submissions, true, t.gasThreshold, t.log, maxFee) { // Check for the timeout buffers for i, mpd := range minipools { prelaunchTime := time.Unix(mpd.StatusTime.Int64(), 0) @@ -270,7 +271,7 @@ func (t *StakePrelaunchMinipools) stakeMinipools(submissions []*eth.TransactionS } // Print TX info and wait for them to be included in a block - err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, &t.log, submissions, callbacks, opts) + err = tx.PrintAndWaitForTransactionBatch(t.cfg, t.rp, t.log, submissions, callbacks, opts) if err != nil { return false, err } diff --git a/src/rocketpool-daemon/node/verify-pdao-props.go b/src/rocketpool-daemon/node/verify-pdao-props.go index 98cdee5d4..725052291 100644 --- a/src/rocketpool-daemon/node/verify-pdao-props.go +++ b/src/rocketpool-daemon/node/verify-pdao-props.go @@ -59,11 +59,22 @@ type VerifyPdaoProps struct { } func NewVerifyPdaoProps(ctx context.Context, sp *services.ServiceProvider, logger log.ColorLogger) *VerifyPdaoProps { + cfg := sp.GetConfig() + log := &logger + maxFee, maxPriorityFee := getAutoTxInfo(cfg, log) return &VerifyPdaoProps{ ctx: ctx, sp: sp, - log: &logger, + log: log, + cfg: cfg, + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), + gasThreshold: cfg.AutoTxGasThreshold.Value, + maxFee: maxFee, + maxPriorityFee: maxPriorityFee, lastScannedBlock: nil, + intervalSize: big.NewInt(int64(config.EventLogInterval)), validPropCache: map[uint64]bool{}, rootSubmissionCache: map[uint64]map[uint64]*protocol.RootSubmitted{}, } @@ -71,17 +82,8 @@ func NewVerifyPdaoProps(ctx context.Context, sp *services.ServiceProvider, logge // Verify pDAO proposals func (t *VerifyPdaoProps) Run(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.w = t.sp.GetWallet() - t.nodeAddress, _ = t.w.GetAddress() - t.maxFee, t.maxPriorityFee = getAutoTxInfo(t.cfg, t.log) - t.gasThreshold = t.cfg.AutoTxGasThreshold.Value - t.intervalSize = big.NewInt(int64(config.EventLogInterval)) - // Bindings + t.nodeAddress, _ = t.w.GetAddress() propMgr, err := proposals.NewProposalManager(t.ctx, t.log, t.cfg, t.rp, t.bc) if err != nil { return fmt.Errorf("error creating proposal manager: %w", err) diff --git a/src/rocketpool-daemon/watchtower/cancel-bond-reductions.go b/src/rocketpool-daemon/watchtower/cancel-bond-reductions.go index ec46d22f8..e69787e7c 100644 --- a/src/rocketpool-daemon/watchtower/cancel-bond-reductions.go +++ b/src/rocketpool-daemon/watchtower/cancel-bond-reductions.go @@ -50,6 +50,10 @@ func NewCancelBondReductions(sp *services.ServiceProvider, logger log.ColorLogge sp: sp, log: logger, errLog: errorLogger, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), coll: coll, lock: lock, isRunning: false, @@ -95,11 +99,7 @@ func (t *CancelBondReductions) Run(state *state.NetworkState) error { // Check for bond reductions to cancel func (t *CancelBondReductions) checkBondReductions(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() + // Update contract bindings var err error t.mpMgr, err = minipool.NewMinipoolManager(t.rp) if err != nil { diff --git a/src/rocketpool-daemon/watchtower/check-solo-migrations.go b/src/rocketpool-daemon/watchtower/check-solo-migrations.go index dd127feb6..e4c9abb32 100644 --- 
a/src/rocketpool-daemon/watchtower/check-solo-migrations.go +++ b/src/rocketpool-daemon/watchtower/check-solo-migrations.go @@ -54,6 +54,11 @@ func NewCheckSoloMigrations(sp *services.ServiceProvider, logger log.ColorLogger sp: sp, log: logger, errLog: errorLogger, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), + bc: sp.GetBeaconClient(), coll: coll, lock: lock, isRunning: false, @@ -100,12 +105,7 @@ func (t *CheckSoloMigrations) Run(state *state.NetworkState) error { // Check for solo staker migration validity func (t *CheckSoloMigrations) checkSoloMigrations(state *state.NetworkState) error { - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() + // Update contract bindings var err error t.mpMgr, err = minipool.NewMinipoolManager(t.rp) if err != nil { diff --git a/src/rocketpool-daemon/watchtower/dissolve-timed-out-minipools.go b/src/rocketpool-daemon/watchtower/dissolve-timed-out-minipools.go index 4010baad7..2033a5323 100644 --- a/src/rocketpool-daemon/watchtower/dissolve-timed-out-minipools.go +++ b/src/rocketpool-daemon/watchtower/dissolve-timed-out-minipools.go @@ -37,6 +37,10 @@ type DissolveTimedOutMinipools struct { func NewDissolveTimedOutMinipools(sp *services.ServiceProvider, logger log.ColorLogger) *DissolveTimedOutMinipools { return &DissolveTimedOutMinipools{ sp: sp, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), log: logger, } } @@ -46,11 +50,7 @@ func (t *DissolveTimedOutMinipools) Run(state *state.NetworkState) error { // Log t.log.Println("Checking for timed out minipools to dissolve...") - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() + // Update contract bindings var err error t.mpMgr, err = minipool.NewMinipoolManager(t.rp) if err != nil { @@ -72,7 +72,7 @@ func (t *DissolveTimedOutMinipools) Run(state *state.NetworkState) error { // Dissolve minipools for _, mp := range minipools { if err := t.dissolveMinipool(mp); err != nil { - t.log.Println(fmt.Errorf("Could not dissolve minipool %s: %w", mp.Common().Address.Hex(), err)) + t.log.Println(fmt.Errorf("error dissolving minipool %s: %w", mp.Common().Address.Hex(), err)) } } diff --git a/src/rocketpool-daemon/watchtower/finalize-pdao-proposals.go b/src/rocketpool-daemon/watchtower/finalize-pdao-proposals.go index 0f53a25c7..099e9a36d 100644 --- a/src/rocketpool-daemon/watchtower/finalize-pdao-proposals.go +++ b/src/rocketpool-daemon/watchtower/finalize-pdao-proposals.go @@ -32,6 +32,10 @@ type FinalizePdaoProposals struct { func NewFinalizePdaoProposals(sp *services.ServiceProvider, logger log.ColorLogger) *FinalizePdaoProposals { return &FinalizePdaoProposals{ sp: sp, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + ec: sp.GetEthClient(), + rp: sp.GetRocketPool(), log: logger, } } @@ -41,12 +45,6 @@ func (t *FinalizePdaoProposals) Run(state *state.NetworkState) error { // Log t.log.Println("Checking for vetoable proposals to finalize...") - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - // Get timed out minipools propIDs := t.getFinalizableProposals(state) if len(propIDs) == 0 { @@ -59,7 +57,7 @@ func (t *FinalizePdaoProposals) Run(state *state.NetworkState) error { // Finalize proposals for _, propID := range propIDs { if err := t.finalizeProposal(propID); err 
!= nil { - t.log.Println(fmt.Errorf("Could not finalize proposal %d: %w", propID, err)) + t.log.Println(fmt.Errorf("error finalizing proposal %d: %w", propID, err)) } } diff --git a/src/rocketpool-daemon/watchtower/generate-rewards-tree.go b/src/rocketpool-daemon/watchtower/generate-rewards-tree.go index ada0a5928..77f31e08c 100644 --- a/src/rocketpool-daemon/watchtower/generate-rewards-tree.go +++ b/src/rocketpool-daemon/watchtower/generate-rewards-tree.go @@ -49,6 +49,10 @@ func NewGenerateRewardsTree(ctx context.Context, sp *services.ServiceProvider, l sp: sp, log: logger, errLog: errorLogger, + cfg: sp.GetConfig(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), + bc: sp.GetBeaconClient(), lock: lock, isRunning: false, } @@ -56,11 +60,6 @@ func NewGenerateRewardsTree(ctx context.Context, sp *services.ServiceProvider, l // Check for generation requests func (t *GenerateRewardsTree) Run() error { - // Get services - t.cfg = t.sp.GetConfig() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() t.log.Println("Checking for manual rewards tree generation requests...") // Check if rewards generation is already running @@ -79,10 +78,10 @@ func (t *GenerateRewardsTree) Run() error { t.log.Println("Watchtower storage directory doesn't exist, creating...") err = os.Mkdir(requestDir, 0755) if err != nil { - return fmt.Errorf("Error creating watchtower storage directory: %w", err) + return fmt.Errorf("error creating watchtower storage directory: %w", err) } } else if err != nil { - return fmt.Errorf("Error enumerating files in watchtower storage directory: %w", err) + return fmt.Errorf("error enumerating files in watchtower storage directory: %w", err) } for _, file := range files { @@ -92,14 +91,14 @@ func (t *GenerateRewardsTree) Run() error { indexString := strings.TrimSuffix(filename, config.RegenerateRewardsTreeRequestSuffix) index, err := strconv.ParseUint(indexString, 0, 64) if err != nil { - return fmt.Errorf("Error parsing index from [%s]: %w", filename, err) + return fmt.Errorf("error parsing index from [%s]: %w", filename, err) } // Delete the file path := filepath.Join(requestDir, filename) err = os.Remove(path) if err != nil { - return fmt.Errorf("Error removing request file [%s]: %w", path, err) + return fmt.Errorf("error removing request file [%s]: %w", path, err) } // Generate the rewards tree @@ -171,12 +170,12 @@ func (t *GenerateRewardsTree) generateRewardsTree(index uint64) { t.log.Printlnf("%s Primary EC cannot retrieve state for historical block %d, using archive EC [%s]", generationPrefix, elBlockHeader.Number.Uint64(), archiveEcUrl) ec, err := ethclient.Dial(archiveEcUrl) if err != nil { - t.handleError(fmt.Errorf("Error connecting to archive EC: %w", err)) + t.handleError(fmt.Errorf("error connecting to archive EC: %w", err)) return } client, err = rocketpool.NewRocketPool(ec, rs.StorageAddress, rs.MulticallAddress, rs.BalanceBatcherAddress) if err != nil { - t.handleError(fmt.Errorf("%s Error creating Rocket Pool client connected to archive EC: %w", err)) + t.handleError(fmt.Errorf("%s Error creating Rocket Pool client connected to archive EC: %w", generationPrefix, err)) return } @@ -186,18 +185,18 @@ func (t *GenerateRewardsTree) generateRewardsTree(index uint64) { return nil }, opts) if err != nil { - t.handleError(fmt.Errorf("%s Error verifying rETH address with Archive EC: %w", err)) + t.handleError(fmt.Errorf("%s error verifying rETH address with Archive EC: %w", generationPrefix, err)) return } // Create the state manager 
with the archive EC stateManager, err = state.NewNetworkStateManager(t.ctx, client, t.cfg, ec, t.bc, &t.log) if err != nil { - t.handleError(fmt.Errorf("%s Error creating new NetworkStateManager with ARchive EC: %w", err)) + t.handleError(fmt.Errorf("%s error creating new NetworkStateManager with ARchive EC: %w", generationPrefix, err)) return } } else { // No archive node specified - t.handleError(fmt.Errorf("***ERROR*** Primary EC cannot retrieve state for historical block %d and the Archive EC is not specified.", elBlockHeader.Number.Uint64())) + t.handleError(fmt.Errorf("***ERROR*** Primary EC cannot retrieve state for historical block %d and the Archive EC is not specified", elBlockHeader.Number.Uint64())) return } @@ -206,7 +205,7 @@ func (t *GenerateRewardsTree) generateRewardsTree(index uint64) { // Sanity check the rETH address to make sure the client is working right if address != rs.RethAddress { - t.handleError(fmt.Errorf("***ERROR*** Your Primary EC provided %s as the rETH address, but it should have been %s!", address.Hex(), rs.RethAddress.Hex())) + t.handleError(fmt.Errorf("***ERROR*** Your Primary EC provided %s as the rETH address, but it should have been %s", address.Hex(), rs.RethAddress.Hex())) return } diff --git a/src/rocketpool-daemon/watchtower/metrics-exporter.go b/src/rocketpool-daemon/watchtower/metrics-exporter.go index d969d4b1f..450369627 100644 --- a/src/rocketpool-daemon/watchtower/metrics-exporter.go +++ b/src/rocketpool-daemon/watchtower/metrics-exporter.go @@ -51,7 +51,7 @@ func runMetricsServer(sp *services.ServiceProvider, logger log.ColorLogger, scru }) err := http.ListenAndServe(fmt.Sprintf("%s:%d", metricsAddress, metricsPort), nil) if err != nil { - return fmt.Errorf("Error running HTTP server: %w", err) + return fmt.Errorf("error running HTTP server: %w", err) } return nil diff --git a/src/rocketpool-daemon/watchtower/respond-challenges.go b/src/rocketpool-daemon/watchtower/respond-challenges.go index 2704aae05..9e3de08a3 100644 --- a/src/rocketpool-daemon/watchtower/respond-challenges.go +++ b/src/rocketpool-daemon/watchtower/respond-challenges.go @@ -4,7 +4,9 @@ import ( "fmt" "github.com/rocket-pool/node-manager-core/eth" + "github.com/rocket-pool/node-manager-core/node/wallet" "github.com/rocket-pool/rocketpool-go/dao/oracle" + "github.com/rocket-pool/rocketpool-go/rocketpool" "github.com/rocket-pool/node-manager-core/utils/log" "github.com/rocket-pool/smartnode/rocketpool-daemon/common/gas" @@ -12,11 +14,15 @@ import ( "github.com/rocket-pool/smartnode/rocketpool-daemon/common/state" "github.com/rocket-pool/smartnode/rocketpool-daemon/common/tx" "github.com/rocket-pool/smartnode/rocketpool-daemon/watchtower/utils" + "github.com/rocket-pool/smartnode/shared/config" ) // Respond to challenges task type RespondChallenges struct { sp *services.ServiceProvider + cfg *config.SmartNodeConfig + w *wallet.Wallet + rp *rocketpool.RocketPool log log.ColorLogger } @@ -24,27 +30,26 @@ type RespondChallenges struct { func NewRespondChallenges(sp *services.ServiceProvider, logger log.ColorLogger, m *state.NetworkStateManager) *RespondChallenges { return &RespondChallenges{ sp: sp, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), log: logger, } } // Respond to challenges func (t *RespondChallenges) Run() error { - // Get services - cfg := t.sp.GetConfig() - w := t.sp.GetWallet() - rp := t.sp.GetRocketPool() - nodeAddress, _ := w.GetAddress() + nodeAddress, _ := t.w.GetAddress() // Log t.log.Println("Checking for challenges to respond 
to...") - member, err := oracle.NewOracleDaoMember(rp, nodeAddress) + member, err := oracle.NewOracleDaoMember(t.rp, nodeAddress) if err != nil { return fmt.Errorf("error creating Oracle DAO member binding: %w", err) } // Check for active challenges - err = rp.Query(nil, nil, member.IsChallenged) + err = t.rp.Query(nil, nil, member.IsChallenged) if err != nil { return fmt.Errorf("error checking if member is challenged: %w", err) } @@ -56,13 +61,13 @@ func (t *RespondChallenges) Run() error { t.log.Printlnf("Node %s has an active challenge against it, responding...", nodeAddress.Hex()) // Get transactor - opts, err := w.GetTransactor() + opts, err := t.w.GetTransactor() if err != nil { return err } // Create an oDAO manager - odaoMgr, err := oracle.NewOracleDaoManager(rp) + odaoMgr, err := oracle.NewOracleDaoManager(t.rp) if err != nil { return fmt.Errorf("error creating Oracle DAO manager binding: %w", err) } @@ -77,18 +82,18 @@ func (t *RespondChallenges) Run() error { } // Print the gas info - maxFee := eth.GweiToWei(utils.GetWatchtowerMaxFee(cfg)) + maxFee := eth.GweiToWei(utils.GetWatchtowerMaxFee(t.cfg)) if !gas.PrintAndCheckGasInfo(txInfo.SimulationResult, false, 0, &t.log, maxFee, 0) { return nil } // Set the gas settings opts.GasFeeCap = maxFee - opts.GasTipCap = eth.GweiToWei(utils.GetWatchtowerPrioFee(cfg)) + opts.GasTipCap = eth.GweiToWei(utils.GetWatchtowerPrioFee(t.cfg)) opts.GasLimit = txInfo.SimulationResult.SafeGasLimit // Print TX info and wait for it to be included in a block - err = tx.PrintAndWaitForTransaction(cfg, rp, &t.log, txInfo, opts) + err = tx.PrintAndWaitForTransaction(t.cfg, t.rp, &t.log, txInfo, opts) if err != nil { return err } diff --git a/src/rocketpool-daemon/watchtower/submit-network-balances.go b/src/rocketpool-daemon/watchtower/submit-network-balances.go index 34d6b1699..dc6bf9e4b 100644 --- a/src/rocketpool-daemon/watchtower/submit-network-balances.go +++ b/src/rocketpool-daemon/watchtower/submit-network-balances.go @@ -78,6 +78,11 @@ func NewSubmitNetworkBalances(ctx context.Context, sp *services.ServiceProvider, sp: sp, log: &logger, errLog: &errorLogger, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + ec: sp.GetEthClient(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), lock: lock, isRunning: false, } @@ -196,14 +201,7 @@ func (t *SubmitNetworkBalances) Run(state *state.NetworkState) error { t.isRunning = true t.lock.Unlock() - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() nodeAddress, _ := t.w.GetAddress() - logPrefix := "[Balance Report]" t.log.Printlnf("%s Starting balance report in a separate thread.", logPrefix) diff --git a/src/rocketpool-daemon/watchtower/submit-rewards-tree-rolling.go b/src/rocketpool-daemon/watchtower/submit-rewards-tree-rolling.go index fecb1024f..65d3134bf 100644 --- a/src/rocketpool-daemon/watchtower/submit-rewards-tree-rolling.go +++ b/src/rocketpool-daemon/watchtower/submit-rewards-tree-rolling.go @@ -112,6 +112,11 @@ func NewSubmitRewardsTree_Rolling(ctx context.Context, sp *services.ServiceProvi sp: sp, log: logger, errLog: errorLogger, + cfg: cfg, + w: sp.GetWallet(), + ec: sp.GetEthClient(), + rp: sp.GetRocketPool(), + bc: bc, stateMgr: stateMgr, genesisTime: genesisTime, logPrefix: logPrefix, @@ -157,12 +162,7 @@ func (t *SubmitRewardsTree_Rolling) Run(headState *state.NetworkState) error { t.lock.Unlock() t.log.Printlnf("%s Running record update in a separate thread.", t.logPrefix) - // Get 
services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() + // Update contract bindings nodeAddress, _ := t.w.GetAddress() var err error t.rewardsPool, err = rewards.NewRewardsPool(t.rp) @@ -529,11 +529,11 @@ func (t *SubmitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, stat // Generate the rewards file treegen, err := rprewards.NewTreeGenerator(&t.log, t.logPrefix, rp, t.cfg, t.bc, currentIndex, startTime, endTime, snapshotBeaconBlock, snapshotElBlockHeader, uint64(intervalsPassed), state, t.recordMgr.Record) if err != nil { - return fmt.Errorf("Error creating Merkle tree generator: %w", err) + return fmt.Errorf("error creating Merkle tree generator: %w", err) } rewardsFile, err := treegen.GenerateTree(t.ctx) if err != nil { - return fmt.Errorf("Error generating Merkle tree: %w", err) + return fmt.Errorf("error generating Merkle tree: %w", err) } for address, network := range rewardsFile.GetHeader().InvalidNetworkNodes { t.printMessage(fmt.Sprintf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network)) @@ -546,13 +546,13 @@ func (t *SubmitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, stat ) err = localMinipoolPerformanceFile.Write() if err != nil { - return fmt.Errorf("Error serializing minipool performance file into JSON: %w", err) + return fmt.Errorf("error serializing minipool performance file into JSON: %w", err) } if nodeTrusted { minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting the CID for file %s: %w", compressedMinipoolPerformancePath, err) + return fmt.Errorf("error getting the CID for file %s: %w", compressedMinipoolPerformancePath, err) } t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid)) rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String()) @@ -571,19 +571,19 @@ func (t *SubmitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, stat // Write the rewards tree to disk err = localRewardsFile.Write() if err != nil { - return fmt.Errorf("Error saving rewards tree file to %s: %w", rewardsTreePath, err) + return fmt.Errorf("error saving rewards tree file to %s: %w", rewardsTreePath, err) } if nodeTrusted { cid, err := localRewardsFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err) + return fmt.Errorf("error getting CID for file %s: %w", compressedRewardsTreePath, err) } t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) // Submit to the contracts err = t.submitRewardsSnapshot(big.NewInt(int64(currentIndex)), snapshotBeaconBlock, elBlockIndex, rewardsFile.GetHeader(), cid.String(), big.NewInt(int64(intervalsPassed))) if err != nil { - return fmt.Errorf("Error submitting rewards snapshot: %w", err) + return fmt.Errorf("error submitting rewards snapshot: %w", err) } t.printMessage(fmt.Sprintf("Successfully submitted rewards snapshot for interval %d.", currentIndex)) @@ -598,7 +598,7 @@ func (t *SubmitRewardsTree_Rolling) generateTree(rp *rocketpool.RocketPool, stat func (t *SubmitRewardsTree_Rolling) submitRewardsSnapshot(index *big.Int, consensusBlock uint64, executionBlock uint64, rewardsFileHeader *sharedtypes.RewardsFileHeader, cid string, intervalsPassed *big.Int) error { treeRootBytes, err := 
hex.DecodeString(nmc_utils.RemovePrefix(rewardsFileHeader.MerkleRoot)) if err != nil { - return fmt.Errorf("Error decoding merkle root: %w", err) + return fmt.Errorf("error decoding merkle root: %w", err) } treeRoot := common.BytesToHash(treeRootBytes) diff --git a/src/rocketpool-daemon/watchtower/submit-rewards-tree-stateless.go b/src/rocketpool-daemon/watchtower/submit-rewards-tree-stateless.go index bc1575248..1ae68cf0f 100644 --- a/src/rocketpool-daemon/watchtower/submit-rewards-tree-stateless.go +++ b/src/rocketpool-daemon/watchtower/submit-rewards-tree-stateless.go @@ -61,6 +61,11 @@ func NewSubmitRewardsTree_Stateless(ctx context.Context, sp *services.ServicePro sp: sp, log: &logger, errLog: &errorLogger, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), + bc: sp.GetBeaconClient(), lock: lock, isRunning: false, generationPrefix: "[Merkle Tree]", @@ -145,12 +150,7 @@ func (t *SubmitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network } t.lock.Unlock() - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() + // Refresh contract bindings nodeAddress, _ := t.w.GetAddress() t.rewardsPool, err = rewards.NewRewardsPool(t.rp) if err != nil { @@ -193,7 +193,7 @@ func (t *SubmitRewardsTree_Stateless) Run(nodeTrusted bool, state *state.Network // Save the compressed file and get the CID for it cid, err := localRewardsFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedRewardsTreePath, err) + return fmt.Errorf("error getting CID for file %s: %w", compressedRewardsTreePath, err) } t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) @@ -328,7 +328,7 @@ func (t *SubmitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool if nodeTrusted { minipoolPerformanceCid, err := localMinipoolPerformanceFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting CID for file %s: %w", compressedMinipoolPerformancePath, err) + return fmt.Errorf("error getting CID for file %s: %w", compressedMinipoolPerformancePath, err) } t.printMessage(fmt.Sprintf("Calculated minipool performance CID: %s", minipoolPerformanceCid)) rewardsFile.SetMinipoolPerformanceFileCID(minipoolPerformanceCid.String()) @@ -354,7 +354,7 @@ func (t *SubmitRewardsTree_Stateless) generateTreeImpl(rp *rocketpool.RocketPool // Save the compressed file and get the CID for it cid, err := localRewardsFile.CreateCompressedFileAndCid() if err != nil { - return fmt.Errorf("Error getting CID for file %s : %w", rewardsTreePath, err) + return fmt.Errorf("error getting CID for file %s : %w", rewardsTreePath, err) } t.printMessage(fmt.Sprintf("Calculated rewards tree CID: %s", cid)) @@ -478,7 +478,7 @@ func (t *SubmitRewardsTree_Stateless) getSnapshotConsensusBlock(endTime time.Tim // Check if the required epoch is finalized yet if beaconHead.FinalizedEpoch < requiredEpoch { - return 0, 0, fmt.Errorf("Snapshot end time = %s, slot (epoch) = %d (%d)... waiting until epoch %d is finalized (currently %d).", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch) + return 0, 0, fmt.Errorf("snapshot end time = %s, slot (epoch) = %d (%d)... 
waiting until epoch %d is finalized (currently %d)", endTime, targetSlot, targetSlotEpoch, requiredEpoch, beaconHead.FinalizedEpoch) } // Get the first successful block diff --git a/src/rocketpool-daemon/watchtower/submit-rpl-price.go b/src/rocketpool-daemon/watchtower/submit-rpl-price.go index c64103415..8fe0cbe70 100644 --- a/src/rocketpool-daemon/watchtower/submit-rpl-price.go +++ b/src/rocketpool-daemon/watchtower/submit-rpl-price.go @@ -39,11 +39,6 @@ const ( twapNumberOfSeconds uint32 = 60 * 60 * 12 // 12 hours ) -type poolObserveResponse struct { - TickCumulatives []*big.Int `abi:"tickCumulatives"` - SecondsPerLiquidityCumulativeX128s []*big.Int `abi:"secondsPerLiquidityCumulativeX128s"` -} - // Submit RPL price task type SubmitRplPrice struct { ctx context.Context @@ -67,6 +62,11 @@ func NewSubmitRplPrice(ctx context.Context, sp *services.ServiceProvider, logger sp: sp, log: logger, errLog: errorLogger, + cfg: sp.GetConfig(), + ec: sp.GetEthClient(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + bc: sp.GetBeaconClient(), lock: lock, } } @@ -191,14 +191,7 @@ func (t *SubmitRplPrice) Run(state *state.NetworkState) error { t.isRunning = true t.lock.Unlock() - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() nodeAddress, _ := t.w.GetAddress() - logPrefix := "[Price Report]" t.log.Printlnf("%s Starting price report in a separate thread.", logPrefix) @@ -492,7 +485,7 @@ func (t *SubmitRplPrice) updateL2Prices(state *state.NetworkState) error { // Get transactor opts, err := t.w.GetTransactor() if err != nil { - return fmt.Errorf("Failed getting transactor: %q", err) + return fmt.Errorf("failed getting transactor: %q", err) } // Check if any rates are stale @@ -804,7 +797,7 @@ func (t *SubmitRplPrice) updateScroll(cfg *config.SmartNodeConfig, rp *rocketpoo return nil }, nil) if err != nil { - return fmt.Errorf("Error getting cross domain message fee for Scroll: %w", err) + return fmt.Errorf("error getting cross domain message fee for Scroll: %w", err) } opts.Value = messageFee diff --git a/src/rocketpool-daemon/watchtower/submit-scrub-minipools.go b/src/rocketpool-daemon/watchtower/submit-scrub-minipools.go index 713f303ce..0bae58d17 100644 --- a/src/rocketpool-daemon/watchtower/submit-scrub-minipools.go +++ b/src/rocketpool-daemon/watchtower/submit-scrub-minipools.go @@ -90,6 +90,11 @@ func NewSubmitScrubMinipools(sp *services.ServiceProvider, logger log.ColorLogge sp: sp, log: logger, errLog: errorLogger, + cfg: sp.GetConfig(), + w: sp.GetWallet(), + rp: sp.GetRocketPool(), + ec: sp.GetEthClient(), + bc: sp.GetBeaconClient(), coll: coll, lock: lock, isRunning: false, @@ -118,12 +123,7 @@ func (t *SubmitScrubMinipools) Run(state *state.NetworkState) error { checkPrefix := "[Minipool Scrub]" t.log.Printlnf("%s Starting scrub check in a separate thread.", checkPrefix) - // Get services - t.cfg = t.sp.GetConfig() - t.w = t.sp.GetWallet() - t.rp = t.sp.GetRocketPool() - t.ec = t.sp.GetEthClient() - t.bc = t.sp.GetBeaconClient() + // Update contract bindings var err error t.mpMgr, err = minipool.NewMinipoolManager(t.rp) if err != nil {
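
Note on the recurring change in the hunks above: most of the node and watchtower tasks (DefendPdaoProps, DistributeMinipools, PromoteMinipools, ReduceBonds, StakePrelaunchMinipools, CancelBondReductions, and the others) move their service lookups out of Run() and into the constructor, and switch the log field from a value to a *log.ColorLogger. The sketch below is a minimal, self-contained illustration of that construction-time pattern using hypothetical stand-in types (ServiceProvider, Config, Wallet, Task); it is not the smartnode API, and getAutoTxInfo is only referenced in a comment as a placeholder.

// Illustrative sketch only (not smartnode code): stand-in types showing the
// "resolve dependencies once in the constructor" pattern applied across the
// daemon tasks in this diff. All names here are hypothetical.
package main

import (
	"fmt"
	"math/big"
)

// Stand-ins for the service provider and config the real tasks consume.
type Config struct{ AutoTxGasThreshold float64 }
type Wallet struct{}
type ServiceProvider struct {
	cfg *Config
	w   *Wallet
}

func (sp *ServiceProvider) GetConfig() *Config { return sp.cfg }
func (sp *ServiceProvider) GetWallet() *Wallet { return sp.w }

// Task captures everything it needs at construction time, so Run() only
// performs per-invocation work (mirrors the NewDistributeMinipools-style
// constructors introduced above).
type Task struct {
	sp           *ServiceProvider
	cfg          *Config
	w            *Wallet
	gasThreshold float64
	maxFee       *big.Int
}

func NewTask(sp *ServiceProvider) *Task {
	cfg := sp.GetConfig()
	return &Task{
		sp:           sp,
		cfg:          cfg,
		w:            sp.GetWallet(),
		gasThreshold: cfg.AutoTxGasThreshold,
		maxFee:       big.NewInt(0), // placeholder for a getAutoTxInfo-style lookup
	}
}

func (t *Task) Run() error {
	if t.gasThreshold == 0 {
		return nil // auto-tx disabled; nothing to do this round
	}
	fmt.Println("running task with gas threshold", t.gasThreshold)
	return nil
}

func main() {
	t := NewTask(&ServiceProvider{cfg: &Config{AutoTxGasThreshold: 100}, w: &Wallet{}})
	_ = t.Run()
}

With this shape, Run() no longer needs the repeated "t.cfg = t.sp.GetConfig()" blocks that the hunks above delete, and callers that share the task struct see one consistent set of bindings.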
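
The other repeated change is lowercasing error strings ("Error getting Beacon head" becomes "error getting Beacon head") and dropping trailing punctuation, following the usual Go convention (staticcheck ST1005) that error messages compose when wrapped. The standalone snippet below illustrates the effect; the function names are hypothetical and not taken from the smartnode codebase.

// Minimal illustration (assumed context, not smartnode code) of why wrapped
// error strings are kept lower-case and unpunctuated.
package main

import (
	"errors"
	"fmt"
)

func getBeaconHead() error { return errors.New("connection refused") }

func checkHead() error {
	if err := getBeaconHead(); err != nil {
		// lower-case, no trailing period: reads cleanly when wrapped again
		return fmt.Errorf("error getting Beacon head: %w", err)
	}
	return nil
}

func main() {
	if err := checkHead(); err != nil {
		// prints: running watchtower task: error getting Beacon head: connection refused
		fmt.Println("running watchtower task:", err)
	}
}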